-rw-r--r--Documentation/ABI/stable/sysfs-devices14
-rw-r--r--Documentation/ABI/testing/sysfs-bus-fsl-mc21
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio18
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector36
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8125
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-cros-ec18
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac8
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-light-isl2901819
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-light-tsl2583 (renamed from drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583)14
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp45318
-rw-r--r--Documentation/ABI/testing/sysfs-class-fpga-bridge11
-rw-r--r--Documentation/ABI/testing/sysfs-class-mei16
-rw-r--r--Documentation/ABI/testing/sysfs-devices-deferred_probe12
-rw-r--r--Documentation/ABI/testing/sysfs-platform-phy-rcar-gen3-usb215
-rw-r--r--Documentation/ABI/testing/sysfs-platform-sst-atom17
-rw-r--r--Documentation/admin-guide/vga-softcursor.rst18
-rw-r--r--Documentation/device-mapper/delay.txt4
-rw-r--r--Documentation/device-mapper/dm-crypt.txt27
-rw-r--r--Documentation/device-mapper/dm-raid.txt4
-rw-r--r--Documentation/device-mapper/linear.txt8
-rw-r--r--Documentation/device-mapper/striped.txt4
-rw-r--r--Documentation/device-mapper/switch.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/cpu-capacity.txt236
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.txt10
-rw-r--r--Documentation/devicetree/bindings/display/ht16k33.txt42
-rw-r--r--Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt3
-rw-r--r--Documentation/devicetree/bindings/fpga/fpga-region.txt494
-rw-r--r--Documentation/devicetree/bindings/hwmon/mcp3021.txt21
-rw-r--r--Documentation/devicetree/bindings/hwmon/tmp108.txt14
-rw-r--r--Documentation/devicetree/bindings/i2c/trivial-devices.txt9
-rw-r--r--Documentation/devicetree/bindings/iio/adc/envelope-detector.txt54
-rw-r--r--Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt83
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt2
-rw-r--r--Documentation/devicetree/bindings/iio/dac/dpot-dac.txt41
-rw-r--r--Documentation/devicetree/bindings/iio/dac/mcp4725.txt35
-rw-r--r--Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt46
-rw-r--r--Documentation/devicetree/bindings/iio/humidity/hts221.txt22
-rw-r--r--Documentation/devicetree/bindings/iio/light/isl29018.txt28
-rw-r--r--Documentation/devicetree/bindings/iio/light/tsl2583.txt26
-rw-r--r--Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt30
-rw-r--r--Documentation/devicetree/bindings/iio/st-sensors.txt1
-rw-r--r--Documentation/devicetree/bindings/nvmem/brcm,ocotp.txt17
-rw-r--r--Documentation/devicetree/bindings/nvmem/lpc1850-otp.txt20
-rw-r--r--Documentation/devicetree/bindings/phy/meson8b-usb2-phy.txt (renamed from Documentation/devicetree/bindings/phy/meson-usb2-phy.txt)6
-rw-r--r--Documentation/devicetree/bindings/scsi/hisilicon-sas.txt1
-rw-r--r--Documentation/devicetree/bindings/sound/axentia,tse850-pcm5142.txt88
-rw-r--r--Documentation/devicetree/bindings/sound/cs35l34.txt64
-rw-r--r--Documentation/devicetree/bindings/sound/cs42l42.txt110
-rw-r--r--Documentation/devicetree/bindings/sound/davinci-mcbsp.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt5
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt85
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-digital.txt20
-rw-r--r--Documentation/devicetree/bindings/sound/rt5514.txt3
-rw-r--r--Documentation/devicetree/bindings/sound/rt5663.txt6
-rwxr-xr-xDocumentation/devicetree/bindings/sound/rt5665.txt68
-rw-r--r--Documentation/devicetree/bindings/sound/samsung,tm2-audio.txt38
-rw-r--r--Documentation/devicetree/bindings/sound/sun4i-codec.txt65
-rw-r--r--Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt16
-rw-r--r--Documentation/devicetree/bindings/sound/tlv320aic31xx.txt1
-rw-r--r--Documentation/devicetree/bindings/sound/wm8580.txt4
-rw-r--r--Documentation/devicetree/bindings/spi/sh-msiof.txt1
-rw-r--r--Documentation/devicetree/bindings/spi/spi-armada-3700.txt25
-rw-r--r--Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt19
-rw-r--r--Documentation/devicetree/bindings/spi/spi-sun6i.txt25
-rw-r--r--Documentation/devicetree/bindings/ufs/ufs-qcom.txt7
-rw-r--r--Documentation/devicetree/bindings/usb/da8xx-usb.txt43
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.txt5
-rw-r--r--Documentation/devicetree/bindings/usb/mt8173-mtu3.txt87
-rw-r--r--Documentation/devicetree/bindings/usb/mt8173-xhci.txt54
-rw-r--r--Documentation/devicetree/bindings/usb/ohci-da8xx.txt23
-rw-r--r--Documentation/devicetree/bindings/usb/s3c2410-usb.txt22
-rw-r--r--Documentation/devicetree/bindings/usb/usb-xhci.txt1
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt5
-rw-r--r--Documentation/dontdiff1
-rw-r--r--Documentation/filesystems/configfs/configfs.txt2
-rw-r--r--Documentation/filesystems/dax.txt22
-rw-r--r--Documentation/filesystems/ext4.txt13
-rw-r--r--Documentation/fpga/fpga-mgr.txt43
-rw-r--r--Documentation/hwmon/hwmon-kernel-api.txt58
-rw-r--r--Documentation/hwmon/tc65431
-rw-r--r--Documentation/hwmon/tmp10836
-rw-r--r--Documentation/livepatch/livepatch.txt2
-rw-r--r--Documentation/trace/intel_th.txt22
-rw-r--r--Documentation/trace/stm.txt37
-rw-r--r--Documentation/virtual/kvm/00-INDEX2
-rw-r--r--Documentation/virtual/kvm/api.txt5
-rw-r--r--Documentation/virtual/kvm/halt-polling.txt127
-rw-r--r--MAINTAINERS121
-rw-r--r--arch/arm/boot/dts/rk3036.dtsi1
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi1
-rw-r--r--arch/arm/boot/dts/rk3xxx.dtsi1
-rw-r--r--arch/arm/include/asm/xen/hypercall.h88
-rw-r--r--arch/arm/include/asm/xen/hypervisor.h40
-rw-r--r--arch/arm/include/asm/xen/interface.h86
-rw-r--r--arch/arm/include/asm/xen/page-coherent.h99
-rw-r--r--arch/arm/include/asm/xen/page.h123
-rw-r--r--arch/arm/include/uapi/asm/kvm.h2
-rw-r--r--arch/arm/kvm/Kconfig1
-rw-r--r--arch/arm/kvm/Makefile1
-rw-r--r--arch/arm/kvm/arm.c6
-rw-r--r--arch/arm/xen/enlighten.c3
-rw-r--r--arch/arm/xen/mm.c1
-rw-r--r--arch/arm64/Kconfig12
-rw-r--r--arch/arm64/Kconfig.debug35
-rw-r--r--arch/arm64/Makefile10
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220.dtsi1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173-evb.dts63
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173.dtsi29
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368.dtsi1
-rw-r--r--arch/arm64/configs/defconfig1
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/assembler.h48
-rw-r--r--arch/arm64/include/asm/cacheflush.h7
-rw-r--r--arch/arm64/include/asm/cpucaps.h3
-rw-r--r--arch/arm64/include/asm/cpufeature.h30
-rw-r--r--arch/arm64/include/asm/current.h22
-rw-r--r--arch/arm64/include/asm/debug-monitors.h3
-rw-r--r--arch/arm64/include/asm/efi.h26
-rw-r--r--arch/arm64/include/asm/elf.h12
-rw-r--r--arch/arm64/include/asm/futex.h17
-rw-r--r--arch/arm64/include/asm/hw_breakpoint.h6
-rw-r--r--arch/arm64/include/asm/kernel-pgtable.h7
-rw-r--r--arch/arm64/include/asm/mmu.h3
-rw-r--r--arch/arm64/include/asm/mmu_context.h53
-rw-r--r--arch/arm64/include/asm/neon.h3
-rw-r--r--arch/arm64/include/asm/opcodes.h5
-rw-r--r--arch/arm64/include/asm/percpu.h18
-rw-r--r--arch/arm64/include/asm/perf_event.h2
-rw-r--r--arch/arm64/include/asm/probes.h21
-rw-r--r--arch/arm64/include/asm/ptdump.h22
-rw-r--r--arch/arm64/include/asm/ptrace.h8
-rw-r--r--arch/arm64/include/asm/smp.h14
-rw-r--r--arch/arm64/include/asm/stack_pointer.h9
-rw-r--r--arch/arm64/include/asm/suspend.h2
-rw-r--r--arch/arm64/include/asm/sysreg.h45
-rw-r--r--arch/arm64/include/asm/thread_info.h40
-rw-r--r--arch/arm64/include/asm/uaccess.h175
-rw-r--r--arch/arm64/include/asm/uprobes.h36
-rw-r--r--arch/arm64/include/asm/xen/hypercall.h2
-rw-r--r--arch/arm64/include/asm/xen/hypervisor.h2
-rw-r--r--arch/arm64/include/asm/xen/interface.h2
-rw-r--r--arch/arm64/include/asm/xen/page-coherent.h2
-rw-r--r--arch/arm64/include/asm/xen/page.h2
-rw-r--r--arch/arm64/kernel/armv8_deprecated.c16
-rw-r--r--arch/arm64/kernel/asm-offsets.c13
-rw-r--r--arch/arm64/kernel/cpufeature.c18
-rw-r--r--arch/arm64/kernel/debug-monitors.c40
-rw-r--r--arch/arm64/kernel/efi.c8
-rw-r--r--arch/arm64/kernel/entry.S110
-rw-r--r--arch/arm64/kernel/fpsimd.c14
-rw-r--r--arch/arm64/kernel/head.S30
-rw-r--r--arch/arm64/kernel/hibernate.c4
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c153
-rw-r--r--arch/arm64/kernel/insn.c1
-rw-r--r--arch/arm64/kernel/kgdb.c3
-rw-r--r--arch/arm64/kernel/probes/Makefile2
-rw-r--r--arch/arm64/kernel/probes/decode-insn.c33
-rw-r--r--arch/arm64/kernel/probes/decode-insn.h8
-rw-r--r--arch/arm64/kernel/probes/kprobes.c36
-rw-r--r--arch/arm64/kernel/probes/simulate-insn.c16
-rw-r--r--arch/arm64/kernel/probes/uprobes.c216
-rw-r--r--arch/arm64/kernel/process.c38
-rw-r--r--arch/arm64/kernel/ptrace.c7
-rw-r--r--arch/arm64/kernel/return_address.c1
-rw-r--r--arch/arm64/kernel/setup.c9
-rw-r--r--arch/arm64/kernel/signal.c3
-rw-r--r--arch/arm64/kernel/sleep.S3
-rw-r--r--arch/arm64/kernel/smp.c14
-rw-r--r--arch/arm64/kernel/stacktrace.c7
-rw-r--r--arch/arm64/kernel/suspend.c6
-rw-r--r--arch/arm64/kernel/topology.c223
-rw-r--r--arch/arm64/kernel/traps.c28
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S5
-rw-r--r--arch/arm64/kvm/Kconfig4
-rw-r--r--arch/arm64/kvm/handle_exit.c11
-rw-r--r--arch/arm64/kvm/hyp/hyp-entry.S9
-rw-r--r--arch/arm64/kvm/hyp/switch.c13
-rw-r--r--arch/arm64/kvm/reset.c6
-rw-r--r--arch/arm64/lib/clear_user.S11
-rw-r--r--arch/arm64/lib/copy_from_user.S11
-rw-r--r--arch/arm64/lib/copy_in_user.S11
-rw-r--r--arch/arm64/lib/copy_to_user.S11
-rw-r--r--arch/arm64/mm/Makefile3
-rw-r--r--arch/arm64/mm/cache.S6
-rw-r--r--arch/arm64/mm/context.c7
-rw-r--r--arch/arm64/mm/dma-mapping.c5
-rw-r--r--arch/arm64/mm/dump.c106
-rw-r--r--arch/arm64/mm/fault.c22
-rw-r--r--arch/arm64/mm/flush.c9
-rw-r--r--arch/arm64/mm/hugetlbpage.c22
-rw-r--r--arch/arm64/mm/mmu.c137
-rw-r--r--arch/arm64/mm/proc.S12
-rw-r--r--arch/arm64/mm/ptdump_debugfs.c31
-rw-r--r--arch/arm64/xen/hypercall.S15
-rw-r--r--arch/blackfin/mach-bf561/coreb.c10
-rw-r--r--arch/mips/kernel/asm-offsets.c2
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu-hash.h47
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h1
-rw-r--r--arch/powerpc/include/asm/kvm_host.h27
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h49
-rw-r--r--arch/powerpc/include/asm/mmu.h5
-rw-r--r--arch/powerpc/include/asm/opal.h3
-rw-r--r--arch/powerpc/include/asm/reg.h14
-rw-r--r--arch/powerpc/include/uapi/asm/kvm.h5
-rw-r--r--arch/powerpc/kernel/asm-offsets.c4
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S2
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c63
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c1
-rw-r--r--arch/powerpc/kvm/book3s_hv.c254
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c72
-rw-r--r--arch/powerpc/kvm/book3s_hv_ras.c1
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c223
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c23
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S140
-rw-r--r--arch/powerpc/kvm/powerpc.c16
-rw-r--r--arch/powerpc/kvm/trace_hv.h2
-rw-r--r--arch/powerpc/mm/hash_native_64.c30
-rw-r--r--arch/powerpc/mm/hash_utils_64.c28
-rw-r--r--arch/powerpc/mm/pgtable-radix.c18
-rw-r--r--arch/powerpc/mm/pgtable_64.c34
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S3
-rw-r--r--arch/powerpc/platforms/powernv/opal.c2
-rw-r--r--arch/powerpc/platforms/ps3/htab.c2
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c2
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/boot/compressed/Makefile2
-rw-r--r--arch/s390/boot/compressed/head.S2
-rw-r--r--arch/s390/configs/default_defconfig6
-rw-r--r--arch/s390/configs/gcov_defconfig4
-rw-r--r--arch/s390/configs/performance_defconfig4
-rw-r--r--arch/s390/crypto/prng.c6
-rw-r--r--arch/s390/hypfs/inode.c24
-rw-r--r--arch/s390/include/asm/Kbuild4
-rw-r--r--arch/s390/include/asm/asm-offsets.h1
-rw-r--r--arch/s390/include/asm/atomic.h207
-rw-r--r--arch/s390/include/asm/atomic_ops.h130
-rw-r--r--arch/s390/include/asm/bitops.h62
-rw-r--r--arch/s390/include/asm/cpu_mf.h3
-rw-r--r--arch/s390/include/asm/elf.h6
-rw-r--r--arch/s390/include/asm/facilities_src.h82
-rw-r--r--arch/s390/include/asm/ipl.h2
-rw-r--r--arch/s390/include/asm/lowcore.h5
-rw-r--r--arch/s390/include/asm/pci_clp.h5
-rw-r--r--arch/s390/include/asm/pgalloc.h22
-rw-r--r--arch/s390/include/asm/preempt.h137
-rw-r--r--arch/s390/include/asm/processor.h6
-rw-r--r--arch/s390/include/asm/sclp.h10
-rw-r--r--arch/s390/include/asm/scsw.h6
-rw-r--r--arch/s390/include/asm/smp.h8
-rw-r--r--arch/s390/include/asm/string.h3
-rw-r--r--arch/s390/include/asm/sysinfo.h7
-rw-r--r--arch/s390/include/asm/thread_info.h24
-rw-r--r--arch/s390/include/asm/timex.h41
-rw-r--r--arch/s390/include/asm/topology.h28
-rw-r--r--arch/s390/include/asm/uaccess.h6
-rw-r--r--arch/s390/include/asm/vdso.h2
-rw-r--r--arch/s390/include/uapi/asm/Kbuild5
-rw-r--r--arch/s390/kernel/Makefile65
-rw-r--r--arch/s390/kernel/asm-offsets.c17
-rw-r--r--arch/s390/kernel/compat_signal.c4
-rw-r--r--arch/s390/kernel/early.c50
-rw-r--r--arch/s390/kernel/entry.S51
-rw-r--r--arch/s390/kernel/head.S2
-rw-r--r--arch/s390/kernel/head64.S7
-rw-r--r--arch/s390/kernel/ipl.c7
-rw-r--r--arch/s390/kernel/irq.c2
-rw-r--r--arch/s390/kernel/lgr.c5
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c53
-rw-r--r--arch/s390/kernel/process.c6
-rw-r--r--arch/s390/kernel/ptrace.c14
-rw-r--r--arch/s390/kernel/setup.c22
-rw-r--r--arch/s390/kernel/signal.c14
-rw-r--r--arch/s390/kernel/smp.c32
-rw-r--r--arch/s390/kernel/swsusp.S2
-rw-r--r--arch/s390/kernel/sysinfo.c33
-rw-r--r--arch/s390/kernel/time.c191
-rw-r--r--arch/s390/kernel/topology.c53
-rw-r--r--arch/s390/kernel/vdso32/clock_gettime.S23
-rw-r--r--arch/s390/kernel/vdso32/gettimeofday.S23
-rw-r--r--arch/s390/kernel/vdso64/clock_gettime.S11
-rw-r--r--arch/s390/kernel/vdso64/gettimeofday.S11
-rw-r--r--arch/s390/kernel/vtime.c26
-rw-r--r--arch/s390/kvm/interrupt.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c57
-rw-r--r--arch/s390/lib/mem.S39
-rw-r--r--arch/s390/mm/fault.c1
-rw-r--r--arch/s390/mm/vmem.c9
-rw-r--r--arch/s390/numa/mode_emu.c38
-rw-r--r--arch/s390/numa/toptree.c16
-rw-r--r--arch/s390/pci/pci.c8
-rw-r--r--arch/s390/pci/pci_clp.c3
-rw-r--r--arch/s390/pci/pci_debug.c2
-rw-r--r--arch/s390/pci/pci_dma.c36
-rw-r--r--arch/s390/tools/Makefile2
-rw-r--r--arch/s390/tools/gen_facilities.c76
-rw-r--r--arch/sh/kernel/cpu/Makefile2
-rw-r--r--arch/sh/kernel/cpu/irq/Makefile2
-rw-r--r--arch/x86/boot/compressed/Makefile2
-rw-r--r--arch/x86/include/asm/e820.h12
-rw-r--r--arch/x86/include/asm/kvm_host.h18
-rw-r--r--arch/x86/include/asm/vmx.h37
-rw-r--r--arch/x86/include/uapi/asm/vmx.h5
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c2
-rw-r--r--arch/x86/kvm/cpuid.c26
-rw-r--r--arch/x86/kvm/emulate.c200
-rw-r--r--arch/x86/kvm/hyperv.c2
-rw-r--r--arch/x86/kvm/i8254.c15
-rw-r--r--arch/x86/kvm/i8254.h3
-rw-r--r--arch/x86/kvm/lapic.c212
-rw-r--r--arch/x86/kvm/lapic.h2
-rw-r--r--arch/x86/kvm/mmu.c32
-rw-r--r--arch/x86/kvm/svm.c21
-rw-r--r--arch/x86/kvm/vmx.c1092
-rw-r--r--arch/x86/kvm/x86.c92
-rw-r--r--arch/x86/pci/xen.c4
-rw-r--r--arch/x86/platform/ce4100/ce4100.c2
-rw-r--r--arch/x86/xen/pci-swiotlb-xen.c1
-rw-r--r--arch/x86/xen/setup.c6
-rw-r--r--block/blk-core.c4
-rw-r--r--block/blk-mq-cpumap.c1
-rw-r--r--block/blk-mq.h1
-rw-r--r--block/bsg-lib.c23
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/ata/ahci.c39
-rw-r--r--drivers/ata/ahci_qoriq.c16
-rw-r--r--drivers/ata/libahci.c1
-rw-r--r--drivers/ata/libata-core.c42
-rw-r--r--drivers/ata/libata-scsi.c84
-rw-r--r--drivers/ata/libata.h2
-rw-r--r--drivers/ata/pata_imx.c82
-rw-r--r--drivers/auxdisplay/Kconfig13
-rw-r--r--drivers/auxdisplay/Makefile1
-rw-r--r--drivers/auxdisplay/ht16k33.c563
-rw-r--r--drivers/base/Kconfig2
-rw-r--r--drivers/base/Makefile2
-rw-r--r--drivers/base/base.h15
-rw-r--r--drivers/base/cacheinfo.c138
-rw-r--r--drivers/base/class.c15
-rw-r--r--drivers/base/core.c578
-rw-r--r--drivers/base/dd.c79
-rw-r--r--drivers/base/devcoredump.c10
-rw-r--r--drivers/base/dma-mapping.c4
-rw-r--r--drivers/base/firmware_class.c178
-rw-r--r--drivers/base/memory.c2
-rw-r--r--drivers/base/power/main.c87
-rw-r--r--drivers/base/power/power.h10
-rw-r--r--drivers/base/power/runtime.c174
-rw-r--r--drivers/base/test/Kconfig9
-rw-r--r--drivers/base/test/Makefile1
-rw-r--r--drivers/base/test/test_async_driver_probe.c169
-rw-r--r--drivers/block/cciss_scsi.c72
-rw-r--r--drivers/block/xen-blkback/xenbus.c36
-rw-r--r--drivers/block/xen-blkfront.c81
-rw-r--r--drivers/char/Kconfig3
-rw-r--r--drivers/char/pcmcia/Kconfig11
-rw-r--r--drivers/char/pcmcia/Makefile1
-rw-r--r--drivers/char/pcmcia/scr24x_cs.c373
-rw-r--r--drivers/char/ppdev.c36
-rw-r--r--drivers/char/snsc.c2
-rw-r--r--drivers/char/tile-srom.c3
-rw-r--r--drivers/char/tpm/xen-tpmfront.c8
-rw-r--r--drivers/extcon/extcon-arizona.c8
-rw-r--r--drivers/extcon/extcon-usb-gpio.c169
-rw-r--r--drivers/firmware/efi/arm-runtime.c4
-rw-r--r--drivers/firmware/efi/libstub/Makefile2
-rw-r--r--drivers/fpga/Kconfig39
-rw-r--r--drivers/fpga/Makefile9
-rw-r--r--drivers/fpga/altera-fpga2sdram.c180
-rw-r--r--drivers/fpga/altera-freeze-bridge.c273
-rw-r--r--drivers/fpga/altera-hps2fpga.c222
-rw-r--r--drivers/fpga/fpga-bridge.c395
-rw-r--r--drivers/fpga/fpga-mgr.c97
-rw-r--r--drivers/fpga/fpga-region.c603
-rw-r--r--drivers/fpga/socfpga-a10.c557
-rw-r--r--drivers/fpga/socfpga.c7
-rw-r--r--drivers/fpga/zynq-fpga.c56
-rw-r--r--drivers/hid/Kconfig19
-rw-r--r--drivers/hid/Makefile2
-rw-r--r--drivers/hid/hid-asus.c299
-rw-r--r--drivers/hid/hid-core.c14
-rw-r--r--drivers/hid/hid-cp2112.c205
-rw-r--r--drivers/hid/hid-ids.h19
-rw-r--r--drivers/hid/hid-input.c96
-rw-r--r--drivers/hid/hid-mf.c166
-rw-r--r--drivers/hid/hid-microsoft.c6
-rw-r--r--drivers/hid/hid-multitouch.c88
-rw-r--r--drivers/hid/hid-sensor-hub.c6
-rw-r--r--drivers/hid/hid-sony.c445
-rw-r--r--drivers/hid/hid-udraw-ps3.c474
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c146
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c67
-rw-r--r--drivers/hid/intel-ish-hid/ipc/utils.h64
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c9
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.c6
-rw-r--r--drivers/hid/usbhid/hid-core.c6
-rw-r--r--drivers/hid/usbhid/hid-quirks.c5
-rw-r--r--drivers/hid/wacom.h2
-rw-r--r--drivers/hid/wacom_sys.c92
-rw-r--r--drivers/hid/wacom_wac.c601
-rw-r--r--drivers/hid/wacom_wac.h76
-rw-r--r--drivers/hv/channel.c93
-rw-r--r--drivers/hv/channel_mgmt.c10
-rw-r--r--drivers/hv/connection.c1
-rw-r--r--drivers/hv/hv.c6
-rw-r--r--drivers/hv/hv_balloon.c44
-rw-r--r--drivers/hv/hv_snapshot.c33
-rw-r--r--drivers/hv/hv_util.c9
-rw-r--r--drivers/hv/hyperv_vmbus.h12
-rw-r--r--drivers/hv/ring_buffer.c44
-rw-r--r--drivers/hv/vmbus_drv.c174
-rw-r--r--drivers/hwmon/Kconfig26
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/adm1025.c2
-rw-r--r--drivers/hwmon/adm1026.c26
-rw-r--r--drivers/hwmon/adm9240.c9
-rw-r--r--drivers/hwmon/adt7411.c301
-rw-r--r--drivers/hwmon/adt7462.c12
-rw-r--r--drivers/hwmon/adt7470.c6
-rw-r--r--drivers/hwmon/amc6821.c4
-rw-r--r--drivers/hwmon/coretemp.c321
-rw-r--r--drivers/hwmon/ds620.c2
-rw-r--r--drivers/hwmon/emc2103.c4
-rw-r--r--drivers/hwmon/emc6w201.c8
-rw-r--r--drivers/hwmon/g762.c11
-rw-r--r--drivers/hwmon/hwmon.c79
-rw-r--r--drivers/hwmon/lm85.c3
-rw-r--r--drivers/hwmon/lm87.c136
-rw-r--r--drivers/hwmon/mcp3021.c50
-rw-r--r--drivers/hwmon/nct7802.c8
-rw-r--r--drivers/hwmon/pmbus/adm1275.c20
-rw-r--r--drivers/hwmon/scpi-hwmon.c1
-rw-r--r--drivers/hwmon/smsc47m192.c5
-rw-r--r--drivers/hwmon/tc654.c514
-rw-r--r--drivers/hwmon/tmp108.c469
-rw-r--r--drivers/hwmon/via-cputemp.c77
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c31
-rw-r--r--drivers/hwtracing/coresight/coresight-etm.h5
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c12
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h4
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c9
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c48
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c43
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h2
-rw-r--r--drivers/hwtracing/coresight/coresight.c74
-rw-r--r--drivers/hwtracing/intel_th/core.c28
-rw-r--r--drivers/hwtracing/intel_th/gth.c26
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h4
-rw-r--r--drivers/hwtracing/stm/core.c8
-rw-r--r--drivers/iio/Kconfig2
-rw-r--r--drivers/iio/Makefile2
-rw-r--r--drivers/iio/accel/Kconfig45
-rw-r--r--drivers/iio/accel/Makefile5
-rw-r--r--drivers/iio/accel/da280.c183
-rw-r--r--drivers/iio/accel/da311.c305
-rw-r--r--drivers/iio/accel/dmard10.c266
-rw-r--r--drivers/iio/accel/mma7660.c2
-rw-r--r--drivers/iio/accel/mma8452.c79
-rw-r--r--drivers/iio/accel/sca3000.c1576
-rw-r--r--drivers/iio/accel/st_accel.h1
-rw-r--r--drivers/iio/accel/st_accel_core.c605
-rw-r--r--drivers/iio/accel/st_accel_i2c.c5
-rw-r--r--drivers/iio/accel/st_accel_spi.c1
-rw-r--r--drivers/iio/adc/Kconfig46
-rw-r--r--drivers/iio/adc/Makefile4
-rw-r--r--drivers/iio/adc/ad7766.c330
-rw-r--r--drivers/iio/adc/at91_adc.c28
-rw-r--r--drivers/iio/adc/envelope-detector.c422
-rw-r--r--drivers/iio/adc/max1027.c17
-rw-r--r--drivers/iio/adc/stm32-adc-core.c303
-rw-r--r--drivers/iio/adc/stm32-adc-core.h52
-rw-r--r--drivers/iio/adc/stm32-adc.c518
-rw-r--r--drivers/iio/adc/ti-adc0832.c106
-rw-r--r--drivers/iio/adc/ti-adc161s626.c55
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c148
-rw-r--r--drivers/iio/common/Kconfig1
-rw-r--r--drivers/iio/common/Makefile1
-rw-r--r--drivers/iio/common/cros_ec_sensors/Kconfig22
-rw-r--r--drivers/iio/common/cros_ec_sensors/Makefile6
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c322
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c450
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h175
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c5
-rw-r--r--drivers/iio/counter/104-quad-8.c593
-rw-r--r--drivers/iio/counter/Kconfig24
-rw-r--r--drivers/iio/counter/Makefile7
-rw-r--r--drivers/iio/dac/Kconfig10
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/ad5592r.c2
-rw-r--r--drivers/iio/dac/dpot-dac.c266
-rw-r--r--drivers/iio/dac/mcp4725.c176
-rw-r--r--drivers/iio/gyro/Kconfig18
-rw-r--r--drivers/iio/gyro/Makefile5
-rw-r--r--drivers/iio/gyro/mpu3050-core.c1306
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c124
-rw-r--r--drivers/iio/gyro/mpu3050.h96
-rw-r--r--drivers/iio/gyro/st_gyro_core.c205
-rw-r--r--drivers/iio/humidity/Kconfig24
-rw-r--r--drivers/iio/humidity/Makefile7
-rw-r--r--drivers/iio/humidity/hdc100x.c130
-rw-r--r--drivers/iio/humidity/hts221.h73
-rw-r--r--drivers/iio/humidity/hts221_buffer.c168
-rw-r--r--drivers/iio/humidity/hts221_core.c687
-rw-r--r--drivers/iio/humidity/hts221_i2c.c110
-rw-r--r--drivers/iio/humidity/hts221_spi.c125
-rw-r--r--drivers/iio/humidity/si7020.c11
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c2
-rw-r--r--drivers/iio/industrialio-buffer.c7
-rw-r--r--drivers/iio/industrialio-core.c261
-rw-r--r--drivers/iio/industrialio-trigger.c21
-rw-r--r--drivers/iio/inkern.c123
-rw-r--r--drivers/iio/light/Kconfig19
-rw-r--r--drivers/iio/light/Makefile2
-rw-r--r--drivers/iio/light/isl29018.c (renamed from drivers/staging/iio/light/isl29018.c)159
-rw-r--r--drivers/iio/light/ltr501.c111
-rw-r--r--drivers/iio/light/max44000.c5
-rw-r--r--drivers/iio/light/tsl2583.c913
-rw-r--r--drivers/iio/magnetometer/ak8974.c8
-rw-r--r--drivers/iio/magnetometer/ak8975.c16
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c147
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c376
-rw-r--r--drivers/iio/potentiometer/mcp4531.c104
-rw-r--r--drivers/iio/potentiostat/Kconfig22
-rw-r--r--drivers/iio/potentiostat/Makefile6
-rw-r--r--drivers/iio/potentiostat/lmp91000.c446
-rw-r--r--drivers/iio/pressure/Kconfig10
-rw-r--r--drivers/iio/pressure/Makefile1
-rw-r--r--drivers/iio/pressure/abp060mg.c276
-rw-r--r--drivers/iio/pressure/mpl3115.c26
-rw-r--r--drivers/iio/pressure/ms5611_core.c19
-rw-r--r--drivers/iio/pressure/st_pressure_core.c257
-rw-r--r--drivers/iio/pressure/zpa2326.c4
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c2
-rw-r--r--drivers/input/misc/arizona-haptics.c13
-rw-r--r--drivers/input/misc/xen-kbdfront.c13
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c2
-rw-r--r--drivers/irqchip/irq-gic-v3.c13
-rw-r--r--drivers/isdn/hisax/q931.c2
-rw-r--r--drivers/lightnvm/core.c10
-rw-r--r--drivers/mcb/mcb-parse.c2
-rw-r--r--drivers/md/Kconfig10
-rw-r--r--drivers/md/bitmap.c166
-rw-r--r--drivers/md/dm-bufio.c28
-rw-r--r--drivers/md/dm-cache-metadata.c3
-rw-r--r--drivers/md/dm-cache-policy-smq.c2
-rw-r--r--drivers/md/dm-cache-target.c3
-rw-r--r--drivers/md/dm-crypt.c214
-rw-r--r--drivers/md/dm-flakey.c53
-rw-r--r--drivers/md/dm-io.c34
-rw-r--r--drivers/md/dm-ioctl.c2
-rw-r--r--drivers/md/dm-mpath.c42
-rw-r--r--drivers/md/dm-raid.c86
-rw-r--r--drivers/md/dm-rq.c18
-rw-r--r--drivers/md/dm-table.c43
-rw-r--r--drivers/md/dm-verity-target.c2
-rw-r--r--drivers/md/dm.c4
-rw-r--r--drivers/md/linear.c31
-rw-r--r--drivers/md/md.c701
-rw-r--r--drivers/md/md.h108
-rw-r--r--drivers/md/multipath.c92
-rw-r--r--drivers/md/persistent-data/dm-array.c2
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c19
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c4
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c14
-rw-r--r--drivers/md/raid0.c107
-rw-r--r--drivers/md/raid1.c247
-rw-r--r--drivers/md/raid1.h19
-rw-r--r--drivers/md/raid10.c295
-rw-r--r--drivers/md/raid10.h2
-rw-r--r--drivers/md/raid5-cache.c1833
-rw-r--r--drivers/md/raid5.c623
-rw-r--r--drivers/md/raid5.h172
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c7
-rw-r--r--drivers/media/usb/uvc/uvc_video.c6
-rw-r--r--drivers/message/fusion/mptbase.c28
-rw-r--r--drivers/message/fusion/mptscsih.c11
-rw-r--r--drivers/mfd/max77620.c2
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c1
-rw-r--r--drivers/misc/genwqe/card_base.h1
-rw-r--r--drivers/misc/genwqe/card_utils.c12
-rw-r--r--drivers/misc/ibmasm/module.c2
-rw-r--r--drivers/misc/lkdtm_bugs.c3
-rw-r--r--drivers/misc/lkdtm_perms.c7
-rw-r--r--drivers/misc/mei/amthif.c2
-rw-r--r--drivers/misc/mei/bus-fixup.c100
-rw-r--r--drivers/misc/mei/bus.c217
-rw-r--r--drivers/misc/mei/client.c23
-rw-r--r--drivers/misc/mei/client.h2
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/hw-me.c90
-rw-r--r--drivers/misc/mei/hw-me.h2
-rw-r--r--drivers/misc/mei/hw-txe.c18
-rw-r--r--drivers/misc/mei/init.c6
-rw-r--r--drivers/misc/mei/interrupt.c7
-rw-r--r--drivers/misc/mei/main.c45
-rw-r--r--drivers/misc/mei/mei_dev.h38
-rw-r--r--drivers/misc/mei/pci-me.c1
-rw-r--r--drivers/mmc/Kconfig2
-rw-r--r--drivers/mmc/Makefile1
-rw-r--r--drivers/mmc/card/Kconfig70
-rw-r--r--drivers/mmc/card/Makefile10
-rw-r--r--drivers/mmc/core/Kconfig66
-rw-r--r--drivers/mmc/core/Makefile4
-rw-r--r--drivers/mmc/core/block.c (renamed from drivers/mmc/card/block.c)0
-rw-r--r--drivers/mmc/core/block.h (renamed from drivers/mmc/card/block.h)0
-rw-r--r--drivers/mmc/core/mmc_test.c (renamed from drivers/mmc/card/mmc_test.c)2
-rw-r--r--drivers/mmc/core/queue.c (renamed from drivers/mmc/card/queue.c)2
-rw-r--r--drivers/mmc/core/queue.h (renamed from drivers/mmc/card/queue.h)0
-rw-r--r--drivers/mmc/core/sdio_uart.c (renamed from drivers/mmc/card/sdio_uart.c)2
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c2
-rw-r--r--drivers/net/xen-netback/xenbus.c52
-rw-r--r--drivers/net/xen-netfront.c67
-rw-r--r--drivers/nfc/mei_phy.c42
-rw-r--r--drivers/nfc/microread/mei.c23
-rw-r--r--drivers/nfc/pn544/mei.c23
-rw-r--r--drivers/nvme/host/core.c21
-rw-r--r--drivers/nvme/host/pci.c3
-rw-r--r--drivers/nvme/target/configfs.c10
-rw-r--r--drivers/nvmem/Kconfig22
-rw-r--r--drivers/nvmem/Makefile4
-rw-r--r--drivers/nvmem/bcm-ocotp.c335
-rw-r--r--drivers/nvmem/lpc18xx_otp.c124
-rw-r--r--drivers/of/overlay.c47
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c7
-rw-r--r--drivers/pci/xen-pcifront.c6
-rw-r--r--drivers/phy/Kconfig33
-rw-r--r--drivers/phy/Makefile3
-rw-r--r--drivers/phy/phy-berlin-sata.c3
-rw-r--r--drivers/phy/phy-brcm-sata.c6
-rw-r--r--drivers/phy/phy-da8xx-usb.c5
-rw-r--r--drivers/phy/phy-exynos-mipi-video.c15
-rw-r--r--drivers/phy/phy-exynos4210-usb2.c4
-rw-r--r--drivers/phy/phy-exynos4x12-usb2.c4
-rw-r--r--drivers/phy/phy-exynos5250-usb2.c2
-rw-r--r--drivers/phy/phy-meson8b-usb2.c286
-rw-r--r--drivers/phy/phy-miphy365x.c625
-rw-r--r--drivers/phy/phy-qcom-ufs-i.h7
-rw-r--r--drivers/phy/phy-qcom-ufs-qmp-14nm.c72
-rw-r--r--drivers/phy/phy-qcom-ufs-qmp-20nm.c65
-rw-r--r--drivers/phy/phy-qcom-ufs.c273
-rw-r--r--drivers/phy/phy-rcar-gen3-usb2.c118
-rw-r--r--drivers/phy/phy-rockchip-emmc.c2
-rw-r--r--drivers/phy/phy-rockchip-inno-usb2.c607
-rw-r--r--drivers/phy/phy-s5pv210-usb2.c4
-rw-r--r--drivers/phy/phy-stih41x-usb.c188
-rw-r--r--drivers/phy/phy-sun4i-usb.c14
-rw-r--r--drivers/phy/phy-ti-pipe3.c10
-rw-r--r--drivers/phy/phy-twl4030-usb.c3
-rw-r--r--drivers/phy/tegra/xusb-tegra124.c3
-rw-r--r--drivers/phy/tegra/xusb.c10
-rw-r--r--drivers/platform/chrome/cros_ec_dev.c159
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c6
-rw-r--r--drivers/power/reset/Kconfig10
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/at91-poweroff.c1
-rw-r--r--drivers/power/reset/at91-reset.c2
-rw-r--r--drivers/power/reset/piix4-poweroff.c113
-rw-r--r--drivers/power/reset/syscon-reboot-mode.c1
-rw-r--r--drivers/power/reset/zx-reboot.c1
-rw-r--r--drivers/power/supply/ab8500_fg.c8
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c1
-rw-r--r--drivers/power/supply/bq24190_charger.c2
-rw-r--r--drivers/power/supply/bq27xxx_battery.c44
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c4
-rw-r--r--drivers/power/supply/ipaq_micro_battery.c2
-rw-r--r--drivers/power/supply/lp8788-charger.c3
-rw-r--r--drivers/power/supply/max17040_battery.c52
-rw-r--r--drivers/power/supply/max8997_charger.c1
-rw-r--r--drivers/power/supply/power_supply_core.c4
-rw-r--r--drivers/power/supply/wm8350_power.c2
-rw-r--r--drivers/power/supply/wm97xx_battery.c25
-rw-r--r--drivers/regulator/rk808-regulator.c9
-rw-r--r--drivers/s390/block/dasd.c301
-rw-r--r--drivers/s390/block/dasd_3990_erp.c52
-rw-r--r--drivers/s390/block/dasd_devmap.c326
-rw-r--r--drivers/s390/block/dasd_eckd.c323
-rw-r--r--drivers/s390/block/dasd_eckd.h5
-rw-r--r--drivers/s390/block/dasd_eer.c29
-rw-r--r--drivers/s390/block/dasd_erp.c2
-rw-r--r--drivers/s390/block/dasd_fba.c2
-rw-r--r--drivers/s390/block/dasd_int.h449
-rw-r--r--drivers/s390/char/con3215.c12
-rw-r--r--drivers/s390/char/sclp.h23
-rw-r--r--drivers/s390/char/sclp_cmd.c25
-rw-r--r--drivers/s390/char/sclp_ctl.c4
-rw-r--r--drivers/s390/char/sclp_early.c31
-rw-r--r--drivers/s390/char/sclp_quiesce.c4
-rw-r--r--drivers/s390/char/sclp_tty.c3
-rw-r--r--drivers/s390/char/vmlogrdr.c2
-rw-r--r--drivers/s390/char/zcore.c22
-rw-r--r--drivers/s390/cio/cmf.c10
-rw-r--r--drivers/s390/cio/css.c6
-rw-r--r--drivers/s390/cio/device.c6
-rw-r--r--drivers/s390/cio/device.h1
-rw-r--r--drivers/s390/cio/device_fsm.c6
-rw-r--r--drivers/s390/cio/device_ops.c5
-rw-r--r--drivers/s390/crypto/ap_bus.c12
-rw-r--r--drivers/s390/scsi/zfcp_ext.h4
-rw-r--r--drivers/s390/scsi/zfcp_fc.c36
-rw-r--r--drivers/s390/virtio/virtio_ccw.c25
-rw-r--r--drivers/scsi/Kconfig35
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/NCR5380.c137
-rw-r--r--drivers/scsi/NCR5380.h87
-rw-r--r--drivers/scsi/aacraid/aacraid.h1
-rw-r--r--drivers/scsi/aacraid/comminit.c10
-rw-r--r--drivers/scsi/aacraid/commsup.c25
-rw-r--r--drivers/scsi/aacraid/linit.c20
-rw-r--r--drivers/scsi/advansys.c3
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c5
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h5
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c82
-rw-r--r--drivers/scsi/arm/cumana_1.c98
-rw-r--r--drivers/scsi/arm/oak.c34
-rw-r--r--drivers/scsi/atari_scsi.c77
-rw-r--r--drivers/scsi/be2iscsi/be_main.c8
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h30
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c62
-rw-r--r--drivers/scsi/bfa/bfad_im.h4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c3
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c2
-rw-r--r--drivers/scsi/cxlflash/common.h39
-rw-r--r--drivers/scsi/cxlflash/lunmgt.c6
-rw-r--r--drivers/scsi/cxlflash/main.c410
-rw-r--r--drivers/scsi/cxlflash/sislite.h2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c16
-rw-r--r--drivers/scsi/dmx3191d.c33
-rw-r--r--drivers/scsi/dpt_i2o.c7
-rw-r--r--drivers/scsi/fcoe/fcoe.c25
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c157
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c83
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c30
-rw-r--r--drivers/scsi/fnic/fnic_trace.c4
-rw-r--r--drivers/scsi/fnic/fnic_trace.h2
-rw-r--r--drivers/scsi/fnic/vnic_dev.c10
-rw-r--r--drivers/scsi/g_NCR5380.c296
-rw-r--r--drivers/scsi/g_NCR5380.h32
-rw-r--r--drivers/scsi/g_NCR5380_mmio.c10
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h11
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c67
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c79
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c556
-rw-r--r--drivers/scsi/hpsa.c252
-rw-r--r--drivers/scsi/hpsa.h6
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c40
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c900
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h5
-rw-r--r--drivers/scsi/ipr.c174
-rw-r--r--drivers/scsi/ipr.h7
-rw-r--r--drivers/scsi/ips.c13
-rw-r--r--drivers/scsi/isci/host.h1
-rw-r--r--drivers/scsi/isci/init.c23
-rw-r--r--drivers/scsi/isci/probe_roms.c1
-rw-r--r--drivers/scsi/isci/remote_node_context.c7
-rw-r--r--drivers/scsi/isci/request.c2
-rw-r--r--drivers/scsi/libfc/fc_disc.c61
-rw-r--r--drivers/scsi/libfc/fc_elsct.c2
-rw-r--r--drivers/scsi/libfc/fc_exch.c256
-rw-r--r--drivers/scsi/libfc/fc_fcp.c235
-rw-r--r--drivers/scsi/libfc/fc_libfc.c2
-rw-r--r--drivers/scsi/libfc/fc_lport.c126
-rw-r--r--drivers/scsi/libfc/fc_rport.c561
-rw-r--r--drivers/scsi/lpfc/lpfc.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c160
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c422
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h18
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c116
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c56
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c41
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mac_scsi.c83
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c136
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c23
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c186
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h39
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c69
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.h1
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c129
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c8
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c2
-rw-r--r--drivers/scsi/pmcraid.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c449
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c52
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c15
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h18
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h1
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c27
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c97
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c15
-rw-r--r--drivers/scsi/scsi_devinfo.c2
-rw-r--r--drivers/scsi/scsi_lib.c51
-rw-r--r--drivers/scsi/scsi_transport_fc.c455
-rw-r--r--drivers/scsi/scsi_transport_srp.c52
-rw-r--r--drivers/scsi/sd.c4
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c102
-rw-r--r--drivers/scsi/storvsc_drv.c4
-rw-r--r--drivers/scsi/sun3_scsi.c80
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c39
-rw-r--r--drivers/scsi/ufs/ufs.h5
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h9
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c2
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c5
-rw-r--r--drivers/scsi/ufs/ufshcd.c482
-rw-r--r--drivers/scsi/ufs/ufshcd.h46
-rw-r--r--drivers/scsi/ufs/ufshci.h3
-rw-r--r--drivers/scsi/ufs/unipro.h4
-rw-r--r--drivers/scsi/xen-scsifront.c193
-rw-r--r--drivers/spi/Kconfig19
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi-armada-3700.c923
-rw-r--r--drivers/spi/spi-ath79.c1
-rw-r--r--drivers/spi/spi-atmel.c324
-rw-r--r--drivers/spi/spi-axi-spi-engine.c1
-rw-r--r--drivers/spi/spi-dw.c1
-rw-r--r--drivers/spi/spi-fsl-dspi.c306
-rw-r--r--drivers/spi/spi-fsl-espi.c728
-rw-r--r--drivers/spi/spi-fsl-lib.h4
-rw-r--r--drivers/spi/spi-fsl-lpspi.c525
-rw-r--r--drivers/spi/spi-imx.c35
-rw-r--r--drivers/spi/spi-jcore.c1
-rw-r--r--drivers/spi/spi-omap2-mcspi.c11
-rw-r--r--drivers/spi/spi-orion.c83
-rw-r--r--drivers/spi/spi-pxa2xx.h1
-rw-r--r--drivers/spi/spi-rspi.c52
-rw-r--r--drivers/spi/spi-sh-msiof.c1
-rw-r--r--drivers/spi/spi-sun4i.c75
-rw-r--r--drivers/spi/spi-sun6i.c18
-rw-r--r--drivers/spi/spi-ti-qspi.c1
-rw-r--r--drivers/spi/spi-topcliff-pch.c13
-rw-r--r--drivers/spi/spi-xlp.c1
-rw-r--r--drivers/spi/spi.c26
-rw-r--r--drivers/spi/spidev.c1
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile3
-rw-r--r--drivers/staging/android/TODO8
-rw-r--r--drivers/staging/android/ashmem.c40
-rw-r--r--drivers/staging/android/ion/ion.c2
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c2
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c2
-rw-r--r--drivers/staging/android/uapi/ion_test.h1
-rw-r--r--drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c2
-rw-r--r--drivers/staging/comedi/comedi.h55
-rw-r--r--drivers/staging/comedi/comedidev.h12
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c4
-rw-r--r--drivers/staging/comedi/drivers/mite.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c172
-rw-r--r--drivers/staging/comedi/drivers/ni_stc.h14
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c16
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c6
-rw-r--r--drivers/staging/comedi/drivers/s626.c182
-rw-r--r--drivers/staging/comedi/kcomedilib/kcomedilib_main.c12
-rw-r--r--drivers/staging/dgnc/Makefile3
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c44
-rw-r--r--drivers/staging/dgnc/dgnc_cls.h2
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c558
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h189
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.c6
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c111
-rw-r--r--drivers/staging/dgnc/dgnc_neo.h58
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.c703
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.h40
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c362
-rw-r--r--drivers/staging/dgnc/dgnc_tty.h6
-rw-r--r--drivers/staging/dgnc/digi.h107
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c69
-rw-r--r--drivers/staging/fbtft/fb_agm1264k-fl.c68
-rw-r--r--drivers/staging/fbtft/fb_ili9325.c19
-rw-r--r--drivers/staging/fbtft/fb_ili9481.c2
-rw-r--r--drivers/staging/fbtft/fb_ili9486.c2
-rw-r--r--drivers/staging/fbtft/fb_s6d02a1.c2
-rw-r--r--drivers/staging/fbtft/fb_st7735r.c2
-rw-r--r--drivers/staging/fbtft/fbtft-core.c33
-rw-r--r--drivers/staging/fbtft/fbtft.h4
-rw-r--r--drivers/staging/fbtft/fbtft_device.c12
-rw-r--r--drivers/staging/fbtft/flexfb.c373
-rw-r--r--drivers/staging/fsl-mc/bus/Kconfig24
-rw-r--r--drivers/staging/fsl-mc/bus/dpbp-cmd.h (renamed from drivers/staging/fsl-mc/include/dpbp-cmd.h)61
-rw-r--r--drivers/staging/fsl-mc/bus/dpbp.c74
-rw-r--r--drivers/staging/fsl-mc/bus/dpcon-cmd.h (renamed from drivers/staging/fsl-mc/include/dpcon-cmd.h)4
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp-cmd.h49
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp.c70
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp.h141
-rw-r--r--drivers/staging/fsl-mc/bus/dpmng-cmd.h14
-rw-r--r--drivers/staging/fsl-mc/bus/dpmng.c37
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-cmd.h90
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-driver.c23
-rw-r--r--drivers/staging/fsl-mc/bus/dprc.c69
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-allocator.c78
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-bus.c66
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-msi.c2
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-private.h3
-rw-r--r--drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c4
-rw-r--r--drivers/staging/fsl-mc/bus/mc-io.c4
-rw-r--r--drivers/staging/fsl-mc/bus/mc-sys.c12
-rw-r--r--drivers/staging/fsl-mc/include/dpbp.h169
-rw-r--r--drivers/staging/fsl-mc/include/dpmng.h18
-rw-r--r--drivers/staging/fsl-mc/include/dprc.h402
-rw-r--r--drivers/staging/fsl-mc/include/mc-bus.h6
-rw-r--r--drivers/staging/fsl-mc/include/mc-cmd.h44
-rw-r--r--drivers/staging/fsl-mc/include/mc-sys.h3
-rw-r--r--drivers/staging/fsl-mc/include/mc.h4
-rw-r--r--drivers/staging/fwserial/fwserial.c6
-rw-r--r--drivers/staging/gdm724x/gdm_lte.h14
-rw-r--r--drivers/staging/gdm724x/gdm_tty.h1
-rw-r--r--drivers/staging/gdm724x/netlink_k.h3
-rw-r--r--drivers/staging/greybus/arche-apb-ctrl.c8
-rw-r--r--drivers/staging/greybus/arche-platform.c3
-rw-r--r--drivers/staging/greybus/audio_codec.c5
-rw-r--r--drivers/staging/greybus/audio_codec.h1
-rw-r--r--drivers/staging/greybus/audio_manager.h3
-rw-r--r--drivers/staging/greybus/audio_manager_module.c35
-rw-r--r--drivers/staging/greybus/audio_manager_sysfs.c16
-rw-r--r--drivers/staging/greybus/audio_module.c7
-rw-r--r--drivers/staging/greybus/audio_topology.c8
-rw-r--r--drivers/staging/greybus/camera.c7
-rw-r--r--drivers/staging/greybus/es2.c5
-rw-r--r--drivers/staging/greybus/log.c6
-rw-r--r--drivers/staging/greybus/sdio.c3
-rw-r--r--drivers/staging/greybus/timesync.c6
-rw-r--r--drivers/staging/greybus/uart.c36
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c2
-rw-r--r--drivers/staging/i4l/act2000/act2000_isa.c1
-rw-r--r--drivers/staging/i4l/act2000/capi.c7
-rw-r--r--drivers/staging/i4l/act2000/module.c24
-rw-r--r--drivers/staging/i4l/icn/icn.c3
-rw-r--r--drivers/staging/i4l/icn/icn.h5
-rw-r--r--drivers/staging/i4l/pcbit/callbacks.c2
-rw-r--r--drivers/staging/i4l/pcbit/capi.c5
-rw-r--r--drivers/staging/i4l/pcbit/drv.c5
-rw-r--r--drivers/staging/i4l/pcbit/edss1.c2
-rw-r--r--drivers/staging/i4l/pcbit/layer2.c2
-rw-r--r--drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl25836
-rw-r--r--drivers/staging/iio/TODO70
-rw-r--r--drivers/staging/iio/accel/Kconfig10
-rw-r--r--drivers/staging/iio/accel/Makefile3
-rw-r--r--drivers/staging/iio/accel/sca3000.h279
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c1210
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c350
-rw-r--r--drivers/staging/iio/adc/Makefile1
-rw-r--r--drivers/staging/iio/adc/ad7192.c127
-rw-r--r--drivers/staging/iio/adc/ad7280a.c2
-rw-r--r--drivers/staging/iio/adc/ad7606.c (renamed from drivers/staging/iio/adc/ad7606_core.c)436
-rw-r--r--drivers/staging/iio/adc/ad7606.h58
-rw-r--r--drivers/staging/iio/adc/ad7606_par.c23
-rw-r--r--drivers/staging/iio/adc/ad7606_ring.c102
-rw-r--r--drivers/staging/iio/adc/ad7606_spi.c19
-rw-r--r--drivers/staging/iio/adc/ad7780.c22
-rw-r--r--drivers/staging/iio/adc/ad7816.c2
-rw-r--r--drivers/staging/iio/addac/adt7316.c4
-rw-r--r--drivers/staging/iio/cdc/ad7150.c2
-rw-r--r--drivers/staging/iio/cdc/ad7152.c140
-rw-r--r--drivers/staging/iio/cdc/ad7746.c151
-rw-r--r--drivers/staging/iio/frequency/ad9832.c66
-rw-r--r--drivers/staging/iio/frequency/ad9832.h6
-rw-r--r--drivers/staging/iio/frequency/ad9834.c19
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c21
-rw-r--r--drivers/staging/iio/light/Kconfig19
-rw-r--r--drivers/staging/iio/light/Makefile2
-rw-r--r--drivers/staging/iio/light/tsl2583.c963
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c86
-rw-r--r--drivers/staging/iio/ring_hw.h27
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.c37
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.h22
-rw-r--r--drivers/staging/ks7010/ks_hostif.c124
-rw-r--r--drivers/staging/ks7010/ks_wlan.h143
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c436
-rw-r--r--drivers/staging/ks7010/michael_mic.c29
-rw-r--r--drivers/staging/ks7010/michael_mic.h20
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/curproc.h20
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h5
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h12
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h53
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h185
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h13
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_string.h24
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h8
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h8
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lnetst.h8
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c6
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c26
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c31
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h134
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c44
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c6
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c26
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c36
-rw-r--r--drivers/staging/lustre/lnet/libcfs/fail.c5
-rw-r--r--drivers/staging/lustre/lnet/libcfs/hash.c143
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_lock.c20
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_mem.c22
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_string.c58
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c187
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c32
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c62
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-module.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c10
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c28
-rw-r--r--drivers/staging/lustre/lnet/libcfs/module.c26
-rw-r--r--drivers/staging/lustre/lnet/libcfs/prng.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c108
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.h28
-rw-r--r--drivers/staging/lustre/lnet/libcfs/workitem.c50
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c8
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-me.c18
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/nidstrings.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c17
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c73
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c36
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.h23
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c29
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h25
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c25
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c22
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.h8
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h30
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c7
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c18
-rw-r--r--drivers/staging/lustre/lustre/fid/lproc_fid.c2
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c6
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h5
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c8
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h379
-rw-r--r--drivers/staging/lustre/lustre/include/llog_swab.h65
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h9
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h75
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h438
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h44
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_compat.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h50
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fld.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_ha.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h9
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lmv.h13
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_log.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h44
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h898
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_nrs.h717
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h70
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_req_layout.h7
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_sec.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_swab.h102
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h291
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h264
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h15
-rw-r--r--drivers/staging/lustre/lustre/include/seq_range.h199
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c28
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c22
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c8
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h88
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c45
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c316
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c26
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_plain.c8
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c36
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c132
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c42
-rw-r--r--drivers/staging/lustre/lustre/llite/Makefile10
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c195
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c968
-rw-r--r--drivers/staging/lustre/lustre/llite/glimpse.c139
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_cl.c41
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_misc.c47
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_close.c395
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h121
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c342
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c69
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c68
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c302
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c65
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c20
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c15
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_internal.h41
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c185
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c48
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c41
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_req.c122
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c360
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c17
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_security.c88
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c11
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h3
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c407
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h29
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c52
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c208
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h100
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c116
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c50
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c720
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c698
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c293
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c46
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c3
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c292
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c61
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_object.c22
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c98
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_internal.h12
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c80
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c60
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_reint.c107
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c317
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c167
-rw-r--r--drivers/staging/lustre/lustre/obdclass/Makefile2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_internal.h23
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c290
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c452
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c68
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c228
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c80
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c139
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_obd.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_swab.c9
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c27
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c84
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c70
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c7
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c65
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c140
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c11
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c65
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h50
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_dev.c15
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h43
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c330
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c143
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c171
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c186
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_quota.c52
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c669
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c338
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/connection.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c95
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c66
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_client.c20
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c98
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c15
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c106
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pers.c31
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h34
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c26
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c9
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c48
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c20
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c9
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c410
-rw-r--r--drivers/staging/lustre/sysfs-fs-lustre2
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c2
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c3
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c4
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c9
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c4
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c2
-rw-r--r--drivers/staging/media/s5p-cec/s5p_cec.c1
-rw-r--r--drivers/staging/media/st-cec/stih-cec.c4
-rw-r--r--drivers/staging/most/aim-network/networking.c53
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hdm.c5
-rw-r--r--drivers/staging/most/hdm-usb/hdm_usb.c230
-rw-r--r--drivers/staging/most/mostcore/core.c55
-rw-r--r--drivers/staging/netlogic/xlr_net.c20
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_led.c78
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c32
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c46
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c9
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c1
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c69
-rw-r--r--drivers/staging/rtl8188eu/hal/rf.c22
-rw-r--r--drivers/staging/rtl8188eu/hal/rf_cfg.c70
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c51
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_led.c20
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c25
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c21
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h11
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h2
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h1
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h6
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_intf.h3
-rw-r--r--drivers/staging/rtl8188eu/include/recv_osdep.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h20
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_led.h12
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h13
-rw-r--r--drivers/staging/rtl8188eu/include/usb_ops_linux.h2
-rw-r--r--drivers/staging/rtl8188eu/include/xmit_osdep.h4
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c33
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c27
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c6
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c19
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c58
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c8
-rw-r--r--drivers/staging/rtl8192e/dot11d.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c7
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c4
-rw-r--r--drivers/staging/rtl8712/osdep_service.h9
-rw-r--r--drivers/staging/rtl8712/rtl8712_hal.h12
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c24
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c5
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c14
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c34
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.h18
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.c34
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c13
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c80
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c50
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.h6
-rw-r--r--drivers/staging/rts5208/ms.c393
-rw-r--r--drivers/staging/rts5208/ms.h4
-rw-r--r--drivers/staging/rts5208/rtsx.c55
-rw-r--r--drivers/staging/rts5208/rtsx.h2
-rw-r--r--drivers/staging/rts5208/rtsx_card.c94
-rw-r--r--drivers/staging/rts5208/rtsx_card.h16
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c17
-rw-r--r--drivers/staging/rts5208/rtsx_chip.h137
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c319
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.h4
-rw-r--r--drivers/staging/rts5208/rtsx_sys.h4
-rw-r--r--drivers/staging/rts5208/rtsx_transport.h30
-rw-r--r--drivers/staging/rts5208/sd.c813
-rw-r--r--drivers/staging/rts5208/sd.h5
-rw-r--r--drivers/staging/rts5208/spi.c144
-rw-r--r--drivers/staging/rts5208/xd.c461
-rw-r--r--drivers/staging/rts5208/xd.h2
-rw-r--r--drivers/staging/skein/skein_api.c26
-rw-r--r--drivers/staging/skein/threefish_block.c16
-rw-r--r--drivers/staging/slicoss/Kconfig14
-rw-r--r--drivers/staging/slicoss/Makefile1
-rw-r--r--drivers/staging/slicoss/README7
-rw-r--r--drivers/staging/slicoss/TODO36
-rw-r--r--drivers/staging/slicoss/slic.h573
-rw-r--r--drivers/staging/slicoss/slichw.h652
-rw-r--r--drivers/staging/slicoss/slicoss.c3131
-rw-r--r--drivers/staging/sm750fb/Makefile2
-rw-r--r--drivers/staging/sm750fb/ddk750.h23
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c100
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.h89
-rw-r--r--drivers/staging/sm750fb/ddk750_display.c75
-rw-r--r--drivers/staging/sm750fb/ddk750_display.h30
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.c2
-rw-r--r--drivers/staging/sm750fb/ddk750_help.c17
-rw-r--r--drivers/staging/sm750fb/ddk750_help.h21
-rw-r--r--drivers/staging/sm750fb/ddk750_hwi2c.c15
-rw-r--r--drivers/staging/sm750fb/ddk750_mode.c37
-rw-r--r--drivers/staging/sm750fb/ddk750_power.c74
-rw-r--r--drivers/staging/sm750fb/ddk750_power.h22
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c3
-rw-r--r--drivers/staging/sm750fb/ddk750_swi2c.c31
-rw-r--r--drivers/staging/sm750fb/ddk750_swi2c.h24
-rw-r--r--drivers/staging/sm750fb/sm750.c52
-rw-r--r--drivers/staging/sm750fb/sm750.h6
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c52
-rw-r--r--drivers/staging/sm750fb/sm750_accel.h10
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.c14
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.h14
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c40
-rw-r--r--drivers/staging/speakup/TODO2
-rw-r--r--drivers/staging/speakup/main.c42
-rw-r--r--drivers/staging/speakup/selection.c2
-rw-r--r--drivers/staging/speakup/serialio.c6
-rw-r--r--drivers/staging/speakup/speakup_soft.c46
-rw-r--r--drivers/staging/speakup/speakup_spkout.c31
-rw-r--r--drivers/staging/speakup/speakup_txprt.c29
-rw-r--r--drivers/staging/speakup/spk_priv_keyinfo.h148
-rw-r--r--drivers/staging/speakup/spk_types.h16
-rw-r--r--drivers/staging/speakup/synth.c22
-rw-r--r--drivers/staging/speakup/thread.c5
-rw-r--r--drivers/staging/speakup/varhandlers.c6
-rw-r--r--drivers/staging/unisys/include/iochannel.h335
-rw-r--r--drivers/staging/unisys/include/visorbus.h2
-rw-r--r--drivers/staging/unisys/visorbus/vbuschannel.h225
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c231
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_private.h4
-rw-r--r--drivers/staging/unisys/visorbus/visorchannel.c44
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c653
-rw-r--r--drivers/staging/unisys/visorbus/vmcallinterface.h185
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c6
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c2
-rw-r--r--drivers/staging/vc04_services/Kconfig7
-rw-r--r--drivers/staging/vc04_services/Makefile2
-rw-r--r--drivers/staging/vc04_services/interface/vchi/TODO50
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi.h25
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h11
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c324
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c202
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c659
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h12
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h9
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c17
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h14
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h12
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h12
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c138
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c3
-rw-r--r--drivers/staging/vme/devices/vme_pio2.h13
-rw-r--r--drivers/staging/vme/devices/vme_pio2_core.c2
-rw-r--r--drivers/staging/vme/devices/vme_user.c6
-rw-r--r--drivers/staging/vt6655/baseband.c58
-rw-r--r--drivers/staging/vt6655/baseband.h11
-rw-r--r--drivers/staging/vt6655/card.c45
-rw-r--r--drivers/staging/vt6655/card.h6
-rw-r--r--drivers/staging/vt6655/channel.c4
-rw-r--r--drivers/staging/vt6655/channel.h4
-rw-r--r--drivers/staging/vt6655/desc.h4
-rw-r--r--drivers/staging/vt6655/device.h16
-rw-r--r--drivers/staging/vt6655/device_cfg.h4
-rw-r--r--drivers/staging/vt6655/device_main.c9
-rw-r--r--drivers/staging/vt6655/dpc.c4
-rw-r--r--drivers/staging/vt6655/dpc.h4
-rw-r--r--drivers/staging/vt6655/key.c5
-rw-r--r--drivers/staging/vt6655/key.h5
-rw-r--r--drivers/staging/vt6655/mac.c8
-rw-r--r--drivers/staging/vt6655/mac.h327
-rw-r--r--drivers/staging/vt6655/power.c6
-rw-r--r--drivers/staging/vt6655/power.h5
-rw-r--r--drivers/staging/vt6655/rf.c718
-rw-r--r--drivers/staging/vt6655/rf.h5
-rw-r--r--drivers/staging/vt6655/rxtx.c8
-rw-r--r--drivers/staging/vt6655/rxtx.h4
-rw-r--r--drivers/staging/vt6655/srom.c36
-rw-r--r--drivers/staging/vt6655/srom.h11
-rw-r--r--drivers/staging/vt6655/tmacro.h4
-rw-r--r--drivers/staging/vt6655/upc.h4
-rw-r--r--drivers/staging/vt6656/baseband.h20
-rw-r--r--drivers/staging/vt6656/card.c15
-rw-r--r--drivers/staging/vt6656/mac.c2
-rw-r--r--drivers/staging/vt6656/main_usb.c8
-rw-r--r--drivers/staging/vt6656/rf.c10
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.c4
-rw-r--r--drivers/staging/wilc1000/host_interface.c12
-rw-r--r--drivers/staging/wilc1000/host_interface.h1
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c51
-rw-r--r--drivers/staging/wilc1000/wilc_debugfs.c6
-rw-r--r--drivers/staging/wilc1000/wilc_sdio.c3
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c6
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c24
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h1
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.h6
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c6
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h128
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c2479
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c322
-rw-r--r--drivers/staging/wlan-ng/p80211conv.h100
-rw-r--r--drivers/staging/wlan-ng/p80211hdr.h118
-rw-r--r--drivers/staging/wlan-ng/p80211ioctl.h120
-rw-r--r--drivers/staging/wlan-ng/p80211metadef.h88
-rw-r--r--drivers/staging/wlan-ng/p80211mgmt.h194
-rw-r--r--drivers/staging/wlan-ng/p80211msg.h90
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c637
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h102
-rw-r--r--drivers/staging/wlan-ng/p80211req.c189
-rw-r--r--drivers/staging/wlan-ng/p80211req.h90
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c100
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c559
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c544
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h125
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c102
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c210
-rw-r--r--drivers/staging/xgifb/XGI_main.h54
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c196
-rw-r--r--drivers/staging/xgifb/vb_init.c56
-rw-r--r--drivers/staging/xgifb/vb_setmode.c667
-rw-r--r--drivers/staging/xgifb/vb_table.h9
-rw-r--r--drivers/staging/xgifb/vb_util.h4
-rw-r--r--drivers/target/target_core_fabric_configfs.c7
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c20
-rw-r--r--drivers/target/tcm_fc/tfc_io.c4
-rw-r--r--drivers/thunderbolt/nhi_regs.h6
-rw-r--r--drivers/tty/amiserial.c2
-rw-r--r--drivers/tty/nozomi.c51
-rw-r--r--drivers/tty/rocket.c4
-rw-r--r--drivers/tty/serial/8250/8250.h6
-rw-r--r--drivers/tty/serial/8250/8250_core.c7
-rw-r--r--drivers/tty/serial/8250/8250_dma.c9
-rw-r--r--drivers/tty/serial/8250/8250_dw.c22
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c231
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c7
-rw-r--r--drivers/tty/serial/8250/8250_mid.c4
-rw-r--r--drivers/tty/serial/8250/8250_of.c2
-rw-r--r--drivers/tty/serial/8250/8250_pci.c57
-rw-r--r--drivers/tty/serial/8250/8250_port.c18
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c190
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c46
-rw-r--r--drivers/tty/serial/8250/Kconfig10
-rw-r--r--drivers/tty/serial/8250/Makefile1
-rw-r--r--drivers/tty/serial/Kconfig19
-rw-r--r--drivers/tty/serial/Makefile7
-rw-r--r--drivers/tty/serial/amba-pl011.c56
-rw-r--r--drivers/tty/serial/crisv10.c2
-rw-r--r--drivers/tty/serial/fsl_lpuart.c64
-rw-r--r--drivers/tty/serial/ifx6x60.c1
-rw-r--r--drivers/tty/serial/ioc4_serial.c7
-rw-r--r--drivers/tty/serial/mxs-auart.c2
-rw-r--r--drivers/tty/serial/pxa.c2
-rw-r--r--drivers/tty/serial/sc16is7xx.c2
-rw-r--r--drivers/tty/serial/serial_core.c17
-rw-r--r--drivers/tty/serial/sh-sci.c20
-rw-r--r--drivers/tty/serial/sunhv.c3
-rw-r--r--drivers/tty/serial/sunsu.c1
-rw-r--r--drivers/tty/vt/consolemap.c115
-rw-r--r--drivers/tty/vt/keyboard.c4
-rw-r--r--drivers/tty/vt/vt.c89
-rw-r--r--drivers/uio/Kconfig9
-rw-r--r--drivers/uio/Makefile1
-rw-r--r--drivers/uio/uio_hv_generic.c218
-rw-r--r--drivers/uio/uio_pruss.c10
-rw-r--r--drivers/usb/Kconfig2
-rw-r--r--drivers/usb/Makefile1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c4
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h1
-rw-r--r--drivers/usb/chipidea/udc.c12
-rw-r--r--drivers/usb/chipidea/udc.h12
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c86
-rw-r--r--drivers/usb/class/cdc-acm.c216
-rw-r--r--drivers/usb/class/cdc-acm.h5
-rw-r--r--drivers/usb/class/usbtmc.c5
-rw-r--r--drivers/usb/core/buffer.c3
-rw-r--r--drivers/usb/core/config.c5
-rw-r--r--drivers/usb/core/devices.c12
-rw-r--r--drivers/usb/core/driver.c3
-rw-r--r--drivers/usb/core/endpoint.c7
-rw-r--r--drivers/usb/core/file.c2
-rw-r--r--drivers/usb/core/generic.c2
-rw-r--r--drivers/usb/core/hub.c114
-rw-r--r--drivers/usb/core/ledtrig-usbport.c7
-rw-r--r--drivers/usb/core/message.c3
-rw-r--r--drivers/usb/core/notify.c2
-rw-r--r--drivers/usb/core/sysfs.c17
-rw-r--r--drivers/usb/core/urb.c12
-rw-r--r--drivers/usb/core/usb.c3
-rw-r--r--drivers/usb/core/usb.h5
-rw-r--r--drivers/usb/dwc2/Makefile1
-rw-r--r--drivers/usb/dwc2/core.c930
-rw-r--r--drivers/usb/dwc2/core.h324
-rw-r--r--drivers/usb/dwc2/core_intr.c6
-rw-r--r--drivers/usb/dwc2/debugfs.c2
-rw-r--r--drivers/usb/dwc2/gadget.c1126
-rw-r--r--drivers/usb/dwc2/hcd.c264
-rw-r--r--drivers/usb/dwc2/hcd.h7
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c56
-rw-r--r--drivers/usb/dwc2/hcd_intr.c55
-rw-r--r--drivers/usb/dwc2/hcd_queue.c87
-rw-r--r--drivers/usb/dwc2/hw.h48
-rw-r--r--drivers/usb/dwc2/params.c1435
-rw-r--r--drivers/usb/dwc2/pci.c18
-rw-r--r--drivers/usb/dwc2/platform.c207
-rw-r--r--drivers/usb/dwc3/Kconfig6
-rw-r--r--drivers/usb/dwc3/Makefile6
-rw-r--r--drivers/usb/dwc3/core.c358
-rw-r--r--drivers/usb/dwc3/core.h53
-rw-r--r--drivers/usb/dwc3/debug.c32
-rw-r--r--drivers/usb/dwc3/debug.h41
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c10
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c134
-rw-r--r--drivers/usb/dwc3/dwc3-st.c1
-rw-r--r--drivers/usb/dwc3/ep0.c347
-rw-r--r--drivers/usb/dwc3/gadget.c578
-rw-r--r--drivers/usb/dwc3/gadget.h5
-rw-r--r--drivers/usb/dwc3/host.c88
-rw-r--r--drivers/usb/dwc3/io.h6
-rw-r--r--drivers/usb/dwc3/trace.h123
-rw-r--r--drivers/usb/gadget/composite.c23
-rw-r--r--drivers/usb/gadget/configfs.c8
-rw-r--r--drivers/usb/gadget/function/f_fs.c2
-rw-r--r--drivers/usb/gadget/function/f_hid.c67
-rw-r--r--drivers/usb/gadget/function/f_ncm.c11
-rw-r--r--drivers/usb/gadget/function/f_uac2.c14
-rw-r--r--drivers/usb/gadget/function/rndis.c12
-rw-r--r--drivers/usb/gadget/function/rndis.h51
-rw-r--r--drivers/usb/gadget/function/u_ether.c5
-rw-r--r--drivers/usb/gadget/function/u_serial.c7
-rw-r--r--drivers/usb/gadget/function/uvc.h18
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c25
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c3
-rw-r--r--drivers/usb/gadget/function/uvc_video.c2
-rw-r--r--drivers/usb/gadget/udc/at91_udc.h2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c8
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_cmd.c2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c2
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c5
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c3
-rw-r--r--drivers/usb/gadget/udc/fsl_usb2_udc.h2
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c2
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c2
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c4
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c34
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c3
-rw-r--r--drivers/usb/gadget/udc/net2272.c4
-rw-r--r--drivers/usb/gadget/udc/net2280.c6
-rw-r--r--drivers/usb/gadget/udc/omap_udc.h2
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.h2
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c4
-rw-r--r--drivers/usb/host/Kconfig5
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-fsl.c3
-rw-r--r--drivers/usb/host/ehci-hub.c14
-rw-r--r--drivers/usb/host/ehci-pci.c3
-rw-r--r--drivers/usb/host/ehci-q.c30
-rw-r--r--drivers/usb/host/ehci-sched.c3
-rw-r--r--drivers/usb/host/ehci-w90x900.c30
-rw-r--r--drivers/usb/host/ehci.h8
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c27
-rw-r--r--drivers/usb/host/ohci-at91.c123
-rw-r--r--drivers/usb/host/ohci-da8xx.c522
-rw-r--r--drivers/usb/host/ohci-hcd.c18
-rw-r--r--drivers/usb/host/ohci-mem.c6
-rw-r--r--drivers/usb/host/ohci-nxp.c7
-rw-r--r--drivers/usb/host/ohci-omap.c39
-rw-r--r--drivers/usb/host/ohci-pxa27x.c36
-rw-r--r--drivers/usb/host/ohci-s3c2410.c47
-rw-r--r--drivers/usb/host/xhci-mem.c16
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c4
-rw-r--r--drivers/usb/host/xhci-mtk.c38
-rw-r--r--drivers/usb/host/xhci-mtk.h1
-rw-r--r--drivers/usb/host/xhci-plat.c9
-rw-r--r--drivers/usb/host/xhci-rcar.c4
-rw-r--r--drivers/usb/host/xhci-rcar.h1
-rw-r--r--drivers/usb/host/xhci-ring.c686
-rw-r--r--drivers/usb/host/xhci.c44
-rw-r--r--drivers/usb/host/xhci.h13
-rw-r--r--drivers/usb/isp1760/isp1760-if.c2
-rw-r--r--drivers/usb/misc/chaoskey.c14
-rw-r--r--drivers/usb/misc/rio500.c2
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_con.c55
-rw-r--r--drivers/usb/misc/usbtest.c6
-rw-r--r--drivers/usb/mtu3/Kconfig54
-rw-r--r--drivers/usb/mtu3/Makefile18
-rw-r--r--drivers/usb/mtu3/mtu3.h417
-rw-r--r--drivers/usb/mtu3/mtu3_core.c863
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c379
-rw-r--r--drivers/usb/mtu3/mtu3_dr.h108
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c730
-rw-r--r--drivers/usb/mtu3/mtu3_gadget_ep0.c881
-rw-r--r--drivers/usb/mtu3/mtu3_host.c294
-rw-r--r--drivers/usb/mtu3/mtu3_hw_regs.h473
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c484
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.c573
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.h43
-rw-r--r--drivers/usb/musb/da8xx.c70
-rw-r--r--drivers/usb/musb/musb_core.c29
-rw-r--r--drivers/usb/musb/musb_core.h6
-rw-r--r--drivers/usb/musb/musb_gadget.c8
-rw-r--r--drivers/usb/musb/musb_host.c2
-rw-r--r--drivers/usb/musb/musb_virthub.c1
-rw-r--r--drivers/usb/musb/omap2430.c6
-rw-r--r--drivers/usb/musb/sunxi.c25
-rw-r--r--drivers/usb/phy/Kconfig1
-rw-r--r--drivers/usb/phy/phy-am335x-control.c2
-rw-r--r--drivers/usb/phy/phy-generic.c9
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c2
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c23
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c5
-rw-r--r--drivers/usb/serial/Kconfig10
-rw-r--r--drivers/usb/serial/Makefile1
-rw-r--r--drivers/usb/serial/ch341.c113
-rw-r--r--drivers/usb/serial/cp210x.c411
-rw-r--r--drivers/usb/serial/f81534.c1409
-rw-r--r--drivers/usb/serial/ftdi_sio.c5
-rw-r--r--drivers/usb/serial/io_edgeport.c3
-rw-r--r--drivers/usb/serial/io_ti.c3
-rw-r--r--drivers/usb/serial/kl5kusb105.c35
-rw-r--r--drivers/usb/serial/mos7720.c3
-rw-r--r--drivers/usb/serial/mos7840.c3
-rw-r--r--drivers/usb/serial/opticon.c3
-rw-r--r--drivers/usb/serial/option.c7
-rw-r--r--drivers/usb/serial/quatech2.c3
-rw-r--r--drivers/usb/serial/ssu100.c3
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c3
-rw-r--r--drivers/usb/serial/usb_wwan.c3
-rw-r--r--drivers/usb/storage/usb.c1
-rw-r--r--drivers/usb/usbip/vhci_hcd.c3
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c1
-rw-r--r--drivers/usb/usbip/vudc_dev.c45
-rw-r--r--drivers/usb/usbip/vudc_transfer.c8
-rw-r--r--drivers/usb/wusbcore/dev-sysfs.c6
-rw-r--r--drivers/usb/wusbcore/security.c1
-rw-r--r--drivers/usb/wusbcore/wa-nep.c1
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c1
-rw-r--r--drivers/usb/wusbcore/wusbhc.c13
-rw-r--r--drivers/video/console/fbcon.c18
-rw-r--r--drivers/video/console/mdacon.c7
-rw-r--r--drivers/video/console/newport_con.c8
-rw-r--r--drivers/video/console/sticon.c7
-rw-r--r--drivers/video/console/vgacon.c133
-rw-r--r--drivers/video/fbdev/xen-fbfront.c13
-rw-r--r--drivers/watchdog/mei_wdt.c58
-rw-r--r--drivers/xen/balloon.c6
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/xen/gntalloc.c9
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/platform-pci.c6
-rw-r--r--drivers/xen/swiotlb-xen.c27
-rw-r--r--drivers/xen/xen-pciback/xenbus.c8
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c4
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c8
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c22
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/crypto/Kconfig2
-rw-r--r--fs/crypto/crypto.c123
-rw-r--r--fs/crypto/fname.c8
-rw-r--r--fs/crypto/fscrypt_private.h93
-rw-r--r--fs/crypto/keyinfo.c8
-rw-r--r--fs/crypto/policy.c36
-rw-r--r--fs/dax.c1132
-rw-r--r--fs/dlm/ast.c2
-rw-r--r--fs/dlm/config.c2
-rw-r--r--fs/dlm/debug_fs.c2
-rw-r--r--fs/dlm/dlm_internal.h1
-rw-r--r--fs/dlm/lockspace.c2
-rw-r--r--fs/dlm/lowcomms.c28
-rw-r--r--fs/dlm/main.c2
-rw-r--r--fs/dlm/netlink.c2
-rw-r--r--fs/dlm/user.c1
-rw-r--r--fs/ext2/file.c35
-rw-r--r--fs/ext2/inode.c11
-rw-r--r--fs/ext4/Kconfig1
-rw-r--r--fs/ext4/acl.c2
-rw-r--r--fs/ext4/ext4.h31
-rw-r--r--fs/ext4/ext4_jbd2.h14
-rw-r--r--fs/ext4/extents.c27
-rw-r--r--fs/ext4/file.c184
-rw-r--r--fs/ext4/ialloc.c5
-rw-r--r--fs/ext4/inline.c18
-rw-r--r--fs/ext4/inode.c349
-rw-r--r--fs/ext4/ioctl.c82
-rw-r--r--fs/ext4/mballoc.c4
-rw-r--r--fs/ext4/namei.c24
-rw-r--r--fs/ext4/page-io.c3
-rw-r--r--fs/ext4/super.c152
-rw-r--r--fs/ext4/xattr.c45
-rw-r--r--fs/f2fs/acl.c2
-rw-r--r--fs/f2fs/checkpoint.c34
-rw-r--r--fs/f2fs/data.c196
-rw-r--r--fs/f2fs/debug.c29
-rw-r--r--fs/f2fs/dir.c30
-rw-r--r--fs/f2fs/extent_cache.c2
-rw-r--r--fs/f2fs/f2fs.h201
-rw-r--r--fs/f2fs/file.c86
-rw-r--r--fs/f2fs/gc.c29
-rw-r--r--fs/f2fs/inline.c14
-rw-r--r--fs/f2fs/inode.c47
-rw-r--r--fs/f2fs/namei.c6
-rw-r--r--fs/f2fs/node.c226
-rw-r--r--fs/f2fs/node.h13
-rw-r--r--fs/f2fs/recovery.c46
-rw-r--r--fs/f2fs/segment.c236
-rw-r--r--fs/f2fs/segment.h28
-rw-r--r--fs/f2fs/shrinker.c10
-rw-r--r--fs/f2fs/super.c281
-rw-r--r--fs/f2fs/xattr.c4
-rw-r--r--fs/iomap.c5
-rw-r--r--fs/jfs/ioctl.c2
-rw-r--r--fs/kernfs/inode.c4
-rw-r--r--fs/mbcache.c41
-rw-r--r--fs/proc/generic.c1
-rw-r--r--fs/proc/internal.h1
-rw-r--r--fs/xfs/xfs_aops.c26
-rw-r--r--fs/xfs/xfs_aops.h3
-rw-r--r--fs/xfs/xfs_file.c10
-rw-r--r--include/dt-bindings/sound/cs42l42.h73
-rw-r--r--include/linux/ahci-remap.h28
-rw-r--r--include/linux/ata.h6
-rw-r--r--include/linux/blk-mq.h1
-rw-r--r--include/linux/blkdev.h14
-rw-r--r--include/linux/bsg-lib.h4
-rw-r--r--include/linux/cacheinfo.h1
-rw-r--r--include/linux/configfs.h2
-rw-r--r--include/linux/console.h16
-rw-r--r--include/linux/dax.h60
-rw-r--r--include/linux/debugfs.h44
-rw-r--r--include/linux/device.h91
-rw-r--r--include/linux/f2fs_fs.h10
-rw-r--r--include/linux/fpga/fpga-bridge.h60
-rw-r--r--include/linux/fpga/fpga-mgr.h29
-rw-r--r--include/linux/fscrypto.h134
-rw-r--r--include/linux/fsl_devices.h1
-rw-r--r--include/linux/hid.h5
-rw-r--r--include/linux/hwmon.h31
-rw-r--r--include/linux/hyperv.h53
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h1
-rw-r--r--include/linux/iio/consumer.h41
-rw-r--r--include/linux/iio/dac/mcp4725.h12
-rw-r--r--include/linux/iio/iio.h48
-rw-r--r--include/linux/iio/sysfs.h24
-rw-r--r--include/linux/iio/trigger.h2
-rw-r--r--include/linux/iio/types.h5
-rw-r--r--include/linux/iomap.h1
-rw-r--r--include/linux/irqchip/arm-gic-v3.h8
-rw-r--r--include/linux/kernel.h1
-rw-r--r--include/linux/kvm_host.h10
-rw-r--r--include/linux/libata.h5
-rw-r--r--include/linux/mei_cl_bus.h51
-rw-r--r--include/linux/mfd/cros_ec.h10
-rw-r--r--include/linux/mfd/cros_ec_commands.h183
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h8
-rw-r--r--include/linux/miscdevice.h7
-rw-r--r--include/linux/ntb.h2
-rw-r--r--include/linux/of.h25
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/phy/phy-qcom-ufs.h18
-rw-r--r--include/linux/pm.h2
-rw-r--r--include/linux/pm_runtime.h10
-rw-r--r--include/linux/power/bq27xxx_battery.h3
-rw-r--r--include/linux/proc_fs.h2
-rw-r--r--include/linux/restart_block.h51
-rw-r--r--include/linux/serial_8250.h8
-rw-r--r--include/linux/serial_core.h2
-rw-r--r--include/linux/spi/spi.h1
-rw-r--r--include/linux/swiotlb.h14
-rw-r--r--include/linux/thread_info.h45
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/usb/gadget.h4
-rw-r--r--include/linux/usb/hcd.h19
-rw-r--r--include/linux/vme.h1
-rw-r--r--include/linux/vt_kern.h2
-rw-r--r--include/linux/workqueue.h11
-rw-r--r--include/scsi/libfc.h206
-rw-r--r--include/scsi/scsi_host.h8
-rw-r--r--include/scsi/scsi_transport_fc.h62
-rw-r--r--include/soc/bcm2835/raspberrypi-firmware.h21
-rw-r--r--include/sound/compress_driver.h1
-rw-r--r--include/sound/core.h20
-rw-r--r--include/sound/cs35l34.h35
-rw-r--r--include/sound/dmaengine_pcm.h6
-rw-r--r--include/sound/emu10k1.h3
-rw-r--r--include/sound/rt5514.h20
-rwxr-xr-xinclude/sound/rt5665.h47
-rw-r--r--include/sound/simple_card_utils.h8
-rw-r--r--include/sound/soc-dai.h43
-rw-r--r--include/sound/soc-dapm.h14
-rw-r--r--include/sound/soc-topology.h2
-rw-r--r--include/sound/soc.h87
-rw-r--r--include/trace/events/f2fs.h21
-rw-r--r--include/uapi/linux/dm-log-userspace.h53
-rw-r--r--include/uapi/linux/fs.h14
-rw-r--r--include/uapi/linux/hw_breakpoint.h4
-rw-r--r--include/uapi/linux/iio/types.h2
-rw-r--r--include/uapi/linux/kvm.h5
-rw-r--r--include/uapi/linux/raid/md_p.h7
-rw-r--r--include/uapi/linux/usb/ch9.h24
-rw-r--r--include/uapi/sound/asoc.h90
-rw-r--r--include/uapi/sound/snd_sst_tokens.h8
-rw-r--r--include/xen/arm/hypercall.h87
-rw-r--r--include/xen/arm/hypervisor.h39
-rw-r--r--include/xen/arm/interface.h85
-rw-r--r--include/xen/arm/page-coherent.h98
-rw-r--r--include/xen/arm/page.h122
-rw-r--r--include/xen/swiotlb-xen.h3
-rw-r--r--include/xen/xenbus.h4
-rw-r--r--init/main.c10
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/power/qos.c11
-rw-r--r--kernel/workqueue.c103
-rw-r--r--lib/debugobjects.c2
-rw-r--r--lib/kobject_uevent.c6
-rw-r--r--lib/raid6/avx2.c232
-rw-r--r--lib/swiotlb.c81
-rw-r--r--mm/filemap.c5
-rw-r--r--mm/memory.c1
-rw-r--r--mm/nommu.c1
-rw-r--r--mm/percpu.c3
-rw-r--r--mm/slab.c7
-rwxr-xr-xscripts/checkkconfigsymbols.py2
-rw-r--r--scripts/gcc-plugins/latent_entropy_plugin.c2
-rw-r--r--scripts/gcc-plugins/sancov_plugin.c2
-rw-r--r--sound/core/oss/pcm_oss.c2
-rw-r--r--sound/core/rawmidi.c4
-rw-r--r--sound/drivers/opl3/opl3_lib.c2
-rw-r--r--sound/firewire/bebob/bebob.c4
-rw-r--r--sound/pci/ac97/ac97_codec.c2
-rw-r--r--sound/pci/als4000.c2
-rw-r--r--sound/pci/au88x0/au88x0_game.c2
-rw-r--r--sound/pci/azt3328.c2
-rw-r--r--sound/pci/cmipci.c2
-rw-r--r--sound/pci/cs4281.c2
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c2
-rw-r--r--sound/pci/cs46xx/dsp_spos.c3
-rw-r--r--sound/pci/echoaudio/layla24_dsp.c2
-rw-r--r--sound/pci/emu10k1/emu10k1.c11
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c228
-rw-r--r--sound/pci/ens1370.c2
-rw-r--r--sound/pci/es1938.c2
-rw-r--r--sound/pci/es1968.c2
-rw-r--r--sound/pci/hda/hda_auto_parser.c4
-rw-r--r--sound/pci/hda/patch_ca0132.c1
-rw-r--r--sound/pci/hda/patch_conexant.c17
-rw-r--r--sound/pci/hda/patch_realtek.c46
-rw-r--r--sound/pci/riptide/riptide.c2
-rw-r--r--sound/pci/sonicvibes.c2
-rw-r--r--sound/pci/trident/trident_main.c2
-rw-r--r--sound/pci/via82xx.c2
-rw-r--r--sound/pci/ymfpci/ymfpci.h2
-rw-r--r--sound/soc/atmel/Kconfig10
-rw-r--r--sound/soc/atmel/Makefile2
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c83
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.h1
-rw-r--r--sound/soc/atmel/atmel_wm8904.c2
-rw-r--r--sound/soc/atmel/tse850-pcm5142.c472
-rw-r--r--sound/soc/bcm/Kconfig1
-rw-r--r--sound/soc/codecs/Kconfig31
-rw-r--r--sound/soc/codecs/Makefile11
-rw-r--r--sound/soc/codecs/ab8500-codec.c2
-rw-r--r--sound/soc/codecs/adau17x1.c2
-rw-r--r--sound/soc/codecs/ak4641.c22
-rw-r--r--sound/soc/codecs/ak4641.h47
-rw-r--r--sound/soc/codecs/arizona.c153
-rw-r--r--sound/soc/codecs/arizona.h109
-rw-r--r--sound/soc/codecs/cs35l34.c1251
-rw-r--r--sound/soc/codecs/cs35l34.h269
-rw-r--r--sound/soc/codecs/cs42l42.c1986
-rw-r--r--sound/soc/codecs/cs42l42.h776
-rw-r--r--sound/soc/codecs/cs42l56.c18
-rw-r--r--sound/soc/codecs/cs42l73.c4
-rw-r--r--sound/soc/codecs/cs42xx8.c10
-rw-r--r--sound/soc/codecs/cs47l24.c55
-rw-r--r--sound/soc/codecs/da7219-aad.c18
-rw-r--r--sound/soc/codecs/da7219.c139
-rw-r--r--sound/soc/codecs/da7219.h5
-rw-r--r--sound/soc/codecs/es8328.h37
-rw-r--r--sound/soc/codecs/msm8916-wcd-analog.c890
-rw-r--r--sound/soc/codecs/msm8916-wcd-digital.c923
-rw-r--r--sound/soc/codecs/nau8825.c142
-rw-r--r--sound/soc/codecs/nau8825.h16
-rw-r--r--sound/soc/codecs/rl6231.c1
-rw-r--r--sound/soc/codecs/rl6347a.c2
-rw-r--r--sound/soc/codecs/rt298.c24
-rw-r--r--sound/soc/codecs/rt5514-spi.c1
-rw-r--r--sound/soc/codecs/rt5514.c17
-rw-r--r--sound/soc/codecs/rt5514.h2
-rw-r--r--sound/soc/codecs/rt5616.c3
-rw-r--r--sound/soc/codecs/rt5640.c5
-rw-r--r--sound/soc/codecs/rt5640.h6
-rw-r--r--sound/soc/codecs/rt5660.c4
-rw-r--r--sound/soc/codecs/rt5660.h3
-rw-r--r--sound/soc/codecs/rt5663.c1137
-rw-r--r--sound/soc/codecs/rt5663.h1162
-rw-r--r--sound/soc/codecs/rt5665.c4874
-rw-r--r--sound/soc/codecs/rt5665.h1990
-rw-r--r--sound/soc/codecs/rt5670.c16
-rw-r--r--sound/soc/codecs/rt5670.h1
-rw-r--r--sound/soc/codecs/rt5677-spi.c1
-rw-r--r--sound/soc/codecs/stac9766.c162
-rw-r--r--sound/soc/codecs/stac9766.h17
-rw-r--r--sound/soc/codecs/sti-sas.c179
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c3
-rw-r--r--sound/soc/codecs/tlv320aic31xx.h1
-rw-r--r--sound/soc/codecs/tlv320aic3x.c2
-rw-r--r--sound/soc/codecs/uda1380.c77
-rw-r--r--sound/soc/codecs/uda1380.h4
-rw-r--r--sound/soc/codecs/wm2200.c4
-rw-r--r--sound/soc/codecs/wm5102.c59
-rw-r--r--sound/soc/codecs/wm5110.c61
-rw-r--r--sound/soc/codecs/wm8523.c24
-rw-r--r--sound/soc/codecs/wm8580.c123
-rw-r--r--sound/soc/codecs/wm8753.h3
-rw-r--r--sound/soc/codecs/wm8978.h2
-rw-r--r--sound/soc/codecs/wm8997.c39
-rw-r--r--sound/soc/codecs/wm8998.c38
-rw-r--r--sound/soc/codecs/wm9081.c2
-rw-r--r--sound/soc/codecs/wm9705.c138
-rw-r--r--sound/soc/codecs/wm9705.h11
-rw-r--r--sound/soc/codecs/wm9712.c177
-rw-r--r--sound/soc/codecs/wm9712.h11
-rw-r--r--sound/soc/codecs/wm9713.c2
-rw-r--r--sound/soc/codecs/wm9713.h4
-rw-r--r--sound/soc/codecs/wm_adsp.c354
-rw-r--r--sound/soc/codecs/wm_adsp.h27
-rw-r--r--sound/soc/codecs/wmfw.h4
-rw-r--r--sound/soc/fsl/Kconfig1
-rw-r--r--sound/soc/fsl/efika-audio-fabric.c1
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c2
-rw-r--r--sound/soc/fsl/imx-wm8962.c2
-rw-r--r--sound/soc/generic/simple-card-utils.c5
-rw-r--r--sound/soc/generic/simple-card.c2
-rw-r--r--sound/soc/generic/simple-scu-card.c115
-rw-r--r--sound/soc/intel/atom/sst-atom-controls.c2
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c6
-rw-r--r--sound/soc/intel/atom/sst/sst.c39
-rw-r--r--sound/soc/intel/atom/sst/sst.h1
-rw-r--r--sound/soc/intel/atom/sst/sst_acpi.c2
-rw-r--r--sound/soc/intel/atom/sst/sst_ipc.c11
-rw-r--r--sound/soc/intel/atom/sst/sst_stream.c4
-rw-r--r--sound/soc/intel/baytrail/sst-baytrail-ipc.c3
-rw-r--r--sound/soc/intel/boards/bdw-rt5677.c2
-rw-r--r--sound/soc/intel/boards/broadwell.c18
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c26
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c4
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c68
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c4
-rw-r--r--sound/soc/intel/boards/cht_bsw_max98090_ti.c4
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c10
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c46
-rw-r--r--sound/soc/intel/boards/haswell.c2
-rw-r--r--sound/soc/intel/boards/mfld_machine.c4
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_max98357a.c6
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_ssm4567.c6
-rw-r--r--sound/soc/intel/boards/skl_rt286.c4
-rw-r--r--sound/soc/intel/common/sst-acpi.h17
-rw-r--r--sound/soc/intel/common/sst-ipc.c85
-rw-r--r--sound/soc/intel/common/sst-ipc.h8
-rw-r--r--sound/soc/intel/common/sst-match-acpi.c57
-rw-r--r--sound/soc/intel/haswell/sst-haswell-ipc.c3
-rw-r--r--sound/soc/intel/skylake/bxt-sst.c145
-rw-r--r--sound/soc/intel/skylake/skl-messages.c39
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c28
-rw-r--r--sound/soc/intel/skylake/skl-sst-cldma.c1
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.h12
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.c71
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.h37
-rw-r--r--sound/soc/intel/skylake/skl-sst-utils.c2
-rw-r--r--sound/soc/intel/skylake/skl-topology.c47
-rw-r--r--sound/soc/intel/skylake/skl-topology.h28
-rw-r--r--sound/soc/intel/skylake/skl.c59
-rw-r--r--sound/soc/intel/skylake/skl.h6
-rw-r--r--sound/soc/kirkwood/armada-370-db.c2
-rw-r--r--sound/soc/mxs/mxs-saif.c13
-rw-r--r--sound/soc/mxs/mxs-sgtl5000.c2
-rw-r--r--sound/soc/pxa/corgi.c6
-rw-r--r--sound/soc/pxa/e740_wm9705.c1
-rw-r--r--sound/soc/pxa/e750_wm9705.c1
-rw-r--r--sound/soc/pxa/e800_wm9712.c1
-rw-r--r--sound/soc/pxa/em-x270.c1
-rw-r--r--sound/soc/pxa/hx4700.c2
-rw-r--r--sound/soc/pxa/magician.c2
-rw-r--r--sound/soc/pxa/mioa701_wm9713.c2
-rw-r--r--sound/soc/pxa/palm27x.c1
-rw-r--r--sound/soc/pxa/poodle.c4
-rw-r--r--sound/soc/pxa/pxa-ssp.h6
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.h3
-rw-r--r--sound/soc/pxa/spitz.c6
-rw-r--r--sound/soc/pxa/tosa.c7
-rw-r--r--sound/soc/qcom/apq8016_sbc.c11
-rw-r--r--sound/soc/qcom/lpass-platform.c49
-rw-r--r--sound/soc/qcom/storm.c2
-rw-r--r--sound/soc/rockchip/rk3399_gru_sound.c8
-rw-r--r--sound/soc/rockchip/rockchip_max98090.c2
-rw-r--r--sound/soc/rockchip/rockchip_rt5645.c2
-rw-r--r--sound/soc/samsung/Kconfig58
-rw-r--r--sound/soc/samsung/Makefile9
-rw-r--r--sound/soc/samsung/ac97.c437
-rw-r--r--sound/soc/samsung/dmaengine.c8
-rw-r--r--sound/soc/samsung/i2s.c8
-rw-r--r--sound/soc/samsung/ln2440sbc_alc650.c72
-rw-r--r--sound/soc/samsung/pcm.c60
-rw-r--r--sound/soc/samsung/regs-ac97.h66
-rw-r--r--sound/soc/samsung/s3c2412-i2s.c2
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.c51
-rw-r--r--sound/soc/samsung/s3c24xx_uda134x.c79
-rw-r--r--sound/soc/samsung/smdk2443_wm9710.c68
-rw-r--r--sound/soc/samsung/smdk_wm8580.c30
-rw-r--r--sound/soc/samsung/smdk_wm8580pcm.c175
-rw-r--r--sound/soc/samsung/smdk_wm9713.c108
-rw-r--r--sound/soc/samsung/tm2_wm5110.c552
-rw-r--r--sound/soc/sh/Kconfig3
-rw-r--r--sound/soc/sh/rcar/adg.c61
-rw-r--r--sound/soc/sh/rcar/core.c175
-rw-r--r--sound/soc/sh/rcar/dma.c295
-rw-r--r--sound/soc/sh/rcar/dvc.c2
-rw-r--r--sound/soc/sh/rcar/gen.c12
-rw-r--r--sound/soc/sh/rcar/rsnd.h156
-rw-r--r--sound/soc/sh/rcar/src.c13
-rw-r--r--sound/soc/sh/rcar/ssi.c28
-rw-r--r--sound/soc/sh/rcar/ssiu.c20
-rw-r--r--sound/soc/soc-compress.c98
-rw-r--r--sound/soc/soc-core.c181
-rw-r--r--sound/soc/soc-dapm.c154
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c13
-rw-r--r--sound/soc/soc-pcm.c2
-rw-r--r--sound/soc/soc-topology.c751
-rw-r--r--sound/soc/soc-utils.c199
-rw-r--r--sound/soc/sti/sti_uniperif.c43
-rw-r--r--sound/soc/sti/uniperif.h2
-rw-r--r--sound/soc/sti/uniperif_player.c91
-rw-r--r--sound/soc/sti/uniperif_reader.c41
-rw-r--r--sound/soc/sunxi/Kconfig8
-rw-r--r--sound/soc/sunxi/Makefile1
-rw-r--r--sound/soc/sunxi/sun4i-codec.c867
-rw-r--r--sound/soc/sunxi/sun4i-i2s.c105
-rw-r--r--sound/soc/sunxi/sun8i-codec-analog.c665
-rw-r--r--sound/soc/tegra/tegra_alc5632.c2
-rw-r--r--sound/soc/tegra/tegra_max98090.c2
-rw-r--r--sound/soc/tegra/tegra_rt5640.c2
-rw-r--r--sound/soc/tegra/tegra_rt5677.c2
-rw-r--r--sound/soc/tegra/tegra_sgtl5000.c2
-rw-r--r--sound/soc/tegra/tegra_wm8753.c2
-rw-r--r--sound/soc/tegra/tegra_wm8903.c2
-rw-r--r--sound/soc/tegra/trimslice.c2
-rw-r--r--sound/soc/zte/Kconfig16
-rw-r--r--sound/soc/zte/Makefile4
-rw-r--r--sound/soc/zte/zx-i2s.c (renamed from sound/soc/zte/zx296702-i2s.c)0
-rw-r--r--sound/soc/zte/zx-spdif.c (renamed from sound/soc/zte/zx296702-spdif.c)2
-rw-r--r--sound/usb/card.c1
-rw-r--r--sound/usb/endpoint.c32
-rw-r--r--sound/usb/hiface/pcm.c2
-rw-r--r--sound/usb/line6/driver.h9
-rw-r--r--sound/usb/line6/podhd.c26
-rw-r--r--sound/usb/mixer.c3
-rw-r--r--sound/usb/pcm.c31
-rw-r--r--sound/usb/quirks.c38
-rw-r--r--tools/hv/Makefile3
-rw-r--r--tools/hv/hv_fcopy_daemon.c7
-rw-r--r--tools/hv/hv_kvp_daemon.c20
-rw-r--r--tools/iio/iio_generic_buffer.c18
-rw-r--r--tools/include/uapi/linux/hw_breakpoint.h4
-rw-r--r--tools/power/acpi/tools/ec/ec_access.c2
-rw-r--r--tools/spi/spidev_test.c2
-rw-r--r--tools/testing/selftests/breakpoints/Makefile5
-rw-r--r--tools/testing/selftests/breakpoints/breakpoint_test_arm64.c236
-rw-r--r--tools/usb/usbip/.gitignore4
-rw-r--r--tools/usb/usbip/src/usbipd.c7
-rw-r--r--virt/kvm/arm/arch_timer.c17
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c11
-rw-r--r--virt/kvm/arm/vgic/vgic-kvm-device.c2
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v2.c3
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c2
-rw-r--r--virt/kvm/arm/vgic/vgic.h26
-rw-r--r--virt/kvm/kvm_main.c87
2070 files changed, 105217 insertions, 55537 deletions
diff --git a/Documentation/ABI/stable/sysfs-devices b/Documentation/ABI/stable/sysfs-devices
index df449d79b563..35c457f8ce73 100644
--- a/Documentation/ABI/stable/sysfs-devices
+++ b/Documentation/ABI/stable/sysfs-devices
@@ -8,3 +8,17 @@ Description:
Any device associated with a device-tree node will have
an of_path symlink pointing to the corresponding device
node in /sys/firmware/devicetree/
+
+What: /sys/devices/*/devspec
+Date: October 2016
+Contact: Device Tree mailing list <devicetree@vger.kernel.org>
+Description:
+		If CONFIG_OF is enabled, then this file is present. When
+		read, it returns the full name of the device node.
+
+What: /sys/devices/*/obppath
+Date: October 2016
+Contact: Device Tree mailing list <devicetree@vger.kernel.org>
+Description:
+		If CONFIG_OF is enabled, then this file is present. When
+		read, it returns the full name of the device node.
diff --git a/Documentation/ABI/testing/sysfs-bus-fsl-mc b/Documentation/ABI/testing/sysfs-bus-fsl-mc
new file mode 100644
index 000000000000..80256b8b4f26
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-fsl-mc
@@ -0,0 +1,21 @@
+What: /sys/bus/fsl-mc/drivers/.../bind
+Date: December 2016
+Contact: stuart.yoder@nxp.com
+Description:
+ Writing a device location to this file will cause
+ the driver to attempt to bind to the device found at
+ this location. The format for the location is Object.Id
+ and is the same as found in /sys/bus/fsl-mc/devices/.
+ For example:
+ # echo dpni.2 > /sys/bus/fsl-mc/drivers/fsl_dpaa2_eth/bind
+
+What: /sys/bus/fsl-mc/drivers/.../unbind
+Date: December 2016
+Contact: stuart.yoder@nxp.com
+Description:
+ Writing a device location to this file will cause the
+ driver to attempt to unbind from the device found at
+ this location. The format for the location is Object.Id
+ and is the same as found in /sys/bus/fsl-mc/devices/.
+ For example:
+ # echo dpni.2 > /sys/bus/fsl-mc/drivers/fsl_dpaa2_eth/unbind
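
For example, moving a device between drivers is just an unbind followed by a
bind (a sketch; the dpni.2 object and fsl_dpaa2_eth driver names are the ones
used in the examples above):

  # echo dpni.2 > /sys/bus/fsl-mc/drivers/fsl_dpaa2_eth/unbind
  # echo dpni.2 > /sys/bus/fsl-mc/drivers/fsl_dpaa2_eth/bind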
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index fee35c00cc4e..b8f220f978dd 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -329,6 +329,7 @@ What: /sys/bus/iio/devices/iio:deviceX/in_pressure_scale
What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_scale
What: /sys/bus/iio/devices/iio:deviceX/in_velocity_sqrt(x^2+y^2+z^2)_scale
What: /sys/bus/iio/devices/iio:deviceX/in_illuminance_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_countY_scale
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -1579,3 +1580,20 @@ Contact: linux-iio@vger.kernel.org
Description:
Raw (unscaled no offset etc.) electric conductivity reading that
can be processed to siemens per meter.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_countY_raw
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ Raw counter device counts from channel Y. For quadrature
+ counters, multiplication by an available [Y]_scale results in
+ the counts of a single quadrature signal phase from channel Y.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_indexY_raw
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ Raw counter device index value from channel Y. This attribute
+ provides an absolute positional reference (e.g. a pulse once per
+ revolution) which may be used to home positional systems as
+ required.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector b/Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector
new file mode 100644
index 000000000000..2071f9bcfaa5
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector
@@ -0,0 +1,36 @@
+What: /sys/bus/iio/devices/iio:deviceX/in_altvoltageY_invert
+Date: October 2016
+KernelVersion: 4.9
+Contact: Peter Rosin <peda@axentia.se>
+Description:
+ The DAC is used to find the peak level of an alternating
+ voltage input signal by a binary search using the output
+ of a comparator wired to an interrupt pin. Like so:
+ _
+ | \
+ input +------>-------|+ \
+ | \
+ .-------. | }---.
+ | | | / |
+ | dac|-->--|- / |
+ | | |_/ |
+ | | |
+ | | |
+ | irq|------<-------'
+ | |
+ '-------'
+ The boolean invert attribute (0/1) should be set when the
+ input signal is centered around the maximum value of the
+ dac instead of zero. The envelope detector will search
+ from below in this case and will also invert the result.
+ The edge/level of the interrupt is also switched to its
+ opposite value.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_altvoltageY_compare_interval
+Date: October 2016
+KernelVersion: 4.9
+Contact: Peter Rosin <peda@axentia.se>
+Description:
+ Number of milliseconds to wait for the comparator in each
+ step of the binary search for the input peak level. Needs
+ to relate to the frequency of the input signal.
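
As a usage sketch (device index, channel number and values are illustrative),
both attributes are plain sysfs writes:

  cd /sys/bus/iio/devices/iio:device0
  echo 1 > in_altvoltage0_invert             # input centred on the DAC maximum
  echo 20 > in_altvoltage0_compare_interval  # wait 20 ms per comparator step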
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8 b/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
new file mode 100644
index 000000000000..ba676520b953
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
@@ -0,0 +1,125 @@
+What: /sys/bus/iio/devices/iio:deviceX/in_count_count_direction_available
+What: /sys/bus/iio/devices/iio:deviceX/in_count_count_mode_available
+What: /sys/bus/iio/devices/iio:deviceX/in_count_noise_error_available
+What: /sys/bus/iio/devices/iio:deviceX/in_count_quadrature_mode_available
+What: /sys/bus/iio/devices/iio:deviceX/in_index_index_polarity_available
+What: /sys/bus/iio/devices/iio:deviceX/in_index_synchronous_mode_available
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+		The discrete set of available values for the respective
+		counter configuration is listed in this file.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_countY_count_direction
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ Read-only attribute that indicates whether the counter for
+ channel Y is counting up or down.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_countY_count_mode
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ Count mode for channel Y. Four count modes are available:
+ normal, range limit, non-recycle, and modulo-n. The preset value
+ for channel Y is used by the count mode where required.
+
+ Normal:
+ Counting is continuous in either direction.
+
+ Range Limit:
+ An upper or lower limit is set, mimicking limit switches
+ in the mechanical counterpart. The upper limit is set to
+ the preset value, while the lower limit is set to 0. The
+ counter freezes at count = preset when counting up, and
+ at count = 0 when counting down. At either of these
+ limits, the counting is resumed only when the count
+ direction is reversed.
+
+ Non-recycle:
+ Counter is disabled whenever a 24-bit count overflow or
+ underflow takes place. The counter is re-enabled when a
+ new count value is loaded to the counter via a preset
+ operation or write to raw.
+
+ Modulo-N:
+ A count boundary is set between 0 and the preset value.
+ The counter is reset to 0 at count = preset when
+ counting up, while the counter is set to the preset
+ value at count = 0 when counting down; the counter does
+			not freeze at the boundary points, but counts
+ continuously throughout.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_countY_noise_error
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ Read-only attribute that indicates whether excessive noise is
+ present at the channel Y count inputs in quadrature clock mode;
+ irrelevant in non-quadrature clock mode.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_countY_preset
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ If the counter device supports preset registers, the preset
+ count for channel Y is provided by this attribute.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_countY_quadrature_mode
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ Configure channel Y counter for non-quadrature or quadrature
+ clock mode. Selecting non-quadrature clock mode will disable
+ synchronous load mode. In quadrature clock mode, the channel Y
+ scale attribute selects the encoder phase division (scale of 1
+ selects full-cycle, scale of 0.5 selects half-cycle, scale of
+ 0.25 selects quarter-cycle) processed by the channel Y counter.
+
+ Non-quadrature:
+ The filter and decoder circuit are bypassed. Encoder A
+ input serves as the count input and B as the UP/DOWN
+ direction control input, with B = 1 selecting UP Count
+ mode and B = 0 selecting Down Count mode.
+
+ Quadrature:
+ Encoder A and B inputs are digitally filtered and
+ decoded for UP/DN clock.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_countY_set_to_preset_on_index
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ Whether to set channel Y counter with channel Y preset value
+ when channel Y index input is active, or continuously count.
+ Valid attribute values are boolean.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_indexY_index_polarity
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ Active level of channel Y index input; irrelevant in
+ non-synchronous load mode.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_indexY_synchronous_mode
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ Configure channel Y counter for non-synchronous or synchronous
+ load mode. Synchronous load mode cannot be selected in
+ non-quadrature clock mode.
+
+ Non-synchronous:
+ A logic low level is the active level at this index
+ input. The index function (as enabled via
+ set_to_preset_on_index) is performed directly on the
+ active level of the index input.
+
+ Synchronous:
+ Intended for interfacing with encoder Index output in
+ quadrature clock mode. The active level is configured
+ via index_polarity. The index function (as enabled via
+ set_to_preset_on_index) is performed synchronously with
+ the quadrature clock on the active level of the index
+ input.
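
Pulling the attributes above together, a hypothetical configuration of counter
channel 0 on iio:device0 could look like this (mode strings and values are
illustrative; the _available files list the exact strings a device accepts):

  cd /sys/bus/iio/devices/iio:device0
  cat in_count_count_mode_available           # list the supported count modes
  echo quadrature > in_count0_quadrature_mode
  echo 1000 > in_count0_preset                # boundary used by modulo-n
  echo modulo-n > in_count0_count_mode
  echo 1 > in_count0_set_to_preset_on_index
  cat in_count0_raw                           # current count value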
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-cros-ec b/Documentation/ABI/testing/sysfs-bus-iio-cros-ec
new file mode 100644
index 000000000000..297b9720f024
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-cros-ec
@@ -0,0 +1,18 @@
+What: /sys/bus/iio/devices/iio:deviceX/calibrate
+Date: July 2015
+KernelVersion: 4.7
+Contact: linux-iio@vger.kernel.org
+Description:
+ Writing '1' will perform a FOC (Fast Online Calibration). The
+ corresponding calibration offsets can be read from *_calibbias
+ entries.
+
+What: /sys/bus/iio/devices/iio:deviceX/location
+Date: July 2015
+KernelVersion: 4.7
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute returns a string with the physical location where
+ the motion sensor is placed. For example, in a laptop a motion
+ sensor can be located on the base or on the lid. Current valid
+ values are 'base' and 'lid'.
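
For instance (device index hypothetical), the placement can be queried and a
calibration triggered from the shell; the resulting offsets land in the usual
IIO *_calibbias attributes:

  cat /sys/bus/iio/devices/iio:device0/location        # e.g. "lid"
  echo 1 > /sys/bus/iio/devices/iio:device0/calibrate
  cat /sys/bus/iio/devices/iio:device0/in_accel_x_calibbias   # assumed standard calibbias name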
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac b/Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac
new file mode 100644
index 000000000000..580e93f373f6
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac
@@ -0,0 +1,8 @@
+What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_raw_available
+Date: October 2016
+KernelVersion: 4.9
+Contact: Peter Rosin <peda@axentia.se>
+Description:
+ The range of available values represented as the minimum value,
+ the step and the maximum value, all enclosed in square brackets.
+ Example: [0 1 256]
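
A short sketch (device and channel indices are illustrative; the value itself
is written through the standard IIO out_voltageY_raw attribute):

  cd /sys/bus/iio/devices/iio:device0
  cat out_voltage0_raw_available   # e.g. "[0 1 256]"
  echo 128 > out_voltage0_raw      # any value inside the advertised range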
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-light-isl29018 b/Documentation/ABI/testing/sysfs-bus-iio-light-isl29018
new file mode 100644
index 000000000000..f0ce0a0476ea
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-light-isl29018
@@ -0,0 +1,19 @@
+What: /sys/bus/iio/devices/iio:deviceX/proximity_on_chip_ambient_infrared_suppression
+Date: January 2011
+KernelVersion: 2.6.37
+Contact: linux-iio@vger.kernel.org
+Description:
+ From ISL29018 Data Sheet (FN6619.4, Oct 8, 2012) regarding the
+ infrared suppression:
+
+ Scheme 0, makes full n (4, 8, 12, 16) bits (unsigned) proximity
+ detection. The range of Scheme 0 proximity count is from 0 to
+ 2^n. Logic 1 of this bit, Scheme 1, makes n-1 (3, 7, 11, 15)
+ bits (2's complementary) proximity_less_ambient detection. The
+ range of Scheme 1 proximity count is from -2^(n-1) to 2^(n-1).
+ The sign bit is extended for resolutions less than 16. While
+ Scheme 0 has wider dynamic range, Scheme 1 proximity detection
+ is less affected by the ambient IR noise variation.
+
+ 0 Sensing IR from LED and ambient
+ 1 Sensing IR from LED with ambient IR rejection
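
In practice the attribute is a boolean switch; a hypothetical session (device
index illustrative):

  # enable ambient IR rejection (Scheme 1)
  echo 1 > /sys/bus/iio/devices/iio:device0/proximity_on_chip_ambient_infrared_suppression
  # return to the full-range Scheme 0
  echo 0 > /sys/bus/iio/devices/iio:device0/proximity_on_chip_ambient_infrared_suppression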
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583 b/Documentation/ABI/testing/sysfs-bus-iio-light-tsl2583
index 660781df409f..a2e19964e87e 100644
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583
+++ b/Documentation/ABI/testing/sysfs-bus-iio-light-tsl2583
@@ -1,18 +1,18 @@
-What: /sys/bus/iio/devices/device[n]/lux_table
+What: /sys/bus/iio/devices/device[n]/in_illuminance_calibrate
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
- This property gets/sets the table of coefficients
- used in calculating illuminance in lux.
+ This property causes an internal calibration of the als gain trim
+ value which is later used in calculating illuminance in lux.
-What: /sys/bus/iio/devices/device[n]/illuminance0_calibrate
+What: /sys/bus/iio/devices/device[n]/in_illuminance_lux_table
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
- This property causes an internal calibration of the als gain trim
- value which is later used in calculating illuminance in lux.
+ This property gets/sets the table of coefficients
+ used in calculating illuminance in lux.
-What: /sys/bus/iio/devices/device[n]/illuminance0_input_target
+What: /sys/bus/iio/devices/device[n]/in_illuminance_input_target
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
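
Under the new attribute names the calibration flow is, as a sketch (device
index and target value are illustrative):

  cd /sys/bus/iio/devices/device0
  echo 200 > in_illuminance_input_target   # known illuminance of the reference light
  echo 1 > in_illuminance_calibrate        # recompute the als gain trim
  cat in_illuminance_lux_table             # coefficients used for the lux calculation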
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531 b/Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531
new file mode 100644
index 000000000000..2a91fbe394fc
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531
@@ -0,0 +1,8 @@
+What: /sys/bus/iio/devices/iio:deviceX/out_resistance_raw_available
+Date: October 2016
+KernelVersion: 4.9
+Contact: Peter Rosin <peda@axentia.se>
+Description:
+ The range of available values represented as the minimum value,
+ the step and the maximum value, all enclosed in square brackets.
+ Example: [0 1 256]
diff --git a/Documentation/ABI/testing/sysfs-class-fpga-bridge b/Documentation/ABI/testing/sysfs-class-fpga-bridge
new file mode 100644
index 000000000000..312ae2c579d8
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-fpga-bridge
@@ -0,0 +1,11 @@
+What: /sys/class/fpga_bridge/<bridge>/name
+Date: January 2016
+KernelVersion: 4.5
+Contact: Alan Tull <atull@opensource.altera.com>
+Description: Name of low level FPGA bridge driver.
+
+What: /sys/class/fpga_bridge/<bridge>/state
+Date: January 2016
+KernelVersion: 4.5
+Contact: Alan Tull <atull@opensource.altera.com>
+Description: Show bridge state as "enabled" or "disabled"
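
Both attributes are read-only from user space, e.g. (bridge name hypothetical):

  cat /sys/class/fpga_bridge/br0/name    # low level driver name
  cat /sys/class/fpga_bridge/br0/state   # "enabled" or "disabled"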
diff --git a/Documentation/ABI/testing/sysfs-class-mei b/Documentation/ABI/testing/sysfs-class-mei
index 80d9888a8ece..5096a82f4cde 100644
--- a/Documentation/ABI/testing/sysfs-class-mei
+++ b/Documentation/ABI/testing/sysfs-class-mei
@@ -29,3 +29,19 @@ Description: Display fw status registers content
Also number of registers varies between 1 and 6
depending on generation.
+What: /sys/class/mei/meiN/hbm_ver
+Date: Aug 2016
+KernelVersion: 4.9
+Contact: Tomas Winkler <tomas.winkler@intel.com>
+Description: Display the negotiated HBM protocol version.
+
+ The HBM protocol version negotiated
+ between the driver and the device.
+
+What: /sys/class/mei/meiN/hbm_ver_drv
+Date: Aug 2016
+KernelVersion: 4.9
+Contact: Tomas Winkler <tomas.winkler@intel.com>
+Description: Display the driver HBM protocol version.
+
+ The HBM protocol version supported by the driver.
diff --git a/Documentation/ABI/testing/sysfs-devices-deferred_probe b/Documentation/ABI/testing/sysfs-devices-deferred_probe
new file mode 100644
index 000000000000..58553d7a321f
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-deferred_probe
@@ -0,0 +1,12 @@
+What: /sys/devices/.../deferred_probe
+Date: August 2016
+Contact: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Description:
+ The /sys/devices/.../deferred_probe attribute is
+		present for all devices. If, while probing a device, a
+		driver detects that a related device is not yet ready,
+		it may defer probing of the first device. The
+ kernel will retry probing the first device after any
+ other device is successfully probed. This attribute
+ reads as 1 if probing of this device is currently
+ deferred, or 0 otherwise.
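
Since the attribute exists for every device, currently-deferred devices can be
listed with a small shell loop (a sketch, not an official tool):

  find /sys/devices -name deferred_probe |
  while read -r f; do
          [ "$(cat "$f")" = "1" ] && echo "${f%/deferred_probe}"
  done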
diff --git a/Documentation/ABI/testing/sysfs-platform-phy-rcar-gen3-usb2 b/Documentation/ABI/testing/sysfs-platform-phy-rcar-gen3-usb2
new file mode 100644
index 000000000000..6212697bbf6f
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-phy-rcar-gen3-usb2
@@ -0,0 +1,15 @@
+What: /sys/devices/platform/<phy-name>/role
+Date: October 2016
+KernelVersion: 4.10
+Contact: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Description:
+		This file can be read and written.
+		It shows and changes the phy mode used for USB role swap.
+
+		Write one of the following strings to change the mode:
+		 "host" - switch the mode from peripheral to host.
+		 "peripheral" - switch the mode from host to peripheral.
+
+		Reading the file returns one of the following strings:
+		 "host" - the mode is currently host.
+		 "peripheral" - the mode is currently peripheral.
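
For example, with a hypothetical phy device named ee080200.usb-phy:

  echo host > /sys/devices/platform/ee080200.usb-phy/role
  cat /sys/devices/platform/ee080200.usb-phy/role    # -> "host"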
diff --git a/Documentation/ABI/testing/sysfs-platform-sst-atom b/Documentation/ABI/testing/sysfs-platform-sst-atom
new file mode 100644
index 000000000000..0d07c0395660
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-sst-atom
@@ -0,0 +1,17 @@
+What: /sys/devices/platform/8086%x:00/firmware_version
+Date: November 2016
+KernelVersion: 4.10
+Contact: "Sebastien Guiriec" <sebastien.guiriec@intel.com>
+Description:
+		LPE Firmware version for the SST driver on all Atom
+		platforms (BYT/CHT/Merrifield/BSW).
+ If the FW has never been loaded it will display:
+ "FW not yet loaded"
+ If FW has been loaded it will display:
+ "v01.aa.bb.cc"
+		aa: Major version, reflecting the SoC version:
+ 0d: BYT FW
+ 0b: BSW FW
+ 07: Merrifield FW
+ bb: Minor version
+ cc: Build version
diff --git a/Documentation/admin-guide/vga-softcursor.rst b/Documentation/admin-guide/vga-softcursor.rst
index a663a745cff4..f52175457e60 100644
--- a/Documentation/admin-guide/vga-softcursor.rst
+++ b/Documentation/admin-guide/vga-softcursor.rst
@@ -4,15 +4,13 @@ Software cursor for VGA
by Pavel Machek <pavel@atrey.karlin.mff.cuni.cz>
and Martin Mares <mj@atrey.karlin.mff.cuni.cz>
-Linux now has some ability to manipulate cursor appearance. Normally, you
-can set the size of hardware cursor (and also work around some ugly bugs in
-those miserable Trident cards [#f1]_. You can now play a few new tricks:
-you can make your cursor look
-
-like a non-blinking red block, make it inverse background of the character it's
-over or to highlight that character and still choose whether the original
-hardware cursor should remain visible or not. There may be other things I have
-never thought of.
+Linux now has some ability to manipulate cursor appearance. Normally,
+you can set the size of hardware cursor. You can now play a few new
+tricks: you can make your cursor look like a non-blinking red block,
+make it inverse background of the character it's over or to highlight
+that character and still choose whether the original hardware cursor
+should remain visible or not. There may be other things I have never
+thought of.
The cursor appearance is controlled by a ``<ESC>[?1;2;3c`` escape sequence
where 1, 2 and 3 are parameters described below. If you omit any of them,
@@ -48,8 +46,6 @@ third parameter
Bit setting takes place before bit toggling, so you can simply clear a
bit by including it in both the set mask and the toggle mask.
-.. [#f1] see ``#define TRIDENT_GLITCH`` in ``drivers/video/vgacon.c``.
-
Examples
--------
diff --git a/Documentation/device-mapper/delay.txt b/Documentation/device-mapper/delay.txt
index a07b5927f4a8..4b1d22a44ce4 100644
--- a/Documentation/device-mapper/delay.txt
+++ b/Documentation/device-mapper/delay.txt
@@ -16,12 +16,12 @@ Example scripts
[[
#!/bin/sh
# Create device delaying rw operation for 500ms
-echo "0 `blockdev --getsize $1` delay $1 0 500" | dmsetup create delayed
+echo "0 `blockdev --getsz $1` delay $1 0 500" | dmsetup create delayed
]]
[[
#!/bin/sh
# Create device delaying only write operation for 500ms and
# splitting reads and writes to different devices $1 $2
-echo "0 `blockdev --getsize $1` delay $1 0 0 $2 0 500" | dmsetup create delayed
+echo "0 `blockdev --getsz $1` delay $1 0 0 $2 0 500" | dmsetup create delayed
]]
diff --git a/Documentation/device-mapper/dm-crypt.txt b/Documentation/device-mapper/dm-crypt.txt
index 692171fe9da0..ff1f87bf26e8 100644
--- a/Documentation/device-mapper/dm-crypt.txt
+++ b/Documentation/device-mapper/dm-crypt.txt
@@ -21,13 +21,30 @@ Parameters: <cipher> <key> <iv_offset> <device path> \
/proc/crypto contains supported crypto modes
<key>
- Key used for encryption. It is encoded as a hexadecimal number.
+ Key used for encryption. It is encoded either as a hexadecimal number
+ or, for keys residing in the kernel keyring service, it can be passed
+ as a <key_string> prefixed with a single colon character (':').
You can only use key sizes that are valid for the selected cipher
in combination with the selected iv mode.
Note that for some iv modes the key string can contain additional
keys (for example IV seed) so the key contains more parts concatenated
into a single string.
+<key_string>
+ The kernel keyring key is identified by a string in the following format:
+ <key_size>:<key_type>:<key_description>.
+
+<key_size>
+ The encryption key size in bytes. The kernel key payload size must match
+ the value passed in <key_size>.
+
+<key_type>
+ Either 'logon' or 'user' kernel key type.
+
+<key_description>
+ The kernel keyring key description that the crypt target should look
+ for when loading a key of <key_type>.
+
<keycount>
Multi-key compatibility mode. You can define <keycount> keys and
then sectors are encrypted according to their offsets (sector 0 uses key0;
@@ -85,7 +102,13 @@ https://gitlab.com/cryptsetup/cryptsetup
[[
#!/bin/sh
# Create a crypt device using dmsetup
-dmsetup create crypt1 --table "0 `blockdev --getsize $1` crypt aes-cbc-essiv:sha256 babebabebabebabebabebabebabebabe 0 $1 0"
+dmsetup create crypt1 --table "0 `blockdev --getsz $1` crypt aes-cbc-essiv:sha256 babebabebabebabebabebabebabebabe 0 $1 0"
+]]
+
+[[
+#!/bin/sh
+# Create a crypt device using dmsetup when encryption key is stored in keyring service
+dmsetup create crypt2 --table "0 `blockdev --getsz $1` crypt aes-cbc-essiv:sha256 :32:logon:my_prefix:my_key 0 $1 0"
]]
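+
+The keyring key referenced in the example above has to be loaded before the
+table is created; a minimal sketch (the key file path and the choice of the
+session keyring are illustrative):
+
+[[
+#!/bin/sh
+# Load a 32 byte 'logon' key into the session keyring for dm-crypt to find
+keyctl padd logon my_prefix:my_key @s < /path/to/32-byte-keyfile
+]]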
[[
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index 9bd531aa2279..5e3786fd9ea7 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -242,6 +242,10 @@ recovery. Here is a fuller description of the individual fields:
in RAID1/10 or wrong parity values found in RAID4/5/6.
This value is valid only after a "check" of the array
is performed. A healthy array has a 'mismatch_cnt' of 0.
+ <data_offset> The current data offset to the start of the user data on
+ each component device of a raid set (see the respective
+ raid parameter to support out-of-place reshaping).
+
Message Interface
-----------------
diff --git a/Documentation/device-mapper/linear.txt b/Documentation/device-mapper/linear.txt
index d5307d380a45..7cb98d89d3f8 100644
--- a/Documentation/device-mapper/linear.txt
+++ b/Documentation/device-mapper/linear.txt
@@ -16,15 +16,15 @@ Example scripts
[[
#!/bin/sh
# Create an identity mapping for a device
-echo "0 `blockdev --getsize $1` linear $1 0" | dmsetup create identity
+echo "0 `blockdev --getsz $1` linear $1 0" | dmsetup create identity
]]
[[
#!/bin/sh
# Join 2 devices together
-size1=`blockdev --getsize $1`
-size2=`blockdev --getsize $2`
+size1=`blockdev --getsz $1`
+size2=`blockdev --getsz $2`
echo "0 $size1 linear $1 0
$size1 $size2 linear $2 0" | dmsetup create joined
]]
@@ -44,7 +44,7 @@ if (!defined($dev)) {
die("Please specify a device.\n");
}
-my $dev_size = `blockdev --getsize $dev`;
+my $dev_size = `blockdev --getsz $dev`;
my $extents = int($dev_size / $extent_size) -
(($dev_size % $extent_size) ? 1 : 0);
diff --git a/Documentation/device-mapper/striped.txt b/Documentation/device-mapper/striped.txt
index 45f3b91ea4c3..07ec492cceee 100644
--- a/Documentation/device-mapper/striped.txt
+++ b/Documentation/device-mapper/striped.txt
@@ -37,9 +37,9 @@ if (!$num_devs) {
die("Specify at least one device\n");
}
-$min_dev_size = `blockdev --getsize $devs[0]`;
+$min_dev_size = `blockdev --getsz $devs[0]`;
for ($i = 1; $i < $num_devs; $i++) {
- my $this_size = `blockdev --getsize $devs[$i]`;
+ my $this_size = `blockdev --getsz $devs[$i]`;
$min_dev_size = ($min_dev_size < $this_size) ?
$min_dev_size : $this_size;
}
diff --git a/Documentation/device-mapper/switch.txt b/Documentation/device-mapper/switch.txt
index 424835e57f27..5bd4831db4a8 100644
--- a/Documentation/device-mapper/switch.txt
+++ b/Documentation/device-mapper/switch.txt
@@ -123,7 +123,7 @@ Assume that you have volumes vg1/switch0 vg1/switch1 vg1/switch2 with
the same size.
Create a switch device with 64kB region size:
- dmsetup create switch --table "0 `blockdev --getsize /dev/vg1/switch0`
+ dmsetup create switch --table "0 `blockdev --getsz /dev/vg1/switch0`
switch 3 128 0 /dev/vg1/switch0 0 /dev/vg1/switch1 0 /dev/vg1/switch2 0"
Set mappings for the first 7 entries to point to devices switch0, switch1,
diff --git a/Documentation/devicetree/bindings/arm/cpu-capacity.txt b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
new file mode 100644
index 000000000000..7809fbe0cdb7
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
@@ -0,0 +1,236 @@
+==========================================
+ARM CPUs capacity bindings
+==========================================
+
+==========================================
+1 - Introduction
+==========================================
+
+ARM systems may be configured to have cpus with different power/performance
+characteristics within the same chip. In this case, additional information has
+to be made available to the kernel for it to be aware of such differences and
+take decisions accordingly.
+
+==========================================
+2 - CPU capacity definition
+==========================================
+
+CPU capacity is a number that provides the scheduler with information about
+CPU heterogeneity. Such heterogeneity can come from micro-architectural differences
+(e.g., ARM big.LITTLE systems) or maximum frequency at which CPUs can run
+(e.g., SMP systems with multiple frequency domains). Heterogeneity in this
+context is about differing performance characteristics; this binding tries to
+capture a first-order approximation of the relative performance of CPUs.
+
+CPU capacities are obtained by running a suitable benchmark. This binding makes
+no guarantees on the validity or suitability of any particular benchmark; the
+final capacity should, however, be:
+
+* A "single-threaded" or CPU affine benchmark
+* Divided by the running frequency of the CPU executing the benchmark
+* Not subject to dynamic frequency scaling of the CPU
+
+For the time being, however, we advise using the Dhrystone benchmark. The
+above thus becomes:
+
+CPU capacities are obtained by running the Dhrystone benchmark on each CPU at
+max frequency (with caches enabled). The obtained DMIPS score is then divided
+by the frequency (in MHz) at which the benchmark has been run, so that
+DMIPS/MHz are obtained. Such values are then normalized w.r.t. the highest
+score obtained in the system.
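+
+For example, with illustrative Dhrystone scores of 11000 DMIPS measured at
+1100 MHz on one CPU and 5100 DMIPS measured at 850 MHz on another, the
+resulting capacity-dmips-mhz values would be:
+
+  11000 / 1100 = 10 DMIPS/MHz -> 1024 (highest score, reference)
+  5100  /  850 =  6 DMIPS/MHz -> 6 / 10 * 1024 ~= 614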
+
+==========================================
+3 - capacity-dmips-mhz
+==========================================
+
+capacity-dmips-mhz is an optional cpu node [1] property: u32 value
+representing CPU capacity expressed in normalized DMIPS/MHz. At boot time, the
+maximum frequency available to the cpu is then used to calculate the capacity
+value internally used by the kernel.
+
+The capacity-dmips-mhz property is all-or-nothing: if it is specified for a cpu
+node, it has to be specified for every other cpu node, or the system will
+fall back to the default capacity value for every CPU. If cpufreq is not
+available, final capacities are calculated by directly using capacity-dmips-
+mhz values (normalized w.r.t. the highest value found while parsing the DT).
+
+===========================================
+4 - Examples
+===========================================
+
+Example 1 (ARM 64-bit, 6-cpu system, two clusters):
+capacity-dmips-mhz values are scaled w.r.t. 1024 (cpu@0 and cpu@1);
+supposing cluster0@max-freq=1100 and cluster1@max-freq=850,
+final capacities are 1024 for cluster0 and 446 for cluster1 (578 * 850 / 1100 ~= 446)
+
+cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&A57_0>;
+ };
+ core1 {
+ cpu = <&A57_1>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&A53_0>;
+ };
+ core1 {
+ cpu = <&A53_1>;
+ };
+ core2 {
+ cpu = <&A53_2>;
+ };
+ core3 {
+ cpu = <&A53_3>;
+ };
+ };
+ };
+
+ idle-states {
+ entry-method = "arm,psci";
+
+ CPU_SLEEP_0: cpu-sleep-0 {
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x0010000>;
+ local-timer-stop;
+ entry-latency-us = <100>;
+ exit-latency-us = <250>;
+ min-residency-us = <150>;
+ };
+
+ CLUSTER_SLEEP_0: cluster-sleep-0 {
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x1010000>;
+ local-timer-stop;
+ entry-latency-us = <800>;
+ exit-latency-us = <700>;
+ min-residency-us = <2500>;
+ };
+ };
+
+ A57_0: cpu@0 {
+ compatible = "arm,cortex-a57","arm,armv8";
+ reg = <0x0 0x0>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&A57_L2>;
+ clocks = <&scpi_dvfs 0>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ capacity-dmips-mhz = <1024>;
+ };
+
+ A57_1: cpu@1 {
+ compatible = "arm,cortex-a57","arm,armv8";
+ reg = <0x0 0x1>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&A57_L2>;
+ clocks = <&scpi_dvfs 0>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ capacity-dmips-mhz = <1024>;
+ };
+
+ A53_0: cpu@100 {
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x100>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ clocks = <&scpi_dvfs 1>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ capacity-dmips-mhz = <578>;
+ };
+
+ A53_1: cpu@101 {
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x101>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ clocks = <&scpi_dvfs 1>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ capacity-dmips-mhz = <578>;
+ };
+
+ A53_2: cpu@102 {
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x102>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ clocks = <&scpi_dvfs 1>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ capacity-dmips-mhz = <578>;
+ };
+
+ A53_3: cpu@103 {
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x103>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ clocks = <&scpi_dvfs 1>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ capacity-dmips-mhz = <578>;
+ };
+
+ A57_L2: l2-cache0 {
+ compatible = "cache";
+ };
+
+ A53_L2: l2-cache1 {
+ compatible = "cache";
+ };
+};
+
+Example 2 (ARM 32-bit, 4-cpu system, two clusters,
+ cpus 0,1@1GHz, cpus 2,3@500MHz):
+capacity-dmips-mhz values are scaled w.r.t. 2 (cpu@0 and cpu@1); this means that
+cpu@0 and cpu@1 are twice as fast as cpu@2 and cpu@3 (at the same frequency)
+
+cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+ capacity-dmips-mhz = <2>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <1>;
+ capacity-dmips-mhz = <2>;
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0x100>;
+ capacity-dmips-mhz = <1>;
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0x101>;
+ capacity-dmips-mhz = <1>;
+ };
+};
+
+===========================================
+5 - References
+===========================================
+
+[1] ARM Linux Kernel documentation - CPUs bindings
+ Documentation/devicetree/bindings/arm/cpus.txt
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index e6782d50cbcd..c1dcf4cade2e 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -241,6 +241,14 @@ nodes to be present and contain the properties described below.
# List of phandles to idle state nodes supported
by this cpu [3].
+ - capacity-dmips-mhz
+ Usage: Optional
+ Value type: <u32>
+ Definition:
+ # u32 value representing CPU capacity [4] in
+ DMIPS/MHz, relative to highest capacity-dmips-mhz
+ in the system.
+
- rockchip,pmu
Usage: optional for systems that have an "enable-method"
property value of "rockchip,rk3066-smp"
@@ -464,3 +472,5 @@ cpus {
[2] arm/msm/qcom,kpss-acc.txt
[3] ARM Linux kernel documentation - idle states bindings
Documentation/devicetree/bindings/arm/idle-states.txt
+[4] ARM Linux kernel documentation - cpu capacity bindings
+ Documentation/devicetree/bindings/arm/cpu-capacity.txt
diff --git a/Documentation/devicetree/bindings/display/ht16k33.txt b/Documentation/devicetree/bindings/display/ht16k33.txt
new file mode 100644
index 000000000000..8e5b30b87754
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ht16k33.txt
@@ -0,0 +1,42 @@
+Holtek ht16k33 RAM mapping 16*8 LED controller driver with keyscan
+-------------------------------------------------------------------------------
+
+Required properties:
+- compatible: "holtek,ht16k33"
+- reg: I2C slave address of the chip.
+- interrupt-parent: A phandle pointing to the interrupt controller
+ serving the interrupt for this chip.
+- interrupts: Interrupt specification for the key pressed interrupt.
+- refresh-rate-hz: Display update interval in Hz.
+- debounce-delay-ms: Debouncing interval time in milliseconds.
+- linux,keymap: The keymap for keys as described in the binding
+ document (devicetree/bindings/input/matrix-keymap.txt).
+
+Optional properties:
+- linux,no-autorepeat: Disable keyrepeat.
+- default-brightness-level: Initial brightness level [0-15] (default: 15).
+
+Example:
+
+&i2c1 {
+ ht16k33: ht16k33@70 {
+ compatible = "holtek,ht16k33";
+ reg = <0x70>;
+ refresh-rate-hz = <20>;
+ debounce-delay-ms = <50>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <5 (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING)>;
+ linux,keymap = <
+ MATRIX_KEY(2, 0, KEY_F6)
+ MATRIX_KEY(3, 0, KEY_F8)
+ MATRIX_KEY(4, 0, KEY_F10)
+ MATRIX_KEY(5, 0, KEY_F4)
+ MATRIX_KEY(6, 0, KEY_F2)
+ MATRIX_KEY(2, 1, KEY_F5)
+ MATRIX_KEY(3, 1, KEY_F7)
+ MATRIX_KEY(4, 1, KEY_F9)
+ MATRIX_KEY(5, 1, KEY_F3)
+ MATRIX_KEY(6, 1, KEY_F1)
+ >;
+ };
+};
diff --git a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt b/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
index af0b903de293..dfc14f71e81f 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
+++ b/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
@@ -5,7 +5,10 @@ connected to a GPIO pin.
Required properties:
- compatible: Should be "linux,extcon-usb-gpio"
+
+At least one of id-gpio or vbus-gpio must be present; both may be present.
- id-gpio: gpio for USB ID pin. See gpio binding.
+- vbus-gpio: gpio for USB VBUS pin.
Example: Examples of extcon-usb-gpio node in dra7-evm.dts as listed below:
extcon_usb1 {
diff --git a/Documentation/devicetree/bindings/fpga/fpga-region.txt b/Documentation/devicetree/bindings/fpga/fpga-region.txt
new file mode 100644
index 000000000000..3b32ba15a717
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/fpga-region.txt
@@ -0,0 +1,494 @@
+FPGA Region Device Tree Binding
+
+Alan Tull 2016
+
+ CONTENTS
+ - Introduction
+ - Terminology
+ - Sequence
+ - FPGA Region
+ - Supported Use Models
+ - Device Tree Examples
+ - Constraints
+
+
+Introduction
+============
+
+FPGA Regions represent FPGAs and partial reconfiguration regions of FPGAs in
+the Device Tree. FPGA Regions provide a way to program FPGAs under device tree
+control.
+
+This device tree binding document hits some of the high points of FPGA usage and
+attempts to include terminology used by both major FPGA manufacturers. This
+document isn't a replacement for any manufacturer's specifications for FPGA
+usage.
+
+
+Terminology
+===========
+
+Full Reconfiguration
+ * The entire FPGA is programmed.
+
+Partial Reconfiguration (PR)
+ * A section of an FPGA is reprogrammed while the rest of the FPGA is not
+ affected.
+ * Not all FPGAs support PR.
+
+Partial Reconfiguration Region (PRR)
+ * Also called a "reconfigurable partition"
+ * A PRR is a specific section of an FPGA reserved for reconfiguration.
+ * A base (or static) FPGA image may create a set of PRRs that later may
+ be independently reprogrammed many times.
+ * The size and specific location of each PRR is fixed.
+ * The connections at the edge of each PRR are fixed. The image that is loaded
+ into a PRR must fit and must use a subset of the region's connections.
+ * The busses within the FPGA are split such that each region gets its own
+ branch that may be gated independently.
+
+Persona
+ * Also called a "partial bit stream"
+ * An FPGA image that is designed to be loaded into a PRR. There may be
+ any number of personas designed to fit into a PRR, but only one at a time
+ may be loaded.
+ * A persona may create more regions.
+
+FPGA Bridge
+ * FPGA Bridges gate bus signals between a host and FPGA.
+ * FPGA Bridges should be disabled while the FPGA is being programmed to
+ prevent spurious signals on the cpu bus and to the soft logic.
+ * FPGA bridges may be actual hardware or soft logic on an FPGA.
+ * During Full Reconfiguration, hardware bridges between the host and FPGA
+ will be disabled.
+ * During Partial Reconfiguration of a specific region, that region's bridge
+ will be used to gate the busses. Traffic to other regions is not affected.
+ * In some implementations, the FPGA Manager transparently handles gating the
+ buses, eliminating the need to show the hardware FPGA bridges in the
+ device tree.
+ * An FPGA image may create a set of reprogrammable regions, each having its
+ own bridge and its own split of the busses in the FPGA.
+
+FPGA Manager
+ * An FPGA Manager is a hardware block that programs an FPGA under the control
+ of a host processor.
+
+Base Image
+ * Also called the "static image"
+ * An FPGA image that is designed to do full reconfiguration of the FPGA.
+ * A base image may set up a set of partial reconfiguration regions that may
+ later be reprogrammed.
+
+ ---------------- ----------------------------------
+ | Host CPU | | FPGA |
+ | | | |
+ | ----| | ----------- -------- |
+ | | H | | |==>| Bridge0 |<==>| PRR0 | |
+ | | W | | | ----------- -------- |
+ | | | | | |
+ | | B |<=====>|<==| ----------- -------- |
+ | | R | | |==>| Bridge1 |<==>| PRR1 | |
+ | | I | | | ----------- -------- |
+ | | D | | | |
+ | | G | | | ----------- -------- |
+ | | E | | |==>| Bridge2 |<==>| PRR2 | |
+ | ----| | ----------- -------- |
+ | | | |
+ ---------------- ----------------------------------
+
+Figure 1: An FPGA set up with a base image that created three regions. Each
+region (PRR0-2) gets its own split of the busses that is independently gated by
+a soft logic bridge (Bridge0-2) in the FPGA. The contents of each PRR can be
+reprogrammed independently while the rest of the system continues to function.
+
+
+Sequence
+========
+
+When a DT overlay that targets an FPGA Region is applied, the FPGA Region will
+do the following:
+
+ 1. Disable appropriate FPGA bridges.
+ 2. Program the FPGA using the FPGA manager.
+ 3. Enable the FPGA bridges.
+ 4. The Device Tree overlay is accepted into the live tree.
+ 5. Child devices are populated.
+
+When the overlay is removed, the child nodes will be removed and the FPGA Region
+will disable the bridges.
+
+
+FPGA Region
+===========
+
+FPGA Regions represent FPGAs and FPGA PR regions in the device tree. An FPGA
+Region brings together the elements needed to program the FPGA on a running
+system and add the child devices:
+
+ * FPGA Manager
+ * FPGA Bridges
+ * image-specific information needed to do the programming.
+ * child nodes
+
+The intended use is that a Device Tree overlay (DTO) can be used to reprogram an
+FPGA while an operating system is running.
+
+An FPGA Region that exists in the live Device Tree reflects the current state.
+If the live tree shows a "firmware-name" property or child nodes under a FPGA
+Region, the FPGA already has been programmed. A DTO that targets a FPGA Region
+and adds the "firmware-name" property is taken as a request to reprogram the
+FPGA. After reprogramming is successful, the overlay is accepted into the live
+tree.
+
+The base FPGA Region in the device tree represents the FPGA and supports full
+reconfiguration. It must include a phandle to an FPGA Manager. The base
+FPGA region will be the child of one of the hardware bridges (the bridge that
+allows register access) between the cpu and the FPGA. If there is more than
+one bridge to control during FPGA programming, the region will also contain a
+list of phandles to the additional hardware FPGA Bridges.
+
+For partial reconfiguration (PR), each PR region will have an FPGA Region.
+These FPGA regions are children of FPGA bridges which are then children of the
+base FPGA region. The "Full Reconfiguration to add PRR's" example below shows
+this.
+
+If an FPGA Region does not specify an FPGA Manager, it will inherit the FPGA
+Manager specified by its ancestor FPGA Region. This supports both the case
+where the same FPGA Manager is used for all of an FPGA as well as the case
+where a different FPGA Manager is used for each region.
+
+FPGA Regions do not inherit their ancestor FPGA regions' bridges. This prevents
+shutting down bridges that are upstream from the other active regions while one
+region is getting reconfigured (see Figure 1 above). During PR, the FPGA's
+hardware bridges remain enabled. The PR regions' bridges will be FPGA bridges
+within the static image of the FPGA.
+
+Required properties:
+- compatible : should contain "fpga-region"
+- fpga-mgr : should contain a phandle to an FPGA Manager. Child FPGA Regions
+ inherit this property from their ancestor regions. A fpga-mgr property
+ in a region will override any inherited FPGA manager.
+- #address-cells, #size-cells, ranges : must be present to handle address space
+ mapping for child nodes.
+
+Optional properties:
+- firmware-name : should contain the name of an FPGA image file located on the
+ firmware search path. If this property shows up in a live device tree
+ it indicates that the FPGA has already been programmed with this image.
+ If this property is in an overlay targeting an FPGA region, it is a
+ request to program the FPGA with that image.
+- fpga-bridges : should contain a list of phandles to FPGA Bridges that must be
+ controlled during FPGA programming along with the parent FPGA bridge.
+ This property is optional if the FPGA Manager handles the bridges.
+ If the fpga-region is the child of a fpga-bridge, the list should not
+ contain the parent bridge.
+- partial-fpga-config : boolean, set if partial reconfiguration is to be done,
+ otherwise full reconfiguration is done.
+- external-fpga-config : boolean, set if the FPGA has already been configured
+ prior to OS boot up.
+- region-unfreeze-timeout-us : The maximum time in microseconds to wait for
+ bridges to successfully become enabled after the region has been
+ programmed.
+- region-freeze-timeout-us : The maximum time in microseconds to wait for
+ bridges to successfully become disabled before the region has been
+ programmed.
+- child nodes : devices in the FPGA after programming.
+
+In the example below, when an overlay is applied targeting fpga-region0,
+fpga_mgr is used to program the FPGA. Two bridges are controlled during
+programming: the parent fpga_bridge0 and fpga_bridge1. Because the region is
+the child of fpga_bridge0, only fpga_bridge1 needs to be specified in the
+fpga-bridges property. During programming, these bridges are disabled, the
+firmware specified in the overlay is loaded to the FPGA using the FPGA manager
+specified in the region. If FPGA programming succeeds, the bridges are
+reenabled and the overlay makes it into the live device tree. The child devices
+are then populated. If FPGA programming fails, the bridges are left disabled
+and the overlay is rejected. The overlay's ranges property maps the lwhps
+bridge's region (0xff200000) and the hps bridge's region (0xc0000000) for use by
+the two child devices.
+
+Example:
+Base tree contains:
+
+ fpga_mgr: fpga-mgr@ff706000 {
+ compatible = "altr,socfpga-fpga-mgr";
+ reg = <0xff706000 0x1000
+ 0xffb90000 0x20>;
+ interrupts = <0 175 4>;
+ };
+
+ fpga_bridge0: fpga-bridge@ff400000 {
+ compatible = "altr,socfpga-lwhps2fpga-bridge";
+ reg = <0xff400000 0x100000>;
+ resets = <&rst LWHPS2FPGA_RESET>;
+ clocks = <&l4_main_clk>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ fpga_region0: fpga-region0 {
+ compatible = "fpga-region";
+ fpga-mgr = <&fpga_mgr>;
+ };
+ };
+
+ fpga_bridge1: fpga-bridge@ff500000 {
+ compatible = "altr,socfpga-hps2fpga-bridge";
+ reg = <0xff500000 0x10000>;
+ resets = <&rst HPS2FPGA_RESET>;
+ clocks = <&l4_main_clk>;
+ };
+
+Overlay contains:
+
+/dts-v1/ /plugin/;
+/ {
+ fragment@0 {
+ target = <&fpga_region0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ __overlay__ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ firmware-name = "soc_system.rbf";
+ fpga-bridges = <&fpga_bridge1>;
+ ranges = <0x20000 0xff200000 0x100000>,
+ <0x0 0xc0000000 0x20000000>;
+
+ gpio@10040 {
+ compatible = "altr,pio-1.0";
+ reg = <0x10040 0x20>;
+ altr,gpio-bank-width = <4>;
+ #gpio-cells = <2>;
+ clocks = <2>;
+ gpio-controller;
+ };
+
+ onchip-memory {
+ device_type = "memory";
+ compatible = "altr,onchipmem-15.1";
+ reg = <0x0 0x10000>;
+ };
+ };
+ };
+};
+
+
+Supported Use Models
+====================
+
+In all cases the live DT must have the FPGA Manager, FPGA Bridges (if any), and
+an FPGA Region. The target of the Device Tree Overlay is the FPGA Region. Some
+uses are specific to an FPGA device.
+
+ * No FPGA Bridges
+ In this case, the FPGA Manager which programs the FPGA also handles the
+ bridges behind the scenes. No FPGA Bridge devices are needed for full
+ reconfiguration.
+
+ * Full reconfiguration with hardware bridges
+ In this case, there are hardware bridges between the processor and FPGA that
+ need to be controlled during full reconfiguration. Before the overlay is
+ applied, the live DT must include the FPGA Manager, FPGA Bridges, and an
+ FPGA Region. The FPGA Region is the child of the bridge that allows
+ register access to the FPGA. Additional bridges may be listed in a
+ fpga-bridges property in the FPGA region or in the device tree overlay.
+
+ * Partial reconfiguration with bridges in the FPGA
+ In this case, the FPGA will have one or more PRRs that may be programmed
+ separately while the rest of the FPGA can remain active. To manage this,
+ bridges need to exist in the FPGA that can gate the buses going to each FPGA
+ region while the buses are enabled for other sections. Before any partial
+ reconfiguration can be done, a base FPGA image must be loaded which includes
+ PRRs with FPGA bridges. The device tree should have an FPGA region for each
+ PRR.
+
+Device Tree Examples
+====================
+
+The intention of this section is to give some simple examples, focusing on
+the placement of the elements detailed above, especially:
+ * FPGA Manager
+ * FPGA Bridges
+ * FPGA Region
+ * ranges
+ * target-path or target
+
+For the purposes of this section, I'm dividing the Device Tree into two parts,
+each with its own requirements. The two parts are:
+ * The live DT prior to the overlay being added
+ * The DT overlay
+
+The live Device Tree must contain an FPGA Region, an FPGA Manager, and any FPGA
+Bridges. The FPGA Region's "fpga-mgr" property specifies the manager by phandle
+to handle programming the FPGA. If the FPGA Region is the child of another FPGA
+Region, the parent's FPGA Manager is used. If FPGA Bridges need to be involved,
+they are specified in the FPGA Region by the "fpga-bridges" property. During
+FPGA programming, the FPGA Region will disable the bridges that are in its
+"fpga-bridges" list and will re-enable them after FPGA programming has
+succeeded.
+
+The Device Tree Overlay will contain:
+ * "target-path" or "target"
+ The insertion point where the contents of the overlay will go into the
+ live tree. target-path is a full path, while target is a phandle.
+ * "ranges"
+ The address space mapping from processor to FPGA bus(ses).
+ * "firmware-name"
+ Specifies the name of the FPGA image file on the firmware search
+ path. The search path is described in the firmware class documentation.
+ * "partial-fpga-config"
+ This binding is a boolean and should be present if partial reconfiguration
+ is to be done.
+ * child nodes corresponding to hardware that will be loaded in this region of
+ the FPGA.
+
+Device Tree Example: Full Reconfiguration without Bridges
+=========================================================
+
+Live Device Tree contains:
+ fpga_mgr0: fpga-mgr@f8007000 {
+ compatible = "xlnx,zynq-devcfg-1.0";
+ reg = <0xf8007000 0x100>;
+ interrupt-parent = <&intc>;
+ interrupts = <0 8 4>;
+ clocks = <&clkc 12>;
+ clock-names = "ref_clk";
+ syscon = <&slcr>;
+ };
+
+ fpga_region0: fpga-region0 {
+ compatible = "fpga-region";
+ fpga-mgr = <&fpga_mgr0>;
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ ranges;
+ };
+
+DT Overlay contains:
+/dts-v1/ /plugin/;
+/ {
+fragment@0 {
+ target = <&fpga_region0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ __overlay__ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ firmware-name = "zynq-gpio.bin";
+
+ gpio1: gpio@40000000 {
+ compatible = "xlnx,xps-gpio-1.00.a";
+ reg = <0x40000000 0x10000>;
+ gpio-controller;
+ #gpio-cells = <0x2>;
+ xlnx,gpio-width= <0x6>;
+ };
+ };
+};
+
+Device Tree Example: Full Reconfiguration to add PRRs
+======================================================
+
+The base FPGA Region is specified similar to the first example above.
+
+This example programs the FPGA to have two regions that can later be partially
+configured. Each region has its own bridge in the FPGA fabric.
+
+DT Overlay contains:
+/dts-v1/ /plugin/;
+/ {
+ fragment@0 {
+ target = <&fpga_region0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ __overlay__ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ firmware-name = "base.rbf";
+
+ fpga-bridge@4400 {
+ compatible = "altr,freeze-bridge";
+ reg = <0x4400 0x10>;
+
+ fpga_region1: fpga-region1 {
+ compatible = "fpga-region";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ ranges;
+ };
+ };
+
+ fpga-bridge@4420 {
+ compatible = "altr,freeze-bridge";
+ reg = <0x4420 0x10>;
+
+ fpga_region2: fpga-region2 {
+ compatible = "fpga-region";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ ranges;
+ };
+ };
+ };
+ };
+};
+
+Device Tree Example: Partial Reconfiguration
+============================================
+
+This example reprograms one of the PRRs set up in the previous example.
+
+The sequence that occurs when this overlay is applied is similar to the above;
+the only differences are that the FPGA is partially reconfigured due to the
+"partial-fpga-config" boolean and that the only bridge controlled during
+programming is the FPGA based bridge of fpga_region1.
+
+/dts-v1/ /plugin/;
+/ {
+ fragment@0 {
+ target = <&fpga_region1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ __overlay__ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ firmware-name = "soc_image2.rbf";
+ partial-fpga-config;
+
+ gpio@10040 {
+ compatible = "altr,pio-1.0";
+ reg = <0x10040 0x20>;
+ clocks = <0x2>;
+ altr,gpio-bank-width = <0x4>;
+ resetvalue = <0x0>;
+ #gpio-cells = <0x2>;
+ gpio-controller;
+ };
+ };
+ };
+};
+
+Constraints
+===========
+
+It is beyond the scope of this document to fully describe all the FPGA design
+constraints required to make partial reconfiguration work[1] [2] [3], but a few
+deserve quick mention.
+
+A persona must have boundary connections that line up with those of the partition
+or region it is designed to go into.
+
+During programming, transactions through those connections must be stopped and
+the connections must be held at a fixed logic level. This can be achieved by
+FPGA Bridges that exist on the FPGA fabric prior to the partial reconfiguration.
+
+--
+[1] www.altera.com/content/dam/altera-www/global/en_US/pdfs/literature/ug/ug_partrecon.pdf
+[2] tspace.library.utoronto.ca/bitstream/1807/67932/1/Byma_Stuart_A_201411_MAS_thesis.pdf
+[3] http://www.xilinx.com/support/documentation/sw_manuals/xilinx14_1/ug702.pdf
diff --git a/Documentation/devicetree/bindings/hwmon/mcp3021.txt b/Documentation/devicetree/bindings/hwmon/mcp3021.txt
new file mode 100644
index 000000000000..294318ba6914
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/mcp3021.txt
@@ -0,0 +1,21 @@
+mcp3021 properties
+
+Required properties:
+- compatible: Must be one of the following:
+ - "microchip,mcp3021" for mcp3021
+ - "microchip,mcp3221" for mcp3221
+- reg: I2C address
+
+Optional properties:
+
+- reference-voltage-microvolt
+ Reference voltage in microvolt (uV)
+
+Example:
+
+mcp3021@4d {
+ compatible = "microchip,mcp3021";
+ reg = <0x4d>;
+
+ reference-voltage-microvolt = <4500000>; /* 4.5 V */
+};
diff --git a/Documentation/devicetree/bindings/hwmon/tmp108.txt b/Documentation/devicetree/bindings/hwmon/tmp108.txt
new file mode 100644
index 000000000000..8c4b10df86d9
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/tmp108.txt
@@ -0,0 +1,14 @@
+TMP108 temperature sensor
+-------------------------
+
+This device supports I2C only.
+
+Required node properties:
+- compatible : "ti,tmp108"
+- reg : the I2C address of the device. This is 0x48, 0x49, 0x4a, or 0x4b.
+
+Example:
+ tmp108@48 {
+ compatible = "ti,tmp108";
+ reg = <0x48>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index fbbad6446741..df720ca00fcf 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -39,11 +39,13 @@ dallas,ds75 Digital Thermometer and Thermostat
dlg,da9053 DA9053: flexible system level PMIC with multicore support
dlg,da9063 DA9063: system PMIC for quad-core application processors
domintech,dmard09 DMARD09: 3-axis Accelerometer
+domintech,dmard10 DMARD10: 3-axis Accelerometer
epson,rx8010 I2C-BUS INTERFACE REAL TIME CLOCK MODULE
epson,rx8025 High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE
epson,rx8581 I2C-BUS INTERFACE REAL TIME CLOCK MODULE
fsl,mag3110 MAG3110: Xtrinsic High Accuracy, 3D Magnetometer
fsl,mc13892 MC13892: Power Management Integrated Circuit (PMIC) for i.MX35/51
+fsl,mma7660 MMA7660FC: 3-Axis Orientation/Motion Detection Sensor
fsl,mma8450 MMA8450Q: Xtrinsic Low-power, 3-axis Xtrinsic Accelerometer
fsl,mpl3115 MPL3115: Absolute Digital Pressure Sensor
fsl,mpr121 MPR121: Proximity Capacitive Touch Sensor Controller
@@ -57,6 +59,7 @@ maxim,max1237 Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
maxim,max6625 9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
mc,rv3029c2 Real Time Clock Module with I2C-Bus
mcube,mc3230 mCube 3-axis 8-bit digital accelerometer
+memsic,mxc6225 MEMSIC 2-axis 8-bit digital accelerometer
microchip,mcp4531-502 Microchip 7-bit Single I2C Digital Potentiometer (5k)
microchip,mcp4531-103 Microchip 7-bit Single I2C Digital Potentiometer (10k)
microchip,mcp4531-503 Microchip 7-bit Single I2C Digital Potentiometer (50k)
@@ -121,6 +124,11 @@ microchip,mcp4662-502 Microchip 8-bit Dual I2C Digital Potentiometer with NV Mem
microchip,mcp4662-103 Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (10k)
microchip,mcp4662-503 Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (50k)
microchip,mcp4662-104 Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (100k)
+microchip,tc654 PWM Fan Speed Controller With Fan Fault Detection
+microchip,tc655 PWM Fan Speed Controller With Fan Fault Detection
+miramems,da226 MiraMEMS DA226 2-axis 14-bit digital accelerometer
+miramems,da280 MiraMEMS DA280 3-axis 14-bit digital accelerometer
+miramems,da311 MiraMEMS DA311 3-axis 12-bit digital accelerometer
national,lm63 Temperature sensor with integrated fan control
national,lm75 I2C TEMP SENSOR
national,lm80 Serial Interface ACPI-Compatible Microprocessor System Hardware Monitor
@@ -146,6 +154,7 @@ ricoh,rv5c387a I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
samsung,24ad0xd1 S524AD0XF1 (128K/256K-bit Serial EEPROM for Low Power)
sgx,vz89x SGX Sensortech VZ89X Sensors
sii,s35390a 2-wire CMOS real-time clock
+silabs,si7020 Relative Humidity and Temperature Sensors
skyworks,sky81452 Skyworks SKY81452: Six-Channel White LED Driver with Touch Panel Bias Supply
st,24c256 i2c serial eeprom (24cxx)
st,m41t00 Serial real-time clock (RTC)
diff --git a/Documentation/devicetree/bindings/iio/adc/envelope-detector.txt b/Documentation/devicetree/bindings/iio/adc/envelope-detector.txt
new file mode 100644
index 000000000000..27544bdd4478
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/envelope-detector.txt
@@ -0,0 +1,54 @@
+Bindings for ADC envelope detector using a DAC and a comparator
+
+The DAC is used to find the peak level of an alternating voltage input
+signal by a binary search using the output of a comparator wired to
+an interrupt pin. Like so:
+ _
+ | \
+ input +------>-------|+ \
+ | \
+ .-------. | }---.
+ | | | / |
+ | dac|-->--|- / |
+ | | |_/ |
+ | | |
+ | | |
+ | irq|------<-------'
+ | |
+ '-------'
+
+Required properties:
+- compatible: Should be "axentia,tse850-envelope-detector"
+- io-channels: Channel node of the dac to be used for comparator input.
+- io-channel-names: Should be "dac".
+- interrupt specification for one client interrupt,
+ see ../../interrupt-controller/interrupts.txt for details.
+- interrupt-names: Should be "comp".
+
+Example:
+
+ &i2c {
+ dpot: mcp4651-104@28 {
+ compatible = "microchip,mcp4651-104";
+ reg = <0x28>;
+ #io-channel-cells = <1>;
+ };
+ };
+
+ dac: dac {
+ compatible = "dpot-dac";
+ vref-supply = <&reg_3v3>;
+ io-channels = <&dpot 0>;
+ io-channel-names = "dpot";
+ #io-channel-cells = <1>;
+ };
+
+ envelope-detector {
+ compatible = "axentia,tse850-envelope-detector";
+ io-channels = <&dac 0>;
+ io-channel-names = "dac";
+
+ interrupt-parent = <&gpio>;
+ interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-names = "comp";
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
new file mode 100644
index 000000000000..49ed82e89870
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
@@ -0,0 +1,83 @@
+STMicroelectronics STM32 ADC device driver
+
+STM32 ADC is a successive approximation analog-to-digital converter.
+It has several multiplexed input channels. Conversions can be performed
+in single, continuous, scan or discontinuous mode. Result of the ADC is
+stored in a left-aligned or right-aligned 32-bit data register.
+Conversions can be launched in software or using hardware triggers.
+
+The analog watchdog feature allows the application to detect if the input
+voltage goes beyond the user-defined, higher or lower thresholds.
+
+Each STM32 ADC block can have up to 3 ADC instances.
+
+Each instance supports two contexts to manage conversions, each one has its
+own configurable sequence and trigger:
+- regular conversion can be done in sequence, running in background
+- injected conversions have higher priority, and so have the ability to
+ interrupt the regular conversion sequence (triggered either in SW or HW).
+ The regular sequence is resumed if it has been interrupted.
+
+Contents of a stm32 adc root node:
+-----------------------------------
+Required properties:
+- compatible: Should be "st,stm32f4-adc-core".
+- reg: Offset and length of the ADC block register set.
+- interrupts: Must contain the interrupt for ADC block.
+- clocks: Clock for the analog circuitry (common to all ADCs).
+- clock-names: Must be "adc".
+- interrupt-controller: Identifies the controller node as interrupt-parent
+- vref-supply: Phandle to the vref input analog reference voltage.
+- #interrupt-cells = <1>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Optional properties:
+- A pinctrl state named "default" for each ADC channel may be defined to set
+ inX ADC pins in mode of operation for analog input on external pin.
+
+Contents of a stm32 adc child node:
+-----------------------------------
+An ADC block node should contain at least one subnode, representing an
+ADC instance available on the machine.
+
+Required properties:
+- compatible: Should be "st,stm32f4-adc".
+- reg: Offset of ADC instance in ADC block (e.g. may be 0x0, 0x100, 0x200).
+- clocks: Input clock private to this ADC instance.
+- interrupt-parent: Phandle to the parent interrupt controller.
+- interrupts: IRQ Line for the ADC (e.g. may be 0 for adc@0, 1 for adc@100 or
+ 2 for adc@200).
+- st,adc-channels: List of single-ended channels muxed for this ADC.
+ It can have up to 16 channels, numbered from 0 to 15 (resp. for in0..in15).
+- #io-channel-cells = <1>: See the IIO bindings section "IIO consumers" in
+ Documentation/devicetree/bindings/iio/iio-bindings.txt
+
+Example:
+ adc: adc@40012000 {
+ compatible = "st,stm32f4-adc-core";
+ reg = <0x40012000 0x400>;
+ interrupts = <18>;
+ clocks = <&rcc 0 168>;
+ clock-names = "adc";
+ vref-supply = <&reg_vref>;
+ interrupt-controller;
+ pinctrl-names = "default";
+ pinctrl-0 = <&adc3_in8_pin>;
+
+ #interrupt-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@0 {
+ compatible = "st,stm32f4-adc";
+ #io-channel-cells = <1>;
+ reg = <0x0>;
+ clocks = <&rcc 0 168>;
+ interrupt-parent = <&adc>;
+ interrupts = <0>;
+ st,adc-channels = <8>;
+ };
+ ...
+ other adc child nodes follow...
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt b/Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt
index 9ed2315781e4..3d25011f0c99 100644
--- a/Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt
+++ b/Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt
@@ -3,6 +3,7 @@
Required properties:
- compatible: Should be "ti,adc141s626" or "ti,adc161s626"
- reg: spi chip select number for the device
+ - vdda-supply: supply voltage to VDDA pin
Recommended properties:
- spi-max-frequency: Definition as per
@@ -11,6 +12,7 @@ Recommended properties:
Example:
adc@0 {
compatible = "ti,adc161s626";
+ vdda-supply = <&vdda_fixed>;
reg = <0>;
spi-max-frequency = <4300000>;
};
diff --git a/Documentation/devicetree/bindings/iio/dac/dpot-dac.txt b/Documentation/devicetree/bindings/iio/dac/dpot-dac.txt
new file mode 100644
index 000000000000..fdf47a01bfef
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/dpot-dac.txt
@@ -0,0 +1,41 @@
+Bindings for DAC emulation using a digital potentiometer
+
+It is assumed that the dpot is used as a voltage divider between the
+current dpot wiper setting and the maximum resistance of the dpot. The
+voltage to be divided is provided by a vref regulator.
+
+ .------.
+ .-----------. | |
+ | vref |--' .---.
+ | regulator |--. | |
+ '-----------' | | d |
+ | | p |
+ | | o | wiper
+ | | t |<---------+
+ | | |
+ | '---' dac output voltage
+ | |
+ '------+------------+
+
+Required properties:
+- compatible: Should be "dpot-dac"
+- vref-supply: The regulator supplying the voltage divider.
+- io-channels: Channel node of the dpot to be used for the voltage division.
+- io-channel-names: Should be "dpot".
+
+Example:
+
+ &i2c {
+ dpot: mcp4651-503@28 {
+ compatible = "microchip,mcp4651-503";
+ reg = <0x28>;
+ #io-channel-cells = <1>;
+ };
+ };
+
+ dac {
+ compatible = "dpot-dac";
+ vref-supply = <&reg_3v3>;
+ io-channels = <&dpot 0>;
+ io-channel-names = "dpot";
+ };
diff --git a/Documentation/devicetree/bindings/iio/dac/mcp4725.txt b/Documentation/devicetree/bindings/iio/dac/mcp4725.txt
new file mode 100644
index 000000000000..1bc6c093fbfe
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/mcp4725.txt
@@ -0,0 +1,35 @@
+Microchip mcp4725 and mcp4726 DAC device driver
+
+Required properties:
+ - compatible: Must be "microchip,mcp4725" or "microchip,mcp4726"
+ - reg: Should contain the DAC I2C address
+ - vdd-supply: Phandle to the Vdd power supply. This supply is used as a
+ voltage reference on mcp4725. It is used as a voltage reference on
+ mcp4726 if there is no vref-supply specified.
+
+Optional properties (valid only for mcp4726):
+ - vref-supply: Optional phandle to the Vref power supply. Vref pin is
+ used as a voltage reference when this supply is specified.
+ - microchip,vref-buffered: Boolean to enable buffering of the external
+ Vref pin. This boolean is not valid without the vref-supply. Quoting
+ the datasheet: This is offered in cases where the reference voltage
+ does not have the current capability not to drop its voltage when
+ connected to the internal resistor ladder circuit.
+
+Examples:
+
+ /* simple mcp4725 */
+ mcp4725@60 {
+ compatible = "microchip,mcp4725";
+ reg = <0x60>;
+ vdd-supply = <&vdac_vdd>;
+ };
+
+ /* mcp4726 with the buffered external reference voltage */
+ mcp4726@60 {
+ compatible = "microchip,mcp4726";
+ reg = <0x60>;
+ vdd-supply = <&vdac_vdd>;
+ vref-supply = <&vdac_vref>;
+ microchip,vref-buffered;
+ };
diff --git a/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
new file mode 100644
index 000000000000..b0d3b59966bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
@@ -0,0 +1,46 @@
+Invensense MPU-3050 Gyroscope device tree bindings
+
+Required properties:
+ - compatible : should be "invensense,mpu3050"
+ - reg : the I2C address of the sensor
+
+Optional properties:
+ - interrupt-parent : should be the phandle for the interrupt controller
+ - interrupts : interrupt mapping for the trigger interrupt from the
+ internal oscillator. The following IRQ modes are supported:
+ IRQ_TYPE_EDGE_RISING, IRQ_TYPE_EDGE_FALLING, IRQ_TYPE_LEVEL_HIGH and
+ IRQ_TYPE_LEVEL_LOW. The driver should detect and configure the hardware
+ for the desired interrupt type.
+ - vdd-supply : supply regulator for the main power voltage.
+ - vlogic-supply : supply regulator for the signal voltage.
+ - mount-matrix : see iio/mount-matrix.txt
+
+Optional subnodes:
+ - The MPU-3050 will pass through and forward the I2C signals from the
+ incoming I2C bus, or alternatively drive traffic to a slave device (usually
+ an accelerometer) on its own initiative. Therefore it supports an i2c-gate
+ subnode. For details see: i2c/i2c-gate.txt
+
+Example:
+
+mpu3050@68 {
+ compatible = "invensense,mpu3050";
+ reg = <0x68>;
+ interrupt-parent = <&foo>;
+ interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
+ vdd-supply = <&bar>;
+ vlogic-supply = <&baz>;
+
+ /* External I2C interface */
+ i2c-gate {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fnord@18 {
+ compatible = "fnord";
+ reg = <0x18>;
+ interrupt-parent = <&foo>;
+ interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/iio/humidity/hts221.txt b/Documentation/devicetree/bindings/iio/humidity/hts221.txt
new file mode 100644
index 000000000000..b20ab9c12080
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/humidity/hts221.txt
@@ -0,0 +1,22 @@
+* HTS221 STM humidity + temperature sensor
+
+Required properties:
+- compatible: should be "st,hts221"
+- reg: i2c address of the sensor / spi cs line
+
+Optional properties:
+- interrupt-parent: should be the phandle for the interrupt controller
+- interrupts: interrupt mapping for IRQ. It should be configured with
+ flags IRQ_TYPE_LEVEL_HIGH or IRQ_TYPE_EDGE_RISING.
+
+ Refer to interrupt-controller/interrupts.txt for generic interrupt
+ client node bindings.
+
+Example:
+
+hts221@5f {
+ compatible = "st,hts221";
+ reg = <0x5f>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <0 IRQ_TYPE_EDGE_RISING>;
+};
diff --git a/Documentation/devicetree/bindings/iio/light/isl29018.txt b/Documentation/devicetree/bindings/iio/light/isl29018.txt
new file mode 100644
index 000000000000..425ab459e209
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/isl29018.txt
@@ -0,0 +1,28 @@
+* ISL 29018/29023/29035 I2C ALS, Proximity, and Infrared sensor
+
+Required properties:
+
+ - compatible: Should be one of
+ "isil,isl29018"
+ "isil,isl29023"
+ "isil,isl29035"
+ - reg: the I2C address of the device
+
+Optional properties:
+
+ - interrupt-parent: should be the phandle for the interrupt controller
+ - interrupts: the sole interrupt generated by the device
+
+ Refer to interrupt-controller/interrupts.txt for generic interrupt client
+ node bindings.
+
+ - vcc-supply: phandle to the regulator that provides power to the sensor.
+
+Example:
+
+isl29018@44 {
+ compatible = "isil,isl29018";
+ reg = <0x44>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(Z, 2) IRQ_TYPE_LEVEL_HIGH>;
+};
diff --git a/Documentation/devicetree/bindings/iio/light/tsl2583.txt b/Documentation/devicetree/bindings/iio/light/tsl2583.txt
new file mode 100644
index 000000000000..8e2066c83f70
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/tsl2583.txt
@@ -0,0 +1,26 @@
+* TAOS TSL 2580/2581/2583 ALS sensor
+
+Required properties:
+
+ - compatible: Should be one of
+ "amstaos,tsl2580"
+ "amstaos,tsl2581"
+ "amstaos,tsl2583"
+ - reg: the I2C address of the device
+
+Optional properties:
+
+ - interrupt-parent: should be the phandle for the interrupt controller
+ - interrupts: the sole interrupt generated by the device
+
+ Refer to interrupt-controller/interrupts.txt for generic interrupt client
+ node bindings.
+
+ - vcc-supply: phandle to the regulator that provides power to the sensor.
+
+Example:
+
+tsl2581@29 {
+ compatible = "amstaos,tsl2581";
+ reg = <0x29>;
+};
diff --git a/Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt b/Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt
new file mode 100644
index 000000000000..b9b621e94cd7
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt
@@ -0,0 +1,30 @@
+* Texas Instruments LMP91000 potentiostat
+
+http://www.ti.com/lit/ds/symlink/lmp91000.pdf
+
+Required properties:
+
+ - compatible: should be "ti,lmp91000"
+ - reg: the I2C address of the device
+ - io-channels: the phandle of the iio provider
+
+ - ti,external-tia-resistor: if the property ti,tia-gain-ohm is not defined this
+ needs to be set to signal that an external resistor value is being used.
+
+Optional properties:
+
+ - ti,tia-gain-ohm: ohm value of the internal resistor for the transimpedance
+ amplifier. Must be 2750, 3500, 7000, 14000, 35000, 120000, or 350000 ohms.
+
+ - ti,rload-ohm: ohm value of the internal resistor load applied to the gas
+ sensor. Must be 10, 33, 50, or 100 (default) ohms.
+
+Example:
+
+lmp91000@48 {
+ compatible = "ti,lmp91000";
+ reg = <0x48>;
+ ti,tia-gain-ohm = <7500>;
+ ti,rload = <100>;
+ io-channels = <&adc>;
+};
diff --git a/Documentation/devicetree/bindings/iio/st-sensors.txt b/Documentation/devicetree/bindings/iio/st-sensors.txt
index e41fe340162b..c040c9ad1889 100644
--- a/Documentation/devicetree/bindings/iio/st-sensors.txt
+++ b/Documentation/devicetree/bindings/iio/st-sensors.txt
@@ -42,6 +42,7 @@ Accelerometers:
- st,lsm303agr-accel
- st,lis2dh12-accel
- st,h3lis331dl-accel
+- st,lng2dm-accel
Gyroscopes:
- st,l3g4200d-gyro
diff --git a/Documentation/devicetree/bindings/nvmem/brcm,ocotp.txt b/Documentation/devicetree/bindings/nvmem/brcm,ocotp.txt
new file mode 100644
index 000000000000..6462e12d8de6
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/brcm,ocotp.txt
@@ -0,0 +1,17 @@
+Broadcom OTP memory controller
+
+Required Properties:
+- compatible: "brcm,ocotp" for the first generation Broadcom OTPC which is used
+ in Cygnus and supports 32 bit read/write. Use "brcm,ocotp-v2" for the second
+ generation Broadcom OTPC which is used in SoCs such as Stingray and supports
+ 64-bit read/write.
+- reg: Base address of the OTP controller.
+- brcm,ocotp-size: Amount of memory available, in 32 bit words
+
+Example:
+
+otp: otp@0301c800 {
+ compatible = "brcm,ocotp";
+ reg = <0x0301c800 0x2c>;
+ brcm,ocotp-size = <2048>;
+};
diff --git a/Documentation/devicetree/bindings/nvmem/lpc1850-otp.txt b/Documentation/devicetree/bindings/nvmem/lpc1850-otp.txt
new file mode 100644
index 000000000000..853b6a754644
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/lpc1850-otp.txt
@@ -0,0 +1,20 @@
+* NXP LPC18xx OTP memory
+
+Internal OTP (One Time Programmable) memory for NXP LPC18xx/43xx devices.
+
+Required properties:
+ - compatible: Should be "nxp,lpc1850-otp"
+ - reg: Must contain an entry with the physical base address and length
+ for each entry in reg-names.
+ - #address-cells: must be set to 1.
+ - #size-cells: must be set to 1.
+
+See nvmem.txt for more information.
+
+Example:
+ otp: otp@40045000 {
+ compatible = "nxp,lpc1850-otp";
+ reg = <0x40045000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/meson-usb2-phy.txt b/Documentation/devicetree/bindings/phy/meson8b-usb2-phy.txt
index 9da5ea234154..5fa73b9d20f5 100644
--- a/Documentation/devicetree/bindings/phy/meson-usb2-phy.txt
+++ b/Documentation/devicetree/bindings/phy/meson8b-usb2-phy.txt
@@ -1,4 +1,4 @@
-* Amlogic USB2 PHY
+* Amlogic Meson8b and GXBB USB2 PHY
Required properties:
- compatible: Depending on the platform this should be one of:
@@ -16,10 +16,10 @@ Optional properties:
Example:
-usb0_phy: usb_phy@0 {
+usb0_phy: usb-phy@c0000000 {
compatible = "amlogic,meson-gxbb-usb2-phy";
#phy-cells = <0>;
- reg = <0x0 0x0 0x0 0x20>;
+ reg = <0x0 0xc0000000 0x0 0x20>;
resets = <&reset RESET_USB_OTG>;
clocks = <&clkc CLKID_USB>, <&clkc CLKID_USB0>;
clock-names = "usb_general", "usb";
diff --git a/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt b/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
index bf2411f366e5..2a42a323fa1a 100644
--- a/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
+++ b/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
@@ -6,6 +6,7 @@ Main node required properties:
- compatible : value should be as follows:
(a) "hisilicon,hip05-sas-v1" for v1 hw in hip05 chipset
(b) "hisilicon,hip06-sas-v2" for v2 hw in hip06 chipset
+ (c) "hisilicon,hip07-sas-v2" for v2 hw in hip07 chipset
- sas-addr : array of 8 bytes for host SAS address
- reg : Address and length of the SAS register
- hisilicon,sas-syscon: phandle of syscon used for sas control
diff --git a/Documentation/devicetree/bindings/sound/axentia,tse850-pcm5142.txt b/Documentation/devicetree/bindings/sound/axentia,tse850-pcm5142.txt
new file mode 100644
index 000000000000..5b9b38f578bb
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/axentia,tse850-pcm5142.txt
@@ -0,0 +1,88 @@
+Devicetree bindings for the Axentia TSE-850 audio complex
+
+Required properties:
+ - compatible: "axentia,tse850-pcm5142"
+ - axentia,ssc-controller: The phandle of the atmel SSC controller used as
+ cpu dai.
+ - axentia,audio-codec: The phandle of the PCM5142 codec.
+ - axentia,add-gpios: gpio specifier that controls the mixer.
+ - axentia,loop1-gpios: gpio specifier that controls loop relays on channel 1.
+ - axentia,loop2-gpios: gpio specifier that controls loop relays on channel 2.
+ - axentia,ana-supply: Regulator that supplies the output amplifier. Must
+ support voltages in the 2V - 20V range, in 1V steps.
+
+The schematics explaining the gpios are as follows:
+
+ loop1 relays
+ IN1 +---o +------------+ o---+ OUT1
+ \ /
+ + +
+ | / |
+ +--o +--. |
+ | add | |
+ | V |
+ | .---. |
+ DAC +----------->|Sum|---+
+ | '---' |
+ | |
+ + +
+
+ IN2 +---o--+------------+--o---+ OUT2
+ loop2 relays
+
+The 'loop1' gpio pin controls two relays, which are either in loop position,
+meaning that input and output are directly connected, or they are in mixer
+position, meaning that the signal is passed through the 'Sum' mixer. Similarly
+for 'loop2'.
+
+In the above, the 'loop1' relays are inactive, thus feeding IN1 to the mixer
+(if 'add' is active) and feeding the mixer output to OUT1. The 'loop2' relays
+are active, bypassing the TSE-850 on channel 2. IN1, IN2, OUT1 and OUT2
+are TSE-850 connectors and DAC is the PCB name of the (filtered) output from
+the PCM5142 codec.
+
+Example:
+
+ &i2c {
+ codec: pcm5142@4c {
+ compatible = "ti,pcm5142";
+
+ reg = <0x4c>;
+
+ AVDD-supply = <&reg_3v3>;
+ DVDD-supply = <&reg_3v3>;
+ CPVDD-supply = <&reg_3v3>;
+
+ clocks = <&sck>;
+
+ pll-in = <3>;
+ pll-out = <6>;
+ };
+ };
+
+ ana: ana-reg {
+ compatible = "pwm-regulator";
+
+ regulator-name = "ANA";
+
+ pwms = <&pwm0 2 1000 PWM_POLARITY_INVERTED>;
+ pwm-dutycycle-unit = <1000>;
+ pwm-dutycycle-range = <100 1000>;
+
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <20000000>;
+ regulator-ramp-delay = <1000>;
+ };
+
+ sound {
+ compatible = "axentia,tse850-pcm5142";
+
+ axentia,ssc-controller = <&ssc0>;
+ axentia,audio-codec = <&codec>;
+
+ axentia,add-gpios = <&pioA 8 GPIO_ACTIVE_LOW>;
+ axentia,loop1-gpios = <&pioA 10 GPIO_ACTIVE_LOW>;
+ axentia,loop2-gpios = <&pioA 11 GPIO_ACTIVE_LOW>;
+
+ axentia,ana-supply = <&ana>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/cs35l34.txt b/Documentation/devicetree/bindings/sound/cs35l34.txt
new file mode 100644
index 000000000000..b218ead2e68e
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs35l34.txt
@@ -0,0 +1,64 @@
+CS35L34 Speaker Amplifier
+
+Required properties:
+
+ - compatible : "cirrus,cs35l34"
+
+ - reg : the I2C address of the device for I2C.
+
+ - VA-supply, VP-supply : power supplies for the device,
+ as covered in
+ Documentation/devicetree/bindings/regulator/regulator.txt.
+
+ - cirrus,boost-vtge-millivolt : Boost Voltage Value. Configures the boost
+ converter's output voltage in mV. The range is from VP to 8V with
+ increments of 100mV.
+
+ - cirrus,boost-nanohenry: Inductor value for boost converter. The value is
+ in nH and they can be values of 1000nH, 1100nH, 1200nH, 1500nH, and 2200nH.
+
+Optional properties:
+
+ - reset-gpios: GPIO used to reset the amplifier.
+
+ - interrupt-parent : Specifies the phandle of the interrupt controller to
+ which the IRQs from the CS35L34 are delivered.
+ - interrupts : IRQ line info for the CS35L34.
+ (See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+ for further information relating to interrupt properties)
+
+ - cirrus,boost-peak-milliamp : Boost converter peak current limit in mA. The
+ range starts at 1200mA and goes to a maximum of 3840mA with increments of
+ 80mA. The default value is 2480mA.
+
+ - cirrus,i2s-sdinloc : ADSP SDIN I2S channel location. Indicates whether the
+ received mono data is in the left or right portion of the I2S frame
+ according to the AD0 pin or directly via this configuration.
+ 0x0 (Default) = Selected by AD0 input (if AD0 = LOW, use left channel),
+ 0x2 = Left,
+ 0x1 = Selected by the inversion of the AD0 input (if AD0 = LOW, use right
+ channel),
+ 0x3 = Right.
+
+ - cirrus,gain-zc-disable: Boolean property. If set, the gain change will take
+ effect without waiting for a zero cross.
+
+ - cirrus,tdm-rising-edge: Boolean property. If set, data is on the rising edge of
+ SCLK. Otherwise, data is on the falling edge of SCLK.
+
+
+Example:
+
+cs35l34: cs35l34@40 {
+ compatible = "cirrus,cs35l34";
+ reg = <0x40>;
+
+ interrupt-parent = <&gpio8>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+ reset-gpios = <&gpio 10 0>;
+
+ cirrus,boost-vtge-millivolt = <8000>; /* 8V */
+ cirrus,boost-ind-nanohenry = <1000>; /* 1uH */
+ cirrus,boost-peak-milliamp = <3000>; /* 3A */
+};
diff --git a/Documentation/devicetree/bindings/sound/cs42l42.txt b/Documentation/devicetree/bindings/sound/cs42l42.txt
new file mode 100644
index 000000000000..9a2c5e2423d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs42l42.txt
@@ -0,0 +1,110 @@
+CS42L42 audio CODEC
+
+Required properties:
+
+ - compatible : "cirrus,cs42l42"
+
+ - reg : the I2C address of the device for I2C.
+
+ - VP-supply, VCP-supply, VD_FILT-supply, VL-supply, VA-supply :
+ power supplies for the device, as covered in
+ Documentation/devicetree/bindings/regulator/regulator.txt.
+
+Optional properties:
+
+ - reset-gpios : a GPIO spec for the reset pin. If specified, it will be
+ deasserted before communication to the codec starts.
+
+ - interrupt-parent : Specifies the phandle of the interrupt controller to
+ which the IRQs from the CS42L42 are delivered.
+
+ - interrupts : IRQ line info for the CS42L42.
+ (See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+ for further information relating to interrupt properties)
+
+ - cirrus,ts-inv : Boolean property. For jacks that invert the tip sense
+ polarity. Normal jacks will short tip sense pin to HS1 when headphones are
+ plugged in and leave tip sense floating when not plugged in. Inverting jacks
+ short tip sense when unplugged and float when plugged in.
+
+ 0 = (Default) Non-inverted
+ 1 = Inverted
+
+ - cirrus,ts-dbnc-rise : Debounce the rising edge of TIP_SENSE_PLUG. With no
+ debounce, the tip sense pin might be noisy on a plug event.
+
+ 0 - 0ms,
+ 1 - 125ms,
+ 2 - 250ms,
+ 3 - 500ms,
+ 4 - 750ms,
+ 5 - (Default) 1s,
+ 6 - 1.25s,
+ 7 - 1.5s,
+
+ - cirrus,ts-dbnc-fall : Debounce the falling edge of TIP_SENSE_UNPLUG.
+ With no debounce, the tip sense pin might be noisy on an unplug event.
+
+ 0 - 0ms,
+ 1 - 125ms,
+ 2 - 250ms,
+ 3 - 500ms,
+ 4 - 750ms,
+ 5 - (Default) 1s,
+ 6 - 1.25s,
+ 7 - 1.5s,
+
+ - cirrus,btn-det-init-dbnce : This sets how long the driver sleeps after
+ enabling button detection interrupts. After auto-detection and before
+ servicing button interrupts, the HS bias needs time to settle. If you
+ don't wait, there is a possibility of an erroneous button interrupt.
+
+ 0ms - 200ms,
+ Default = 100ms
+
+ - cirrus,btn-det-event-dbnce : This sets how long the driver delays after
+ receiving a button press interrupt. With level detect interrupts, you want
+ to wait a small amount of time to make sure the button press is making a
+ clean connection with the bias resistors.
+
+ 0ms - 20ms,
+ Default = 10ms
+
+ - cirrus,bias-lvls : For a level-detect headset button scheme, each button
+ will bias the mic pin to a certain voltage. To determine which button was
+ pressed, the driver will compare this biased voltage to sequential,
+ decreasing voltages and will stop when a comparator is tripped,
+ indicating a comparator voltage < bias voltage. This value represents a
+ percentage of the internally generated HS bias voltage. For different
+ hardware setups, a designer might want to tweak this. This is an array of
+ descending values for the comparator voltage.
+
+ Array of 4 values
+ Each 0-63
+ < x1 x2 x3 x4 >
+ Default = < 15 8 4 1>
+
+
+Example:
+
+cs42l42: cs42l42@48 {
+ compatible = "cirrus,cs42l42";
+ reg = <0x48>;
+ VA-supply = <&dummy_vreg>;
+ VP-supply = <&dummy_vreg>;
+ VCP-supply = <&dummy_vreg>;
+ VD_FILT-supply = <&dummy_vreg>;
+ VL-supply = <&dummy_vreg>;
+
+ reset-gpios = <&axi_gpio_0 1 0>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <55 8>;
+
+ cirrus,ts-inv = <0x00>;
+ cirrus,ts-dbnc-rise = <0x05>;
+ cirrus,ts-dbnc-fall = <0x00>;
+ cirrus,btn-det-init-dbnce = <100>;
+ cirrus,btn-det-event-dbnce = <10>;
+ cirrus,bias-lvls = <0x0F 0x08 0x04 0x01>;
+ cirrus,hs-bias-ramp-rate = <0x02>;
+}; \ No newline at end of file
diff --git a/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt b/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt
index 55b53e1fd72c..e0b6165c9cfc 100644
--- a/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt
+++ b/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt
@@ -43,7 +43,7 @@ mcbsp0: mcbsp@1d10000 {
<0x00310000 0x1000>;
reg-names = "mpu", "dat";
interrupts = <97 98>;
- interrupts-names = "rx", "tx";
+ interrupt-names = "rx", "tx";
dmas = <&edma0 3 1
&edma0 2 1>;
dma-names = "tx", "rx";
diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
index d9d8635ff94c..6a4aadc4ce06 100644
--- a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
@@ -44,8 +44,7 @@ Required dai-link subnodes:
Required CPU/CODEC subnodes properties:
-link-name : Name of the dai link.
--sound-dai : phandle and port of CPU/CODEC
--capture-dai : phandle and port of CPU/CODEC
+-sound-dai : phandle/s and port of CPU/CODEC
Example:
@@ -73,7 +72,7 @@ sound: sound {
sound-dai = <&lpass MI2S_PRIMARY>;
};
codec {
- sound-dai = <&wcd_codec 0>;
+ sound-dai = <&lpass_codec 0>, <&wcd_codec 0>;
};
};
diff --git a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt
new file mode 100644
index 000000000000..ccb401cfef9d
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt
@@ -0,0 +1,85 @@
+msm8916 analog audio CODEC
+
+Bindings for the codec analog IP that is integrated in the PM8916 PMIC.
+
+## Bindings for codec core on pmic:
+
+Required properties
+ - compatible = "qcom,pm8916-wcd-analog-codec";
+ - reg: represents the slave base address provided to the peripheral.
+ - interrupt-parent : The parent interrupt controller.
+ - interrupts: List of interrupts in given SPMI peripheral.
+ - interrupt-names: Names specified to above list of interrupts in same
+ order. List of supported interrupt names are:
+ "cdc_spk_cnp_int" - Speaker click and pop interrupt.
+ "cdc_spk_clip_int" - Speaker clip interrupt.
+ "cdc_spk_ocp_int" - Speaker over current protect interrupt.
+ "mbhc_ins_rem_det1" - jack insert removal detect interrupt 1.
+ "mbhc_but_rel_det" - button release interrupt.
+ "mbhc_but_press_det" - button press event
+ "mbhc_ins_rem_det" - jack insert removal detect interrupt.
+ "mbhc_switch_int" - multi button headset interrupt.
+ "cdc_ear_ocp_int" - Earphone over current protect interrupt.
+ "cdc_hphr_ocp_int" - Headphone R over current protect interrupt.
+ "cdc_hphl_ocp_det" - Headphone L over current protect interrupt.
+ "cdc_ear_cnp_int" - earphone cnp interrupt.
+ "cdc_hphr_cnp_int" - hphr click and pop interrupt.
+ "cdc_hphl_cnp_int" - hphl click and pop interrupt.
+
+ - clocks: Handle to mclk.
+ - clock-names: should be "mclk"
+ - vdd-cdc-io-supply: phandle to VDD_CDC_IO regulator DT node.
+ - vdd-cdc-tx-rx-cx-supply: phandle to VDD_CDC_TX/RX/CX regulator DT node.
+ - vdd-micbias-supply: phandle of VDD_MICBIAS supply's regulator DT node.
+
+Optional Properties:
+- qcom,micbias1-ext-cap: boolean, present if micbias1 has external capacitor
+ connected.
+- qcom,micbias2-ext-cap: boolean, present if micbias2 has external capacitor
+ connected.
+
+Example:
+
+spmi_bus {
+ ...
+ audio-codec@f000 {
+ compatible = "qcom,pm8916-wcd-analog-codec";
+ reg = <0xf000 0x200>;
+ reg-names = "pmic-codec-core";
+ clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>;
+ clock-names = "mclk";
+ interrupt-parent = <&spmi_bus>;
+ interrupts = <0x1 0xf0 0x0 IRQ_TYPE_NONE>,
+ <0x1 0xf0 0x1 IRQ_TYPE_NONE>,
+ <0x1 0xf0 0x2 IRQ_TYPE_NONE>,
+ <0x1 0xf0 0x3 IRQ_TYPE_NONE>,
+ <0x1 0xf0 0x4 IRQ_TYPE_NONE>,
+ <0x1 0xf0 0x5 IRQ_TYPE_NONE>,
+ <0x1 0xf0 0x6 IRQ_TYPE_NONE>,
+ <0x1 0xf0 0x7 IRQ_TYPE_NONE>,
+ <0x1 0xf1 0x0 IRQ_TYPE_NONE>,
+ <0x1 0xf1 0x1 IRQ_TYPE_NONE>,
+ <0x1 0xf1 0x2 IRQ_TYPE_NONE>,
+ <0x1 0xf1 0x3 IRQ_TYPE_NONE>,
+ <0x1 0xf1 0x4 IRQ_TYPE_NONE>,
+ <0x1 0xf1 0x5 IRQ_TYPE_NONE>;
+ interrupt-names = "cdc_spk_cnp_int",
+ "cdc_spk_clip_int",
+ "cdc_spk_ocp_int",
+ "mbhc_ins_rem_det1",
+ "mbhc_but_rel_det",
+ "mbhc_but_press_det",
+ "mbhc_ins_rem_det",
+ "mbhc_switch_int",
+ "cdc_ear_ocp_int",
+ "cdc_hphr_ocp_int",
+ "cdc_hphl_ocp_det",
+ "cdc_ear_cnp_int",
+ "cdc_hphr_cnp_int",
+ "cdc_hphl_cnp_int";
+ vdd-cdc-io-supply = <&pm8916_l5>;
+ vdd-cdc-tx-rx-cx-supply = <&pm8916_l5>;
+ vdd-micbias-supply = <&pm8916_l13>;
+ #sound-dai-cells = <1>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-digital.txt b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-digital.txt
new file mode 100644
index 000000000000..1c8e4cb25176
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-digital.txt
@@ -0,0 +1,20 @@
+msm8916 digital audio CODEC
+
+## Bindings for codec core in lpass:
+
+Required properties
+ - compatible = "qcom,msm8916-wcd-digital-codec";
+ - reg: address space for lpass codec.
+ - clocks: Handle to mclk and ahbclk
+ - clock-names: should be "mclk", "ahbix-clk".
+
+Example:
+
+audio-codec@771c000 {
+ compatible = "qcom,msm8916-wcd-digital-codec";
+ reg = <0x0771c000 0x400>;
+ clocks = <&gcc GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK>,
+ <&gcc GCC_CODEC_DIGCODEC_CLK>;
+ clock-names = "ahbix-clk", "mclk";
+ #sound-dai-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/sound/rt5514.txt b/Documentation/devicetree/bindings/sound/rt5514.txt
index 9cabfc18cb57..929ca6756b02 100644
--- a/Documentation/devicetree/bindings/sound/rt5514.txt
+++ b/Documentation/devicetree/bindings/sound/rt5514.txt
@@ -13,6 +13,9 @@ Optional properties:
- clocks: The phandle of the master clock to the CODEC
- clock-names: Should be "mclk"
+- realtek,dmic-init-delay-ms
+ Set the DMIC initial delay (in ms) to wait until it is ready.
+
Pins on the device (for linking into audio routes) for RT5514:
* DMIC1L
diff --git a/Documentation/devicetree/bindings/sound/rt5663.txt b/Documentation/devicetree/bindings/sound/rt5663.txt
index 7d3c974c6e2e..70eaeaed2b18 100644
--- a/Documentation/devicetree/bindings/sound/rt5663.txt
+++ b/Documentation/devicetree/bindings/sound/rt5663.txt
@@ -1,10 +1,10 @@
-RT5663/RT5668 audio CODEC
+RT5663 audio CODEC
This device supports I2C only.
Required properties:
-- compatible : One of "realtek,rt5663" or "realtek,rt5668".
+- compatible : "realtek,rt5663".
- reg : The I2C address of the device.
@@ -12,7 +12,7 @@ Required properties:
Optional properties:
-Pins on the device (for linking into audio routes) for RT5663/RT5668:
+Pins on the device (for linking into audio routes) for RT5663:
* IN1P
* IN1N
diff --git a/Documentation/devicetree/bindings/sound/rt5665.txt b/Documentation/devicetree/bindings/sound/rt5665.txt
new file mode 100755
index 000000000000..419c89219681
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rt5665.txt
@@ -0,0 +1,68 @@
+RT5665/RT5666/RT5668 audio CODEC
+
+This device supports I2C only.
+
+Required properties:
+
+- compatible : One of "realtek,rt5665", "realtek,rt5666" or "realtek,rt5668".
+
+- reg : The I2C address of the device.
+
+- interrupts : The CODEC's interrupt output.
+
+Optional properties:
+
+- realtek,in1-differential
+- realtek,in2-differential
+- realtek,in3-differential
+- realtek,in4-differential
+ Boolean. Indicates that the MIC1/2/3/4 inputs are differential, rather than single-ended.
+
+- realtek,dmic1-data-pin
+ 0: dmic1 is not used
+ 1: using GPIO4 pin as dmic1 data pin
+ 2: using IN2N pin as dmic1 data pin
+
+- realtek,dmic2-data-pin
+ 0: dmic2 is not used
+ 1: using GPIO5 pin as dmic2 data pin
+ 2: using IN2P pin as dmic2 data pin
+
+- realtek,jd-src
+ 0: No JD is used
+ 1: using JD1 as JD source
+
+- realtek,ldo1-en-gpios : The GPIO that controls the CODEC's LDO1_EN pin.
+
+Pins on the device (for linking into audio routes) for RT5665/RT5666/RT5668:
+
+ * DMIC L1
+ * DMIC R1
+ * DMIC L2
+ * DMIC R2
+ * IN1P
+ * IN1N
+ * IN2P
+ * IN2N
+ * IN3P
+ * IN3N
+ * IN4P
+ * IN4N
+ * HPOL
+ * HPOR
+ * LOUTL
+ * LOUTR
+ * MONOOUT
+ * PDML
+ * PDMR
+
+Example:
+
+rt5665 {
+ compatible = "realtek,rt5665";
+ reg = <0x1b>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(W, 3) GPIO_ACTIVE_HIGH>;
+ realtek,ldo1-en-gpios =
+ <&gpio TEGRA_GPIO(V, 3) GPIO_ACTIVE_HIGH>;
+};
diff --git a/Documentation/devicetree/bindings/sound/samsung,tm2-audio.txt b/Documentation/devicetree/bindings/sound/samsung,tm2-audio.txt
new file mode 100644
index 000000000000..94442e5673b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/samsung,tm2-audio.txt
@@ -0,0 +1,38 @@
+Samsung Exynos5433 TM2(E) audio complex with WM5110 codec
+
+Required properties:
+
+ - compatible : "samsung,tm2-audio"
+ - model : the user-visible name of this sound complex
+ - audio-codec : the phandle of the wm5110 audio codec node,
+ as described in ../mfd/arizona.txt
+ - i2s-controller : the phandle of the I2S controller
+ - audio-amplifier : the phandle of the MAX98504 amplifier
+ - samsung,audio-routing : a list of the connections between audio components;
+ each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's
+ source; valid names for sources and sinks are the
+ WM5110's and MAX98504's pins and the jacks on the
+ board: HP, SPK, Main Mic, Sub Mic, Third Mic,
+ Headset Mic
+ - mic-bias-gpios : GPIO pin that enables the Main Mic bias regulator
+
+
+Example:
+
+sound {
+ compatible = "samsung,tm2-audio";
+ audio-codec = <&wm5110>;
+ i2s-controller = <&i2s0>;
+ audio-amplifier = <&max98504>;
+ mic-bias-gpios = <&gpr3 2 0>;
+ model = "wm5110";
+ samsung,audio-routing =
+ "HP", "HPOUT1L",
+ "HP", "HPOUT1R",
+ "SPK", "SPKOUT",
+ "SPKOUT", "HPOUT2L",
+ "SPKOUT", "HPOUT2R",
+ "Main Mic", "MICBIAS2",
+ "IN1R", "Main Mic";
+};
diff --git a/Documentation/devicetree/bindings/sound/sun4i-codec.txt b/Documentation/devicetree/bindings/sound/sun4i-codec.txt
index 0dce690f78f5..3033bd8aab0f 100644
--- a/Documentation/devicetree/bindings/sound/sun4i-codec.txt
+++ b/Documentation/devicetree/bindings/sound/sun4i-codec.txt
@@ -1,8 +1,12 @@
* Allwinner A10 Codec
Required properties:
-- compatible: must be either "allwinner,sun4i-a10-codec" or
- "allwinner,sun7i-a20-codec"
+- compatible: must be one of the following compatibles:
+ - "allwinner,sun4i-a10-codec"
+ - "allwinner,sun6i-a31-codec"
+ - "allwinner,sun7i-a20-codec"
+ - "allwinner,sun8i-a23-codec"
+ - "allwinner,sun8i-h3-codec"
- reg: must contain the registers location and length
- interrupts: must contain the codec interrupt
- dmas: DMA channels for tx and rx dma. See the DMA client binding,
@@ -17,6 +21,43 @@ Required properties:
Optional properties:
- allwinner,pa-gpios: gpio to enable external amplifier
+Required properties for the following compatibles:
+ - "allwinner,sun6i-a31-codec"
+ - "allwinner,sun8i-a23-codec"
+ - "allwinner,sun8i-h3-codec"
+- resets: phandle to the reset control for this device
+- allwinner,audio-routing: A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's
+ source. Valid names include:
+
+ Audio pins on the SoC:
+ "HP"
+ "HPCOM"
+ "LINEIN"
+ "LINEOUT" (not on sun8i-a23)
+ "MIC1"
+ "MIC2"
+ "MIC3" (sun6i-a31 only)
+
+ Microphone biases from the SoC:
+ "HBIAS"
+ "MBIAS"
+
+ Board connectors:
+ "Headphone"
+ "Headset Mic"
+ "Line In"
+ "Line Out"
+ "Mic"
+ "Speaker"
+
+Required properties for the following compatibles:
+ - "allwinner,sun8i-a23-codec"
+ - "allwinner,sun8i-h3-codec"
+- allwinner,codec-analog-controls: A phandle to the codec analog controls
+ block in the PRCM.
+
Example:
codec: codec@01c22c00 {
#sound-dai-cells = <0>;
@@ -28,3 +69,23 @@ codec: codec@01c22c00 {
dmas = <&dma 0 19>, <&dma 0 19>;
dma-names = "rx", "tx";
};
+
+codec: codec@01c22c00 {
+ #sound-dai-cells = <0>;
+ compatible = "allwinner,sun6i-a31-codec";
+ reg = <0x01c22c00 0x98>;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_APB1_CODEC>, <&ccu CLK_CODEC>;
+ clock-names = "apb", "codec";
+ resets = <&ccu RST_APB1_CODEC>;
+ dmas = <&dma 15>, <&dma 15>;
+ dma-names = "rx", "tx";
+ allwinner,audio-routing =
+ "Headphone", "HP",
+ "Speaker", "LINEOUT",
+ "LINEIN", "Line In",
+ "MIC1", "MBIAS",
+ "MIC1", "Mic",
+ "MIC2", "HBIAS",
+ "MIC2", "Headset Mic";
+};
diff --git a/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt b/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt
new file mode 100644
index 000000000000..779b735781ba
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt
@@ -0,0 +1,16 @@
+* Allwinner Codec Analog Controls
+
+Required properties:
+- compatible: must be one of the following compatibles:
+ - "allwinner,sun8i-a23-codec-analog"
+ - "allwinner,sun8i-h3-codec-analog"
+
+Required properties if not a sub-node of the PRCM node:
+- reg: must contain the registers location and length
+
+Example:
+prcm: prcm@01f01400 {
+ codec_analog: codec-analog {
+ compatible = "allwinner,sun8i-a23-codec-analog";
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
index 9340d2ddcc54..6fbba562eaa7 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
@@ -12,6 +12,7 @@ Required properties:
"ti,tlv320aic3120" - TLV320AIC3120 (mono speaker amp, MiniDSP)
"ti,tlv320aic3111" - TLV320AIC3111 (stereo speaker amp, MiniDSP)
"ti,tlv320dac3100" - TLV320DAC3100 (no ADC, mono speaker amp, no MiniDSP)
+ "ti,tlv320dac3101" - TLV320DAC3101 (no ADC, stereo speaker amp, no MiniDSP)
- reg - <int> - I2C slave address
- HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply,
diff --git a/Documentation/devicetree/bindings/sound/wm8580.txt b/Documentation/devicetree/bindings/sound/wm8580.txt
index 7d9821f348da..78fce9b14954 100644
--- a/Documentation/devicetree/bindings/sound/wm8580.txt
+++ b/Documentation/devicetree/bindings/sound/wm8580.txt
@@ -1,10 +1,10 @@
-WM8580 audio CODEC
+WM8580 and WM8581 audio CODEC
This device supports I2C only.
Required properties:
- - compatible : "wlf,wm8580"
+ - compatible : "wlf,wm8580", "wlf,wm8581"
- reg : the I2C address of the device.
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index aa005c1d10d9..da6614c63796 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -10,6 +10,7 @@ Required properties:
"renesas,msiof-r8a7792" (R-Car V2H)
"renesas,msiof-r8a7793" (R-Car M2-N)
"renesas,msiof-r8a7794" (R-Car E2)
+ "renesas,msiof-r8a7796" (R-Car M3-W)
"renesas,msiof-sh73a0" (SH-Mobile AG5)
- reg : A list of offsets and lengths of the register sets for
the device.
diff --git a/Documentation/devicetree/bindings/spi/spi-armada-3700.txt b/Documentation/devicetree/bindings/spi/spi-armada-3700.txt
new file mode 100644
index 000000000000..1564aa8c02cd
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-armada-3700.txt
@@ -0,0 +1,25 @@
+* Marvell Armada 3700 SPI Controller
+
+Required Properties:
+
+- compatible: should be "marvell,armada-3700-spi"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- interrupts: The interrupt number. The interrupt specifier format depends on
+ the interrupt controller and of its driver.
+- clocks: Must contain the clock source, usually from the North Bridge clocks.
+- num-cs: The number of chip selects supported by this SPI controller.
+- #address-cells: should be 1.
+- #size-cells: should be 0.
+
+Example:
+
+ spi0: spi@10600 {
+ compatible = "marvell,armada-3700-spi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10600 0x5d>;
+ clocks = <&nb_perih_clk 7>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ num-cs = <4>;
+ };
diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
new file mode 100644
index 000000000000..225ace1d0c65
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
@@ -0,0 +1,19 @@
+* Freescale Low Power SPI (LPSPI) for i.MX
+
+Required properties:
+- compatible :
+ - "fsl,imx7ulp-spi" for LPSPI compatible with the one integrated on i.MX7ULP soc
+- reg : address and length of the lpspi master registers
+- interrupt-parent : core interrupt controller
+- interrupts : lpspi interrupt
+- clocks : lpspi clock specifier
+
+Examples:
+
+lpspi2: lpspi@40290000 {
+ compatible = "fsl,imx7ulp-spi";
+ reg = <0x40290000 0x10000>;
+ interrupt-parent = <&intc>;
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7ULP_CLK_LPSPI2>;
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-sun6i.txt b/Documentation/devicetree/bindings/spi/spi-sun6i.txt
index 21de73db6a05..2ec99b86b622 100644
--- a/Documentation/devicetree/bindings/spi/spi-sun6i.txt
+++ b/Documentation/devicetree/bindings/spi/spi-sun6i.txt
@@ -1,7 +1,7 @@
-Allwinner A31 SPI controller
+Allwinner A31/H3 SPI controller
Required properties:
-- compatible: Should be "allwinner,sun6i-a31-spi".
+- compatible: Should be "allwinner,sun6i-a31-spi" or "allwinner,sun8i-h3-spi".
- reg: Should contain register location and length.
- interrupts: Should contain interrupt.
- clocks: phandle to the clocks feeding the SPI controller. Two are
@@ -12,6 +12,11 @@ Required properties:
- resets: phandle to the reset controller asserting this device in
reset
+Optional properties:
+- dmas: DMA specifiers for rx and tx dma. See the DMA client binding,
+ Documentation/devicetree/bindings/dma/dma.txt
+- dma-names: DMA request names should include "rx" and "tx" if present.
+
Example:
spi1: spi@01c69000 {
@@ -22,3 +27,19 @@ spi1: spi@01c69000 {
clock-names = "ahb", "mod";
resets = <&ahb1_rst 21>;
};
+
+spi0: spi@01c68000 {
+ compatible = "allwinner,sun8i-h3-spi";
+ reg = <0x01c68000 0x1000>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_SPI0>, <&ccu CLK_SPI0>;
+ clock-names = "ahb", "mod";
+ dmas = <&dma 23>, <&dma 23>;
+ dma-names = "rx", "tx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0_pins>;
+ resets = <&ccu RST_BUS_SPI0>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+};
diff --git a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
index 070baf4d7d97..b6b5130e5f65 100644
--- a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
+++ b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
@@ -7,8 +7,11 @@ To bind UFS PHY with UFS host controller, the controller node should
contain a phandle reference to UFS PHY node.
Required properties:
-- compatible : compatible list, contains "qcom,ufs-phy-qmp-20nm"
- or "qcom,ufs-phy-qmp-14nm" according to the relevant phy in use.
+- compatible : compatible list, contains one of the following -
+ "qcom,ufs-phy-qmp-20nm" for 20nm ufs phy,
+ "qcom,ufs-phy-qmp-14nm" for legacy 14nm ufs phy,
+ "qcom,msm8996-ufs-phy-qmp-14nm" for 14nm ufs phy
+ present on MSM8996 chipset.
- reg : should contain PHY register address space (mandatory),
- reg-names : indicates various resources passed to driver (via reg proptery) by name.
Required "reg-names" is "phy_mem".
diff --git a/Documentation/devicetree/bindings/usb/da8xx-usb.txt b/Documentation/devicetree/bindings/usb/da8xx-usb.txt
new file mode 100644
index 000000000000..ccb844aba7d4
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/da8xx-usb.txt
@@ -0,0 +1,43 @@
+TI DA8xx MUSB
+~~~~~~~~~~~~~
+For DA8xx/OMAP-L1x/AM17xx/AM18xx platforms.
+
+Required properties:
+~~~~~~~~~~~~~~~~~~~~
+ - compatible : Should be set to "ti,da830-musb".
+
+ - reg: Offset and length of the USB controller register set.
+
+ - interrupts: The USB interrupt number.
+
+ - interrupt-names: Should be set to "mc".
+
+ - dr_mode: The USB operation mode. Should be one of "host", "peripheral" or "otg".
+
+ - phys: Phandle for the PHY device
+
+ - phy-names: Should be "usb-phy"
+
+Optional properties:
+~~~~~~~~~~~~~~~~~~~~
+ - vbus-supply: Phandle to a regulator providing the USB bus power.
+
+Example:
+ usb_phy: usb-phy {
+ compatible = "ti,da830-usb-phy";
+ #phy-cells = <0>;
+ status = "okay";
+ };
+ usb0: usb@200000 {
+ compatible = "ti,da830-musb";
+ reg = <0x00200000 0x10000>;
+ interrupts = <58>;
+ interrupt-names = "mc";
+
+ dr_mode = "host";
+ vbus-supply = <&usb_vbus>;
+ phys = <&usb_phy 0>;
+ phy-names = "usb-phy";
+
+ status = "okay";
+ };
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index 2c30a5479069..6c7c2bce6d0c 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -12,6 +12,7 @@ Required properties:
- "lantiq,xrx200-usb": The DWC2 USB controller instance in Lantiq XRX SoCs;
- "amlogic,meson8b-usb": The DWC2 USB controller instance in Amlogic Meson8b SoCs;
- "amlogic,meson-gxbb-usb": The DWC2 USB controller instance in Amlogic S905 SoCs;
+ - "amcc,dwc-otg": The DWC2 USB controller instance in AMCC Canyonlands 460EX SoCs;
- snps,dwc2: A generic DWC2 USB controller with default parameters.
- reg : Should contain 1 register range (address and length)
- interrupts : Should contain 1 interrupt
@@ -25,11 +26,13 @@ Optional properties:
Refer to phy/phy-bindings.txt for generic phy consumer properties
- dr_mode: shall be one of "host", "peripheral" and "otg"
Refer to usb/generic.txt
-- g-use-dma: enable dma usage in gadget driver.
- g-rx-fifo-size: size of rx fifo size in gadget mode.
- g-np-tx-fifo-size: size of non-periodic tx fifo size in gadget mode.
- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0) in gadget mode.
+Deprecated properties:
+- g-use-dma: gadget DMA mode is automatically detected
+
Example:
usb@101c0000 {
diff --git a/Documentation/devicetree/bindings/usb/mt8173-mtu3.txt b/Documentation/devicetree/bindings/usb/mt8173-mtu3.txt
new file mode 100644
index 000000000000..e049d199bf0d
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/mt8173-mtu3.txt
@@ -0,0 +1,87 @@
+The device node for Mediatek USB3.0 DRD controller
+
+Required properties:
+ - compatible : should be "mediatek,mt8173-mtu3"
+ - reg : specifies physical base address and size of the registers
+ - reg-names: should be "mac" for device IP and "ippc" for IP port control
+ - interrupts : interrupt used by the device IP
+ - power-domains : a phandle to USB power domain node to control USB's
+ mtcmos
+ - vusb33-supply : regulator of USB avdd3.3v
+ - clocks : a list of phandle + clock-specifier pairs, one for each
+ entry in clock-names
+ - clock-names : must contain "sys_ck" for clock of controller;
+ "wakeup_deb_p0" and "wakeup_deb_p1" are optional, they are
+ depends on "mediatek,enable-wakeup"
+ - phys : a list of phandle + phy specifier pairs
+ - dr_mode : should be one of "host", "peripheral" or "otg",
+ refer to usb/generic.txt
+
+Optional properties:
+ - #address-cells, #size-cells : should be '2' if the device has sub-nodes
+ with 'reg' property
+ - ranges : allows valid 1:1 translation between child's address space and
+ parent's address space
+ - extcon : external connector for vbus and idpin changes detection, needed
+ when supports dual-role mode.
+ - vbus-supply : reference to the VBUS regulator, needed when supports
+ dual-role mode.
+ - pinctrl-names : a pinctrl state named "default" must be defined;
+ "id_float" and "id_ground" are optional and depend on
+ "mediatek,enable-manual-drd"
+ - pinctrl-0 : pin control group
+ See: Documentation/devicetree/bindings/pinctrl/pinctrl-binding.txt
+
+ - maximum-speed : valid arguments are "super-speed", "high-speed" and
+ "full-speed"; refer to usb/generic.txt
+ - enable-manual-drd : supports manual dual-role switch via debugfs; usually
+ used when the receptacle is Type-A and dual-role mode is also desired.
+ - mediatek,enable-wakeup : supports ip sleep wakeup used by host mode
+ - mediatek,syscon-wakeup : phandle to syscon used to access USB wakeup
+ control register; it depends on "mediatek,enable-wakeup".
+
+Sub-nodes:
+The xhci node should be added as a subnode of mtu3, as shown in the following
+example, if host mode is enabled. The DT binding details of xhci can be found in:
+Documentation/devicetree/bindings/usb/mt8173-xhci.txt
+
+Example:
+ssusb: usb@11271000 {
+ compatible = "mediatek,mt8173-mtu3";
+ reg = <0 0x11271000 0 0x3000>,
+ <0 0x11280700 0 0x0100>;
+ reg-names = "mac", "ippc";
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_LOW>;
+ phys = <&phy_port0 PHY_TYPE_USB3>,
+ <&phy_port1 PHY_TYPE_USB2>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
+ clocks = <&topckgen CLK_TOP_USB30_SEL>,
+ <&pericfg CLK_PERI_USB0>,
+ <&pericfg CLK_PERI_USB1>;
+ clock-names = "sys_ck",
+ "wakeup_deb_p0",
+ "wakeup_deb_p1";
+ vusb33-supply = <&mt6397_vusb_reg>;
+ vbus-supply = <&usb_p0_vbus>;
+ extcon = <&extcon_usb>;
+ dr_mode = "otg";
+ mediatek,enable-wakeup;
+ mediatek,syscon-wakeup = <&pericfg>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "disabled";
+
+ usb_host: xhci@11270000 {
+ compatible = "mediatek,mt8173-xhci";
+ reg = <0 0x11270000 0 0x1000>;
+ reg-names = "mac";
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
+ clocks = <&topckgen CLK_TOP_USB30_SEL>;
+ clock-names = "sys_ck";
+ vusb33-supply = <&mt6397_vusb_reg>;
+ status = "disabled";
+ };
+};
diff --git a/Documentation/devicetree/bindings/usb/mt8173-xhci.txt b/Documentation/devicetree/bindings/usb/mt8173-xhci.txt
index b3a7ffa48852..2a930bd52b94 100644
--- a/Documentation/devicetree/bindings/usb/mt8173-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/mt8173-xhci.txt
@@ -2,10 +2,18 @@ MT8173 xHCI
The device node for Mediatek SOC USB3.0 host controller
+There are two scenarios: the first one only supports the xHCI driver;
+the second one supports dual-role mode, with the host side based on the
+xHCI driver. To preserve backward compatibility, the bindings are divided
+into two parts.
+
+1st: only supports xHCI driver
+------------------------------------------------------------------------
+
Required properties:
- compatible : should contain "mediatek,mt8173-xhci"
- - reg : specifies physical base address and size of the registers,
- the first one for MAC, the second for IPPC
+ - reg : specifies physical base address and size of the registers
+ - reg-names: should be "mac" for xHCI MAC and "ippc" for IP port control
- interrupts : interrupt used by the controller
- power-domains : a phandle to USB power domain node to control USB's
mtcmos
@@ -27,12 +35,16 @@ Optional properties:
control register, it depends on "mediatek,wakeup-src".
- vbus-supply : reference to the VBUS regulator;
- usb3-lpm-capable : supports USB3.0 LPM
+ - pinctrl-names : a pinctrl state named "default" must be defined
+ - pinctrl-0 : pin control group
+ See: Documentation/devicetree/bindings/pinctrl/pinctrl-binding.txt
Example:
usb30: usb@11270000 {
compatible = "mediatek,mt8173-xhci";
reg = <0 0x11270000 0 0x1000>,
<0 0x11280700 0 0x0100>;
+ reg-names = "mac", "ippc";
interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
clocks = <&topckgen CLK_TOP_USB30_SEL>,
@@ -49,3 +61,41 @@ usb30: usb@11270000 {
mediatek,syscon-wakeup = <&pericfg>;
mediatek,wakeup-src = <1>;
};
+
+2nd: dual-role mode with xHCI driver
+------------------------------------------------------------------------
+
+In this case, the xhci node is added as a subnode of mtu3. An example and the DT binding
+details of mtu3 can be found in:
+Documentation/devicetree/bindings/usb/mtu3.txt
+
+Required properties:
+ - compatible : should contain "mediatek,mt8173-xhci"
+ - reg : specifies physical base address and size of the registers
+ - reg-names: should be "mac" for xHCI MAC
+ - interrupts : interrupt used by the host controller
+ - power-domains : a phandle to USB power domain node to control USB's
+ mtcmos
+ - vusb33-supply : regulator of USB avdd3.3v
+
+ - clocks : a list of phandle + clock-specifier pairs, one for each
+ entry in clock-names
+ - clock-names : must be
+ "sys_ck": for clock of xHCI MAC
+
+Optional properties:
+ - vbus-supply : reference to the VBUS regulator;
+ - usb3-lpm-capable : supports USB3.0 LPM
+
+Example:
+usb30: usb@11270000 {
+ compatible = "mediatek,mt8173-xhci";
+ reg = <0 0x11270000 0 0x1000>;
+ reg-names = "mac";
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
+ clocks = <&topckgen CLK_TOP_USB30_SEL>;
+ clock-names = "sys_ck";
+ vusb33-supply = <&mt6397_vusb_reg>;
+ usb3-lpm-capable;
+};
diff --git a/Documentation/devicetree/bindings/usb/ohci-da8xx.txt b/Documentation/devicetree/bindings/usb/ohci-da8xx.txt
new file mode 100644
index 000000000000..2dc8f67eda39
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ohci-da8xx.txt
@@ -0,0 +1,23 @@
+DA8XX USB OHCI controller
+
+Required properties:
+
+ - compatible: Should be "ti,da830-ohci"
+ - reg: Should contain one register range i.e. start and length
+ - interrupts: Description of the interrupt line
+ - phys: Phandle for the PHY device
+ - phy-names: Should be "usb-phy"
+
+Optional properties:
+ - vbus-supply: phandle of regulator that controls vbus power / over-current
+
+Example:
+
+ohci: usb@0225000 {
+ compatible = "ti,da830-ohci";
+ reg = <0x225000 0x1000>;
+ interrupts = <59>;
+ phys = <&usb_phy 1>;
+ phy-names = "usb-phy";
+ vbus-supply = <&reg_usb_ohci>;
+};
diff --git a/Documentation/devicetree/bindings/usb/s3c2410-usb.txt b/Documentation/devicetree/bindings/usb/s3c2410-usb.txt
new file mode 100644
index 000000000000..e45b38ce2986
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/s3c2410-usb.txt
@@ -0,0 +1,22 @@
+Samsung S3C2410 and compatible SoC USB controller
+
+OHCI
+
+Required properties:
+ - compatible: should be "samsung,s3c2410-ohci" for USB host controller
+ - reg: address and length of the controller memory mapped region
+ - interrupts: interrupt number for the USB OHCI controller
+ - clocks: Should reference the bus and host clocks
+ - clock-names: Should contain two strings
+ "usb-bus-host" for the USB bus clock
+ "usb-host" for the USB host clock
+
+Example:
+
+usb0: ohci@49000000 {
+ compatible = "samsung,s3c2410-ohci";
+ reg = <0x49000000 0x100>;
+ interrupts = <0 0 26 3>;
+ clocks = <&clocks UCLK>, <&clocks HCLK_USBH>;
+ clock-names = "usb-bus-host", "usb-host";
+};
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
index 966885c636d0..0b7d8576001c 100644
--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -11,6 +11,7 @@ Required properties:
- "renesas,xhci-r8a7791" for r8a7791 SoC
- "renesas,xhci-r8a7793" for r8a7793 SoC
- "renesas,xhci-r8a7795" for r8a7795 SoC
+ - "renesas,xhci-r8a7796" for r8a7796 SoC
- "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 compatible device
- "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 compatible device
- "xhci-platform" (deprecated)
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index d9c51d7f4aac..98371753a08f 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -39,6 +39,7 @@ auo AU Optronics Corporation
auvidea Auvidea GmbH
avago Avago Technologies
avic Shanghai AVIC Optoelectronics Co., Ltd.
+axentia Axentia Technologies AB
axis Axis Communications AB
boe BOE Technology Group Co., Ltd.
bosch Bosch Sensortec GmbH
@@ -126,6 +127,7 @@ hitex Hitex Development Tools
holt Holt Integrated Circuits, Inc.
honeywell Honeywell
hp Hewlett Packard
+holtek Holtek Semiconductor, Inc.
i2se I2SE GmbH
ibm International Business Machines (IBM)
idt Integrated Device Technologies, Inc.
@@ -160,16 +162,19 @@ lltc Linear Technology Corporation
lsi LSI Corp. (LSI Logic)
marvell Marvell Technology Group Ltd.
maxim Maxim Integrated Products
+mcube mCube
meas Measurement Specialties
mediatek MediaTek Inc.
melexis Melexis N.V.
melfas MELFAS Inc.
+memsic MEMSIC Inc.
merrii Merrii Technology Co., Ltd.
micrel Micrel Inc.
microchip Microchip Technology Inc.
microcrystal Micro Crystal AG
micron Micron Technology Inc.
minix MINIX Technology Ltd.
+miramems MiraMEMS Sensing Technology Co., Ltd.
mitsubishi Mitsubishi Electric Corporation
mosaixtech Mosaix Technologies, Inc.
moxa Moxa
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 5385cba941d2..a23edccd2059 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -139,7 +139,6 @@ hpet_example
hugepage-mmap
hugepage-shm
ihex2fw
-ikconfig.h*
inat-tables.c
initramfs_list
int16.c
diff --git a/Documentation/filesystems/configfs/configfs.txt b/Documentation/filesystems/configfs/configfs.txt
index 8ec9136aae56..3828e85345ae 100644
--- a/Documentation/filesystems/configfs/configfs.txt
+++ b/Documentation/filesystems/configfs/configfs.txt
@@ -174,7 +174,7 @@ among other things. For that, it needs a type.
void (*release)(struct config_item *);
int (*allow_link)(struct config_item *src,
struct config_item *target);
- int (*drop_link)(struct config_item *src,
+ void (*drop_link)(struct config_item *src,
struct config_item *target);
};
diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt
index 23d18b8a49d5..a7e6e14aeb08 100644
--- a/Documentation/filesystems/dax.txt
+++ b/Documentation/filesystems/dax.txt
@@ -58,22 +58,22 @@ Implementation Tips for Filesystem Writers
Filesystem support consists of
- adding support to mark inodes as being DAX by setting the S_DAX flag in
i_flags
-- implementing the direct_IO address space operation, and calling
- dax_do_io() instead of blockdev_direct_IO() if S_DAX is set
+- implementing ->read_iter and ->write_iter operations which use dax_iomap_rw()
+ when inode has S_DAX flag set
- implementing an mmap file operation for DAX files which sets the
VM_MIXEDMAP and VM_HUGEPAGE flags on the VMA, and setting the vm_ops to
- include handlers for fault, pmd_fault and page_mkwrite (which should
- probably call dax_fault(), dax_pmd_fault() and dax_mkwrite(), passing the
- appropriate get_block() callback)
-- calling dax_truncate_page() instead of block_truncate_page() for DAX files
-- calling dax_zero_page_range() instead of zero_user() for DAX files
+ include handlers for fault, pmd_fault, page_mkwrite, pfn_mkwrite. These
+ handlers should probably call dax_iomap_fault() (for fault and page_mkwrite
+ handlers), dax_iomap_pmd_fault(), dax_pfn_mkwrite() passing the appropriate
+ iomap operations.
+- calling iomap_zero_range() passing appropriate iomap operations instead of
+ block_truncate_page() for DAX files
- ensuring that there is sufficient locking between reads, writes,
truncates and page faults
-The get_block() callback passed to the DAX functions may return
-uninitialised extents. If it does, it must ensure that simultaneous
-calls to get_block() (for example by a page-fault racing with a read()
-or a write()) work correctly.
+The iomap handlers for allocating blocks must make sure that allocated blocks
+are zeroed out and converted to written extents before being returned to avoid
+exposure of uninitialized data through mmap.
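+
+As a rough illustration, a DAX-aware ->read_iter could be structured along
+these lines (a minimal sketch modeled on ext2; the "foo" names and
+foo_iomap_ops are hypothetical placeholders for the filesystem's own code):
+
+static ssize_t foo_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+	struct inode *inode = iocb->ki_filp->f_mapping->host;
+	ssize_t ret;
+
+	if (!iov_iter_count(to))
+		return 0;	/* skip atime update for zero-length reads */
+
+	inode_lock_shared(inode);
+	/* foo_iomap_ops is the filesystem's struct iomap_ops */
+	ret = dax_iomap_rw(iocb, to, &foo_iomap_ops);
+	inode_unlock_shared(inode);
+
+	file_accessed(iocb->ki_filp);
+	return ret;
+}
+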
These filesystems may be used for inspiration:
- ext2: see Documentation/filesystems/ext2.txt
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index 6c0108eb0137..3698ed3146e3 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -351,14 +351,13 @@ nouid32 Disables 32-bit UIDs and GIDs. This is for
interoperability with older kernels which only
store and expect 16-bit values.
-block_validity This options allows to enables/disables the in-kernel
+block_validity(*) These options enable or disable the in-kernel
noblock_validity facility for tracking filesystem metadata blocks
- within internal data structures. This allows multi-
- block allocator and other routines to quickly locate
- extents which might overlap with filesystem metadata
- blocks. This option is intended for debugging
- purposes and since it negatively affects the
- performance, it is off by default.
+ within internal data structures. This allows multi-
+ block allocator and other routines to notice
+ bugs or corrupted allocation bitmaps which cause
+ blocks to be allocated which overlap with
+ filesystem metadata blocks.
dioread_lock Controls whether or not ext4 should use the DIO read
dioread_nolock locking. If the dioread_nolock option is specified
diff --git a/Documentation/fpga/fpga-mgr.txt b/Documentation/fpga/fpga-mgr.txt
index ce3e84fa9023..86ee5078fd03 100644
--- a/Documentation/fpga/fpga-mgr.txt
+++ b/Documentation/fpga/fpga-mgr.txt
@@ -18,31 +18,37 @@ API Functions:
To program the FPGA from a file or from a buffer:
-------------------------------------------------
- int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags,
+ int fpga_mgr_buf_load(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
const char *buf, size_t count);
Load the FPGA from an image which exists as a buffer in memory.
- int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
+ int fpga_mgr_firmware_load(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
const char *image_name);
Load the FPGA from an image which exists as a file. The image file must be on
-the firmware search path (see the firmware class documentation).
-
-For both these functions, flags == 0 for normal full reconfiguration or
-FPGA_MGR_PARTIAL_RECONFIG for partial reconfiguration. If successful, the FPGA
-ends up in operating mode. Return 0 on success or a negative error code.
+the firmware search path (see the firmware class documentation). If successful,
+the FPGA ends up in operating mode. Return 0 on success or a negative error
+code.
+An FPGA design contained in an FPGA image file will likely have particulars that
+affect how the image is programmed to the FPGA. These are contained in struct
+fpga_image_info. Currently the only such particular is a single flag bit
+indicating whether the image is for full or partial reconfiguration.
To get/put a reference to a FPGA manager:
-----------------------------------------
struct fpga_manager *of_fpga_mgr_get(struct device_node *node);
+ struct fpga_manager *fpga_mgr_get(struct device *dev);
+
+Given a DT node or device, get an exclusive reference to a FPGA manager.
void fpga_mgr_put(struct fpga_manager *mgr);
-Given a DT node, get an exclusive reference to a FPGA manager or release
-the reference.
+Release the reference.
To register or unregister the low level FPGA-specific driver:
@@ -70,8 +76,11 @@ struct device_node *mgr_node = ...
char *buf = ...
int count = ...
+/* struct with information about the FPGA image to program. */
+struct fpga_image_info info;
+
/* flags indicates whether to do full or partial reconfiguration */
-int flags = 0;
+info.flags = 0;
int ret;
@@ -79,7 +88,7 @@ int ret;
struct fpga_manager *mgr = of_fpga_mgr_get(mgr_node);
/* Load the buffer to the FPGA */
-ret = fpga_mgr_buf_load(mgr, flags, buf, count);
+ret = fpga_mgr_buf_load(mgr, &info, buf, count);
/* Release the FPGA manager */
fpga_mgr_put(mgr);
@@ -96,8 +105,11 @@ struct device_node *mgr_node = ...
/* FPGA image is in this file which is in the firmware search path */
const char *path = "fpga-image-9.rbf"
+/* struct with information about the FPGA image to program. */
+struct fpga_image_info info;
+
/* flags indicates whether to do full or partial reconfiguration */
-int flags = 0;
+info.flags = 0;
int ret;
@@ -105,7 +117,7 @@ int ret;
struct fpga_manager *mgr = of_fpga_mgr_get(mgr_node);
/* Get the firmware image (path) and load it to the FPGA */
-ret = fpga_mgr_firmware_load(mgr, flags, path);
+ret = fpga_mgr_firmware_load(mgr, &info, path);
/* Release the FPGA manager */
fpga_mgr_put(mgr);
@@ -157,7 +169,10 @@ The programming sequence is:
2. .write (may be called once or multiple times)
3. .write_complete
-The .write_init function will prepare the FPGA to receive the image data.
+The .write_init function will prepare the FPGA to receive the image data. The
+buffer passed into .write_init will be at most .initial_header_size bytes long;
+if the whole bitstream is not immediately available, the core code will
+buffer up at least this much before starting.
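+
+A rough sketch of a .write_init implementation (the foo_* driver and helpers
+are hypothetical, and the prototype assumes .write_init is handed the same
+struct fpga_image_info described above for the load functions) might be:
+
+static int foo_write_init(struct fpga_manager *mgr,
+			  struct fpga_image_info *info,
+			  const char *buf, size_t count)
+{
+	/* buf holds at most .initial_header_size bytes at this point */
+	if (info->flags & FPGA_MGR_PARTIAL_RECONFIG)
+		return foo_prepare_partial_reconfig(mgr, buf, count);
+
+	return foo_prepare_full_reconfig(mgr, buf, count);
+}
+
+static const struct fpga_manager_ops foo_fpga_ops = {
+	.initial_header_size	= 128,	/* header bytes wanted before .write_init */
+	.write_init		= foo_write_init,
+	/* .write and .write_complete omitted from this sketch */
+};
+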
 The .write function writes a buffer to the FPGA. The buffer may contain the
whole FPGA image or may be a smaller chunk of an FPGA image. In the latter
diff --git a/Documentation/hwmon/hwmon-kernel-api.txt b/Documentation/hwmon/hwmon-kernel-api.txt
index ef9d74947f5c..2505ae67e2b6 100644
--- a/Documentation/hwmon/hwmon-kernel-api.txt
+++ b/Documentation/hwmon/hwmon-kernel-api.txt
@@ -23,7 +23,6 @@ Each hardware monitoring driver must #include <linux/hwmon.h> and, in most
cases, <linux/hwmon-sysfs.h>. linux/hwmon.h declares the following
register/unregister functions:
-struct device *hwmon_device_register(struct device *dev);
struct device *
hwmon_device_register_with_groups(struct device *dev, const char *name,
void *drvdata,
@@ -38,36 +37,31 @@ struct device *
hwmon_device_register_with_info(struct device *dev,
const char *name, void *drvdata,
const struct hwmon_chip_info *info,
- const struct attribute_group **groups);
+ const struct attribute_group **extra_groups);
struct device *
devm_hwmon_device_register_with_info(struct device *dev,
- const char *name,
- void *drvdata,
- const struct hwmon_chip_info *info,
- const struct attribute_group **groups);
+ const char *name,
+ void *drvdata,
+ const struct hwmon_chip_info *info,
+ const struct attribute_group **extra_groups);
void hwmon_device_unregister(struct device *dev);
void devm_hwmon_device_unregister(struct device *dev);
-hwmon_device_register registers a hardware monitoring device. The parameter
-of this function is a pointer to the parent device.
-This function returns a pointer to the newly created hardware monitoring device
-or PTR_ERR for failure. If this registration function is used, hardware
-monitoring sysfs attributes are expected to have been created and attached to
-the parent device prior to calling hwmon_device_register. A name attribute must
-have been created by the caller.
-
-hwmon_device_register_with_groups is similar to hwmon_device_register. However,
-it has additional parameters. The name parameter is a pointer to the hwmon
-device name. The registration function wil create a name sysfs attribute
-pointing to this name. The drvdata parameter is the pointer to the local
-driver data. hwmon_device_register_with_groups will attach this pointer
-to the newly allocated hwmon device. The pointer can be retrieved by the driver
-using dev_get_drvdata() on the hwmon device pointer. The groups parameter is
+hwmon_device_register_with_groups registers a hardware monitoring device.
+The first parameter of this function is a pointer to the parent device.
+The name parameter is a pointer to the hwmon device name. The registration
+function will create a name sysfs attribute pointing to this name.
+The drvdata parameter is the pointer to the local driver data.
+hwmon_device_register_with_groups will attach this pointer to the newly
+allocated hwmon device. The pointer can be retrieved by the driver using
+dev_get_drvdata() on the hwmon device pointer. The groups parameter is
a pointer to a list of sysfs attribute groups. The list must be NULL terminated.
hwmon_device_register_with_groups creates the hwmon device with name attribute
as well as all sysfs attributes attached to the hwmon device.
+This function returns a pointer to the newly created hardware monitoring device
+or PTR_ERR for failure.
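+
+A minimal sketch of this registration style (a hypothetical "foo" driver; the
+attribute is built with the SENSOR_DEVICE_ATTR helper from linux/hwmon-sysfs.h)
+could look like:
+
+struct foo_data {
+	long temp;	/* cached temperature in millidegrees C */
+};
+
+static ssize_t foo_temp_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct foo_data *data = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%ld\n", data->temp);
+}
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, foo_temp_show, NULL, 0);
+
+static struct attribute *foo_attrs[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(foo);
+
+/* in probe(), with dev as the parent device and data as driver private data */
+hwmon_dev = hwmon_device_register_with_groups(dev, "foo", data, foo_groups);
+if (IS_ERR(hwmon_dev))
+	return PTR_ERR(hwmon_dev);
+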
devm_hwmon_device_register_with_groups is similar to
hwmon_device_register_with_groups. However, it is device managed, meaning the
@@ -87,13 +81,13 @@ hwmon_device_unregister deregisters a registered hardware monitoring device.
The parameter of this function is the pointer to the registered hardware
monitoring device structure. This function must be called from the driver
remove function if the hardware monitoring device was registered with
-hwmon_device_register, hwmon_device_register_with_groups, or
-hwmon_device_register_with_info.
+hwmon_device_register_with_groups or hwmon_device_register_with_info.
devm_hwmon_device_unregister does not normally have to be called. It is only
needed for error handling, and only needed if the driver probe fails after
-the call to devm_hwmon_device_register_with_groups and if the automatic
-(device managed) removal would be too late.
+the call to devm_hwmon_device_register_with_groups or
+hwmon_device_register_with_info and if the automatic (device managed)
+removal would be too late.
Using devm_hwmon_device_register_with_info()
--------------------------------------------
@@ -106,9 +100,9 @@ const char *name Device name
void *drvdata Driver private data
const struct hwmon_chip_info *info
Pointer to chip description.
-const struct attribute_group **groups
- Null-terminated list of additional sysfs attribute
- groups.
+const struct attribute_group **extra_groups
+ Null-terminated list of additional non-standard
+ sysfs attribute groups.
This function returns a pointer to the created hardware monitoring device
on success and a negative error code for failure.
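+
+For illustration, the chip description for a hypothetical "foo" chip with a
+single temperature input (stub callbacks, values made up for the sketch) might
+look like:
+
+static umode_t foo_is_visible(const void *drvdata, enum hwmon_sensor_types type,
+			      u32 attr, int channel)
+{
+	return 0444;	/* everything read-only in this sketch */
+}
+
+static int foo_read(struct device *dev, enum hwmon_sensor_types type,
+		    u32 attr, int channel, long *val)
+{
+	*val = 25000;	/* made-up reading, millidegrees C */
+	return 0;
+}
+
+static const u32 foo_temp_config[] = {
+	HWMON_T_INPUT,
+	0
+};
+
+static const struct hwmon_channel_info foo_temp = {
+	.type = hwmon_temp,
+	.config = foo_temp_config,
+};
+
+static const struct hwmon_channel_info *foo_info[] = {
+	&foo_temp,
+	NULL
+};
+
+static const struct hwmon_ops foo_hwmon_ops = {
+	.is_visible = foo_is_visible,
+	.read = foo_read,
+};
+
+static const struct hwmon_chip_info foo_chip_info = {
+	.ops = &foo_hwmon_ops,
+	.info = foo_info,
+};
+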
@@ -160,7 +154,7 @@ It contains following fields:
* type: The hardware monitoring sensor type.
Supported sensor types are
* hwmon_chip A virtual sensor type, used to describe attributes
- which apply to the entire chip.
+ which are not bound to a specific input or output.
* hwmon_temp Temperature sensor
* hwmon_in Voltage sensor
* hwmon_curr Current sensor
@@ -293,9 +287,9 @@ Driver-provided sysfs attributes
If the hardware monitoring device is registered with
hwmon_device_register_with_info or devm_hwmon_device_register_with_info,
-it is most likely not necessary to provide sysfs attributes. Only non-standard
-sysfs attributes need to be provided when one of those registration functions
-is used.
+it is most likely not necessary to provide sysfs attributes. Only additional
+non-standard sysfs attributes need to be provided when one of those registration
+functions is used.
The header file linux/hwmon-sysfs.h provides a number of useful macros to
declare and use hardware monitoring sysfs attributes.
diff --git a/Documentation/hwmon/tc654 b/Documentation/hwmon/tc654
new file mode 100644
index 000000000000..91a2843f5f98
--- /dev/null
+++ b/Documentation/hwmon/tc654
@@ -0,0 +1,31 @@
+Kernel driver tc654
+===================
+
+Supported chips:
+ * Microchip TC654 and TC655
+ Prefix: 'tc654'
+ Datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/20001734C.pdf
+
+Authors:
+ Chris Packham <chris.packham@alliedtelesis.co.nz>
+ Masahiko Iwamoto <iwamoto@allied-telesis.co.jp>
+
+Description
+-----------
+This driver implements support for the Microchip TC654 and TC655.
+
+The TC654 uses the 2-wire interface compatible with the SMBUS 2.0
+specification. The TC654 has two (2) inputs for measuring fan RPM and
+one (1) PWM output which can be used for fan control.
+
+Configuration Notes
+-------------------
+Ordinarily the pwm1_mode ABI is used for controlling the pwm output
+mode. However, for this chip the output is always pwm, and the
+pwm1_mode determines if the pwm output is controlled via the pwm1 value
+or via the Vin analog input.
+
+Setting pwm1_mode to 1 will cause the pwm output to be driven based on
+the pwm1 value. Setting pwm1_mode to 0 will cause the pwm output to be
+driven based on the Vin input.
diff --git a/Documentation/hwmon/tmp108 b/Documentation/hwmon/tmp108
new file mode 100644
index 000000000000..25802df23010
--- /dev/null
+++ b/Documentation/hwmon/tmp108
@@ -0,0 +1,36 @@
+Kernel driver tmp108
+====================
+
+Supported chips:
+ * Texas Instruments TMP108
+ Prefix: 'tmp108'
+ Addresses scanned: none
+ Datasheet: http://www.ti.com/product/tmp108
+
+Author:
+ John Muir <john@jmuir.com>
+
+Description
+-----------
+
+The Texas Instruments TMP108 implements one temperature sensor. An alert pin
+can be set when temperatures exceed minimum or maximum values plus or minus a
+hysteresis value. (This driver does not support interrupts for the alert pin,
+and the device runs in comparator mode.)
+
+The sensor is accurate to 0.75C over the range of -25 to +85 C, and to 1.0
+degree from -40 to +125 C. Resolution of the sensor is 0.0625 degree. The
+operating temperature has a minimum of -55 C and a maximum of +150 C.
+Hysteresis values can be set to 0, 1, 2, or 4C.
+
+The TMP108 has a programmable update rate that can select between 8, 4, 1, and
+0.5 Hz.
+
+By default the TMP108 reads the temperature continuously. To conserve power,
+the TMP108 has a one-shot mode in which the device is normally shut down. When
+a one-shot measurement is requested, the temperature is read, the result can be
+retrieved, and the device is then shut down automatically. (This driver only
+supports continuous mode.)
+
+The driver provides the common sysfs-interface for temperatures (see
+Documentation/hwmon/sysfs-interface under Temperatures).
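+
+For example, assuming the device is registered as hwmon1 (the instance
+number and the limit value below are illustrative; the attribute names
+follow the standard temperature interface), the temperature can be read
+and the maximum limit adjusted with:
+
+  cat /sys/class/hwmon/hwmon1/temp1_input
+  echo 85000 > /sys/class/hwmon/hwmon1/temp1_max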
diff --git a/Documentation/livepatch/livepatch.txt b/Documentation/livepatch/livepatch.txt
index 6c43f6ebee8d..f5967316deb9 100644
--- a/Documentation/livepatch/livepatch.txt
+++ b/Documentation/livepatch/livepatch.txt
@@ -87,7 +87,7 @@ The theory about how to apply functions a safe way is rather complex.
The aim is to define a so-called consistency model. It attempts to define
conditions when the new implementation could be used so that the system
stays consistent. The theory is not yet finished. See the discussion at
-http://thread.gmane.org/gmane.linux.kernel/1823033/focus=1828189
+https://lkml.kernel.org/r/20141107140458.GA21774@suse.cz
The current consistency model is very simple. It guarantees that either
the old or the new function is called. But various functions get redirected
diff --git a/Documentation/trace/intel_th.txt b/Documentation/trace/intel_th.txt
index f7fc5ba5df8d..f92070e7dde0 100644
--- a/Documentation/trace/intel_th.txt
+++ b/Documentation/trace/intel_th.txt
@@ -97,3 +97,25 @@ $ echo 0 > /sys/bus/intel_th/devices/0-msc0/active
# and now you can collect the trace from the device node:
$ cat /dev/intel_th0/msc0 > my_stp_trace
+
+Host Debugger Mode
+==================
+
+It is possible to configure the Trace Hub and control its trace
+capture from a remote debug host connected via one of the hardware
+debugging interfaces, which is then used both to control the Intel
+Trace Hub and to transfer its trace data to the debug host.
+
+The driver needs to be told that such an arrangement is taking place
+so that it does not touch any capture/port configuration and avoids
+conflicting with the debug host's configuration accesses. The only
+activity that the driver will perform in this mode is collecting
+software traces to the Software Trace Hub (an stm class device). The
+user is still responsible for setting up adequate master/channel
+mappings that the decoder on the receiving end would recognize.
+
+To enable host mode, set the 'host_mode' parameter of the 'intel_th'
+kernel module to 'y'. In this mode, none of the virtual output devices
+will show up on the intel_th bus, and the trace configuration and
+capture controlling attribute groups of the 'gth' device will not be
+exposed. The 'sth' device will operate as usual.
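+
+For example, host mode can be enabled when loading the driver (a sketch,
+assuming it is built as a module; for a built-in driver the same setting
+can be passed as intel_th.host_mode=y on the kernel command line):
+
+$ modprobe intel_th host_mode=y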
diff --git a/Documentation/trace/stm.txt b/Documentation/trace/stm.txt
index ea035f9dbfd7..11cff47eecce 100644
--- a/Documentation/trace/stm.txt
+++ b/Documentation/trace/stm.txt
@@ -69,12 +69,43 @@ stm device's channel mmio region is 64 bytes and hardware page size is
width==64, you should be able to mmap() one page on this file
descriptor and obtain direct access to an mmio region for 64 channels.
+Examples of STM devices are Intel(R) Trace Hub [1] and Coresight STM
+[2].
+
+stm_source
+==========
+
For kernel-based trace sources, there is "stm_source" device
class. Devices of this class can be connected and disconnected to/from
-stm devices at runtime via a sysfs attribute.
+stm devices at runtime via a sysfs attribute called "stm_source_link"
+by writing the name of the desired stm device there, for example:
-Examples of STM devices are Intel(R) Trace Hub [1] and Coresight STM
-[2].
+$ echo dummy_stm.0 > /sys/class/stm_source/console/stm_source_link
+
+For examples on how to use stm_source interface in the kernel, refer
+to stm_console or stm_heartbeat drivers.
+
+Each stm_source device will need to assume a master and a range of
+channels, depending on how many channels it requires. These are
+allocated for the device according to the policy configuration. If
+there's a node in the root of the policy directory that matches the
+stm_source device's name (for example, "console"), this node will be
+used to allocate master and channel numbers. If there's no such policy
+node, the stm core will pick the first contiguous chunk of channels
+within the first available master. Note that the node must exist
+before the stm_source device is connected to its stm device.
+
+stm_console
+===========
+
+One implementation of this interface, also used in the example above, is
+the "stm_console" driver, which provides a one-way console for kernel
+messages over an stm device.
+
+To configure the master/channel pair that will be assigned to this
+console in the STP stream, create a "console" policy entry (see the
+beginning of this text on how to do that). When initialized, it will
+consume one channel.
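+
+For example, a minimal setup for the console (a sketch; it assumes
+configfs is mounted at /config and the stm device is dummy_stm.0 as in
+the example above; the policy node is created before the source is
+linked, as required):
+
+$ mkdir /config/stp-policy/dummy_stm.0.my-policy
+$ mkdir /config/stp-policy/dummy_stm.0.my-policy/console
+$ echo dummy_stm.0 > /sys/class/stm_source/console/stm_source_link
+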
[1] https://software.intel.com/sites/default/files/managed/d3/3c/intel-th-developer-manual.pdf
[2] http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0444b/index.html
diff --git a/Documentation/virtual/kvm/00-INDEX b/Documentation/virtual/kvm/00-INDEX
index fee9f2bf9c64..69fe1a8b7ad1 100644
--- a/Documentation/virtual/kvm/00-INDEX
+++ b/Documentation/virtual/kvm/00-INDEX
@@ -6,6 +6,8 @@ cpuid.txt
- KVM-specific cpuid leaves (x86).
devices/
- KVM_CAP_DEVICE_CTRL userspace API.
+halt-polling.txt
+ - notes on halt-polling
hypercalls.txt
- KVM hypercalls.
locking.txt
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 6bbceb9a3a19..03145b7cafaa 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2034,6 +2034,8 @@ registers, find a list below:
PPC | KVM_REG_PPC_WORT | 64
PPC | KVM_REG_PPC_SPRG9 | 64
PPC | KVM_REG_PPC_DBSR | 32
+ PPC | KVM_REG_PPC_TIDR | 64
+ PPC | KVM_REG_PPC_PSSCR | 64
PPC | KVM_REG_PPC_TM_GPR0 | 64
...
PPC | KVM_REG_PPC_TM_GPR31 | 64
@@ -2050,6 +2052,7 @@ registers, find a list below:
PPC | KVM_REG_PPC_TM_VSCR | 32
PPC | KVM_REG_PPC_TM_DSCR | 64
PPC | KVM_REG_PPC_TM_TAR | 64
+ PPC | KVM_REG_PPC_TM_XER | 64
| |
MIPS | KVM_REG_MIPS_R0 | 64
...
@@ -2209,7 +2212,7 @@ after pausing the vcpu, but before it is resumed.
4.71 KVM_SIGNAL_MSI
Capability: KVM_CAP_SIGNAL_MSI
-Architectures: x86 arm64
+Architectures: x86 arm arm64
Type: vm ioctl
Parameters: struct kvm_msi (in)
Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
diff --git a/Documentation/virtual/kvm/halt-polling.txt b/Documentation/virtual/kvm/halt-polling.txt
new file mode 100644
index 000000000000..4a8418318769
--- /dev/null
+++ b/Documentation/virtual/kvm/halt-polling.txt
@@ -0,0 +1,127 @@
+The KVM halt polling system
+===========================
+
+The KVM halt polling system provides a feature within KVM whereby the latency
+of a guest can, under some circumstances, be reduced by polling in the host
+for some time period after the guest has elected to no longer run by ceding.
+That is, when a guest vcpu has ceded, or in the case of powerpc when all of the
+vcpus of a single vcore have ceded, the host kernel polls for wakeup conditions
+before giving up the cpu to the scheduler in order to let something else run.
+
+Polling provides a latency advantage in cases where the guest can be run again
+very quickly by at least saving us a trip through the scheduler, normally on
+the order of a few microseconds, although performance benefits are workload
+dependent. In the event that no wakeup source arrives during the polling
+interval or some other task on the runqueue is runnable, the scheduler is
+invoked. Thus halt polling is especially useful on workloads with very short
+wakeup periods where the time spent halt polling is minimised and the time
+savings of not invoking the scheduler are distinguishable.
+
+The generic halt polling code is implemented in:
+
+ virt/kvm/kvm_main.c: kvm_vcpu_block()
+
+The powerpc kvm-hv specific case is implemented in:
+
+ arch/powerpc/kvm/book3s_hv.c: kvmppc_vcore_blocked()
+
+Halt Polling Interval
+=====================
+
+The maximum time for which to poll before invoking the scheduler, referred to
+as the halt polling interval, is increased and decreased based on the perceived
+effectiveness of the polling in an attempt to limit pointless polling.
+This value is stored in either the vcpu struct:
+
+ kvm_vcpu->halt_poll_ns
+
+or in the case of powerpc kvm-hv, in the vcore struct:
+
+ kvmppc_vcore->halt_poll_ns
+
+Thus this is a per vcpu (or vcore) value.
+
+During polling, if a wakeup source is received within the halt polling
+interval, the interval is left unchanged. In the event that a wakeup source
+isn't received during the polling interval (and thus schedule is invoked) there
+are two possibilities: either the polling interval and total block time[0] were
+less than the global max polling interval (see module params below), or the
+total block time was greater than the global max polling interval.
+
+In the event that both the polling interval and total block time were less than
+the global max polling interval then the polling interval can be increased in
+the hope that next time, during the longer polling interval, the wakeup source
+will arrive while the host is polling and the latency benefits will be
+realised. The polling interval is grown in the function grow_halt_poll_ns() and
+is multiplied by the module parameter halt_poll_ns_grow.
+
+In the event that the total block time was greater than the global max polling
+interval then the host will never poll for long enough (limited by the global
+max) to wake up during the polling interval, so it may as well be shrunk in order
+to avoid pointless polling. The polling interval is shrunk in the function
+shrink_halt_poll_ns() and is divided by the module parameter
+halt_poll_ns_shrink, or set to 0 iff halt_poll_ns_shrink == 0.
+
+It is worth noting that this adjustment process attempts to home in on some
+steady state polling interval but will only really do a good job for wakeups
+which come at an approximately constant rate, otherwise there will be constant
+adjustment of the polling interval.
+
+[0] total block time: the time between when the halt polling function is
+ invoked and a wakeup source received (irrespective of
+ whether the scheduler is invoked within that function).
+
+Module Parameters
+=================
+
+The kvm module has 3 tuneable module parameters to adjust the global max
+polling interval as well as the rate at which the polling interval is grown and
+shrunk. These variables are defined in include/linux/kvm_host.h and as module
+parameters in virt/kvm/kvm_main.c, or arch/powerpc/kvm/book3s_hv.c in the
+powerpc kvm-hv case.
+
+Module Parameter | Description | Default Value
+--------------------------------------------------------------------------------
+halt_poll_ns | The global max polling interval | KVM_HALT_POLL_NS_DEFAULT
+ | which defines the ceiling value |
+ | of the polling interval for | (per arch value)
+ | each vcpu. |
+--------------------------------------------------------------------------------
+halt_poll_ns_grow | The value by which the halt | 2
+ | polling interval is multiplied |
+ | in the grow_halt_poll_ns() |
+ | function. |
+--------------------------------------------------------------------------------
+halt_poll_ns_shrink | The value by which the halt | 0
+ | polling interval is divided in |
+ | the shrink_halt_poll_ns() |
+ | function. |
+--------------------------------------------------------------------------------
+
+These module parameters can be set via the sysfs files in:
+
+	/sys/module/kvm/parameters/
+
+Note that these module parameters are system-wide values and cannot be
+tuned on a per-vm basis.
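+
+For example, to cap the maximum polling interval at 200us, or to disable
+halt polling entirely (the values below are illustrative only):
+
+	echo 200000 > /sys/module/kvm/parameters/halt_poll_ns
+	echo 0 > /sys/module/kvm/parameters/halt_poll_ns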
+
+Further Notes
+=============
+
+- Care should be taken when setting the halt_poll_ns module parameter as a
+large value has the potential to drive the cpu usage to 100% on a machine which
+would be almost entirely idle otherwise. This is because, even if a guest's
+wakeups are far apart and very little work is done at each one, if the period
+between them is shorter than the global max polling interval (halt_poll_ns)
+then the host will always poll for the entire block time and thus cpu
+utilisation will go to 100%.
+
+- Halt polling essentially presents a trade-off between power usage and latency,
+and the module parameters should be used to tune this balance. Idle cpu time is
+essentially converted to host kernel time with the aim of decreasing latency
+when entering the guest.
+
+- Halt polling will only be conducted by the host when no other tasks are
+runnable on that cpu, otherwise the polling will cease immediately and
+schedule will be invoked to allow that other task to run. Thus halt polling
+does not allow a guest to mount a denial of service attack on the cpu.
diff --git a/MAINTAINERS b/MAINTAINERS
index 34ef63763566..60a01bdaf224 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -260,6 +260,12 @@ L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-104-idio-16.c
+ACCES 104-QUAD-8 IIO DRIVER
+M: William Breathitt Gray <vilhelm.gray@gmail.com>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: drivers/iio/counter/104-quad-8.c
+
ACENIC DRIVER
M: Jes Sorensen <jes@trained-monkey.org>
L: linux-acenic@sunsite.dk
@@ -803,7 +809,7 @@ S: Supported
F: drivers/iio/*/ad*
X: drivers/iio/*/adjd*
F: drivers/staging/iio/*/ad*
-F: staging/iio/trigger/iio-trig-bfin-timer.c
+F: drivers/staging/iio/trigger/iio-trig-bfin-timer.c
ANALOG DEVICES INC DMA DRIVERS
M: Lars-Peter Clausen <lars@metafoo.de>
@@ -2338,6 +2344,13 @@ F: include/uapi/linux/ax25.h
F: include/net/ax25.h
F: net/ax25/
+AXENTIA ASOC DRIVERS
+M: Peter Rosin <peda@axentia.se>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/sound/axentia,*
+F: sound/soc/atmel/tse850-pcm5142.c
+
AZ6007 DVB DRIVER
M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
M: Mauro Carvalho Chehab <mchehab@kernel.org>
@@ -2612,6 +2625,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git
S: Maintained
N: bcm2835
+F: drivers/staging/vc04_services
BROADCOM BCM47XX MIPS ARCHITECTURE
M: Hauke Mehrtens <hauke@hauke-m.de>
@@ -3060,6 +3074,12 @@ F: drivers/usb/host/whci/
F: drivers/usb/wusbcore/
F: include/linux/usb/wusb*
+HT16K33 LED CONTROLLER DRIVER
+M: Robin van der Gracht <robin@protonic.nl>
+S: Maintained
+F: drivers/auxdisplay/ht16k33.c
+F: Documentation/devicetree/bindings/display/ht16k33.txt
+
CFAG12864B LCD DRIVER
M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
W: http://miguelojeda.es/auxdisplay.htm
@@ -3179,15 +3199,15 @@ S: Supported
F: drivers/clocksource
CISCO FCOE HBA DRIVER
-M: Hiral Patel <hiralpat@cisco.com>
-M: Suma Ramars <sramars@cisco.com>
-M: Brian Uchino <buchino@cisco.com>
+M: Satish Kharat <satishkh@cisco.com>
+M: Sesidhar Baddela <sebaddel@cisco.com>
+M: Karan Tilak Kumar <kartilak@cisco.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/fnic/
CISCO SCSI HBA DRIVER
-M: Narsimhulu Musini <nmusini@cisco.com>
+M: Karan Tilak Kumar <kartilak@cisco.com>
M: Sesidhar Baddela <sebaddel@cisco.com>
L: linux-scsi@vger.kernel.org
S: Supported
@@ -4774,11 +4794,11 @@ M: David Woodhouse <dwmw2@infradead.org>
L: linux-embedded@vger.kernel.org
S: Maintained
-EMULEX/AVAGO LPFC FC/FCOE SCSI DRIVER
-M: James Smart <james.smart@avagotech.com>
-M: Dick Kennedy <dick.kennedy@avagotech.com>
+EMULEX/BROADCOM LPFC FC/FCOE SCSI DRIVER
+M: James Smart <james.smart@broadcom.com>
+M: Dick Kennedy <dick.kennedy@broadcom.com>
L: linux-scsi@vger.kernel.org
-W: http://www.avagotech.com
+W: http://www.broadcom.com
S: Supported
F: drivers/scsi/lpfc/
@@ -5036,7 +5056,9 @@ K: fmc_d.*register
FPGA MANAGER FRAMEWORK
M: Alan Tull <atull@opensource.altera.com>
R: Moritz Fischer <moritz.fischer@ettus.com>
+L: linux-fpga@vger.kernel.org
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git
F: drivers/fpga/
F: include/linux/fpga/fpga-mgr.h
W: http://www.rocketboards.org
@@ -5192,13 +5214,6 @@ F: sound/soc/fsl/fsl*
F: sound/soc/fsl/imx*
F: sound/soc/fsl/mpc8610_hpcd.c
-FREESCALE QORIQ MANAGEMENT COMPLEX DRIVER
-M: "J. German Rivera" <German.Rivera@freescale.com>
-M: Stuart Yoder <stuart.yoder@nxp.com>
-L: linux-kernel@vger.kernel.org
-S: Maintained
-F: drivers/staging/fsl-mc/
-
FREEVXFS FILESYSTEM
M: Christoph Hellwig <hch@infradead.org>
W: ftp://ftp.openlinux.org/pub/people/hch/vxfs
@@ -5232,6 +5247,7 @@ F: include/linux/fscache*.h
FS-CRYPTO: FILE SYSTEM LEVEL ENCRYPTION SUPPORT
M: Theodore Y. Ts'o <tytso@mit.edu>
M: Jaegeuk Kim <jaegeuk@kernel.org>
+L: linux-fsdevel@vger.kernel.org
S: Supported
F: fs/crypto/
F: include/linux/fscrypto.h
@@ -5708,7 +5724,6 @@ F: drivers/watchdog/hpwdt.c
HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
M: Don Brace <don.brace@microsemi.com>
-L: iss_storagedev@hp.com
L: esc.storagedev@microsemi.com
L: linux-scsi@vger.kernel.org
S: Supported
@@ -5719,7 +5734,6 @@ F: include/uapi/linux/cciss*.h
HEWLETT-PACKARD SMART CISS RAID DRIVER (cciss)
M: Don Brace <don.brace@microsemi.com>
-L: iss_storagedev@hp.com
L: esc.storagedev@microsemi.com
L: linux-scsi@vger.kernel.org
S: Supported
@@ -5940,6 +5954,7 @@ F: drivers/input/serio/hyperv-keyboard.c
F: drivers/pci/host/pci-hyperv.c
F: drivers/net/hyperv/
F: drivers/scsi/storvsc_drv.c
+F: drivers/uio/uio_hv_generic.c
F: drivers/video/fbdev/hyperv_fb.c
F: include/linux/hyperv.h
F: tools/hv/
@@ -6215,6 +6230,22 @@ L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/rc/iguanair.c
+IIO DIGITAL POTENTIOMETER DAC
+M: Peter Rosin <peda@axentia.se>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac
+F: Documentation/devicetree/bindings/iio/dac/dpot-dac.txt
+F: drivers/iio/dac/dpot-dac.c
+
+IIO ENVELOPE DETECTOR
+M: Peter Rosin <peda@axentia.se>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector
+F: Documentation/devicetree/bindings/iio/adc/envelope-detector.txt
+F: drivers/iio/adc/envelope-detector.c
+
IIO SUBSYSTEM AND DRIVERS
M: Jonathan Cameron <jic23@kernel.org>
R: Hartmut Knaack <knaack.h@gmx.de>
@@ -6596,6 +6627,13 @@ S: Maintained
F: arch/x86/include/asm/pmc_core.h
F: drivers/platform/x86/intel_pmc_core*
+INVENSENSE MPU-3050 GYROSCOPE DRIVER
+M: Linus Walleij <linus.walleij@linaro.org>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: drivers/iio/gyro/mpu3050*
+F: Documentation/devicetree/bindings/iio/gyroscope/inv,mpu3050.txt
+
IOC3 ETHERNET DRIVER
M: Ralf Baechle <ralf@linux-mips.org>
L: linux-mips@linux-mips.org
@@ -7803,6 +7841,7 @@ MCP4531 MICROCHIP DIGITAL POTENTIOMETER DRIVER
M: Peter Rosin <peda@axentia.se>
L: linux-iio@vger.kernel.org
S: Maintained
+F: Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531
F: drivers/iio/potentiometer/mcp4531.c
MEASUREMENT COMPUTING CIO-DAC IIO DRIVER
@@ -7934,12 +7973,12 @@ S: Maintained
F: drivers/net/wireless/mediatek/mt7601u/
MEGARAID SCSI/SAS DRIVERS
-M: Kashyap Desai <kashyap.desai@avagotech.com>
-M: Sumit Saxena <sumit.saxena@avagotech.com>
-M: Uday Lingala <uday.lingala@avagotech.com>
-L: megaraidlinux.pdl@avagotech.com
+M: Kashyap Desai <kashyap.desai@broadcom.com>
+M: Sumit Saxena <sumit.saxena@broadcom.com>
+M: Shivasharan S <shivasharan.srikanteshwara@broadcom.com>
+L: megaraidlinux.pdl@broadcom.com
L: linux-scsi@vger.kernel.org
-W: http://www.lsi.com
+W: http://www.avagotech.com/support/
S: Maintained
F: Documentation/scsi/megaraid.txt
F: drivers/scsi/megaraid.*
@@ -8419,7 +8458,6 @@ F: drivers/scsi/arm/oak.c
F: drivers/scsi/atari_scsi.*
F: drivers/scsi/dmx3191d.c
F: drivers/scsi/g_NCR5380.*
-F: drivers/scsi/g_NCR5380_mmio.c
F: drivers/scsi/mac_scsi.*
F: drivers/scsi/sun3_scsi.*
F: drivers/scsi/sun3_scsi_vme.c
@@ -9207,7 +9245,7 @@ F: drivers/misc/panel.c
PARALLEL PORT SUBSYSTEM
M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
-M: Sudip Mukherjee <sudip@vectorindia.org>
+M: Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
L: linux-parport@lists.infradead.org (subscribers-only)
S: Maintained
F: drivers/parport/
@@ -10056,6 +10094,12 @@ F: fs/qnx4/
F: include/uapi/linux/qnx4_fs.h
F: include/uapi/linux/qnxtypes.h
+QORIQ DPAA2 FSL-MC BUS DRIVER
+M: Stuart Yoder <stuart.yoder@nxp.com>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: drivers/staging/fsl-mc/
+
QT1010 MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
@@ -10518,7 +10562,7 @@ F: arch/s390/pci/
F: drivers/pci/hotplug/s390_pci_hpc.c
S390 ZCRYPT DRIVER
-M: Ingo Tuchscherer <ingo.tuchscherer@de.ibm.com>
+M: Harald Freudenberger <freude@de.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
@@ -10811,6 +10855,11 @@ W: http://www.sunplus.com
S: Supported
F: arch/score/
+SCR24X CHIP CARD INTERFACE DRIVER
+M: Lubomir Rintel <lkundrak@v3.sk>
+S: Supported
+F: drivers/char/pcmcia/scr24x_cs.c
+
SYSTEM CONTROL & POWER INTERFACE (SCPI) Message Protocol drivers
M: Sudeep Holla <sudeep.holla@arm.com>
L: linux-arm-kernel@lists.infradead.org
@@ -11214,7 +11263,7 @@ F: include/media/i2c/ov2659.h
SILICON MOTION SM712 FRAME BUFFER DRIVER
M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
M: Teddy Wang <teddy.wang@siliconmotion.com>
-M: Sudip Mukherjee <sudip@vectorindia.org>
+M: Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/fbdev/sm712*
@@ -11642,17 +11691,11 @@ F: drivers/staging/rtl8712/
STAGING - SILICON MOTION SM750 FRAME BUFFER DRIVER
M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
M: Teddy Wang <teddy.wang@siliconmotion.com>
-M: Sudip Mukherjee <sudip@vectorindia.org>
+M: Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/staging/sm750fb/
-STAGING - SLICOSS
-M: Lior Dotan <liodot@gmail.com>
-M: Christopher Harrer <charrer@alacritech.com>
-S: Odd Fixes
-F: drivers/staging/slicoss/
-
STAGING - SPEAKUP CONSOLE SPEECH DRIVER
M: William Hubbs <w.d.hubbs@gmail.com>
M: Chris Brannon <chris@the-brannons.com>
@@ -12452,6 +12495,12 @@ S: Maintained
F: Documentation/filesystems/udf.txt
F: fs/udf/
+UDRAW TABLET
+M: Bastien Nocera <hadess@hadess.net>
+L: linux-input@vger.kernel.org
+S: Maintained
+F: drivers/hid/hid-udraw.c
+
UFS FILESYSTEM
M: Evgeniy Dushistov <dushistov@mail.ru>
S: Maintained
@@ -12508,7 +12557,8 @@ F: Documentation/scsi/ufs.txt
F: drivers/scsi/ufs/
UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS
-M: Joao Pinto <Joao.Pinto@synopsys.com>
+M: Manjunath M Bettegowda <manjumb@synopsys.com>
+M: Prabu Thangamuthu <prabut@synopsys.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/ufs/*dwc*
@@ -13316,7 +13366,6 @@ F: drivers/media/tuners/tuner-xc2028.*
XEN HYPERVISOR INTERFACE
M: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-M: David Vrabel <david.vrabel@citrix.com>
M: Juergen Gross <jgross@suse.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
index a935523a1eb8..7c2dc19925a1 100644
--- a/arch/arm/boot/dts/rk3036.dtsi
+++ b/arch/arm/boot/dts/rk3036.dtsi
@@ -204,7 +204,6 @@
g-np-tx-fifo-size = <16>;
g-rx-fifo-size = <275>;
g-tx-fifo-size = <256 128 128 64 64 32>;
- g-use-dma;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 17ec2e2d7a60..74a749c566ee 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -596,7 +596,6 @@
g-np-tx-fifo-size = <16>;
g-rx-fifo-size = <275>;
g-tx-fifo-size = <256 128 128 64 64 32>;
- g-use-dma;
phys = <&usbphy0>;
phy-names = "usb2-phy";
status = "disabled";
diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
index e15beb3c671e..8fbd3c806fa0 100644
--- a/arch/arm/boot/dts/rk3xxx.dtsi
+++ b/arch/arm/boot/dts/rk3xxx.dtsi
@@ -181,7 +181,6 @@
g-np-tx-fifo-size = <16>;
g-rx-fifo-size = <275>;
g-tx-fifo-size = <256 128 128 64 64 32>;
- g-use-dma;
phys = <&usbphy0>;
phy-names = "usb2-phy";
status = "disabled";
diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h
index 9d874db13c0e..3522cbaed316 100644
--- a/arch/arm/include/asm/xen/hypercall.h
+++ b/arch/arm/include/asm/xen/hypercall.h
@@ -1,87 +1 @@
-/******************************************************************************
- * hypercall.h
- *
- * Linux-specific hypervisor handling.
- *
- * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef _ASM_ARM_XEN_HYPERCALL_H
-#define _ASM_ARM_XEN_HYPERCALL_H
-
-#include <linux/bug.h>
-
-#include <xen/interface/xen.h>
-#include <xen/interface/sched.h>
-#include <xen/interface/platform.h>
-
-long privcmd_call(unsigned call, unsigned long a1,
- unsigned long a2, unsigned long a3,
- unsigned long a4, unsigned long a5);
-int HYPERVISOR_xen_version(int cmd, void *arg);
-int HYPERVISOR_console_io(int cmd, int count, char *str);
-int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
-int HYPERVISOR_sched_op(int cmd, void *arg);
-int HYPERVISOR_event_channel_op(int cmd, void *arg);
-unsigned long HYPERVISOR_hvm_op(int op, void *arg);
-int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
-int HYPERVISOR_physdev_op(int cmd, void *arg);
-int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
-int HYPERVISOR_tmem_op(void *arg);
-int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
-int HYPERVISOR_platform_op_raw(void *arg);
-static inline int HYPERVISOR_platform_op(struct xen_platform_op *op)
-{
- op->interface_version = XENPF_INTERFACE_VERSION;
- return HYPERVISOR_platform_op_raw(op);
-}
-int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr);
-
-static inline int
-HYPERVISOR_suspend(unsigned long start_info_mfn)
-{
- struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
-
- /* start_info_mfn is unused on ARM */
- return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
-}
-
-static inline void
-MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
- unsigned int new_val, unsigned long flags)
-{
- BUG();
-}
-
-static inline void
-MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
- int count, int *success_count, domid_t domid)
-{
- BUG();
-}
-
-#endif /* _ASM_ARM_XEN_HYPERCALL_H */
+#include <xen/arm/hypercall.h>
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index 95251512e2c4..d6e7709d0688 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -1,39 +1 @@
-#ifndef _ASM_ARM_XEN_HYPERVISOR_H
-#define _ASM_ARM_XEN_HYPERVISOR_H
-
-#include <linux/init.h>
-
-extern struct shared_info *HYPERVISOR_shared_info;
-extern struct start_info *xen_start_info;
-
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
- PARAVIRT_LAZY_NONE,
- PARAVIRT_LAZY_MMU,
- PARAVIRT_LAZY_CPU,
-};
-
-static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
-{
- return PARAVIRT_LAZY_NONE;
-}
-
-extern struct dma_map_ops *xen_dma_ops;
-
-#ifdef CONFIG_XEN
-void __init xen_early_init(void);
-#else
-static inline void xen_early_init(void) { return; }
-#endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-static inline void xen_arch_register_cpu(int num)
-{
-}
-
-static inline void xen_arch_unregister_cpu(int num)
-{
-}
-#endif
-
-#endif /* _ASM_ARM_XEN_HYPERVISOR_H */
+#include <xen/arm/hypervisor.h>
diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h
index 75d596862892..88c0d75da190 100644
--- a/arch/arm/include/asm/xen/interface.h
+++ b/arch/arm/include/asm/xen/interface.h
@@ -1,85 +1 @@
-/******************************************************************************
- * Guest OS interface to ARM Xen.
- *
- * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
- */
-
-#ifndef _ASM_ARM_XEN_INTERFACE_H
-#define _ASM_ARM_XEN_INTERFACE_H
-
-#include <linux/types.h>
-
-#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
-
-#define __DEFINE_GUEST_HANDLE(name, type) \
- typedef struct { union { type *p; uint64_aligned_t q; }; } \
- __guest_handle_ ## name
-
-#define DEFINE_GUEST_HANDLE_STRUCT(name) \
- __DEFINE_GUEST_HANDLE(name, struct name)
-#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
-#define GUEST_HANDLE(name) __guest_handle_ ## name
-
-#define set_xen_guest_handle(hnd, val) \
- do { \
- if (sizeof(hnd) == 8) \
- *(uint64_t *)&(hnd) = 0; \
- (hnd).p = val; \
- } while (0)
-
-#define __HYPERVISOR_platform_op_raw __HYPERVISOR_platform_op
-
-#ifndef __ASSEMBLY__
-/* Explicitly size integers that represent pfns in the interface with
- * Xen so that we can have one ABI that works for 32 and 64 bit guests.
- * Note that this means that the xen_pfn_t type may be capable of
- * representing pfn's which the guest cannot represent in its own pfn
- * type. However since pfn space is controlled by the guest this is
- * fine since it simply wouldn't be able to create any sure pfns in
- * the first place.
- */
-typedef uint64_t xen_pfn_t;
-#define PRI_xen_pfn "llx"
-typedef uint64_t xen_ulong_t;
-#define PRI_xen_ulong "llx"
-typedef int64_t xen_long_t;
-#define PRI_xen_long "llx"
-/* Guest handles for primitive C types. */
-__DEFINE_GUEST_HANDLE(uchar, unsigned char);
-__DEFINE_GUEST_HANDLE(uint, unsigned int);
-DEFINE_GUEST_HANDLE(char);
-DEFINE_GUEST_HANDLE(int);
-DEFINE_GUEST_HANDLE(void);
-DEFINE_GUEST_HANDLE(uint64_t);
-DEFINE_GUEST_HANDLE(uint32_t);
-DEFINE_GUEST_HANDLE(xen_pfn_t);
-DEFINE_GUEST_HANDLE(xen_ulong_t);
-
-/* Maximum number of virtual CPUs in multi-processor guests. */
-#define MAX_VIRT_CPUS 1
-
-struct arch_vcpu_info { };
-struct arch_shared_info { };
-
-/* TODO: Move pvclock definitions some place arch independent */
-struct pvclock_vcpu_time_info {
- u32 version;
- u32 pad0;
- u64 tsc_timestamp;
- u64 system_time;
- u32 tsc_to_system_mul;
- s8 tsc_shift;
- u8 flags;
- u8 pad[2];
-} __attribute__((__packed__)); /* 32 bytes */
-
-/* It is OK to have a 12 bytes struct with no padding because it is packed */
-struct pvclock_wall_clock {
- u32 version;
- u32 sec;
- u32 nsec;
- u32 sec_hi;
-} __attribute__((__packed__));
-#endif
-
-#endif /* _ASM_ARM_XEN_INTERFACE_H */
+#include <xen/arm/interface.h>
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 95ce6ac3a971..b3ef061d8b74 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -1,98 +1 @@
-#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
-#define _ASM_ARM_XEN_PAGE_COHERENT_H
-
-#include <asm/page.h>
-#include <linux/dma-mapping.h>
-
-void __xen_dma_map_page(struct device *hwdev, struct page *page,
- dma_addr_t dev_addr, unsigned long offset, size_t size,
- enum dma_data_direction dir, unsigned long attrs);
-void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
-void __xen_dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir);
-
-void __xen_dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir);
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
-{
- return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
-{
- __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
- dma_addr_t dev_addr, unsigned long offset, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- unsigned long page_pfn = page_to_xen_pfn(page);
- unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
- unsigned long compound_pages =
- (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
- bool local = (page_pfn <= dev_pfn) &&
- (dev_pfn - page_pfn < compound_pages);
-
- /*
- * Dom0 is mapped 1:1, while the Linux page can span across
- * multiple Xen pages, it's not possible for it to contain a
- * mix of local and foreign Xen pages. So if the first xen_pfn
- * == mfn the page is local otherwise it's a foreign page
- * grant-mapped in dom0. If the page is local we can safely
- * call the native dma_ops function, otherwise we call the xen
- * specific function.
- */
- if (local)
- __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
- else
- __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- unsigned long pfn = PFN_DOWN(handle);
- /*
- * Dom0 is mapped 1:1, while the Linux page can be spanned accross
- * multiple Xen page, it's not possible to have a mix of local and
- * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
- * foreign mfn will always return false. If the page is local we can
- * safely call the native dma_ops function, otherwise we call the xen
- * specific function.
- */
- if (pfn_valid(pfn)) {
- if (__generic_dma_ops(hwdev)->unmap_page)
- __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
- } else
- __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- unsigned long pfn = PFN_DOWN(handle);
- if (pfn_valid(pfn)) {
- if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
- __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
- } else
- __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- unsigned long pfn = PFN_DOWN(handle);
- if (pfn_valid(pfn)) {
- if (__generic_dma_ops(hwdev)->sync_single_for_device)
- __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
- } else
- __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
-}
-
-#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
+#include <xen/arm/page-coherent.h>
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 415dbc6e43fd..31bbc803cecb 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -1,122 +1 @@
-#ifndef _ASM_ARM_XEN_PAGE_H
-#define _ASM_ARM_XEN_PAGE_H
-
-#include <asm/page.h>
-#include <asm/pgtable.h>
-
-#include <linux/pfn.h>
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-
-#include <xen/xen.h>
-#include <xen/interface/grant_table.h>
-
-#define phys_to_machine_mapping_valid(pfn) (1)
-
-/* Xen machine address */
-typedef struct xmaddr {
- phys_addr_t maddr;
-} xmaddr_t;
-
-/* Xen pseudo-physical address */
-typedef struct xpaddr {
- phys_addr_t paddr;
-} xpaddr_t;
-
-#define XMADDR(x) ((xmaddr_t) { .maddr = (x) })
-#define XPADDR(x) ((xpaddr_t) { .paddr = (x) })
-
-#define INVALID_P2M_ENTRY (~0UL)
-
-/*
- * The pseudo-physical frame (pfn) used in all the helpers is always based
- * on Xen page granularity (i.e 4KB).
- *
- * A Linux page may be split across multiple non-contiguous Xen page so we
- * have to keep track with frame based on 4KB page granularity.
- *
- * PV drivers should never make a direct usage of those helpers (particularly
- * pfn_to_gfn and gfn_to_pfn).
- */
-
-unsigned long __pfn_to_mfn(unsigned long pfn);
-extern struct rb_root phys_to_mach;
-
-/* Pseudo-physical <-> Guest conversion */
-static inline unsigned long pfn_to_gfn(unsigned long pfn)
-{
- return pfn;
-}
-
-static inline unsigned long gfn_to_pfn(unsigned long gfn)
-{
- return gfn;
-}
-
-/* Pseudo-physical <-> BUS conversion */
-static inline unsigned long pfn_to_bfn(unsigned long pfn)
-{
- unsigned long mfn;
-
- if (phys_to_mach.rb_node != NULL) {
- mfn = __pfn_to_mfn(pfn);
- if (mfn != INVALID_P2M_ENTRY)
- return mfn;
- }
-
- return pfn;
-}
-
-static inline unsigned long bfn_to_pfn(unsigned long bfn)
-{
- return bfn;
-}
-
-#define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn)
-
-/* VIRT <-> GUEST conversion */
-#define virt_to_gfn(v) (pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT))
-#define gfn_to_virt(m) (__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT))
-
-/* Only used in PV code. But ARM guests are always HVM. */
-static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
-{
- BUG();
-}
-
-/* TODO: this shouldn't be here but it is because the frontend drivers
- * are using it (its rolled in headers) even though we won't hit the code path.
- * So for right now just punt with this.
- */
-static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
-{
- BUG();
- return NULL;
-}
-
-extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
- struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count);
-
-extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_unmap_grant_ref *kunmap_ops,
- struct page **pages, unsigned int count);
-
-bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
- unsigned long nr_pages);
-
-static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
- return __set_phys_to_machine(pfn, mfn);
-}
-
-#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
-#define xen_unmap(cookie) iounmap((cookie))
-
-bool xen_arch_need_swiotlb(struct device *dev,
- phys_addr_t phys,
- dma_addr_t dev_addr);
-unsigned long xen_get_swiotlb_free_pages(unsigned int order);
-
-#endif /* _ASM_ARM_XEN_PAGE_H */
+#include <xen/arm/page.h>
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index b38c10c73579..af05f8e0903e 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -87,9 +87,11 @@ struct kvm_regs {
/* Supported VGICv3 address types */
#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
+#define KVM_VGIC_ITS_ADDR_TYPE 4
#define KVM_VGIC_V3_DIST_SIZE SZ_64K
#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
+#define KVM_VGIC_V3_ITS_SIZE (2 * SZ_64K)
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
#define KVM_ARM_VCPU_PSCI_0_2 1 /* CPU uses PSCI v0.2 */
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 3e1cd0452d67..90d0176fb30d 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -34,6 +34,7 @@ config KVM
select HAVE_KVM_IRQFD
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQ_ROUTING
+ select HAVE_KVM_MSI
depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
---help---
Support hosting virtualized guest machines.
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index f19842ea5418..d571243ab4d1 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -32,5 +32,6 @@ obj-y += $(KVM)/arm/vgic/vgic-mmio.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
+obj-y += $(KVM)/arm/vgic/vgic-its.o
obj-y += $(KVM)/irqchip.o
obj-y += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 19b5f5c1c0ff..8f92efa8460e 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -221,6 +221,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
+ case KVM_CAP_MSI_DEVID:
+ if (!kvm)
+ r = -EINVAL;
+ else
+ r = kvm->arch.vgic.msis_require_devid;
+ break;
default:
r = kvm_arch_dev_ioctl_check_extension(kvm, ext);
break;
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index f193414d0f6f..4986dc0c1dff 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -372,8 +372,7 @@ static int __init xen_guest_init(void)
* for secondary CPUs as they are brought up.
* For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
*/
- xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
- sizeof(struct vcpu_info));
+ xen_vcpu_info = alloc_percpu(struct vcpu_info);
if (xen_vcpu_info == NULL)
return -ENOMEM;
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index d062f08f5020..bd62d94f8ac5 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -186,7 +186,6 @@ struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);
static struct dma_map_ops xen_swiotlb_dma_ops = {
- .mapping_error = xen_swiotlb_dma_mapping_error,
.alloc = xen_swiotlb_alloc_coherent,
.free = xen_swiotlb_free_coherent,
.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 657be7f5014e..111742126897 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -110,6 +110,7 @@ config ARM64
select POWER_SUPPLY
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
+ select THREAD_INFO_IN_TASK
help
ARM 64-bit (AArch64) Linux support.
@@ -239,6 +240,9 @@ config PGTABLE_LEVELS
default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
+config ARCH_SUPPORTS_UPROBES
+ def_bool y
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -791,6 +795,14 @@ config SETEND_EMULATION
If unsure, say Y
endif
+config ARM64_SW_TTBR0_PAN
+ bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
+ help
+ Enabling this option prevents the kernel from accessing
+ user-space memory directly by pointing TTBR0_EL1 to a reserved
+ zeroed area and reserved ASID. The user access routines
+ restore the valid TTBR0_EL1 temporarily.
+
menu "ARMv8.1 architectural features"
config ARM64_HW_AFDBM
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index b661fe742615..d1ebd46872fd 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -2,9 +2,13 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
-config ARM64_PTDUMP
+config ARM64_PTDUMP_CORE
+ def_bool n
+
+config ARM64_PTDUMP_DEBUGFS
bool "Export kernel pagetable layout to userspace via debugfs"
depends on DEBUG_KERNEL
+ select ARM64_PTDUMP_CORE
select DEBUG_FS
help
Say Y here if you want to show the kernel pagetable layout in a
@@ -38,6 +42,35 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
of TEXT_OFFSET and platforms must not require a specific
value.
+config DEBUG_WX
+ bool "Warn on W+X mappings at boot"
+ select ARM64_PTDUMP_CORE
+ ---help---
+ Generate a warning if any W+X mappings are found at boot.
+
+ This is useful for discovering cases where the kernel is leaving
+ W+X mappings after applying NX, as such mappings are a security risk.
+ This check also includes UXN, which should be set on all kernel
+ mappings.
+
+ Look for a message in dmesg output like this:
+
+ arm64/mm: Checked W+X mappings: passed, no W+X pages found.
+
+ or like this, if the check failed:
+
+ arm64/mm: Checked W+X mappings: FAILED, <N> W+X pages found.
+
+	  Note that even if the check fails, your kernel is possibly
+	  still fine, as W+X mappings are not a security hole in
+	  themselves; what they do is make the exploitation of other
+	  unfixed kernel bugs easier.
+
+ There is no runtime or memory usage effect of this option
+ once the kernel has booted up - it's a one time check.
+
+ If in doubt, say "Y".
+
config DEBUG_SET_MODULE_RONX
bool "Set loadable kernel module data as NX and text as RO"
depends on MODULES
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 3635b8662724..b9a4a934ca05 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -37,10 +37,16 @@ $(warning LSE atomics not supported by binutils)
endif
endif
-KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr)
+brokengasinst := $(call as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n,,-DCONFIG_BROKEN_GAS_INST=1)
+
+ifneq ($(brokengasinst),)
+$(warning Detected assembler with broken .inst; disassembly will be unreliable)
+endif
+
+KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
-KBUILD_AFLAGS += $(lseinstr)
+KBUILD_AFLAGS += $(lseinstr) $(brokengasinst)
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index 17839db585d5..e0ea60382087 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -747,7 +747,6 @@
clocks = <&sys_ctrl HI6220_USBOTG_HCLK>;
clock-names = "otg";
dr_mode = "otg";
- g-use-dma;
g-rx-fifo-size = <512>;
g-np-tx-fifo-size = <128>;
g-tx-fifo-size = <128 128 128 128 128 128>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
index 2a7f731c7759..0ecaad4333a7 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
@@ -34,15 +34,6 @@
chosen { };
- usb_p1_vbus: regulator@0 {
- compatible = "regulator-fixed";
- regulator-name = "usb_vbus";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&pio 130 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
connector {
compatible = "hdmi-connector";
label = "hdmi";
@@ -54,6 +45,29 @@
};
};
};
+
+ extcon_usb: extcon_iddig {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pio 16 GPIO_ACTIVE_HIGH>;
+ };
+
+ usb_p1_vbus: regulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&pio 130 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ usb_p0_vbus: regulator@1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&pio 9 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
};
&cec {
@@ -243,6 +257,20 @@
bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
};
};
+
+ usb_id_pins_float: usb_iddig_pull_up {
+ pins_iddig {
+ pinmux = <MT8173_PIN_16_IDDIG__FUNC_IDDIG>;
+ bias-pull-up;
+ };
+ };
+
+ usb_id_pins_ground: usb_iddig_pull_down {
+ pins_iddig {
+ pinmux = <MT8173_PIN_16_IDDIG__FUNC_IDDIG>;
+ bias-pull-down;
+ };
+ };
};
&pwm0 {
@@ -469,12 +497,25 @@
status = "okay";
};
+&ssusb {
+ vusb33-supply = <&mt6397_vusb_reg>;
+ vbus-supply = <&usb_p0_vbus>;
+ extcon = <&extcon_usb>;
+ dr_mode = "otg";
+ mediatek,enable-wakeup;
+ pinctrl-names = "default", "id_float", "id_ground";
+ pinctrl-0 = <&usb_id_pins_float>;
+ pinctrl-1 = <&usb_id_pins_float>;
+ pinctrl-2 = <&usb_id_pins_ground>;
+ status = "okay";
+};
+
&uart0 {
status = "okay";
};
-&usb30 {
+&usb_host {
vusb33-supply = <&mt6397_vusb_reg>;
vbus-supply = <&usb_p1_vbus>;
- mediatek,wakeup-src = <1>;
+ status = "okay";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 1c71e256601d..c2d588ca59b7 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -707,11 +707,14 @@
status = "disabled";
};
- usb30: usb@11270000 {
- compatible = "mediatek,mt8173-xhci";
- reg = <0 0x11270000 0 0x1000>,
+ ssusb: usb@11271000 {
+ compatible = "mediatek,mt8173-mtu3";
+ reg = <0 0x11271000 0 0x3000>,
<0 0x11280700 0 0x0100>;
- interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
+ reg-names = "mac", "ippc";
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_LOW>;
+ phys = <&phy_port0 PHY_TYPE_USB3>,
+ <&phy_port1 PHY_TYPE_USB2>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
clocks = <&topckgen CLK_TOP_USB30_SEL>,
<&pericfg CLK_PERI_USB0>,
@@ -719,10 +722,22 @@
clock-names = "sys_ck",
"wakeup_deb_p0",
"wakeup_deb_p1";
- phys = <&phy_port0 PHY_TYPE_USB3>,
- <&phy_port1 PHY_TYPE_USB2>;
mediatek,syscon-wakeup = <&pericfg>;
- status = "okay";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "disabled";
+
+ usb_host: xhci@11270000 {
+ compatible = "mediatek,mt8173-xhci";
+ reg = <0 0x11270000 0 0x1000>;
+ reg-names = "mac";
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
+ clocks = <&topckgen CLK_TOP_USB30_SEL>;
+ clock-names = "sys_ck";
+ status = "disabled";
+ };
};
u3phy: usb-phy@11290000 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index 0fcb2147c9f9..df231c4df5a5 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -537,7 +537,6 @@
g-np-tx-fifo-size = <16>;
g-rx-fifo-size = <275>;
g-tx-fifo-size = <256 128 128 64 64 32>;
- g-use-dma;
status = "disabled";
};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 6be08113a96d..c3caaddde6cc 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -82,6 +82,7 @@ CONFIG_KEXEC=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
CONFIG_CPU_IDLE=y
+CONFIG_HIBERNATION=y
CONFIG_ARM_CPUIDLE=y
CONFIG_CPU_FREQ=y
CONFIG_CPUFREQ_DT=y
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index b4ab238a59ec..8365a84c2640 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -1,7 +1,6 @@
generic-y += bugs.h
generic-y += clkdev.h
generic-y += cputime.h
-generic-y += current.h
generic-y += delay.h
generic-y += div64.h
generic-y += dma.h
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 28bfe6132eb6..446f6c46d4b1 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -41,6 +41,15 @@
msr daifclr, #2
.endm
+ .macro save_and_disable_irq, flags
+ mrs \flags, daif
+ msr daifset, #2
+ .endm
+
+ .macro restore_irq, flags
+ msr daif, \flags
+ .endm
+
/*
* Enable and disable debug exceptions.
*/
@@ -202,14 +211,25 @@ lr .req x30 // link register
.endm
/*
+ * @dst: Result of per_cpu(sym, smp_processor_id())
* @sym: The name of the per-cpu variable
- * @reg: Result of per_cpu(sym, smp_processor_id())
* @tmp: scratch register
*/
- .macro this_cpu_ptr, sym, reg, tmp
- adr_l \reg, \sym
+ .macro adr_this_cpu, dst, sym, tmp
+ adr_l \dst, \sym
mrs \tmp, tpidr_el1
- add \reg, \reg, \tmp
+ add \dst, \dst, \tmp
+ .endm
+
+ /*
+ * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
+ * @sym: The name of the per-cpu variable
+ * @tmp: scratch register
+ */
+ .macro ldr_this_cpu dst, sym, tmp
+ adr_l \dst, \sym
+ mrs \tmp, tpidr_el1
+ ldr \dst, [\dst, \tmp]
.endm
/*
@@ -395,4 +415,24 @@ alternative_endif
movk \reg, :abs_g0_nc:\val
.endm
+/*
+ * Return the current thread_info.
+ */
+ .macro get_thread_info, rd
+ mrs \rd, sp_el0
+ .endm
+
+/*
+ * Errata workaround post TTBR0_EL1 update.
+ */
+ .macro post_ttbr0_update_workaround
+#ifdef CONFIG_CAVIUM_ERRATUM_27456
+alternative_if ARM64_WORKAROUND_CAVIUM_27456
+ ic iallu
+ dsb nsh
+ isb
+alternative_else_nop_endif
+#endif
+ .endm
+
#endif /* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 2e5fb976a572..5a2a6ee65f65 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -65,12 +65,12 @@
* - kaddr - page address
* - size - region size
*/
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern void __clean_dcache_area_poc(void *addr, size_t len);
extern void __clean_dcache_area_pou(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
+extern void sync_icache_aliases(void *kaddr, unsigned long len);
static inline void flush_cache_mm(struct mm_struct *mm)
{
@@ -81,6 +81,11 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
{
}
+static inline void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+}
+
/*
* Cache maintenance functions used by the DMA API. No to be used directly.
*/
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 87b446535185..4174f09678c4 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -34,7 +34,8 @@
#define ARM64_HAS_32BIT_EL0 13
#define ARM64_HYP_OFFSET_LOW 14
#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
+#define ARM64_HAS_NO_FPSIMD 16
-#define ARM64_NCAPS 16
+#define ARM64_NCAPS 17
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 0bc0b1de90c4..b4989df48670 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -9,8 +9,6 @@
#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H
-#include <linux/jump_label.h>
-
#include <asm/cpucaps.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>
@@ -27,6 +25,8 @@
#ifndef __ASSEMBLY__
+#include <linux/bug.h>
+#include <linux/jump_label.h>
#include <linux/kernel.h>
/* CPU feature register tracking */
@@ -104,14 +104,19 @@ static inline bool cpu_have_feature(unsigned int num)
return elf_hwcap & (1UL << num);
}
+/* System capability check for constant caps */
+static inline bool cpus_have_const_cap(int num)
+{
+ if (num >= ARM64_NCAPS)
+ return false;
+ return static_branch_unlikely(&cpu_hwcap_keys[num]);
+}
+
static inline bool cpus_have_cap(unsigned int num)
{
if (num >= ARM64_NCAPS)
return false;
- if (__builtin_constant_p(num))
- return static_branch_unlikely(&cpu_hwcap_keys[num]);
- else
- return test_bit(num, cpu_hwcaps);
+ return test_bit(num, cpu_hwcaps);
}
static inline void cpus_set_cap(unsigned int num)
@@ -200,7 +205,7 @@ static inline bool cpu_supports_mixed_endian_el0(void)
static inline bool system_supports_32bit_el0(void)
{
- return cpus_have_cap(ARM64_HAS_32BIT_EL0);
+ return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
}
static inline bool system_supports_mixed_endian_el0(void)
@@ -208,6 +213,17 @@ static inline bool system_supports_mixed_endian_el0(void)
return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
}
+static inline bool system_supports_fpsimd(void)
+{
+ return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
+}
+
+static inline bool system_uses_ttbr0_pan(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
+ !cpus_have_cap(ARM64_HAS_PAN);
+}
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm64/include/asm/current.h b/arch/arm64/include/asm/current.h
new file mode 100644
index 000000000000..f2bcbe2d9889
--- /dev/null
+++ b/arch/arm64/include/asm/current.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_CURRENT_H
+#define __ASM_CURRENT_H
+
+#include <linux/compiler.h>
+
+#include <asm/sysreg.h>
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+static __always_inline struct task_struct *get_current(void)
+{
+ return (struct task_struct *)read_sysreg(sp_el0);
+}
+
+#define current get_current()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_CURRENT_H */
+
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index b71420a12f26..a44cf5225429 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -68,6 +68,9 @@
#define BRK64_ESR_MASK 0xFFFF
#define BRK64_ESR_KPROBES 0x0004
#define BRK64_OPCODE_KPROBES (AARCH64_BREAK_MON | (BRK64_ESR_KPROBES << 5))
+/* uprobes BRK opcodes with ESR encoding */
+#define BRK64_ESR_UPROBES 0x0005
+#define BRK64_OPCODE_UPROBES (AARCH64_BREAK_MON | (BRK64_ESR_UPROBES << 5))
/* AArch32 */
#define DBG_ESR_EVT_BKPT 0x4
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 771b3f0bc757..0b6b1633017f 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -1,6 +1,7 @@
#ifndef _ASM_EFI_H
#define _ASM_EFI_H
+#include <asm/cpufeature.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/neon.h>
@@ -78,7 +79,30 @@ static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
static inline void efi_set_pgd(struct mm_struct *mm)
{
- switch_mm(NULL, mm, NULL);
+ __switch_mm(mm);
+
+ if (system_uses_ttbr0_pan()) {
+ if (mm != current->active_mm) {
+ /*
+ * Update the current thread's saved ttbr0 since it is
+ * restored as part of a return from exception. Set
+ * the hardware TTBR0_EL1 using cpu_switch_mm()
+ * directly to enable potential errata workarounds.
+ */
+ update_saved_ttbr0(current, mm);
+ cpu_switch_mm(mm->pgd, mm);
+ } else {
+ /*
+ * Defer the switch to the current thread's TTBR0_EL1
+ * until uaccess_enable(). Restore the current
+ * thread's saved ttbr0 corresponding to its active_mm
+ * (if different from init_mm).
+ */
+ cpu_set_reserved_ttbr0();
+ if (current->active_mm != &init_mm)
+ update_saved_ttbr0(current, current->active_mm);
+ }
+ }
}
void efi_virtmap_load(void);
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index a55384f4a5d7..5d1700425efe 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -138,7 +138,11 @@ typedef struct user_fpsimd_state elf_fpregset_t;
*/
#define ELF_PLAT_INIT(_r, load_addr) (_r)->regs[0] = 0
-#define SET_PERSONALITY(ex) clear_thread_flag(TIF_32BIT);
+#define SET_PERSONALITY(ex) \
+({ \
+ clear_bit(TIF_32BIT, &current->mm->context.flags); \
+ clear_thread_flag(TIF_32BIT); \
+})
/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO \
@@ -183,7 +187,11 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
((x)->e_flags & EF_ARM_EABI_MASK))
#define compat_start_thread compat_start_thread
-#define COMPAT_SET_PERSONALITY(ex) set_thread_flag(TIF_32BIT);
+#define COMPAT_SET_PERSONALITY(ex) \
+({ \
+ set_bit(TIF_32BIT, &current->mm->context.flags); \
+ set_thread_flag(TIF_32BIT); \
+ })
#define COMPAT_ARCH_DLINFO
extern int aarch32_setup_vectors_page(struct linux_binprm *bprm,
int uses_interp);
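
SET_PERSONALITY now has to do two things (clear the per-mm flag and the thread flag), so the single-statement macro becomes a GCC statement expression, which keeps it safe to use wherever the old one-call form appeared. A tiny standalone illustration of the ({ ... }) idiom, with invented flag variables rather than the kernel's:

    #include <stdio.h>

    static unsigned long mm_flags, thread_flags;

    /* Two actions wrapped so the macro still behaves like a single expression. */
    #define SET_PERSONALITY_SKETCH()            \
    ({                                          \
            mm_flags &= ~(1UL << 0);            \
            thread_flags &= ~(1UL << 0);        \
    })

    int main(void)
    {
            mm_flags = thread_flags = 1;
            if (1)
                    SET_PERSONALITY_SKETCH();   /* no dangling-else surprises */
            printf("mm=%lu thread=%lu\n", mm_flags, thread_flags);
            return 0;
    }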
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index f2585cdd32c2..85c4a8981d47 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -21,15 +21,12 @@
#include <linux/futex.h>
#include <linux/uaccess.h>
-#include <asm/alternative.h>
-#include <asm/cpufeature.h>
#include <asm/errno.h>
-#include <asm/sysreg.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
+do { \
+ uaccess_enable(); \
asm volatile( \
- ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
- CONFIG_ARM64_PAN) \
" prfm pstl1strm, %2\n" \
"1: ldxr %w1, %2\n" \
insn "\n" \
@@ -44,11 +41,11 @@
" .popsection\n" \
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
- ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
- CONFIG_ARM64_PAN) \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
: "r" (oparg), "Ir" (-EFAULT) \
- : "memory")
+ : "memory"); \
+ uaccess_disable(); \
+} while (0)
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -118,8 +115,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
+ uaccess_enable();
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
" prfm pstl1strm, %2\n"
"1: ldxr %w1, %2\n"
" sub %w3, %w1, %w4\n"
@@ -134,10 +131,10 @@ ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
" .popsection\n"
_ASM_EXTABLE(1b, 4b)
_ASM_EXTABLE(2b, 4b)
-ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
: "memory");
+ uaccess_disable();
*uval = val;
return ret;
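
Wrapping __futex_atomic_op in do { ... } while (0) lets uaccess_enable()/uaccess_disable() bracket the inline asm while the macro still expands to a single statement inside if/else bodies. A minimal sketch of that bracketing pattern, with stub enable/disable functions standing in for the kernel helpers:

    #include <stdio.h>

    static void uaccess_enable_stub(void)  { puts("uaccess enabled");  }
    static void uaccess_disable_stub(void) { puts("uaccess disabled"); }

    /* do/while(0) glues the three statements together as one. */
    #define GUARDED_OP(op)                      \
    do {                                        \
            uaccess_enable_stub();              \
            op;                                 \
            uaccess_disable_stub();             \
    } while (0)

    int main(void)
    {
            int x = 0;

            if (x == 0)
                    GUARDED_OP(x += 1);         /* expands safely under the if */
            else
                    puts("never");
            printf("x=%d\n", x);
            return 0;
    }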
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 9510ace570e2..b6b167ac082b 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -77,7 +77,11 @@ static inline void decode_ctrl_reg(u32 reg,
/* Lengths */
#define ARM_BREAKPOINT_LEN_1 0x1
#define ARM_BREAKPOINT_LEN_2 0x3
+#define ARM_BREAKPOINT_LEN_3 0x7
#define ARM_BREAKPOINT_LEN_4 0xf
+#define ARM_BREAKPOINT_LEN_5 0x1f
+#define ARM_BREAKPOINT_LEN_6 0x3f
+#define ARM_BREAKPOINT_LEN_7 0x7f
#define ARM_BREAKPOINT_LEN_8 0xff
/* Kernel stepping */
@@ -119,7 +123,7 @@ struct perf_event;
struct pmu;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
- int *gen_len, int *gen_type);
+ int *gen_len, int *gen_type, int *offset);
extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
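
The new ARM_BREAKPOINT_LEN_3/5/6/7 values fill out the byte-address-select field: LEN_n is simply a contiguous mask of n set bits, and arch_bp_generic_fields() now also reports the offset of that mask within the watched doubleword. A hedged sketch of the encode/decode arithmetic used later in hw_breakpoint.c:

    #include <stdio.h>

    /* Byte-address-select mask for 'len' bytes starting 'offset' bytes into the
     * doubleword: ARM_BREAKPOINT_LEN_n == (1 << n) - 1, shifted into place. */
    static unsigned int bas_encode(unsigned int offset, unsigned int len)
    {
            return ((1u << len) - 1) << offset;
    }

    static void bas_decode(unsigned int bas, unsigned int *offset, unsigned int *len)
    {
            /* bas must be a non-zero contiguous mask */
            *offset = __builtin_ctz(bas);           /* like __ffs(ctrl.len) */
            *len = __builtin_popcount(bas);
    }

    int main(void)
    {
            unsigned int off, len;
            unsigned int bas = bas_encode(3, 2);    /* watch bytes 3..4 */

            bas_decode(bas, &off, &len);
            printf("bas=0x%02x offset=%u len=%u\n", bas, off, len);
            return 0;
    }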
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 7e51d1b57c0c..7803343e5881 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -19,6 +19,7 @@
#ifndef __ASM_KERNEL_PGTABLE_H
#define __ASM_KERNEL_PGTABLE_H
+#include <asm/pgtable.h>
#include <asm/sparsemem.h>
/*
@@ -54,6 +55,12 @@
#define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
#define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+#define RESERVED_TTBR0_SIZE (PAGE_SIZE)
+#else
+#define RESERVED_TTBR0_SIZE (0)
+#endif
+
/* Initial memory map size */
#if ARM64_SWAPPER_USES_SECTION_MAPS
#define SWAPPER_BLOCK_SHIFT SECTION_SHIFT
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 8d9fce037b2f..47619411f0ff 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -19,6 +19,7 @@
typedef struct {
atomic64_t id;
void *vdso;
+ unsigned long flags;
} mm_context_t;
/*
@@ -34,7 +35,7 @@ extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
- pgprot_t prot, bool allow_block_mappings);
+ pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
#endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index a50185375f09..0363fe80455c 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -23,6 +23,7 @@
#include <linux/sched.h>
#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
@@ -103,7 +104,7 @@ static inline void cpu_uninstall_idmap(void)
local_flush_tlb_all();
cpu_set_default_tcr_t0sz();
- if (mm != &init_mm)
+ if (mm != &init_mm && !system_uses_ttbr0_pan())
cpu_switch_mm(mm->pgd, mm);
}
@@ -163,20 +164,26 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
-/*
- * This is the actual mm switch as far as the scheduler
- * is concerned. No registers are touched. We avoid
- * calling the CPU specific function when the mm hasn't
- * actually changed.
- */
-static inline void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+static inline void update_saved_ttbr0(struct task_struct *tsk,
+ struct mm_struct *mm)
{
- unsigned int cpu = smp_processor_id();
+ if (system_uses_ttbr0_pan()) {
+ BUG_ON(mm->pgd == swapper_pg_dir);
+ task_thread_info(tsk)->ttbr0 =
+ virt_to_phys(mm->pgd) | ASID(mm) << 48;
+ }
+}
+#else
+static inline void update_saved_ttbr0(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+}
+#endif
- if (prev == next)
- return;
+static inline void __switch_mm(struct mm_struct *next)
+{
+ unsigned int cpu = smp_processor_id();
/*
* init_mm.pgd does not contain any user mappings and it is always
@@ -190,8 +197,26 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
check_and_switch_context(next, cpu);
}
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ if (prev != next)
+ __switch_mm(next);
+
+ /*
+ * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
+ * value may not have been initialised yet (activate_mm caller) or the
+ * ASID has changed since the last run (following the context switch
+ * of another thread of the same process). Avoid setting the reserved
+ * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
+ */
+ if (next != &init_mm)
+ update_saved_ttbr0(tsk, next);
+}
+
#define deactivate_mm(tsk,mm) do { } while (0)
-#define activate_mm(prev,next) switch_mm(prev, next, NULL)
+#define activate_mm(prev,next) switch_mm(prev, next, current)
void verify_cpu_asid_bits(void);
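
update_saved_ttbr0() stores the value that will later be written to TTBR0_EL1: the physical address of the task's pgd with the 16-bit ASID packed into bits [63:48]. A small sketch of that packing, with the field layout taken from the expression in the hunk and made-up sample numbers:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t pack_ttbr0(uint64_t pgd_phys, uint64_t asid)
    {
            return pgd_phys | (asid << 48);         /* BADDR | ASID << 48 */
    }

    static void unpack_ttbr0(uint64_t ttbr, uint64_t *pgd_phys, uint64_t *asid)
    {
            *asid = ttbr >> 48;
            *pgd_phys = ttbr & ((1ULL << 48) - 1);
    }

    int main(void)
    {
            uint64_t ttbr = pack_ttbr0(0x40001000ULL, 0x2aULL);     /* invented values */
            uint64_t pgd, asid;

            unpack_ttbr0(ttbr, &pgd, &asid);
            printf("ttbr=0x%016llx pgd=0x%llx asid=0x%llx\n",
                   (unsigned long long)ttbr, (unsigned long long)pgd,
                   (unsigned long long)asid);
            return 0;
    }

The entry.S hunk later in the series tests exactly this field (tst x21, #0xffff << 48) to recognise the reserved ASID used while TTBR0 accesses are disabled.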
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
index 13ce4cc18e26..ad4cdc966c0f 100644
--- a/arch/arm64/include/asm/neon.h
+++ b/arch/arm64/include/asm/neon.h
@@ -9,8 +9,9 @@
*/
#include <linux/types.h>
+#include <asm/fpsimd.h>
-#define cpu_has_neon() (1)
+#define cpu_has_neon() system_supports_fpsimd()
#define kernel_neon_begin() kernel_neon_begin_partial(32)
diff --git a/arch/arm64/include/asm/opcodes.h b/arch/arm64/include/asm/opcodes.h
deleted file mode 100644
index 123f45d92cd1..000000000000
--- a/arch/arm64/include/asm/opcodes.h
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef CONFIG_CPU_BIG_ENDIAN
-#define CONFIG_CPU_ENDIAN_BE8 CONFIG_CPU_BIG_ENDIAN
-#endif
-
-#include <../../arm/include/asm/opcodes.h>
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 5394c8405e66..3bd498e4de4c 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,6 +16,8 @@
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
+#include <asm/stack_pointer.h>
+
static inline void set_my_cpu_offset(unsigned long off)
{
asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -101,16 +103,16 @@ static inline unsigned long __percpu_read(void *ptr, int size)
switch (size) {
case 1:
- ret = ACCESS_ONCE(*(u8 *)ptr);
+ ret = READ_ONCE(*(u8 *)ptr);
break;
case 2:
- ret = ACCESS_ONCE(*(u16 *)ptr);
+ ret = READ_ONCE(*(u16 *)ptr);
break;
case 4:
- ret = ACCESS_ONCE(*(u32 *)ptr);
+ ret = READ_ONCE(*(u32 *)ptr);
break;
case 8:
- ret = ACCESS_ONCE(*(u64 *)ptr);
+ ret = READ_ONCE(*(u64 *)ptr);
break;
default:
BUILD_BUG();
@@ -123,16 +125,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
{
switch (size) {
case 1:
- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
+ WRITE_ONCE(*(u8 *)ptr, (u8)val);
break;
case 2:
- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
+ WRITE_ONCE(*(u16 *)ptr, (u16)val);
break;
case 4:
- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
+ WRITE_ONCE(*(u32 *)ptr, (u32)val);
break;
case 8:
- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
+ WRITE_ONCE(*(u64 *)ptr, (u64)val);
break;
default:
BUILD_BUG();
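
The percpu accessors move from ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE(). At their core these come down to accesses through a volatile-qualified pointer, which keeps the compiler from caching, tearing, or re-fusing the load or store. A deliberately simplified userspace model (the real macros also handle non-scalar sizes and carry extra checks):

    #include <stdio.h>

    /* Simplified stand-ins: force a single volatile access of the given object. */
    #define READ_ONCE_SKETCH(x)        (*(const volatile typeof(x) *)&(x))
    #define WRITE_ONCE_SKETCH(x, val)  (*(volatile typeof(x) *)&(x) = (val))

    static unsigned long counter;

    int main(void)
    {
            WRITE_ONCE_SKETCH(counter, 42);
            unsigned long v = READ_ONCE_SKETCH(counter);

            printf("%lu\n", v);
            return 0;
    }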
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 38b6a2b49d68..8d5cbec17d80 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -17,6 +17,8 @@
#ifndef __ASM_PERF_EVENT_H
#define __ASM_PERF_EVENT_H
+#include <asm/stack_pointer.h>
+
#define ARMV8_PMU_MAX_COUNTERS 32
#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1)
diff --git a/arch/arm64/include/asm/probes.h b/arch/arm64/include/asm/probes.h
index 5af574d632fa..6a5b28904c33 100644
--- a/arch/arm64/include/asm/probes.h
+++ b/arch/arm64/include/asm/probes.h
@@ -15,21 +15,22 @@
#ifndef _ARM_PROBES_H
#define _ARM_PROBES_H
-#include <asm/opcodes.h>
-
-struct kprobe;
-struct arch_specific_insn;
-
-typedef u32 kprobe_opcode_t;
-typedef void (kprobes_handler_t) (u32 opcode, long addr, struct pt_regs *);
+typedef u32 probe_opcode_t;
+typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *);
/* architecture specific copy of original instruction */
-struct arch_specific_insn {
- kprobe_opcode_t *insn;
+struct arch_probe_insn {
+ probe_opcode_t *insn;
pstate_check_t *pstate_cc;
- kprobes_handler_t *handler;
+ probes_handler_t *handler;
/* restore address after step xol */
unsigned long restore;
};
+#ifdef CONFIG_KPROBES
+typedef u32 kprobe_opcode_t;
+struct arch_specific_insn {
+ struct arch_probe_insn api;
+};
+#endif
#endif
diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h
index 07b8ed037dee..6afd8476c60c 100644
--- a/arch/arm64/include/asm/ptdump.h
+++ b/arch/arm64/include/asm/ptdump.h
@@ -16,9 +16,10 @@
#ifndef __ASM_PTDUMP_H
#define __ASM_PTDUMP_H
-#ifdef CONFIG_ARM64_PTDUMP
+#ifdef CONFIG_ARM64_PTDUMP_CORE
#include <linux/mm_types.h>
+#include <linux/seq_file.h>
struct addr_marker {
unsigned long start_address;
@@ -29,16 +30,25 @@ struct ptdump_info {
struct mm_struct *mm;
const struct addr_marker *markers;
unsigned long base_addr;
- unsigned long max_addr;
};
-int ptdump_register(struct ptdump_info *info, const char *name);
-
+void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info);
+#ifdef CONFIG_ARM64_PTDUMP_DEBUGFS
+int ptdump_debugfs_register(struct ptdump_info *info, const char *name);
#else
-static inline int ptdump_register(struct ptdump_info *info, const char *name)
+static inline int ptdump_debugfs_register(struct ptdump_info *info,
+ const char *name)
{
return 0;
}
-#endif /* CONFIG_ARM64_PTDUMP */
+#endif
+void ptdump_check_wx(void);
+#endif /* CONFIG_ARM64_PTDUMP_CORE */
+
+#ifdef CONFIG_DEBUG_WX
+#define debug_checkwx() ptdump_check_wx()
+#else
+#define debug_checkwx() do { } while (0)
+#endif
#endif /* __ASM_PTDUMP_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index ada08b5b036d..513daf050e84 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -217,6 +217,14 @@ int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);
#include <asm-generic/ptrace.h>
+#define procedure_link_pointer(regs) ((regs)->regs[30])
+
+static inline void procedure_link_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ procedure_link_pointer(regs) = val;
+}
+
#undef profile_pc
extern unsigned long profile_pc(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 022644704a93..d050d720a1b4 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -29,11 +29,22 @@
#ifndef __ASSEMBLY__
+#include <asm/percpu.h>
+
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/thread_info.h>
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
+
+/*
+ * We don't use this_cpu_read(cpu_number) as that has implicit writes to
+ * preempt_count, and associated (compiler) barriers, whose expense we'd like
+ * to avoid. If we're preemptible, the value can be stale at use anyway.
+ * And we can't use this_cpu_ptr() either, as that winds up recursing back
+ * here under CONFIG_DEBUG_PREEMPT=y.
+ */
+#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))
struct seq_file;
@@ -73,6 +84,7 @@ asmlinkage void secondary_start_kernel(void);
*/
struct secondary_data {
void *stack;
+ struct task_struct *task;
long status;
};
diff --git a/arch/arm64/include/asm/stack_pointer.h b/arch/arm64/include/asm/stack_pointer.h
new file mode 100644
index 000000000000..ffcdf742cddf
--- /dev/null
+++ b/arch/arm64/include/asm/stack_pointer.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_STACK_POINTER_H
+#define __ASM_STACK_POINTER_H
+
+/*
+ * how to get the current stack pointer from C
+ */
+register unsigned long current_stack_pointer asm ("sp");
+
+#endif /* __ASM_STACK_POINTER_H */
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index b8a313fd7a09..de5600f40adf 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -1,7 +1,7 @@
#ifndef __ASM_SUSPEND_H
#define __ASM_SUSPEND_H
-#define NR_CTX_REGS 10
+#define NR_CTX_REGS 12
#define NR_CALLEE_SAVED_REGS 12
/*
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 6c80b3699cb8..98ae03f8eedd 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -22,8 +22,6 @@
#include <linux/stringify.h>
-#include <asm/opcodes.h>
-
/*
* ARMv8 ARM reserves the following encoding for system registers:
* (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
@@ -37,6 +35,33 @@
#define sys_reg(op0, op1, crn, crm, op2) \
((((op0)&3)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
+#ifndef CONFIG_BROKEN_GAS_INST
+
+#ifdef __ASSEMBLY__
+#define __emit_inst(x) .inst (x)
+#else
+#define __emit_inst(x) ".inst " __stringify((x)) "\n\t"
+#endif
+
+#else /* CONFIG_BROKEN_GAS_INST */
+
+#ifndef CONFIG_CPU_BIG_ENDIAN
+#define __INSTR_BSWAP(x) (x)
+#else /* CONFIG_CPU_BIG_ENDIAN */
+#define __INSTR_BSWAP(x) ((((x) << 24) & 0xff000000) | \
+ (((x) << 8) & 0x00ff0000) | \
+ (((x) >> 8) & 0x0000ff00) | \
+ (((x) >> 24) & 0x000000ff))
+#endif /* CONFIG_CPU_BIG_ENDIAN */
+
+#ifdef __ASSEMBLY__
+#define __emit_inst(x) .long __INSTR_BSWAP(x)
+#else /* __ASSEMBLY__ */
+#define __emit_inst(x) ".long " __stringify(__INSTR_BSWAP(x)) "\n\t"
+#endif /* __ASSEMBLY__ */
+
+#endif /* CONFIG_BROKEN_GAS_INST */
+
#define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0)
#define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5)
#define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6)
@@ -81,10 +106,10 @@
#define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
#define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
-#define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
- (!!x)<<8 | 0x1f)
-#define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM |\
- (!!x)<<8 | 0x1f)
+#define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM | \
+ (!!x)<<8 | 0x1f)
+#define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM | \
+ (!!x)<<8 | 0x1f)
/* Common SCTLR_ELx flags. */
#define SCTLR_ELx_EE (1 << 25)
@@ -228,11 +253,11 @@
.equ .L__reg_num_xzr, 31
.macro mrs_s, rt, sreg
- .inst 0xd5200000|(\sreg)|(.L__reg_num_\rt)
+ __emit_inst(0xd5200000|(\sreg)|(.L__reg_num_\rt))
.endm
.macro msr_s, sreg, rt
- .inst 0xd5000000|(\sreg)|(.L__reg_num_\rt)
+ __emit_inst(0xd5000000|(\sreg)|(.L__reg_num_\rt))
.endm
#else
@@ -246,11 +271,11 @@ asm(
" .equ .L__reg_num_xzr, 31\n"
"\n"
" .macro mrs_s, rt, sreg\n"
-" .inst 0xd5200000|(\\sreg)|(.L__reg_num_\\rt)\n"
+ __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt))
" .endm\n"
"\n"
" .macro msr_s, sreg, rt\n"
-" .inst 0xd5000000|(\\sreg)|(.L__reg_num_\\rt)\n"
+ __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt))
" .endm\n"
);
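
With CONFIG_BROKEN_GAS_INST the assembler cannot take .inst, so opcodes are emitted with .long instead; on big-endian that needs a manual byte swap, which is what __INSTR_BSWAP does. A quick check that the shift/mask form matches a plain 32-bit byte swap, using the SET_PSTATE_PAN encoding from this hunk as the sample opcode:

    #include <assert.h>
    #include <stdio.h>
    #include <stdint.h>

    #define REG_PSTATE_PAN_IMM    ((4u << 12) | (4u << 5))   /* sys_reg(0, 0, 4, 0, 4) */
    #define SET_PSTATE_PAN_VAL(x) (0xd5000000u | REG_PSTATE_PAN_IMM | (!!(x)) << 8 | 0x1f)

    /* Same shift/mask byte swap as __INSTR_BSWAP in the hunk. */
    static uint32_t instr_bswap(uint32_t x)
    {
            return ((x << 24) & 0xff000000) | ((x <<  8) & 0x00ff0000) |
                   ((x >>  8) & 0x0000ff00) | ((x >> 24) & 0x000000ff);
    }

    int main(void)
    {
            uint32_t insn = SET_PSTATE_PAN_VAL(1);

            assert(instr_bswap(insn) == __builtin_bswap32(insn));
            printf("insn=0x%08x swapped=0x%08x\n", insn, instr_bswap(insn));
            return 0;
    }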
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index e9ea5a6bd449..46c3b93cf865 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -36,58 +36,31 @@
struct task_struct;
+#include <asm/stack_pointer.h>
#include <asm/types.h>
typedef unsigned long mm_segment_t;
/*
* low level task data that entry.S needs immediate access to.
- * __switch_to() assumes cpu_context follows immediately after cpu_domain.
*/
struct thread_info {
unsigned long flags; /* low level flags */
mm_segment_t addr_limit; /* address limit */
- struct task_struct *task; /* main task structure */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ u64 ttbr0; /* saved TTBR0_EL1 */
+#endif
int preempt_count; /* 0 => preemptable, <0 => bug */
- int cpu; /* cpu */
};
#define INIT_THREAD_INFO(tsk) \
{ \
- .task = &tsk, \
- .flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
-/*
- * how to get the current stack pointer from C
- */
-register unsigned long current_stack_pointer asm ("sp");
-
-/*
- * how to get the thread information struct from C
- */
-static inline struct thread_info *current_thread_info(void) __attribute_const__;
-
-/*
- * struct thread_info can be accessed directly via sp_el0.
- *
- * We don't use read_sysreg() as we want the compiler to cache the value where
- * possible.
- */
-static inline struct thread_info *current_thread_info(void)
-{
- unsigned long sp_el0;
-
- asm ("mrs %0, sp_el0" : "=r" (sp_el0));
-
- return (struct thread_info *)sp_el0;
-}
-
#define thread_saved_pc(tsk) \
((unsigned long)(tsk->thread.cpu_context.pc))
#define thread_saved_sp(tsk) \
@@ -112,6 +85,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
+#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
#define TIF_NOHZ 7
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
@@ -132,10 +106,12 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
- _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
+ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
+ _TIF_UPROBE)
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 55d0adbf6509..d26750ca6e06 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -18,6 +18,12 @@
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H
+#include <asm/alternative.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/sysreg.h>
+
+#ifndef __ASSEMBLY__
+
/*
* User space memory access functions
*/
@@ -26,10 +32,8 @@
#include <linux/string.h>
#include <linux/thread_info.h>
-#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/ptrace.h>
-#include <asm/sysreg.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>
@@ -120,6 +124,99 @@ static inline void set_fs(mm_segment_t fs)
" .popsection\n"
/*
+ * User access enabling/disabling.
+ */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+static inline void __uaccess_ttbr0_disable(void)
+{
+ unsigned long ttbr;
+
+ /* reserved_ttbr0 placed at the end of swapper_pg_dir */
+ ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
+ write_sysreg(ttbr, ttbr0_el1);
+ isb();
+}
+
+static inline void __uaccess_ttbr0_enable(void)
+{
+ unsigned long flags;
+
+ /*
+ * Disable interrupts to avoid preemption between reading the 'ttbr0'
+ * variable and the MSR. A context switch could trigger an ASID
+ * roll-over and an update of 'ttbr0'.
+ */
+ local_irq_save(flags);
+ write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
+ isb();
+ local_irq_restore(flags);
+}
+
+static inline bool uaccess_ttbr0_disable(void)
+{
+ if (!system_uses_ttbr0_pan())
+ return false;
+ __uaccess_ttbr0_disable();
+ return true;
+}
+
+static inline bool uaccess_ttbr0_enable(void)
+{
+ if (!system_uses_ttbr0_pan())
+ return false;
+ __uaccess_ttbr0_enable();
+ return true;
+}
+#else
+static inline bool uaccess_ttbr0_disable(void)
+{
+ return false;
+}
+
+static inline bool uaccess_ttbr0_enable(void)
+{
+ return false;
+}
+#endif
+
+#define __uaccess_disable(alt) \
+do { \
+ if (!uaccess_ttbr0_disable()) \
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \
+ CONFIG_ARM64_PAN)); \
+} while (0)
+
+#define __uaccess_enable(alt) \
+do { \
+ if (!uaccess_ttbr0_enable()) \
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \
+ CONFIG_ARM64_PAN)); \
+} while (0)
+
+static inline void uaccess_disable(void)
+{
+ __uaccess_disable(ARM64_HAS_PAN);
+}
+
+static inline void uaccess_enable(void)
+{
+ __uaccess_enable(ARM64_HAS_PAN);
+}
+
+/*
+ * These functions are no-ops when UAO is present.
+ */
+static inline void uaccess_disable_not_uao(void)
+{
+ __uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
+}
+
+static inline void uaccess_enable_not_uao(void)
+{
+ __uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
+}
+
+/*
* The "__xxx" versions of the user access functions do not verify the address
* space - it must have been done previously with a separate "access_ok()"
* call.
@@ -146,8 +243,7 @@ static inline void set_fs(mm_segment_t fs)
do { \
unsigned long __gu_val; \
__chk_user_ptr(ptr); \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
- CONFIG_ARM64_PAN)); \
+ uaccess_enable_not_uao(); \
switch (sizeof(*(ptr))) { \
case 1: \
__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \
@@ -168,9 +264,8 @@ do { \
default: \
BUILD_BUG(); \
} \
+ uaccess_disable_not_uao(); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
- CONFIG_ARM64_PAN)); \
} while (0)
#define __get_user(x, ptr) \
@@ -215,8 +310,7 @@ do { \
do { \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
- CONFIG_ARM64_PAN)); \
+ uaccess_enable_not_uao(); \
switch (sizeof(*(ptr))) { \
case 1: \
__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \
@@ -237,8 +331,7 @@ do { \
default: \
BUILD_BUG(); \
} \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
- CONFIG_ARM64_PAN)); \
+ uaccess_disable_not_uao(); \
} while (0)
#define __put_user(x, ptr) \
@@ -331,4 +424,66 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
+#else /* __ASSEMBLY__ */
+
+#include <asm/assembler.h>
+
+/*
+ * User access enabling/disabling macros.
+ */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ .macro __uaccess_ttbr0_disable, tmp1
+ mrs \tmp1, ttbr1_el1 // swapper_pg_dir
+ add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
+ msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
+ isb
+ .endm
+
+ .macro __uaccess_ttbr0_enable, tmp1
+ get_thread_info \tmp1
+ ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
+ msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
+ isb
+ .endm
+
+ .macro uaccess_ttbr0_disable, tmp1
+alternative_if_not ARM64_HAS_PAN
+ __uaccess_ttbr0_disable \tmp1
+alternative_else_nop_endif
+ .endm
+
+ .macro uaccess_ttbr0_enable, tmp1, tmp2
+alternative_if_not ARM64_HAS_PAN
+ save_and_disable_irq \tmp2 // avoid preemption
+ __uaccess_ttbr0_enable \tmp1
+ restore_irq \tmp2
+alternative_else_nop_endif
+ .endm
+#else
+ .macro uaccess_ttbr0_disable, tmp1
+ .endm
+
+ .macro uaccess_ttbr0_enable, tmp1, tmp2
+ .endm
+#endif
+
+/*
+ * These macros are no-ops when UAO is present.
+ */
+ .macro uaccess_disable_not_uao, tmp1
+ uaccess_ttbr0_disable \tmp1
+alternative_if ARM64_ALT_PAN_NOT_UAO
+ SET_PSTATE_PAN(1)
+alternative_else_nop_endif
+ .endm
+
+ .macro uaccess_enable_not_uao, tmp1, tmp2
+ uaccess_ttbr0_enable \tmp1, \tmp2
+alternative_if ARM64_ALT_PAN_NOT_UAO
+ SET_PSTATE_PAN(0)
+alternative_else_nop_endif
+ .endm
+
+#endif /* __ASSEMBLY__ */
+
#endif /* __ASM_UACCESS_H */
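
The new uaccess_enable()/uaccess_disable() helpers first try the software TTBR0-PAN path and only fall back to toggling the hardware PAN bit (via the ALTERNATIVE) when that path is not in use. A toy model of that fall-back structure; everything here is a stand-in, since neither mechanism has a userspace equivalent:

    #include <stdbool.h>
    #include <stdio.h>

    static bool sw_ttbr0_pan;                       /* models system_uses_ttbr0_pan() */

    static bool ttbr0_pan_enable_uaccess(void)
    {
            if (!sw_ttbr0_pan)
                    return false;                   /* not handled here */
            puts("restore saved TTBR0_EL1");        /* models __uaccess_ttbr0_enable() */
            return true;
    }

    static void uaccess_enable_sketch(void)
    {
            if (!ttbr0_pan_enable_uaccess())
                    puts("clear PSTATE.PAN (hardware PAN)");    /* the ALTERNATIVE */
    }

    int main(void)
    {
            sw_ttbr0_pan = false;
            uaccess_enable_sketch();
            sw_ttbr0_pan = true;
            uaccess_enable_sketch();
            return 0;
    }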
diff --git a/arch/arm64/include/asm/uprobes.h b/arch/arm64/include/asm/uprobes.h
new file mode 100644
index 000000000000..8d004073d0e8
--- /dev/null
+++ b/arch/arm64/include/asm/uprobes.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+
+#include <asm/debug-monitors.h>
+#include <asm/insn.h>
+#include <asm/probes.h>
+
+#define MAX_UINSN_BYTES AARCH64_INSN_SIZE
+
+#define UPROBE_SWBP_INSN BRK64_OPCODE_UPROBES
+#define UPROBE_SWBP_INSN_SIZE AARCH64_INSN_SIZE
+#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
+
+typedef u32 uprobe_opcode_t;
+
+struct arch_uprobe_task {
+};
+
+struct arch_uprobe {
+ union {
+ u8 insn[MAX_UINSN_BYTES];
+ u8 ixol[MAX_UINSN_BYTES];
+ };
+ struct arch_probe_insn api;
+ bool simulate;
+};
+
+#endif
diff --git a/arch/arm64/include/asm/xen/hypercall.h b/arch/arm64/include/asm/xen/hypercall.h
index 74b0c423ff5b..3522cbaed316 100644
--- a/arch/arm64/include/asm/xen/hypercall.h
+++ b/arch/arm64/include/asm/xen/hypercall.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/hypercall.h>
+#include <xen/arm/hypercall.h>
diff --git a/arch/arm64/include/asm/xen/hypervisor.h b/arch/arm64/include/asm/xen/hypervisor.h
index f263da8e8769..d6e7709d0688 100644
--- a/arch/arm64/include/asm/xen/hypervisor.h
+++ b/arch/arm64/include/asm/xen/hypervisor.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/hypervisor.h>
+#include <xen/arm/hypervisor.h>
diff --git a/arch/arm64/include/asm/xen/interface.h b/arch/arm64/include/asm/xen/interface.h
index 44457aebeed4..88c0d75da190 100644
--- a/arch/arm64/include/asm/xen/interface.h
+++ b/arch/arm64/include/asm/xen/interface.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/interface.h>
+#include <xen/arm/interface.h>
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index 2052102b4e02..b3ef061d8b74 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/page-coherent.h>
+#include <xen/arm/page-coherent.h>
diff --git a/arch/arm64/include/asm/xen/page.h b/arch/arm64/include/asm/xen/page.h
index bed87ec36780..31bbc803cecb 100644
--- a/arch/arm64/include/asm/xen/page.h
+++ b/arch/arm64/include/asm/xen/page.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/page.h>
+#include <xen/arm/page.h>
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index b0988bb1bf64..04de188a36c9 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -14,10 +14,8 @@
#include <linux/slab.h>
#include <linux/sysctl.h>
-#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
-#include <asm/opcodes.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
@@ -285,10 +283,10 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
#define __SWP_LL_SC_LOOPS 4
#define __user_swpX_asm(data, addr, res, temp, temp2, B) \
+do { \
+ uaccess_enable(); \
__asm__ __volatile__( \
" mov %w3, %w7\n" \
- ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
- CONFIG_ARM64_PAN) \
"0: ldxr"B" %w2, [%4]\n" \
"1: stxr"B" %w0, %w1, [%4]\n" \
" cbz %w0, 2f\n" \
@@ -306,12 +304,12 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
" .popsection" \
_ASM_EXTABLE(0b, 4b) \
_ASM_EXTABLE(1b, 4b) \
- ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
- CONFIG_ARM64_PAN) \
: "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT), \
"i" (__SWP_LL_SC_LOOPS) \
- : "memory")
+ : "memory"); \
+ uaccess_disable(); \
+} while (0)
#define __user_swp_asm(data, addr, res, temp, temp2) \
__user_swpX_asm(data, addr, res, temp, temp2, "")
@@ -352,6 +350,10 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
return res;
}
+#define ARM_OPCODE_CONDTEST_FAIL 0
+#define ARM_OPCODE_CONDTEST_PASS 1
+#define ARM_OPCODE_CONDTEST_UNCOND 2
+
#define ARM_OPCODE_CONDITION_UNCOND 0xf
static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr)
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 4a2f0f0fef32..bc049afc73a7 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -36,11 +36,13 @@ int main(void)
{
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
BLANK();
- DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
- DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
- DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+ DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
+ DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
+ DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit));
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
+#endif
+ DEFINE(TSK_STACK, offsetof(struct task_struct, stack));
BLANK();
DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
BLANK();
@@ -123,6 +125,7 @@ int main(void)
DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
BLANK();
DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack));
+ DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
BLANK();
#ifdef CONFIG_KVM_ARM_HOST
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
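
With thread_info now embedded in task_struct, entry.S addresses its fields through the TSK_TI_* constants that asm-offsets.c generates from offsetof() expressions. A toy reproduction of that generation step, using invented stand-in structs rather than the real task_struct layout:

    #include <stdio.h>
    #include <stddef.h>

    struct thread_info_sketch {
            unsigned long flags;
            unsigned long addr_limit;
            int preempt_count;
    };

    struct task_struct_sketch {
            struct thread_info_sketch thread_info;  /* first member, as in the kernel */
            void *stack;
            /* ... */
    };

    int main(void)
    {
            /* asm-offsets.c emits these as assembler-visible constants. */
            printf("TSK_TI_FLAGS      %zu\n",
                   offsetof(struct task_struct_sketch, thread_info.flags));
            printf("TSK_TI_ADDR_LIMIT %zu\n",
                   offsetof(struct task_struct_sketch, thread_info.addr_limit));
            printf("TSK_STACK         %zu\n",
                   offsetof(struct task_struct_sketch, stack));
            return 0;
    }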
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c02504ea304b..fdf8f045929f 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -47,6 +47,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
#endif
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+EXPORT_SYMBOL(cpu_hwcaps);
DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcap_keys);
@@ -746,6 +747,14 @@ static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
}
+static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
+{
+ u64 pfr0 = read_system_reg(SYS_ID_AA64PFR0_EL1);
+
+ return cpuid_feature_extract_signed_field(pfr0,
+ ID_AA64PFR0_FP_SHIFT) < 0;
+}
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -829,6 +838,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.def_scope = SCOPE_SYSTEM,
.matches = hyp_offset_low,
},
+ {
+ /* FP/SIMD is not implemented */
+ .capability = ARM64_HAS_NO_FPSIMD,
+ .def_scope = SCOPE_SYSTEM,
+ .min_field_value = 0,
+ .matches = has_no_fpsimd,
+ },
{},
};
@@ -1102,5 +1118,5 @@ void __init setup_cpu_features(void)
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
{
- return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
+ return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
}
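
has_no_fpsimd() keys off a signed field in ID_AA64PFR0_EL1: a 4-bit value of 0xf sign-extends to -1, meaning FP is not implemented. A sketch of one common way to do the signed-field extraction (the kernel helper itself is not shown in this hunk; the field position of 16 and the register values below are assumptions):

    #include <stdio.h>
    #include <stdint.h>

    #define ID_AA64PFR0_FP_SHIFT 16     /* assumed field position, bits [19:16] */

    /* Extract a signed 4-bit ID register field via shift and arithmetic shift. */
    static int64_t extract_signed_field(uint64_t reg, unsigned int shift)
    {
            return (int64_t)(reg << (64 - shift - 4)) >> 60;
    }

    int main(void)
    {
            uint64_t fp_present = 0x0ULL << ID_AA64PFR0_FP_SHIFT;   /* invented */
            uint64_t fp_missing = 0xfULL << ID_AA64PFR0_FP_SHIFT;   /* invented */

            printf("present: %lld\n",
                   (long long)extract_signed_field(fp_present, ID_AA64PFR0_FP_SHIFT));
            printf("missing: %lld\n",
                   (long long)extract_signed_field(fp_missing, ID_AA64PFR0_FP_SHIFT));
            return 0;
    }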
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 73ae90ef434c..605df76f0a06 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -226,6 +226,8 @@ static void send_user_sigtrap(int si_code)
static int single_step_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
+ bool handler_found = false;
+
/*
* If we are stepping a pending breakpoint, call the hw_breakpoint
* handler first.
@@ -233,7 +235,14 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
if (!reinstall_suspended_bps(regs))
return 0;
- if (user_mode(regs)) {
+#ifdef CONFIG_KPROBES
+ if (kprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
+ handler_found = true;
+#endif
+ if (!handler_found && call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
+ handler_found = true;
+
+ if (!handler_found && user_mode(regs)) {
send_user_sigtrap(TRAP_TRACE);
/*
@@ -243,15 +252,8 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
* to the active-not-pending state).
*/
user_rewind_single_step(current);
- } else {
-#ifdef CONFIG_KPROBES
- if (kprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
- return 0;
-#endif
- if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
- return 0;
-
- pr_warning("Unexpected kernel single-step exception at EL1\n");
+ } else if (!handler_found) {
+ pr_warn("Unexpected kernel single-step exception at EL1\n");
/*
* Re-enable stepping since we know that we will be
* returning to regs.
@@ -304,16 +306,20 @@ NOKPROBE_SYMBOL(call_break_hook);
static int brk_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
- if (user_mode(regs)) {
- send_user_sigtrap(TRAP_BRKPT);
- }
+ bool handler_found = false;
+
#ifdef CONFIG_KPROBES
- else if ((esr & BRK64_ESR_MASK) == BRK64_ESR_KPROBES) {
- if (kprobe_breakpoint_handler(regs, esr) != DBG_HOOK_HANDLED)
- return -EFAULT;
+ if ((esr & BRK64_ESR_MASK) == BRK64_ESR_KPROBES) {
+ if (kprobe_breakpoint_handler(regs, esr) == DBG_HOOK_HANDLED)
+ handler_found = true;
}
#endif
- else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
+ if (!handler_found && call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
+ handler_found = true;
+
+ if (!handler_found && user_mode(regs)) {
+ send_user_sigtrap(TRAP_BRKPT);
+ } else if (!handler_found) {
pr_warn("Unexpected kernel BRK exception at EL1\n");
return -EFAULT;
}
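
Both brk_handler() and single_step_handler() are restructured around a handler_found flag: try the kprobe handler, then the registered hooks, and only fall through to the user SIGTRAP or the "unexpected at EL1" warning when nobody claimed the exception. A compact sketch of that dispatch pattern, with invented handler names:

    #include <stdbool.h>
    #include <stdio.h>

    enum { DBG_HOOK_HANDLED, DBG_HOOK_ERROR };

    static int kprobe_hook(void) { return DBG_HOOK_ERROR;   }   /* invented stub */
    static int step_hooks(void)  { return DBG_HOOK_HANDLED; }   /* invented stub */

    static int dispatch(bool user_mode)
    {
            bool handler_found = false;

            if (kprobe_hook() == DBG_HOOK_HANDLED)
                    handler_found = true;
            if (!handler_found && step_hooks() == DBG_HOOK_HANDLED)
                    handler_found = true;

            if (!handler_found && user_mode)
                    puts("send SIGTRAP to user task");
            else if (!handler_found)
                    puts("unexpected kernel exception");
            return 0;
    }

    int main(void)
    {
            dispatch(false);
            return 0;
    }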
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index ba9bee389fd5..5d17f377d905 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -62,8 +62,8 @@ struct screen_info screen_info __section(.data);
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
pteval_t prot_val = create_mapping_protection(md);
- bool allow_block_mappings = (md->type != EFI_RUNTIME_SERVICES_CODE &&
- md->type != EFI_RUNTIME_SERVICES_DATA);
+ bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
+ md->type == EFI_RUNTIME_SERVICES_DATA);
if (!PAGE_ALIGNED(md->phys_addr) ||
!PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
@@ -76,12 +76,12 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
* from the MMU routines. So avoid block mappings altogether in
* that case.
*/
- allow_block_mappings = false;
+ page_mappings_only = true;
}
create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
md->num_pages << EFI_PAGE_SHIFT,
- __pgprot(prot_val | PTE_NG), allow_block_mappings);
+ __pgprot(prot_val | PTE_NG), page_mappings_only);
return 0;
}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 223d54a4d66b..4f0d76339414 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -29,7 +29,9 @@
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
+#include <asm/ptrace.h>
#include <asm/thread_info.h>
+#include <asm/uaccess.h>
#include <asm/unistd.h>
/*
@@ -90,9 +92,8 @@
.if \el == 0
mrs x21, sp_el0
- mov tsk, sp
- and tsk, tsk, #~(THREAD_SIZE - 1) // Ensure MDSCR_EL1.SS is clear,
- ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug
+ ldr_this_cpu tsk, __entry_task, x20 // Ensure MDSCR_EL1.SS is clear,
+ ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
disable_step_tsk x19, x20 // exceptions when scheduling.
mov x29, xzr // fp pointed to user-space
@@ -100,15 +101,41 @@
add x21, sp, #S_FRAME_SIZE
get_thread_info tsk
/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
- ldr x20, [tsk, #TI_ADDR_LIMIT]
+ ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
str x20, [sp, #S_ORIG_ADDR_LIMIT]
mov x20, #TASK_SIZE_64
- str x20, [tsk, #TI_ADDR_LIMIT]
+ str x20, [tsk, #TSK_TI_ADDR_LIMIT]
/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
.endif /* \el == 0 */
mrs x22, elr_el1
mrs x23, spsr_el1
stp lr, x21, [sp, #S_LR]
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ /*
+ * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
+ * EL0, there is no need to check the state of TTBR0_EL1 since
+ * accesses are always enabled.
+ * Note that the meaning of this bit differs from the ARMv8.1 PAN
+ * feature as all TTBR0_EL1 accesses are disabled, not just those to
+ * user mappings.
+ */
+alternative_if ARM64_HAS_PAN
+ b 1f // skip TTBR0 PAN
+alternative_else_nop_endif
+
+ .if \el != 0
+ mrs x21, ttbr0_el1
+ tst x21, #0xffff << 48 // Check for the reserved ASID
+ orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR
+ b.eq 1f // TTBR0 access already disabled
+ and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR
+ .endif
+
+ __uaccess_ttbr0_disable x21
+1:
+#endif
+
stp x22, x23, [sp, #S_PC]
/*
@@ -139,7 +166,7 @@
.if \el != 0
/* Restore the task's original addr_limit. */
ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
- str x20, [tsk, #TI_ADDR_LIMIT]
+ str x20, [tsk, #TSK_TI_ADDR_LIMIT]
/* No need to restore UAO, it will be restored from SPSR_EL1 */
.endif
@@ -147,6 +174,40 @@
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
.if \el == 0
ct_user_enter
+ .endif
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ /*
+ * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
+ * PAN bit checking.
+ */
+alternative_if ARM64_HAS_PAN
+ b 2f // skip TTBR0 PAN
+alternative_else_nop_endif
+
+ .if \el != 0
+ tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
+ .endif
+
+ __uaccess_ttbr0_enable x0
+
+ .if \el == 0
+ /*
+ * Enable errata workarounds only if returning to user. The only
+ * workaround currently required for TTBR0_EL1 changes is for the
+ * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
+ * corruption).
+ */
+ post_ttbr0_update_workaround
+ .endif
+1:
+ .if \el != 0
+ and x22, x22, #~PSR_PAN_BIT // ARMv8.0 CPUs do not understand this bit
+ .endif
+2:
+#endif
+
+ .if \el == 0
ldr x23, [sp, #S_SP] // load return stack pointer
msr sp_el0, x23
#ifdef CONFIG_ARM64_ERRATUM_845719
@@ -162,6 +223,7 @@ alternative_if ARM64_WORKAROUND_845719
alternative_else_nop_endif
#endif
.endif
+
msr elr_el1, x21 // set up the return data
msr spsr_el1, x22
ldp x0, x1, [sp, #16 * 0]
@@ -184,23 +246,20 @@ alternative_else_nop_endif
eret // return to kernel
.endm
- .macro get_thread_info, rd
- mrs \rd, sp_el0
- .endm
-
.macro irq_stack_entry
mov x19, sp // preserve the original sp
/*
- * Compare sp with the current thread_info, if the top
- * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
- * should switch to the irq stack.
+ * Compare sp with the base of the task stack.
+ * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
+ * and should switch to the irq stack.
*/
- and x25, x19, #~(THREAD_SIZE - 1)
- cmp x25, tsk
- b.ne 9998f
+ ldr x25, [tsk, TSK_STACK]
+ eor x25, x25, x19
+ and x25, x25, #~(THREAD_SIZE - 1)
+ cbnz x25, 9998f
- this_cpu_ptr irq_stack, x25, x26
+ adr_this_cpu x25, irq_stack, x26
mov x26, #IRQ_STACK_START_SP
add x26, x25, x26
@@ -427,9 +486,9 @@ el1_irq:
irq_handler
#ifdef CONFIG_PREEMPT
- ldr w24, [tsk, #TI_PREEMPT] // get preempt count
+ ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count
cbnz w24, 1f // preempt count != 0
- ldr x0, [tsk, #TI_FLAGS] // get flags
+ ldr x0, [tsk, #TSK_TI_FLAGS] // get flags
tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
bl el1_preempt
1:
@@ -444,7 +503,7 @@ ENDPROC(el1_irq)
el1_preempt:
mov x24, lr
1: bl preempt_schedule_irq // irq en/disable is done inside
- ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
+ ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS
tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
ret x24
#endif
@@ -674,8 +733,7 @@ ENTRY(cpu_switch_to)
ldp x29, x9, [x8], #16
ldr lr, [x8]
mov sp, x9
- and x9, x9, #~(THREAD_SIZE - 1)
- msr sp_el0, x9
+ msr sp_el0, x1
ret
ENDPROC(cpu_switch_to)
@@ -686,7 +744,7 @@ ENDPROC(cpu_switch_to)
ret_fast_syscall:
disable_irq // disable interrupts
str x0, [sp, #S_X0] // returned x0
- ldr x1, [tsk, #TI_FLAGS] // re-check for syscall tracing
+ ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for syscall tracing
and x2, x1, #_TIF_SYSCALL_WORK
cbnz x2, ret_fast_syscall_trace
and x2, x1, #_TIF_WORK_MASK
@@ -706,14 +764,14 @@ work_pending:
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on // enabled while in userspace
#endif
- ldr x1, [tsk, #TI_FLAGS] // re-check for single-step
+ ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for single-step
b finish_ret_to_user
/*
* "slow" syscall return path.
*/
ret_to_user:
disable_irq // disable interrupts
- ldr x1, [tsk, #TI_FLAGS]
+ ldr x1, [tsk, #TSK_TI_FLAGS]
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
finish_ret_to_user:
@@ -746,7 +804,7 @@ el0_svc_naked: // compat entry point
enable_dbg_and_irq
ct_user_exit 1
- ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks
+ ldr x16, [tsk, #TSK_TI_FLAGS] // check for syscall hooks
tst x16, #_TIF_SYSCALL_WORK
b.ne __sys_trace
cmp scno, sc_nr // check upper syscall limit
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 394c61db5566..b883f1f75216 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -127,6 +127,8 @@ void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
void fpsimd_thread_switch(struct task_struct *next)
{
+ if (!system_supports_fpsimd())
+ return;
/*
* Save the current FPSIMD state to memory, but only if whatever is in
* the registers is in fact the most recent userland FPSIMD state of
@@ -157,6 +159,8 @@ void fpsimd_thread_switch(struct task_struct *next)
void fpsimd_flush_thread(void)
{
+ if (!system_supports_fpsimd())
+ return;
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
fpsimd_flush_task_state(current);
set_thread_flag(TIF_FOREIGN_FPSTATE);
@@ -168,6 +172,8 @@ void fpsimd_flush_thread(void)
*/
void fpsimd_preserve_current_state(void)
{
+ if (!system_supports_fpsimd())
+ return;
preempt_disable();
if (!test_thread_flag(TIF_FOREIGN_FPSTATE))
fpsimd_save_state(&current->thread.fpsimd_state);
@@ -181,6 +187,8 @@ void fpsimd_preserve_current_state(void)
*/
void fpsimd_restore_current_state(void)
{
+ if (!system_supports_fpsimd())
+ return;
preempt_disable();
if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
struct fpsimd_state *st = &current->thread.fpsimd_state;
@@ -199,6 +207,8 @@ void fpsimd_restore_current_state(void)
*/
void fpsimd_update_current_state(struct fpsimd_state *state)
{
+ if (!system_supports_fpsimd())
+ return;
preempt_disable();
fpsimd_load_state(state);
if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
@@ -228,6 +238,8 @@ static DEFINE_PER_CPU(struct fpsimd_partial_state, softirq_fpsimdstate);
*/
void kernel_neon_begin_partial(u32 num_regs)
{
+ if (WARN_ON(!system_supports_fpsimd()))
+ return;
if (in_interrupt()) {
struct fpsimd_partial_state *s = this_cpu_ptr(
in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
@@ -252,6 +264,8 @@ EXPORT_SYMBOL(kernel_neon_begin_partial);
void kernel_neon_end(void)
{
+ if (!system_supports_fpsimd())
+ return;
if (in_interrupt()) {
struct fpsimd_partial_state *s = this_cpu_ptr(
in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 332e33193ccf..4b1abac3485a 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -326,14 +326,14 @@ __create_page_tables:
* dirty cache lines being evicted.
*/
adrp x0, idmap_pg_dir
- adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE
+ adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
bl __inval_cache_range
/*
* Clear the idmap and swapper page tables.
*/
adrp x0, idmap_pg_dir
- adrp x6, swapper_pg_dir + SWAPPER_DIR_SIZE
+ adrp x6, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
1: stp xzr, xzr, [x0], #16
stp xzr, xzr, [x0], #16
stp xzr, xzr, [x0], #16
@@ -412,7 +412,7 @@ __create_page_tables:
* tables again to remove any speculatively loaded cache lines.
*/
adrp x0, idmap_pg_dir
- adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE
+ adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
dmb sy
bl __inval_cache_range
@@ -428,7 +428,8 @@ ENDPROC(__create_page_tables)
__primary_switched:
adrp x4, init_thread_union
add sp, x4, #THREAD_SIZE
- msr sp_el0, x4 // Save thread_info
+ adr_l x5, init_task
+ msr sp_el0, x5 // Save thread_info
adr_l x8, vectors // load VBAR_EL1 with virtual
msr vbar_el1, x8 // vector table address
@@ -524,10 +525,21 @@ set_hcr:
msr hcr_el2, x0
isb
- /* Generic timers. */
+ /*
+ * Allow Non-secure EL1 and EL0 to access physical timer and counter.
+ * This is not necessary for VHE, since the host kernel runs in EL2,
+ * and EL0 accesses are configured in the later stage of boot process.
+ * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
+ * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
+ * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
+ * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
+ * EL2.
+ */
+ cbnz x2, 1f
mrs x0, cnthctl_el2
orr x0, x0, #3 // Enable EL1 physical timers
msr cnthctl_el2, x0
+1:
msr cntvoff_el2, xzr // Clear virtual offset
#ifdef CONFIG_ARM_GIC_V3
@@ -699,10 +711,10 @@ __secondary_switched:
isb
adr_l x0, secondary_data
- ldr x0, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
- mov sp, x0
- and x0, x0, #~(THREAD_SIZE - 1)
- msr sp_el0, x0 // save thread_info
+ ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
+ mov sp, x1
+ ldr x2, [x0, #CPU_BOOT_TASK]
+ msr sp_el0, x2
mov x29, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index d55a7b09959b..fe301cbcb442 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -136,7 +136,7 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
/* Save the mpidr of the cpu we called cpu_suspend() on... */
if (sleep_cpu < 0) {
- pr_err("Failing to hibernate on an unkown CPU.\n");
+ pr_err("Failing to hibernate on an unknown CPU.\n");
return -ENODEV;
}
hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
@@ -547,7 +547,7 @@ out:
int hibernate_resume_nonboot_cpu_disable(void)
{
if (sleep_cpu < 0) {
- pr_err("Failing to resume from hibernate on an unkown CPU.\n");
+ pr_err("Failing to resume from hibernate on an unknown CPU.\n");
return -ENODEV;
}
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 948b73148d56..1b3c747fedda 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -317,9 +317,21 @@ static int get_hbp_len(u8 hbp_len)
case ARM_BREAKPOINT_LEN_2:
len_in_bytes = 2;
break;
+ case ARM_BREAKPOINT_LEN_3:
+ len_in_bytes = 3;
+ break;
case ARM_BREAKPOINT_LEN_4:
len_in_bytes = 4;
break;
+ case ARM_BREAKPOINT_LEN_5:
+ len_in_bytes = 5;
+ break;
+ case ARM_BREAKPOINT_LEN_6:
+ len_in_bytes = 6;
+ break;
+ case ARM_BREAKPOINT_LEN_7:
+ len_in_bytes = 7;
+ break;
case ARM_BREAKPOINT_LEN_8:
len_in_bytes = 8;
break;
@@ -349,7 +361,7 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
* to generic breakpoint descriptions.
*/
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
- int *gen_len, int *gen_type)
+ int *gen_len, int *gen_type, int *offset)
{
/* Type */
switch (ctrl.type) {
@@ -369,17 +381,33 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
return -EINVAL;
}
+ if (!ctrl.len)
+ return -EINVAL;
+ *offset = __ffs(ctrl.len);
+
/* Len */
- switch (ctrl.len) {
+ switch (ctrl.len >> *offset) {
case ARM_BREAKPOINT_LEN_1:
*gen_len = HW_BREAKPOINT_LEN_1;
break;
case ARM_BREAKPOINT_LEN_2:
*gen_len = HW_BREAKPOINT_LEN_2;
break;
+ case ARM_BREAKPOINT_LEN_3:
+ *gen_len = HW_BREAKPOINT_LEN_3;
+ break;
case ARM_BREAKPOINT_LEN_4:
*gen_len = HW_BREAKPOINT_LEN_4;
break;
+ case ARM_BREAKPOINT_LEN_5:
+ *gen_len = HW_BREAKPOINT_LEN_5;
+ break;
+ case ARM_BREAKPOINT_LEN_6:
+ *gen_len = HW_BREAKPOINT_LEN_6;
+ break;
+ case ARM_BREAKPOINT_LEN_7:
+ *gen_len = HW_BREAKPOINT_LEN_7;
+ break;
case ARM_BREAKPOINT_LEN_8:
*gen_len = HW_BREAKPOINT_LEN_8;
break;
@@ -423,9 +451,21 @@ static int arch_build_bp_info(struct perf_event *bp)
case HW_BREAKPOINT_LEN_2:
info->ctrl.len = ARM_BREAKPOINT_LEN_2;
break;
+ case HW_BREAKPOINT_LEN_3:
+ info->ctrl.len = ARM_BREAKPOINT_LEN_3;
+ break;
case HW_BREAKPOINT_LEN_4:
info->ctrl.len = ARM_BREAKPOINT_LEN_4;
break;
+ case HW_BREAKPOINT_LEN_5:
+ info->ctrl.len = ARM_BREAKPOINT_LEN_5;
+ break;
+ case HW_BREAKPOINT_LEN_6:
+ info->ctrl.len = ARM_BREAKPOINT_LEN_6;
+ break;
+ case HW_BREAKPOINT_LEN_7:
+ info->ctrl.len = ARM_BREAKPOINT_LEN_7;
+ break;
case HW_BREAKPOINT_LEN_8:
info->ctrl.len = ARM_BREAKPOINT_LEN_8;
break;
@@ -517,18 +557,17 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
default:
return -EINVAL;
}
-
- info->address &= ~alignment_mask;
- info->ctrl.len <<= offset;
} else {
if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
alignment_mask = 0x3;
else
alignment_mask = 0x7;
- if (info->address & alignment_mask)
- return -EINVAL;
+ offset = info->address & alignment_mask;
}
+ info->address &= ~alignment_mask;
+ info->ctrl.len <<= offset;
+
/*
* Disallow per-task kernel breakpoints since these would
* complicate the stepping code.
@@ -661,12 +700,47 @@ unlock:
}
NOKPROBE_SYMBOL(breakpoint_handler);
+/*
+ * Arm64 hardware does not always report a watchpoint hit address that matches
+ * one of the watchpoints set. It can also report an address "near" the
+ * watchpoint if a single instruction access both watched and unwatched
+ * addresses. There is no straight-forward way, short of disassembling the
+ * offending instruction, to map that address back to the watchpoint. This
+ * function computes the distance of the memory access from the watchpoint as a
+ * heuristic for the likelyhood that a given access triggered the watchpoint.
+ *
+ * See Section D2.10.5 "Determining the memory location that caused a Watchpoint
+ * exception" of ARMv8 Architecture Reference Manual for details.
+ *
+ * The function returns the distance of the address from the bytes watched by
+ * the watchpoint. In case of an exact match, it returns 0.
+ */
+static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
+ struct arch_hw_breakpoint_ctrl *ctrl)
+{
+ u64 wp_low, wp_high;
+ u32 lens, lene;
+
+ lens = __ffs(ctrl->len);
+ lene = __fls(ctrl->len);
+
+ wp_low = val + lens;
+ wp_high = val + lene;
+ if (addr < wp_low)
+ return wp_low - addr;
+ else if (addr > wp_high)
+ return addr - wp_high;
+ else
+ return 0;
+}
+
static int watchpoint_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
- int i, step = 0, *kernel_step, access;
+ int i, step = 0, *kernel_step, access, closest_match = 0;
+ u64 min_dist = -1, dist;
u32 ctrl_reg;
- u64 val, alignment_mask;
+ u64 val;
struct perf_event *wp, **slots;
struct debug_info *debug_info;
struct arch_hw_breakpoint *info;
@@ -675,35 +749,15 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
slots = this_cpu_ptr(wp_on_reg);
debug_info = &current->thread.debug;
+ /*
+ * Find all watchpoints that match the reported address. If no exact
+ * match is found, attribute the hit to the closest watchpoint.
+ */
+ rcu_read_lock();
for (i = 0; i < core_num_wrps; ++i) {
- rcu_read_lock();
-
wp = slots[i];
-
if (wp == NULL)
- goto unlock;
-
- info = counter_arch_bp(wp);
- /* AArch32 watchpoints are either 4 or 8 bytes aligned. */
- if (is_compat_task()) {
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
- alignment_mask = 0x7;
- else
- alignment_mask = 0x3;
- } else {
- alignment_mask = 0x7;
- }
-
- /* Check if the watchpoint value matches. */
- val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
- if (val != (addr & ~alignment_mask))
- goto unlock;
-
- /* Possible match, check the byte address select to confirm. */
- ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
- decode_ctrl_reg(ctrl_reg, &ctrl);
- if (!((1 << (addr & alignment_mask)) & ctrl.len))
- goto unlock;
+ continue;
/*
* Check that the access type matches.
@@ -712,18 +766,41 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
HW_BREAKPOINT_R;
if (!(access & hw_breakpoint_type(wp)))
- goto unlock;
+ continue;
+ /* Check if the watchpoint value and byte select match. */
+ val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
+ ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
+ decode_ctrl_reg(ctrl_reg, &ctrl);
+ dist = get_distance_from_watchpoint(addr, val, &ctrl);
+ if (dist < min_dist) {
+ min_dist = dist;
+ closest_match = i;
+ }
+ /* Is this an exact match? */
+ if (dist != 0)
+ continue;
+
+ info = counter_arch_bp(wp);
info->trigger = addr;
perf_bp_event(wp, regs);
/* Do we need to handle the stepping? */
if (is_default_overflow_handler(wp))
step = 1;
+ }
+ if (min_dist > 0 && min_dist != -1) {
+ /* No exact match found. */
+ wp = slots[closest_match];
+ info = counter_arch_bp(wp);
+ info->trigger = addr;
+ perf_bp_event(wp, regs);
-unlock:
- rcu_read_unlock();
+ /* Do we need to handle the stepping? */
+ if (is_default_overflow_handler(wp))
+ step = 1;
}
+ rcu_read_unlock();
if (!step)
return 0;
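
The rework keeps scanning all watchpoint slots, computes each one's distance from the reported address, and falls back to the closest match when no slot matches exactly. The distance itself is just the gap to the watched byte range [val + lens, val + lene]. A standalone copy of that computation with sample numbers:

    #include <stdio.h>
    #include <stdint.h>

    /* Distance of 'addr' from the bytes watched at [val + lens, val + lene];
     * 0 means an exact hit.  'len' is the non-zero byte-address-select mask. */
    static uint64_t distance_from_watchpoint(uint64_t addr, uint64_t val,
                                             unsigned int len)
    {
            uint64_t wp_low  = val + __builtin_ctz(len);        /* lens */
            uint64_t wp_high = val + 31 - __builtin_clz(len);   /* lene */

            if (addr < wp_low)
                    return wp_low - addr;
            if (addr > wp_high)
                    return addr - wp_high;
            return 0;
    }

    int main(void)
    {
            /* 2-byte watchpoint at offset 4 within the doubleword at 0x1000. */
            unsigned int len = 0x3u << 4;

            printf("%llu\n", (unsigned long long)
                   distance_from_watchpoint(0x1005, 0x1000, len));      /* 0 */
            printf("%llu\n", (unsigned long long)
                   distance_from_watchpoint(0x1002, 0x1000, len));      /* 2 */
            printf("%llu\n", (unsigned long long)
                   distance_from_watchpoint(0x1008, 0x1000, len));      /* 3 */
            return 0;
    }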
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 6f2ac4fc66ca..94b62c1fa4df 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -30,7 +30,6 @@
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
-#include <asm/opcodes.h>
#include <asm/insn.h>
#define AARCH64_INSN_SF_BIT BIT(31)
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index e017a9493b92..d217c9e95b06 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -247,6 +247,9 @@ NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
{
+ if (!kgdb_single_step)
+ return DBG_HOOK_ERROR;
+
kgdb_handle_exception(1, SIGTRAP, 0, regs);
return 0;
}
diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile
index ce06312e3d34..89b6df613dde 100644
--- a/arch/arm64/kernel/probes/Makefile
+++ b/arch/arm64/kernel/probes/Makefile
@@ -1,3 +1,5 @@
obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o \
kprobes_trampoline.o \
simulate-insn.o
+obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o \
+ simulate-insn.o
diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
index d1731bf977ef..6bf6657a5a52 100644
--- a/arch/arm64/kernel/probes/decode-insn.c
+++ b/arch/arm64/kernel/probes/decode-insn.c
@@ -17,7 +17,6 @@
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
-#include <asm/kprobes.h>
#include <asm/insn.h>
#include <asm/sections.h>
@@ -78,8 +77,8 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn)
* INSN_GOOD If instruction is supported and uses instruction slot,
* INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
*/
-static enum kprobe_insn __kprobes
-arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+enum probe_insn __kprobes
+arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *api)
{
/*
* Instructions reading or modifying the PC won't work from the XOL
@@ -89,26 +88,26 @@ arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
return INSN_GOOD;
if (aarch64_insn_is_bcond(insn)) {
- asi->handler = simulate_b_cond;
+ api->handler = simulate_b_cond;
} else if (aarch64_insn_is_cbz(insn) ||
aarch64_insn_is_cbnz(insn)) {
- asi->handler = simulate_cbz_cbnz;
+ api->handler = simulate_cbz_cbnz;
} else if (aarch64_insn_is_tbz(insn) ||
aarch64_insn_is_tbnz(insn)) {
- asi->handler = simulate_tbz_tbnz;
+ api->handler = simulate_tbz_tbnz;
} else if (aarch64_insn_is_adr_adrp(insn)) {
- asi->handler = simulate_adr_adrp;
+ api->handler = simulate_adr_adrp;
} else if (aarch64_insn_is_b(insn) ||
aarch64_insn_is_bl(insn)) {
- asi->handler = simulate_b_bl;
+ api->handler = simulate_b_bl;
} else if (aarch64_insn_is_br(insn) ||
aarch64_insn_is_blr(insn) ||
aarch64_insn_is_ret(insn)) {
- asi->handler = simulate_br_blr_ret;
+ api->handler = simulate_br_blr_ret;
} else if (aarch64_insn_is_ldr_lit(insn)) {
- asi->handler = simulate_ldr_literal;
+ api->handler = simulate_ldr_literal;
} else if (aarch64_insn_is_ldrsw_lit(insn)) {
- asi->handler = simulate_ldrsw_literal;
+ api->handler = simulate_ldrsw_literal;
} else {
/*
* Instruction cannot be stepped out-of-line and we don't
@@ -120,6 +119,7 @@ arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
return INSN_GOOD_NO_SLOT;
}
+#ifdef CONFIG_KPROBES
static bool __kprobes
is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
{
@@ -138,12 +138,12 @@ is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
return false;
}
-enum kprobe_insn __kprobes
+enum probe_insn __kprobes
arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
{
- enum kprobe_insn decoded;
- kprobe_opcode_t insn = le32_to_cpu(*addr);
- kprobe_opcode_t *scan_end = NULL;
+ enum probe_insn decoded;
+ probe_opcode_t insn = le32_to_cpu(*addr);
+ probe_opcode_t *scan_end = NULL;
unsigned long size = 0, offset = 0;
/*
@@ -162,7 +162,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
else
scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
}
- decoded = arm_probe_decode_insn(insn, asi);
+ decoded = arm_probe_decode_insn(insn, &asi->api);
if (decoded != INSN_REJECTED && scan_end)
if (is_probed_address_atomic(addr - 1, scan_end))
@@ -170,3 +170,4 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
return decoded;
}
+#endif
diff --git a/arch/arm64/kernel/probes/decode-insn.h b/arch/arm64/kernel/probes/decode-insn.h
index d438289646a6..76d3f315407f 100644
--- a/arch/arm64/kernel/probes/decode-insn.h
+++ b/arch/arm64/kernel/probes/decode-insn.h
@@ -23,13 +23,17 @@
*/
#define MAX_ATOMIC_CONTEXT_SIZE (128 / sizeof(kprobe_opcode_t))
-enum kprobe_insn {
+enum probe_insn {
INSN_REJECTED,
INSN_GOOD_NO_SLOT,
INSN_GOOD,
};
-enum kprobe_insn __kprobes
+#ifdef CONFIG_KPROBES
+enum probe_insn __kprobes
arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi);
+#endif
+enum probe_insn __kprobes
+arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *asi);
#endif /* _ARM_KERNEL_KPROBES_ARM64_H */
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index f5077ea7af6d..1decd2b2c730 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -44,31 +44,31 @@ post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
/* prepare insn slot */
- p->ainsn.insn[0] = cpu_to_le32(p->opcode);
+ p->ainsn.api.insn[0] = cpu_to_le32(p->opcode);
- flush_icache_range((uintptr_t) (p->ainsn.insn),
- (uintptr_t) (p->ainsn.insn) +
+ flush_icache_range((uintptr_t) (p->ainsn.api.insn),
+ (uintptr_t) (p->ainsn.api.insn) +
MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
/*
* Needs restoring of return address after stepping xol.
*/
- p->ainsn.restore = (unsigned long) p->addr +
+ p->ainsn.api.restore = (unsigned long) p->addr +
sizeof(kprobe_opcode_t);
}
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
/* This instruction is not executed out of line (xol). No need to adjust the PC */
- p->ainsn.restore = 0;
+ p->ainsn.api.restore = 0;
}
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- if (p->ainsn.handler)
- p->ainsn.handler((u32)p->opcode, (long)p->addr, regs);
+ if (p->ainsn.api.handler)
+ p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);
/* single step simulated, now go for post processing */
post_kprobe_handler(kcb, regs);
@@ -98,18 +98,18 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
return -EINVAL;
case INSN_GOOD_NO_SLOT: /* insn need simulation */
- p->ainsn.insn = NULL;
+ p->ainsn.api.insn = NULL;
break;
case INSN_GOOD: /* instruction uses slot */
- p->ainsn.insn = get_insn_slot();
- if (!p->ainsn.insn)
+ p->ainsn.api.insn = get_insn_slot();
+ if (!p->ainsn.api.insn)
return -ENOMEM;
break;
};
/* prepare the instruction */
- if (p->ainsn.insn)
+ if (p->ainsn.api.insn)
arch_prepare_ss_slot(p);
else
arch_prepare_simulate(p);
@@ -142,9 +142,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
- if (p->ainsn.insn) {
- free_insn_slot(p->ainsn.insn, 0);
- p->ainsn.insn = NULL;
+ if (p->ainsn.api.insn) {
+ free_insn_slot(p->ainsn.api.insn, 0);
+ p->ainsn.api.insn = NULL;
}
}
@@ -244,9 +244,9 @@ static void __kprobes setup_singlestep(struct kprobe *p,
}
- if (p->ainsn.insn) {
+ if (p->ainsn.api.insn) {
/* prepare for single stepping */
- slot = (unsigned long)p->ainsn.insn;
+ slot = (unsigned long)p->ainsn.api.insn;
set_ss_context(kcb, slot); /* mark pending ss */
@@ -295,8 +295,8 @@ post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
return;
/* return addr restore if non-branching insn */
- if (cur->ainsn.restore != 0)
- instruction_pointer_set(regs, cur->ainsn.restore);
+ if (cur->ainsn.api.restore != 0)
+ instruction_pointer_set(regs, cur->ainsn.api.restore);
/* restore back original saved kprobe variables and continue */
if (kcb->kprobe_status == KPROBE_REENTER) {
diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c
index 8977ce9d009d..357d3efe1366 100644
--- a/arch/arm64/kernel/probes/simulate-insn.c
+++ b/arch/arm64/kernel/probes/simulate-insn.c
@@ -13,28 +13,26 @@
* General Public License for more details.
*/
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include "simulate-insn.h"
-#define sign_extend(x, signbit) \
- ((x) | (0 - ((x) & (1 << (signbit)))))
-
#define bbl_displacement(insn) \
- sign_extend(((insn) & 0x3ffffff) << 2, 27)
+ sign_extend32(((insn) & 0x3ffffff) << 2, 27)
#define bcond_displacement(insn) \
- sign_extend(((insn >> 5) & 0x7ffff) << 2, 20)
+ sign_extend32(((insn >> 5) & 0x7ffff) << 2, 20)
#define cbz_displacement(insn) \
- sign_extend(((insn >> 5) & 0x7ffff) << 2, 20)
+ sign_extend32(((insn >> 5) & 0x7ffff) << 2, 20)
#define tbz_displacement(insn) \
- sign_extend(((insn >> 5) & 0x3fff) << 2, 15)
+ sign_extend32(((insn >> 5) & 0x3fff) << 2, 15)
#define ldr_displacement(insn) \
- sign_extend(((insn >> 5) & 0x7ffff) << 2, 20)
+ sign_extend32(((insn >> 5) & 0x7ffff) << 2, 20)
static inline void set_x_reg(struct pt_regs *regs, int reg, u64 val)
{
@@ -106,7 +104,7 @@ simulate_adr_adrp(u32 opcode, long addr, struct pt_regs *regs)
xn = opcode & 0x1f;
imm = ((opcode >> 3) & 0x1ffffc) | ((opcode >> 29) & 0x3);
- imm = sign_extend(imm, 20);
+ imm = sign_extend64(imm, 20);
if (opcode & 0x80000000)
val = (imm<<12) + (addr & 0xfffffffffffff000);
else
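/*
 * Illustrative sketch, not part of the patch: decoding a B/BL branch
 * displacement the way the bbl_displacement() macro above now does it,
 * with a local sign_extend32() mirroring the <linux/bitops.h> helper.
 * The instruction encodings in main() are ordinary AArch64 B opcodes.
 */
#include <stdint.h>
#include <stdio.h>

static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

static int32_t bbl_displacement(uint32_t insn)
{
	/* imm26 lives in bits [25:0]; the byte offset is imm26 * 4. */
	return sign_extend32((insn & 0x3ffffff) << 2, 27);
}

int main(void)
{
	printf("%d\n", bbl_displacement(0x17ffffff));	/* b #-4  -> -4 */
	printf("%d\n", bbl_displacement(0x14000002));	/* b #+8  ->  8 */
	return 0;
}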
diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
new file mode 100644
index 000000000000..26c998534dca
--- /dev/null
+++ b/arch/arm64/kernel/probes/uprobes.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/highmem.h>
+#include <linux/ptrace.h>
+#include <linux/uprobes.h>
+#include <asm/cacheflush.h>
+
+#include "decode-insn.h"
+
+#define UPROBE_INV_FAULT_CODE UINT_MAX
+
+void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+ void *src, unsigned long len)
+{
+ void *xol_page_kaddr = kmap_atomic(page);
+ void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);
+
+ /* Initialize the slot */
+ memcpy(dst, src, len);
+
+ /* flush caches (dcache/icache) */
+ sync_icache_aliases(dst, len);
+
+ kunmap_atomic(xol_page_kaddr);
+}
+
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
+ unsigned long addr)
+{
+ probe_opcode_t insn;
+
+ /* TODO: Currently we do not support AARCH32 instruction probing */
+ if (test_bit(TIF_32BIT, &mm->context.flags))
+ return -ENOTSUPP;
+ else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
+ return -EINVAL;
+
+ insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+
+ switch (arm_probe_decode_insn(insn, &auprobe->api)) {
+ case INSN_REJECTED:
+ return -EINVAL;
+
+ case INSN_GOOD_NO_SLOT:
+ auprobe->simulate = true;
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ /* Initialize with an invalid fault code to detect if the out-of-line insn trapped */
+ current->thread.fault_code = UPROBE_INV_FAULT_CODE;
+
+ /* Point the instruction pointer at the out-of-line (XOL) slot */
+ instruction_pointer_set(regs, utask->xol_vaddr);
+
+ user_enable_single_step(current);
+
+ return 0;
+}
+
+int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ WARN_ON_ONCE(current->thread.fault_code != UPROBE_INV_FAULT_CODE);
+
+ /* Point the instruction pointer at the instruction following the breakpoint */
+ instruction_pointer_set(regs, utask->vaddr + 4);
+
+ user_disable_single_step(current);
+
+ return 0;
+}
+bool arch_uprobe_xol_was_trapped(struct task_struct *t)
+{
+ /*
+ * If the XOL insn itself traps between arch_uprobe_pre_xol() and
+ * arch_uprobe_post_xol(), thread.fault_code is overwritten; detect
+ * that case by comparing against the invalid fault code set in
+ * arch_uprobe_pre_xol().
+ */
+ if (t->thread.fault_code != UPROBE_INV_FAULT_CODE)
+ return true;
+
+ return false;
+}
+
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ probe_opcode_t insn;
+ unsigned long addr;
+
+ if (!auprobe->simulate)
+ return false;
+
+ insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+ addr = instruction_pointer(regs);
+
+ if (auprobe->api.handler)
+ auprobe->api.handler(insn, addr, regs);
+
+ return true;
+}
+
+void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ /*
+ * Task has received a fatal signal, so reset back to the probed
+ * address.
+ */
+ instruction_pointer_set(regs, utask->vaddr);
+
+ user_disable_single_step(current);
+}
+
+bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+ struct pt_regs *regs)
+{
+ /*
+ * If a retprobed assembly label is reached via a plain branch (B),
+ * return true even when regs->sp and ret->stack are the same. This
+ * ensures that cleanup and reporting of the return instances for the
+ * callee label happen when handle_trampoline() runs for the calling
+ * function.
+ */
+ if (ctx == RP_CHECK_CHAIN_CALL)
+ return regs->sp <= ret->stack;
+ else
+ return regs->sp < ret->stack;
+}
+
+unsigned long
+arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
+ struct pt_regs *regs)
+{
+ unsigned long orig_ret_vaddr;
+
+ orig_ret_vaddr = procedure_link_pointer(regs);
+ /* Replace the return addr with trampoline addr */
+ procedure_link_pointer_set(regs, trampoline_vaddr);
+
+ return orig_ret_vaddr;
+}
+
+int arch_uprobe_exception_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ return NOTIFY_DONE;
+}
+
+static int uprobe_breakpoint_handler(struct pt_regs *regs,
+ unsigned int esr)
+{
+ if (user_mode(regs) && uprobe_pre_sstep_notifier(regs))
+ return DBG_HOOK_HANDLED;
+
+ return DBG_HOOK_ERROR;
+}
+
+static int uprobe_single_step_handler(struct pt_regs *regs,
+ unsigned int esr)
+{
+ struct uprobe_task *utask = current->utask;
+
+ if (user_mode(regs)) {
+ WARN_ON(utask &&
+ (instruction_pointer(regs) != utask->xol_vaddr + 4));
+
+ if (uprobe_post_sstep_notifier(regs))
+ return DBG_HOOK_HANDLED;
+ }
+
+ return DBG_HOOK_ERROR;
+}
+
+/* uprobe breakpoint handler hook */
+static struct break_hook uprobes_break_hook = {
+ .esr_mask = BRK64_ESR_MASK,
+ .esr_val = BRK64_ESR_UPROBES,
+ .fn = uprobe_breakpoint_handler,
+};
+
+/* uprobe single step handler hook */
+static struct step_hook uprobes_step_hook = {
+ .fn = uprobe_single_step_handler,
+};
+
+static int __init arch_init_uprobes(void)
+{
+ register_break_hook(&uprobes_break_hook);
+ register_step_hook(&uprobes_step_hook);
+
+ return 0;
+}
+
+device_initcall(arch_init_uprobes);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 01753cd7d3f0..a3a2816ba73a 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -45,6 +45,7 @@
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
+#include <linux/percpu.h>
#include <asm/alternative.h>
#include <asm/compat.h>
@@ -282,7 +283,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
memset(childregs, 0, sizeof(struct pt_regs));
childregs->pstate = PSR_MODE_EL1h;
if (IS_ENABLED(CONFIG_ARM64_UAO) &&
- cpus_have_cap(ARM64_HAS_UAO))
+ cpus_have_const_cap(ARM64_HAS_UAO))
childregs->pstate |= PSR_UAO_BIT;
p->thread.cpu_context.x19 = stack_start;
p->thread.cpu_context.x20 = stk_sz;
@@ -322,6 +323,20 @@ void uao_thread_switch(struct task_struct *next)
}
/*
+ * We store our current task in sp_el0, which is clobbered by userspace. Keep a
+ * shadow copy so that we can restore this upon entry from userspace.
+ *
+ * This is *only* for exception entry from EL0, and is not valid until we
+ * __switch_to() a user task.
+ */
+DEFINE_PER_CPU(struct task_struct *, __entry_task);
+
+static void entry_task_switch(struct task_struct *next)
+{
+ __this_cpu_write(__entry_task, next);
+}
+
+/*
* Thread switching.
*/
struct task_struct *__switch_to(struct task_struct *prev,
@@ -333,6 +348,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
tls_thread_switch(next);
hw_breakpoint_thread_switch(next);
contextidr_thread_switch(next);
+ entry_task_switch(next);
uao_thread_switch(next);
/*
@@ -350,27 +366,35 @@ struct task_struct *__switch_to(struct task_struct *prev,
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
- unsigned long stack_page;
+ unsigned long stack_page, ret = 0;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
+ stack_page = (unsigned long)try_get_task_stack(p);
+ if (!stack_page)
+ return 0;
+
frame.fp = thread_saved_fp(p);
frame.sp = thread_saved_sp(p);
frame.pc = thread_saved_pc(p);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = p->curr_ret_stack;
#endif
- stack_page = (unsigned long)task_stack_page(p);
do {
if (frame.sp < stack_page ||
frame.sp >= stack_page + THREAD_SIZE ||
unwind_frame(p, &frame))
- return 0;
- if (!in_sched_functions(frame.pc))
- return frame.pc;
+ goto out;
+ if (!in_sched_functions(frame.pc)) {
+ ret = frame.pc;
+ goto out;
+ }
} while (count ++ < 16);
- return 0;
+
+out:
+ put_task_stack(p);
+ return ret;
}
unsigned long arch_align_stack(unsigned long sp)
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index e0c81da60f76..fc35e06ccaac 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -327,13 +327,13 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
struct arch_hw_breakpoint_ctrl ctrl,
struct perf_event_attr *attr)
{
- int err, len, type, disabled = !ctrl.enabled;
+ int err, len, type, offset, disabled = !ctrl.enabled;
attr->disabled = disabled;
if (disabled)
return 0;
- err = arch_bp_generic_fields(ctrl, &len, &type);
+ err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
if (err)
return err;
@@ -352,6 +352,7 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
attr->bp_len = len;
attr->bp_type = type;
+ attr->bp_addr += offset;
return 0;
}
@@ -404,7 +405,7 @@ static int ptrace_hbp_get_addr(unsigned int note_type,
if (IS_ERR(bp))
return PTR_ERR(bp);
- *addr = bp ? bp->attr.bp_addr : 0;
+ *addr = bp ? counter_arch_bp(bp)->address : 0;
return 0;
}
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 1718706fde83..12a87f2600f2 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -12,6 +12,7 @@
#include <linux/export.h>
#include <linux/ftrace.h>
+#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
struct return_address_data {
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index f534f492a268..a53f52ac81c6 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -291,6 +291,15 @@ void __init setup_arch(char **cmdline_p)
smp_init_cpus();
smp_build_mpidr_hash();
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ /*
+ * Make sure init_thread_info.ttbr0 always generates translation
+ * faults in case uaccess_enable() is inadvertently called by the init
+ * thread.
+ */
+ init_task.thread_info.ttbr0 = virt_to_phys(empty_zero_page);
+#endif
+
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
conswitchp = &vga_con;
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 404dd67080b9..c7b6de62f9d3 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -414,6 +414,9 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
} else {
local_irq_enable();
+ if (thread_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+
if (thread_flags & _TIF_SIGPENDING)
do_signal(regs);
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 1bec41b5fda3..df67652e46f0 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -125,9 +125,6 @@ ENTRY(_cpu_resume)
/* load sp from context */
ldr x2, [x0, #CPU_CTX_SP]
mov sp, x2
- /* save thread_info */
- and x2, x2, #~(THREAD_SIZE - 1)
- msr sp_el0, x2
/*
* cpu_do_resume expects x0 to contain context address pointer
*/
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 8507703dabe4..cb87234cfcf2 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -58,6 +58,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
+DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
+
/*
* as from 2.5, kernels no longer have an init_tasks structure
* so we need some other way of telling a new secondary core
@@ -146,6 +149,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
* We need to tell the secondary core where to find its stack and the
* page tables.
*/
+ secondary_data.task = idle;
secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
update_cpu_boot_status(CPU_MMU_OFF);
__flush_dcache_area(&secondary_data, sizeof(secondary_data));
@@ -170,6 +174,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
}
+ secondary_data.task = NULL;
secondary_data.stack = NULL;
status = READ_ONCE(secondary_data.status);
if (ret && status) {
@@ -208,7 +213,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
asmlinkage void secondary_start_kernel(void)
{
struct mm_struct *mm = &init_mm;
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu;
+
+ cpu = task_cpu(current);
+ set_my_cpu_offset(per_cpu_offset(cpu));
/*
* All kernel threads share the same mm context; grab a
@@ -217,8 +225,6 @@ asmlinkage void secondary_start_kernel(void)
atomic_inc(&mm->mm_count);
current->active_mm = mm;
- set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
* point to zero page to avoid speculatively fetching new entries.
@@ -718,6 +724,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
*/
for_each_possible_cpu(cpu) {
+ per_cpu(cpu_number, cpu) = cpu;
+
if (cpu == smp_processor_id())
continue;
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index c2efddfca18c..8a552a33c6ef 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -22,6 +22,7 @@
#include <linux/stacktrace.h>
#include <asm/irq.h>
+#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
/*
@@ -128,7 +129,6 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
break;
}
}
-EXPORT_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
@@ -181,6 +181,9 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
struct stack_trace_data data;
struct stackframe frame;
+ if (!try_get_task_stack(tsk))
+ return;
+
data.trace = trace;
data.skip = trace->skip;
@@ -202,6 +205,8 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
walk_stackframe(tsk, &frame, save_trace, &data);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
+
+ put_task_stack(tsk);
}
void save_stack_trace(struct stack_trace *trace)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index bb0cd787a9d3..1e3be9064cfa 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -47,12 +47,6 @@ void notrace __cpu_suspend_exit(void)
cpu_uninstall_idmap();
/*
- * Restore per-cpu offset before any kernel
- * subsystem relying on it has a chance to run.
- */
- set_my_cpu_offset(per_cpu_offset(cpu));
-
- /*
* PSTATE was not saved over suspend/resume, re-enable any detected
* features that might not have been set correctly.
*/
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 694f6deedbab..23e9e13bd2aa 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -19,10 +19,226 @@
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/cpufreq.h>
+#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>
+static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+static DEFINE_MUTEX(cpu_scale_mutex);
+
+unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+{
+ return per_cpu(cpu_scale, cpu);
+}
+
+static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
+{
+ per_cpu(cpu_scale, cpu) = capacity;
+}
+
+#ifdef CONFIG_PROC_SYSCTL
+static ssize_t cpu_capacity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+
+ return sprintf(buf, "%lu\n",
+ arch_scale_cpu_capacity(NULL, cpu->dev.id));
+}
+
+static ssize_t cpu_capacity_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ int this_cpu = cpu->dev.id, i;
+ unsigned long new_capacity;
+ ssize_t ret;
+
+ if (count) {
+ ret = kstrtoul(buf, 0, &new_capacity);
+ if (ret)
+ return ret;
+ if (new_capacity > SCHED_CAPACITY_SCALE)
+ return -EINVAL;
+
+ mutex_lock(&cpu_scale_mutex);
+ for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
+ set_capacity_scale(i, new_capacity);
+ mutex_unlock(&cpu_scale_mutex);
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(cpu_capacity);
+
+static int register_cpu_capacity_sysctl(void)
+{
+ int i;
+ struct device *cpu;
+
+ for_each_possible_cpu(i) {
+ cpu = get_cpu_device(i);
+ if (!cpu) {
+ pr_err("%s: too early to get CPU%d device!\n",
+ __func__, i);
+ continue;
+ }
+ device_create_file(cpu, &dev_attr_cpu_capacity);
+ }
+
+ return 0;
+}
+subsys_initcall(register_cpu_capacity_sysctl);
+#endif
+
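/*
 * Illustrative sketch, not part of the patch: reading back the
 * cpu_capacity attribute created above from userspace. The sysfs path
 * follows from get_cpu_device(); the value printed is the normalised
 * capacity in the 0..1024 range.
 */
#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpu_capacity", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("cpu0 capacity: %s", buf);
	fclose(f);
	return 0;
}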
+static u32 capacity_scale;
+static u32 *raw_capacity;
+static bool cap_parsing_failed;
+
+static void __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
+{
+ int ret;
+ u32 cpu_capacity;
+
+ if (cap_parsing_failed)
+ return;
+
+ ret = of_property_read_u32(cpu_node,
+ "capacity-dmips-mhz",
+ &cpu_capacity);
+ if (!ret) {
+ if (!raw_capacity) {
+ raw_capacity = kcalloc(num_possible_cpus(),
+ sizeof(*raw_capacity),
+ GFP_KERNEL);
+ if (!raw_capacity) {
+ pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
+ cap_parsing_failed = true;
+ return;
+ }
+ }
+ capacity_scale = max(cpu_capacity, capacity_scale);
+ raw_capacity[cpu] = cpu_capacity;
+ pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
+ cpu_node->full_name, raw_capacity[cpu]);
+ } else {
+ if (raw_capacity) {
+ pr_err("cpu_capacity: missing %s raw capacity\n",
+ cpu_node->full_name);
+ pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
+ }
+ cap_parsing_failed = true;
+ kfree(raw_capacity);
+ }
+}
+
+static void normalize_cpu_capacity(void)
+{
+ u64 capacity;
+ int cpu;
+
+ if (!raw_capacity || cap_parsing_failed)
+ return;
+
+ pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
+ mutex_lock(&cpu_scale_mutex);
+ for_each_possible_cpu(cpu) {
+ pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
+ cpu, raw_capacity[cpu]);
+ capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
+ / capacity_scale;
+ set_capacity_scale(cpu, capacity);
+ pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
+ cpu, arch_scale_cpu_capacity(NULL, cpu));
+ }
+ mutex_unlock(&cpu_scale_mutex);
+}
+
+#ifdef CONFIG_CPU_FREQ
+static cpumask_var_t cpus_to_visit;
+static bool cap_parsing_done;
+static void parsing_done_workfn(struct work_struct *work);
+static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
+
+static int
+init_cpu_capacity_callback(struct notifier_block *nb,
+ unsigned long val,
+ void *data)
+{
+ struct cpufreq_policy *policy = data;
+ int cpu;
+
+ if (cap_parsing_failed || cap_parsing_done)
+ return 0;
+
+ switch (val) {
+ case CPUFREQ_NOTIFY:
+ pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
+ cpumask_pr_args(policy->related_cpus),
+ cpumask_pr_args(cpus_to_visit));
+ cpumask_andnot(cpus_to_visit,
+ cpus_to_visit,
+ policy->related_cpus);
+ for_each_cpu(cpu, policy->related_cpus) {
+ raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
+ policy->cpuinfo.max_freq / 1000UL;
+ capacity_scale = max(raw_capacity[cpu], capacity_scale);
+ }
+ if (cpumask_empty(cpus_to_visit)) {
+ normalize_cpu_capacity();
+ kfree(raw_capacity);
+ pr_debug("cpu_capacity: parsing done\n");
+ cap_parsing_done = true;
+ schedule_work(&parsing_done_work);
+ }
+ }
+ return 0;
+}
+
+static struct notifier_block init_cpu_capacity_notifier = {
+ .notifier_call = init_cpu_capacity_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+ if (cap_parsing_failed)
+ return -EINVAL;
+
+ if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
+ pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
+ return -ENOMEM;
+ }
+ cpumask_copy(cpus_to_visit, cpu_possible_mask);
+
+ return cpufreq_register_notifier(&init_cpu_capacity_notifier,
+ CPUFREQ_POLICY_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+static void parsing_done_workfn(struct work_struct *work)
+{
+ cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
+ CPUFREQ_POLICY_NOTIFIER);
+}
+
+#else
+static int __init free_raw_capacity(void)
+{
+ kfree(raw_capacity);
+
+ return 0;
+}
+core_initcall(free_raw_capacity);
+#endif
+
static int __init get_cpu_for_node(struct device_node *node)
{
struct device_node *cpu_node;
@@ -34,6 +250,7 @@ static int __init get_cpu_for_node(struct device_node *node)
for_each_possible_cpu(cpu) {
if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+ parse_cpu_capacity(cpu_node, cpu);
of_node_put(cpu_node);
return cpu;
}
@@ -178,13 +395,17 @@ static int __init parse_dt_topology(void)
* cluster with restricted subnodes.
*/
map = of_get_child_by_name(cn, "cpu-map");
- if (!map)
+ if (!map) {
+ cap_parsing_failed = true;
goto out;
+ }
ret = parse_cluster(map, 0);
if (ret != 0)
goto out_map;
+ normalize_cpu_capacity();
+
/*
* Check that all cores are in the topology; the SMP code will
* only mark cores described in the DT as possible.
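/*
 * Illustrative sketch, not part of the patch: the arithmetic performed
 * by init_cpu_capacity_callback() and normalize_cpu_capacity() above.
 * Each CPU's capacity-dmips-mhz value is scaled by its maximum cpufreq
 * frequency (kHz / 1000) and the result is normalised so the biggest
 * CPU ends up at SCHED_CAPACITY_SCALE (1024). The DT values and
 * frequencies below are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10

int main(void)
{
	uint64_t raw[4]     = { 578, 578, 1024, 1024 };			/* capacity-dmips-mhz */
	uint64_t max_khz[4] = { 1400000, 1400000, 1800000, 1800000 };	/* cpuinfo.max_freq */
	uint64_t capacity_scale = 0;
	int cpu;

	for (cpu = 0; cpu < 4; cpu++) {
		raw[cpu] = raw[cpu] * max_khz[cpu] / 1000UL;
		if (raw[cpu] > capacity_scale)
			capacity_scale = raw[cpu];
	}

	for (cpu = 0; cpu < 4; cpu++)
		printf("cpu%d capacity=%llu\n", cpu,
		       (unsigned long long)((raw[cpu] << SCHED_CAPACITY_SHIFT) /
					    capacity_scale));
	return 0;
}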
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index c9986b3e0a96..5b830be79c01 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -38,6 +38,7 @@
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/traps.h>
+#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
#include <asm/system_misc.h>
@@ -147,6 +148,9 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
if (!tsk)
tsk = current;
+ if (!try_get_task_stack(tsk))
+ return;
+
/*
* Switching between stacks is valid when tracing current and in
* non-preemptible context.
@@ -212,6 +216,8 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
stack + sizeof(struct pt_regs));
}
}
+
+ put_task_stack(tsk);
}
void show_stack(struct task_struct *tsk, unsigned long *sp)
@@ -227,10 +233,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
#endif
#define S_SMP " SMP"
-static int __die(const char *str, int err, struct thread_info *thread,
- struct pt_regs *regs)
+static int __die(const char *str, int err, struct pt_regs *regs)
{
- struct task_struct *tsk = thread->task;
+ struct task_struct *tsk = current;
static int die_counter;
int ret;
@@ -245,7 +250,8 @@ static int __die(const char *str, int err, struct thread_info *thread,
print_modules();
__show_regs(regs);
pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
- TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
+ TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
+ end_of_stack(tsk));
if (!user_mode(regs)) {
dump_mem(KERN_EMERG, "Stack: ", regs->sp,
@@ -264,7 +270,6 @@ static DEFINE_RAW_SPINLOCK(die_lock);
*/
void die(const char *str, struct pt_regs *regs, int err)
{
- struct thread_info *thread = current_thread_info();
int ret;
oops_enter();
@@ -272,9 +277,9 @@ void die(const char *str, struct pt_regs *regs, int err)
raw_spin_lock_irq(&die_lock);
console_verbose();
bust_spinlocks(1);
- ret = __die(str, err, thread, regs);
+ ret = __die(str, err, regs);
- if (regs && kexec_should_crash(thread->task))
+ if (regs && kexec_should_crash(current))
crash_kexec(regs);
bust_spinlocks(0);
@@ -435,9 +440,10 @@ int cpu_enable_cache_maint_trap(void *__unused)
}
#define __user_cache_maint(insn, address, res) \
- if (untagged_addr(address) >= user_addr_max()) \
+ if (untagged_addr(address) >= user_addr_max()) { \
res = -EFAULT; \
- else \
+ } else { \
+ uaccess_ttbr0_enable(); \
asm volatile ( \
"1: " insn ", %1\n" \
" mov %w0, #0\n" \
@@ -449,7 +455,9 @@ int cpu_enable_cache_maint_trap(void *__unused)
" .popsection\n" \
_ASM_EXTABLE(1b, 3b) \
: "=r" (res) \
- : "r" (address), "i" (-EFAULT) )
+ : "r" (address), "i" (-EFAULT)); \
+ uaccess_ttbr0_disable(); \
+ }
static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 1105aab1e6d6..b8deffa9e1bf 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -216,6 +216,11 @@ SECTIONS
swapper_pg_dir = .;
. += SWAPPER_DIR_SIZE;
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ reserved_ttbr0 = .;
+ . += RESERVED_TTBR0_SIZE;
+#endif
+
_end = .;
STABS_DEBUG
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 6eaf12c1d627..52cb7ad9b2fd 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -16,9 +16,6 @@ menuconfig VIRTUALIZATION
if VIRTUALIZATION
-config KVM_ARM_VGIC_V3_ITS
- bool
-
config KVM
bool "Kernel-based Virtual Machine (KVM) support"
depends on OF
@@ -34,7 +31,6 @@ config KVM
select KVM_VFIO
select HAVE_KVM_EVENTFD
select HAVE_KVM_IRQFD
- select KVM_ARM_VGIC_V3_ITS
select KVM_ARM_PMU if HW_PERF_EVENTS
select HAVE_KVM_MSI
select HAVE_KVM_IRQCHIP
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index a204adf29f0a..1bfe30dfbfe7 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -57,6 +57,16 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
+/*
+ * Guest accesses to FP/ASIMD registers are routed to this handler only
+ * when the system doesn't support FP/ASIMD.
+ */
+static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
/**
* kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
* instruction executed by a guest
@@ -144,6 +154,7 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
[ESR_ELx_EC_BKPT32] = kvm_handle_guest_debug,
[ESR_ELx_EC_BRK64] = kvm_handle_guest_debug,
+ [ESR_ELx_EC_FP_ASIMD] = handle_no_fpsimd,
};
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 4e92399f7105..5e9052f087f2 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -106,9 +106,16 @@ el1_trap:
* x0: ESR_EC
*/
- /* Guest accessed VFP/SIMD registers, save host, restore Guest */
+ /*
+ * We trap the first access to the FP/SIMD to save the host context
+ * and restore the guest context lazily.
+ * If FP/SIMD is not implemented, handle the trap and inject an
+ * undefined instruction exception to the guest.
+ */
+alternative_if_not ARM64_HAS_NO_FPSIMD
cmp x0, #ESR_ELx_EC_FP_ASIMD
b.eq __fpsimd_guest_restore
+alternative_else_nop_endif
mrs x1, tpidr_el2
mov x0, #ARM_EXCEPTION_TRAP
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 83037cd62d01..75e83dd40d43 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -21,6 +21,7 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
+#include <asm/fpsimd.h>
static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
@@ -76,16 +77,24 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
* traps are only taken to EL2 if the operation would not otherwise
* trap to EL1. Therefore, always make sure that for 32-bit guests,
* we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
+ * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
+ * it will cause an exception.
*/
val = vcpu->arch.hcr_el2;
- if (!(val & HCR_RW)) {
+ if (!(val & HCR_RW) && system_supports_fpsimd()) {
write_sysreg(1 << 30, fpexc32_el2);
isb();
}
write_sysreg(val, hcr_el2);
/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
write_sysreg(1 << 15, hstr_el2);
- /* Make sure we trap PMU access from EL0 to EL2 */
+ /*
+ * Make sure we trap PMU access from EL0 to EL2. Also sanitize
+ * PMSELR_EL0 to make sure it never contains the cycle
+ * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
+ * EL1 instead of being trapped to EL2.
+ */
+ write_sysreg(0, pmselr_el0);
write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
__activate_traps_arch()();
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 5bc460884639..e95d4f68bf54 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -86,12 +86,6 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_VCPU_ATTRIBUTES:
r = 1;
break;
- case KVM_CAP_MSI_DEVID:
- if (!kvm)
- r = -EINVAL;
- else
- r = kvm->arch.vgic.msis_require_devid;
- break;
default:
r = 0;
}
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 5d1cad3ce6d6..d7150e30438a 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -17,10 +17,7 @@
*/
#include <linux/linkage.h>
-#include <asm/alternative.h>
-#include <asm/assembler.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>
.text
@@ -33,8 +30,7 @@
* Alignment fixed up by hardware.
*/
ENTRY(__clear_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
- CONFIG_ARM64_PAN)
+ uaccess_enable_not_uao x2, x3
mov x2, x1 // save the size for fixup return
subs x1, x1, #8
b.mi 2f
@@ -54,8 +50,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
b.mi 5f
uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
5: mov x0, #0
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
- CONFIG_ARM64_PAN)
+ uaccess_disable_not_uao x2
ret
ENDPROC(__clear_user)
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 4fd67ea03bb0..cfe13396085b 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -16,11 +16,8 @@
#include <linux/linkage.h>
-#include <asm/alternative.h>
-#include <asm/assembler.h>
#include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>
/*
* Copy from user space to a kernel buffer (alignment handled by the hardware)
@@ -67,12 +64,10 @@
end .req x5
ENTRY(__arch_copy_from_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
- CONFIG_ARM64_PAN)
+ uaccess_enable_not_uao x3, x4
add end, x0, x2
#include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
- CONFIG_ARM64_PAN)
+ uaccess_disable_not_uao x3
mov x0, #0 // Nothing to copy
ret
ENDPROC(__arch_copy_from_user)
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index f7292dd08c84..718b1c4e2f85 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -18,11 +18,8 @@
#include <linux/linkage.h>
-#include <asm/alternative.h>
-#include <asm/assembler.h>
#include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>
/*
* Copy from user space to user space (alignment handled by the hardware)
@@ -68,12 +65,10 @@
end .req x5
ENTRY(__copy_in_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
- CONFIG_ARM64_PAN)
+ uaccess_enable_not_uao x3, x4
add end, x0, x2
#include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
- CONFIG_ARM64_PAN)
+ uaccess_disable_not_uao x3
mov x0, #0
ret
ENDPROC(__copy_in_user)
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 7a7efe255034..e99e31c9acac 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -16,11 +16,8 @@
#include <linux/linkage.h>
-#include <asm/alternative.h>
-#include <asm/assembler.h>
#include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>
/*
* Copy to user space from a kernel buffer (alignment handled by the hardware)
@@ -66,12 +63,10 @@
end .req x5
ENTRY(__arch_copy_to_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
- CONFIG_ARM64_PAN)
+ uaccess_enable_not_uao x3, x4
add end, x0, x2
#include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
- CONFIG_ARM64_PAN)
+ uaccess_disable_not_uao x3
mov x0, #0
ret
ENDPROC(__arch_copy_to_user)
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 54bb209cae8e..e703fb9defad 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -3,7 +3,8 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
ioremap.o mmap.o pgd.o mmu.o \
context.o proc.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_ARM64_PTDUMP) += dump.o
+obj-$(CONFIG_ARM64_PTDUMP_CORE) += dump.o
+obj-$(CONFIG_ARM64_PTDUMP_DEBUGFS) += ptdump_debugfs.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_KASAN) += kasan_init.o
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 58b5a906ff78..da9576932322 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -23,6 +23,7 @@
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
+#include <asm/uaccess.h>
/*
* flush_icache_range(start,end)
@@ -48,6 +49,7 @@ ENTRY(flush_icache_range)
* - end - virtual end address of region
*/
ENTRY(__flush_cache_user_range)
+ uaccess_ttbr0_enable x2, x3
dcache_line_size x2, x3
sub x3, x2, #1
bic x4, x0, x3
@@ -69,10 +71,12 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU
dsb ish
isb
mov x0, #0
+1:
+ uaccess_ttbr0_disable x1
ret
9:
mov x0, #-EFAULT
- ret
+ b 1b
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index efcf1f7ef1e4..4c63cb154859 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -221,7 +221,12 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
switch_mm_fastpath:
- cpu_switch_mm(mm->pgd, mm);
+ /*
+ * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
+ * emulating PAN.
+ */
+ if (!system_uses_ttbr0_pan())
+ cpu_switch_mm(mm->pgd, mm);
}
static int asids_init(void)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 3f74d0d98de6..aa6c8f834d9e 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -938,11 +938,6 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
void arch_teardown_dma_ops(struct device *dev)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-
- if (WARN_ON(domain))
- iommu_detach_device(domain, dev);
-
dev->archdata.dma_ops = NULL;
}
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 9c3e75df2180..ca74a2aace42 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -50,6 +50,18 @@ static const struct addr_marker address_markers[] = {
{ -1, NULL },
};
+#define pt_dump_seq_printf(m, fmt, args...) \
+({ \
+ if (m) \
+ seq_printf(m, fmt, ##args); \
+})
+
+#define pt_dump_seq_puts(m, fmt) \
+({ \
+ if (m) \
+ seq_printf(m, fmt); \
+})
+
/*
* The page dumper groups page table entries of the same type into a single
* description. It uses pg_state to track the range information while
@@ -62,6 +74,9 @@ struct pg_state {
unsigned long start_address;
unsigned level;
u64 current_prot;
+ bool check_wx;
+ unsigned long wx_pages;
+ unsigned long uxn_pages;
};
struct prot_bits {
@@ -186,10 +201,39 @@ static void dump_prot(struct pg_state *st, const struct prot_bits *bits,
s = bits->clear;
if (s)
- seq_printf(st->seq, " %s", s);
+ pt_dump_seq_printf(st->seq, " %s", s);
}
}
+static void note_prot_uxn(struct pg_state *st, unsigned long addr)
+{
+ if (!st->check_wx)
+ return;
+
+ if ((st->current_prot & PTE_UXN) == PTE_UXN)
+ return;
+
+ WARN_ONCE(1, "arm64/mm: Found non-UXN mapping at address %p/%pS\n",
+ (void *)st->start_address, (void *)st->start_address);
+
+ st->uxn_pages += (addr - st->start_address) / PAGE_SIZE;
+}
+
+static void note_prot_wx(struct pg_state *st, unsigned long addr)
+{
+ if (!st->check_wx)
+ return;
+ if ((st->current_prot & PTE_RDONLY) == PTE_RDONLY)
+ return;
+ if ((st->current_prot & PTE_PXN) == PTE_PXN)
+ return;
+
+ WARN_ONCE(1, "arm64/mm: Found insecure W+X mapping at address %p/%pS\n",
+ (void *)st->start_address, (void *)st->start_address);
+
+ st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
+}
+
static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
u64 val)
{
@@ -200,14 +244,16 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
st->level = level;
st->current_prot = prot;
st->start_address = addr;
- seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
} else if (prot != st->current_prot || level != st->level ||
addr >= st->marker[1].start_address) {
const char *unit = units;
unsigned long delta;
if (st->current_prot) {
- seq_printf(st->seq, "0x%016lx-0x%016lx ",
+ note_prot_uxn(st, addr);
+ note_prot_wx(st, addr);
+ pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx ",
st->start_address, addr);
delta = (addr - st->start_address) >> 10;
@@ -215,17 +261,17 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
delta >>= 10;
unit++;
}
- seq_printf(st->seq, "%9lu%c %s", delta, *unit,
+ pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
pg_level[st->level].name);
if (pg_level[st->level].bits)
dump_prot(st, pg_level[st->level].bits,
pg_level[st->level].num);
- seq_puts(st->seq, "\n");
+ pt_dump_seq_puts(st->seq, "\n");
}
if (addr >= st->marker[1].start_address) {
st->marker++;
- seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
}
st->start_address = addr;
@@ -235,7 +281,7 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
if (addr >= st->marker[1].start_address) {
st->marker++;
- seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
}
}
@@ -304,9 +350,8 @@ static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
}
}
-static int ptdump_show(struct seq_file *m, void *v)
+void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
{
- struct ptdump_info *info = m->private;
struct pg_state st = {
.seq = m,
.marker = info->markers,
@@ -315,33 +360,16 @@ static int ptdump_show(struct seq_file *m, void *v)
walk_pgd(&st, info->mm, info->base_addr);
note_page(&st, 0, 0, 0);
- return 0;
}
-static int ptdump_open(struct inode *inode, struct file *file)
+static void ptdump_initialize(void)
{
- return single_open(file, ptdump_show, inode->i_private);
-}
-
-static const struct file_operations ptdump_fops = {
- .open = ptdump_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-int ptdump_register(struct ptdump_info *info, const char *name)
-{
- struct dentry *pe;
unsigned i, j;
for (i = 0; i < ARRAY_SIZE(pg_level); i++)
if (pg_level[i].bits)
for (j = 0; j < pg_level[i].num; j++)
pg_level[i].mask |= pg_level[i].bits[j].mask;
-
- pe = debugfs_create_file(name, 0400, NULL, info, &ptdump_fops);
- return pe ? 0 : -ENOMEM;
}
static struct ptdump_info kernel_ptdump_info = {
@@ -350,8 +378,30 @@ static struct ptdump_info kernel_ptdump_info = {
.base_addr = VA_START,
};
+void ptdump_check_wx(void)
+{
+ struct pg_state st = {
+ .seq = NULL,
+ .marker = (struct addr_marker[]) {
+ { 0, NULL},
+ { -1, NULL},
+ },
+ .check_wx = true,
+ };
+
+ walk_pgd(&st, &init_mm, 0);
+ note_page(&st, 0, 0, 0);
+ if (st.wx_pages || st.uxn_pages)
+ pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
+ st.wx_pages, st.uxn_pages);
+ else
+ pr_info("Checked W+X mappings: passed, no W+X pages found\n");
+}
+
static int ptdump_init(void)
{
- return ptdump_register(&kernel_ptdump_info, "kernel_page_tables");
+ ptdump_initialize();
+ return ptdump_debugfs_register(&kernel_ptdump_info,
+ "kernel_page_tables");
}
device_initcall(ptdump_init);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0f8788374815..a78a5c401806 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -269,13 +269,19 @@ out:
return fault;
}
-static inline bool is_permission_fault(unsigned int esr)
+static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs)
{
unsigned int ec = ESR_ELx_EC(esr);
unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
- return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM) ||
- (ec == ESR_ELx_EC_IABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
+ if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
+ return false;
+
+ if (system_uses_ttbr0_pan())
+ return fsc_type == ESR_ELx_FSC_FAULT &&
+ (regs->pstate & PSR_PAN_BIT);
+ else
+ return fsc_type == ESR_ELx_FSC_PERM;
}
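/*
 * Illustrative sketch, not part of the patch: the reworked
 * is_permission_fault() decision above, with the ESR/PSTATE decoding
 * reduced to booleans so the two cases stand out. With SW TTBR0 PAN a
 * blocked user access shows up as a translation fault (TTBR0 points at
 * the reserved zero page), so the saved PAN bit is what identifies it.
 * All parameter names here are made up for the example.
 */
#include <stdbool.h>
#include <stdio.h>

static bool permission_fault(bool cur_el_abort, bool uses_ttbr0_pan,
			     bool fsc_is_translation, bool fsc_is_perm,
			     bool pan_bit_set)
{
	if (!cur_el_abort)		/* only current-EL data/insn aborts */
		return false;

	if (uses_ttbr0_pan)		/* software-emulated PAN */
		return fsc_is_translation && pan_bit_set;

	return fsc_is_perm;		/* hardware PAN / real permission fault */
}

int main(void)
{
	/* Kernel touched user memory while SW PAN was active: flagged. */
	printf("%d\n", permission_fault(true, true, true, false, true));
	/* Same fault type but the saved PAN bit is clear: not flagged. */
	printf("%d\n", permission_fault(true, true, true, false, false));
	return 0;
}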
static bool is_el0_instruction_abort(unsigned int esr)
@@ -315,7 +321,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
mm_flags |= FAULT_FLAG_WRITE;
}
- if (is_permission_fault(esr) && (addr < USER_DS)) {
+ if (addr < USER_DS && is_permission_fault(esr, regs)) {
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
if (regs->orig_addr_limit == KERNEL_DS)
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
@@ -507,10 +513,10 @@ static const struct fault_info {
{ do_bad, SIGBUS, 0, "unknown 17" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
- { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
- { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
- { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
- { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous parity error" },
{ do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 8377329d8c97..554a2558c12e 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -25,14 +25,7 @@
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
-{
- if (vma->vm_flags & VM_EXEC)
- __flush_icache_all();
-}
-
-static void sync_icache_aliases(void *kaddr, unsigned long len)
+void sync_icache_aliases(void *kaddr, unsigned long len)
{
unsigned long addr = (unsigned long)kaddr;
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 2e49bd252fe7..964b7549af5c 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -51,20 +51,8 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
*pgsize = PAGE_SIZE;
if (!pte_cont(pte))
return 1;
- if (!pgd_present(*pgd)) {
- VM_BUG_ON(!pgd_present(*pgd));
- return 1;
- }
pud = pud_offset(pgd, addr);
- if (!pud_present(*pud)) {
- VM_BUG_ON(!pud_present(*pud));
- return 1;
- }
pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd)) {
- VM_BUG_ON(!pmd_present(*pmd));
- return 1;
- }
if ((pte_t *)pmd == ptep) {
*pgsize = PMD_SIZE;
return CONT_PMDS;
@@ -212,7 +200,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
/* save the 1st pte to return */
pte = ptep_get_and_clear(mm, addr, cpte);
- for (i = 1; i < ncontig; ++i) {
+ for (i = 1, addr += pgsize; i < ncontig; ++i, addr += pgsize) {
/*
* If HW_AFDBM is enabled, then the HW could
* turn on the dirty bit for any of the page
@@ -250,7 +238,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
pfn = pte_pfn(*cpte);
ncontig = find_num_contig(vma->vm_mm, addr, cpte,
*cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte) {
+ for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) {
changed = ptep_set_access_flags(vma, addr, cpte,
pfn_pte(pfn,
hugeprot),
@@ -273,7 +261,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
cpte = huge_pte_offset(mm, addr);
ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte)
+ for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
ptep_set_wrprotect(mm, addr, cpte);
} else {
ptep_set_wrprotect(mm, addr, ptep);
@@ -291,7 +279,7 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
cpte = huge_pte_offset(vma->vm_mm, addr);
ncontig = find_num_contig(vma->vm_mm, addr, cpte,
*cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte)
+ for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
ptep_clear_flush(vma, addr, cpte);
} else {
ptep_clear_flush(vma, addr, ptep);
@@ -323,7 +311,7 @@ __setup("hugepagesz=", setup_hugepagesz);
static __init int add_default_hugepagesz(void)
{
if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
- hugetlb_add_hstate(CONT_PMD_SHIFT);
+ hugetlb_add_hstate(CONT_PTE_SHIFT);
return 0;
}
arch_initcall(add_default_hugepagesz);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 05615a3fdc6f..17243e43184e 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -28,8 +28,6 @@
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/stop_machine.h>
#include <asm/barrier.h>
#include <asm/cputype.h>
@@ -42,6 +40,7 @@
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>
+#include <asm/ptdump.h>
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
@@ -95,11 +94,24 @@ static phys_addr_t __init early_pgtable_alloc(void)
return phys;
}
+static bool pgattr_change_is_safe(u64 old, u64 new)
+{
+ /*
+ * The following mapping attributes may be updated in live
+ * kernel mappings without the need for break-before-make.
+ */
+ static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;
+
+ return old == 0 || new == 0 || ((old ^ new) & ~mask) == 0;
+}
+
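/*
 * Illustrative sketch, not part of the patch: the pgattr_change_is_safe()
 * rule above: a live entry may only change attributes covered by the
 * mask (PXN/RDONLY/WRITE) without break-before-make. The bit positions
 * below are stand-ins chosen for the example, not necessarily the exact
 * arm64 layout.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_RDONLY	(1ULL << 7)
#define PTE_WRITE	(1ULL << 51)
#define PTE_PXN		(1ULL << 53)

static bool pgattr_change_is_safe(uint64_t old, uint64_t new)
{
	const uint64_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;

	return old == 0 || new == 0 || ((old ^ new) & ~mask) == 0;
}

int main(void)
{
	uint64_t pte = 0x00400000000007fdULL;	/* some populated entry */

	/* Toggling only PXN/RDONLY/WRITE is allowed on a live entry. */
	printf("%d\n", pgattr_change_is_safe(pte, pte | PTE_PXN));	/* 1 */
	/* Changing anything else (e.g. a memory attribute bit) is not. */
	printf("%d\n", pgattr_change_is_safe(pte, pte ^ (1ULL << 2)));	/* 0 */
	return 0;
}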
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void))
+ phys_addr_t (*pgtable_alloc)(void),
+ bool page_mappings_only)
{
+ pgprot_t __prot = prot;
pte_t *pte;
BUG_ON(pmd_sect(*pmd));
@@ -115,8 +127,28 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
pte = pte_set_fixmap_offset(pmd, addr);
do {
- set_pte(pte, pfn_pte(pfn, prot));
+ pte_t old_pte = *pte;
+
+ /*
+ * Set the contiguous bit for the subsequent group of PTEs if
+ * its size and alignment are appropriate.
+ */
+ if (((addr | PFN_PHYS(pfn)) & ~CONT_PTE_MASK) == 0) {
+ if (end - addr >= CONT_PTE_SIZE && !page_mappings_only)
+ __prot = __pgprot(pgprot_val(prot) | PTE_CONT);
+ else
+ __prot = prot;
+ }
+
+ set_pte(pte, pfn_pte(pfn, __prot));
pfn++;
+
+ /*
+ * After the PTE entry has been populated once, we
+ * only allow updates to the permission attributes.
+ */
+ BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte)));
+
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_clear_fixmap();
@@ -125,8 +157,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
phys_addr_t (*pgtable_alloc)(void),
- bool allow_block_mappings)
+ bool page_mappings_only)
{
+ pgprot_t __prot = prot;
pmd_t *pmd;
unsigned long next;
@@ -146,27 +179,39 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
pmd = pmd_set_fixmap_offset(pud, addr);
do {
+ pmd_t old_pmd = *pmd;
+
next = pmd_addr_end(addr, end);
+
/* try section mapping first */
if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
- allow_block_mappings) {
- pmd_t old_pmd =*pmd;
- pmd_set_huge(pmd, phys, prot);
+ !page_mappings_only) {
/*
- * Check for previous table entries created during
- * boot (__create_page_tables) and flush them.
+ * Set the contiguous bit for the subsequent group of
+ * PMDs if its size and alignment are appropriate.
*/
- if (!pmd_none(old_pmd)) {
- flush_tlb_all();
- if (pmd_table(old_pmd)) {
- phys_addr_t table = pmd_page_paddr(old_pmd);
- if (!WARN_ON_ONCE(slab_is_available()))
- memblock_free(table, PAGE_SIZE);
- }
+ if (((addr | phys) & ~CONT_PMD_MASK) == 0) {
+ if (end - addr >= CONT_PMD_SIZE)
+ __prot = __pgprot(pgprot_val(prot) |
+ PTE_CONT);
+ else
+ __prot = prot;
}
+ pmd_set_huge(pmd, phys, __prot);
+
+ /*
+ * After the PMD entry has been populated once, we
+ * only allow updates to the permission attributes.
+ */
+ BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
+ pmd_val(*pmd)));
} else {
alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
- prot, pgtable_alloc);
+ prot, pgtable_alloc,
+ page_mappings_only);
+
+ BUG_ON(pmd_val(old_pmd) != 0 &&
+ pmd_val(old_pmd) != pmd_val(*pmd));
}
phys += next - addr;
} while (pmd++, addr = next, addr != end);
@@ -189,7 +234,7 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
phys_addr_t (*pgtable_alloc)(void),
- bool allow_block_mappings)
+ bool page_mappings_only)
{
pud_t *pud;
unsigned long next;
@@ -204,33 +249,28 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
pud = pud_set_fixmap_offset(pgd, addr);
do {
+ pud_t old_pud = *pud;
+
next = pud_addr_end(addr, end);
/*
* For 4K granule only, attempt to put down a 1GB block
*/
- if (use_1G_block(addr, next, phys) && allow_block_mappings) {
- pud_t old_pud = *pud;
+ if (use_1G_block(addr, next, phys) && !page_mappings_only) {
pud_set_huge(pud, phys, prot);
/*
- * If we have an old value for a pud, it will
- * be pointing to a pmd table that we no longer
- * need (from swapper_pg_dir).
- *
- * Look up the old pmd table and free it.
+ * After the PUD entry has been populated once, we
+ * only allow updates to the permission attributes.
*/
- if (!pud_none(old_pud)) {
- flush_tlb_all();
- if (pud_table(old_pud)) {
- phys_addr_t table = pud_page_paddr(old_pud);
- if (!WARN_ON_ONCE(slab_is_available()))
- memblock_free(table, PAGE_SIZE);
- }
- }
+ BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
+ pud_val(*pud)));
} else {
alloc_init_pmd(pud, addr, next, phys, prot,
- pgtable_alloc, allow_block_mappings);
+ pgtable_alloc, page_mappings_only);
+
+ BUG_ON(pud_val(old_pud) != 0 &&
+ pud_val(old_pud) != pud_val(*pud));
}
phys += next - addr;
} while (pud++, addr = next, addr != end);
@@ -242,7 +282,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot,
phys_addr_t (*pgtable_alloc)(void),
- bool allow_block_mappings)
+ bool page_mappings_only)
{
unsigned long addr, length, end, next;
pgd_t *pgd = pgd_offset_raw(pgdir, virt);
@@ -262,7 +302,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
do {
next = pgd_addr_end(addr, end);
alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
- allow_block_mappings);
+ page_mappings_only);
phys += next - addr;
} while (pgd++, addr = next, addr != end);
}
@@ -291,17 +331,17 @@ static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
&phys, virt);
return;
}
- __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, true);
+ __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, false);
}
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
- pgprot_t prot, bool allow_block_mappings)
+ pgprot_t prot, bool page_mappings_only)
{
BUG_ON(mm == &init_mm);
__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
- pgd_pgtable_alloc, allow_block_mappings);
+ pgd_pgtable_alloc, page_mappings_only);
}
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
@@ -314,7 +354,7 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
}
__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
- NULL, !debug_pagealloc_enabled());
+ NULL, debug_pagealloc_enabled());
}
static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
@@ -332,7 +372,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
__create_pgd_mapping(pgd, start, __phys_to_virt(start),
end - start, PAGE_KERNEL,
early_pgtable_alloc,
- !debug_pagealloc_enabled());
+ debug_pagealloc_enabled());
return;
}
@@ -345,13 +385,13 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
__phys_to_virt(start),
kernel_start - start, PAGE_KERNEL,
early_pgtable_alloc,
- !debug_pagealloc_enabled());
+ debug_pagealloc_enabled());
if (kernel_end < end)
__create_pgd_mapping(pgd, kernel_end,
__phys_to_virt(kernel_end),
end - kernel_end, PAGE_KERNEL,
early_pgtable_alloc,
- !debug_pagealloc_enabled());
+ debug_pagealloc_enabled());
/*
* Map the linear alias of the [_text, __init_begin) interval as
@@ -361,7 +401,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
*/
__create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
kernel_end - kernel_start, PAGE_KERNEL_RO,
- early_pgtable_alloc, !debug_pagealloc_enabled());
+ early_pgtable_alloc, debug_pagealloc_enabled());
}
static void __init map_mem(pgd_t *pgd)
@@ -396,6 +436,11 @@ void mark_rodata_ro(void)
section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
section_size, PAGE_KERNEL_RO);
+
+ /* flush the TLBs after updating live kernel mappings */
+ flush_tlb_all();
+
+ debug_checkwx();
}
static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
@@ -408,7 +453,7 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
BUG_ON(!PAGE_ALIGNED(size));
__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
- early_pgtable_alloc, !debug_pagealloc_enabled());
+ early_pgtable_alloc, debug_pagealloc_enabled());
vma->addr = va_start;
vma->phys_addr = pa_start;
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 352c73b6a59e..32682be978e0 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -70,11 +70,14 @@ ENTRY(cpu_do_suspend)
mrs x8, mdscr_el1
mrs x9, oslsr_el1
mrs x10, sctlr_el1
+ mrs x11, tpidr_el1
+ mrs x12, sp_el0
stp x2, x3, [x0]
stp x4, xzr, [x0, #16]
stp x5, x6, [x0, #32]
stp x7, x8, [x0, #48]
stp x9, x10, [x0, #64]
+ stp x11, x12, [x0, #80]
ret
ENDPROC(cpu_do_suspend)
@@ -90,6 +93,7 @@ ENTRY(cpu_do_resume)
ldp x6, x8, [x0, #32]
ldp x9, x10, [x0, #48]
ldp x11, x12, [x0, #64]
+ ldp x13, x14, [x0, #80]
msr tpidr_el0, x2
msr tpidrro_el0, x3
msr contextidr_el1, x4
@@ -112,6 +116,8 @@ ENTRY(cpu_do_resume)
msr mdscr_el1, x10
msr sctlr_el1, x12
+ msr tpidr_el1, x13
+ msr sp_el0, x14
/*
* Restore oslsr_el1 by writing oslar_el1
*/
@@ -136,11 +142,7 @@ ENTRY(cpu_do_switch_mm)
bfi x0, x1, #48, #16 // set the ASID
msr ttbr0_el1, x0 // set TTBR0
isb
-alternative_if ARM64_WORKAROUND_CAVIUM_27456
- ic iallu
- dsb nsh
- isb
-alternative_else_nop_endif
+ post_ttbr0_update_workaround
ret
ENDPROC(cpu_do_switch_mm)
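/*
 * Illustrative sketch, not taken from this patch: the cpu_do_suspend /
 * cpu_do_resume hunks above add one more register pair (tpidr_el1 and
 * sp_el0, stored at offset 80), so the per-CPU suspend context grows to
 * twelve 8-byte slots.  The struct and field names below are assumptions
 * used only to visualise that layout.
 */
#include <stdint.h>

struct suspend_ctx_sketch {
	uint64_t regs[10];	/* tpidr_el0 ... sctlr_el1, as saved before */
	uint64_t tpidr_el1;	/* new: stored at offset 80 */
	uint64_t sp_el0;	/* new: stored at offset 88 */
};

_Static_assert(sizeof(struct suspend_ctx_sketch) == 96,
	       "matches the stp/ldp pairs at offsets 0..80 above");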
diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c
new file mode 100644
index 000000000000..eee4d864350c
--- /dev/null
+++ b/arch/arm64/mm/ptdump_debugfs.c
@@ -0,0 +1,31 @@
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <asm/ptdump.h>
+
+static int ptdump_show(struct seq_file *m, void *v)
+{
+ struct ptdump_info *info = m->private;
+ ptdump_walk_pgd(m, info);
+ return 0;
+}
+
+static int ptdump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ptdump_show, inode->i_private);
+}
+
+static const struct file_operations ptdump_fops = {
+ .open = ptdump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int ptdump_debugfs_register(struct ptdump_info *info, const char *name)
+{
+ struct dentry *pe;
+ pe = debugfs_create_file(name, 0400, NULL, info, &ptdump_fops);
+ return pe ? 0 : -ENOMEM;
+
+}
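/*
 * Illustrative sketch, not taken from this patch: a caller such as an
 * arch page-table dump driver could expose its tables through the new
 * helper roughly like this.  The ptdump_info fields and the file name
 * are assumptions, and building this needs the usual kernel headers.
 */
static struct ptdump_info kernel_ptdump_info = {
	.mm = &init_mm,
	/* .markers / .base_addr as consumed by ptdump_walk_pgd() */
};

static int __init ptdump_init(void)
{
	return ptdump_debugfs_register(&kernel_ptdump_info,
				       "kernel_page_tables");
}
device_initcall(ptdump_init);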
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index 329c8027b0a9..b41aff25426d 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -49,6 +49,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/uaccess.h>
#include <xen/interface/xen.h>
@@ -91,6 +92,20 @@ ENTRY(privcmd_call)
mov x2, x3
mov x3, x4
mov x4, x5
+ /*
+ * Privcmd calls are issued by userspace. The kernel needs to

+ * enable access to TTBR0_EL1 as the hypervisor would issue stage 1
+ * translations to user memory via AT instructions. Since AT
+ * instructions are not affected by the PAN bit (ARMv8.1), we only
+ * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
+ * is enabled (it implies that hardware UAO and PAN are disabled).
+ */
+ uaccess_ttbr0_enable x6, x7
hvc XEN_IMM
+
+ /*
+ * Disable userspace access from kernel once the hyp call completed.
+ */
+ uaccess_ttbr0_disable x6
ret
ENDPROC(privcmd_call);
diff --git a/arch/blackfin/mach-bf561/coreb.c b/arch/blackfin/mach-bf561/coreb.c
index 8a2543c654b3..cf27554e76bf 100644
--- a/arch/blackfin/mach-bf561/coreb.c
+++ b/arch/blackfin/mach-bf561/coreb.c
@@ -1,5 +1,7 @@
/* Load firmware into Core B on a BF561
*
+ * Author: Bas Vermeulen <bvermeul@blackstar.xs4all.nl>
+ *
* Copyright 2004-2009 Analog Devices Inc.
* Licensed under the GPL-2 or later.
*/
@@ -14,9 +16,9 @@
#include <linux/device.h>
#include <linux/fs.h>
+#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
-#include <linux/module.h>
#define CMD_COREB_START _IO('b', 0)
#define CMD_COREB_STOP _IO('b', 1)
@@ -59,8 +61,4 @@ static struct miscdevice coreb_dev = {
.name = "coreb",
.fops = &coreb_fops,
};
-module_misc_device(coreb_dev);
-
-MODULE_AUTHOR("Bas Vermeulen <bvermeul@blackstar.xs4all.nl>");
-MODULE_DESCRIPTION("BF561 Core B Support");
-MODULE_LICENSE("GPL");
+builtin_misc_device(coreb_dev);
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index fae2f9447792..6080582a26d1 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -341,7 +341,7 @@ void output_pm_defines(void)
void output_kvm_defines(void)
{
- COMMENT(" KVM/MIPS Specfic offsets. ");
+ COMMENT(" KVM/MIPS Specific offsets. ");
OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]);
OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]);
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index e407af2b7333..2e6a823fa502 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -70,7 +70,9 @@
#define HPTE_V_SSIZE_SHIFT 62
#define HPTE_V_AVPN_SHIFT 7
+#define HPTE_V_COMMON_BITS ASM_CONST(0x000fffffffffffff)
#define HPTE_V_AVPN ASM_CONST(0x3fffffffffffff80)
+#define HPTE_V_AVPN_3_0 ASM_CONST(0x000fffffffffff80)
#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010)
@@ -80,14 +82,16 @@
#define HPTE_V_VALID ASM_CONST(0x0000000000000001)
/*
- * ISA 3.0 have a different HPTE format.
+ * ISA 3.0 has a different HPTE format.
*/
#define HPTE_R_3_0_SSIZE_SHIFT 58
+#define HPTE_R_3_0_SSIZE_MASK (3ull << HPTE_R_3_0_SSIZE_SHIFT)
#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
#define HPTE_R_TS ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT 12
#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)
+#define HPTE_R_RPN_3_0 ASM_CONST(0x01fffffffffff000)
#define HPTE_R_PP ASM_CONST(0x0000000000000003)
#define HPTE_R_PPP ASM_CONST(0x8000000000000003)
#define HPTE_R_N ASM_CONST(0x0000000000000004)
@@ -316,12 +320,43 @@ static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
*/
v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
v <<= HPTE_V_AVPN_SHIFT;
- if (!cpu_has_feature(CPU_FTR_ARCH_300))
- v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
+ v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
return v;
}
/*
+ * ISA v3.0 defines a new HPTE format, which differs from the old
+ * format in having smaller AVPN and ARPN fields, and the B field
+ * in the second dword instead of the first.
+ */
+static inline unsigned long hpte_old_to_new_v(unsigned long v)
+{
+ /* trim AVPN, drop B */
+ return v & HPTE_V_COMMON_BITS;
+}
+
+static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
+{
+ /* move B field from 1st to 2nd dword, trim ARPN */
+ return (r & ~HPTE_R_3_0_SSIZE_MASK) |
+ (((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
+}
+
+static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
+{
+ /* insert B field */
+ return (v & HPTE_V_COMMON_BITS) |
+ ((r & HPTE_R_3_0_SSIZE_MASK) <<
+ (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
+}
+
+static inline unsigned long hpte_new_to_old_r(unsigned long r)
+{
+ /* clear out B field */
+ return r & ~HPTE_R_3_0_SSIZE_MASK;
+}
+
+/*
* This function sets the AVPN and L fields of the HPTE appropriately
* using the base page size and actual page size.
*/
@@ -341,12 +376,8 @@ static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
* aligned for the requested page size
*/
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
- int actual_psize, int ssize)
+ int actual_psize)
{
-
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- pa |= ((unsigned long) ssize) << HPTE_R_3_0_SSIZE_SHIFT;
-
/* A 4K page needs no special encoding */
if (actual_psize == MMU_PAGE_4K)
return pa & HPTE_R_RPN;
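/*
 * Illustrative user-space sketch, not taken from this patch, of the
 * old <-> new HPTE format conversion helpers added above, using the
 * constants visible in this header.  The example values are made up;
 * the fields the conversion deliberately trims (high AVPN bits, new
 * bits 58-59 of dword 1) are kept zero so the round trip is exact.
 */
#include <assert.h>
#include <stdint.h>

#define HPTE_V_COMMON_BITS	0x000fffffffffffffULL
#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_3_0_SSIZE_MASK	(3ULL << HPTE_R_3_0_SSIZE_SHIFT)

static uint64_t old_to_new_v(uint64_t v)
{
	return v & HPTE_V_COMMON_BITS;			/* trim AVPN, drop B */
}

static uint64_t old_to_new_r(uint64_t v, uint64_t r)
{
	return (r & ~HPTE_R_3_0_SSIZE_MASK) |		/* move B into dword 1 */
	       ((v >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
}

static uint64_t new_to_old_v(uint64_t v, uint64_t r)
{
	return (v & HPTE_V_COMMON_BITS) |		/* re-insert B into dword 0 */
	       ((r & HPTE_R_3_0_SSIZE_MASK) <<
		(HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
}

static uint64_t new_to_old_r(uint64_t r)
{
	return r & ~HPTE_R_3_0_SSIZE_MASK;		/* clear B out of dword 1 */
}

int main(void)
{
	/* old-format entry: B = 1 (1T segment), AVPN/flags in the low bits */
	uint64_t old_v = (1ULL << HPTE_V_SSIZE_SHIFT) | 0x00000123456789a5ULL;
	uint64_t old_r = 0x000000000fedc003ULL;

	uint64_t new_v = old_to_new_v(old_v);
	uint64_t new_r = old_to_new_r(old_v, old_r);

	assert(new_to_old_v(new_v, new_r) == old_v);
	assert(new_to_old_r(new_r) == old_r);
	return 0;
}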
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 05cabed3d1bd..09a802bb702f 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -99,6 +99,7 @@
#define BOOK3S_INTERRUPT_H_EMUL_ASSIST 0xe40
#define BOOK3S_INTERRUPT_HMI 0xe60
#define BOOK3S_INTERRUPT_H_DOORBELL 0xe80
+#define BOOK3S_INTERRUPT_H_VIRT 0xea0
#define BOOK3S_INTERRUPT_PERFMON 0xf00
#define BOOK3S_INTERRUPT_ALTIVEC 0xf20
#define BOOK3S_INTERRUPT_VSX 0xf40
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 28350a294b1e..e59b172666cd 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -48,7 +48,7 @@
#ifdef CONFIG_KVM_MMIO
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#endif
-#define KVM_HALT_POLL_NS_DEFAULT 500000
+#define KVM_HALT_POLL_NS_DEFAULT 10000 /* 10 us */
/* These values are internal and can be increased later */
#define KVM_NR_IRQCHIPS 1
@@ -244,8 +244,10 @@ struct kvm_arch_memory_slot {
struct kvm_arch {
unsigned int lpid;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ unsigned int tlb_sets;
unsigned long hpt_virt;
struct revmap_entry *revmap;
+ atomic64_t mmio_update;
unsigned int host_lpid;
unsigned long host_lpcr;
unsigned long sdr1;
@@ -408,6 +410,24 @@ struct kvmppc_passthru_irqmap {
#define KVMPPC_IRQ_MPIC 1
#define KVMPPC_IRQ_XICS 2
+#define MMIO_HPTE_CACHE_SIZE 4
+
+struct mmio_hpte_cache_entry {
+ unsigned long hpte_v;
+ unsigned long hpte_r;
+ unsigned long rpte;
+ unsigned long pte_index;
+ unsigned long eaddr;
+ unsigned long slb_v;
+ long mmio_update;
+ unsigned int slb_base_pshift;
+};
+
+struct mmio_hpte_cache {
+ struct mmio_hpte_cache_entry entry[MMIO_HPTE_CACHE_SIZE];
+ unsigned int index;
+};
+
struct openpic;
struct kvm_vcpu_arch {
@@ -498,6 +518,8 @@ struct kvm_vcpu_arch {
ulong tcscr;
ulong acop;
ulong wort;
+ ulong tid;
+ ulong psscr;
ulong shadow_srr1;
#endif
u32 vrsave; /* also USPRG0 */
@@ -546,6 +568,7 @@ struct kvm_vcpu_arch {
u64 tfiar;
u32 cr_tm;
+ u64 xer_tm;
u64 lr_tm;
u64 ctr_tm;
u64 amr_tm;
@@ -655,9 +678,11 @@ struct kvm_vcpu_arch {
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
struct kvm_vcpu_arch_shared shregs;
+ struct mmio_hpte_cache mmio_cache;
unsigned long pgfault_addr;
long pgfault_index;
unsigned long pgfault_hpte[2];
+ struct mmio_hpte_cache_entry *pgfault_cache;
struct task_struct *run_task;
struct kvm_run *kvm_run;
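/*
 * Illustrative user-space sketch, not taken from this patch, of the
 * generation-counter idea behind mmio_update and mmio_hpte_cache: each
 * cached MMIO translation records the global counter value at fill time,
 * and a lookup only hits while the counter has not moved.  Bumping the
 * counter (as the memslot-update path does) invalidates every entry at
 * once without walking the cache.  Names and fields are simplified.
 */
#include <stdint.h>
#include <stdio.h>

#define CACHE_SIZE 4

struct cache_entry {
	uint64_t eaddr;		/* guest effective address */
	uint64_t gpa;		/* cached translation */
	int64_t  generation;	/* counter value when filled */
};

struct mmio_cache {
	struct cache_entry entry[CACHE_SIZE];
	unsigned int next;
	int64_t generation;	/* global counter, bumped on memslot change */
};

static void cache_fill(struct mmio_cache *c, uint64_t ea, uint64_t gpa)
{
	struct cache_entry *e = &c->entry[c->next++ % CACHE_SIZE];

	e->eaddr = ea;
	e->gpa = gpa;
	e->generation = c->generation;
}

static int cache_lookup(struct mmio_cache *c, uint64_t ea, uint64_t *gpa)
{
	for (unsigned int i = 0; i < CACHE_SIZE; i++) {
		struct cache_entry *e = &c->entry[i];

		if (e->eaddr == ea && e->generation == c->generation) {
			*gpa = e->gpa;
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	struct mmio_cache c = { .generation = 1 };
	uint64_t gpa;

	cache_fill(&c, 0x1000, 0xfe001000);
	printf("hit before invalidate: %d\n", cache_lookup(&c, 0x1000, &gpa));
	c.generation++;			/* memslot changed: invalidate all */
	printf("hit after invalidate:  %d\n", cache_lookup(&c, 0x1000, &gpa));
	return 0;
}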
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index f6e49640dbe1..2da67bf1f2ec 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -483,9 +483,10 @@ extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
unsigned long host_irq);
-extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, u32 xirr,
- struct kvmppc_irq_map *irq_map,
- struct kvmppc_passthru_irqmap *pimap);
+extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
+ struct kvmppc_irq_map *irq_map,
+ struct kvmppc_passthru_irqmap *pimap,
+ bool *again);
extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
@@ -510,6 +511,48 @@ static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
#endif
/*
+ * Prototypes for functions called only from assembler code.
+ * Having prototypes reduces sparse errors.
+ */
+long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ unsigned long ioba, unsigned long tce);
+long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ unsigned long liobn, unsigned long ioba,
+ unsigned long tce_list, unsigned long npages);
+long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
+ unsigned long liobn, unsigned long ioba,
+ unsigned long tce_value, unsigned long npages);
+long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
+ unsigned int yield_count);
+long kvmppc_h_random(struct kvm_vcpu *vcpu);
+void kvmhv_commence_exit(int trap);
+long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
+void kvmppc_subcore_enter_guest(void);
+void kvmppc_subcore_exit_guest(void);
+long kvmppc_realmode_hmi_handler(void);
+long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+ long pte_index, unsigned long pteh, unsigned long ptel);
+long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index, unsigned long avpn);
+long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
+long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index, unsigned long avpn,
+ unsigned long va);
+long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index);
+long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index);
+long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index);
+long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
+ unsigned long slb_v, unsigned int status, bool data);
+unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
+int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr);
+int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
+int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
+
+/*
* Host-side operations we want to set up while running in real
* mode in the guest operating on the xics.
* Currently only VCPU wakeup is supported.
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index e311c25751a4..8d1499334257 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -214,6 +214,11 @@ extern u64 ppc64_rma_size;
/* Cleanup function used by kexec */
extern void mmu_cleanup_all(void);
extern void radix__mmu_cleanup_all(void);
+
+/* Functions for creating and updating partition table on POWER9 */
+extern void mmu_partition_table_init(void);
+extern void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
+ unsigned long dw1);
#endif /* CONFIG_PPC64 */
struct mm_struct;
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index e958b7096f19..5c7db0f1a708 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -220,9 +220,12 @@ int64_t opal_pci_set_power_state(uint64_t async_token, uint64_t id,
int64_t opal_pci_poll2(uint64_t id, uint64_t data);
int64_t opal_int_get_xirr(uint32_t *out_xirr, bool just_poll);
+int64_t opal_rm_int_get_xirr(__be32 *out_xirr, bool just_poll);
int64_t opal_int_set_cppr(uint8_t cppr);
int64_t opal_int_eoi(uint32_t xirr);
+int64_t opal_rm_int_eoi(uint32_t xirr);
int64_t opal_int_set_mfrr(uint32_t cpu, uint8_t mfrr);
+int64_t opal_rm_int_set_mfrr(uint32_t cpu, uint8_t mfrr);
int64_t opal_pci_tce_kill(uint64_t phb_id, uint32_t kill_type,
uint32_t pe_num, uint32_t tce_size,
uint64_t dma_addr, uint32_t npages);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 9e1499f98def..04aa1ee8cdb6 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -153,6 +153,8 @@
#define PSSCR_EC 0x00100000 /* Exit Criterion */
#define PSSCR_ESL 0x00200000 /* Enable State Loss */
#define PSSCR_SD 0x00400000 /* Status Disable */
+#define PSSCR_PLS 0xf000000000000000 /* Power-saving Level Status */
+#define PSSCR_GUEST_VIS 0xf0000000000003ff /* Guest-visible PSSCR fields */
/* Floating Point Status and Control Register (FPSCR) Fields */
#define FPSCR_FX 0x80000000 /* FPU exception summary */
@@ -236,6 +238,7 @@
#define SPRN_TEXASRU 0x83 /* '' '' '' Upper 32 */
#define TEXASR_FS __MASK(63-36) /* TEXASR Failure Summary */
#define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */
+#define SPRN_TIDR 144 /* Thread ID register */
#define SPRN_CTRLF 0x088
#define SPRN_CTRLT 0x098
#define CTRL_CT 0xc0000000 /* current thread */
@@ -294,6 +297,7 @@
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
#define SPRN_LMRR 0x32D /* Load Monitor Region Register */
#define SPRN_LMSER 0x32E /* Load Monitor Section Enable Register */
+#define SPRN_ASDR 0x330 /* Access segment descriptor register */
#define SPRN_IC 0x350 /* Virtual Instruction Count */
#define SPRN_VTB 0x351 /* Virtual Time Base */
#define SPRN_LDBAR 0x352 /* LD Base Address Register */
@@ -305,6 +309,7 @@
/* HFSCR and FSCR bit numbers are the same */
#define FSCR_LM_LG 11 /* Enable Load Monitor Registers */
+#define FSCR_MSGP_LG 10 /* Enable MSGP */
#define FSCR_TAR_LG 8 /* Enable Target Address Register */
#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
#define FSCR_TM_LG 5 /* Enable Transactional Memory */
@@ -320,6 +325,7 @@
#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
#define HFSCR_LM __MASK(FSCR_LM_LG)
+#define HFSCR_MSGP __MASK(FSCR_MSGP_LG)
#define HFSCR_TAR __MASK(FSCR_TAR_LG)
#define HFSCR_EBB __MASK(FSCR_EBB_LG)
#define HFSCR_TM __MASK(FSCR_TM_LG)
@@ -358,6 +364,7 @@
#define LPCR_PECE_HVEE ASM_CONST(0x0000400000000000) /* P9 Wakeup on HV interrupts */
#define LPCR_MER ASM_CONST(0x0000000000000800) /* Mediated External Exception */
#define LPCR_MER_SH 11
+#define LPCR_GTSE ASM_CONST(0x0000000000000400) /* Guest Translation Shootdown Enable */
#define LPCR_TC ASM_CONST(0x0000000000000200) /* Translation control */
#define LPCR_LPES 0x0000000c
#define LPCR_LPES0 ASM_CONST(0x0000000000000008) /* LPAR Env selector 0 */
@@ -378,6 +385,12 @@
#define PCR_VEC_DIS (1ul << (63-0)) /* Vec. disable (bit NA since POWER8) */
#define PCR_VSX_DIS (1ul << (63-1)) /* VSX disable (bit NA since POWER8) */
#define PCR_TM_DIS (1ul << (63-2)) /* Trans. memory disable (POWER8) */
+/*
+ * These bits are used in the function kvmppc_set_arch_compat() to specify and
+ * determine both the compatibility level which we want to emulate and the
+ * compatibility level which the host is capable of emulating.
+ */
+#define PCR_ARCH_207 0x8 /* Architecture 2.07 */
#define PCR_ARCH_206 0x4 /* Architecture 2.06 */
#define PCR_ARCH_205 0x2 /* Architecture 2.05 */
#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
@@ -1219,6 +1232,7 @@
#define PVR_ARCH_206 0x0f000003
#define PVR_ARCH_206p 0x0f100003
#define PVR_ARCH_207 0x0f000004
+#define PVR_ARCH_300 0x0f000005
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index c93cf35ce379..3603b6f51b11 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -573,6 +573,10 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_SPRG9 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xba)
#define KVM_REG_PPC_DBSR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbb)
+/* POWER9 registers */
+#define KVM_REG_PPC_TIDR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc)
+#define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
+
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs
*/
@@ -596,6 +600,7 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_TM_VSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
#define KVM_REG_PPC_TM_DSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
#define KVM_REG_PPC_TM_TAR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+#define KVM_REG_PPC_TM_XER (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)
/* PPC64 eXternal Interrupt Controller Specification */
#define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index caec7bf3b99a..195a9fc8f81c 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -487,6 +487,7 @@ int main(void)
/* book3s */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ DEFINE(KVM_TLB_SETS, offsetof(struct kvm, arch.tlb_sets));
DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
@@ -548,6 +549,8 @@ int main(void)
DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr));
DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop));
DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort));
+ DEFINE(VCPU_TID, offsetof(struct kvm_vcpu, arch.tid));
+ DEFINE(VCPU_PSSCR, offsetof(struct kvm_vcpu, arch.psscr));
DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_map));
DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
@@ -569,6 +572,7 @@ int main(void)
DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
+ DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm));
DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 37c027ca83b2..f3e1f5d29dce 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -174,7 +174,7 @@ __init_FSCR:
__init_HFSCR:
mfspr r3,SPRN_HFSCR
ori r3,r3,HFSCR_TAR|HFSCR_TM|HFSCR_BHRB|HFSCR_PM|\
- HFSCR_DSCR|HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB
+ HFSCR_DSCR|HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB|HFSCR_MSGP
mtspr SPRN_HFSCR,r3
blr
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 05f09ae82587..b795dd1ac2ef 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -88,6 +88,8 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
/* 128 (2**7) bytes in each HPTEG */
kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;
+ atomic64_set(&kvm->arch.mmio_update, 0);
+
/* Allocate reverse map array */
rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
if (!rev) {
@@ -255,7 +257,7 @@ static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
kvmppc_set_msr(vcpu, msr);
}
-long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
+static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
long pte_index, unsigned long pteh,
unsigned long ptel, unsigned long *pte_idx_ret)
{
@@ -312,7 +314,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_slb *slbe;
unsigned long slb_v;
unsigned long pp, key;
- unsigned long v, gr;
+ unsigned long v, orig_v, gr;
__be64 *hptep;
int index;
int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
@@ -337,10 +339,12 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
return -ENOENT;
}
hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
- v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
+ v = orig_v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ v = hpte_new_to_old_v(v, be64_to_cpu(hptep[1]));
gr = kvm->arch.revmap[index].guest_rpte;
- unlock_hpte(hptep, v);
+ unlock_hpte(hptep, orig_v);
preempt_enable();
gpte->eaddr = eaddr;
@@ -438,6 +442,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
{
struct kvm *kvm = vcpu->kvm;
unsigned long hpte[3], r;
+ unsigned long hnow_v, hnow_r;
__be64 *hptep;
unsigned long mmu_seq, psize, pte_size;
unsigned long gpa_base, gfn_base;
@@ -451,6 +456,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int writing, write_ok;
struct vm_area_struct *vma;
unsigned long rcbits;
+ long mmio_update;
/*
* Real-mode code has already searched the HPT and found the
@@ -460,6 +466,19 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/
if (ea != vcpu->arch.pgfault_addr)
return RESUME_GUEST;
+
+ if (vcpu->arch.pgfault_cache) {
+ mmio_update = atomic64_read(&kvm->arch.mmio_update);
+ if (mmio_update == vcpu->arch.pgfault_cache->mmio_update) {
+ r = vcpu->arch.pgfault_cache->rpte;
+ psize = hpte_page_size(vcpu->arch.pgfault_hpte[0], r);
+ gpa_base = r & HPTE_R_RPN & ~(psize - 1);
+ gfn_base = gpa_base >> PAGE_SHIFT;
+ gpa = gpa_base | (ea & (psize - 1));
+ return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
+ dsisr & DSISR_ISSTORE);
+ }
+ }
index = vcpu->arch.pgfault_index;
hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
rev = &kvm->arch.revmap[index];
@@ -472,6 +491,10 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
unlock_hpte(hptep, hpte[0]);
preempt_enable();
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ hpte[0] = hpte_new_to_old_v(hpte[0], hpte[1]);
+ hpte[1] = hpte_new_to_old_r(hpte[1]);
+ }
if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
hpte[1] != vcpu->arch.pgfault_hpte[1])
return RESUME_GUEST;
@@ -575,16 +598,22 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/
if (psize < PAGE_SIZE)
psize = PAGE_SIZE;
- r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
+ r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) |
+ ((pfn << PAGE_SHIFT) & ~(psize - 1));
if (hpte_is_writable(r) && !write_ok)
r = hpte_make_readonly(r);
ret = RESUME_GUEST;
preempt_disable();
while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
cpu_relax();
- if ((be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK) != hpte[0] ||
- be64_to_cpu(hptep[1]) != hpte[1] ||
- rev->guest_rpte != hpte[2])
+ hnow_v = be64_to_cpu(hptep[0]);
+ hnow_r = be64_to_cpu(hptep[1]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
+ hnow_r = hpte_new_to_old_r(hnow_r);
+ }
+ if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
+ rev->guest_rpte != hpte[2])
/* HPTE has been changed under us; let the guest retry */
goto out_unlock;
hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
@@ -615,6 +644,10 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
}
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ r = hpte_old_to_new_r(hpte[0], r);
+ hpte[0] = hpte_old_to_new_v(hpte[0]);
+ }
hptep[1] = cpu_to_be64(r);
eieio();
__unlock_hpte(hptep, hpte[0]);
@@ -758,6 +791,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
hpte_rpn(ptel, psize) == gfn) {
hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
kvmppc_invalidate_hpte(kvm, hptep, i);
+ hptep[1] &= ~cpu_to_be64(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
/* Harvest R and C */
rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
@@ -1165,7 +1199,7 @@ static long record_hpte(unsigned long flags, __be64 *hptp,
unsigned long *hpte, struct revmap_entry *revp,
int want_valid, int first_pass)
{
- unsigned long v, r;
+ unsigned long v, r, hr;
unsigned long rcbits_unset;
int ok = 1;
int valid, dirty;
@@ -1192,6 +1226,11 @@ static long record_hpte(unsigned long flags, __be64 *hptp,
while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
cpu_relax();
v = be64_to_cpu(hptp[0]);
+ hr = be64_to_cpu(hptp[1]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ v = hpte_new_to_old_v(v, hr);
+ hr = hpte_new_to_old_r(hr);
+ }
/* re-evaluate valid and dirty from synchronized HPTE value */
valid = !!(v & HPTE_V_VALID);
@@ -1199,8 +1238,8 @@ static long record_hpte(unsigned long flags, __be64 *hptp,
/* Harvest R and C into guest view if necessary */
rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
- if (valid && (rcbits_unset & be64_to_cpu(hptp[1]))) {
- revp->guest_rpte |= (be64_to_cpu(hptp[1]) &
+ if (valid && (rcbits_unset & hr)) {
+ revp->guest_rpte |= (hr &
(HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED;
dirty = 1;
}
@@ -1608,7 +1647,7 @@ static ssize_t debugfs_htab_read(struct file *file, char __user *buf,
return ret;
}
-ssize_t debugfs_htab_write(struct file *file, const char __user *buf,
+static ssize_t debugfs_htab_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
return -EACCES;
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index d461c440889a..e4c4ea973e57 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -39,7 +39,6 @@
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
-#include <asm/iommu.h>
#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 39ef1f4a7b02..8dcbe37a4dac 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -54,6 +54,9 @@
#include <asm/dbell.h>
#include <asm/hmi.h>
#include <asm/pnv-pci.h>
+#include <asm/mmu.h>
+#include <asm/opal.h>
+#include <asm/xics.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
@@ -62,6 +65,7 @@
#include <linux/irqbypass.h>
#include <linux/module.h>
#include <linux/compiler.h>
+#include <linux/of.h>
#include "book3s.h"
@@ -104,23 +108,6 @@ module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect,
MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
#endif
-/* Maximum halt poll interval defaults to KVM_HALT_POLL_NS_DEFAULT */
-static unsigned int halt_poll_max_ns = KVM_HALT_POLL_NS_DEFAULT;
-module_param(halt_poll_max_ns, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(halt_poll_max_ns, "Maximum halt poll time in ns");
-
-/* Factor by which the vcore halt poll interval is grown, default is to double
- */
-static unsigned int halt_poll_ns_grow = 2;
-module_param(halt_poll_ns_grow, int, S_IRUGO);
-MODULE_PARM_DESC(halt_poll_ns_grow, "Factor halt poll time is grown by");
-
-/* Factor by which the vcore halt poll interval is shrunk, default is to reset
- */
-static unsigned int halt_poll_ns_shrink;
-module_param(halt_poll_ns_shrink, int, S_IRUGO);
-MODULE_PARM_DESC(halt_poll_ns_shrink, "Factor halt poll time is shrunk by");
-
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
@@ -146,12 +133,21 @@ static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
static bool kvmppc_ipi_thread(int cpu)
{
+ unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+
+ /* On POWER9 we can use msgsnd to IPI any cpu */
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ msg |= get_hard_smp_processor_id(cpu);
+ smp_mb();
+ __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+ return true;
+ }
+
/* On POWER8 for IPIs to threads in the same core, use msgsnd */
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
preempt_disable();
if (cpu_first_thread_sibling(cpu) ==
cpu_first_thread_sibling(smp_processor_id())) {
- unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
msg |= cpu_thread_in_core(cpu);
smp_mb();
__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
@@ -162,8 +158,12 @@ static bool kvmppc_ipi_thread(int cpu)
}
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
- if (cpu >= 0 && cpu < nr_cpu_ids && paca[cpu].kvm_hstate.xics_phys) {
- xics_wake_cpu(cpu);
+ if (cpu >= 0 && cpu < nr_cpu_ids) {
+ if (paca[cpu].kvm_hstate.xics_phys) {
+ xics_wake_cpu(cpu);
+ return true;
+ }
+ opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
return true;
}
#endif
@@ -299,41 +299,54 @@ static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
vcpu->arch.pvr = pvr;
}
+/* Dummy value used in computing PCR value below */
+#define PCR_ARCH_300 (PCR_ARCH_207 << 1)
+
static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
- unsigned long pcr = 0;
+ unsigned long host_pcr_bit = 0, guest_pcr_bit = 0;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ /* We can (emulate) our own architecture version and anything older */
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ host_pcr_bit = PCR_ARCH_300;
+ else if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ host_pcr_bit = PCR_ARCH_207;
+ else if (cpu_has_feature(CPU_FTR_ARCH_206))
+ host_pcr_bit = PCR_ARCH_206;
+ else
+ host_pcr_bit = PCR_ARCH_205;
+
+ /* Determine lowest PCR bit needed to run guest in given PVR level */
+ guest_pcr_bit = host_pcr_bit;
if (arch_compat) {
switch (arch_compat) {
case PVR_ARCH_205:
- /*
- * If an arch bit is set in PCR, all the defined
- * higher-order arch bits also have to be set.
- */
- pcr = PCR_ARCH_206 | PCR_ARCH_205;
+ guest_pcr_bit = PCR_ARCH_205;
break;
case PVR_ARCH_206:
case PVR_ARCH_206p:
- pcr = PCR_ARCH_206;
+ guest_pcr_bit = PCR_ARCH_206;
break;
case PVR_ARCH_207:
+ guest_pcr_bit = PCR_ARCH_207;
+ break;
+ case PVR_ARCH_300:
+ guest_pcr_bit = PCR_ARCH_300;
break;
default:
return -EINVAL;
}
-
- if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
- /* POWER7 can't emulate POWER8 */
- if (!(pcr & PCR_ARCH_206))
- return -EINVAL;
- pcr &= ~PCR_ARCH_206;
- }
}
+ /* Check requested PCR bits don't exceed our capabilities */
+ if (guest_pcr_bit > host_pcr_bit)
+ return -EINVAL;
+
spin_lock(&vc->lock);
vc->arch_compat = arch_compat;
- vc->pcr = pcr;
+ /* Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit */
+ vc->pcr = host_pcr_bit - guest_pcr_bit;
spin_unlock(&vc->lock);
return 0;
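/*
 * Illustrative sketch, not taken from this patch, of the PCR value
 * computed in kvmppc_set_arch_compat() above: because the PCR_ARCH_*
 * bits are consecutive powers of two, host_pcr_bit - guest_pcr_bit
 * yields exactly the bits with guest_pcr_bit <= bit < host_pcr_bit.
 */
#include <assert.h>

#define PCR_ARCH_205	0x2
#define PCR_ARCH_206	0x4
#define PCR_ARCH_207	0x8
#define PCR_ARCH_300	(PCR_ARCH_207 << 1)	/* dummy value, as above */

int main(void)
{
	/* POWER9 host (arch 3.00) emulating a 2.06-compatible guest */
	unsigned long pcr = PCR_ARCH_300 - PCR_ARCH_206;

	assert(pcr == (PCR_ARCH_207 | PCR_ARCH_206));

	/* guest at the host's own architecture level: no PCR bits set */
	assert(PCR_ARCH_300 - PCR_ARCH_300 == 0);
	return 0;
}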
@@ -945,6 +958,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case BOOK3S_INTERRUPT_EXTERNAL:
case BOOK3S_INTERRUPT_H_DOORBELL:
+ case BOOK3S_INTERRUPT_H_VIRT:
vcpu->stat.ext_intr_exits++;
r = RESUME_GUEST;
break;
@@ -1229,6 +1243,12 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_WORT:
*val = get_reg_val(id, vcpu->arch.wort);
break;
+ case KVM_REG_PPC_TIDR:
+ *val = get_reg_val(id, vcpu->arch.tid);
+ break;
+ case KVM_REG_PPC_PSSCR:
+ *val = get_reg_val(id, vcpu->arch.psscr);
+ break;
case KVM_REG_PPC_VPA_ADDR:
spin_lock(&vcpu->arch.vpa_update_lock);
*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
@@ -1288,6 +1308,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_TM_CR:
*val = get_reg_val(id, vcpu->arch.cr_tm);
break;
+ case KVM_REG_PPC_TM_XER:
+ *val = get_reg_val(id, vcpu->arch.xer_tm);
+ break;
case KVM_REG_PPC_TM_LR:
*val = get_reg_val(id, vcpu->arch.lr_tm);
break;
@@ -1427,6 +1450,12 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_WORT:
vcpu->arch.wort = set_reg_val(id, *val);
break;
+ case KVM_REG_PPC_TIDR:
+ vcpu->arch.tid = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_PSSCR:
+ vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS;
+ break;
case KVM_REG_PPC_VPA_ADDR:
addr = set_reg_val(id, *val);
r = -EINVAL;
@@ -1498,6 +1527,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_TM_CR:
vcpu->arch.cr_tm = set_reg_val(id, *val);
break;
+ case KVM_REG_PPC_TM_XER:
+ vcpu->arch.xer_tm = set_reg_val(id, *val);
+ break;
case KVM_REG_PPC_TM_LR:
vcpu->arch.lr_tm = set_reg_val(id, *val);
break;
@@ -1540,6 +1572,20 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
return r;
}
+/*
+ * On POWER9, threads are independent and can be in different partitions.
+ * Therefore we consider each thread to be a subcore.
+ * There is a restriction that all threads have to be in the same
+ * MMU mode (radix or HPT), unfortunately, but since we only support
+ * HPT guests on an HPT host so far, that isn't an impediment yet.
+ */
+static int threads_per_vcore(void)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ return 1;
+ return threads_per_subcore;
+}
+
static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
{
struct kvmppc_vcore *vcore;
@@ -1554,7 +1600,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
init_swait_queue_head(&vcore->wq);
vcore->preempt_tb = TB_NIL;
vcore->lpcr = kvm->arch.lpcr;
- vcore->first_vcpuid = core * threads_per_subcore;
+ vcore->first_vcpuid = core * threads_per_vcore();
vcore->kvm = kvm;
INIT_LIST_HEAD(&vcore->preempt_list);
@@ -1717,7 +1763,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
int core;
struct kvmppc_vcore *vcore;
- core = id / threads_per_subcore;
+ core = id / threads_per_vcore();
if (core >= KVM_MAX_VCORES)
goto out;
@@ -1935,7 +1981,10 @@ static void kvmppc_wait_for_nap(void)
{
int cpu = smp_processor_id();
int i, loops;
+ int n_threads = threads_per_vcore();
+ if (n_threads <= 1)
+ return;
for (loops = 0; loops < 1000000; ++loops) {
/*
* Check if all threads are finished.
@@ -1943,17 +1992,17 @@ static void kvmppc_wait_for_nap(void)
* and the thread clears it when finished, so we look
* for any threads that still have a non-NULL vcore ptr.
*/
- for (i = 1; i < threads_per_subcore; ++i)
+ for (i = 1; i < n_threads; ++i)
if (paca[cpu + i].kvm_hstate.kvm_vcore)
break;
- if (i == threads_per_subcore) {
+ if (i == n_threads) {
HMT_medium();
return;
}
HMT_low();
}
HMT_medium();
- for (i = 1; i < threads_per_subcore; ++i)
+ for (i = 1; i < n_threads; ++i)
if (paca[cpu + i].kvm_hstate.kvm_vcore)
pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
}
@@ -2019,7 +2068,7 @@ static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
vc->vcore_state = VCORE_PREEMPT;
vc->pcpu = smp_processor_id();
- if (vc->num_threads < threads_per_subcore) {
+ if (vc->num_threads < threads_per_vcore()) {
spin_lock(&lp->lock);
list_add_tail(&vc->preempt_list, &lp->list);
spin_unlock(&lp->lock);
@@ -2123,8 +2172,7 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
cip->subcore_threads[sub] = vc->num_threads;
cip->subcore_vm[sub] = vc->kvm;
init_master_vcore(vc);
- list_del(&vc->preempt_list);
- list_add_tail(&vc->preempt_list, &cip->vcs[sub]);
+ list_move_tail(&vc->preempt_list, &cip->vcs[sub]);
return true;
}
@@ -2309,6 +2357,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
unsigned long cmd_bit, stat_bit;
int pcpu, thr;
int target_threads;
+ int controlled_threads;
/*
* Remove from the list any threads that have a signal pending
@@ -2327,11 +2376,18 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
vc->preempt_tb = TB_NIL;
/*
+ * Number of threads that we will be controlling: the same as
+ * the number of threads per subcore, except on POWER9,
+ * where it's 1 because the threads are (mostly) independent.
+ */
+ controlled_threads = threads_per_vcore();
+
+ /*
* Make sure we are running on primary threads, and that secondary
* threads are offline. Also check if the number of threads in this
* guest are greater than the current system threads per guest.
*/
- if ((threads_per_core > 1) &&
+ if ((controlled_threads > 1) &&
((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
for_each_runnable_thread(i, vcpu, vc) {
vcpu->arch.ret = -EBUSY;
@@ -2347,7 +2403,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
*/
init_core_info(&core_info, vc);
pcpu = smp_processor_id();
- target_threads = threads_per_subcore;
+ target_threads = controlled_threads;
if (target_smt_mode && target_smt_mode < target_threads)
target_threads = target_smt_mode;
if (vc->num_threads < target_threads)
@@ -2383,7 +2439,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
smp_wmb();
}
pcpu = smp_processor_id();
- for (thr = 0; thr < threads_per_subcore; ++thr)
+ for (thr = 0; thr < controlled_threads; ++thr)
paca[pcpu + thr].kvm_hstate.kvm_split_mode = sip;
/* Initiate micro-threading (split-core) if required */
@@ -2493,7 +2549,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
}
/* Let secondaries go back to the offline loop */
- for (i = 0; i < threads_per_subcore; ++i) {
+ for (i = 0; i < controlled_threads; ++i) {
kvmppc_release_hwthread(pcpu + i);
if (sip && sip->napped[i])
kvmppc_ipi_thread(pcpu + i);
@@ -2545,9 +2601,6 @@ static void grow_halt_poll_ns(struct kvmppc_vcore *vc)
vc->halt_poll_ns = 10000;
else
vc->halt_poll_ns *= halt_poll_ns_grow;
-
- if (vc->halt_poll_ns > halt_poll_max_ns)
- vc->halt_poll_ns = halt_poll_max_ns;
}
static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
@@ -2558,7 +2611,8 @@ static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
vc->halt_poll_ns /= halt_poll_ns_shrink;
}
-/* Check to see if any of the runnable vcpus on the vcore have pending
+/*
+ * Check to see if any of the runnable vcpus on the vcore have pending
* exceptions or are no longer ceded
*/
static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
@@ -2657,16 +2711,18 @@ out:
}
/* Adjust poll time */
- if (halt_poll_max_ns) {
+ if (halt_poll_ns) {
if (block_ns <= vc->halt_poll_ns)
;
/* We slept and blocked for longer than the max halt time */
- else if (vc->halt_poll_ns && block_ns > halt_poll_max_ns)
+ else if (vc->halt_poll_ns && block_ns > halt_poll_ns)
shrink_halt_poll_ns(vc);
/* We slept and our poll time is too small */
- else if (vc->halt_poll_ns < halt_poll_max_ns &&
- block_ns < halt_poll_max_ns)
+ else if (vc->halt_poll_ns < halt_poll_ns &&
+ block_ns < halt_poll_ns)
grow_halt_poll_ns(vc);
+ if (vc->halt_poll_ns > halt_poll_ns)
+ vc->halt_poll_ns = halt_poll_ns;
} else
vc->halt_poll_ns = 0;
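/*
 * Illustrative user-space sketch, not taken from this patch, of the
 * per-vcore halt-poll adjustment policy above, now driven by the generic
 * halt_poll_ns module parameter rather than the removed halt_poll_max_ns.
 * The grow/shrink behaviour and the 10us floor mirror what is visible in
 * the diff; the cap value and the rest are assumptions for the example.
 */
#include <stdio.h>

static unsigned long halt_poll_ns = 200000;	/* example cap (module param) */
static unsigned int halt_poll_ns_grow = 2;
static unsigned int halt_poll_ns_shrink;	/* 0 means "reset to 0" */

static void adjust(unsigned long *poll_ns, unsigned long block_ns)
{
	if (!halt_poll_ns) {
		*poll_ns = 0;
		return;
	}
	if (block_ns <= *poll_ns)
		;				/* polling already covers it */
	else if (*poll_ns && block_ns > halt_poll_ns)	/* blocked too long */
		*poll_ns = halt_poll_ns_shrink ?
			   *poll_ns / halt_poll_ns_shrink : 0;
	else if (*poll_ns < halt_poll_ns && block_ns < halt_poll_ns)
		*poll_ns = *poll_ns ? *poll_ns * halt_poll_ns_grow : 10000;
	if (*poll_ns > halt_poll_ns)		/* clamp to the global cap */
		*poll_ns = halt_poll_ns;
}

int main(void)
{
	unsigned long poll = 0;

	adjust(&poll, 20000);	/* short block: start polling at 10us */
	printf("poll = %lu\n", poll);
	adjust(&poll, 20000);	/* still short: double it */
	printf("poll = %lu\n", poll);
	adjust(&poll, 900000);	/* long block: shrink (reset) */
	printf("poll = %lu\n", poll);
	return 0;
}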
@@ -2973,6 +3029,15 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
+ /*
+ * If we are creating a new memslot, an address that was previously
+ * cached as emulated MMIO may no longer be emulated MMIO, so
+ * invalidate all cached emulated-MMIO translations.
+ */
+ if (npages)
+ atomic64_inc(&kvm->arch.mmio_update);
+
if (npages && old->npages) {
/*
* If modifying a memslot, reset all the rmap dirty bits.
@@ -3017,6 +3082,22 @@ static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
return;
}
+static void kvmppc_setup_partition_table(struct kvm *kvm)
+{
+ unsigned long dw0, dw1;
+
+ /* PS field - page size for VRMA */
+ dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) |
+ ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1);
+ /* HTABSIZE and HTABORG fields */
+ dw0 |= kvm->arch.sdr1;
+
+ /* Second dword has GR=0; other fields are unused since UPRT=0 */
+ dw1 = 0;
+
+ mmu_partition_table_set_entry(kvm->arch.lpid, dw0, dw1);
+}
+
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
int err = 0;
@@ -3068,17 +3149,20 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
psize == 0x1000000))
goto out_srcu;
- /* Update VRMASD field in the LPCR */
senc = slb_pgsize_encoding(psize);
kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
(VRMA_VSID << SLB_VSID_SHIFT_1T);
- /* the -4 is to account for senc values starting at 0x10 */
- lpcr = senc << (LPCR_VRMASD_SH - 4);
-
/* Create HPTEs in the hash page table for the VRMA */
kvmppc_map_vrma(vcpu, memslot, porder);
- kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
+ /* Update VRMASD field in the LPCR */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /* the -4 is to account for senc values starting at 0x10 */
+ lpcr = senc << (LPCR_VRMASD_SH - 4);
+ kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
+ } else {
+ kvmppc_setup_partition_table(kvm);
+ }
/* Order updates to kvm->arch.lpcr etc. vs. hpte_setup_done */
smp_wmb();
@@ -3193,14 +3277,18 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
* Since we don't flush the TLB when tearing down a VM,
* and this lpid might have previously been used,
* make sure we flush on each core before running the new VM.
+ * On POWER9, the tlbie in mmu_partition_table_set_entry()
+ * does this flush for us.
*/
- cpumask_setall(&kvm->arch.need_tlb_flush);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ cpumask_setall(&kvm->arch.need_tlb_flush);
/* Start out with the default set of hcalls enabled */
memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
sizeof(kvm->arch.enabled_hcalls));
- kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
/* Init LPCR for virtual RMA mode */
kvm->arch.host_lpid = mfspr(SPRN_LPID);
@@ -3213,9 +3301,29 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
/* On POWER8 turn on online bit to enable PURR/SPURR */
if (cpu_has_feature(CPU_FTR_ARCH_207S))
lpcr |= LPCR_ONL;
+ /*
+ * On POWER9, the VPM0 bit is reserved (VPM0=1 behaviour is assumed).
+ * Set HVICE bit to enable hypervisor virtualization interrupts.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ lpcr &= ~LPCR_VPM0;
+ lpcr |= LPCR_HVICE;
+ }
+
kvm->arch.lpcr = lpcr;
/*
+ * Work out how many sets the TLB has, for the use of
+ * the TLB invalidation loop in book3s_hv_rmhandlers.S.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */
+ else if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ kvm->arch.tlb_sets = POWER8_TLB_SETS; /* 512 */
+ else
+ kvm->arch.tlb_sets = POWER7_TLB_SETS; /* 128 */
+
+ /*
* Track that we now have a HV mode VM active. This blocks secondary
* CPU threads from coming online.
*/
@@ -3279,9 +3387,9 @@ static int kvmppc_core_check_processor_compat_hv(void)
!cpu_has_feature(CPU_FTR_ARCH_206))
return -EIO;
/*
- * Disable KVM for Power9, untill the required bits merged.
+ * Disable KVM for Power9 in radix mode.
*/
- if (cpu_has_feature(CPU_FTR_ARCH_300))
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled())
return -EIO;
return 0;
@@ -3635,6 +3743,23 @@ static int kvmppc_book3s_init_hv(void)
if (r)
return r;
+ /*
+ * We need a way of accessing the XICS interrupt controller,
+ * either directly, via paca[cpu].kvm_hstate.xics_phys, or
+ * indirectly, via OPAL.
+ */
+#ifdef CONFIG_SMP
+ if (!get_paca()->kvm_hstate.xics_phys) {
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
+ if (!np) {
+ pr_err("KVM-HV: Cannot determine method for accessing XICS\n");
+ return -ENODEV;
+ }
+ }
+#endif
+
kvm_ops_hv.owner = THIS_MODULE;
kvmppc_hv_ops = &kvm_ops_hv;
@@ -3657,3 +3782,4 @@ module_exit(kvmppc_book3s_exit_hv);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
+
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 0c84d6bc8356..5bb24be0b346 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -26,6 +26,8 @@
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
+#include <asm/opal.h>
+#include <asm/smp.h>
#define KVM_CMA_CHUNK_ORDER 18
@@ -205,12 +207,18 @@ static inline void rm_writeb(unsigned long paddr, u8 val)
void kvmhv_rm_send_ipi(int cpu)
{
unsigned long xics_phys;
+ unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
- /* On POWER8 for IPIs to threads in the same core, use msgsnd */
+ /* On POWER9 we can use msgsnd for any destination cpu. */
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ msg |= get_hard_smp_processor_id(cpu);
+ __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+ return;
+ }
+ /* On POWER8 for IPIs to threads in the same core, use msgsnd. */
if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
cpu_first_thread_sibling(cpu) ==
cpu_first_thread_sibling(raw_smp_processor_id())) {
- unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
msg |= cpu_thread_in_core(cpu);
__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
return;
@@ -218,7 +226,11 @@ void kvmhv_rm_send_ipi(int cpu)
/* Else poke the target with an IPI */
xics_phys = paca[cpu].kvm_hstate.xics_phys;
- rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+ if (xics_phys)
+ rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+ else
+ opal_rm_int_set_mfrr(get_hard_smp_processor_id(cpu),
+ IPI_PRIORITY);
}
/*
@@ -329,7 +341,7 @@ static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
* saved a copy of the XIRR in the PACA, it will be picked up by
* the host ICP driver.
*/
-static int kvmppc_check_passthru(u32 xisr, __be32 xirr)
+static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
struct kvmppc_passthru_irqmap *pimap;
struct kvmppc_irq_map *irq_map;
@@ -348,11 +360,11 @@ static int kvmppc_check_passthru(u32 xisr, __be32 xirr)
/* We're handling this interrupt, generic code doesn't need to */
local_paca->kvm_hstate.saved_xirr = 0;
- return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap);
+ return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}
#else
-static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr)
+static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
return 1;
}
@@ -367,14 +379,31 @@ static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr)
* -1 if there was a guest wakeup IPI (which has now been cleared)
* -2 if there is PCI passthrough external interrupt that was handled
*/
+static long kvmppc_read_one_intr(bool *again);
long kvmppc_read_intr(void)
{
+ long ret = 0;
+ long rc;
+ bool again;
+
+ do {
+ again = false;
+ rc = kvmppc_read_one_intr(&again);
+ if (rc && (ret == 0 || rc > ret))
+ ret = rc;
+ } while (again);
+ return ret;
+}
+
+static long kvmppc_read_one_intr(bool *again)
+{
unsigned long xics_phys;
u32 h_xirr;
__be32 xirr;
u32 xisr;
u8 host_ipi;
+ int64_t rc;
/* see if a host IPI is pending */
host_ipi = local_paca->kvm_hstate.host_ipi;
@@ -383,8 +412,14 @@ long kvmppc_read_intr(void)
/* Now read the interrupt from the ICP */
xics_phys = local_paca->kvm_hstate.xics_phys;
- if (unlikely(!xics_phys))
- return 1;
+ if (!xics_phys) {
+ /* Use OPAL to read the XIRR */
+ rc = opal_rm_int_get_xirr(&xirr, false);
+ if (rc < 0)
+ return 1;
+ } else {
+ xirr = _lwzcix(xics_phys + XICS_XIRR);
+ }
/*
* Save XIRR for later. Since we get control in reverse endian
@@ -392,7 +427,6 @@ long kvmppc_read_intr(void)
* host endian. Note that xirr is the value read from the
* XIRR register, while h_xirr is the host endian version.
*/
- xirr = _lwzcix(xics_phys + XICS_XIRR);
h_xirr = be32_to_cpu(xirr);
local_paca->kvm_hstate.saved_xirr = h_xirr;
xisr = h_xirr & 0xffffff;
@@ -411,8 +445,16 @@ long kvmppc_read_intr(void)
* If it is an IPI, clear the MFRR and EOI it.
*/
if (xisr == XICS_IPI) {
- _stbcix(xics_phys + XICS_MFRR, 0xff);
- _stwcix(xics_phys + XICS_XIRR, xirr);
+ if (xics_phys) {
+ _stbcix(xics_phys + XICS_MFRR, 0xff);
+ _stwcix(xics_phys + XICS_XIRR, xirr);
+ } else {
+ opal_rm_int_set_mfrr(hard_smp_processor_id(), 0xff);
+ rc = opal_rm_int_eoi(h_xirr);
+ /* If rc > 0, there is another interrupt pending */
+ *again = rc > 0;
+ }
+
/*
* Need to ensure side effects of above stores
* complete before proceeding.
@@ -429,7 +471,11 @@ long kvmppc_read_intr(void)
/* We raced with the host,
* we need to resend that IPI, bummer
*/
- _stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY);
+ if (xics_phys)
+ _stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY);
+ else
+ opal_rm_int_set_mfrr(hard_smp_processor_id(),
+ IPI_PRIORITY);
/* Let side effects complete */
smp_mb();
return 1;
@@ -440,5 +486,5 @@ long kvmppc_read_intr(void)
return -1;
}
- return kvmppc_check_passthru(xisr, xirr);
+ return kvmppc_check_passthru(xisr, xirr, again);
}
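/*
 * Illustrative user-space sketch, not taken from this patch, of the
 * aggregation rule in the new kvmppc_read_intr() loop above: it retries
 * kvmppc_read_one_intr() while the OPAL EOI path reports another pending
 * interrupt, and keeps the "most significant" return value seen
 * (0 = nothing, 1 = host interrupt, -1 = guest wakeup IPI,
 * -2 = passthrough interrupt handled).
 */
#include <assert.h>

static long aggregate(const long *rcs, int n)
{
	long ret = 0;

	for (int i = 0; i < n; i++) {
		long rc = rcs[i];

		if (rc && (ret == 0 || rc > ret))	/* same test as the patch */
			ret = rc;
	}
	return ret;
}

int main(void)
{
	const long a[] = { -1, 0, -2 };	/* wakeup IPI (-1) wins over passthrough */
	const long b[] = { -2, 1 };	/* a host interrupt (1) wins over both */

	assert(aggregate(a, 3) == -1);
	assert(aggregate(b, 2) == 1);
	return 0;
}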
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 0fa70a9618d7..7ef0993214f3 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -16,6 +16,7 @@
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/hmi.h>
+#include <asm/kvm_ppc.h>
/* SRR1 bits for machine check on POWER7 */
#define SRR1_MC_LDSTERR (1ul << (63-42))
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 99b4e9d5dd23..9ef3c4be952f 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -264,8 +264,10 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
if (pa)
pteh |= HPTE_V_VALID;
- else
+ else {
pteh |= HPTE_V_ABSENT;
+ ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
+ }
/*If we had host pte mapping then Check WIMG */
if (ptep && !hpte_cache_flags_ok(ptel, is_ci)) {
@@ -351,6 +353,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
/* inval in progress, write a non-present HPTE */
pteh |= HPTE_V_ABSENT;
pteh &= ~HPTE_V_VALID;
+ ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
unlock_rmap(rmap);
} else {
kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
@@ -361,6 +364,11 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
}
}
+ /* Convert to new format on P9 */
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ ptel = hpte_old_to_new_r(pteh, ptel);
+ pteh = hpte_old_to_new_v(pteh);
+ }
hpte[1] = cpu_to_be64(ptel);
/* Write the first HPTE dword, unlocking the HPTE and making it valid */
@@ -386,6 +394,13 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
#define LOCK_TOKEN (*(u32 *)(&get_paca()->paca_index))
#endif
+static inline int is_mmio_hpte(unsigned long v, unsigned long r)
+{
+ return ((v & HPTE_V_ABSENT) &&
+ (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
+ (HPTE_R_KEY_HI | HPTE_R_KEY_LO));
+}
+
static inline int try_lock_tlbie(unsigned int *lock)
{
unsigned int tmp, old;
@@ -409,13 +424,18 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
{
long i;
+ /*
+ * We use the POWER9 5-operand versions of tlbie and tlbiel here.
+ * Since we are using RIC=0 PRS=0 R=0, and P7/P8 tlbiel ignores
+ * the RS field, this is backwards-compatible with P7 and P8.
+ */
if (global) {
while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
cpu_relax();
if (need_sync)
asm volatile("ptesync" : : : "memory");
for (i = 0; i < npages; ++i)
- asm volatile(PPC_TLBIE(%1,%0) : :
+ asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
"r" (rbvalues[i]), "r" (kvm->arch.lpid));
asm volatile("eieio; tlbsync; ptesync" : : : "memory");
kvm->arch.tlbie_lock = 0;
@@ -423,7 +443,8 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
if (need_sync)
asm volatile("ptesync" : : : "memory");
for (i = 0; i < npages; ++i)
- asm volatile("tlbiel %0" : : "r" (rbvalues[i]));
+ asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
+ "r" (rbvalues[i]), "r" (0));
asm volatile("ptesync" : : : "memory");
}
}
@@ -435,18 +456,23 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
__be64 *hpte;
unsigned long v, r, rb;
struct revmap_entry *rev;
- u64 pte;
+ u64 pte, orig_pte, pte_r;
if (pte_index >= kvm->arch.hpt_npte)
return H_PARAMETER;
hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
cpu_relax();
- pte = be64_to_cpu(hpte[0]);
+ pte = orig_pte = be64_to_cpu(hpte[0]);
+ pte_r = be64_to_cpu(hpte[1]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ pte = hpte_new_to_old_v(pte, pte_r);
+ pte_r = hpte_new_to_old_r(pte_r);
+ }
if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
((flags & H_ANDCOND) && (pte & avpn) != 0)) {
- __unlock_hpte(hpte, pte);
+ __unlock_hpte(hpte, orig_pte);
return H_NOT_FOUND;
}
@@ -454,7 +480,7 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
v = pte & ~HPTE_V_HVLOCK;
if (v & HPTE_V_VALID) {
hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
- rb = compute_tlbie_rb(v, be64_to_cpu(hpte[1]), pte_index);
+ rb = compute_tlbie_rb(v, pte_r, pte_index);
do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
/*
* The reference (R) and change (C) bits in a HPT
@@ -472,6 +498,9 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
note_hpte_modification(kvm, rev);
unlock_hpte(hpte, 0);
+ if (is_mmio_hpte(v, pte_r))
+ atomic64_inc(&kvm->arch.mmio_update);
+
if (v & HPTE_V_ABSENT)
v = (v & ~HPTE_V_ABSENT) | HPTE_V_VALID;
hpret[0] = v;
@@ -498,7 +527,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
int global;
long int ret = H_SUCCESS;
struct revmap_entry *rev, *revs[4];
- u64 hp0;
+ u64 hp0, hp1;
global = global_invalidates(kvm, 0);
for (i = 0; i < 4 && ret == H_SUCCESS; ) {
@@ -531,6 +560,11 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
}
found = 0;
hp0 = be64_to_cpu(hp[0]);
+ hp1 = be64_to_cpu(hp[1]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ hp0 = hpte_new_to_old_v(hp0, hp1);
+ hp1 = hpte_new_to_old_r(hp1);
+ }
if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
switch (flags & 3) {
case 0: /* absolute */
@@ -561,13 +595,14 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
args[j] |= rcbits << (56 - 5);
hp[0] = 0;
+ if (is_mmio_hpte(hp0, hp1))
+ atomic64_inc(&kvm->arch.mmio_update);
continue;
}
/* leave it locked */
hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
- tlbrb[n] = compute_tlbie_rb(be64_to_cpu(hp[0]),
- be64_to_cpu(hp[1]), pte_index);
+ tlbrb[n] = compute_tlbie_rb(hp0, hp1, pte_index);
indexes[n] = j;
hptes[n] = hp;
revs[n] = rev;
@@ -605,7 +640,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
__be64 *hpte;
struct revmap_entry *rev;
unsigned long v, r, rb, mask, bits;
- u64 pte;
+ u64 pte_v, pte_r;
if (pte_index >= kvm->arch.hpt_npte)
return H_PARAMETER;
@@ -613,14 +648,16 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
cpu_relax();
- pte = be64_to_cpu(hpte[0]);
- if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
- ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
- __unlock_hpte(hpte, pte);
+ v = pte_v = be64_to_cpu(hpte[0]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ v = hpte_new_to_old_v(v, be64_to_cpu(hpte[1]));
+ if ((v & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
+ ((flags & H_AVPN) && (v & ~0x7fUL) != avpn)) {
+ __unlock_hpte(hpte, pte_v);
return H_NOT_FOUND;
}
- v = pte;
+ pte_r = be64_to_cpu(hpte[1]);
bits = (flags << 55) & HPTE_R_PP0;
bits |= (flags << 48) & HPTE_R_KEY_HI;
bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
@@ -642,22 +679,26 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
* readonly to writable. If it should be writable, we'll
* take a trap and let the page fault code sort it out.
*/
- pte = be64_to_cpu(hpte[1]);
- r = (pte & ~mask) | bits;
- if (hpte_is_writable(r) && !hpte_is_writable(pte))
+ r = (pte_r & ~mask) | bits;
+ if (hpte_is_writable(r) && !hpte_is_writable(pte_r))
r = hpte_make_readonly(r);
/* If the PTE is changing, invalidate it first */
- if (r != pte) {
+ if (r != pte_r) {
rb = compute_tlbie_rb(v, r, pte_index);
- hpte[0] = cpu_to_be64((v & ~HPTE_V_VALID) |
+ hpte[0] = cpu_to_be64((pte_v & ~HPTE_V_VALID) |
HPTE_V_ABSENT);
do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags),
true);
+ /* Don't lose R/C bit updates done by hardware */
+ r |= be64_to_cpu(hpte[1]) & (HPTE_R_R | HPTE_R_C);
hpte[1] = cpu_to_be64(r);
}
}
- unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
+ unlock_hpte(hpte, pte_v & ~HPTE_V_HVLOCK);
asm volatile("ptesync" : : : "memory");
+ if (is_mmio_hpte(v, pte_r))
+ atomic64_inc(&kvm->arch.mmio_update);
+
return H_SUCCESS;
}
@@ -681,6 +722,10 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
r = be64_to_cpu(hpte[1]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ v = hpte_new_to_old_v(v, r);
+ r = hpte_new_to_old_r(r);
+ }
if (v & HPTE_V_ABSENT) {
v &= ~HPTE_V_ABSENT;
v |= HPTE_V_VALID;
@@ -798,10 +843,16 @@ void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
unsigned long pte_index)
{
unsigned long rb;
+ u64 hp0, hp1;
hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
- rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
- pte_index);
+ hp0 = be64_to_cpu(hptep[0]);
+ hp1 = be64_to_cpu(hptep[1]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ hp0 = hpte_new_to_old_v(hp0, hp1);
+ hp1 = hpte_new_to_old_r(hp1);
+ }
+ rb = compute_tlbie_rb(hp0, hp1, pte_index);
do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
@@ -811,9 +862,15 @@ void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
{
unsigned long rb;
unsigned char rbyte;
+ u64 hp0, hp1;
- rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
- pte_index);
+ hp0 = be64_to_cpu(hptep[0]);
+ hp1 = be64_to_cpu(hptep[1]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ hp0 = hpte_new_to_old_v(hp0, hp1);
+ hp1 = hpte_new_to_old_r(hp1);
+ }
+ rb = compute_tlbie_rb(hp0, hp1, pte_index);
rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
/* modify only the second-last byte, which contains the ref bit */
*((char *)hptep + 14) = rbyte;
@@ -828,6 +885,37 @@ static int slb_base_page_shift[4] = {
20, /* 1M, unsupported */
};
+static struct mmio_hpte_cache_entry *mmio_cache_search(struct kvm_vcpu *vcpu,
+ unsigned long eaddr, unsigned long slb_v, long mmio_update)
+{
+ struct mmio_hpte_cache_entry *entry = NULL;
+ unsigned int pshift;
+ unsigned int i;
+
+ for (i = 0; i < MMIO_HPTE_CACHE_SIZE; i++) {
+ entry = &vcpu->arch.mmio_cache.entry[i];
+ if (entry->mmio_update == mmio_update) {
+ pshift = entry->slb_base_pshift;
+ if ((entry->eaddr >> pshift) == (eaddr >> pshift) &&
+ entry->slb_v == slb_v)
+ return entry;
+ }
+ }
+ return NULL;
+}
+
+static struct mmio_hpte_cache_entry *
+ next_mmio_cache_entry(struct kvm_vcpu *vcpu)
+{
+ unsigned int index = vcpu->arch.mmio_cache.index;
+
+ vcpu->arch.mmio_cache.index++;
+ if (vcpu->arch.mmio_cache.index == MMIO_HPTE_CACHE_SIZE)
+ vcpu->arch.mmio_cache.index = 0;
+
+ return &vcpu->arch.mmio_cache.entry[index];
+}
+
/* When called from virtmode, this func should be protected by
* preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK
* can trigger deadlock issue.
@@ -842,7 +930,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
unsigned long avpn;
__be64 *hpte;
unsigned long mask, val;
- unsigned long v, r;
+ unsigned long v, r, orig_v;
/* Get page shift, work out hash and AVPN etc. */
mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
@@ -877,6 +965,8 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
for (i = 0; i < 16; i += 2) {
/* Read the PTE racily */
v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ v = hpte_new_to_old_v(v, be64_to_cpu(hpte[i+1]));
/* Check valid/absent, hash, segment size and AVPN */
if (!(v & valid) || (v & mask) != val)
@@ -885,8 +975,12 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
/* Lock the PTE and read it under the lock */
while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
cpu_relax();
- v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
+ v = orig_v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
r = be64_to_cpu(hpte[i+1]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ v = hpte_new_to_old_v(v, r);
+ r = hpte_new_to_old_r(r);
+ }
/*
* Check the HPTE again, including base page size
@@ -896,7 +990,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
/* Return with the HPTE still locked */
return (hash << 3) + (i >> 1);
- __unlock_hpte(&hpte[i], v);
+ __unlock_hpte(&hpte[i], orig_v);
}
if (val & HPTE_V_SECONDARY)
@@ -924,30 +1018,45 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
{
struct kvm *kvm = vcpu->kvm;
long int index;
- unsigned long v, r, gr;
+ unsigned long v, r, gr, orig_v;
__be64 *hpte;
unsigned long valid;
struct revmap_entry *rev;
unsigned long pp, key;
+ struct mmio_hpte_cache_entry *cache_entry = NULL;
+ long mmio_update = 0;
/* For protection fault, expect to find a valid HPTE */
valid = HPTE_V_VALID;
- if (status & DSISR_NOHPTE)
+ if (status & DSISR_NOHPTE) {
valid |= HPTE_V_ABSENT;
-
- index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
- if (index < 0) {
- if (status & DSISR_NOHPTE)
- return status; /* there really was no HPTE */
- return 0; /* for prot fault, HPTE disappeared */
+ mmio_update = atomic64_read(&kvm->arch.mmio_update);
+ cache_entry = mmio_cache_search(vcpu, addr, slb_v, mmio_update);
}
- hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
- v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
- r = be64_to_cpu(hpte[1]);
- rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
- gr = rev->guest_rpte;
+ if (cache_entry) {
+ index = cache_entry->pte_index;
+ v = cache_entry->hpte_v;
+ r = cache_entry->hpte_r;
+ gr = cache_entry->rpte;
+ } else {
+ index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
+ if (index < 0) {
+ if (status & DSISR_NOHPTE)
+ return status; /* there really was no HPTE */
+ return 0; /* for prot fault, HPTE disappeared */
+ }
+ hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
+ v = orig_v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
+ r = be64_to_cpu(hpte[1]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ v = hpte_new_to_old_v(v, r);
+ r = hpte_new_to_old_r(r);
+ }
+ rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
+ gr = rev->guest_rpte;
- unlock_hpte(hpte, v);
+ unlock_hpte(hpte, orig_v);
+ }
/* For not found, if the HPTE is valid by now, retry the instruction */
if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
@@ -985,12 +1094,32 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
vcpu->arch.pgfault_index = index;
vcpu->arch.pgfault_hpte[0] = v;
vcpu->arch.pgfault_hpte[1] = r;
+ vcpu->arch.pgfault_cache = cache_entry;
/* Check the storage key to see if it is possibly emulated MMIO */
- if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
- (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
- (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
- return -2; /* MMIO emulation - load instr word */
+ if ((r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
+ (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) {
+ if (!cache_entry) {
+ unsigned int pshift = 12;
+ unsigned int pshift_index;
+
+ if (slb_v & SLB_VSID_L) {
+ pshift_index = ((slb_v & SLB_VSID_LP) >> 4);
+ pshift = slb_base_page_shift[pshift_index];
+ }
+ cache_entry = next_mmio_cache_entry(vcpu);
+ cache_entry->eaddr = addr;
+ cache_entry->slb_base_pshift = pshift;
+ cache_entry->pte_index = index;
+ cache_entry->hpte_v = v;
+ cache_entry->hpte_r = r;
+ cache_entry->rpte = gr;
+ cache_entry->slb_v = slb_v;
+ cache_entry->mmio_update = mmio_update;
+ }
+ if (data && (vcpu->arch.shregs.msr & MSR_IR))
+ return -2; /* MMIO emulation - load instr word */
+ }
return -1; /* send fault up to host kernel mode */
}
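
A recurring pattern in the book3s_hv_rm_mmu.c hunks above is worth calling out: on POWER9 (CPU_FTR_ARCH_300) the hardware HPTE layout differs from earlier processors, so each hcall handler converts the entry to the old layout on read and back to the new layout on write. The following is a minimal sketch of that discipline, not part of the patch; the function name is invented and locking, revmap handling and TLB invalidation are omitted.

static void sketch_update_hpte(__be64 *hpte)
{
	u64 v = be64_to_cpu(hpte[0]);
	u64 r = be64_to_cpu(hpte[1]);

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		/* interpret the entry in the pre-POWER9 layout */
		v = hpte_new_to_old_v(v, r);
		r = hpte_new_to_old_r(r);
	}

	/* ... examine or modify v and r in the old layout ... */

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		/* convert back before writing the hash table entry */
		r = hpte_old_to_new_r(v, r);
		v = hpte_old_to_new_v(v);
	}
	hpte[1] = cpu_to_be64(r);	/* second dword first, as in kvmppc_do_h_enter() */
	hpte[0] = cpu_to_be64(v);
}

The MMIO HPTE cache added in kvmppc_hpte_hv_fault() works on the same converted (old-layout) values: cached entries are tagged with kvm->arch.mmio_update, and H_REMOVE, H_PROTECT and H_BULK_REMOVE bump that counter when they touch an MMIO HPTE, so stale cache hits are rejected by mmio_cache_search().
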
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index a0ea63ac2b52..06edc4366639 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -70,7 +70,11 @@ static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
hcpu = hcore << threads_shift;
kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
- icp_native_cause_ipi_rm(hcpu);
+ if (paca[hcpu].kvm_hstate.xics_phys)
+ icp_native_cause_ipi_rm(hcpu);
+ else
+ opal_rm_int_set_mfrr(get_hard_smp_processor_id(hcpu),
+ IPI_PRIORITY);
}
#else
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
@@ -737,7 +741,7 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
unsigned long eoi_rc;
-static void icp_eoi(struct irq_chip *c, u32 hwirq, u32 xirr)
+static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
{
unsigned long xics_phys;
int64_t rc;
@@ -751,7 +755,12 @@ static void icp_eoi(struct irq_chip *c, u32 hwirq, u32 xirr)
/* EOI it */
xics_phys = local_paca->kvm_hstate.xics_phys;
- _stwcix(xics_phys + XICS_XIRR, xirr);
+ if (xics_phys) {
+ _stwcix(xics_phys + XICS_XIRR, xirr);
+ } else {
+ rc = opal_rm_int_eoi(be32_to_cpu(xirr));
+ *again = rc > 0;
+ }
}
static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu)
@@ -809,9 +818,10 @@ static void kvmppc_rm_handle_irq_desc(struct irq_desc *desc)
}
long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
- u32 xirr,
+ __be32 xirr,
struct kvmppc_irq_map *irq_map,
- struct kvmppc_passthru_irqmap *pimap)
+ struct kvmppc_passthru_irqmap *pimap,
+ bool *again)
{
struct kvmppc_xics *xics;
struct kvmppc_icp *icp;
@@ -825,7 +835,8 @@ long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
icp_rm_deliver_irq(xics, icp, irq);
/* EOI the interrupt */
- icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr);
+ icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr,
+ again);
if (check_too_hard(xics, icp) == H_TOO_HARD)
return 2;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index c3c1d1bcfc67..9338a818e05c 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -501,17 +501,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
cmpwi r0, 0
beq 57f
li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
- mfspr r4, SPRN_LPCR
- rlwimi r4, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
- mtspr SPRN_LPCR, r4
- isync
- std r0, HSTATE_SCRATCH0(r13)
- ptesync
- ld r0, HSTATE_SCRATCH0(r13)
-1: cmpd r0, r0
- bne 1b
- nap
- b .
+ mfspr r5, SPRN_LPCR
+ rlwimi r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
+ b kvm_nap_sequence
57: li r0, 0
stbx r0, r3, r4
@@ -523,6 +515,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 *                                                                           *
*****************************************************************************/
+/* Stack frame offsets */
+#define STACK_SLOT_TID (112-16)
+#define STACK_SLOT_PSSCR (112-24)
+
.global kvmppc_hv_entry
kvmppc_hv_entry:
@@ -581,12 +577,14 @@ kvmppc_hv_entry:
ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
cmpwi r6,0
bne 10f
- ld r6,KVM_SDR1(r9)
lwz r7,KVM_LPID(r9)
+BEGIN_FTR_SECTION
+ ld r6,KVM_SDR1(r9)
li r0,LPID_RSVD /* switch to reserved LPID */
mtspr SPRN_LPID,r0
ptesync
mtspr SPRN_SDR1,r6 /* switch to partition page table */
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_LPID,r7
isync
@@ -607,12 +605,8 @@ kvmppc_hv_entry:
stdcx. r7,0,r6
bne 23b
/* Flush the TLB of any entries for this LPID */
- /* use arch 2.07S as a proxy for POWER8 */
-BEGIN_FTR_SECTION
- li r6,512 /* POWER8 has 512 sets */
-FTR_SECTION_ELSE
- li r6,128 /* POWER7 has 128 sets */
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
+ lwz r6,KVM_TLB_SETS(r9)
+ li r0,0 /* RS for P9 version of tlbiel */
mtctr r6
li r7,0x800 /* IS field = 0b10 */
ptesync
@@ -698,6 +692,14 @@ kvmppc_got_guest:
mtspr SPRN_PURR,r7
mtspr SPRN_SPURR,r8
+ /* Save host values of some registers */
+BEGIN_FTR_SECTION
+ mfspr r5, SPRN_TIDR
+ mfspr r6, SPRN_PSSCR
+ std r5, STACK_SLOT_TID(r1)
+ std r6, STACK_SLOT_PSSCR(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
BEGIN_FTR_SECTION
/* Set partition DABR */
/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
@@ -750,14 +752,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
BEGIN_FTR_SECTION
ld r5, VCPU_MMCR + 24(r4)
ld r6, VCPU_SIER(r4)
+ mtspr SPRN_MMCR2, r5
+ mtspr SPRN_SIER, r6
+BEGIN_FTR_SECTION_NESTED(96)
lwz r7, VCPU_PMC + 24(r4)
lwz r8, VCPU_PMC + 28(r4)
ld r9, VCPU_MMCR + 32(r4)
- mtspr SPRN_MMCR2, r5
- mtspr SPRN_SIER, r6
mtspr SPRN_SPMC1, r7
mtspr SPRN_SPMC2, r8
mtspr SPRN_MMCRS, r9
+END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mtspr SPRN_MMCR0, r3
isync
@@ -813,20 +817,30 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtspr SPRN_EBBHR, r8
ld r5, VCPU_EBBRR(r4)
ld r6, VCPU_BESCR(r4)
- ld r7, VCPU_CSIGR(r4)
- ld r8, VCPU_TACR(r4)
+ lwz r7, VCPU_GUEST_PID(r4)
+ ld r8, VCPU_WORT(r4)
mtspr SPRN_EBBRR, r5
mtspr SPRN_BESCR, r6
- mtspr SPRN_CSIGR, r7
- mtspr SPRN_TACR, r8
+ mtspr SPRN_PID, r7
+ mtspr SPRN_WORT, r8
+BEGIN_FTR_SECTION
+ /* POWER8-only registers */
ld r5, VCPU_TCSCR(r4)
ld r6, VCPU_ACOP(r4)
- lwz r7, VCPU_GUEST_PID(r4)
- ld r8, VCPU_WORT(r4)
+ ld r7, VCPU_CSIGR(r4)
+ ld r8, VCPU_TACR(r4)
mtspr SPRN_TCSCR, r5
mtspr SPRN_ACOP, r6
- mtspr SPRN_PID, r7
- mtspr SPRN_WORT, r8
+ mtspr SPRN_CSIGR, r7
+ mtspr SPRN_TACR, r8
+FTR_SECTION_ELSE
+ /* POWER9-only registers */
+ ld r5, VCPU_TID(r4)
+ ld r6, VCPU_PSSCR(r4)
+ oris r6, r6, PSSCR_EC@h /* This makes stop trap to HV */
+ mtspr SPRN_TIDR, r5
+ mtspr SPRN_PSSCR, r6
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:
/*
@@ -1341,20 +1355,29 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
std r8, VCPU_EBBHR(r9)
mfspr r5, SPRN_EBBRR
mfspr r6, SPRN_BESCR
- mfspr r7, SPRN_CSIGR
- mfspr r8, SPRN_TACR
+ mfspr r7, SPRN_PID
+ mfspr r8, SPRN_WORT
std r5, VCPU_EBBRR(r9)
std r6, VCPU_BESCR(r9)
- std r7, VCPU_CSIGR(r9)
- std r8, VCPU_TACR(r9)
+ stw r7, VCPU_GUEST_PID(r9)
+ std r8, VCPU_WORT(r9)
+BEGIN_FTR_SECTION
mfspr r5, SPRN_TCSCR
mfspr r6, SPRN_ACOP
- mfspr r7, SPRN_PID
- mfspr r8, SPRN_WORT
+ mfspr r7, SPRN_CSIGR
+ mfspr r8, SPRN_TACR
std r5, VCPU_TCSCR(r9)
std r6, VCPU_ACOP(r9)
- stw r7, VCPU_GUEST_PID(r9)
- std r8, VCPU_WORT(r9)
+ std r7, VCPU_CSIGR(r9)
+ std r8, VCPU_TACR(r9)
+FTR_SECTION_ELSE
+ mfspr r5, SPRN_TIDR
+ mfspr r6, SPRN_PSSCR
+ std r5, VCPU_TID(r9)
+ rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */
+ rotldi r6, r6, 60
+ std r6, VCPU_PSSCR(r9)
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
/*
* Restore various registers to 0, where non-zero values
* set by the guest could disrupt the host.
@@ -1363,12 +1386,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtspr SPRN_IAMR, r0
mtspr SPRN_CIABR, r0
mtspr SPRN_DAWRX, r0
- mtspr SPRN_TCSCR, r0
mtspr SPRN_WORT, r0
+BEGIN_FTR_SECTION
+ mtspr SPRN_TCSCR, r0
/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
li r0, 1
sldi r0, r0, 31
mtspr SPRN_MMCRS, r0
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
8:
/* Save and reset AMR and UAMOR before turning on the MMU */
@@ -1502,15 +1527,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
stw r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
mfspr r5, SPRN_SIER
+ std r5, VCPU_SIER(r9)
+BEGIN_FTR_SECTION_NESTED(96)
mfspr r6, SPRN_SPMC1
mfspr r7, SPRN_SPMC2
mfspr r8, SPRN_MMCRS
- std r5, VCPU_SIER(r9)
stw r6, VCPU_PMC + 24(r9)
stw r7, VCPU_PMC + 28(r9)
std r8, VCPU_MMCR + 32(r9)
lis r4, 0x8000
mtspr SPRN_MMCRS, r4
+END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
/* Clear out SLB */
@@ -1519,6 +1546,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
slbia
ptesync
+ /* Restore host values of some registers */
+BEGIN_FTR_SECTION
+ ld r5, STACK_SLOT_TID(r1)
+ ld r6, STACK_SLOT_PSSCR(r1)
+ mtspr SPRN_TIDR, r5
+ mtspr SPRN_PSSCR, r6
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
/*
* POWER7/POWER8 guest -> host partition switch code.
* We don't have to lock against tlbies but we do
@@ -1552,12 +1587,14 @@ kvmhv_switch_to_host:
beq 19f
/* Primary thread switches back to host partition */
- ld r6,KVM_HOST_SDR1(r4)
lwz r7,KVM_HOST_LPID(r4)
+BEGIN_FTR_SECTION
+ ld r6,KVM_HOST_SDR1(r4)
li r8,LPID_RSVD /* switch to reserved LPID */
mtspr SPRN_LPID,r8
ptesync
- mtspr SPRN_SDR1,r6 /* switch to partition page table */
+ mtspr SPRN_SDR1,r6 /* switch to host page table */
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_LPID,r7
isync
@@ -2211,6 +2248,21 @@ BEGIN_FTR_SECTION
ori r5, r5, LPCR_PECEDH
rlwimi r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
+kvm_nap_sequence: /* desired LPCR value in r5 */
+BEGIN_FTR_SECTION
+ /*
+ * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset)
+ * enable state loss = 1 (allow SMT mode switch)
+ * requested level = 0 (just stop dispatching)
+ */
+ lis r3, (PSSCR_EC | PSSCR_ESL)@h
+ mtspr SPRN_PSSCR, r3
+ /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
+ li r4, LPCR_PECE_HVEE@higher
+ sldi r4, r4, 32
+ or r5, r5, r4
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
mtspr SPRN_LPCR,r5
isync
li r0, 0
@@ -2219,7 +2271,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
ld r0, HSTATE_SCRATCH0(r13)
1: cmpd r0, r0
bne 1b
+BEGIN_FTR_SECTION
nap
+FTR_SECTION_ELSE
+ PPC_STOP
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
b .
33: mr r4, r3
@@ -2600,11 +2656,13 @@ kvmppc_save_tm:
mfctr r7
mfspr r8, SPRN_AMR
mfspr r10, SPRN_TAR
+ mfxer r11
std r5, VCPU_LR_TM(r9)
stw r6, VCPU_CR_TM(r9)
std r7, VCPU_CTR_TM(r9)
std r8, VCPU_AMR_TM(r9)
std r10, VCPU_TAR_TM(r9)
+ std r11, VCPU_XER_TM(r9)
/* Restore r12 as trap number. */
lwz r12, VCPU_TRAP(r9)
@@ -2697,11 +2755,13 @@ kvmppc_restore_tm:
ld r7, VCPU_CTR_TM(r4)
ld r8, VCPU_AMR_TM(r4)
ld r9, VCPU_TAR_TM(r4)
+ ld r10, VCPU_XER_TM(r4)
mtlr r5
mtcr r6
mtctr r7
mtspr SPRN_AMR, r8
mtspr SPRN_TAR, r9
+ mtxer r10
/*
* Load up PPR and DSCR values but don't put them in the actual SPRs
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 70963c845e96..efd1183a6b16 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -536,7 +536,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_SPAPR_TCE:
case KVM_CAP_SPAPR_TCE_64:
- case KVM_CAP_PPC_ALLOC_HTAB:
case KVM_CAP_PPC_RTAS:
case KVM_CAP_PPC_FIXUP_HCALL:
case KVM_CAP_PPC_ENABLE_HCALL:
@@ -545,13 +544,20 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#endif
r = 1;
break;
+
+ case KVM_CAP_PPC_ALLOC_HTAB:
+ r = hv_enabled;
+ break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
case KVM_CAP_PPC_SMT:
- if (hv_enabled)
- r = threads_per_subcore;
- else
- r = 0;
+ r = 0;
+ if (hv_enabled) {
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ r = 1;
+ else
+ r = threads_per_subcore;
+ }
break;
case KVM_CAP_PPC_RMA:
r = 0;
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
index fb21990c0fb4..ebc6dd449556 100644
--- a/arch/powerpc/kvm/trace_hv.h
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -449,7 +449,7 @@ TRACE_EVENT(kvmppc_vcore_wakeup,
__entry->tgid = current->tgid;
),
- TP_printk("%s time %lld ns, tgid=%d",
+ TP_printk("%s time %llu ns, tgid=%d",
__entry->waited ? "wait" : "poll",
__entry->ns, __entry->tgid)
);
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 83ddc0e171b0..ad9fd5245be2 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -221,13 +221,18 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
return -1;
hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(pa, psize, apsize, ssize) | rflags;
+ hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
if (!(vflags & HPTE_V_BOLTED)) {
DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
i, hpte_v, hpte_r);
}
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
+ hpte_v = hpte_old_to_new_v(hpte_v);
+ }
+
hptep->r = cpu_to_be64(hpte_r);
/* Guarantee the second dword is visible before the valid bit */
eieio();
@@ -295,6 +300,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
vpn, want_v & HPTE_V_AVPN, slot, newpp);
hpte_v = be64_to_cpu(hptep->v);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
/*
* We need to invalidate the TLB always because hpte_remove doesn't do
* a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -309,6 +316,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
native_lock_hpte(hptep);
/* recheck with locks held */
hpte_v = be64_to_cpu(hptep->v);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
!(hpte_v & HPTE_V_VALID))) {
ret = -1;
@@ -350,6 +359,8 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
for (i = 0; i < HPTES_PER_GROUP; i++) {
hptep = htab_address + slot;
hpte_v = be64_to_cpu(hptep->v);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
/* HPTE matches */
@@ -409,6 +420,8 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
want_v = hpte_encode_avpn(vpn, bpsize, ssize);
native_lock_hpte(hptep);
hpte_v = be64_to_cpu(hptep->v);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
/*
* We need to invalidate the TLB always because hpte_remove doesn't do
@@ -467,6 +480,8 @@ static void native_hugepage_invalidate(unsigned long vsid,
want_v = hpte_encode_avpn(vpn, psize, ssize);
native_lock_hpte(hptep);
hpte_v = be64_to_cpu(hptep->v);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
/* Even if we miss, we need to invalidate the TLB */
if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
@@ -504,6 +519,10 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
/* Look at the 8 bit LP value */
unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
+ hpte_r = hpte_new_to_old_r(hpte_r);
+ }
if (!(hpte_v & HPTE_V_LARGE)) {
size = MMU_PAGE_4K;
a_size = MMU_PAGE_4K;
@@ -512,11 +531,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
a_size = hpte_page_sizes[lp] >> 4;
}
/* This works for all page sizes, and for 256M and 1T segments */
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- *ssize = hpte_r >> HPTE_R_3_0_SSIZE_SHIFT;
- else
- *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
-
+ *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
shift = mmu_psize_defs[size].shift;
avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
@@ -639,6 +654,9 @@ static void native_flush_hash_range(unsigned long number, int local)
want_v = hpte_encode_avpn(vpn, psize, ssize);
native_lock_hpte(hptep);
hpte_v = be64_to_cpu(hptep->v);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ hpte_v = hpte_new_to_old_v(hpte_v,
+ be64_to_cpu(hptep->r));
if (!HPTE_V_COMPARE(hpte_v, want_v) ||
!(hpte_v & HPTE_V_VALID))
native_unlock_hpte(hptep);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 78dabf065ba9..8410b4bb36ed 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -796,37 +796,17 @@ static void update_hid_for_hash(void)
static void __init hash_init_partition_table(phys_addr_t hash_table,
unsigned long htab_size)
{
- unsigned long ps_field;
- unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
+ mmu_partition_table_init();
/*
- * slb llp encoding for the page size used in VPM real mode.
- * We can ignore that for lpid 0
+ * PS field (VRMA page size) is not used for LPID 0, hence set to 0.
+ * For now, UPRT is 0 and we have no segment table.
*/
- ps_field = 0;
htab_size = __ilog2(htab_size) - 18;
-
- BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
- partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
- MEMBLOCK_ALLOC_ANYWHERE));
-
- /* Initialize the Partition Table with no entries */
- memset((void *)partition_tb, 0, patb_size);
- partition_tb->patb0 = cpu_to_be64(ps_field | hash_table | htab_size);
- /*
- * FIXME!! This should be done via update_partition table
- * For now UPRT is 0 for us.
- */
- partition_tb->patb1 = 0;
+ mmu_partition_table_set_entry(0, hash_table | htab_size, 0);
pr_info("Partition table %p\n", partition_tb);
if (cpu_has_feature(CPU_FTR_POWER9_DD1))
update_hid_for_hash();
- /*
- * update partition table control register,
- * 64 K size.
- */
- mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
-
}
static void __init htab_initialize(void)
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 688b54517655..8d941c692eb3 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -177,23 +177,15 @@ redo:
static void __init radix_init_partition_table(void)
{
- unsigned long rts_field;
+ unsigned long rts_field, dw0;
+ mmu_partition_table_init();
rts_field = radix__get_tree_size();
+ dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
+ mmu_partition_table_set_entry(0, dw0, 0);
- BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
- partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
- partition_tb->patb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) |
- RADIX_PGD_INDEX_SIZE | PATB_HR);
pr_info("Initializing Radix MMU\n");
pr_info("Partition table %p\n", partition_tb);
-
- memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
- /*
- * update partition table control register,
- * 64 K size.
- */
- mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
}
void __init radix_init_native(void)
@@ -378,6 +370,8 @@ void __init radix__early_init_mmu(void)
radix_init_partition_table();
}
+ memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
+
radix_init_pgtable();
}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index f5e8d4edb808..8bca7f58afc4 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -431,3 +431,37 @@ void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
}
}
#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+void __init mmu_partition_table_init(void)
+{
+ unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
+
+ BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
+ partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
+ MEMBLOCK_ALLOC_ANYWHERE));
+
+ /* Initialize the Partition Table with no entries */
+ memset((void *)partition_tb, 0, patb_size);
+
+ /*
+ * update partition table control register,
+ * 64 K size.
+ */
+ mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
+}
+
+void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
+ unsigned long dw1)
+{
+ partition_tb[lpid].patb0 = cpu_to_be64(dw0);
+ partition_tb[lpid].patb1 = cpu_to_be64(dw1);
+
+ /* Global flush of TLBs and partition table caches for this lpid */
+ asm volatile("ptesync" : : : "memory");
+ asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
+ "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+}
+EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
+#endif /* CONFIG_PPC_BOOK3S_64 */
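
With the helpers added above, the hash and radix early-init paths changed earlier in this patch reduce to the same two calls. A condensed sketch of the combined flow (illustrative only; the function and parameter names here are invented and error handling is omitted):

static void sketch_setup_lpid0_partition_table(bool radix, unsigned long hash_table,
					       unsigned long htab_size_field)
{
	unsigned long dw0;

	/* allocate and zero the partition table, program the PTCR */
	mmu_partition_table_init();

	if (radix)
		dw0 = radix__get_tree_size() | __pa(init_mm.pgd) |
		      RADIX_PGD_INDEX_SIZE | PATB_HR;
	else
		dw0 = hash_table | htab_size_field;	/* PS field stays 0 for LPID 0 */

	/* dw1 = 0: no process/segment table yet; the entry is flushed globally */
	mmu_partition_table_set_entry(0, dw0, 0);
}

The helper is also exported (EXPORT_SYMBOL_GPL), presumably so that entries for other LPIDs can be set from module code.
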
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 44d2d842cee7..3aa40f1b20f5 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -304,8 +304,11 @@ OPAL_CALL(opal_pci_get_presence_state, OPAL_PCI_GET_PRESENCE_STATE);
OPAL_CALL(opal_pci_get_power_state, OPAL_PCI_GET_POWER_STATE);
OPAL_CALL(opal_pci_set_power_state, OPAL_PCI_SET_POWER_STATE);
OPAL_CALL(opal_int_get_xirr, OPAL_INT_GET_XIRR);
+OPAL_CALL_REAL(opal_rm_int_get_xirr, OPAL_INT_GET_XIRR);
OPAL_CALL(opal_int_set_cppr, OPAL_INT_SET_CPPR);
OPAL_CALL(opal_int_eoi, OPAL_INT_EOI);
+OPAL_CALL_REAL(opal_rm_int_eoi, OPAL_INT_EOI);
OPAL_CALL(opal_int_set_mfrr, OPAL_INT_SET_MFRR);
+OPAL_CALL_REAL(opal_rm_int_set_mfrr, OPAL_INT_SET_MFRR);
OPAL_CALL(opal_pci_tce_kill, OPAL_PCI_TCE_KILL);
OPAL_CALL_REAL(opal_rm_pci_tce_kill, OPAL_PCI_TCE_KILL);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 6c9a65b52e63..b3b8930ac52f 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -896,3 +896,5 @@ EXPORT_SYMBOL_GPL(opal_leds_get_ind);
EXPORT_SYMBOL_GPL(opal_leds_set_ind);
/* Export this symbol for PowerNV Operator Panel class driver */
EXPORT_SYMBOL_GPL(opal_write_oppanel_async);
+/* Export this for KVM */
+EXPORT_SYMBOL_GPL(opal_int_set_mfrr);
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index cb3c50328de8..cc2b281a3766 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -63,7 +63,7 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
vflags &= ~HPTE_V_SECONDARY;
hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize, apsize, ssize) | rflags;
+ hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize, apsize) | rflags;
spin_lock_irqsave(&ps3_htab_lock, flags);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index aa35245d8d6d..f2c98f6c1c9c 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -145,7 +145,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
hpte_group, vpn, pa, rflags, vflags, psize);
hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(pa, psize, apsize, ssize) | rflags;
+ hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
if (!(vflags & HPTE_V_BOLTED))
pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 028f97be5bae..c6722112527d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -136,6 +136,7 @@ config S390
select HAVE_CMPXCHG_LOCAL
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
+ select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -169,6 +170,7 @@ config S390
select OLD_SIGSUSPEND3
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
+ select THREAD_INFO_IN_TASK
select TTY
select VIRT_CPU_ACCOUNTING
select ARCH_HAS_SCALED_CPUTIME
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 0daa070d6c9d..6bd2c9022be3 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -10,7 +10,7 @@ targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
targets += misc.o piggy.o sizes.h head.o
-KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
+KBUILD_CFLAGS := -m64 -D__KERNEL__ -O2
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
diff --git a/arch/s390/boot/compressed/head.S b/arch/s390/boot/compressed/head.S
index 28c4f96a2d9c..11f6254c561e 100644
--- a/arch/s390/boot/compressed/head.S
+++ b/arch/s390/boot/compressed/head.S
@@ -46,7 +46,7 @@ mover_end:
.align 8
.Lstack:
- .quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
+ .quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
.Loffset:
.quad 0x11000
.Lmvsize:
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 45968686f918..e659daffe368 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -66,6 +66,8 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA=y
+CONFIG_CMA_DEBUG=y
+CONFIG_CMA_DEBUGFS=y
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZPOOL=m
CONFIG_ZBUD=m
@@ -366,6 +368,8 @@ CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
CONFIG_DEVTMPFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -438,7 +442,6 @@ CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
-CONFIG_VHOST_NET=m
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_INTEL is not set
@@ -693,3 +696,4 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
+CONFIG_VHOST_NET=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 1dd05e345c4d..95ceac50bc65 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -362,6 +362,8 @@ CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
CONFIG_DEVTMPFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -434,7 +436,6 @@ CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
-CONFIG_VHOST_NET=m
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_INTEL is not set
@@ -633,3 +634,4 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
+CONFIG_VHOST_NET=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 29d1178666f0..bc7b176f5795 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -362,6 +362,8 @@ CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
CONFIG_DEVTMPFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -434,7 +436,6 @@ CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
-CONFIG_VHOST_NET=m
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_INTEL is not set
@@ -632,3 +633,4 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
+CONFIG_VHOST_NET=m
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 9cc050f9536c..1113389d0a39 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -507,8 +507,10 @@ static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
prng_data->prngws.byte_counter += n;
prng_data->prngws.reseed_counter += n;
- if (copy_to_user(ubuf, prng_data->buf, chunk))
- return -EFAULT;
+ if (copy_to_user(ubuf, prng_data->buf, chunk)) {
+ ret = -EFAULT;
+ break;
+ }
nbytes -= chunk;
ret += chunk;
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 09bccb224d03..cf8a2d92467f 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -3,6 +3,7 @@
*
* Copyright IBM Corp. 2006, 2008
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ * License: GPL
*/
#define KMSG_COMPONENT "hypfs"
@@ -18,7 +19,8 @@
#include <linux/time.h>
#include <linux/parser.h>
#include <linux/sysfs.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kobject.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/uio.h>
@@ -443,7 +445,6 @@ static struct file_system_type hypfs_type = {
.mount = hypfs_mount,
.kill_sb = hypfs_kill_super
};
-MODULE_ALIAS_FS("s390_hypfs");
static const struct super_operations hypfs_s_ops = {
.statfs = simple_statfs,
@@ -497,21 +498,4 @@ fail_dbfs_exit:
pr_err("Initialization of hypfs failed with rc=%i\n", rc);
return rc;
}
-
-static void __exit hypfs_exit(void)
-{
- unregister_filesystem(&hypfs_type);
- sysfs_remove_mount_point(hypervisor_kobj, "s390");
- hypfs_diag0c_exit();
- hypfs_sprp_exit();
- hypfs_vm_exit();
- hypfs_diag_exit();
- hypfs_dbfs_exit();
-}
-
-module_init(hypfs_init)
-module_exit(hypfs_exit)
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Michael Holzheu <holzheu@de.ibm.com>");
-MODULE_DESCRIPTION("s390 Hypervisor Filesystem");
+device_initcall(hypfs_init)
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 20f196b82a6e..8aea32fe8bd2 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,6 +1,6 @@
-
-
+generic-y += asm-offsets.h
generic-y += clkdev.h
+generic-y += dma-contiguous.h
generic-y += export.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
diff --git a/arch/s390/include/asm/asm-offsets.h b/arch/s390/include/asm/asm-offsets.h
deleted file mode 100644
index d370ee36a182..000000000000
--- a/arch/s390/include/asm/asm-offsets.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <generated/asm-offsets.h>
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index d28cc2f5b7b2..f7f69dfd2db2 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -1,13 +1,8 @@
/*
- * Copyright IBM Corp. 1999, 2009
+ * Copyright IBM Corp. 1999, 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Denis Joseph Barrow,
- * Arnd Bergmann <arndb@de.ibm.com>,
- *
- * Atomic operations that C can't guarantee us.
- * Useful for resource counting etc.
- * s390 uses 'Compare And Swap' for atomicity in SMP environment.
- *
+ * Arnd Bergmann,
*/
#ifndef __ARCH_S390_ATOMIC__
@@ -15,62 +10,12 @@
#include <linux/compiler.h>
#include <linux/types.h>
+#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#define ATOMIC_INIT(i) { (i) }
-#define __ATOMIC_NO_BARRIER "\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __ATOMIC_OR "lao"
-#define __ATOMIC_AND "lan"
-#define __ATOMIC_ADD "laa"
-#define __ATOMIC_XOR "lax"
-#define __ATOMIC_BARRIER "bcr 14,0\n"
-
-#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
-({ \
- int old_val; \
- \
- typecheck(atomic_t *, ptr); \
- asm volatile( \
- op_string " %0,%2,%1\n" \
- __barrier \
- : "=d" (old_val), "+Q" ((ptr)->counter) \
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __ATOMIC_OR "or"
-#define __ATOMIC_AND "nr"
-#define __ATOMIC_ADD "ar"
-#define __ATOMIC_XOR "xr"
-#define __ATOMIC_BARRIER "\n"
-
-#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
-({ \
- int old_val, new_val; \
- \
- typecheck(atomic_t *, ptr); \
- asm volatile( \
- " l %0,%2\n" \
- "0: lr %1,%0\n" \
- op_string " %1,%3\n" \
- " cs %0,%1,%2\n" \
- " jl 0b" \
- : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
static inline int atomic_read(const atomic_t *v)
{
int c;
@@ -90,27 +35,23 @@ static inline void atomic_set(atomic_t *v, int i)
static inline int atomic_add_return(int i, atomic_t *v)
{
- return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
+ return __atomic_add_barrier(i, &v->counter) + i;
}
static inline int atomic_fetch_add(int i, atomic_t *v)
{
- return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER);
+ return __atomic_add_barrier(i, &v->counter);
}
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
- asm volatile(
- "asi %0,%1\n"
- : "+Q" (v->counter)
- : "i" (i)
- : "cc", "memory");
+ __atomic_add_const(i, &v->counter);
return;
}
#endif
- __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
+ __atomic_add(i, &v->counter);
}
#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
@@ -125,19 +66,19 @@ static inline void atomic_add(int i, atomic_t *v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
-#define ATOMIC_OPS(op, OP) \
+#define ATOMIC_OPS(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
- __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER); \
+ __atomic_##op(i, &v->counter); \
} \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
- return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER); \
+ return __atomic_##op##_barrier(i, &v->counter); \
}
-ATOMIC_OPS(and, AND)
-ATOMIC_OPS(or, OR)
-ATOMIC_OPS(xor, XOR)
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
#undef ATOMIC_OPS
@@ -145,12 +86,7 @@ ATOMIC_OPS(xor, XOR)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
- asm volatile(
- " cs %0,%2,%1"
- : "+d" (old), "+Q" (v->counter)
- : "d" (new)
- : "cc", "memory");
- return old;
+ return __atomic_cmpxchg(&v->counter, old, new);
}
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
@@ -168,65 +104,11 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
-
-#undef __ATOMIC_LOOP
-
#define ATOMIC64_INIT(i) { (i) }
-#define __ATOMIC64_NO_BARRIER "\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __ATOMIC64_OR "laog"
-#define __ATOMIC64_AND "lang"
-#define __ATOMIC64_ADD "laag"
-#define __ATOMIC64_XOR "laxg"
-#define __ATOMIC64_BARRIER "bcr 14,0\n"
-
-#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
-({ \
- long long old_val; \
- \
- typecheck(atomic64_t *, ptr); \
- asm volatile( \
- op_string " %0,%2,%1\n" \
- __barrier \
- : "=d" (old_val), "+Q" ((ptr)->counter) \
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __ATOMIC64_OR "ogr"
-#define __ATOMIC64_AND "ngr"
-#define __ATOMIC64_ADD "agr"
-#define __ATOMIC64_XOR "xgr"
-#define __ATOMIC64_BARRIER "\n"
-
-#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
-({ \
- long long old_val, new_val; \
- \
- typecheck(atomic64_t *, ptr); \
- asm volatile( \
- " lg %0,%2\n" \
- "0: lgr %1,%0\n" \
- op_string " %1,%3\n" \
- " csg %0,%1,%2\n" \
- " jl 0b" \
- : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-static inline long long atomic64_read(const atomic64_t *v)
+static inline long atomic64_read(const atomic64_t *v)
{
- long long c;
+ long c;
asm volatile(
" lg %0,%1\n"
@@ -234,71 +116,60 @@ static inline long long atomic64_read(const atomic64_t *v)
return c;
}
-static inline void atomic64_set(atomic64_t *v, long long i)
+static inline void atomic64_set(atomic64_t *v, long i)
{
asm volatile(
" stg %1,%0\n"
: "=Q" (v->counter) : "d" (i));
}
-static inline long long atomic64_add_return(long long i, atomic64_t *v)
+static inline long atomic64_add_return(long i, atomic64_t *v)
{
- return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
+ return __atomic64_add_barrier(i, &v->counter) + i;
}
-static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
+static inline long atomic64_fetch_add(long i, atomic64_t *v)
{
- return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER);
+ return __atomic64_add_barrier(i, &v->counter);
}
-static inline void atomic64_add(long long i, atomic64_t *v)
+static inline void atomic64_add(long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
- asm volatile(
- "agsi %0,%1\n"
- : "+Q" (v->counter)
- : "i" (i)
- : "cc", "memory");
+ __atomic64_add_const(i, &v->counter);
return;
}
#endif
- __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
+ __atomic64_add(i, &v->counter);
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-static inline long long atomic64_cmpxchg(atomic64_t *v,
- long long old, long long new)
+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
- asm volatile(
- " csg %0,%2,%1"
- : "+d" (old), "+Q" (v->counter)
- : "d" (new)
- : "cc", "memory");
- return old;
+ return __atomic64_cmpxchg(&v->counter, old, new);
}
-#define ATOMIC64_OPS(op, OP) \
+#define ATOMIC64_OPS(op) \
static inline void atomic64_##op(long i, atomic64_t *v) \
{ \
- __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \
+ __atomic64_##op(i, &v->counter); \
} \
static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
{ \
- return __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_BARRIER); \
+ return __atomic64_##op##_barrier(i, &v->counter); \
}
-ATOMIC64_OPS(and, AND)
-ATOMIC64_OPS(or, OR)
-ATOMIC64_OPS(xor, XOR)
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+ATOMIC64_OPS(xor)
#undef ATOMIC64_OPS
-#undef __ATOMIC64_LOOP
-static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+static inline int atomic64_add_unless(atomic64_t *v, long i, long u)
{
- long long c, old;
+ long c, old;
c = atomic64_read(v);
for (;;) {
@@ -312,9 +183,9 @@ static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
return c != u;
}
-static inline long long atomic64_dec_if_positive(atomic64_t *v)
+static inline long atomic64_dec_if_positive(atomic64_t *v)
{
- long long c, old, dec;
+ long c, old, dec;
c = atomic64_read(v);
for (;;) {
@@ -333,9 +204,9 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_inc(_v) atomic64_add(1, _v)
#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
-#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long long)(_i), _v)
-#define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(long long)(_i), _v)
-#define atomic64_sub(_i, _v) atomic64_add(-(long long)(_i), _v)
+#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long)(_i), _v)
+#define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(long)(_i), _v)
+#define atomic64_sub(_i, _v) atomic64_add(-(long)(_i), _v)
#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v) atomic64_sub(1, _v)
#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
new file mode 100644
index 000000000000..ac9e2b939d04
--- /dev/null
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -0,0 +1,130 @@
+/*
+ * Low level function for atomic operations
+ *
+ * Copyright IBM Corp. 1999, 2016
+ */
+
+#ifndef __ARCH_S390_ATOMIC_OPS__
+#define __ARCH_S390_ATOMIC_OPS__
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
+static inline op_type op_name(op_type val, op_type *ptr) \
+{ \
+ op_type old; \
+ \
+ asm volatile( \
+ op_string " %[old],%[val],%[ptr]\n" \
+ op_barrier \
+ : [old] "=d" (old), [ptr] "+Q" (*ptr) \
+ : [val] "d" (val) : "cc", "memory"); \
+ return old; \
+} \
+
+#define __ATOMIC_OPS(op_name, op_type, op_string) \
+ __ATOMIC_OP(op_name, op_type, op_string, "\n") \
+ __ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
+
+__ATOMIC_OPS(__atomic_add, int, "laa")
+__ATOMIC_OPS(__atomic_and, int, "lan")
+__ATOMIC_OPS(__atomic_or, int, "lao")
+__ATOMIC_OPS(__atomic_xor, int, "lax")
+
+__ATOMIC_OPS(__atomic64_add, long, "laag")
+__ATOMIC_OPS(__atomic64_and, long, "lang")
+__ATOMIC_OPS(__atomic64_or, long, "laog")
+__ATOMIC_OPS(__atomic64_xor, long, "laxg")
+
+#undef __ATOMIC_OPS
+#undef __ATOMIC_OP
+
+static inline void __atomic_add_const(int val, int *ptr)
+{
+ asm volatile(
+ " asi %[ptr],%[val]\n"
+ : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
+}
+
+static inline void __atomic64_add_const(long val, long *ptr)
+{
+ asm volatile(
+ " agsi %[ptr],%[val]\n"
+ : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC_OP(op_name, op_string) \
+static inline int op_name(int val, int *ptr) \
+{ \
+ int old, new; \
+ \
+ asm volatile( \
+ "0: lr %[new],%[old]\n" \
+ op_string " %[new],%[val]\n" \
+ " cs %[old],%[new],%[ptr]\n" \
+ " jl 0b" \
+ : [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
+ : [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
+ return old; \
+}
+
+#define __ATOMIC_OPS(op_name, op_string) \
+ __ATOMIC_OP(op_name, op_string) \
+ __ATOMIC_OP(op_name##_barrier, op_string)
+
+__ATOMIC_OPS(__atomic_add, "ar")
+__ATOMIC_OPS(__atomic_and, "nr")
+__ATOMIC_OPS(__atomic_or, "or")
+__ATOMIC_OPS(__atomic_xor, "xr")
+
+#undef __ATOMIC_OPS
+
+#define __ATOMIC64_OP(op_name, op_string) \
+static inline long op_name(long val, long *ptr) \
+{ \
+ long old, new; \
+ \
+ asm volatile( \
+ "0: lgr %[new],%[old]\n" \
+ op_string " %[new],%[val]\n" \
+ " csg %[old],%[new],%[ptr]\n" \
+ " jl 0b" \
+ : [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
+ : [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
+ return old; \
+}
+
+#define __ATOMIC64_OPS(op_name, op_string) \
+ __ATOMIC64_OP(op_name, op_string) \
+ __ATOMIC64_OP(op_name##_barrier, op_string)
+
+__ATOMIC64_OPS(__atomic64_add, "agr")
+__ATOMIC64_OPS(__atomic64_and, "ngr")
+__ATOMIC64_OPS(__atomic64_or, "ogr")
+__ATOMIC64_OPS(__atomic64_xor, "xgr")
+
+#undef __ATOMIC64_OPS
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+static inline int __atomic_cmpxchg(int *ptr, int old, int new)
+{
+ asm volatile(
+ " cs %[old],%[new],%[ptr]"
+ : [old] "+d" (old), [ptr] "+Q" (*ptr)
+ : [new] "d" (new) : "cc", "memory");
+ return old;
+}
+
+static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
+{
+ asm volatile(
+ " csg %[old],%[new],%[ptr]"
+ : [old] "+d" (old), [ptr] "+Q" (*ptr)
+ : [new] "d" (new) : "cc", "memory");
+ return old;
+}
+
+#endif /* __ARCH_S390_ATOMIC_OPS__ */
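
The new header collects the raw load-and-*/compare-and-swap building blocks; the atomic.h and bitops.h changes elsewhere in this patch become thin wrappers over them. A small usage sketch (illustrative only, function names invented) showing how a wrapper picks the barrier or non-barrier variant:

/* like atomic_add_return(): the result is needed, so use the barrier form */
static inline int sketch_add_return(int i, int *counter)
{
	return __atomic_add_barrier(i, counter) + i;
}

/* like atomic_add(): only the side effect matters, no barrier required */
static inline void sketch_add(int i, int *counter)
{
	__atomic_add(i, counter);
}

/* like test_and_set_bit(): barrier form, the old value is inspected */
static inline int sketch_test_and_set(long mask, long *word)
{
	return (__atomic64_or_barrier(mask, word) & mask) != 0;
}
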
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 8043f10da6b5..d92047da5ccb 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -42,57 +42,9 @@
#include <linux/typecheck.h>
#include <linux/compiler.h>
+#include <asm/atomic_ops.h>
#include <asm/barrier.h>
-#define __BITOPS_NO_BARRIER "\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __BITOPS_OR "laog"
-#define __BITOPS_AND "lang"
-#define __BITOPS_XOR "laxg"
-#define __BITOPS_BARRIER "bcr 14,0\n"
-
-#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
-({ \
- unsigned long __old; \
- \
- typecheck(unsigned long *, (__addr)); \
- asm volatile( \
- __op_string " %0,%2,%1\n" \
- __barrier \
- : "=d" (__old), "+Q" (*(__addr)) \
- : "d" (__val) \
- : "cc", "memory"); \
- __old; \
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __BITOPS_OR "ogr"
-#define __BITOPS_AND "ngr"
-#define __BITOPS_XOR "xgr"
-#define __BITOPS_BARRIER "\n"
-
-#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
-({ \
- unsigned long __old, __new; \
- \
- typecheck(unsigned long *, (__addr)); \
- asm volatile( \
- " lg %0,%2\n" \
- "0: lgr %1,%0\n" \
- __op_string " %1,%3\n" \
- " csg %0,%1,%2\n" \
- " jl 0b" \
- : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
- : "d" (__val) \
- : "cc", "memory"); \
- __old; \
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
static inline unsigned long *
@@ -128,7 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER);
+ __atomic64_or(mask, addr);
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -149,7 +101,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER);
+ __atomic64_and(mask, addr);
}
static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -170,7 +122,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
+ __atomic64_xor(mask, addr);
}
static inline int
@@ -180,7 +132,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER);
+ old = __atomic64_or_barrier(mask, addr);
return (old & mask) != 0;
}
@@ -191,7 +143,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER);
+ old = __atomic64_and_barrier(mask, addr);
return (old & ~mask) != 0;
}
@@ -202,7 +154,7 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER);
+ old = __atomic64_xor_barrier(mask, addr);
return (old & mask) != 0;
}
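[Note, not part of the patch] With the __BITOPS_LOOP machinery gone, each bit operation reduces to locating the containing word, building a mask, and handing both to one of the __atomic64_* helpers. A condensed sketch of what set_bit() now boils down to; the real function additionally has an #ifdef fast path and uses the __bitops_word() helper, and the name and explicit casts here are illustrative only.

        /* Condensed sketch only. */
        static inline void set_bit_sketch(unsigned long nr, volatile unsigned long *ptr)
        {
                unsigned long *addr = (unsigned long *) ptr + nr / BITS_PER_LONG;
                unsigned long mask = 1UL << (nr & (BITS_PER_LONG - 1));

                __atomic64_or((long) mask, (long *) addr);  /* laog or csg loop under the hood */
        }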
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 03516476127b..b69d8bc231a5 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -104,7 +104,8 @@ struct hws_basic_entry {
unsigned int P:1; /* 28 PSW Problem state */
unsigned int AS:2; /* 29-30 PSW address-space control */
unsigned int I:1; /* 31 entry valid or invalid */
- unsigned int:16;
+ unsigned int CL:2; /* 32-33 Configuration Level */
+ unsigned int:14;
unsigned int prim_asn:16; /* primary ASN */
unsigned long long ia; /* Instruction Address */
unsigned long long gpp; /* Guest Program Parameter */
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 1736c7d3c94c..f4381e1fb19e 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -193,7 +193,7 @@ extern char elf_platform[];
do { \
set_personality(PER_LINUX | \
(current->personality & (~PER_MASK))); \
- current_thread_info()->sys_call_table = \
+ current->thread.sys_call_table = \
(unsigned long) &sys_call_table; \
} while (0)
#else /* CONFIG_COMPAT */
@@ -204,11 +204,11 @@ do { \
(current->personality & ~PER_MASK)); \
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \
set_thread_flag(TIF_31BIT); \
- current_thread_info()->sys_call_table = \
+ current->thread.sys_call_table = \
(unsigned long) &sys_call_table_emu; \
} else { \
clear_thread_flag(TIF_31BIT); \
- current_thread_info()->sys_call_table = \
+ current->thread.sys_call_table = \
(unsigned long) &sys_call_table; \
} \
} while (0)
diff --git a/arch/s390/include/asm/facilities_src.h b/arch/s390/include/asm/facilities_src.h
deleted file mode 100644
index 3b758f66e48b..000000000000
--- a/arch/s390/include/asm/facilities_src.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright IBM Corp. 2015
- */
-
-#ifndef S390_GEN_FACILITIES_C
-#error "This file can only be included by gen_facilities.c"
-#endif
-
-#include <linux/kconfig.h>
-
-struct facility_def {
- char *name;
- int *bits;
-};
-
-static struct facility_def facility_defs[] = {
- {
- /*
- * FACILITIES_ALS contains the list of facilities that are
- * required to run a kernel that is compiled e.g. with
- * -march=<machine>.
- */
- .name = "FACILITIES_ALS",
- .bits = (int[]){
-#ifdef CONFIG_HAVE_MARCH_Z900_FEATURES
- 0, /* N3 instructions */
- 1, /* z/Arch mode installed */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
- 18, /* long displacement facility */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
- 7, /* stfle */
- 17, /* message security assist */
- 21, /* extended-immediate facility */
- 25, /* store clock fast */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
- 27, /* mvcos */
- 32, /* compare and swap and store */
- 33, /* compare and swap and store 2 */
- 34, /* general extension facility */
- 35, /* execute extensions */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
- 45, /* fast-BCR, etc. */
-#endif
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
- 49, /* misc-instruction-extensions */
- 52, /* interlocked facility 2 */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
- 53, /* load-and-zero-rightmost-byte, etc. */
-#endif
- -1 /* END */
- }
- },
- {
- .name = "FACILITIES_KVM",
- .bits = (int[]){
- 0, /* N3 instructions */
- 1, /* z/Arch mode installed */
- 2, /* z/Arch mode active */
- 3, /* DAT-enhancement */
- 4, /* idte segment table */
- 5, /* idte region table */
- 6, /* ASN-and-LX reuse */
- 7, /* stfle */
- 8, /* enhanced-DAT 1 */
- 9, /* sense-running-status */
- 10, /* conditional sske */
- 13, /* ipte-range */
- 14, /* nonquiescing key-setting */
- 73, /* transactional execution */
- 75, /* access-exception-fetch/store indication */
- 76, /* msa extension 3 */
- 77, /* msa extension 4 */
- 78, /* enhanced-DAT 2 */
- -1 /* END */
- }
- },
-};
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 4da22b2f0521..edb5161df7e2 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -97,7 +97,7 @@ void __init save_area_add_vxrs(struct save_area *, __vector128 *vxrs);
extern void do_reipl(void);
extern void do_halt(void);
extern void do_poff(void);
-extern void ipl_save_parameters(void);
+extern void ipl_verify_parameters(void);
extern void ipl_update_parameters(void);
extern size_t append_ipl_vmparm(char *, size_t);
extern size_t append_ipl_scpdata(char *, size_t);
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 7b93b78f423c..9bfad2ad6312 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -95,7 +95,7 @@ struct lowcore {
/* Current process. */
__u64 current_task; /* 0x0310 */
- __u64 thread_info; /* 0x0318 */
+ __u8 pad_0x318[0x320-0x318]; /* 0x0318 */
__u64 kernel_stack; /* 0x0320 */
/* Interrupt, panic and restart stack. */
@@ -126,7 +126,8 @@ struct lowcore {
__u64 percpu_offset; /* 0x0378 */
__u64 vdso_per_cpu_data; /* 0x0380 */
__u64 machine_flags; /* 0x0388 */
- __u8 pad_0x0390[0x0398-0x0390]; /* 0x0390 */
+ __u32 preempt_count; /* 0x0390 */
+ __u8 pad_0x0394[0x0398-0x0394]; /* 0x0394 */
__u64 gmap; /* 0x0398 */
__u32 spinlock_lockval; /* 0x03a0 */
__u32 fpu_flags; /* 0x03a4 */
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index e75c64cbcf08..c232ef9711f5 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -46,6 +46,8 @@ struct clp_fh_list_entry {
#define CLP_UTIL_STR_LEN 64
#define CLP_PFIP_NR_SEGMENTS 4
+extern bool zpci_unique_uid;
+
/* List PCI functions request */
struct clp_req_list_pci {
struct clp_req_hdr hdr;
@@ -59,7 +61,8 @@ struct clp_rsp_list_pci {
u64 resume_token;
u32 reserved2;
u16 max_fn;
- u8 reserved3;
+ u8 : 7;
+ u8 uid_checking : 1;
u8 entry_size;
struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES];
} __packed;
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index f4eb9843eed4..166f703dad7c 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -27,17 +27,17 @@ extern int page_table_allocate_pgste;
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
- typedef struct { char _[n]; } addrtype;
-
- *s = val;
- n = (n / 256) - 1;
- asm volatile(
- " mvc 8(248,%0),0(%0)\n"
- "0: mvc 256(256,%0),0(%0)\n"
- " la %0,256(%0)\n"
- " brct %1,0b\n"
- : "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
- : "m" (*(addrtype *) s));
+ struct addrtype { char _[256]; };
+ int i;
+
+ for (i = 0; i < n; i += 256) {
+ *s = val;
+ asm volatile(
+ "mvc 8(248,%[s]),0(%[s])\n"
+ : "+m" (*(struct addrtype *) s)
+ : [s] "a" (s));
+ s += 256 / sizeof(long);
+ }
}
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
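[Note, not part of the patch] The rewritten clear_table() stores the entry once per 256-byte block and lets a single overlapping MVC replicate it: MVC copies byte by byte from left to right, so copying offsets 0..247 onto offsets 8..255 smears the first 8-byte entry across the whole block. A plain-C model of one block, for illustration only:

        /* Plain-C model of one 256-byte block (illustration only). */
        static void clear_block_sketch(unsigned char *p, unsigned long val)
        {
                int i;

                /* "*s = val" - store the 8-byte entry once (s390 is big-endian) */
                for (i = 0; i < 8; i++)
                        p[i] = (val >> (8 * (7 - i))) & 0xff;
                /* "mvc 8(248,%[s]),0(%[s])" - the overlapping, left-to-right,
                 * byte-wise copy replicates the pattern over the remaining
                 * 248 bytes of the block.
                 */
                for (i = 0; i < 248; i++)
                        p[8 + i] = p[i];
        }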
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
new file mode 100644
index 000000000000..b0776b2c8dcf
--- /dev/null
+++ b/arch/s390/include/asm/preempt.h
@@ -0,0 +1,137 @@
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <asm/current.h>
+#include <linux/thread_info.h>
+#include <asm/atomic_ops.h>
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
+
+static inline int preempt_count(void)
+{
+ return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
+}
+
+static inline void preempt_count_set(int pc)
+{
+ int old, new;
+
+ do {
+ old = READ_ONCE(S390_lowcore.preempt_count);
+ new = (old & PREEMPT_NEED_RESCHED) |
+ (pc & ~PREEMPT_NEED_RESCHED);
+ } while (__atomic_cmpxchg(&S390_lowcore.preempt_count,
+ old, new) != old);
+}
+
+#define init_task_preempt_count(p) do { } while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+ S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+static inline void set_preempt_need_resched(void)
+{
+ __atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+}
+
+static inline void clear_preempt_need_resched(void)
+{
+ __atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+}
+
+static inline bool test_preempt_need_resched(void)
+{
+ return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
+}
+
+static inline void __preempt_count_add(int val)
+{
+ if (__builtin_constant_p(val) && (val >= -128) && (val <= 127))
+ __atomic_add_const(val, &S390_lowcore.preempt_count);
+ else
+ __atomic_add(val, &S390_lowcore.preempt_count);
+}
+
+static inline void __preempt_count_sub(int val)
+{
+ __preempt_count_add(-val);
+}
+
+static inline bool __preempt_count_dec_and_test(void)
+{
+ return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
+}
+
+static inline bool should_resched(int preempt_offset)
+{
+ return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
+ preempt_offset);
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define PREEMPT_ENABLED (0)
+
+static inline int preempt_count(void)
+{
+ return READ_ONCE(S390_lowcore.preempt_count);
+}
+
+static inline void preempt_count_set(int pc)
+{
+ S390_lowcore.preempt_count = pc;
+}
+
+#define init_task_preempt_count(p) do { } while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+ S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+static inline void set_preempt_need_resched(void)
+{
+}
+
+static inline void clear_preempt_need_resched(void)
+{
+}
+
+static inline bool test_preempt_need_resched(void)
+{
+ return false;
+}
+
+static inline void __preempt_count_add(int val)
+{
+ S390_lowcore.preempt_count += val;
+}
+
+static inline void __preempt_count_sub(int val)
+{
+ S390_lowcore.preempt_count -= val;
+}
+
+static inline bool __preempt_count_dec_and_test(void)
+{
+ return !--S390_lowcore.preempt_count && tif_need_resched();
+}
+
+static inline bool should_resched(int preempt_offset)
+{
+ return unlikely(preempt_count() == preempt_offset &&
+ tif_need_resched());
+}
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#ifdef CONFIG_PREEMPT
+extern asmlinkage void preempt_schedule(void);
+#define __preempt_schedule() preempt_schedule()
+extern asmlinkage void preempt_schedule_notrace(void);
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
+#endif /* CONFIG_PREEMPT */
+
+#endif /* __ASM_PREEMPT_H */
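[Note, not part of the patch] On the z196 path the encoding is inverted: PREEMPT_NEED_RESCHED stays set while no reschedule is pending, set_preempt_need_resched() clears it with __atomic_and(), and clear_preempt_need_resched() sets it with __atomic_or(). That way "count dropped to the offset" and "reschedule pending" collapse into a single compare in should_resched() and __preempt_count_dec_and_test(). A small worked example; the 0x80000000 value is assumed from the generic definition and used only for illustration.

        /* Worked example only. */
        static void preempt_flag_example(void)
        {
                unsigned int pc = 0x80000000U;  /* count 0, no reschedule pending */

                pc &= ~0x80000000U;             /* set_preempt_need_resched()     */
                /* pc == 0: should_resched(0) now succeeds with one compare       */
                pc |= 0x80000000U;              /* clear_preempt_need_resched()   */
                /* pc == 0x80000000: no reschedule pending again                  */
                (void) pc;
        }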
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 9d3a21aedc97..6bca916a5ba0 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -110,14 +110,20 @@ typedef struct {
struct thread_struct {
unsigned int acrs[NUM_ACRS];
unsigned long ksp; /* kernel stack pointer */
+ unsigned long user_timer; /* task cputime in user space */
+ unsigned long system_timer; /* task cputime in kernel space */
+ unsigned long sys_call_table; /* system call table address */
mm_segment_t mm_segment;
unsigned long gmap_addr; /* address of last gmap fault. */
unsigned int gmap_write_flag; /* gmap fault write indication */
unsigned int gmap_int_code; /* int code of last gmap fault */
unsigned int gmap_pfault; /* signal of a pending guest pfault */
+ /* Per-thread information related to debugging */
struct per_regs per_user; /* User specified PER registers */
struct per_event per_event; /* Cause of the last PER trap */
unsigned long per_flags; /* Flags to control debug behavior */
+ unsigned int system_call; /* system call number in signal */
+ unsigned long last_break; /* last breaking-event-address. */
/* pfault_wait is used to block the process on a pfault event */
unsigned long pfault_wait;
struct list_head list;
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 2ad9c204b1a2..8db92a5b3bf1 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -101,7 +101,8 @@ struct zpci_report_error_header {
u8 data[0]; /* Subsequent Data passed verbatim to SCLP ET 24 */
} __packed;
-int sclp_get_core_info(struct sclp_core_info *info);
+int _sclp_get_core_info_early(struct sclp_core_info *info);
+int _sclp_get_core_info(struct sclp_core_info *info);
int sclp_core_configure(u8 core);
int sclp_core_deconfigure(u8 core);
int sclp_sdias_blk_count(void);
@@ -119,4 +120,11 @@ void sclp_early_detect(void);
void _sclp_print_early(const char *);
void sclp_ocf_cpc_name_copy(char *dst);
+static inline int sclp_get_core_info(struct sclp_core_info *info, int early)
+{
+ if (early)
+ return _sclp_get_core_info_early(info);
+ return _sclp_get_core_info(info);
+}
+
#endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h
index 4af99cdaddf5..17a7904f001a 100644
--- a/arch/s390/include/asm/scsw.h
+++ b/arch/s390/include/asm/scsw.h
@@ -96,7 +96,8 @@ struct tm_scsw {
u32 dstat:8;
u32 cstat:8;
u32 fcxs:8;
- u32 schxs:8;
+ u32 ifob:1;
+ u32 sesq:7;
} __attribute__ ((packed));
/**
@@ -177,6 +178,9 @@ union scsw {
#define SCHN_STAT_INTF_CTRL_CHK 0x02
#define SCHN_STAT_CHAIN_CHECK 0x01
+#define SCSW_SESQ_DEV_NOFCX 3
+#define SCSW_SESQ_PATH_NOFCX 4
+
/*
* architectured values for first sense byte
*/
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 0cc383b9be7f..3deb134587b7 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -36,6 +36,7 @@ extern void smp_yield_cpu(int cpu);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
extern void smp_fill_possible_mask(void);
+extern void smp_detect_cpus(void);
#else /* CONFIG_SMP */
@@ -56,6 +57,7 @@ static inline int smp_store_status(int cpu) { return 0; }
static inline int smp_vcpu_scheduled(int cpu) { return 1; }
static inline void smp_yield_cpu(int cpu) { }
static inline void smp_fill_possible_mask(void) { }
+static inline void smp_detect_cpus(void) { }
#endif /* CONFIG_SMP */
@@ -69,6 +71,12 @@ static inline void smp_stop_cpu(void)
}
}
+/* Return thread 0 CPU number as base CPU */
+static inline int smp_get_base_cpu(int cpu)
+{
+ return cpu - (cpu % (smp_cpu_mtid + 1));
+}
+
#ifdef CONFIG_HOTPLUG_CPU
extern int smp_rescan_cpus(void);
extern void __noreturn cpu_die(void);
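[Note, not part of the patch] smp_get_base_cpu() rounds a logical CPU number down to the first hardware thread of its core, replacing the open-coded "cpu - (cpu % (smp_cpu_mtid + 1))" at the call sites below. A worked example as a sketch; the mtid parameter stands in for smp_cpu_mtid purely for illustration.

        /* Illustration only. */
        static int smp_get_base_cpu_sketch(int cpu, int mtid)
        {
                /* mtid == 1 (two threads per core): cpu 4 -> 4, cpu 5 -> 4 */
                return cpu - (cpu % (mtid + 1));
        }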
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 8662f5c8e17f..15a3c005c274 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -14,6 +14,7 @@
#define __HAVE_ARCH_MEMCHR /* inline & arch function */
#define __HAVE_ARCH_MEMCMP /* arch function */
#define __HAVE_ARCH_MEMCPY /* gcc builtin & arch function */
+#define __HAVE_ARCH_MEMMOVE /* gcc builtin & arch function */
#define __HAVE_ARCH_MEMSCAN /* inline & arch function */
#define __HAVE_ARCH_MEMSET /* gcc builtin & arch function */
#define __HAVE_ARCH_STRCAT /* inline & arch function */
@@ -32,6 +33,7 @@
extern int memcmp(const void *, const void *, size_t);
extern void *memcpy(void *, const void *, size_t);
extern void *memset(void *, int, size_t);
+extern void *memmove(void *, const void *, size_t);
extern int strcmp(const char *,const char *);
extern size_t strlcat(char *, const char *, size_t);
extern size_t strlcpy(char *, const char *, size_t);
@@ -40,7 +42,6 @@ extern char *strncpy(char *, const char *, size_t);
extern char *strrchr(const char *, int);
extern char *strstr(const char *, const char *);
-#undef __HAVE_ARCH_MEMMOVE
#undef __HAVE_ARCH_STRCHR
#undef __HAVE_ARCH_STRNCHR
#undef __HAVE_ARCH_STRNCMP
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 2728114d5484..229326c942c7 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -107,6 +107,11 @@ struct sysinfo_2_2_2 {
char reserved_3[5];
unsigned short cpus_dedicated;
unsigned short cpus_shared;
+ char reserved_4[3];
+ unsigned char vsne;
+ uuid_be uuid;
+ char reserved_5[160];
+ char ext_name[256];
};
#define LPAR_CHAR_DEDICATED (1 << 7)
@@ -127,7 +132,7 @@ struct sysinfo_3_2_2 {
unsigned int caf;
char cpi[16];
char reserved_1[3];
- char ext_name_encoding;
+ unsigned char evmne;
unsigned int reserved_2;
uuid_be uuid;
} vm[8];
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index f15c0398c363..a5b54a445eb8 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -12,10 +12,10 @@
/*
* Size of kernel stack for each process
*/
-#define THREAD_ORDER 2
+#define THREAD_SIZE_ORDER 2
#define ASYNC_ORDER 2
-#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER)
#ifndef __ASSEMBLY__
@@ -30,15 +30,7 @@
* - if the contents of this structure are changed, the assembly constants must also be changed
*/
struct thread_info {
- struct task_struct *task; /* main task structure */
unsigned long flags; /* low level flags */
- unsigned long sys_call_table; /* System call table address */
- unsigned int cpu; /* current CPU */
- int preempt_count; /* 0 => preemptable, <0 => BUG */
- unsigned int system_call;
- __u64 user_timer;
- __u64 system_timer;
- unsigned long last_break; /* last breaking-event-address. */
};
/*
@@ -46,26 +38,14 @@ struct thread_info {
*/
#define INIT_THREAD_INFO(tsk) \
{ \
- .task = &tsk, \
.flags = 0, \
- .cpu = 0, \
- .preempt_count = INIT_PREEMPT_COUNT, \
}
-#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
- return (struct thread_info *) S390_lowcore.thread_info;
-}
-
void arch_release_task_struct(struct task_struct *tsk);
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
-#define THREAD_SIZE_ORDER THREAD_ORDER
-
#endif
/*
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 0bb08f341c09..de8298800722 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -52,11 +52,9 @@ static inline void store_clock_comparator(__u64 *time)
void clock_comparator_work(void);
-void __init ptff_init(void);
+void __init time_early_init(void);
extern unsigned char ptff_function_mask[16];
-extern unsigned long lpar_offset;
-extern unsigned long initial_leap_seconds;
/* Function codes for the ptff instruction. */
#define PTFF_QAF 0x00 /* query available functions */
@@ -100,21 +98,28 @@ struct ptff_qui {
unsigned int pad_0x5c[41];
} __packed;
-static inline int ptff(void *ptff_block, size_t len, unsigned int func)
-{
- typedef struct { char _[len]; } addrtype;
- register unsigned int reg0 asm("0") = func;
- register unsigned long reg1 asm("1") = (unsigned long) ptff_block;
- int rc;
-
- asm volatile(
- " .word 0x0104\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (rc), "+m" (*(addrtype *) ptff_block)
- : "d" (reg0), "d" (reg1) : "cc");
- return rc;
-}
+/*
+ * ptff - Perform timing facility function
+ * @ptff_block: Pointer to ptff parameter block
+ * @len: Length of parameter block
+ * @func: Function code
+ * Returns: Condition code (0 on success)
+ */
+#define ptff(ptff_block, len, func) \
+({ \
+ struct addrtype { char _[len]; }; \
+ register unsigned int reg0 asm("0") = func; \
+ register unsigned long reg1 asm("1") = (unsigned long) (ptff_block);\
+ int rc; \
+ \
+ asm volatile( \
+ " .word 0x0104\n" \
+ " ipm %0\n" \
+ " srl %0,28\n" \
+ : "=d" (rc), "+m" (*(struct addrtype *) reg1) \
+ : "d" (reg0), "d" (reg1) : "cc"); \
+ rc; \
+})
static inline unsigned long long local_tick_disable(void)
{
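[Note, not part of the patch] Turning ptff() into a macro lets len be expanded at each call site, so the struct addrtype { char _[len]; } memory-clobber hint covers exactly the caller's parameter block instead of relying on a variable-length typedef inside a function. A usage sketch mirroring the query calls in time.c; PTFF_QTO and the error handling are assumptions for the example and are not shown in this hunk.

        /* Usage sketch only. */
        static int ptff_query_tod_offset(void)
        {
                struct ptff_qto qto;

                if (ptff(&qto, sizeof(qto), PTFF_QTO) != 0)
                        return -1;      /* condition code != 0: not available */
                /* ... consume qto (e.g. the LPAR TOD offset) here ... */
                return 0;
        }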
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index f15f5571ca2b..fa1bfce10370 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -22,21 +22,22 @@ struct cpu_topology_s390 {
cpumask_t drawer_mask;
};
-DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
-
-#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
-#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
-#define topology_sibling_cpumask(cpu) \
- (&per_cpu(cpu_topology, cpu).thread_mask)
-#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
-#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
-#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
-#define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
-#define topology_drawer_id(cpu) (per_cpu(cpu_topology, cpu).drawer_id)
-#define topology_drawer_cpumask(cpu) (&per_cpu(cpu_topology, cpu).drawer_mask)
+extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+extern cpumask_t cpus_with_topology;
+
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)
+#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_mask)
+#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
+#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
+#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
+#define topology_drawer_id(cpu) (cpu_topology[cpu].drawer_id)
+#define topology_drawer_cpumask(cpu) (&cpu_topology[cpu].drawer_mask)
#define mc_capable() 1
+void topology_init_early(void);
int topology_cpu_init(struct cpu *);
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);
@@ -46,6 +47,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
#else /* CONFIG_SCHED_TOPOLOGY */
+static inline void topology_init_early(void) { }
static inline void topology_schedule_update(void) { }
static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
static inline void topology_expect_change(void) { }
@@ -65,7 +67,7 @@ static inline void topology_expect_change(void) { }
#define cpu_to_node cpu_to_node
static inline int cpu_to_node(int cpu)
{
- return per_cpu(cpu_topology, cpu).node_id;
+ return cpu_topology[cpu].node_id;
}
/* Returns a pointer to the cpumask of CPUs on node 'node'. */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 52d7c8709279..f82b04e85a21 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -37,14 +37,14 @@
#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.mm_segment)
-#define set_fs(x) \
-({ \
+#define set_fs(x) \
+{ \
unsigned long __pto; \
current->thread.mm_segment = (x); \
__pto = current->thread.mm_segment.ar4 ? \
S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
__ctl_load(__pto, 7, 7); \
-})
+}
#define segment_eq(a,b) ((a).ar4 == (b).ar4)
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index d0a2dbf2433d..88bdc477a843 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -33,6 +33,8 @@ struct vdso_data {
__u32 ectg_available; /* ECTG instruction present 0x58 */
__u32 tk_mult; /* Mult. used for xtime_nsec 0x5c */
__u32 tk_shift; /* Shift used for xtime_nsec 0x60 */
+ __u32 ts_dir; /* TOD steering direction 0x64 */
+ __u64 ts_end; /* TOD steering end 0x68 */
};
struct vdso_per_cpu_data {
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index cc44b09c25fc..bf736e764cb4 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -12,6 +12,7 @@ header-y += dasd.h
header-y += debug.h
header-y += errno.h
header-y += fcntl.h
+header-y += hypfs.h
header-y += ioctl.h
header-y += ioctls.h
header-y += ipcbuf.h
@@ -29,16 +30,16 @@ header-y += ptrace.h
header-y += qeth.h
header-y += resource.h
header-y += schid.h
+header-y += sclp_ctl.h
header-y += sembuf.h
header-y += setup.h
header-y += shmbuf.h
+header-y += sie.h
header-y += sigcontext.h
header-y += siginfo.h
header-y += signal.h
header-y += socket.h
header-y += sockios.h
-header-y += sclp_ctl.h
-header-y += sie.h
header-y += stat.h
header-y += statfs.h
header-y += swab.h
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 1f0fe98f6db9..36b5101c8606 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -2,20 +2,47 @@
# Makefile for the linux kernel.
#
-KCOV_INSTRUMENT_early.o := n
-KCOV_INSTRUMENT_sclp.o := n
-KCOV_INSTRUMENT_als.o := n
-
ifdef CONFIG_FUNCTION_TRACER
-# Don't trace early setup code and tracing code
-CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+
+# Do not trace tracer code
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+
+# Do not trace early setup code
+CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE)
+
+endif
+
+GCOV_PROFILE_als.o := n
+GCOV_PROFILE_early.o := n
+GCOV_PROFILE_sclp.o := n
+
+KCOV_INSTRUMENT_als.o := n
+KCOV_INSTRUMENT_early.o := n
+KCOV_INSTRUMENT_sclp.o := n
+
+UBSAN_SANITIZE_als.o := n
+UBSAN_SANITIZE_early.o := n
+UBSAN_SANITIZE_sclp.o := n
+
+#
+# Use -march=z900 for sclp.c and als.c to be able to print an error
+# message if the kernel is started on a machine which is too old
+#
+ifneq ($(CC_FLAGS_MARCH),-march=z900)
+CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
+CFLAGS_als.o += -march=z900
+CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
+CFLAGS_sclp.o += -march=z900
+AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
+AFLAGS_head.o += -march=z900
endif
#
# Passing null pointers is ok for smp code, since we access the lowcore here.
#
-CFLAGS_smp.o := -Wno-nonnull
+CFLAGS_smp.o := -Wno-nonnull
#
# Disable tailcall optimizations for stack / callchain walking functions
@@ -30,27 +57,7 @@ CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
#
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
-CFLAGS_sysinfo.o += -w
-
-#
-# Use -march=z900 for sclp.c and als.c to be able to print an error
-# message if the kernel is started on a machine which is too old
-#
-CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
-ifneq ($(CC_FLAGS_MARCH),-march=z900)
-CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
-CFLAGS_sclp.o += -march=z900
-CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
-CFLAGS_als.o += -march=z900
-AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
-AFLAGS_head.o += -march=z900
-endif
-GCOV_PROFILE_sclp.o := n
-GCOV_PROFILE_als.o := n
-UBSAN_SANITIZE_als.o := n
-UBSAN_SANITIZE_early.o := n
-UBSAN_SANITIZE_sclp.o := n
+CFLAGS_sysinfo.o += -w
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index f3df9e0a5dec..c4b3570ded5b 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -25,12 +25,14 @@
int main(void)
{
/* task struct offsets */
- OFFSET(__TASK_thread_info, task_struct, stack);
+ OFFSET(__TASK_stack, task_struct, stack);
OFFSET(__TASK_thread, task_struct, thread);
OFFSET(__TASK_pid, task_struct, pid);
BLANK();
/* thread struct offsets */
OFFSET(__THREAD_ksp, thread_struct, ksp);
+ OFFSET(__THREAD_sysc_table, thread_struct, sys_call_table);
+ OFFSET(__THREAD_last_break, thread_struct, last_break);
OFFSET(__THREAD_FPU_fpc, thread_struct, fpu.fpc);
OFFSET(__THREAD_FPU_regs, thread_struct, fpu.regs);
OFFSET(__THREAD_per_cause, thread_struct, per_event.cause);
@@ -39,14 +41,7 @@ int main(void)
OFFSET(__THREAD_trap_tdb, thread_struct, trap_tdb);
BLANK();
/* thread info offsets */
- OFFSET(__TI_task, thread_info, task);
- OFFSET(__TI_flags, thread_info, flags);
- OFFSET(__TI_sysc_table, thread_info, sys_call_table);
- OFFSET(__TI_cpu, thread_info, cpu);
- OFFSET(__TI_precount, thread_info, preempt_count);
- OFFSET(__TI_user_timer, thread_info, user_timer);
- OFFSET(__TI_system_timer, thread_info, system_timer);
- OFFSET(__TI_last_break, thread_info, last_break);
+ OFFSET(__TI_flags, task_struct, thread_info.flags);
BLANK();
/* pt_regs offsets */
OFFSET(__PT_ARGS, pt_regs, args);
@@ -79,6 +74,8 @@ int main(void)
OFFSET(__VDSO_ECTG_OK, vdso_data, ectg_available);
OFFSET(__VDSO_TK_MULT, vdso_data, tk_mult);
OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
+ OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir);
+ OFFSET(__VDSO_TS_END, vdso_data, ts_end);
OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr);
@@ -159,7 +156,6 @@ int main(void)
OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock);
OFFSET(__LC_CURRENT, lowcore, current_task);
- OFFSET(__LC_THREAD_INFO, lowcore, thread_info);
OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
OFFSET(__LC_ASYNC_STACK, lowcore, async_stack);
OFFSET(__LC_PANIC_STACK, lowcore, panic_stack);
@@ -173,6 +169,7 @@ int main(void)
OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset);
OFFSET(__LC_VDSO_PER_CPU, lowcore, vdso_per_cpu_data);
OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
+ OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
OFFSET(__LC_GMAP, lowcore, gmap);
OFFSET(__LC_PASTE, lowcore, paste);
/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 4af60374eba0..6f2a6ab13cb5 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -446,7 +446,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
/* set extra registers only for synchronous signals */
regs->gprs[4] = regs->int_code & 127;
regs->gprs[5] = regs->int_parm_long;
- regs->gprs[6] = task_thread_info(current)->last_break;
+ regs->gprs[6] = current->thread.last_break;
}
return 0;
@@ -523,7 +523,7 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
regs->gprs[2] = ksig->sig;
regs->gprs[3] = (__force __u64) &frame->info;
regs->gprs[4] = (__force __u64) &frame->uc;
- regs->gprs[5] = task_thread_info(current)->last_break;
+ regs->gprs[5] = current->thread.last_break;
return 0;
}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 2374c5b46bbc..d038c8cea6cb 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -293,6 +293,7 @@ static noinline __init void setup_lowcore_early(void)
psw.addr = (unsigned long) s390_base_pgm_handler;
S390_lowcore.program_new_psw = psw;
s390_base_pgm_handler_fn = early_pgm_check_handler;
+ S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
}
static noinline __init void setup_facility_list(void)
@@ -391,7 +392,49 @@ static int __init cad_init(void)
}
early_initcall(cad_init);
-static __init void rescue_initrd(void)
+static __init void memmove_early(void *dst, const void *src, size_t n)
+{
+ unsigned long addr;
+ long incr;
+ psw_t old;
+
+ if (!n)
+ return;
+ incr = 1;
+ if (dst > src) {
+ incr = -incr;
+ dst += n - 1;
+ src += n - 1;
+ }
+ old = S390_lowcore.program_new_psw;
+ S390_lowcore.program_new_psw.mask = __extract_psw();
+ asm volatile(
+ " larl %[addr],1f\n"
+ " stg %[addr],%[psw_pgm_addr]\n"
+ "0: mvc 0(1,%[dst]),0(%[src])\n"
+ " agr %[dst],%[incr]\n"
+ " agr %[src],%[incr]\n"
+ " brctg %[n],0b\n"
+ "1:\n"
+ : [addr] "=&d" (addr),
+ [psw_pgm_addr] "=&Q" (S390_lowcore.program_new_psw.addr),
+ [dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n)
+ : [incr] "d" (incr)
+ : "cc", "memory");
+ S390_lowcore.program_new_psw = old;
+}
+
+static __init noinline void ipl_save_parameters(void)
+{
+ void *src, *dst;
+
+ src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
+ dst = (void *) IPL_PARMBLOCK_ORIGIN;
+ memmove_early(dst, src, PAGE_SIZE);
+ S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
+}
+
+static __init noinline void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
@@ -405,7 +448,7 @@ static __init void rescue_initrd(void)
return;
if (INITRD_START >= min_initrd_addr)
return;
- memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
+ memmove_early((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
INITRD_START = min_initrd_addr;
#endif
}
@@ -467,7 +510,8 @@ void __init startup_init(void)
ipl_save_parameters();
rescue_initrd();
clear_bss_section();
- ptff_init();
+ ipl_verify_parameters();
+ time_early_init();
init_kernel_storage_key();
lockdep_off();
setup_lowcore_early();
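[Note, not part of the patch] memmove_early() is a byte-wise copy that picks its direction the same way memmove() does (backwards when dst lies above src) and temporarily points the program-check new PSW past the copy loop, so a fault simply terminates the copy instead of taking the normal program-check path this early in boot. A plain-C model of the direction handling, for illustration only (it omits the PSW fixup):

        /* Plain-C model of the direction handling (illustration only). */
        static void memmove_early_sketch(unsigned char *dst, const unsigned char *src, size_t n)
        {
                long incr = 1;

                if (!n)
                        return;
                if (dst > src) {        /* copy backwards on overlap */
                        incr = -1;
                        dst += n - 1;
                        src += n - 1;
                }
                while (n--) {
                        *dst = *src;
                        dst += incr;
                        src += incr;
                }
        }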
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 49a30737adde..97298c58b2be 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -42,7 +42,7 @@ __PT_R13 = __PT_GPRS + 104
__PT_R14 = __PT_GPRS + 112
__PT_R15 = __PT_GPRS + 120
-STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
+STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
@@ -123,8 +123,14 @@ _PIF_WORK = (_PIF_PER_TRAP)
.macro LAST_BREAK scratch
srag \scratch,%r10,23
+#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
jz .+10
- stg %r10,__TI_last_break(%r12)
+ stg %r10,__TASK_thread+__THREAD_last_break(%r12)
+#else
+ jz .+14
+ lghi \scratch,__TASK_thread
+ stg %r10,__THREAD_last_break(\scratch,%r12)
+#endif
.endm
.macro REENABLE_IRQS
@@ -186,14 +192,13 @@ ENTRY(__switch_to)
stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
lgr %r1,%r2
aghi %r1,__TASK_thread # thread_struct of prev task
- lg %r5,__TASK_thread_info(%r3) # get thread_info of next
+ lg %r5,__TASK_stack(%r3) # start of kernel stack of next
stg %r15,__THREAD_ksp(%r1) # store kernel stack of prev
lgr %r1,%r3
aghi %r1,__TASK_thread # thread_struct of next task
lgr %r15,%r5
aghi %r15,STACK_INIT # end of kernel stack of next
stg %r3,__LC_CURRENT # store task struct of next
- stg %r5,__LC_THREAD_INFO # store thread info of next
stg %r15,__LC_KERNEL_STACK # store end of kernel stack
lg %r15,__THREAD_ksp(%r1) # load kernel stack of next
/* c4 is used in guest detection: arch/s390/kernel/perf_cpum_sf.c */
@@ -274,7 +279,7 @@ ENTRY(system_call)
.Lsysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
+ lg %r12,__LC_CURRENT
lghi %r14,_PIF_SYSCALL
.Lsysc_per:
lg %r15,__LC_KERNEL_STACK
@@ -288,7 +293,13 @@ ENTRY(system_call)
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
stg %r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
- lg %r10,__TI_sysc_table(%r12) # address of system call table
+ # load address of system call table
+#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
+ lg %r10,__TASK_thread+__THREAD_sysc_table(%r12)
+#else
+ lghi %r13,__TASK_thread
+ lg %r10,__THREAD_sysc_table(%r13,%r12)
+#endif
llgh %r8,__PT_INT_CODE+2(%r11)
slag %r8,%r8,2 # shift and test for svc 0
jnz .Lsysc_nr_ok
@@ -389,7 +400,6 @@ ENTRY(system_call)
TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL
jno .Lsysc_return
lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
- lg %r10,__TI_sysc_table(%r12) # address of system call table
lghi %r8,0 # svc 0 returns -ENOSYS
llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
cghi %r1,NR_syscalls
@@ -457,7 +467,7 @@ ENTRY(system_call)
#
ENTRY(ret_from_fork)
la %r11,STACK_FRAME_OVERHEAD(%r15)
- lg %r12,__LC_THREAD_INFO
+ lg %r12,__LC_CURRENT
brasl %r14,schedule_tail
TRACE_IRQS_ON
ssm __LC_SVC_NEW_PSW # reenable interrupts
@@ -478,7 +488,7 @@ ENTRY(pgm_check_handler)
stpt __LC_SYNC_ENTER_TIMER
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
+ lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_PGM_OLD_PSW
tmhh %r8,0x0001 # test problem state bit
@@ -501,7 +511,7 @@ ENTRY(pgm_check_handler)
2: LAST_BREAK %r14
UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
lg %r15,__LC_KERNEL_STACK
- lg %r14,__TI_task(%r12)
+ lgr %r14,%r12
aghi %r14,__TASK_thread # pointer to thread_struct
lghi %r13,__LC_PGM_TDB
tm __LC_PGM_ILC+2,0x02 # check for transaction abort
@@ -567,7 +577,7 @@ ENTRY(io_int_handler)
stpt __LC_ASYNC_ENTER_TIMER
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
+ lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_IO_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
@@ -626,7 +636,7 @@ ENTRY(io_int_handler)
jo .Lio_work_user # yes -> do resched & signal
#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
- icm %r0,15,__TI_precount(%r12)
+ icm %r0,15,__LC_PREEMPT_COUNT
jnz .Lio_restore # preemption is disabled
TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
jno .Lio_restore
@@ -741,7 +751,7 @@ ENTRY(ext_int_handler)
stpt __LC_ASYNC_ENTER_TIMER
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
+ lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_EXT_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
@@ -798,13 +808,10 @@ ENTRY(save_fpu_regs)
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
bor %r14
stfpc __THREAD_FPU_fpc(%r2)
-.Lsave_fpu_regs_fpc_end:
lg %r3,__THREAD_FPU_regs(%r2)
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
jz .Lsave_fpu_regs_fp # no -> store FP regs
-.Lsave_fpu_regs_vx_low:
VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
-.Lsave_fpu_regs_vx_high:
VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3)
j .Lsave_fpu_regs_done # -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
@@ -851,9 +858,7 @@ load_fpu_regs:
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
jz .Lload_fpu_regs_fp # -> no VX, load FP regs
-.Lload_fpu_regs_vx:
VLM %v0,%v15,0,%r4
-.Lload_fpu_regs_vx_high:
VLM %v16,%v31,256,%r4
j .Lload_fpu_regs_done
.Lload_fpu_regs_fp:
@@ -889,7 +894,7 @@ ENTRY(mcck_int_handler)
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
+ lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_MCK_OLD_PSW
TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
@@ -948,7 +953,7 @@ ENTRY(mcck_int_handler)
.Lmcck_panic:
lg %r15,__LC_PANIC_STACK
- aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
j .Lmcck_skip
#
@@ -1085,7 +1090,7 @@ cleanup_critical:
jhe 0f
# set up saved registers r10 and r12
stg %r10,16(%r11) # r10 last break
- stg %r12,32(%r11) # r12 thread-info pointer
+ stg %r12,32(%r11) # r12 task struct pointer
0: # check if the user time update has been done
clg %r9,BASED(.Lcleanup_system_call_insn+24)
jh 0f
@@ -1106,7 +1111,9 @@ cleanup_critical:
lg %r9,16(%r11)
srag %r9,%r9,23
jz 0f
- mvc __TI_last_break(8,%r12),16(%r11)
+ lgr %r9,%r12
+ aghi %r9,__TASK_thread
+ mvc __THREAD_last_break(8,%r9),16(%r11)
0: # set up saved register r11
lg %r15,__LC_KERNEL_STACK
la %r9,STACK_FRAME_OVERHEAD(%r15)
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 4431905f8cfa..0b5ebf8a3d30 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -315,7 +315,7 @@ ENTRY(startup_kdump)
jg startup_continue
.Lstack:
- .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
+ .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
.align 8
6: .long 0x7fffffff,0xffffffff
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 03c2b469c472..482d3526e32b 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -32,11 +32,10 @@ ENTRY(startup_continue)
#
# Setup stack
#
- larl %r15,init_thread_union
- stg %r15,__LC_THREAD_INFO # cache thread info in lowcore
- lg %r14,__TI_task(%r15) # cache current in lowcore
+ larl %r14,init_task
stg %r14,__LC_CURRENT
- aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
+ larl %r15,init_thread_union
+ aghi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) # init_task_union + THREAD_SIZE
stg %r15,__LC_KERNEL_STACK # set end of kernel stack
aghi %r15,-160
#
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 295bfb7124bc..ff3364a067ff 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1991,10 +1991,9 @@ void __init ipl_update_parameters(void)
diag308_set_works = 1;
}
-void __init ipl_save_parameters(void)
+void __init ipl_verify_parameters(void)
{
struct cio_iplinfo iplinfo;
- void *src, *dst;
if (cio_get_iplinfo(&iplinfo))
return;
@@ -2005,10 +2004,6 @@ void __init ipl_save_parameters(void)
if (!iplinfo.is_qdio)
return;
ipl_flags |= IPL_PARMBLOCK_VALID;
- src = (void *)(unsigned long)S390_lowcore.ipl_parmblock_ptr;
- dst = (void *)IPL_PARMBLOCK_ORIGIN;
- memmove(dst, src, PAGE_SIZE);
- S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
}
static LIST_HEAD(rcall);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 285d6561076d..ef60f4177331 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -168,7 +168,7 @@ void do_softirq_own_stack(void)
old = current_stack_pointer();
/* Check against async. stack address range. */
new = S390_lowcore.async_stack;
- if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
+ if (((new - old) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) != 0) {
/* Need to switch to the async. stack. */
new -= STACK_FRAME_OVERHEAD;
((struct stack_frame *) new)->back_chain = old;
diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c
index 6ea6d69339b5..ae7dff110054 100644
--- a/arch/s390/kernel/lgr.c
+++ b/arch/s390/kernel/lgr.c
@@ -5,7 +5,8 @@
* Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <asm/facility.h>
@@ -183,4 +184,4 @@ static int __init lgr_init(void)
lgr_timer_set();
return 0;
}
-module_init(lgr_init);
+device_initcall(lgr_init);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index fcc634c1479a..763dec18edcd 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -995,39 +995,36 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
regs.int_parm = CPU_MF_INT_SF_PRA;
sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long;
- regs.psw.addr = sfr->basic.ia;
- if (sfr->basic.T)
- regs.psw.mask |= PSW_MASK_DAT;
- if (sfr->basic.W)
- regs.psw.mask |= PSW_MASK_WAIT;
- if (sfr->basic.P)
- regs.psw.mask |= PSW_MASK_PSTATE;
- switch (sfr->basic.AS) {
- case 0x0:
- regs.psw.mask |= PSW_ASC_PRIMARY;
- break;
- case 0x1:
- regs.psw.mask |= PSW_ASC_ACCREG;
- break;
- case 0x2:
- regs.psw.mask |= PSW_ASC_SECONDARY;
- break;
- case 0x3:
- regs.psw.mask |= PSW_ASC_HOME;
- break;
- }
+ psw_bits(regs.psw).ia = sfr->basic.ia;
+ psw_bits(regs.psw).t = sfr->basic.T;
+ psw_bits(regs.psw).w = sfr->basic.W;
+ psw_bits(regs.psw).p = sfr->basic.P;
+ psw_bits(regs.psw).as = sfr->basic.AS;
/*
- * A non-zero guest program parameter indicates a guest
- * sample.
- * Note that some early samples or samples from guests without
+ * Use the hardware provided configuration level to decide if the
+ * sample belongs to a guest or host. If that is not available,
+ * fall back to the following heuristics:
+ * A non-zero guest program parameter always indicates a guest
+ * sample. Some early samples or samples from guests without
* lpp usage would be misaccounted to the host. We use the asn
- * value as a heuristic to detect most of these guest samples.
- * If the value differs from the host hpp value, we assume
- * it to be a KVM guest.
+ * value as an add-on heuristic to detect most of these guest samples.
+ * If the value differs from the host hpp value, we assume it to be
+ * a KVM guest.
*/
- if (sfr->basic.gpp || sfr->basic.prim_asn != (u16) sfr->basic.hpp)
+ switch (sfr->basic.CL) {
+ case 1: /* logical partition */
+ sde_regs->in_guest = 0;
+ break;
+ case 2: /* virtual machine */
sde_regs->in_guest = 1;
+ break;
+ default: /* old machine, use heuristics */
+ if (sfr->basic.gpp ||
+ sfr->basic.prim_asn != (u16)sfr->basic.hpp)
+ sde_regs->in_guest = 1;
+ break;
+ }
overflow = 0;
if (perf_exclude_event(event, &regs, sde_regs))
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index bba4fa74b321..400d14f0b9f5 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -103,7 +103,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
unsigned long arg, struct task_struct *p)
{
- struct thread_info *ti;
struct fake_frame
{
struct stack_frame sf;
@@ -121,9 +120,8 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
/* Initialize per thread user and system timer values */
- ti = task_thread_info(p);
- ti->user_timer = 0;
- ti->system_timer = 0;
+ p->thread.user_timer = 0;
+ p->thread.system_timer = 0;
frame->sf.back_chain = 0;
/* new return point is ret_from_fork */
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 9336e824e2db..b81ab8882e2e 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -461,7 +461,7 @@ long arch_ptrace(struct task_struct *child, long request,
}
return 0;
case PTRACE_GET_LAST_BREAK:
- put_user(task_thread_info(child)->last_break,
+ put_user(child->thread.last_break,
(unsigned long __user *) data);
return 0;
case PTRACE_ENABLE_TE:
@@ -811,7 +811,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
}
return 0;
case PTRACE_GET_LAST_BREAK:
- put_user(task_thread_info(child)->last_break,
+ put_user(child->thread.last_break,
(unsigned int __user *) data);
return 0;
}
@@ -997,10 +997,10 @@ static int s390_last_break_get(struct task_struct *target,
if (count > 0) {
if (kbuf) {
unsigned long *k = kbuf;
- *k = task_thread_info(target)->last_break;
+ *k = target->thread.last_break;
} else {
unsigned long __user *u = ubuf;
- if (__put_user(task_thread_info(target)->last_break, u))
+ if (__put_user(target->thread.last_break, u))
return -EFAULT;
}
}
@@ -1113,7 +1113,7 @@ static int s390_system_call_get(struct task_struct *target,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- unsigned int *data = &task_thread_info(target)->system_call;
+ unsigned int *data = &target->thread.system_call;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
data, 0, sizeof(unsigned int));
}
@@ -1123,7 +1123,7 @@ static int s390_system_call_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- unsigned int *data = &task_thread_info(target)->system_call;
+ unsigned int *data = &target->thread.system_call;
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
data, 0, sizeof(unsigned int));
}
@@ -1327,7 +1327,7 @@ static int s390_compat_last_break_get(struct task_struct *target,
compat_ulong_t last_break;
if (count > 0) {
- last_break = task_thread_info(target)->last_break;
+ last_break = target->thread.last_break;
if (kbuf) {
unsigned long *k = kbuf;
*k = last_break;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7f7ba5f23f13..adfac9f0a89f 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -35,6 +35,7 @@
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
+#include <linux/dma-contiguous.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
@@ -303,7 +304,7 @@ static void __init setup_lowcore(void)
* Setup lowcore for boot cpu
*/
BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
- lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
+ lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc));
lc->restart_psw.mask = PSW_KERNEL_BITS;
lc->restart_psw.addr = (unsigned long) restart_int_handler;
lc->external_new_psw.mask = PSW_KERNEL_BITS |
@@ -324,15 +325,15 @@ static void __init setup_lowcore(void)
lc->kernel_stack = ((unsigned long) &init_thread_union)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->async_stack = (unsigned long)
- __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
+ memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE)
+ ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->panic_stack = (unsigned long)
- __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
+ memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE)
+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
- lc->current_task = (unsigned long) init_thread_union.thread_info.task;
- lc->thread_info = (unsigned long) &init_thread_union;
+ lc->current_task = (unsigned long)&init_task;
lc->lpp = LPP_MAGIC;
lc->machine_flags = S390_lowcore.machine_flags;
+ lc->preempt_count = S390_lowcore.preempt_count;
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
MAX_FACILITY_BIT/8);
@@ -349,7 +350,7 @@ static void __init setup_lowcore(void)
lc->last_update_timer = S390_lowcore.last_update_timer;
lc->last_update_clock = S390_lowcore.last_update_clock;
- restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
+ restart_stack = memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE);
restart_stack += ASYNC_SIZE;
/*
@@ -412,7 +413,7 @@ static void __init setup_resources(void)
bss_resource.end = (unsigned long) &__bss_stop - 1;
for_each_memblock(memory, reg) {
- res = alloc_bootmem_low(sizeof(*res));
+ res = memblock_virt_alloc(sizeof(*res), 8);
res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
res->name = "System RAM";
@@ -426,7 +427,7 @@ static void __init setup_resources(void)
std_res->start > res->end)
continue;
if (std_res->end > res->end) {
- sub_res = alloc_bootmem_low(sizeof(*sub_res));
+ sub_res = memblock_virt_alloc(sizeof(*sub_res), 8);
*sub_res = *std_res;
sub_res->end = res->end;
std_res->start = res->end + 1;
@@ -445,7 +446,7 @@ static void __init setup_resources(void)
* part of the System RAM resource.
*/
if (crashk_res.end) {
- memblock_add(crashk_res.start, resource_size(&crashk_res));
+ memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
memblock_reserve(crashk_res.start, resource_size(&crashk_res));
insert_resource(&iomem_resource, &crashk_res);
}
@@ -903,6 +904,7 @@ void __init setup_arch(char **cmdline_p)
setup_memory_end();
setup_memory();
+ dma_contiguous_reserve(memory_end);
check_initrd();
reserve_crashkernel();
@@ -921,6 +923,8 @@ void __init setup_arch(char **cmdline_p)
cpu_detect_mhz_feature();
cpu_init();
numa_setup();
+ smp_detect_cpus();
+ topology_init_early();
/*
* Create kernel page tables and switch to virtual addressing.
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index d82562cf0a0e..9f241d1efeda 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -359,7 +359,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
/* set extra registers only for synchronous signals */
regs->gprs[4] = regs->int_code & 127;
regs->gprs[5] = regs->int_parm_long;
- regs->gprs[6] = task_thread_info(current)->last_break;
+ regs->gprs[6] = current->thread.last_break;
}
return 0;
}
@@ -430,7 +430,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
regs->gprs[2] = ksig->sig;
regs->gprs[3] = (unsigned long) &frame->info;
regs->gprs[4] = (unsigned long) &frame->uc;
- regs->gprs[5] = task_thread_info(current)->last_break;
+ regs->gprs[5] = current->thread.last_break;
return 0;
}
@@ -467,13 +467,13 @@ void do_signal(struct pt_regs *regs)
* the debugger may change all our registers, including the system
* call information.
*/
- current_thread_info()->system_call =
+ current->thread.system_call =
test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0;
if (get_signal(&ksig)) {
/* Whee! Actually deliver the signal. */
- if (current_thread_info()->system_call) {
- regs->int_code = current_thread_info()->system_call;
+ if (current->thread.system_call) {
+ regs->int_code = current->thread.system_call;
/* Check for system call restarting. */
switch (regs->gprs[2]) {
case -ERESTART_RESTARTBLOCK:
@@ -506,8 +506,8 @@ void do_signal(struct pt_regs *regs)
/* No handlers present - check for system call restart */
clear_pt_regs_flag(regs, PIF_SYSCALL);
- if (current_thread_info()->system_call) {
- regs->int_code = current_thread_info()->system_call;
+ if (current->thread.system_call) {
+ regs->int_code = current->thread.system_call;
switch (regs->gprs[2]) {
case -ERESTART_RESTARTBLOCK:
/* Restart with sys_restart_syscall */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index df4a508ff35c..e49f61aadaf9 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -19,6 +19,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
+#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
@@ -259,16 +260,14 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
struct lowcore *lc = pcpu->lowcore;
- struct thread_info *ti = task_thread_info(tsk);
lc->kernel_stack = (unsigned long) task_stack_page(tsk)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
- lc->thread_info = (unsigned long) task_thread_info(tsk);
lc->current_task = (unsigned long) tsk;
lc->lpp = LPP_MAGIC;
lc->current_pid = tsk->pid;
- lc->user_timer = ti->user_timer;
- lc->system_timer = ti->system_timer;
+ lc->user_timer = tsk->thread.user_timer;
+ lc->system_timer = tsk->thread.system_timer;
lc->steal_timer = 0;
}
@@ -662,14 +661,12 @@ int smp_cpu_get_polarization(int cpu)
return pcpu_devices[cpu].polarization;
}
-static struct sclp_core_info *smp_get_core_info(void)
+static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
static int use_sigp_detection;
- struct sclp_core_info *info;
int address;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (info && (use_sigp_detection || sclp_get_core_info(info))) {
+ if (use_sigp_detection || sclp_get_core_info(info, early)) {
use_sigp_detection = 1;
for (address = 0;
address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
@@ -683,7 +680,6 @@ static struct sclp_core_info *smp_get_core_info(void)
}
info->combined = info->configured;
}
- return info;
}
static int smp_add_present_cpu(int cpu);
@@ -724,17 +720,15 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
return nr;
}
-static void __init smp_detect_cpus(void)
+void __init smp_detect_cpus(void)
{
unsigned int cpu, mtid, c_cpus, s_cpus;
struct sclp_core_info *info;
u16 address;
/* Get CPU information */
- info = smp_get_core_info();
- if (!info)
- panic("smp_detect_cpus failed to allocate memory\n");
-
+ info = memblock_virt_alloc(sizeof(*info), 8);
+ smp_get_core_info(info, 1);
/* Find boot CPU type */
if (sclp.has_core_type) {
address = stap();
@@ -770,7 +764,7 @@ static void __init smp_detect_cpus(void)
get_online_cpus();
__smp_rescan_cpus(info, 0);
put_online_cpus();
- kfree(info);
+ memblock_free_early((unsigned long)info, sizeof(*info));
}
/*
@@ -807,7 +801,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
pcpu = pcpu_devices + cpu;
if (pcpu->state != CPU_STATE_CONFIGURED)
return -EIO;
- base = cpu - (cpu % (smp_cpu_mtid + 1));
+ base = smp_get_base_cpu(cpu);
for (i = 0; i <= smp_cpu_mtid; i++) {
if (base + i < nr_cpu_ids)
if (cpu_online(base + i))
@@ -907,7 +901,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
/* request the 0x1202 external call external interrupt */
if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1202");
- smp_detect_cpus();
}
void __init smp_prepare_boot_cpu(void)
@@ -973,7 +966,7 @@ static ssize_t cpu_configure_store(struct device *dev,
rc = -EBUSY;
/* disallow configuration changes of online cpus and cpu 0 */
cpu = dev->id;
- cpu -= cpu % (smp_cpu_mtid + 1);
+ cpu = smp_get_base_cpu(cpu);
if (cpu == 0)
goto out;
for (i = 0; i <= smp_cpu_mtid; i++)
@@ -1106,9 +1099,10 @@ int __ref smp_rescan_cpus(void)
struct sclp_core_info *info;
int nr;
- info = smp_get_core_info();
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ smp_get_core_info(info, 0);
get_online_cpus();
mutex_lock(&smp_cpu_state_mutex);
nr = __smp_rescan_cpus(info, 1);
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index 2d6b6e81f812..1ff21f05d7dd 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -194,7 +194,7 @@ pgm_check_entry:
/* Suspend CPU not available -> panic */
larl %r15,init_thread_union
- ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER)
+ ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
larl %r2,.Lpanic_string
larl %r3,_sclp_print_early
lghi %r1,0
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index bfda6aa40280..24021c1e3ecb 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -56,6 +56,20 @@ int stsi(void *sysinfo, int fc, int sel1, int sel2)
}
EXPORT_SYMBOL(stsi);
+static bool convert_ext_name(unsigned char encoding, char *name, size_t len)
+{
+ switch (encoding) {
+ case 1: /* EBCDIC */
+ EBCASC(name, len);
+ break;
+ case 2: /* UTF-8 */
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
{
int i;
@@ -207,24 +221,19 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
seq_printf(m, "LPAR CPUs S-MTID: %d\n", info->mt_stid);
seq_printf(m, "LPAR CPUs PS-MTID: %d\n", info->mt_psmtid);
}
+ if (convert_ext_name(info->vsne, info->ext_name, sizeof(info->ext_name))) {
+ seq_printf(m, "LPAR Extended Name: %-.256s\n", info->ext_name);
+ seq_printf(m, "LPAR UUID: %pUb\n", &info->uuid);
+ }
}
static void print_ext_name(struct seq_file *m, int lvl,
struct sysinfo_3_2_2 *info)
{
- if (info->vm[lvl].ext_name_encoding == 0)
- return;
- if (info->ext_names[lvl][0] == 0)
- return;
- switch (info->vm[lvl].ext_name_encoding) {
- case 1: /* EBCDIC */
- EBCASC(info->ext_names[lvl], sizeof(info->ext_names[lvl]));
- break;
- case 2: /* UTF-8 */
- break;
- default:
+ size_t len = sizeof(info->ext_names[lvl]);
+
+ if (!convert_ext_name(info->vm[lvl].evmne, info->ext_names[lvl], len))
return;
- }
seq_printf(m, "VM%02d Extended Name: %-.256s\n", lvl,
info->ext_names[lvl]);
}
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 0bfcc492987e..867d0a057046 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -59,19 +59,27 @@ ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
EXPORT_SYMBOL(s390_epoch_delta_notifier);
unsigned char ptff_function_mask[16];
-unsigned long lpar_offset;
-unsigned long initial_leap_seconds;
+
+static unsigned long long lpar_offset;
+static unsigned long long initial_leap_seconds;
+static unsigned long long tod_steering_end;
+static long long tod_steering_delta;
/*
* Get time offsets with PTFF
*/
-void __init ptff_init(void)
+void __init time_early_init(void)
{
struct ptff_qto qto;
struct ptff_qui qui;
+ /* Initialize TOD steering parameters */
+ tod_steering_end = sched_clock_base_cc;
+ vdso_data->ts_end = tod_steering_end;
+
if (!test_facility(28))
return;
+
ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF);
/* get LPAR offset */
@@ -80,7 +88,7 @@ void __init ptff_init(void)
/* get initial leap seconds */
if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
- initial_leap_seconds = (unsigned long)
+ initial_leap_seconds = (unsigned long long)
((long) qui.old_leap * 4096000000L);
}
@@ -123,18 +131,6 @@ void clock_comparator_work(void)
cd->event_handler(cd);
}
-/*
- * Fixup the clock comparator.
- */
-static void fixup_clock_comparator(unsigned long long delta)
-{
- /* If nobody is waiting there's nothing to fix. */
- if (S390_lowcore.clock_comparator == -1ULL)
- return;
- S390_lowcore.clock_comparator += delta;
- set_clock_comparator(S390_lowcore.clock_comparator);
-}
-
static int s390_next_event(unsigned long delta,
struct clock_event_device *evt)
{
@@ -215,7 +211,21 @@ void read_boot_clock64(struct timespec64 *ts)
static cycle_t read_tod_clock(struct clocksource *cs)
{
- return get_tod_clock();
+ unsigned long long now, adj;
+
+ preempt_disable(); /* protect from changes to steering parameters */
+ now = get_tod_clock();
+ adj = tod_steering_end - now;
+ if (unlikely((s64) adj >= 0))
+ /*
+ * manually steer by 1 cycle every 2^16 cycles. This
+ * corresponds to shifting the tod delta by 15. 1s is
+ * therefore steered in ~9h. The adjust will decrease
+ * over time, until it finally reaches 0.
+ */
+ now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
+ preempt_enable();
+ return now;
}
static struct clocksource clocksource_tod = {
@@ -384,6 +394,55 @@ static inline int check_sync_clock(void)
return rc;
}
+/*
+ * Apply clock delta to the global data structures.
+ * This is called once on the CPU that performed the clock sync.
+ */
+static void clock_sync_global(unsigned long long delta)
+{
+ unsigned long now, adj;
+ struct ptff_qto qto;
+
+ /* Fixup the monotonic sched clock. */
+ sched_clock_base_cc += delta;
+ /* Adjust TOD steering parameters. */
+ vdso_data->tb_update_count++;
+ now = get_tod_clock();
+ adj = tod_steering_end - now;
+ if (unlikely((s64) adj >= 0))
+ /* Calculate how much of the old adjustment is left. */
+ tod_steering_delta = (tod_steering_delta < 0) ?
+ -(adj >> 15) : (adj >> 15);
+ tod_steering_delta += delta;
+ if ((abs(tod_steering_delta) >> 48) != 0)
+ panic("TOD clock sync offset %lli is too large to drift\n",
+ tod_steering_delta);
+ tod_steering_end = now + (abs(tod_steering_delta) << 15);
+ vdso_data->ts_dir = (tod_steering_delta < 0) ? 0 : 1;
+ vdso_data->ts_end = tod_steering_end;
+ vdso_data->tb_update_count++;
+ /* Update LPAR offset. */
+ if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
+ lpar_offset = qto.tod_epoch_difference;
+ /* Call the TOD clock change notifier. */
+ atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0, &delta);
+}
+
+/*
+ * Apply clock delta to the per-CPU data structures of this CPU.
+ * This is called for each online CPU after the call to clock_sync_global.
+ */
+static void clock_sync_local(unsigned long long delta)
+{
+ /* Add the delta to the clock comparator. */
+ if (S390_lowcore.clock_comparator != -1ULL) {
+ S390_lowcore.clock_comparator += delta;
+ set_clock_comparator(S390_lowcore.clock_comparator);
+ }
+ /* Adjust the last_update_clock time-stamp. */
+ S390_lowcore.last_update_clock += delta;
+}
+
/* Single threaded workqueue used for stp sync events */
static struct workqueue_struct *time_sync_wq;
@@ -397,31 +456,9 @@ static void __init time_init_wq(void)
struct clock_sync_data {
atomic_t cpus;
int in_sync;
- unsigned long long fixup_cc;
+ unsigned long long clock_delta;
};
-static void clock_sync_cpu(struct clock_sync_data *sync)
-{
- atomic_dec(&sync->cpus);
- enable_sync_clock();
- while (sync->in_sync == 0) {
- __udelay(1);
- /*
- * A different cpu changes *in_sync. Therefore use
- * barrier() to force memory access.
- */
- barrier();
- }
- if (sync->in_sync != 1)
- /* Didn't work. Clear per-cpu in sync bit again. */
- disable_sync_clock(NULL);
- /*
- * This round of TOD syncing is done. Set the clock comparator
- * to the next tick and let the processor continue.
- */
- fixup_clock_comparator(sync->fixup_cc);
-}
-
/*
* Server Time Protocol (STP) code.
*/
@@ -523,54 +560,46 @@ void stp_queue_work(void)
static int stp_sync_clock(void *data)
{
- static int first;
+ struct clock_sync_data *sync = data;
unsigned long long clock_delta;
- struct clock_sync_data *stp_sync;
- struct ptff_qto qto;
+ static int first;
int rc;
- stp_sync = data;
-
- if (xchg(&first, 1) == 1) {
- /* Slave */
- clock_sync_cpu(stp_sync);
- return 0;
- }
-
- /* Wait until all other cpus entered the sync function. */
- while (atomic_read(&stp_sync->cpus) != 0)
- cpu_relax();
-
enable_sync_clock();
-
- rc = 0;
- if (stp_info.todoff[0] || stp_info.todoff[1] ||
- stp_info.todoff[2] || stp_info.todoff[3] ||
- stp_info.tmd != 2) {
- rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0, &clock_delta);
- if (rc == 0) {
- /* fixup the monotonic sched clock */
- sched_clock_base_cc += clock_delta;
- if (ptff_query(PTFF_QTO) &&
- ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
- /* Update LPAR offset */
- lpar_offset = qto.tod_epoch_difference;
- atomic_notifier_call_chain(&s390_epoch_delta_notifier,
- 0, &clock_delta);
- stp_sync->fixup_cc = clock_delta;
- fixup_clock_comparator(clock_delta);
- rc = chsc_sstpi(stp_page, &stp_info,
- sizeof(struct stp_sstpi));
- if (rc == 0 && stp_info.tmd != 2)
- rc = -EAGAIN;
+ if (xchg(&first, 1) == 0) {
+ /* Wait until all other cpus entered the sync function. */
+ while (atomic_read(&sync->cpus) != 0)
+ cpu_relax();
+ rc = 0;
+ if (stp_info.todoff[0] || stp_info.todoff[1] ||
+ stp_info.todoff[2] || stp_info.todoff[3] ||
+ stp_info.tmd != 2) {
+ rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0,
+ &clock_delta);
+ if (rc == 0) {
+ sync->clock_delta = clock_delta;
+ clock_sync_global(clock_delta);
+ rc = chsc_sstpi(stp_page, &stp_info,
+ sizeof(struct stp_sstpi));
+ if (rc == 0 && stp_info.tmd != 2)
+ rc = -EAGAIN;
+ }
}
+ sync->in_sync = rc ? -EAGAIN : 1;
+ xchg(&first, 0);
+ } else {
+ /* Slave */
+ atomic_dec(&sync->cpus);
+ /* Wait for in_sync to be set. */
+ while (READ_ONCE(sync->in_sync) == 0)
+ __udelay(1);
}
- if (rc) {
+ if (sync->in_sync != 1)
+ /* Didn't work. Clear per-cpu in sync bit again. */
disable_sync_clock(NULL);
- stp_sync->in_sync = -EAGAIN;
- } else
- stp_sync->in_sync = 1;
- xchg(&first, 0);
+ /* Apply clock delta to per-CPU fields of this CPU. */
+ clock_sync_local(sync->clock_delta);
+
return 0;
}
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index e959c02e0cac..93dcbae1e98d 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
+#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
@@ -41,15 +42,17 @@ static bool topology_enabled = true;
static DECLARE_WORK(topology_work, topology_work_fn);
/*
- * Socket/Book linked lists and per_cpu(cpu_topology) updates are
+ * Socket/Book linked lists and cpu_topology updates are
* protected by "sched_domains_mutex".
*/
static struct mask_info socket_info;
static struct mask_info book_info;
static struct mask_info drawer_info;
-DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
+struct cpu_topology_s390 cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
+
+cpumask_t cpus_with_topology;
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
@@ -97,7 +100,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
if (lcpu < 0)
continue;
for (i = 0; i <= smp_cpu_mtid; i++) {
- topo = &per_cpu(cpu_topology, lcpu + i);
+ topo = &cpu_topology[lcpu + i];
topo->drawer_id = drawer->id;
topo->book_id = book->id;
topo->socket_id = socket->id;
@@ -106,6 +109,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
cpumask_set_cpu(lcpu + i, &drawer->mask);
cpumask_set_cpu(lcpu + i, &book->mask);
cpumask_set_cpu(lcpu + i, &socket->mask);
+ cpumask_set_cpu(lcpu + i, &cpus_with_topology);
smp_cpu_set_polarization(lcpu + i, tl_core->pp);
}
}
@@ -220,7 +224,7 @@ static void update_cpu_masks(void)
int cpu;
for_each_possible_cpu(cpu) {
- topo = &per_cpu(cpu_topology, cpu);
+ topo = &cpu_topology[cpu];
topo->thread_mask = cpu_thread_map(cpu);
topo->core_mask = cpu_group_map(&socket_info, cpu);
topo->book_mask = cpu_group_map(&book_info, cpu);
@@ -231,6 +235,8 @@ static void update_cpu_masks(void)
topo->socket_id = cpu;
topo->book_id = cpu;
topo->drawer_id = cpu;
+ if (cpu_present(cpu))
+ cpumask_set_cpu(cpu, &cpus_with_topology);
}
}
numa_update_cpu_topology();
@@ -241,12 +247,12 @@ void store_topology(struct sysinfo_15_1_x *info)
stsi(info, 15, 1, min(topology_max_mnest, 4));
}
-int arch_update_cpu_topology(void)
+static int __arch_update_cpu_topology(void)
{
struct sysinfo_15_1_x *info = tl_info;
- struct device *dev;
- int cpu, rc = 0;
+ int rc = 0;
+ cpumask_clear(&cpus_with_topology);
if (MACHINE_HAS_TOPOLOGY) {
rc = 1;
store_topology(info);
@@ -255,6 +261,15 @@ int arch_update_cpu_topology(void)
update_cpu_masks();
if (!MACHINE_HAS_TOPOLOGY)
topology_update_polarization_simple();
+ return rc;
+}
+
+int arch_update_cpu_topology(void)
+{
+ struct device *dev;
+ int cpu, rc;
+
+ rc = __arch_update_cpu_topology();
for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu);
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@@ -394,23 +409,23 @@ int topology_cpu_init(struct cpu *cpu)
static const struct cpumask *cpu_thread_mask(int cpu)
{
- return &per_cpu(cpu_topology, cpu).thread_mask;
+ return &cpu_topology[cpu].thread_mask;
}
const struct cpumask *cpu_coregroup_mask(int cpu)
{
- return &per_cpu(cpu_topology, cpu).core_mask;
+ return &cpu_topology[cpu].core_mask;
}
static const struct cpumask *cpu_book_mask(int cpu)
{
- return &per_cpu(cpu_topology, cpu).book_mask;
+ return &cpu_topology[cpu].book_mask;
}
static const struct cpumask *cpu_drawer_mask(int cpu)
{
- return &per_cpu(cpu_topology, cpu).drawer_mask;
+ return &cpu_topology[cpu].drawer_mask;
}
static int __init early_parse_topology(char *p)
@@ -438,19 +453,20 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
nr_masks = max(nr_masks, 1);
for (i = 0; i < nr_masks; i++) {
- mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
+ mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
mask = mask->next;
}
}
-static int __init s390_topology_init(void)
+void __init topology_init_early(void)
{
struct sysinfo_15_1_x *info;
int i;
+ set_sched_topology(s390_topology);
if (!MACHINE_HAS_TOPOLOGY)
- return 0;
- tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
+ goto out;
+ tl_info = memblock_virt_alloc(sizeof(*tl_info), PAGE_SIZE);
info = tl_info;
store_topology(info);
pr_info("The CPU configuration topology of the machine is:");
@@ -460,10 +476,9 @@ static int __init s390_topology_init(void)
alloc_masks(info, &socket_info, 1);
alloc_masks(info, &book_info, 2);
alloc_masks(info, &drawer_info, 3);
- set_sched_topology(s390_topology);
- return 0;
+out:
+ __arch_update_cpu_topology();
}
-early_initcall(s390_topology_init);
static int __init topology_init(void)
{
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 5eec9afbb5b5..a5769b83d90e 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -99,8 +99,27 @@ __kernel_clock_gettime:
tml %r4,0x0001 /* pending update ? loop */
jnz 11b
stcke 0(%r15) /* Store TOD clock */
- lm %r0,%r1,1(%r15)
- s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+ lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */
+ s %r0,1(%r15) /* no - ts_steering_end */
+ sl %r1,5(%r15)
+ brc 3,22f
+ ahi %r0,-1
+22: ltr %r0,%r0 /* past end of steering? */
+ jm 24f
+ srdl %r0,15 /* 1 per 2^16 */
+ tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
+ jz 23f
+ lcr %r0,%r0 /* negative TOD offset */
+ lcr %r1,%r1
+ je 23f
+ ahi %r0,-1
+23: a %r0,1(%r15) /* add TOD timestamp */
+ al %r1,5(%r15)
+ brc 12,25f
+ ahi %r0,1
+ j 25f
+24: lm %r0,%r1,1(%r15) /* load TOD timestamp */
+25: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,12f
ahi %r0,-1
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index 719de6186b20..63b86dceb0bf 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -31,8 +31,27 @@ __kernel_gettimeofday:
tml %r4,0x0001 /* pending update ? loop */
jnz 1b
stcke 0(%r15) /* Store TOD clock */
- lm %r0,%r1,1(%r15)
- s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+ lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */
+ s %r0,1(%r15)
+ sl %r1,5(%r15)
+ brc 3,14f
+ ahi %r0,-1
+14: ltr %r0,%r0 /* past end of steering? */
+ jm 16f
+ srdl %r0,15 /* 1 per 2^16 */
+ tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
+ jz 15f
+ lcr %r0,%r0 /* negative TOD offset */
+ lcr %r1,%r1
+ je 15f
+ ahi %r0,-1
+15: a %r0,1(%r15) /* add TOD timestamp */
+ al %r1,5(%r15)
+ brc 12,17f
+ ahi %r0,1
+ j 17f
+16: lm %r0,%r1,1(%r15) /* load TOD timestamp */
+17: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,3f
ahi %r0,-1
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 61541fb93dc6..9c3b12626dba 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -83,8 +83,17 @@ __kernel_clock_gettime:
tmll %r4,0x0001 /* pending update ? loop */
jnz 5b
stcke 0(%r15) /* Store TOD clock */
- lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
lg %r1,1(%r15)
+ lg %r0,__VDSO_TS_END(%r5) /* TOD steering end time */
+ slgr %r0,%r1 /* now - ts_steering_end */
+ ltgr %r0,%r0 /* past end of steering ? */
+ jm 17f
+ srlg %r0,%r0,15 /* 1 per 2^16 */
+ tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
+ jz 18f
+ lcgr %r0,%r0 /* negative TOD offset */
+18: algr %r1,%r0 /* add steering offset */
+17: lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index 6ce46707663c..b02e62f3bc12 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -31,7 +31,16 @@ __kernel_gettimeofday:
jnz 0b
stcke 0(%r15) /* Store TOD clock */
lg %r1,1(%r15)
- sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+ lg %r0,__VDSO_TS_END(%r5) /* TOD steering end time */
+ slgr %r0,%r1 /* now - ts_steering_end */
+ ltgr %r0,%r0 /* past end of steering ? */
+ jm 6f
+ srlg %r0,%r0,15 /* 1 per 2^16 */
+ tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
+ jz 7f
+ lcgr %r0,%r0 /* negative TOD offset */
+7: algr %r1,%r0 /* add steering offset */
+6: sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 1bd5dde2d5a9..6b246aadf311 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -96,7 +96,6 @@ static void update_mt_scaling(void)
*/
static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
- struct thread_info *ti = task_thread_info(tsk);
u64 timer, clock, user, system, steal;
u64 user_scaled, system_scaled;
@@ -119,13 +118,13 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
update_mt_scaling();
- user = S390_lowcore.user_timer - ti->user_timer;
+ user = S390_lowcore.user_timer - tsk->thread.user_timer;
S390_lowcore.steal_timer -= user;
- ti->user_timer = S390_lowcore.user_timer;
+ tsk->thread.user_timer = S390_lowcore.user_timer;
- system = S390_lowcore.system_timer - ti->system_timer;
+ system = S390_lowcore.system_timer - tsk->thread.system_timer;
S390_lowcore.steal_timer -= system;
- ti->system_timer = S390_lowcore.system_timer;
+ tsk->thread.system_timer = S390_lowcore.system_timer;
user_scaled = user;
system_scaled = system;
@@ -153,15 +152,11 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
void vtime_task_switch(struct task_struct *prev)
{
- struct thread_info *ti;
-
do_account_vtime(prev, 0);
- ti = task_thread_info(prev);
- ti->user_timer = S390_lowcore.user_timer;
- ti->system_timer = S390_lowcore.system_timer;
- ti = task_thread_info(current);
- S390_lowcore.user_timer = ti->user_timer;
- S390_lowcore.system_timer = ti->system_timer;
+ prev->thread.user_timer = S390_lowcore.user_timer;
+ prev->thread.system_timer = S390_lowcore.system_timer;
+ S390_lowcore.user_timer = current->thread.user_timer;
+ S390_lowcore.system_timer = current->thread.system_timer;
}
/*
@@ -181,7 +176,6 @@ void vtime_account_user(struct task_struct *tsk)
*/
void vtime_account_irq_enter(struct task_struct *tsk)
{
- struct thread_info *ti = task_thread_info(tsk);
u64 timer, system, system_scaled;
timer = S390_lowcore.last_update_timer;
@@ -193,9 +187,9 @@ void vtime_account_irq_enter(struct task_struct *tsk)
time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
update_mt_scaling();
- system = S390_lowcore.system_timer - ti->system_timer;
+ system = S390_lowcore.system_timer - tsk->thread.system_timer;
S390_lowcore.steal_timer -= system;
- ti->system_timer = S390_lowcore.system_timer;
+ tsk->thread.system_timer = S390_lowcore.system_timer;
system_scaled = system;
/* Do MT utilization scaling */
if (smp_cpu_mtid) {
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index be4db07f70d3..af13f1a135b6 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -415,7 +415,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
int rc;
mci.val = mchk->mcic;
- /* take care of lazy register loading via vcpu load/put */
+ /* take care of lazy register loading */
save_fpu_regs();
save_access_regs(vcpu->run->s.regs.acrs);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 9c7a1ecfe6bd..bec71e902be3 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1812,22 +1812,7 @@ __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
- /* Save host register state */
- save_fpu_regs();
- vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
- vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
-
- if (MACHINE_HAS_VX)
- current->thread.fpu.regs = vcpu->run->s.regs.vrs;
- else
- current->thread.fpu.regs = vcpu->run->s.regs.fprs;
- current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
- if (test_fp_ctl(current->thread.fpu.fpc))
- /* User space provided an invalid FPC, let's clear it */
- current->thread.fpu.fpc = 0;
- save_access_regs(vcpu->arch.host_acrs);
- restore_access_regs(vcpu->run->s.regs.acrs);
gmap_enable(vcpu->arch.enabled_gmap);
atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
@@ -1844,16 +1829,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->arch.enabled_gmap = gmap_get_enabled();
gmap_disable(vcpu->arch.enabled_gmap);
- /* Save guest register state */
- save_fpu_regs();
- vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
-
- /* Restore host register state */
- current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
- current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
-
- save_access_regs(vcpu->run->s.regs.acrs);
- restore_access_regs(vcpu->arch.host_acrs);
}
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
@@ -2243,7 +2218,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
{
memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
- restore_access_regs(vcpu->run->s.regs.acrs);
return 0;
}
@@ -2257,11 +2231,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- /* make sure the new values will be lazily loaded */
- save_fpu_regs();
if (test_fp_ctl(fpu->fpc))
return -EINVAL;
- current->thread.fpu.fpc = fpu->fpc;
+ vcpu->run->s.regs.fpc = fpu->fpc;
if (MACHINE_HAS_VX)
convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
(freg_t *) fpu->fprs);
@@ -2279,7 +2251,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
(__vector128 *) vcpu->run->s.regs.vrs);
else
memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
- fpu->fpc = current->thread.fpu.fpc;
+ fpu->fpc = vcpu->run->s.regs.fpc;
return 0;
}
@@ -2740,6 +2712,20 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (riccb->valid)
vcpu->arch.sie_block->ecb3 |= 0x01;
}
+ save_access_regs(vcpu->arch.host_acrs);
+ restore_access_regs(vcpu->run->s.regs.acrs);
+ /* save host (userspace) fprs/vrs */
+ save_fpu_regs();
+ vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
+ vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
+ if (MACHINE_HAS_VX)
+ current->thread.fpu.regs = vcpu->run->s.regs.vrs;
+ else
+ current->thread.fpu.regs = vcpu->run->s.regs.fprs;
+ current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
+ if (test_fp_ctl(current->thread.fpu.fpc))
+ /* User space provided an invalid FPC, let's clear it */
+ current->thread.fpu.fpc = 0;
kvm_run->kvm_dirty_regs = 0;
}
@@ -2758,6 +2744,15 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
kvm_run->s.regs.pft = vcpu->arch.pfault_token;
kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
+ save_access_regs(vcpu->run->s.regs.acrs);
+ restore_access_regs(vcpu->arch.host_acrs);
+ /* Save guest register state */
+ save_fpu_regs();
+ vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+ /* Restore will be done lazily at return */
+ current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
+ current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
+
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -2874,7 +2869,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
/*
* The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
- * copying in vcpu load/put. Lets update our copies before we save
+ * switch in the run ioctl. Let's update our copies before we save
* it into the save area
*/
save_fpu_regs();
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index be9fa65bfac4..7422a706f310 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -8,6 +8,45 @@
#include <asm/export.h>
/*
+ * void *memmove(void *dest, const void *src, size_t n)
+ */
+ENTRY(memmove)
+ ltgr %r4,%r4
+ lgr %r1,%r2
+ bzr %r14
+ clgr %r2,%r3
+ jnh .Lmemmove_forward
+ la %r5,0(%r4,%r3)
+ clgr %r2,%r5
+ jl .Lmemmove_reverse
+.Lmemmove_forward:
+ aghi %r4,-1
+ srlg %r0,%r4,8
+ ltgr %r0,%r0
+ jz .Lmemmove_rest
+.Lmemmove_loop:
+ mvc 0(256,%r1),0(%r3)
+ la %r1,256(%r1)
+ la %r3,256(%r3)
+ brctg %r0,.Lmemmove_loop
+.Lmemmove_rest:
+ larl %r5,.Lmemmove_mvc
+ ex %r4,0(%r5)
+ br %r14
+.Lmemmove_reverse:
+ aghi %r4,-1
+.Lmemmove_reverse_loop:
+ ic %r0,0(%r4,%r3)
+ stc %r0,0(%r4,%r1)
+ brctg %r4,.Lmemmove_reverse_loop
+ ic %r0,0(%r4,%r3)
+ stc %r0,0(%r4,%r1)
+ br %r14
+.Lmemmove_mvc:
+ mvc 0(1,%r1),0(%r3)
+EXPORT_SYMBOL(memmove)
+
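
Illustrative aside (not part of the patch): a C rendering of the branch structure of the assembler memmove above. The real routine copies forward in 256-byte mvc chunks with an ex of the remainder and copies byte-wise only on the reverse path; the sketch below simplifies both paths to byte copies.

	#include <stddef.h>

	static void *memmove_sketch(void *dest, const void *src, size_t n)
	{
		unsigned char *d = dest;
		const unsigned char *s = src;

		if (!n)
			return dest;
		if (d <= s || d >= s + n) {
			/* no dangerous overlap: copy forward */
			while (n--)
				*d++ = *s++;
		} else {
			/* dest lies inside [src, src+n): copy backwards */
			while (n--)
				d[n] = s[n];
		}
		return dest;
	}
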
+/*
* memset implementation
*
* This code corresponds to the C construct below. We do distinguish
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 661d9fe63c43..d1faae5cdd12 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -733,6 +733,7 @@ block:
* return to userspace schedule() to block. */
__set_current_state(TASK_UNINTERRUPTIBLE);
set_tsk_need_resched(tsk);
+ set_preempt_need_resched();
}
}
out:
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 1848292766ef..45becc8a44ec 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -34,7 +34,7 @@ static void __ref *vmem_alloc_pages(unsigned int order)
if (slab_is_available())
return (void *)__get_free_pages(GFP_KERNEL, order);
- return alloc_bootmem_align(size, size);
+ return (void *) memblock_alloc(size, size);
}
static inline pud_t *vmem_pud_alloc(void)
@@ -61,17 +61,16 @@ pmd_t *vmem_pmd_alloc(void)
pte_t __ref *vmem_pte_alloc(void)
{
+ unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
pte_t *pte;
if (slab_is_available())
pte = (pte_t *) page_table_alloc(&init_mm);
else
- pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
- PTRS_PER_PTE * sizeof(pte_t));
+ pte = (pte_t *) memblock_alloc(size, size);
if (!pte)
return NULL;
- clear_table((unsigned long *) pte, _PAGE_INVALID,
- PTRS_PER_PTE * sizeof(pte_t));
+ clear_table((unsigned long *) pte, _PAGE_INVALID, size);
return pte;
}
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index 37e0bb835516..cfd08384f0ab 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
+#include <linux/bootmem.h>
#include <linux/node.h>
#include <linux/memory.h>
#include <linux/slab.h>
@@ -307,13 +308,11 @@ fail:
/*
* Allocate and initialize core to node mapping
*/
-static void create_core_to_node_map(void)
+static void __ref create_core_to_node_map(void)
{
int i;
- emu_cores = kzalloc(sizeof(*emu_cores), GFP_KERNEL);
- if (emu_cores == NULL)
- panic("Could not allocate cores to node memory");
+ emu_cores = memblock_virt_alloc(sizeof(*emu_cores), 8);
for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
emu_cores->to_node_id[i] = NODE_ID_FREE;
}
@@ -354,13 +353,13 @@ static struct toptree *toptree_from_topology(void)
phys = toptree_new(TOPTREE_ID_PHYS, 1);
- for_each_online_cpu(cpu) {
- top = &per_cpu(cpu_topology, cpu);
+ for_each_cpu(cpu, &cpus_with_topology) {
+ top = &cpu_topology[cpu];
node = toptree_get_child(phys, 0);
drawer = toptree_get_child(node, top->drawer_id);
book = toptree_get_child(drawer, top->book_id);
mc = toptree_get_child(book, top->socket_id);
- core = toptree_get_child(mc, top->core_id);
+ core = toptree_get_child(mc, smp_get_base_cpu(cpu));
if (!drawer || !book || !mc || !core)
panic("NUMA emulation could not allocate memory");
cpumask_set_cpu(cpu, &core->mask);
@@ -378,7 +377,7 @@ static void topology_add_core(struct toptree *core)
int cpu;
for_each_cpu(cpu, &core->mask) {
- top = &per_cpu(cpu_topology, cpu);
+ top = &cpu_topology[cpu];
cpumask_copy(&top->thread_mask, &core->mask);
cpumask_copy(&top->core_mask, &core_mc(core)->mask);
cpumask_copy(&top->book_mask, &core_book(core)->mask);
@@ -425,6 +424,27 @@ static void print_node_to_core_map(void)
}
}
+static void pin_all_possible_cpus(void)
+{
+ int core_id, node_id, cpu;
+ static int initialized;
+
+ if (initialized)
+ return;
+ print_node_to_core_map();
+ node_id = 0;
+ for_each_possible_cpu(cpu) {
+ core_id = smp_get_base_cpu(cpu);
+ if (emu_cores->to_node_id[core_id] != NODE_ID_FREE)
+ continue;
+ pin_core_to_node(core_id, node_id);
+ cpu_topology[cpu].node_id = node_id;
+ node_id = (node_id + 1) % emu_nodes;
+ }
+ print_node_to_core_map();
+ initialized = 1;
+}
+
/*
* Transfer physical topology into a NUMA topology and modify CPU masks
* according to the NUMA topology.
@@ -442,7 +462,7 @@ static void emu_update_cpu_topology(void)
toptree_free(phys);
toptree_to_topology(numa);
toptree_free(numa);
- print_node_to_core_map();
+ pin_all_possible_cpus();
}
/*
diff --git a/arch/s390/numa/toptree.c b/arch/s390/numa/toptree.c
index 902d350d859a..26f622b1cd11 100644
--- a/arch/s390/numa/toptree.c
+++ b/arch/s390/numa/toptree.c
@@ -7,6 +7,7 @@
*/
#include <linux/kernel.h>
+#include <linux/bootmem.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/list_sort.h>
@@ -25,10 +26,14 @@
* RETURNS:
* Pointer to the new tree node or NULL on error
*/
-struct toptree *toptree_alloc(int level, int id)
+struct toptree __ref *toptree_alloc(int level, int id)
{
- struct toptree *res = kzalloc(sizeof(struct toptree), GFP_KERNEL);
+ struct toptree *res;
+ if (slab_is_available())
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ else
+ res = memblock_virt_alloc(sizeof(*res), 8);
if (!res)
return res;
@@ -65,7 +70,7 @@ static void toptree_remove(struct toptree *cand)
* cleanly using toptree_remove. Possible children are freed
* recursively. In the end @cand itself is freed.
*/
-void toptree_free(struct toptree *cand)
+void __ref toptree_free(struct toptree *cand)
{
struct toptree *child, *tmp;
@@ -73,7 +78,10 @@ void toptree_free(struct toptree *cand)
toptree_remove(cand);
toptree_for_each_child_safe(child, tmp, cand)
toptree_free(child);
- kfree(cand);
+ if (slab_is_available())
+ kfree(cand);
+ else
+ memblock_free_early((unsigned long)cand, sizeof(*cand));
}
/**
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 15ffc19c8c0c..64e1734bebb7 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -722,6 +722,11 @@ struct dev_pm_ops pcibios_pm_ops = {
static int zpci_alloc_domain(struct zpci_dev *zdev)
{
+ if (zpci_unique_uid) {
+ zdev->domain = (u16) zdev->uid;
+ return 0;
+ }
+
spin_lock(&zpci_domain_lock);
zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
if (zdev->domain == ZPCI_NR_DEVICES) {
@@ -735,6 +740,9 @@ static int zpci_alloc_domain(struct zpci_dev *zdev)
static void zpci_free_domain(struct zpci_dev *zdev)
{
+ if (zpci_unique_uid)
+ return;
+
spin_lock(&zpci_domain_lock);
clear_bit(zdev->domain, zpci_domain);
spin_unlock(&zpci_domain_lock);
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 1a4512c8544a..e3ef63b36b5a 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -22,6 +22,8 @@
#include <asm/clp.h>
#include <uapi/asm/clp.h>
+bool zpci_unique_uid;
+
static inline void zpci_err_clp(unsigned int rsp, int rc)
{
struct {
@@ -315,6 +317,7 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
goto out;
}
+ zpci_unique_uid = rrb->response.uid_checking;
WARN_ON_ONCE(rrb->response.entry_size !=
sizeof(struct clp_fh_list_entry));
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index 38993b156924..c2f786f0ea06 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -69,7 +69,7 @@ static void pci_sw_counter_show(struct seq_file *m)
int i;
for (i = 0; i < ARRAY_SIZE(pci_sw_names); i++, counter++)
- seq_printf(m, "%26s:\t%llu\n", pci_sw_names[i],
+ seq_printf(m, "%26s:\t%lu\n", pci_sw_names[i],
atomic64_read(counter));
}
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 6b2f72f523b9..1d7a9c71944a 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -181,14 +181,17 @@ static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
/*
* With zdev->tlb_refresh == 0, rpcit is not required to establish new
* translations when previously invalid translation-table entries are
- * validated. With lazy unmap, it also is skipped for previously valid
+ * validated. With lazy unmap, rpcit is skipped for previously valid
* entries, but a global rpcit is then required before any address can
* be re-used, i.e. after each iommu bitmap wrap-around.
*/
- if (!zdev->tlb_refresh &&
- (!s390_iommu_strict ||
- ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
- return 0;
+ if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
+ if (!zdev->tlb_refresh)
+ return 0;
+ } else {
+ if (!s390_iommu_strict)
+ return 0;
+ }
return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
PAGE_ALIGN(size));
@@ -257,7 +260,7 @@ static dma_addr_t dma_alloc_address(struct device *dev, int size)
spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
if (offset == -1) {
- if (!zdev->tlb_refresh && !s390_iommu_strict) {
+ if (!s390_iommu_strict) {
/* global flush before DMA addresses are reused */
if (zpci_refresh_global(zdev))
goto out_error;
@@ -292,7 +295,7 @@ static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
if (!zdev->iommu_bitmap)
goto out;
- if (zdev->tlb_refresh || s390_iommu_strict)
+ if (s390_iommu_strict)
bitmap_clear(zdev->iommu_bitmap, offset, size);
else
bitmap_set(zdev->lazy_bitmap, offset, size);
@@ -388,8 +391,6 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
return NULL;
pa = page_to_phys(page);
- memset((void *) pa, 0, size);
-
map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
if (dma_mapping_error(dev, map)) {
free_pages(pa, get_order(size));
@@ -419,6 +420,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
size_t size, dma_addr_t *handle,
enum dma_data_direction dir)
{
+ unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
dma_addr_t dma_addr_base, dma_addr;
int flags = ZPCI_PTE_VALID;
@@ -426,8 +428,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
unsigned long pa = 0;
int ret;
- size = PAGE_ALIGN(size);
- dma_addr_base = dma_alloc_address(dev, size >> PAGE_SHIFT);
+ dma_addr_base = dma_alloc_address(dev, nr_pages);
if (dma_addr_base == DMA_ERROR_CODE)
return -ENOMEM;
@@ -436,26 +437,27 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
flags |= ZPCI_TABLE_PROTECTED;
for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
- pa = page_to_phys(sg_page(s)) + s->offset;
- ret = __dma_update_trans(zdev, pa, dma_addr, s->length, flags);
+ pa = page_to_phys(sg_page(s));
+ ret = __dma_update_trans(zdev, pa, dma_addr,
+ s->offset + s->length, flags);
if (ret)
goto unmap;
- dma_addr += s->length;
+ dma_addr += s->offset + s->length;
}
ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
if (ret)
goto unmap;
*handle = dma_addr_base;
- atomic64_add(size >> PAGE_SHIFT, &zdev->mapped_pages);
+ atomic64_add(nr_pages, &zdev->mapped_pages);
return ret;
unmap:
dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
ZPCI_PTE_INVALID);
- dma_free_address(dev, dma_addr_base, size >> PAGE_SHIFT);
+ dma_free_address(dev, dma_addr_base, nr_pages);
zpci_err("map error:\n");
zpci_err_dma(ret, pa);
return ret;
@@ -564,7 +566,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
rc = -ENOMEM;
goto free_dma_table;
}
- if (!zdev->tlb_refresh && !s390_iommu_strict) {
+ if (!s390_iommu_strict) {
zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
if (!zdev->lazy_bitmap) {
rc = -ENOMEM;
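
Illustrative aside (not part of the patch): a sketch of the page-count arithmetic behind the __s390_dma_map_sg() change, assuming a single scatterlist element. An element that starts 0x800 bytes into its page with length 0x1000 spans two 4K IOMMU pages, which is why the mapping length now includes s->offset and nr_pages is taken from the aligned total size.

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_SHIFT	12
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long offset = 0x800, length = 0x1000;
		unsigned long nr_pages = PAGE_ALIGN(offset + length) >> PAGE_SHIFT;

		printf("mapping %lu bytes at page offset %#lx needs %lu pages\n",
		       length, offset, nr_pages);	/* 2 pages */
		return 0;
	}
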
diff --git a/arch/s390/tools/Makefile b/arch/s390/tools/Makefile
index 6d9814c9df2b..4b5e1e499527 100644
--- a/arch/s390/tools/Makefile
+++ b/arch/s390/tools/Makefile
@@ -9,7 +9,5 @@ define filechk_facilities.h
$(obj)/gen_facilities
endef
-$(obj)/gen_facilities.o: $(srctree)/arch/s390/tools/gen_facilities.c
-
include/generated/facilities.h: $(obj)/gen_facilities FORCE
$(call filechk,facilities.h)
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index fe4e6c910dd7..8cc53b1e6d03 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -7,13 +7,83 @@
*
*/
-#define S390_GEN_FACILITIES_C
-
#include <strings.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
-#include <asm/facilities_src.h>
+
+struct facility_def {
+ char *name;
+ int *bits;
+};
+
+static struct facility_def facility_defs[] = {
+ {
+ /*
+ * FACILITIES_ALS contains the list of facilities that are
+ * required to run a kernel that is compiled e.g. with
+ * -march=<machine>.
+ */
+ .name = "FACILITIES_ALS",
+ .bits = (int[]){
+#ifdef CONFIG_HAVE_MARCH_Z900_FEATURES
+ 0, /* N3 instructions */
+ 1, /* z/Arch mode installed */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
+ 18, /* long displacement facility */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+ 7, /* stfle */
+ 17, /* message security assist */
+ 21, /* extended-immediate facility */
+ 25, /* store clock fast */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+ 27, /* mvcos */
+ 32, /* compare and swap and store */
+ 33, /* compare and swap and store 2 */
+ 34, /* general extension facility */
+ 35, /* execute extensions */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ 45, /* fast-BCR, etc. */
+#endif
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+ 49, /* misc-instruction-extensions */
+ 52, /* interlocked facility 2 */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
+ 53, /* load-and-zero-rightmost-byte, etc. */
+#endif
+ -1 /* END */
+ }
+ },
+ {
+ .name = "FACILITIES_KVM",
+ .bits = (int[]){
+ 0, /* N3 instructions */
+ 1, /* z/Arch mode installed */
+ 2, /* z/Arch mode active */
+ 3, /* DAT-enhancement */
+ 4, /* idte segment table */
+ 5, /* idte region table */
+ 6, /* ASN-and-LX reuse */
+ 7, /* stfle */
+ 8, /* enhanced-DAT 1 */
+ 9, /* sense-running-status */
+ 10, /* conditional sske */
+ 13, /* ipte-range */
+ 14, /* nonquiescing key-setting */
+ 73, /* transactional execution */
+ 75, /* access-exception-fetch/store indication */
+ 76, /* msa extension 3 */
+ 77, /* msa extension 4 */
+ 78, /* enhanced-DAT 2 */
+ -1 /* END */
+ }
+ },
+};
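
Illustrative aside (not part of the patch): a minimal sketch of folding such a -1 terminated facility bit list into 64-bit mask words, assuming the s390 convention that facility bit 0 is the most significant bit of the first doubleword. The output format is made up; the real print_facility_list() decides how the generated header spells the constants.

	#include <stdio.h>

	static void print_mask_words(const char *name, const int *bits)
	{
		unsigned long long words[4] = { 0 };
		int i, max_word = 0;

		for (i = 0; bits[i] != -1; i++) {
			words[bits[i] / 64] |= 1ULL << (63 - (bits[i] & 63));
			if (bits[i] / 64 > max_word)
				max_word = bits[i] / 64;
		}
		printf("%s:", name);
		for (i = 0; i <= max_word; i++)
			printf(" 0x%016llx", words[i]);
		printf("\n");
	}
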
static void print_facility_list(struct facility_def *def)
{
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index accc7ca722e1..252e9fee687f 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -1,5 +1,5 @@
#
-# Makefile for the Linux/SuperH CPU-specifc backends.
+# Makefile for the Linux/SuperH CPU-specific backends.
#
obj-$(CONFIG_CPU_SH2) = sh2/
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
index f0c7025a67d1..3f8e79402d7d 100644
--- a/arch/sh/kernel/cpu/irq/Makefile
+++ b/arch/sh/kernel/cpu/irq/Makefile
@@ -1,5 +1,5 @@
#
-# Makefile for the Linux/SuperH CPU-specifc IRQ handlers.
+# Makefile for the Linux/SuperH CPU-specific IRQ handlers.
#
obj-$(CONFIG_SUPERH32) += imask.o
obj-$(CONFIG_CPU_SH5) += intc-sh5.o
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 34d9e15857c3..44163e8c3868 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -25,7 +25,7 @@ KCOV_INSTRUMENT := n
targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4
-KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
+KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ -O2
KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC)
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
cflags-$(CONFIG_X86_32) := -march=i386
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 476b574de99e..ec23d8e1297c 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -1,13 +1,17 @@
#ifndef _ASM_X86_E820_H
#define _ASM_X86_E820_H
-#ifdef CONFIG_EFI
+/*
+ * E820_X_MAX is the maximum size of the extended E820 table. The extended
+ * table may contain up to 3 extra E820 entries per possible NUMA node, so we
+ * make room for 3 * MAX_NUMNODES possible entries, beyond the standard 128.
+ * Also note that E820_X_MAX *must* be defined before we include uapi/asm/e820.h.
+ */
#include <linux/numa.h>
#define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES)
-#else /* ! CONFIG_EFI */
-#define E820_X_MAX E820MAX
-#endif
+
#include <uapi/asm/e820.h>
+
#ifndef __ASSEMBLY__
/* see comment in arch/x86/kernel/e820.c */
extern struct e820map *e820;
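
Illustrative aside (not part of the patch): with the standard E820MAX of 128 and, for example, CONFIG_NODES_SHIFT=6 (MAX_NUMNODES = 64), the now unconditional definition above yields E820_X_MAX = 128 + 3 * 64 = 320 extended entries. The node count is only a sample configuration value.

	#define E820MAX_EXAMPLE		128
	#define MAX_NUMNODES_EXAMPLE	64
	#define E820_X_MAX_EXAMPLE	(E820MAX_EXAMPLE + 3 * MAX_NUMNODES_EXAMPLE)	/* 320 */
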
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index bdde80731f49..7892530cbacf 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -191,6 +191,8 @@ enum {
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
+#define PFERR_GUEST_FINAL_BIT 32
+#define PFERR_GUEST_PAGE_BIT 33
#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
@@ -198,6 +200,13 @@ enum {
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_PK_MASK (1U << PFERR_PK_BIT)
+#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
+#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)
+
+#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK | \
+ PFERR_USER_MASK | \
+ PFERR_WRITE_MASK | \
+ PFERR_PRESENT_MASK)
/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC 0
@@ -1062,6 +1071,7 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
+bool pdptrs_changed(struct kvm_vcpu *vcpu);
int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
const void *val, int bytes);
@@ -1124,7 +1134,8 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
struct x86_emulate_ctxt;
int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
-void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
+int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, unsigned short port);
+int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
@@ -1203,7 +1214,7 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);
@@ -1358,7 +1369,8 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
-void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
+int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
int kvm_is_in_guest(void);
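
Illustrative aside (not part of the patch): the combined PFERR_NESTED_GUEST_PAGE code sets the present (bit 0), write (bit 1) and user (bit 2) bits plus the new guest-page bit 33, giving the 64-bit value 0x200000007 -- which is why kvm_mmu_page_fault() now takes a u64 error_code instead of a u32.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t err = (1ULL << 33) /* guest page */ |
			       (1ULL << 2)  /* user */       |
			       (1ULL << 1)  /* write */      |
			       (1ULL << 0); /* present */

		printf("nested guest page fault error code: %#llx\n",
		       (unsigned long long) err);	/* 0x200000007 */
		return 0;
	}
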
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index a002b07a7099..2b5b2d4b924e 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -25,6 +25,7 @@
#define VMX_H
+#include <linux/bitops.h>
#include <linux/types.h>
#include <uapi/asm/vmx.h>
@@ -60,6 +61,7 @@
*/
#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
#define SECONDARY_EXEC_ENABLE_EPT 0x00000002
+#define SECONDARY_EXEC_DESC 0x00000004
#define SECONDARY_EXEC_RDTSCP 0x00000008
#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010
#define SECONDARY_EXEC_ENABLE_VPID 0x00000020
@@ -110,6 +112,36 @@
#define VMX_MISC_SAVE_EFER_LMA 0x00000020
#define VMX_MISC_ACTIVITY_HLT 0x00000040
+static inline u32 vmx_basic_vmcs_revision_id(u64 vmx_basic)
+{
+ return vmx_basic & GENMASK_ULL(30, 0);
+}
+
+static inline u32 vmx_basic_vmcs_size(u64 vmx_basic)
+{
+ return (vmx_basic & GENMASK_ULL(44, 32)) >> 32;
+}
+
+static inline int vmx_misc_preemption_timer_rate(u64 vmx_misc)
+{
+ return vmx_misc & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
+}
+
+static inline int vmx_misc_cr3_count(u64 vmx_misc)
+{
+ return (vmx_misc & GENMASK_ULL(24, 16)) >> 16;
+}
+
+static inline int vmx_misc_max_msr(u64 vmx_misc)
+{
+ return (vmx_misc & GENMASK_ULL(27, 25)) >> 25;
+}
+
+static inline int vmx_misc_mseg_revid(u64 vmx_misc)
+{
+ return (vmx_misc & GENMASK_ULL(63, 32)) >> 32;
+}
+
/* VMCS Encodings */
enum vmcs_field {
VIRTUAL_PROCESSOR_ID = 0x00000000,
@@ -399,10 +431,11 @@ enum vmcs_field {
#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 2)
#define VMX_NR_VPIDS (1 << 16)
+#define VMX_VPID_EXTENT_INDIVIDUAL_ADDR 0
#define VMX_VPID_EXTENT_SINGLE_CONTEXT 1
#define VMX_VPID_EXTENT_ALL_CONTEXT 2
+#define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL 3
-#define VMX_EPT_EXTENT_INDIVIDUAL_ADDR 0
#define VMX_EPT_EXTENT_CONTEXT 1
#define VMX_EPT_EXTENT_GLOBAL 2
#define VMX_EPT_EXTENT_SHIFT 24
@@ -419,8 +452,10 @@ enum vmcs_field {
#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26)
#define VMX_VPID_INVVPID_BIT (1ull << 0) /* (32 - 32) */
+#define VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT (1ull << 8) /* (40 - 32) */
#define VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT (1ull << 9) /* (41 - 32) */
#define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT (1ull << 10) /* (42 - 32) */
+#define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT (1ull << 11) /* (43 - 32) */
#define VMX_EPT_DEFAULT_GAW 3
#define VMX_EPT_MAX_GAW 0x4
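
Illustrative aside (not part of the patch): how the new GENMASK_ULL based accessors decode an MSR_IA32_VMX_BASIC value. The sample MSR value is made up, and the accessors are restated locally so the sketch compiles on its own.

	#include <stdint.h>
	#include <stdio.h>

	#define GENMASK_ULL(h, l) \
		(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

	static uint32_t vmcs_revision_id(uint64_t vmx_basic)
	{
		return vmx_basic & GENMASK_ULL(30, 0);		/* bits 30:0 */
	}

	static uint32_t vmcs_size(uint64_t vmx_basic)
	{
		return (vmx_basic & GENMASK_ULL(44, 32)) >> 32;	/* bits 44:32 */
	}

	int main(void)
	{
		uint64_t vmx_basic = (0x400ULL << 32) | 0x12;	/* size 1024, revision 0x12 */

		printf("revision %#x, region size %u bytes\n",
		       vmcs_revision_id(vmx_basic), vmcs_size(vmx_basic));
		return 0;
	}
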
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index 37fee272618f..14458658e988 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -65,6 +65,8 @@
#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
#define EXIT_REASON_APIC_ACCESS 44
#define EXIT_REASON_EOI_INDUCED 45
+#define EXIT_REASON_GDTR_IDTR 46
+#define EXIT_REASON_LDTR_TR 47
#define EXIT_REASON_EPT_VIOLATION 48
#define EXIT_REASON_EPT_MISCONFIG 49
#define EXIT_REASON_INVEPT 50
@@ -113,6 +115,8 @@
{ EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \
{ EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \
{ EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \
+ { EXIT_REASON_GDTR_IDTR, "GDTR_IDTR" }, \
+ { EXIT_REASON_LDTR_TR, "LDTR_TR" }, \
{ EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \
{ EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \
{ EXIT_REASON_INVEPT, "INVEPT" }, \
@@ -129,6 +133,7 @@
{ EXIT_REASON_XRSTORS, "XRSTORS" }
#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
+#define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2
#define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
#endif /* _UAPIVMX_H */
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index de6626c18e42..be6337156502 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -934,6 +934,8 @@ static int __populate_cache_leaves(unsigned int cpu)
ci_leaf_init(this_leaf++, &id4_regs);
__cache_cpumap_setup(cpu, idx, &id4_regs);
}
+ this_cpu_ci->cpu_map_populated = true;
+
return 0;
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 132e1ec67da0..00ef43233e03 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -516,7 +516,7 @@ int mce_available(struct cpuinfo_x86 *c)
static void mce_schedule_work(void)
{
- if (!mce_gen_pool_empty() && keventd_up())
+ if (!mce_gen_pool_empty())
schedule_work(&mce_work);
}
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 0aefb626fa8f..b2d3cf1ef54a 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -16,6 +16,7 @@
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
+#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
@@ -64,6 +65,11 @@ u64 kvm_supported_xcr0(void)
#define F(x) bit(X86_FEATURE_##x)
+/* These are scattered features in cpufeatures.h. */
+#define KVM_CPUID_BIT_AVX512_4VNNIW 2
+#define KVM_CPUID_BIT_AVX512_4FMAPS 3
+#define KF(x) bit(KVM_CPUID_BIT_##x)
+
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
@@ -80,6 +86,10 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
best->ecx |= F(OSXSAVE);
}
+ best->edx &= ~F(APIC);
+ if (vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)
+ best->edx |= F(APIC);
+
if (apic) {
if (best->ecx & F(TSC_DEADLINE_TIMER))
apic->lapic_timer.timer_mode_mask = 3 << 17;
@@ -374,6 +384,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
/* cpuid 7.0.ecx*/
const u32 kvm_cpuid_7_0_ecx_x86_features = F(PKU) | 0 /*OSPKE*/;
+ /* cpuid 7.0.edx*/
+ const u32 kvm_cpuid_7_0_edx_x86_features =
+ KF(AVX512_4VNNIW) | KF(AVX512_4FMAPS);
+
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
@@ -456,12 +470,14 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
/* PKU is not yet implemented for shadow paging. */
if (!tdp_enabled)
entry->ecx &= ~F(PKU);
+ entry->edx &= kvm_cpuid_7_0_edx_x86_features;
+ entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX);
} else {
entry->ebx = 0;
entry->ecx = 0;
+ entry->edx = 0;
}
entry->eax = 0;
- entry->edx = 0;
break;
}
case 9:
@@ -861,17 +877,17 @@ void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
}
EXPORT_SYMBOL_GPL(kvm_cpuid);
-void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
+int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
- u32 function, eax, ebx, ecx, edx;
+ u32 eax, ebx, ecx, edx;
- function = eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
- kvm_x86_ops->skip_emulated_instruction(vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a3ce9d260d68..56628a44668b 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -158,9 +158,11 @@
#define Src2GS (OpGS << Src2Shift)
#define Src2Mask (OpMask << Src2Shift)
#define Mmx ((u64)1 << 40) /* MMX Vector instruction */
+#define AlignMask ((u64)7 << 41)
#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
-#define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
-#define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
+#define Unaligned ((u64)2 << 41) /* Explicitly unaligned (e.g. MOVDQU) */
+#define Avx ((u64)3 << 41) /* Advanced Vector Extensions */
+#define Aligned16 ((u64)4 << 41) /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
#define NoWrite ((u64)1 << 45) /* No writeback */
#define SrcWrite ((u64)1 << 46) /* Write back src operand */
@@ -446,6 +448,26 @@ FOP_END;
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;
+/*
+ * XXX: inoutclob user must know where the argument is being expanded.
+ * Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
+ */
+#define asm_safe(insn, inoutclob...) \
+({ \
+ int _fault = 0; \
+ \
+ asm volatile("1:" insn "\n" \
+ "2:\n" \
+ ".pushsection .fixup, \"ax\"\n" \
+ "3: movl $1, %[_fault]\n" \
+ " jmp 2b\n" \
+ ".popsection\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : [_fault] "+qm"(_fault) inoutclob ); \
+ \
+ _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
+})
+
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
enum x86_intercept intercept,
enum x86_intercept_stage stage)
@@ -632,21 +654,26 @@ static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
* depending on whether they're AVX encoded or not.
*
* Also included is CMPXCHG16B which is not a vector instruction, yet it is
- * subject to the same check.
+ * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
+ * 512 bytes of data must be aligned to a 16 byte boundary.
*/
-static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
+static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
- if (likely(size < 16))
- return false;
+ u64 alignment = ctxt->d & AlignMask;
- if (ctxt->d & Aligned)
- return true;
- else if (ctxt->d & Unaligned)
- return false;
- else if (ctxt->d & Avx)
- return false;
- else
- return true;
+ if (likely(size < 16))
+ return 1;
+
+ switch (alignment) {
+ case Unaligned:
+ case Avx:
+ return 1;
+ case Aligned16:
+ return 16;
+ case Aligned:
+ default:
+ return size;
+ }
}
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
@@ -704,7 +731,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
}
break;
}
- if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
+ if (la & (insn_alignment(ctxt, size) - 1))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
bad:
@@ -3842,6 +3869,131 @@ static int em_movsxd(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
+static int check_fxsr(struct x86_emulate_ctxt *ctxt)
+{
+ u32 eax = 1, ebx, ecx = 0, edx;
+
+ ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+ if (!(edx & FFL(FXSR)))
+ return emulate_ud(ctxt);
+
+ if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
+ return emulate_nm(ctxt);
+
+ /*
+ * Don't emulate a case that should never be hit, instead of working
+ * around a lack of fxsave64/fxrstor64 on old compilers.
+ */
+ if (ctxt->mode >= X86EMUL_MODE_PROT64)
+ return X86EMUL_UNHANDLEABLE;
+
+ return X86EMUL_CONTINUE;
+}
+
+/*
+ * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
+ * 1) 16 bit mode
+ * 2) 32 bit mode
+ * - like (1), but FIP and FDP (foo) are only 16 bit. At least Intel CPUs
+ * preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
+ * save and restore
+ * 3) 64-bit mode with REX.W prefix
+ * - like (2), but XMM 8-15 are being saved and restored
+ * 4) 64-bit mode without REX.W prefix
+ * - like (3), but FIP and FDP are 64 bit
+ *
+ * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
+ * desired result. (4) is not emulated.
+ *
+ * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
+ * and FPU DS) should match.
+ */
+static int em_fxsave(struct x86_emulate_ctxt *ctxt)
+{
+ struct fxregs_state fx_state;
+ size_t size;
+ int rc;
+
+ rc = check_fxsr(ctxt);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ ctxt->ops->get_fpu(ctxt);
+
+ rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
+
+ ctxt->ops->put_fpu(ctxt);
+
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)
+ size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]);
+ else
+ size = offsetof(struct fxregs_state, xmm_space[0]);
+
+ return segmented_write(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+}
+
+static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
+ struct fxregs_state *new)
+{
+ int rc = X86EMUL_CONTINUE;
+ struct fxregs_state old;
+
+ rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old));
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ /*
+ * 64 bit host will restore XMM 8-15, which is not correct on non-64
+ * bit guests. Load the current values in order to preserve 64 bit
+ * XMMs after fxrstor.
+ */
+#ifdef CONFIG_X86_64
+ /* XXX: accessing XMM 8-15 very awkwardly */
+ memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16);
+#endif
+
+ /*
+ * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but
+ * does save and restore MXCSR.
+ */
+ if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))
+ memcpy(new->xmm_space, old.xmm_space, 8 * 16);
+
+ return rc;
+}
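The index arithmetic above is worth spelling out: xmm_space[] is an array of 32-bit words and every XMM register occupies 16 bytes (four words), so XMM8 starts at word 8 * 16 / 4 = 32 and XMM8-15 together span 8 * 16 = 128 bytes. A sketch with a simplified stand-in struct (illustrative only, not the kernel's struct fxregs_state):

	#include <stdio.h>
	#include <string.h>

	struct fake_fxregs {
		unsigned int xmm_space[64];	/* 16 XMM registers x 16 bytes / 4 */
	};

	int main(void)
	{
		struct fake_fxregs guest = { { 0 } }, host;

		memset(&host, 0xab, sizeof(host));	/* pretend XMM8-15 hold live host data */

		/* Copy XMM8-15 (words 32..63, 128 bytes) from the host image. */
		memcpy(&guest.xmm_space[8 * 16 / 4], &host.xmm_space[8 * 16 / 4], 8 * 16);

		printf("XMM8 starts at word %d, byte offset %zu\n",
		       8 * 16 / 4, (size_t)(8 * 16 / 4) * sizeof(guest.xmm_space[0]));
		return 0;
	}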
+
+static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
+{
+ struct fxregs_state fx_state;
+ int rc;
+
+ rc = check_fxsr(ctxt);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ rc = segmented_read(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ if (fx_state.mxcsr >> 16)
+ return emulate_gp(ctxt, 0);
+
+ ctxt->ops->get_fpu(ctxt);
+
+ if (ctxt->mode < X86EMUL_MODE_PROT64)
+ rc = fxrstor_fixup(ctxt, &fx_state);
+
+ if (rc == X86EMUL_CONTINUE)
+ rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
+
+ ctxt->ops->put_fpu(ctxt);
+
+ return rc;
+}
+
static bool valid_cr(int nr)
{
switch (nr) {
@@ -4194,7 +4346,9 @@ static const struct gprefix pfx_0f_ae_7 = {
};
static const struct group_dual group15 = { {
- N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
+ I(ModRM | Aligned16, em_fxsave),
+ I(ModRM | Aligned16, em_fxrstor),
+ N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
N, N, N, N, N, N, N, N,
} };
@@ -5066,21 +5220,13 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
- bool fault = false;
+ int rc;
ctxt->ops->get_fpu(ctxt);
- asm volatile("1: fwait \n\t"
- "2: \n\t"
- ".pushsection .fixup,\"ax\" \n\t"
- "3: \n\t"
- "movb $1, %[fault] \n\t"
- "jmp 2b \n\t"
- ".popsection \n\t"
- _ASM_EXTABLE(1b, 3b)
- : [fault]"+qm"(fault));
+ rc = asm_safe("fwait");
ctxt->ops->put_fpu(ctxt);
- if (unlikely(fault))
+ if (unlikely(rc != X86EMUL_CONTINUE))
return emulate_exception(ctxt, MF_VECTOR, 0, false);
return X86EMUL_CONTINUE;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 42b1c83741c8..99cde5220e07 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -291,7 +291,7 @@ static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
return ret;
}
-int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
+static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
struct kvm_lapic_irq irq;
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 16a7134eedac..a78b445ce411 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -212,7 +212,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
*/
smp_mb();
if (atomic_dec_if_positive(&ps->pending) > 0)
- kthread_queue_work(&pit->worker, &pit->expired);
+ kthread_queue_work(pit->worker, &pit->expired);
}
void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
@@ -272,7 +272,7 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
if (atomic_read(&ps->reinject))
atomic_inc(&ps->pending);
- kthread_queue_work(&pt->worker, &pt->expired);
+ kthread_queue_work(pt->worker, &pt->expired);
if (ps->is_periodic) {
hrtimer_add_expires_ns(&ps->timer, ps->period);
@@ -667,10 +667,8 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
pid_nr = pid_vnr(pid);
put_pid(pid);
- kthread_init_worker(&pit->worker);
- pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
- "kvm-pit/%d", pid_nr);
- if (IS_ERR(pit->worker_task))
+ pit->worker = kthread_create_worker(0, "kvm-pit/%d", pid_nr);
+ if (IS_ERR(pit->worker))
goto fail_kthread;
kthread_init_work(&pit->expired, pit_do_work);
@@ -713,7 +711,7 @@ fail_register_speaker:
fail_register_pit:
mutex_unlock(&kvm->slots_lock);
kvm_pit_set_reinject(pit, false);
- kthread_stop(pit->worker_task);
+ kthread_destroy_worker(pit->worker);
fail_kthread:
kvm_free_irq_source_id(kvm, pit->irq_source_id);
fail_request:
@@ -730,8 +728,7 @@ void kvm_free_pit(struct kvm *kvm)
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
kvm_pit_set_reinject(pit, false);
hrtimer_cancel(&pit->pit_state.timer);
- kthread_flush_work(&pit->expired);
- kthread_stop(pit->worker_task);
+ kthread_destroy_worker(pit->worker);
kvm_free_irq_source_id(kvm, pit->irq_source_id);
kfree(pit);
}
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index 2f5af0798326..600bee9dcbbd 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -44,8 +44,7 @@ struct kvm_pit {
struct kvm_kpit_state pit_state;
int irq_source_id;
struct kvm_irq_mask_notifier mask_notifier;
- struct kthread_worker worker;
- struct task_struct *worker_task;
+ struct kthread_worker *worker;
struct kthread_work expired;
};
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 6f69340f9fa3..34a66b2d47e6 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -342,9 +342,11 @@ void __kvm_apic_update_irr(u32 *pir, void *regs)
u32 i, pir_val;
for (i = 0; i <= 7; i++) {
- pir_val = xchg(&pir[i], 0);
- if (pir_val)
+ pir_val = READ_ONCE(pir[i]);
+ if (pir_val) {
+ pir_val = xchg(&pir[i], 0);
*((u32 *)(regs + APIC_IRR + i * 0x10)) |= pir_val;
+ }
}
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
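The loop change above is a classic "read before atomic" optimization: load the PIR word first and only pay for the xchg (which always takes the cache line exclusive) when something was actually posted. A stand-alone sketch of the pattern with C11 atomics (illustrative; not the kernel's READ_ONCE()/xchg() macros):

	#include <stdatomic.h>
	#include <stdint.h>

	static void drain_pir_word(_Atomic uint32_t *pir_word, uint32_t *irr_word)
	{
		uint32_t val = atomic_load_explicit(pir_word, memory_order_relaxed);

		if (val) {
			/* Something was posted: consume it atomically. */
			val = atomic_exchange(pir_word, 0);
			*irr_word |= val;
		}
		/* val == 0: skip the exchange and avoid bouncing the cache line. */
	}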
@@ -1090,7 +1092,7 @@ static void apic_send_ipi(struct kvm_lapic *apic)
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
- ktime_t remaining;
+ ktime_t remaining, now;
s64 ns;
u32 tmcct;
@@ -1101,7 +1103,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
apic->lapic_timer.period == 0)
return 0;
- remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
+ now = ktime_get();
+ remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
if (ktime_to_ns(remaining) < 0)
remaining = ktime_set(0, 0);
@@ -1332,7 +1335,7 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
local_irq_save(flags);
- now = apic->lapic_timer.timer.base->get_time();
+ now = ktime_get();
guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
if (likely(tscdeadline > guest_tsc)) {
ns = (tscdeadline - guest_tsc) * 1000000ULL;
@@ -1347,6 +1350,79 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
local_irq_restore(flags);
}
+static void start_sw_period(struct kvm_lapic *apic)
+{
+ if (!apic->lapic_timer.period)
+ return;
+
+ if (apic_lvtt_oneshot(apic) &&
+ ktime_after(ktime_get(),
+ apic->lapic_timer.target_expiration)) {
+ apic_timer_expired(apic);
+ return;
+ }
+
+ hrtimer_start(&apic->lapic_timer.timer,
+ apic->lapic_timer.target_expiration,
+ HRTIMER_MODE_ABS_PINNED);
+}
+
+static bool set_target_expiration(struct kvm_lapic *apic)
+{
+ ktime_t now;
+ u64 tscl = rdtsc();
+
+ now = ktime_get();
+ apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
+ * APIC_BUS_CYCLE_NS * apic->divide_count;
+
+ if (!apic->lapic_timer.period)
+ return false;
+
+ /*
+ * Do not allow the guest to program periodic timers with small
+ * interval, since the hrtimers are not throttled by the host
+ * scheduler.
+ */
+ if (apic_lvtt_period(apic)) {
+ s64 min_period = min_timer_period_us * 1000LL;
+
+ if (apic->lapic_timer.period < min_period) {
+ pr_info_ratelimited(
+ "kvm: vcpu %i: requested %lld ns "
+ "lapic timer period limited to %lld ns\n",
+ apic->vcpu->vcpu_id,
+ apic->lapic_timer.period, min_period);
+ apic->lapic_timer.period = min_period;
+ }
+ }
+
+ apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
+ PRIx64 ", "
+ "timer initial count 0x%x, period %lldns, "
+ "expire @ 0x%016" PRIx64 ".\n", __func__,
+ APIC_BUS_CYCLE_NS, ktime_to_ns(now),
+ kvm_lapic_get_reg(apic, APIC_TMICT),
+ apic->lapic_timer.period,
+ ktime_to_ns(ktime_add_ns(now,
+ apic->lapic_timer.period)));
+
+ apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
+ nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
+ apic->lapic_timer.target_expiration = ktime_add_ns(now, apic->lapic_timer.period);
+
+ return true;
+}
+
+static void advance_periodic_target_expiration(struct kvm_lapic *apic)
+{
+ apic->lapic_timer.tscdeadline +=
+ nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
+ apic->lapic_timer.target_expiration =
+ ktime_add_ns(apic->lapic_timer.target_expiration,
+ apic->lapic_timer.period);
+}
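set_target_expiration() and advance_periodic_target_expiration() keep two deadlines in lock-step: an absolute ktime (target_expiration) and a TSC deadline, each advanced by exactly one period per expiry so a periodic timer does not drift. A toy model of the bookkeeping (plain C; the 2.1 GHz TSC rate and the nanosecond values are invented, and nsec_to_cycles() below is a simplified stand-in for the kernel helper):

	#include <stdint.h>
	#include <stdio.h>

	#define TSC_KHZ 2100000ULL			/* assumed 2.1 GHz guest TSC */

	static uint64_t nsec_to_cycles(uint64_t ns)
	{
		return ns * TSC_KHZ / 1000000ULL;	/* simplified, no scaling */
	}

	int main(void)
	{
		uint64_t now_ns = 1000000000ULL, tsc_now = 5000000000ULL;
		uint64_t period_ns = 250000ULL;		/* 250 us periodic timer */
		uint64_t target = now_ns + period_ns;	/* set_target_expiration() */
		uint64_t tscdeadline = tsc_now + nsec_to_cycles(period_ns);

		for (int i = 0; i < 3; i++) {
			printf("expiry %d: target=%llu ns, tscdeadline=%llu\n", i,
			       (unsigned long long)target,
			       (unsigned long long)tscdeadline);
			/* advance_periodic_target_expiration() */
			target += period_ns;
			tscdeadline += nsec_to_cycles(period_ns);
		}
		return 0;
	}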
+
bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
{
if (!lapic_in_kernel(vcpu))
@@ -1356,52 +1432,59 @@ bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
-static void cancel_hv_tscdeadline(struct kvm_lapic *apic)
+static void cancel_hv_timer(struct kvm_lapic *apic)
{
kvm_x86_ops->cancel_hv_timer(apic->vcpu);
apic->lapic_timer.hv_timer_in_use = false;
}
-void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
-{
- struct kvm_lapic *apic = vcpu->arch.apic;
-
- WARN_ON(!apic->lapic_timer.hv_timer_in_use);
- WARN_ON(swait_active(&vcpu->wq));
- cancel_hv_tscdeadline(apic);
- apic_timer_expired(apic);
-}
-EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
-
-static bool start_hv_tscdeadline(struct kvm_lapic *apic)
+static bool start_hv_timer(struct kvm_lapic *apic)
{
u64 tscdeadline = apic->lapic_timer.tscdeadline;
- if (atomic_read(&apic->lapic_timer.pending) ||
+ if ((atomic_read(&apic->lapic_timer.pending) &&
+ !apic_lvtt_period(apic)) ||
kvm_x86_ops->set_hv_timer(apic->vcpu, tscdeadline)) {
if (apic->lapic_timer.hv_timer_in_use)
- cancel_hv_tscdeadline(apic);
+ cancel_hv_timer(apic);
} else {
apic->lapic_timer.hv_timer_in_use = true;
hrtimer_cancel(&apic->lapic_timer.timer);
/* In case the sw timer triggered in the window */
- if (atomic_read(&apic->lapic_timer.pending))
- cancel_hv_tscdeadline(apic);
+ if (atomic_read(&apic->lapic_timer.pending) &&
+ !apic_lvtt_period(apic))
+ cancel_hv_timer(apic);
}
trace_kvm_hv_timer_state(apic->vcpu->vcpu_id,
apic->lapic_timer.hv_timer_in_use);
return apic->lapic_timer.hv_timer_in_use;
}
+void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ WARN_ON(!apic->lapic_timer.hv_timer_in_use);
+ WARN_ON(swait_active(&vcpu->wq));
+ cancel_hv_timer(apic);
+ apic_timer_expired(apic);
+
+ if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
+ advance_periodic_target_expiration(apic);
+ if (!start_hv_timer(apic))
+ start_sw_period(apic);
+ }
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
+
void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
WARN_ON(apic->lapic_timer.hv_timer_in_use);
- if (apic_lvtt_tscdeadline(apic))
- start_hv_tscdeadline(apic);
+ start_hv_timer(apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
@@ -1413,62 +1496,28 @@ void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
if (!apic->lapic_timer.hv_timer_in_use)
return;
- cancel_hv_tscdeadline(apic);
+ cancel_hv_timer(apic);
if (atomic_read(&apic->lapic_timer.pending))
return;
- start_sw_tscdeadline(apic);
+ if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
+ start_sw_period(apic);
+ else if (apic_lvtt_tscdeadline(apic))
+ start_sw_tscdeadline(apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
static void start_apic_timer(struct kvm_lapic *apic)
{
- ktime_t now;
-
atomic_set(&apic->lapic_timer.pending, 0);
if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
- /* lapic timer in oneshot or periodic mode */
- now = apic->lapic_timer.timer.base->get_time();
- apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
- * APIC_BUS_CYCLE_NS * apic->divide_count;
-
- if (!apic->lapic_timer.period)
- return;
- /*
- * Do not allow the guest to program periodic timers with small
- * interval, since the hrtimers are not throttled by the host
- * scheduler.
- */
- if (apic_lvtt_period(apic)) {
- s64 min_period = min_timer_period_us * 1000LL;
-
- if (apic->lapic_timer.period < min_period) {
- pr_info_ratelimited(
- "kvm: vcpu %i: requested %lld ns "
- "lapic timer period limited to %lld ns\n",
- apic->vcpu->vcpu_id,
- apic->lapic_timer.period, min_period);
- apic->lapic_timer.period = min_period;
- }
- }
-
- hrtimer_start(&apic->lapic_timer.timer,
- ktime_add_ns(now, apic->lapic_timer.period),
- HRTIMER_MODE_ABS_PINNED);
-
- apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
- PRIx64 ", "
- "timer initial count 0x%x, period %lldns, "
- "expire @ 0x%016" PRIx64 ".\n", __func__,
- APIC_BUS_CYCLE_NS, ktime_to_ns(now),
- kvm_lapic_get_reg(apic, APIC_TMICT),
- apic->lapic_timer.period,
- ktime_to_ns(ktime_add_ns(now,
- apic->lapic_timer.period)));
+ if (set_target_expiration(apic) &&
+ !(kvm_x86_ops->set_hv_timer && start_hv_timer(apic)))
+ start_sw_period(apic);
} else if (apic_lvtt_tscdeadline(apic)) {
- if (!(kvm_x86_ops->set_hv_timer && start_hv_tscdeadline(apic)))
+ if (!(kvm_x86_ops->set_hv_timer && start_hv_timer(apic)))
start_sw_tscdeadline(apic);
}
}
@@ -1701,13 +1750,22 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)
* LAPIC interface
*----------------------------------------------------------------------
*/
+u64 kvm_get_lapic_target_expiration_tsc(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ if (!lapic_in_kernel(vcpu))
+ return 0;
+
+ return apic->lapic_timer.tscdeadline;
+}
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
- if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
- apic_lvtt_period(apic))
+ if (!lapic_in_kernel(vcpu) ||
+ !apic_lvtt_tscdeadline(apic))
return 0;
return apic->lapic_timer.tscdeadline;
@@ -1748,14 +1806,17 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
u64 old_value = vcpu->arch.apic_base;
struct kvm_lapic *apic = vcpu->arch.apic;
- if (!apic) {
+ if (!apic)
value |= MSR_IA32_APICBASE_BSP;
- vcpu->arch.apic_base = value;
- return;
- }
vcpu->arch.apic_base = value;
+ if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
+ kvm_update_cpuid(vcpu);
+
+ if (!apic)
+ return;
+
/* update jump label if enable bit changes */
if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
if (value & MSR_IA32_APICBASE_ENABLE) {
@@ -1909,6 +1970,7 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
apic_timer_expired(apic);
if (lapic_is_periodic(apic)) {
+ advance_periodic_target_expiration(apic);
hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
return HRTIMER_RESTART;
} else
@@ -1993,6 +2055,10 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
kvm_apic_local_deliver(apic, APIC_LVTT);
if (apic_lvtt_tscdeadline(apic))
apic->lapic_timer.tscdeadline = 0;
+ if (apic_lvtt_oneshot(apic)) {
+ apic->lapic_timer.tscdeadline = 0;
+ apic->lapic_timer.target_expiration = ktime_set(0, 0);
+ }
atomic_set(&apic->lapic_timer.pending, 0);
}
}
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index f60d01c29d51..e0c80233b3e1 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -15,6 +15,7 @@
struct kvm_timer {
struct hrtimer timer;
s64 period; /* unit: ns */
+ ktime_t target_expiration;
u32 timer_mode;
u32 timer_mode_mask;
u64 tscdeadline;
@@ -85,6 +86,7 @@ int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
+u64 kvm_get_lapic_target_expiration_tsc(struct kvm_vcpu *vcpu);
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 87c5880ba3b7..7012de4a1fed 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1660,17 +1660,9 @@ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
* This has some overhead, but not as much as the cost of swapping
* out actively used pages or breaking up actively used hugepages.
*/
- if (!shadow_accessed_mask) {
- /*
- * We are holding the kvm->mmu_lock, and we are blowing up
- * shadow PTEs. MMU notifier consumers need to be kept at bay.
- * This is correct as long as we don't decouple the mmu_lock
- * protected regions (like invalidate_range_start|end does).
- */
- kvm->mmu_notifier_seq++;
+ if (!shadow_accessed_mask)
return kvm_handle_hva_range(kvm, start, end, 0,
kvm_unmap_rmapp);
- }
return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
}
@@ -4509,7 +4501,7 @@ static void make_mmu_pages_available(struct kvm_vcpu *vcpu)
kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
}
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
void *insn, int insn_len)
{
int r, emulation_type = EMULTYPE_RETRY;
@@ -4528,12 +4520,28 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
return r;
}
- r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
+ r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
+ false);
if (r < 0)
return r;
if (!r)
return 1;
+ /*
+ * Before emulating the instruction, check if the error code
+ * was due to a RO violation while translating the guest page.
+ * This can occur when using nested virtualization with nested
+ * paging in both guests. If true, we simply unprotect the page
+ * and resume the guest.
+ *
+ * Note: AMD only (since it supports the PFERR_GUEST_PAGE_MASK used
+ * in PFERR_NESTED_GUEST_PAGE)
+ */
+ if (error_code == PFERR_NESTED_GUEST_PAGE) {
+ kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
+ return 1;
+ }
+
if (mmio_info_in_cache(vcpu, cr2, direct))
emulation_type = 0;
emulate:
@@ -4967,7 +4975,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
* zap all shadow pages.
*/
if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
- printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
+ kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
kvm_mmu_invalidate_zap_all_pages(kvm);
}
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 8ca1eca5038d..08a4d3ab3455 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2074,7 +2074,7 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
static int pf_interception(struct vcpu_svm *svm)
{
u64 fault_address = svm->vmcb->control.exit_info_2;
- u32 error_code;
+ u64 error_code;
int r = 1;
switch (svm->apf_reason) {
@@ -2270,7 +2270,7 @@ static int io_interception(struct vcpu_svm *svm)
++svm->vcpu.stat.io_exits;
string = (io_info & SVM_IOIO_STR_MASK) != 0;
in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
- if (string || in)
+ if (string)
return emulate_instruction(vcpu, 0) == EMULATE_DONE;
port = io_info >> 16;
@@ -2278,7 +2278,8 @@ static int io_interception(struct vcpu_svm *svm)
svm->next_rip = svm->vmcb->control.exit_info_2;
skip_emulated_instruction(&svm->vcpu);
- return kvm_fast_pio_out(vcpu, size, port);
+ return in ? kvm_fast_pio_in(vcpu, size, port)
+ : kvm_fast_pio_out(vcpu, size, port);
}
static int nmi_interception(struct vcpu_svm *svm)
@@ -3150,8 +3151,7 @@ static int skinit_interception(struct vcpu_svm *svm)
static int wbinvd_interception(struct vcpu_svm *svm)
{
- kvm_emulate_wbinvd(&svm->vcpu);
- return 1;
+ return kvm_emulate_wbinvd(&svm->vcpu);
}
static int xsetbv_interception(struct vcpu_svm *svm)
@@ -3238,8 +3238,7 @@ static int task_switch_interception(struct vcpu_svm *svm)
static int cpuid_interception(struct vcpu_svm *svm)
{
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
- kvm_emulate_cpuid(&svm->vcpu);
- return 1;
+ return kvm_emulate_cpuid(&svm->vcpu);
}
static int iret_interception(struct vcpu_svm *svm)
@@ -3275,9 +3274,7 @@ static int rdpmc_interception(struct vcpu_svm *svm)
return emulate_on_interception(svm);
err = kvm_rdpmc(&svm->vcpu);
- kvm_complete_insn_gp(&svm->vcpu, err);
-
- return 1;
+ return kvm_complete_insn_gp(&svm->vcpu, err);
}
static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
@@ -3374,9 +3371,7 @@ static int cr_interception(struct vcpu_svm *svm)
}
kvm_register_write(&svm->vcpu, reg, val);
}
- kvm_complete_insn_gp(&svm->vcpu, err);
-
- return 1;
+ return kvm_complete_insn_gp(&svm->vcpu, err);
}
static int dr_interception(struct vcpu_svm *svm)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3980da515fd0..aae43c6f2472 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -132,6 +132,22 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
+#define VMX_VPID_EXTENT_SUPPORTED_MASK \
+ (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
+ VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
+ VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
+ VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
+
+/*
+ * Hyper-V requires all of these, so mark them as supported even though
+ * they are just treated the same as all-context.
+ */
+#define VMX_VPID_EXTENT_SUPPORTED_MASK \
+ (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
+ VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
+ VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
+ VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
+
/*
* These 2 parameters are used to config the controls for Pause-Loop Exiting:
* ple_gap: upper bound on the amount of time between two successive
@@ -446,23 +462,31 @@ struct nested_vmx {
u16 vpid02;
u16 last_vpid;
+ /*
+ * We only store the "true" versions of the VMX capability MSRs. We
+ * generate the "non-true" versions by setting the must-be-1 bits
+ * according to the SDM.
+ */
u32 nested_vmx_procbased_ctls_low;
u32 nested_vmx_procbased_ctls_high;
- u32 nested_vmx_true_procbased_ctls_low;
u32 nested_vmx_secondary_ctls_low;
u32 nested_vmx_secondary_ctls_high;
u32 nested_vmx_pinbased_ctls_low;
u32 nested_vmx_pinbased_ctls_high;
u32 nested_vmx_exit_ctls_low;
u32 nested_vmx_exit_ctls_high;
- u32 nested_vmx_true_exit_ctls_low;
u32 nested_vmx_entry_ctls_low;
u32 nested_vmx_entry_ctls_high;
- u32 nested_vmx_true_entry_ctls_low;
u32 nested_vmx_misc_low;
u32 nested_vmx_misc_high;
u32 nested_vmx_ept_caps;
u32 nested_vmx_vpid_caps;
+ u64 nested_vmx_basic;
+ u64 nested_vmx_cr0_fixed0;
+ u64 nested_vmx_cr0_fixed1;
+ u64 nested_vmx_cr4_fixed0;
+ u64 nested_vmx_cr4_fixed1;
+ u64 nested_vmx_vmcs_enum;
};
#define POSTED_INTR_ON 0
@@ -520,6 +544,12 @@ static inline void pi_set_sn(struct pi_desc *pi_desc)
(unsigned long *)&pi_desc->control);
}
+static inline void pi_clear_on(struct pi_desc *pi_desc)
+{
+ clear_bit(POSTED_INTR_ON,
+ (unsigned long *)&pi_desc->control);
+}
+
static inline int pi_test_on(struct pi_desc *pi_desc)
{
return test_bit(POSTED_INTR_ON,
@@ -920,16 +950,32 @@ static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
-static unsigned long *vmx_io_bitmap_a;
-static unsigned long *vmx_io_bitmap_b;
-static unsigned long *vmx_msr_bitmap_legacy;
-static unsigned long *vmx_msr_bitmap_longmode;
-static unsigned long *vmx_msr_bitmap_legacy_x2apic;
-static unsigned long *vmx_msr_bitmap_longmode_x2apic;
-static unsigned long *vmx_msr_bitmap_legacy_x2apic_apicv_inactive;
-static unsigned long *vmx_msr_bitmap_longmode_x2apic_apicv_inactive;
-static unsigned long *vmx_vmread_bitmap;
-static unsigned long *vmx_vmwrite_bitmap;
+enum {
+ VMX_IO_BITMAP_A,
+ VMX_IO_BITMAP_B,
+ VMX_MSR_BITMAP_LEGACY,
+ VMX_MSR_BITMAP_LONGMODE,
+ VMX_MSR_BITMAP_LEGACY_X2APIC_APICV,
+ VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV,
+ VMX_MSR_BITMAP_LEGACY_X2APIC,
+ VMX_MSR_BITMAP_LONGMODE_X2APIC,
+ VMX_VMREAD_BITMAP,
+ VMX_VMWRITE_BITMAP,
+ VMX_BITMAP_NR
+};
+
+static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
+
+#define vmx_io_bitmap_a (vmx_bitmap[VMX_IO_BITMAP_A])
+#define vmx_io_bitmap_b (vmx_bitmap[VMX_IO_BITMAP_B])
+#define vmx_msr_bitmap_legacy (vmx_bitmap[VMX_MSR_BITMAP_LEGACY])
+#define vmx_msr_bitmap_longmode (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE])
+#define vmx_msr_bitmap_legacy_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC_APICV])
+#define vmx_msr_bitmap_longmode_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV])
+#define vmx_msr_bitmap_legacy_x2apic (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC])
+#define vmx_msr_bitmap_longmode_x2apic (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC])
+#define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
+#define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])
static bool cpu_has_load_ia32_efer;
static bool cpu_has_load_perf_global_ctrl;
@@ -2523,14 +2569,14 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) {
if (is_long_mode(vcpu))
- msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
+ msr_bitmap = vmx_msr_bitmap_longmode_x2apic_apicv;
else
- msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
+ msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv;
} else {
if (is_long_mode(vcpu))
- msr_bitmap = vmx_msr_bitmap_longmode_x2apic_apicv_inactive;
+ msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
else
- msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv_inactive;
+ msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
}
} else {
if (is_long_mode(vcpu))
@@ -2706,9 +2752,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
/* We support free control of debug control saving. */
- vmx->nested.nested_vmx_true_exit_ctls_low =
- vmx->nested.nested_vmx_exit_ctls_low &
- ~VM_EXIT_SAVE_DEBUG_CONTROLS;
+ vmx->nested.nested_vmx_exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
/* entry controls */
rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
@@ -2727,9 +2771,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
/* We support free control of debug control loading. */
- vmx->nested.nested_vmx_true_entry_ctls_low =
- vmx->nested.nested_vmx_entry_ctls_low &
- ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
+ vmx->nested.nested_vmx_entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
/* cpu-based controls */
rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
@@ -2762,8 +2804,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
CPU_BASED_USE_MSR_BITMAPS;
/* We support free control of CR3 access interception. */
- vmx->nested.nested_vmx_true_procbased_ctls_low =
- vmx->nested.nested_vmx_procbased_ctls_low &
+ vmx->nested.nested_vmx_procbased_ctls_low &=
~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
/* secondary cpu-based controls */
@@ -2774,6 +2815,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
vmx->nested.nested_vmx_secondary_ctls_high &=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_RDTSCP |
+ SECONDARY_EXEC_DESC |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
SECONDARY_EXEC_ENABLE_VPID |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
@@ -2805,8 +2847,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
*/
if (enable_vpid)
vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
- VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |
- VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
+ VMX_VPID_EXTENT_SUPPORTED_MASK;
else
vmx->nested.nested_vmx_vpid_caps = 0;
@@ -2823,14 +2864,52 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
VMX_MISC_ACTIVITY_HLT;
vmx->nested.nested_vmx_misc_high = 0;
+
+ /*
+ * This MSR reports some information about VMX support. We
+ * should return information about the VMX we emulate for the
+ * guest, and the VMCS structure we give it - not about the
+ * VMX support of the underlying hardware.
+ */
+ vmx->nested.nested_vmx_basic =
+ VMCS12_REVISION |
+ VMX_BASIC_TRUE_CTLS |
+ ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
+ (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
+
+ if (cpu_has_vmx_basic_inout())
+ vmx->nested.nested_vmx_basic |= VMX_BASIC_INOUT;
+
+ /*
+ * These MSRs specify bits which the guest must keep fixed on
+ * while L1 is in VMXON mode (in L1's root mode, or running an L2).
+ * We picked the standard core2 setting.
+ */
+#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
+#define VMXON_CR4_ALWAYSON X86_CR4_VMXE
+ vmx->nested.nested_vmx_cr0_fixed0 = VMXON_CR0_ALWAYSON;
+ vmx->nested.nested_vmx_cr4_fixed0 = VMXON_CR4_ALWAYSON;
+
+ /* These MSRs specify bits which the guest must keep fixed off. */
+ rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx->nested.nested_vmx_cr0_fixed1);
+ rdmsrl(MSR_IA32_VMX_CR4_FIXED1, vmx->nested.nested_vmx_cr4_fixed1);
+
+ /* highest index: VMX_PREEMPTION_TIMER_VALUE */
+ vmx->nested.nested_vmx_vmcs_enum = 0x2e;
+}
+
+/*
+ * if fixed0[i] == 1: val[i] must be 1
+ * if fixed1[i] == 0: val[i] must be 0
+ */
+static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
+{
+ return ((val & fixed1) | fixed0) == val;
}
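fixed_bits_valid() encodes the FIXED0/FIXED1 rule from the SDM: every bit set in fixed0 must be 1 in the value, and every bit clear in fixed1 must be 0. A worked example with invented CR0 numbers (PE is bit 0, NE bit 5, PG bit 31, matching the VMXON_CR0_ALWAYSON choice above):

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	static bool fixed_bits_valid(uint64_t val, uint64_t fixed0, uint64_t fixed1)
	{
		return ((val & fixed1) | fixed0) == val;
	}

	int main(void)
	{
		uint64_t cr0_fixed0 = 0x80000021ULL;	/* PG | NE | PE must be 1 */
		uint64_t cr0_fixed1 = 0xffffffffULL;	/* bits above 31 must stay 0 */

		assert(fixed_bits_valid(0x80000031ULL, cr0_fixed0, cr0_fixed1));   /* ok */
		assert(!fixed_bits_valid(0x80000011ULL, cr0_fixed0, cr0_fixed1));  /* NE clear */
		assert(!fixed_bits_valid(0x180000031ULL, cr0_fixed0, cr0_fixed1)); /* bit 32 set */
		return 0;
	}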
static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
- /*
- * Bits 0 in high must be 0, and bits 1 in low must be 1.
- */
- return ((control & high) | low) == control;
+ return fixed_bits_valid(control, low, high);
}
static inline u64 vmx_control_msr(u32 low, u32 high)
@@ -2838,87 +2917,285 @@ static inline u64 vmx_control_msr(u32 low, u32 high)
return low | ((u64)high << 32);
}
-/* Returns 0 on success, non-0 otherwise. */
-static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
+static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
+{
+ superset &= mask;
+ subset &= mask;
+
+ return (superset | subset) == superset;
+}
+
+static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
+{
+ const u64 feature_and_reserved =
+ /* feature (except bit 48; see below) */
+ BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
+ /* reserved */
+ BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
+ u64 vmx_basic = vmx->nested.nested_vmx_basic;
+
+ if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
+ return -EINVAL;
+
+ /*
+ * KVM does not emulate a version of VMX that constrains physical
+ * addresses of VMX structures (e.g. VMCS) to 32-bits.
+ */
+ if (data & BIT_ULL(48))
+ return -EINVAL;
+
+ if (vmx_basic_vmcs_revision_id(vmx_basic) !=
+ vmx_basic_vmcs_revision_id(data))
+ return -EINVAL;
+
+ if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
+ return -EINVAL;
+
+ vmx->nested.nested_vmx_basic = data;
+ return 0;
+}
+
+static int
+vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
+{
+ u64 supported;
+ u32 *lowp, *highp;
+
+ switch (msr_index) {
+ case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
+ lowp = &vmx->nested.nested_vmx_pinbased_ctls_low;
+ highp = &vmx->nested.nested_vmx_pinbased_ctls_high;
+ break;
+ case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
+ lowp = &vmx->nested.nested_vmx_procbased_ctls_low;
+ highp = &vmx->nested.nested_vmx_procbased_ctls_high;
+ break;
+ case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+ lowp = &vmx->nested.nested_vmx_exit_ctls_low;
+ highp = &vmx->nested.nested_vmx_exit_ctls_high;
+ break;
+ case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+ lowp = &vmx->nested.nested_vmx_entry_ctls_low;
+ highp = &vmx->nested.nested_vmx_entry_ctls_high;
+ break;
+ case MSR_IA32_VMX_PROCBASED_CTLS2:
+ lowp = &vmx->nested.nested_vmx_secondary_ctls_low;
+ highp = &vmx->nested.nested_vmx_secondary_ctls_high;
+ break;
+ default:
+ BUG();
+ }
+
+ supported = vmx_control_msr(*lowp, *highp);
+
+ /* Check must-be-1 bits are still 1. */
+ if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
+ return -EINVAL;
+
+ /* Check must-be-0 bits are still 0. */
+ if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
+ return -EINVAL;
+
+ *lowp = data;
+ *highp = data >> 32;
+ return 0;
+}
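The two subset checks above follow from the layout of the VMX capability MSRs: bits 31:0 report the allowed 0-settings (a 1 there means the control must be 1) and bits 63:32 the allowed 1-settings (a 0 there means the control must stay 0). A worked example with invented control values (GENMASK_ULL below is a local stand-in for the kernel macro):

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define GENMASK_ULL(h, l) \
		((~0ULL << (l)) & (~0ULL >> (63 - (h))))

	static bool is_bitwise_subset(uint64_t superset, uint64_t subset, uint64_t mask)
	{
		superset &= mask;
		subset &= mask;
		return (superset | subset) == superset;
	}

	int main(void)
	{
		/* Invented capability: bits 1 and 4 must be 1; bits 1, 4, 9 may be 1. */
		uint64_t supported = 0x12ULL | (0x212ULL << 32);
		uint64_t good = 0x12ULL | (0x12ULL << 32);	/* keeps 1 and 4, adds nothing */
		uint64_t bad  = 0x02ULL | (0x412ULL << 32);	/* drops bit 4, allows bit 10 */

		/* Must-be-1 bits (low half) still 1 in the restored value? */
		assert(is_bitwise_subset(good, supported, GENMASK_ULL(31, 0)));
		assert(!is_bitwise_subset(bad, supported, GENMASK_ULL(31, 0)));

		/* Must-be-0 bits (high half) still 0, i.e. no new allowed-1 bits? */
		assert(is_bitwise_subset(supported, good, GENMASK_ULL(63, 32)));
		assert(!is_bitwise_subset(supported, bad, GENMASK_ULL(63, 32)));
		return 0;
	}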
+
+static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
+{
+ const u64 feature_and_reserved_bits =
+ /* feature */
+ BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
+ BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
+ /* reserved */
+ GENMASK_ULL(13, 9) | BIT_ULL(31);
+ u64 vmx_misc;
+
+ vmx_misc = vmx_control_msr(vmx->nested.nested_vmx_misc_low,
+ vmx->nested.nested_vmx_misc_high);
+
+ if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
+ return -EINVAL;
+
+ if ((vmx->nested.nested_vmx_pinbased_ctls_high &
+ PIN_BASED_VMX_PREEMPTION_TIMER) &&
+ vmx_misc_preemption_timer_rate(data) !=
+ vmx_misc_preemption_timer_rate(vmx_misc))
+ return -EINVAL;
+
+ if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
+ return -EINVAL;
+
+ if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
+ return -EINVAL;
+
+ if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
+ return -EINVAL;
+
+ vmx->nested.nested_vmx_misc_low = data;
+ vmx->nested.nested_vmx_misc_high = data >> 32;
+ return 0;
+}
+
+static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
+{
+ u64 vmx_ept_vpid_cap;
+
+ vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.nested_vmx_ept_caps,
+ vmx->nested.nested_vmx_vpid_caps);
+
+ /* Every bit is either reserved or a feature bit. */
+ if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
+ return -EINVAL;
+
+ vmx->nested.nested_vmx_ept_caps = data;
+ vmx->nested.nested_vmx_vpid_caps = data >> 32;
+ return 0;
+}
+
+static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
+{
+ u64 *msr;
+
+ switch (msr_index) {
+ case MSR_IA32_VMX_CR0_FIXED0:
+ msr = &vmx->nested.nested_vmx_cr0_fixed0;
+ break;
+ case MSR_IA32_VMX_CR4_FIXED0:
+ msr = &vmx->nested.nested_vmx_cr4_fixed0;
+ break;
+ default:
+ BUG();
+ }
+
+ /*
+ * 1 bits (which indicate bits that are "must-be-1" during VMX operation)
+ * must be 1 in the restored value.
+ */
+ if (!is_bitwise_subset(data, *msr, -1ULL))
+ return -EINVAL;
+
+ *msr = data;
+ return 0;
+}
+
+/*
+ * Called when userspace is restoring VMX MSRs.
+ *
+ * Returns 0 on success, non-0 otherwise.
+ */
+static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
switch (msr_index) {
case MSR_IA32_VMX_BASIC:
+ return vmx_restore_vmx_basic(vmx, data);
+ case MSR_IA32_VMX_PINBASED_CTLS:
+ case MSR_IA32_VMX_PROCBASED_CTLS:
+ case MSR_IA32_VMX_EXIT_CTLS:
+ case MSR_IA32_VMX_ENTRY_CTLS:
+ /*
+ * The "non-true" VMX capability MSRs are generated from the
+ * "true" MSRs, so we do not support restoring them directly.
+ *
+ * If userspace wants to emulate VMX_BASIC[55]=0, userspace
+ * should restore the "true" MSRs with the must-be-1 bits
+ * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
+ * DEFAULT SETTINGS".
+ */
+ return -EINVAL;
+ case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
+ case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
+ case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+ case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+ case MSR_IA32_VMX_PROCBASED_CTLS2:
+ return vmx_restore_control_msr(vmx, msr_index, data);
+ case MSR_IA32_VMX_MISC:
+ return vmx_restore_vmx_misc(vmx, data);
+ case MSR_IA32_VMX_CR0_FIXED0:
+ case MSR_IA32_VMX_CR4_FIXED0:
+ return vmx_restore_fixed0_msr(vmx, msr_index, data);
+ case MSR_IA32_VMX_CR0_FIXED1:
+ case MSR_IA32_VMX_CR4_FIXED1:
+ /*
+ * These MSRs are generated based on the vCPU's CPUID, so we
+ * do not support restoring them directly.
+ */
+ return -EINVAL;
+ case MSR_IA32_VMX_EPT_VPID_CAP:
+ return vmx_restore_vmx_ept_vpid_cap(vmx, data);
+ case MSR_IA32_VMX_VMCS_ENUM:
+ vmx->nested.nested_vmx_vmcs_enum = data;
+ return 0;
+ default:
/*
- * This MSR reports some information about VMX support. We
- * should return information about the VMX we emulate for the
- * guest, and the VMCS structure we give it - not about the
- * VMX support of the underlying hardware.
+ * The rest of the VMX capability MSRs do not support restore.
*/
- *pdata = VMCS12_REVISION | VMX_BASIC_TRUE_CTLS |
- ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
- (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
- if (cpu_has_vmx_basic_inout())
- *pdata |= VMX_BASIC_INOUT;
+ return -EINVAL;
+ }
+}
+
+/* Returns 0 on success, non-0 otherwise. */
+static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ switch (msr_index) {
+ case MSR_IA32_VMX_BASIC:
+ *pdata = vmx->nested.nested_vmx_basic;
break;
case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
case MSR_IA32_VMX_PINBASED_CTLS:
*pdata = vmx_control_msr(
vmx->nested.nested_vmx_pinbased_ctls_low,
vmx->nested.nested_vmx_pinbased_ctls_high);
+ if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
+ *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
break;
case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
- *pdata = vmx_control_msr(
- vmx->nested.nested_vmx_true_procbased_ctls_low,
- vmx->nested.nested_vmx_procbased_ctls_high);
- break;
case MSR_IA32_VMX_PROCBASED_CTLS:
*pdata = vmx_control_msr(
vmx->nested.nested_vmx_procbased_ctls_low,
vmx->nested.nested_vmx_procbased_ctls_high);
+ if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
+ *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
break;
case MSR_IA32_VMX_TRUE_EXIT_CTLS:
- *pdata = vmx_control_msr(
- vmx->nested.nested_vmx_true_exit_ctls_low,
- vmx->nested.nested_vmx_exit_ctls_high);
- break;
case MSR_IA32_VMX_EXIT_CTLS:
*pdata = vmx_control_msr(
vmx->nested.nested_vmx_exit_ctls_low,
vmx->nested.nested_vmx_exit_ctls_high);
+ if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
+ *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
break;
case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
- *pdata = vmx_control_msr(
- vmx->nested.nested_vmx_true_entry_ctls_low,
- vmx->nested.nested_vmx_entry_ctls_high);
- break;
case MSR_IA32_VMX_ENTRY_CTLS:
*pdata = vmx_control_msr(
vmx->nested.nested_vmx_entry_ctls_low,
vmx->nested.nested_vmx_entry_ctls_high);
+ if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
+ *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
break;
case MSR_IA32_VMX_MISC:
*pdata = vmx_control_msr(
vmx->nested.nested_vmx_misc_low,
vmx->nested.nested_vmx_misc_high);
break;
- /*
- * These MSRs specify bits which the guest must keep fixed (on or off)
- * while L1 is in VMXON mode (in L1's root mode, or running an L2).
- * We picked the standard core2 setting.
- */
-#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
-#define VMXON_CR4_ALWAYSON X86_CR4_VMXE
case MSR_IA32_VMX_CR0_FIXED0:
- *pdata = VMXON_CR0_ALWAYSON;
+ *pdata = vmx->nested.nested_vmx_cr0_fixed0;
break;
case MSR_IA32_VMX_CR0_FIXED1:
- *pdata = -1ULL;
+ *pdata = vmx->nested.nested_vmx_cr0_fixed1;
break;
case MSR_IA32_VMX_CR4_FIXED0:
- *pdata = VMXON_CR4_ALWAYSON;
+ *pdata = vmx->nested.nested_vmx_cr4_fixed0;
break;
case MSR_IA32_VMX_CR4_FIXED1:
- *pdata = -1ULL;
+ *pdata = vmx->nested.nested_vmx_cr4_fixed1;
break;
case MSR_IA32_VMX_VMCS_ENUM:
- *pdata = 0x2e; /* highest index: VMX_PREEMPTION_TIMER_VALUE */
+ *pdata = vmx->nested.nested_vmx_vmcs_enum;
break;
case MSR_IA32_VMX_PROCBASED_CTLS2:
*pdata = vmx_control_msr(
@@ -3101,7 +3378,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vmx_leave_nested(vcpu);
break;
case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
- return 1; /* they are read-only */
+ if (!msr_info->host_initiated)
+ return 1; /* they are read-only */
+ if (!nested_vmx_allowed(vcpu))
+ return 1;
+ return vmx_set_vmx_msr(vcpu, msr_index, data);
case MSR_IA32_XSS:
if (!vmx_xsaves_supported())
return 1;
@@ -3863,6 +4144,40 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
(unsigned long *)&vcpu->arch.regs_dirty);
}
+static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed0;
+ u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed1;
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high &
+ SECONDARY_EXEC_UNRESTRICTED_GUEST &&
+ nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
+ fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
+
+ return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed0;
+ u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed1;
+
+ return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr4_fixed0;
+ u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr4_fixed1;
+
+ return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+/* No difference in the restrictions on guest and host CR4 in VMX operation. */
+#define nested_guest_cr4_valid nested_cr4_valid
+#define nested_host_cr4_valid nested_cr4_valid
+
static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
@@ -3991,8 +4306,8 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
if (!nested_vmx_allowed(vcpu))
return 1;
}
- if (to_vmx(vcpu)->nested.vmxon &&
- ((cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON))
+
+ if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
return 1;
vcpu->arch.cr4 = cr4;
@@ -4569,41 +4884,6 @@ static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
}
}
-static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
- u32 msr, int type)
-{
- int f = sizeof(unsigned long);
-
- if (!cpu_has_vmx_msr_bitmap())
- return;
-
- /*
- * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
- * have the write-low and read-high bitmap offsets the wrong way round.
- * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
- */
- if (msr <= 0x1fff) {
- if (type & MSR_TYPE_R)
- /* read-low */
- __set_bit(msr, msr_bitmap + 0x000 / f);
-
- if (type & MSR_TYPE_W)
- /* write-low */
- __set_bit(msr, msr_bitmap + 0x800 / f);
-
- } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
- msr &= 0x1fff;
- if (type & MSR_TYPE_R)
- /* read-high */
- __set_bit(msr, msr_bitmap + 0x400 / f);
-
- if (type & MSR_TYPE_W)
- /* write-high */
- __set_bit(msr, msr_bitmap + 0xc00 / f);
-
- }
-}
-
/*
* If a msr is allowed by L0, we should check whether it is allowed by L1.
* The corresponding bit will be cleared unless both of L0 and L1 allow it.
@@ -4659,48 +4939,18 @@ static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
msr, MSR_TYPE_R | MSR_TYPE_W);
}
-static void vmx_enable_intercept_msr_read_x2apic(u32 msr, bool apicv_active)
-{
- if (apicv_active) {
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
- msr, MSR_TYPE_R);
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
- msr, MSR_TYPE_R);
- } else {
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
- msr, MSR_TYPE_R);
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
- msr, MSR_TYPE_R);
- }
-}
-
-static void vmx_disable_intercept_msr_read_x2apic(u32 msr, bool apicv_active)
+static void vmx_disable_intercept_msr_x2apic(u32 msr, int type, bool apicv_active)
{
if (apicv_active) {
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
- msr, MSR_TYPE_R);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
- msr, MSR_TYPE_R);
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv,
+ msr, type);
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv,
+ msr, type);
} else {
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
- msr, MSR_TYPE_R);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
- msr, MSR_TYPE_R);
- }
-}
-
-static void vmx_disable_intercept_msr_write_x2apic(u32 msr, bool apicv_active)
-{
- if (apicv_active) {
__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
- msr, MSR_TYPE_W);
+ msr, type);
__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
- msr, MSR_TYPE_W);
- } else {
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
- msr, MSR_TYPE_W);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
- msr, MSR_TYPE_W);
+ msr, type);
}
}
@@ -4822,9 +5072,15 @@ static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (!pi_test_and_clear_on(&vmx->pi_desc))
+ if (!pi_test_on(&vmx->pi_desc))
return;
+ pi_clear_on(&vmx->pi_desc);
+ /*
+ * IOMMU can write to PIR.ON, so the barrier matters even on UP.
+ * But on x86 this is just a compiler barrier anyway.
+ */
+ smp_mb__after_atomic();
kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
}
@@ -5583,7 +5839,7 @@ static int handle_triple_fault(struct kvm_vcpu *vcpu)
static int handle_io(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification;
- int size, in, string;
+ int size, in, string, ret;
unsigned port;
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -5597,9 +5853,14 @@ static int handle_io(struct kvm_vcpu *vcpu)
port = exit_qualification >> 16;
size = (exit_qualification & 7) + 1;
- skip_emulated_instruction(vcpu);
- return kvm_fast_pio_out(vcpu, size, port);
+ ret = kvm_skip_emulated_instruction(vcpu);
+
+ /*
+ * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
+ * KVM_EXIT_DEBUG here.
+ */
+ return kvm_fast_pio_out(vcpu, size, port) && ret;
}
static void
@@ -5613,18 +5874,6 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
hypercall[2] = 0xc1;
}
-static bool nested_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
-{
- unsigned long always_on = VMXON_CR0_ALWAYSON;
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
- if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high &
- SECONDARY_EXEC_UNRESTRICTED_GUEST &&
- nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
- always_on &= ~(X86_CR0_PE | X86_CR0_PG);
- return (val & always_on) == always_on;
-}
-
/* called to set cr0 as appropriate for a mov-to-cr0 exit. */
static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
{
@@ -5643,7 +5892,7 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
val = (val & ~vmcs12->cr0_guest_host_mask) |
(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
- if (!nested_cr0_valid(vcpu, val))
+ if (!nested_guest_cr0_valid(vcpu, val))
return 1;
if (kvm_set_cr0(vcpu, val))
@@ -5652,8 +5901,9 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
return 0;
} else {
if (to_vmx(vcpu)->nested.vmxon &&
- ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
+ !nested_host_cr0_valid(vcpu, val))
return 1;
+
return kvm_set_cr0(vcpu, val);
}
}
@@ -5697,6 +5947,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
int cr;
int reg;
int err;
+ int ret;
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
cr = exit_qualification & 15;
@@ -5708,25 +5959,27 @@ static int handle_cr(struct kvm_vcpu *vcpu)
switch (cr) {
case 0:
err = handle_set_cr0(vcpu, val);
- kvm_complete_insn_gp(vcpu, err);
- return 1;
+ return kvm_complete_insn_gp(vcpu, err);
case 3:
err = kvm_set_cr3(vcpu, val);
- kvm_complete_insn_gp(vcpu, err);
- return 1;
+ return kvm_complete_insn_gp(vcpu, err);
case 4:
err = handle_set_cr4(vcpu, val);
- kvm_complete_insn_gp(vcpu, err);
- return 1;
+ return kvm_complete_insn_gp(vcpu, err);
case 8: {
u8 cr8_prev = kvm_get_cr8(vcpu);
u8 cr8 = (u8)val;
err = kvm_set_cr8(vcpu, cr8);
- kvm_complete_insn_gp(vcpu, err);
+ ret = kvm_complete_insn_gp(vcpu, err);
if (lapic_in_kernel(vcpu))
- return 1;
+ return ret;
if (cr8_prev <= cr8)
- return 1;
+ return ret;
+ /*
+ * TODO: we might be squashing a
+ * KVM_GUESTDBG_SINGLESTEP-triggered
+ * KVM_EXIT_DEBUG here.
+ */
vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
return 0;
}
@@ -5735,23 +5988,20 @@ static int handle_cr(struct kvm_vcpu *vcpu)
case 2: /* clts */
handle_clts(vcpu);
trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
- skip_emulated_instruction(vcpu);
vmx_fpu_activate(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
case 1: /*mov from cr*/
switch (cr) {
case 3:
val = kvm_read_cr3(vcpu);
kvm_register_write(vcpu, reg, val);
trace_kvm_cr_read(cr, val);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
case 8:
val = kvm_get_cr8(vcpu);
kvm_register_write(vcpu, reg, val);
trace_kvm_cr_read(cr, val);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
break;
case 3: /* lmsw */
@@ -5759,8 +6009,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
kvm_lmsw(vcpu, val);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
default:
break;
}
@@ -5831,8 +6080,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
return 1;
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
@@ -5864,8 +6112,7 @@ static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
static int handle_cpuid(struct kvm_vcpu *vcpu)
{
- kvm_emulate_cpuid(vcpu);
- return 1;
+ return kvm_emulate_cpuid(vcpu);
}
static int handle_rdmsr(struct kvm_vcpu *vcpu)
@@ -5886,8 +6133,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
/* FIXME: handling of bits 32:63 of rax, rdx */
vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
static int handle_wrmsr(struct kvm_vcpu *vcpu)
@@ -5907,8 +6153,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
}
trace_kvm_msr_write(ecx, data);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
@@ -5952,8 +6197,7 @@ static int handle_invlpg(struct kvm_vcpu *vcpu)
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
kvm_mmu_invlpg(vcpu, exit_qualification);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
static int handle_rdpmc(struct kvm_vcpu *vcpu)
@@ -5961,15 +6205,12 @@ static int handle_rdpmc(struct kvm_vcpu *vcpu)
int err;
err = kvm_rdpmc(vcpu);
- kvm_complete_insn_gp(vcpu, err);
-
- return 1;
+ return kvm_complete_insn_gp(vcpu, err);
}
static int handle_wbinvd(struct kvm_vcpu *vcpu)
{
- kvm_emulate_wbinvd(vcpu);
- return 1;
+ return kvm_emulate_wbinvd(vcpu);
}
static int handle_xsetbv(struct kvm_vcpu *vcpu)
@@ -5978,20 +6219,20 @@ static int handle_xsetbv(struct kvm_vcpu *vcpu)
u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
if (kvm_set_xcr(vcpu, index, new_bv) == 0)
- skip_emulated_instruction(vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
return 1;
}
static int handle_xsaves(struct kvm_vcpu *vcpu)
{
- skip_emulated_instruction(vcpu);
+ kvm_skip_emulated_instruction(vcpu);
WARN(1, "this should never happen\n");
return 1;
}
static int handle_xrstors(struct kvm_vcpu *vcpu)
{
- skip_emulated_instruction(vcpu);
+ kvm_skip_emulated_instruction(vcpu);
WARN(1, "this should never happen\n");
return 1;
}
@@ -6012,8 +6253,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
(offset == APIC_EOI)) {
kvm_lapic_set_eoi(vcpu);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
}
return emulate_instruction(vcpu, 0) == EMULATE_DONE;
@@ -6161,9 +6401,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
- skip_emulated_instruction(vcpu);
trace_kvm_fast_mmio(gpa);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
ret = handle_mmio_page_fault(vcpu, gpa, true);
@@ -6348,50 +6587,13 @@ static __init int hardware_setup(void)
for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
kvm_define_shared_msr(i, vmx_msr_index[i]);
- vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_io_bitmap_a)
- return r;
+ for (i = 0; i < VMX_BITMAP_NR; i++) {
+ vmx_bitmap[i] = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_bitmap[i])
+ goto out;
+ }
vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_io_bitmap_b)
- goto out;
-
- vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_legacy)
- goto out1;
-
- vmx_msr_bitmap_legacy_x2apic =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_legacy_x2apic)
- goto out2;
-
- vmx_msr_bitmap_legacy_x2apic_apicv_inactive =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_legacy_x2apic_apicv_inactive)
- goto out3;
-
- vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_longmode)
- goto out4;
-
- vmx_msr_bitmap_longmode_x2apic =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_longmode_x2apic)
- goto out5;
-
- vmx_msr_bitmap_longmode_x2apic_apicv_inactive =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_longmode_x2apic_apicv_inactive)
- goto out6;
-
- vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_vmread_bitmap)
- goto out7;
-
- vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_vmwrite_bitmap)
- goto out8;
-
memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
@@ -6409,7 +6611,7 @@ static __init int hardware_setup(void)
if (setup_vmcs_config(&vmcs_config) < 0) {
r = -EIO;
- goto out9;
+ goto out;
}
if (boot_cpu_has(X86_FEATURE_NX))
@@ -6472,39 +6674,34 @@ static __init int hardware_setup(void)
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
- memcpy(vmx_msr_bitmap_legacy_x2apic,
+ memcpy(vmx_msr_bitmap_legacy_x2apic_apicv,
vmx_msr_bitmap_legacy, PAGE_SIZE);
- memcpy(vmx_msr_bitmap_longmode_x2apic,
+ memcpy(vmx_msr_bitmap_longmode_x2apic_apicv,
vmx_msr_bitmap_longmode, PAGE_SIZE);
- memcpy(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
+ memcpy(vmx_msr_bitmap_legacy_x2apic,
vmx_msr_bitmap_legacy, PAGE_SIZE);
- memcpy(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
+ memcpy(vmx_msr_bitmap_longmode_x2apic,
vmx_msr_bitmap_longmode, PAGE_SIZE);
set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
+ for (msr = 0x800; msr <= 0x8ff; msr++) {
+ if (msr == 0x839 /* TMCCT */)
+ continue;
+ vmx_disable_intercept_msr_x2apic(msr, MSR_TYPE_R, true);
+ }
+
/*
- * enable_apicv && kvm_vcpu_apicv_active()
+ * TPR reads and writes can be virtualized even if virtual interrupt
+ * delivery is not in use.
*/
- for (msr = 0x800; msr <= 0x8ff; msr++)
- vmx_disable_intercept_msr_read_x2apic(msr, true);
+ vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_W, true);
+ vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_R | MSR_TYPE_W, false);
- /* TMCCT */
- vmx_enable_intercept_msr_read_x2apic(0x839, true);
- /* TPR */
- vmx_disable_intercept_msr_write_x2apic(0x808, true);
/* EOI */
- vmx_disable_intercept_msr_write_x2apic(0x80b, true);
+ vmx_disable_intercept_msr_x2apic(0x80b, MSR_TYPE_W, true);
/* SELF-IPI */
- vmx_disable_intercept_msr_write_x2apic(0x83f, true);
-
- /*
- * (enable_apicv && !kvm_vcpu_apicv_active()) ||
- * !enable_apicv
- */
- /* TPR */
- vmx_disable_intercept_msr_read_x2apic(0x808, false);
- vmx_disable_intercept_msr_write_x2apic(0x808, false);
+ vmx_disable_intercept_msr_x2apic(0x83f, MSR_TYPE_W, true);
if (enable_ept) {
kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
@@ -6551,42 +6748,19 @@ static __init int hardware_setup(void)
return alloc_kvm_area();
-out9:
- free_page((unsigned long)vmx_vmwrite_bitmap);
-out8:
- free_page((unsigned long)vmx_vmread_bitmap);
-out7:
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic_apicv_inactive);
-out6:
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
-out5:
- free_page((unsigned long)vmx_msr_bitmap_longmode);
-out4:
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic_apicv_inactive);
-out3:
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
-out2:
- free_page((unsigned long)vmx_msr_bitmap_legacy);
-out1:
- free_page((unsigned long)vmx_io_bitmap_b);
out:
- free_page((unsigned long)vmx_io_bitmap_a);
+ for (i = 0; i < VMX_BITMAP_NR; i++)
+ free_page((unsigned long)vmx_bitmap[i]);
return r;
}
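One detail of the new error path: the single "out:" label frees every slot in vmx_bitmap[], including slots that were never allocated, which works because free_page() treats a zero address as a no-op. A user-space analogue of the same allocate-all/free-all pattern (illustrative only, relying on free(NULL) being a no-op):

	#include <stdlib.h>

	#define NR 10

	static void *bufs[NR];

	static int setup(void)
	{
		int i;

		for (i = 0; i < NR; i++) {
			bufs[i] = calloc(1, 4096);
			if (!bufs[i])
				goto out;
		}
		return 0;
	out:
		/* free(NULL) is a no-op, so freeing the whole array is safe even
		 * when only some entries were allocated. */
		for (i = 0; i < NR; i++)
			free(bufs[i]);
		return -1;
	}

	int main(void)
	{
		return setup() ? 1 : 0;
	}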
static __exit void hardware_unsetup(void)
{
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic_apicv_inactive);
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic_apicv_inactive);
- free_page((unsigned long)vmx_msr_bitmap_legacy);
- free_page((unsigned long)vmx_msr_bitmap_longmode);
- free_page((unsigned long)vmx_io_bitmap_b);
- free_page((unsigned long)vmx_io_bitmap_a);
- free_page((unsigned long)vmx_vmwrite_bitmap);
- free_page((unsigned long)vmx_vmread_bitmap);
+ int i;
+
+ for (i = 0; i < VMX_BITMAP_NR; i++)
+ free_page((unsigned long)vmx_bitmap[i]);
free_kvm_area();
}
@@ -6600,16 +6774,13 @@ static int handle_pause(struct kvm_vcpu *vcpu)
if (ple_gap)
grow_ple_window(vcpu);
- skip_emulated_instruction(vcpu);
kvm_vcpu_on_spin(vcpu);
-
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
static int handle_nop(struct kvm_vcpu *vcpu)
{
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
static int handle_mwait(struct kvm_vcpu *vcpu)
@@ -6916,8 +7087,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
*/
if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
nested_vmx_failInvalid(vcpu);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
page = nested_get_page(vcpu, vmptr);
@@ -6925,8 +7095,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
*(u32 *)kmap(page) != VMCS12_REVISION) {
nested_vmx_failInvalid(vcpu);
kunmap(page);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
kunmap(page);
vmx->nested.vmxon_ptr = vmptr;
@@ -6935,30 +7104,26 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
nested_vmx_failValid(vcpu,
VMXERR_VMCLEAR_INVALID_ADDRESS);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
if (vmptr == vmx->nested.vmxon_ptr) {
nested_vmx_failValid(vcpu,
VMXERR_VMCLEAR_VMXON_POINTER);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
break;
case EXIT_REASON_VMPTRLD:
if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_INVALID_ADDRESS);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
if (vmptr == vmx->nested.vmxon_ptr) {
nested_vmx_failValid(vcpu,
VMXERR_VMCLEAR_VMXON_POINTER);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
break;
default:
@@ -7014,8 +7179,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
if (vmx->nested.vmxon) {
nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
@@ -7055,9 +7219,8 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
vmx->nested.vmxon = true;
- skip_emulated_instruction(vcpu);
nested_vmx_succeed(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
out_shadow_vmcs:
kfree(vmx->nested.cached_vmcs12);
@@ -7176,9 +7339,8 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu))
return 1;
free_nested(to_vmx(vcpu));
- skip_emulated_instruction(vcpu);
nested_vmx_succeed(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
/* Emulate the VMCLEAR instruction */
@@ -7217,9 +7379,8 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
nested_free_vmcs02(vmx, vmptr);
- skip_emulated_instruction(vcpu);
nested_vmx_succeed(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
@@ -7417,7 +7578,6 @@ static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (vmx->nested.current_vmptr == -1ull) {
nested_vmx_failInvalid(vcpu);
- skip_emulated_instruction(vcpu);
return 0;
}
return 1;
@@ -7431,17 +7591,18 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
gva_t gva = 0;
- if (!nested_vmx_check_permission(vcpu) ||
- !nested_vmx_check_vmcs12(vcpu))
+ if (!nested_vmx_check_permission(vcpu))
return 1;
+ if (!nested_vmx_check_vmcs12(vcpu))
+ return kvm_skip_emulated_instruction(vcpu);
+
/* Decode instruction info and find the field to read */
field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
/* Read the field, zero-extended to a u64 field_value */
if (vmcs12_read_any(vcpu, field, &field_value) < 0) {
nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
/*
* Now copy part of this value to register or memory, as requested.
@@ -7461,8 +7622,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
}
nested_vmx_succeed(vcpu);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
@@ -7481,10 +7641,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
u64 field_value = 0;
struct x86_exception e;
- if (!nested_vmx_check_permission(vcpu) ||
- !nested_vmx_check_vmcs12(vcpu))
+ if (!nested_vmx_check_permission(vcpu))
return 1;
+ if (!nested_vmx_check_vmcs12(vcpu))
+ return kvm_skip_emulated_instruction(vcpu);
+
if (vmx_instruction_info & (1u << 10))
field_value = kvm_register_readl(vcpu,
(((vmx_instruction_info) >> 3) & 0xf));
@@ -7504,19 +7666,16 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
if (vmcs_field_readonly(field)) {
nested_vmx_failValid(vcpu,
VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
if (vmcs12_write_any(vcpu, field, field_value) < 0) {
nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
nested_vmx_succeed(vcpu);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
/* Emulate the VMPTRLD instruction */
@@ -7537,8 +7696,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
page = nested_get_page(vcpu, vmptr);
if (page == NULL) {
nested_vmx_failInvalid(vcpu);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
new_vmcs12 = kmap(page);
if (new_vmcs12->revision_id != VMCS12_REVISION) {
@@ -7546,8 +7704,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
nested_release_page_clean(page);
nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
nested_release_vmcs12(vmx);
@@ -7571,8 +7728,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
}
nested_vmx_succeed(vcpu);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
/* Emulate the VMPTRST instruction */
@@ -7597,8 +7753,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
return 1;
}
nested_vmx_succeed(vcpu);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
/* Emulate the INVEPT instruction */
@@ -7636,8 +7791,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
if (type >= 32 || !(types & (1 << type))) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
/* According to the Intel VMX instruction reference, the memory
@@ -7668,8 +7822,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
break;
}
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
static int handle_invvpid(struct kvm_vcpu *vcpu)
@@ -7694,13 +7847,13 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
- types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7;
+ types = (vmx->nested.nested_vmx_vpid_caps &
+ VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
if (type >= 32 || !(types & (1 << type))) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
/* according to the intel vmx instruction reference, the memory
@@ -7716,23 +7869,26 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
}
switch (type) {
+ case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
case VMX_VPID_EXTENT_SINGLE_CONTEXT:
- /*
- * Old versions of KVM use the single-context version so we
- * have to support it; just treat it the same as all-context.
- */
+ case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
+ if (!vpid) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+ break;
case VMX_VPID_EXTENT_ALL_CONTEXT:
- __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
- nested_vmx_succeed(vcpu);
break;
default:
- /* Trap individual address invalidation invvpid calls */
- BUG_ON(1);
- break;
+ WARN_ON_ONCE(1);
+ return kvm_skip_emulated_instruction(vcpu);
}
- skip_emulated_instruction(vcpu);
- return 1;
+ __vmx_flush_tlb(vcpu, vmx->nested.vpid02);
+ nested_vmx_succeed(vcpu);
+
+ return kvm_skip_emulated_instruction(vcpu);
}
static int handle_pml_full(struct kvm_vcpu *vcpu)
@@ -8071,6 +8227,8 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
case EXIT_REASON_IO_INSTRUCTION:
return nested_vmx_exit_handled_io(vcpu, vmcs12);
+ case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
case EXIT_REASON_MSR_READ:
case EXIT_REASON_MSR_WRITE:
return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
@@ -8620,11 +8778,6 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
register void *__sp asm(_ASM_SP);
- /*
- * If external interrupt exists, IF bit is set in rflags/eflags on the
- * interrupt stack frame, and interrupt will be enabled on a return
- * from interrupt handler.
- */
if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
== (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
unsigned int vector;
@@ -8809,7 +8962,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
msrs[i].host);
}
-void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
+static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u64 tscl;
@@ -9279,6 +9432,50 @@ static void vmcs_set_secondary_exec_control(u32 new_ctl)
(new_ctl & ~mask) | (cur_ctl & mask));
}
+/*
+ * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits
+ * (indicating "allowed-1") if they are supported in the guest's CPUID.
+ */
+static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct kvm_cpuid_entry2 *entry;
+
+ vmx->nested.nested_vmx_cr0_fixed1 = 0xffffffff;
+ vmx->nested.nested_vmx_cr4_fixed1 = X86_CR4_PCE;
+
+#define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do { \
+ if (entry && (entry->_reg & (_cpuid_mask))) \
+ vmx->nested.nested_vmx_cr4_fixed1 |= (_cr4_mask); \
+} while (0)
+
+ entry = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+ cr4_fixed1_update(X86_CR4_VME, edx, bit(X86_FEATURE_VME));
+ cr4_fixed1_update(X86_CR4_PVI, edx, bit(X86_FEATURE_VME));
+ cr4_fixed1_update(X86_CR4_TSD, edx, bit(X86_FEATURE_TSC));
+ cr4_fixed1_update(X86_CR4_DE, edx, bit(X86_FEATURE_DE));
+ cr4_fixed1_update(X86_CR4_PSE, edx, bit(X86_FEATURE_PSE));
+ cr4_fixed1_update(X86_CR4_PAE, edx, bit(X86_FEATURE_PAE));
+ cr4_fixed1_update(X86_CR4_MCE, edx, bit(X86_FEATURE_MCE));
+ cr4_fixed1_update(X86_CR4_PGE, edx, bit(X86_FEATURE_PGE));
+ cr4_fixed1_update(X86_CR4_OSFXSR, edx, bit(X86_FEATURE_FXSR));
+ cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, bit(X86_FEATURE_XMM));
+ cr4_fixed1_update(X86_CR4_VMXE, ecx, bit(X86_FEATURE_VMX));
+ cr4_fixed1_update(X86_CR4_SMXE, ecx, bit(X86_FEATURE_SMX));
+ cr4_fixed1_update(X86_CR4_PCIDE, ecx, bit(X86_FEATURE_PCID));
+ cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, bit(X86_FEATURE_XSAVE));
+
+ entry = kvm_find_cpuid_entry(vcpu, 0x7, 0);
+ cr4_fixed1_update(X86_CR4_FSGSBASE, ebx, bit(X86_FEATURE_FSGSBASE));
+ cr4_fixed1_update(X86_CR4_SMEP, ebx, bit(X86_FEATURE_SMEP));
+ cr4_fixed1_update(X86_CR4_SMAP, ebx, bit(X86_FEATURE_SMAP));
+ cr4_fixed1_update(X86_CR4_PKE, ecx, bit(X86_FEATURE_PKU));
+ /* TODO: Use X86_CR4_UMIP and X86_FEATURE_UMIP macros */
+ cr4_fixed1_update(bit(11), ecx, bit(2));
+
+#undef cr4_fixed1_update
+}
+
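
As a reading aid for the helper macro above, one invocation, cr4_fixed1_update(X86_CR4_SMEP, ebx, bit(X86_FEATURE_SMEP)), expands to roughly the following fragment (identifiers as used in the patch), so CR4.SMEP is only advertised as allowed-1 when CPUID.(EAX=7,ECX=0):EBX reports SMEP for the guest:

	if (entry && (entry->ebx & bit(X86_FEATURE_SMEP)))
		vmx->nested.nested_vmx_cr4_fixed1 |= X86_CR4_SMEP;
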
static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
@@ -9320,6 +9517,9 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
else
to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+
+ if (nested_vmx_allowed(vcpu))
+ nested_vmx_cr_fixed1_bits_update(vcpu);
}
static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
@@ -9774,6 +9974,49 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
return 0;
}
+static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ unsigned long invalid_mask;
+
+ invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
+ return (val & invalid_mask) == 0;
+}
+
+/*
+ * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are
+ * emulating VM entry into a guest with EPT enabled.
+ * Returns 0 on success, 1 on failure. Invalid state exit qualification code
+ * is assigned to entry_failure_code on failure.
+ */
+static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
+ unsigned long *entry_failure_code)
+{
+ if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
+ if (!nested_cr3_valid(vcpu, cr3)) {
+ *entry_failure_code = ENTRY_FAIL_DEFAULT;
+ return 1;
+ }
+
+ /*
+ * If PAE paging and EPT are both on, CR3 is not used by the CPU and
+ * must not be dereferenced.
+ */
+ if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) &&
+ !nested_ept) {
+ if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
+ *entry_failure_code = ENTRY_FAIL_PDPTE;
+ return 1;
+ }
+ }
+
+ vcpu->arch.cr3 = cr3;
+ __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+ }
+
+ kvm_mmu_reset_context(vcpu);
+ return 0;
+}
+
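
A standalone sketch of the MAXPHYADDR mask test that nested_cr3_valid() performs; the 36-bit width and the CR3 values are made-up illustration, not taken from the patch:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		unsigned int maxphyaddr = 36;                /* hypothetical cpuid_maxphyaddr() result */
		uint64_t invalid_mask = ~0ULL << maxphyaddr; /* bits a 36-bit CPU cannot address */
		uint64_t good_cr3 = 0x0000000123456000ULL;   /* fits below bit 36 */
		uint64_t bad_cr3  = 0x0000100000000000ULL;   /* bit 44 set, rejected */

		printf("good: %d\n", (good_cr3 & invalid_mask) == 0);  /* 1 */
		printf("bad:  %d\n", (bad_cr3  & invalid_mask) == 0);  /* 0 */
		return 0;
	}
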
/*
* prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
* L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
@@ -9782,11 +10025,15 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
* needs. In addition to modifying the active vmcs (which is vmcs02), this
* function also has additional necessary side-effects, like setting various
* vcpu->arch fields.
+ * Returns 0 on success, 1 on failure. Invalid state exit qualification code
+ * is assigned to entry_failure_code on failure.
*/
-static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ unsigned long *entry_failure_code)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 exec_control;
+ bool nested_ept_enabled = false;
vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
@@ -9951,6 +10198,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs12->guest_intr_status);
}
+ nested_ept_enabled = (exec_control & SECONDARY_EXEC_ENABLE_EPT) != 0;
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
}
@@ -9964,6 +10212,15 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmx_set_constant_host_state(vmx);
/*
+ * Set the MSR load/store lists to match L0's settings.
+ */
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+ vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+ vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+
+ /*
* HOST_RSP is normally set correctly in vmx_vcpu_run() just before
* entry, but only if the current (host) sp changed from the value
* we wrote last (vmx->host_rsp). This cache is no longer relevant
@@ -10069,15 +10326,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
nested_ept_init_mmu_context(vcpu);
}
- if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
- vcpu->arch.efer = vmcs12->guest_ia32_efer;
- else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
- vcpu->arch.efer |= (EFER_LMA | EFER_LME);
- else
- vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
- /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
- vmx_set_efer(vcpu, vcpu->arch.efer);
-
/*
* This sets GUEST_CR0 to vmcs12->guest_cr0, with possibly a modified
* TS bit (for lazy fpu) and bits which we consider mandatory enabled.
@@ -10092,8 +10340,20 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmx_set_cr4(vcpu, vmcs12->guest_cr4);
vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
- /* shadow page tables on either EPT or shadow page tables */
- kvm_set_cr3(vcpu, vmcs12->guest_cr3);
+ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
+ vcpu->arch.efer = vmcs12->guest_ia32_efer;
+ else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
+ vcpu->arch.efer |= (EFER_LMA | EFER_LME);
+ else
+ vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
+ /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
+ vmx_set_efer(vcpu, vcpu->arch.efer);
+
+ /* Shadow page tables on either EPT or shadow page tables. */
+ if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled,
+ entry_failure_code))
+ return 1;
+
kvm_mmu_reset_context(vcpu);
if (!enable_ept)
@@ -10111,6 +10371,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
+ return 0;
}
/*
@@ -10125,12 +10386,14 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
struct loaded_vmcs *vmcs02;
bool ia32e;
u32 msr_entry_idx;
+ unsigned long exit_qualification;
- if (!nested_vmx_check_permission(vcpu) ||
- !nested_vmx_check_vmcs12(vcpu))
+ if (!nested_vmx_check_permission(vcpu))
return 1;
- skip_emulated_instruction(vcpu);
+ if (!nested_vmx_check_vmcs12(vcpu))
+ goto out;
+
vmcs12 = get_vmcs12(vcpu);
if (enable_shadow_vmcs)
@@ -10150,37 +10413,37 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
nested_vmx_failValid(vcpu,
launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
: VMXERR_VMRESUME_NONLAUNCHED_VMCS);
- return 1;
+ goto out;
}
if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) {
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
- return 1;
+ goto out;
}
if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
- return 1;
+ goto out;
}
if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) {
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
- return 1;
+ goto out;
}
if (nested_vmx_check_apicv_controls(vcpu, vmcs12)) {
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
- return 1;
+ goto out;
}
if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) {
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
- return 1;
+ goto out;
}
if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
- vmx->nested.nested_vmx_true_procbased_ctls_low,
+ vmx->nested.nested_vmx_procbased_ctls_low,
vmx->nested.nested_vmx_procbased_ctls_high) ||
!vmx_control_verify(vmcs12->secondary_vm_exec_control,
vmx->nested.nested_vmx_secondary_ctls_low,
@@ -10189,33 +10452,34 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
vmx->nested.nested_vmx_pinbased_ctls_low,
vmx->nested.nested_vmx_pinbased_ctls_high) ||
!vmx_control_verify(vmcs12->vm_exit_controls,
- vmx->nested.nested_vmx_true_exit_ctls_low,
+ vmx->nested.nested_vmx_exit_ctls_low,
vmx->nested.nested_vmx_exit_ctls_high) ||
!vmx_control_verify(vmcs12->vm_entry_controls,
- vmx->nested.nested_vmx_true_entry_ctls_low,
+ vmx->nested.nested_vmx_entry_ctls_low,
vmx->nested.nested_vmx_entry_ctls_high))
{
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
- return 1;
+ goto out;
}
- if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
- ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
+ if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
+ !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
+ !nested_cr3_valid(vcpu, vmcs12->host_cr3)) {
nested_vmx_failValid(vcpu,
VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
- return 1;
+ goto out;
}
- if (!nested_cr0_valid(vcpu, vmcs12->guest_cr0) ||
- ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
+ if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
+ !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
- return 1;
+ goto out;
}
if (vmcs12->vmcs_link_pointer != -1ull) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
- return 1;
+ goto out;
}
/*
@@ -10235,7 +10499,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
- return 1;
+ goto out;
}
}
@@ -10253,7 +10517,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
- return 1;
+ goto out;
}
}
@@ -10266,6 +10530,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
if (!vmcs02)
return -ENOMEM;
+ /*
+ * After this point, the trap flag no longer triggers a singlestep trap
+ * on the vm entry instructions. Don't call
+ * kvm_skip_emulated_instruction.
+ */
+ skip_emulated_instruction(vcpu);
enter_guest_mode(vcpu);
if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
@@ -10280,7 +10550,13 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
vmx_segment_cache_clear(vmx);
- prepare_vmcs02(vcpu, vmcs12);
+ if (prepare_vmcs02(vcpu, vmcs12, &exit_qualification)) {
+ leave_guest_mode(vcpu);
+ vmx_load_vmcs01(vcpu);
+ nested_vmx_entry_failure(vcpu, vmcs12,
+ EXIT_REASON_INVALID_STATE, exit_qualification);
+ return 1;
+ }
msr_entry_idx = nested_vmx_load_msr(vcpu,
vmcs12->vm_entry_msr_load_addr,
@@ -10307,6 +10583,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
* the success flag) when L2 exits (see nested_vmx_vmexit()).
*/
return 1;
+
+out:
+ return kvm_skip_emulated_instruction(vcpu);
}
/*
@@ -10612,6 +10891,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
struct kvm_segment seg;
+ unsigned long entry_failure_code;
if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
vcpu->arch.efer = vmcs12->host_ia32_efer;
@@ -10649,8 +10929,12 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
nested_ept_uninit_mmu_context(vcpu);
- kvm_set_cr3(vcpu, vmcs12->host_cr3);
- kvm_mmu_reset_context(vcpu);
+ /*
+ * Only PDPTE load can fail as the value of cr3 was checked on entry and
+ * couldn't have changed.
+ */
+ if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
+ nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
if (!enable_ept)
vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
@@ -10751,6 +11035,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ u32 vm_inst_error = 0;
/* trying to cancel vmlaunch/vmresume is a bug */
WARN_ON_ONCE(vmx->nested.nested_run_pending);
@@ -10763,6 +11048,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vmcs12->vm_exit_msr_store_count))
nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
+ if (unlikely(vmx->fail))
+ vm_inst_error = vmcs_read32(VM_INSTRUCTION_ERROR);
+
vmx_load_vmcs01(vcpu);
if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
@@ -10791,6 +11079,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
load_vmcs12_host_state(vcpu, vmcs12);
/* Update any VMCS fields that might have changed while L2 ran */
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
if (vmx->hv_deadline_tsc == -1)
vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
@@ -10839,7 +11129,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
*/
if (unlikely(vmx->fail)) {
vmx->fail = 0;
- nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR));
+ nested_vmx_failValid(vcpu, vm_inst_error);
} else
nested_vmx_succeed(vcpu);
if (enable_shadow_vmcs)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2912684ff902..1f0d2383f5ee 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -434,12 +434,14 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);
-void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
+int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
if (err)
kvm_inject_gp(vcpu, 0);
else
- kvm_x86_ops->skip_emulated_instruction(vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
+
+ return 1;
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
@@ -573,7 +575,7 @@ out:
}
EXPORT_SYMBOL_GPL(load_pdptrs);
-static bool pdptrs_changed(struct kvm_vcpu *vcpu)
+bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
bool changed = true;
@@ -599,6 +601,7 @@ out:
return changed;
}
+EXPORT_SYMBOL_GPL(pdptrs_changed);
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
@@ -2178,7 +2181,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
- u64 gpa_offset;
struct kvm_arch *ka = &vcpu->kvm->arch;
kvmclock_reset(vcpu);
@@ -2200,8 +2202,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- gpa_offset = data & ~(PAGE_MASK | 1);
-
if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.pv_time, data & ~1ULL,
sizeof(struct pvclock_vcpu_time_info)))
@@ -2296,7 +2296,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (kvm_pmu_is_valid_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr_info);
if (!ignore_msrs) {
- vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
+ vcpu_debug_ratelimited(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
msr, data);
return 1;
} else {
@@ -2508,7 +2508,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
if (!ignore_msrs) {
- vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr_info->index);
+ vcpu_debug_ratelimited(vcpu, "unhandled rdmsr: 0x%x\n",
+ msr_info->index);
return 1;
} else {
vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index);
@@ -2812,7 +2813,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
if (kvm_lapic_hv_timer_in_use(vcpu) &&
kvm_x86_ops->set_hv_timer(vcpu,
- kvm_get_lapic_tscdeadline_msr(vcpu)))
+ kvm_get_lapic_target_expiration_tsc(vcpu)))
kvm_lapic_switch_to_sw_timer(vcpu);
/*
* On a host with synchronized TSC, there is no need to update
@@ -4832,7 +4833,7 @@ static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
}
-int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
+static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
{
if (!need_emulate_wbinvd(vcpu))
return X86EMUL_CONTINUE;
@@ -4852,8 +4853,8 @@ int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
- kvm_x86_ops->skip_emulated_instruction(vcpu);
- return kvm_emulate_wbinvd_noskip(vcpu);
+ kvm_emulate_wbinvd_noskip(vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
@@ -5451,7 +5452,6 @@ static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflag
kvm_run->exit_reason = KVM_EXIT_DEBUG;
*r = EMULATE_USER_EXIT;
} else {
- vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF;
/*
* "Certain debug exceptions may clear bit 0-3. The
* remaining contents of the DR6 register are never
@@ -5464,6 +5464,17 @@ static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflag
}
}
+int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
+{
+ unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+ int r = EMULATE_DONE;
+
+ kvm_x86_ops->skip_emulated_instruction(vcpu);
+ kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+ return r == EMULATE_DONE;
+}
+EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
+
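
The handler conversions throughout this patch rely on the return value above: with KVM_GUESTDBG_SINGLESTEP active, kvm_skip_emulated_instruction() returns 0 and a KVM_EXIT_DEBUG is pending, so exit handlers propagate the result instead of unconditionally returning 1. A hypothetical handler (not part of this patch) illustrating the pattern:

	static int handle_example(struct kvm_vcpu *vcpu)
	{
		/* ... emulate the exiting instruction ... */

		/*
		 * 1: keep running the guest; 0: a single-step KVM_EXIT_DEBUG is
		 * pending and the caller must return to userspace.
		 */
		return kvm_skip_emulated_instruction(vcpu);
	}
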
static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
{
if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
@@ -5649,6 +5660,49 @@ int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
}
EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
+static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
+{
+ unsigned long val;
+
+ /* We should only ever be called with arch.pio.count equal to 1 */
+ BUG_ON(vcpu->arch.pio.count != 1);
+
+ /* For size less than 4 we merge, else we zero extend */
+ val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
+ : 0;
+
+ /*
+ * Since vcpu->arch.pio.count == 1 let emulator_pio_in_emulated perform
+ * the copy and tracing
+ */
+ emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, vcpu->arch.pio.size,
+ vcpu->arch.pio.port, &val, 1);
+ kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+
+ return 1;
+}
+
+int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, unsigned short port)
+{
+ unsigned long val;
+ int ret;
+
+ /* For size less than 4 we merge, else we zero extend */
+ val = (size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX) : 0;
+
+ ret = emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, size, port,
+ &val, 1);
+ if (ret) {
+ kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+ return ret;
+ }
+
+ vcpu->arch.complete_userspace_io = complete_fast_pio_in;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_fast_pio_in);
+
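
The "merge for size < 4" rule above mirrors x86 register semantics (an 8- or 16-bit IN leaves the upper RAX bytes intact, a 32-bit IN zero-extends); a standalone sketch with made-up values:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t rax = 0xdeadbeefcafef00dULL;   /* RAX before the IN */
		uint16_t data = 0x1234;                 /* 2-byte port read */

		/* size < 4: replace only the low bytes, keep the rest of RAX */
		rax = (rax & ~0xffffULL) | data;
		printf("%#llx\n", (unsigned long long)rax);  /* 0xdeadbeefcafe1234 */
		return 0;
	}
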
static int kvmclock_cpu_down_prep(unsigned int cpu)
{
__this_cpu_write(cpu_tsc_khz, 0);
@@ -5998,8 +6052,12 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
- kvm_x86_ops->skip_emulated_instruction(vcpu);
- return kvm_vcpu_halt(vcpu);
+ int ret = kvm_skip_emulated_instruction(vcpu);
+ /*
+ * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
+ * KVM_EXIT_DEBUG here.
+ */
+ return kvm_vcpu_halt(vcpu) && ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
@@ -6030,9 +6088,9 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
unsigned long nr, a0, a1, a2, a3, ret;
- int op_64_bit, r = 1;
+ int op_64_bit, r;
- kvm_x86_ops->skip_emulated_instruction(vcpu);
+ r = kvm_skip_emulated_instruction(vcpu);
if (kvm_hv_hypercall_enabled(vcpu->kvm))
return kvm_hv_hypercall(vcpu);
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index bedfab98077a..e1fb269c87af 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -264,8 +264,8 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
return 0;
error:
- dev_err(&dev->dev,
- "Xen PCI frontend has not registered MSI/MSI-X support!\n");
+ dev_err(&dev->dev, "Failed to create MSI%s! ret=%d!\n",
+ type == PCI_CAP_ID_MSI ? "" : "-X", irq);
return irq;
}
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index b27bccd4390f..821cb41f00e6 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -89,7 +89,7 @@ static void ce4100_mem_serial_out(struct uart_port *p, int offset, int value)
}
static void ce4100_serial_fixup(int port, struct uart_port *up,
- unsigned short *capabilites)
+ u32 *capabilites)
{
#ifdef CONFIG_EARLY_PRINTK
/*
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index 0e98e5d241d0..a9fafb5c8738 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -19,7 +19,6 @@
int xen_swiotlb __read_mostly;
static struct dma_map_ops xen_swiotlb_dma_ops = {
- .mapping_error = xen_swiotlb_dma_mapping_error,
.alloc = xen_swiotlb_alloc_coherent,
.free = xen_swiotlb_free_coherent,
.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index f8960fca0827..8c394e30e5fe 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -41,7 +41,7 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
unsigned long xen_released_pages;
/* E820 map used during setting up memory. */
-static struct e820entry xen_e820_map[E820MAX] __initdata;
+static struct e820entry xen_e820_map[E820_X_MAX] __initdata;
static u32 xen_e820_map_entries __initdata;
/*
@@ -750,7 +750,7 @@ char * __init xen_memory_setup(void)
max_pfn = min(max_pfn, xen_start_info->nr_pages);
mem_end = PFN_PHYS(max_pfn);
- memmap.nr_entries = E820MAX;
+ memmap.nr_entries = ARRAY_SIZE(xen_e820_map);
set_xen_guest_handle(memmap.buffer, xen_e820_map);
op = xen_initial_domain() ?
@@ -923,7 +923,7 @@ char * __init xen_auto_xlated_memory_setup(void)
int i;
int rc;
- memmap.nr_entries = E820MAX;
+ memmap.nr_entries = ARRAY_SIZE(xen_e820_map);
set_xen_guest_handle(memmap.buffer, xen_e820_map);
rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
diff --git a/block/blk-core.c b/block/blk-core.c
index bd642a43b98b..61ba08c58b64 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1154,6 +1154,7 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
blk_rq_init(q, rq);
blk_rq_set_rl(rq, rl);
+ blk_rq_set_prio(rq, ioc);
rq->cmd_flags = op;
rq->rq_flags = rq_flags;
@@ -1626,7 +1627,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
req->errors = 0;
req->__sector = bio->bi_iter.bi_sector;
- req->ioprio = bio_prio(bio);
+ if (ioprio_valid(bio_prio(bio)))
+ req->ioprio = bio_prio(bio);
blk_rq_bio_prep(req->q, req, bio);
}
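
The ioprio_valid() guard above only copies a bio priority that carries a real class; a standalone sketch of the encoding it inspects (shift and class values as in include/linux/ioprio.h, reproduced here for illustration):

	#include <stdio.h>

	#define IOPRIO_CLASS_SHIFT		13
	#define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | (data))
	#define IOPRIO_PRIO_CLASS(mask)		((mask) >> IOPRIO_CLASS_SHIFT)

	int main(void)
	{
		int rt2  = IOPRIO_PRIO_VALUE(1, 2);	/* IOPRIO_CLASS_RT, level 2 */
		int none = 0;				/* bio with no priority set */

		printf("rt2 valid:  %d\n", IOPRIO_PRIO_CLASS(rt2)  != 0);	/* 1 */
		printf("none valid: %d\n", IOPRIO_PRIO_CLASS(none) != 0);	/* 0 */
		return 0;
	}
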
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 19b1d9c5f07e..8e61e8640e17 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -87,6 +87,7 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set)
free_cpumask_var(cpus);
return 0;
}
+EXPORT_SYMBOL_GPL(blk_mq_map_queues);
/*
* We have no quick way of doing reverse lookups. This is only used at
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 3a54dd32a6fc..63e9116cddbd 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -42,7 +42,6 @@ void blk_mq_disable_hotplug(void);
/*
* CPU -> queue mappings
*/
-int blk_mq_map_queues(struct blk_mq_tag_set *set);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index b2a61e3ecb14..9d652a992316 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -32,8 +32,13 @@
* bsg_destroy_job - routine to teardown/delete a bsg job
* @job: bsg_job that is to be torn down
*/
-static void bsg_destroy_job(struct bsg_job *job)
+static void bsg_destroy_job(struct kref *kref)
{
+ struct bsg_job *job = container_of(kref, struct bsg_job, kref);
+ struct request *rq = job->req;
+
+ blk_end_request_all(rq, rq->errors);
+
put_device(job->dev); /* release reference for the request */
kfree(job->request_payload.sg_list);
@@ -41,6 +46,18 @@ static void bsg_destroy_job(struct bsg_job *job)
kfree(job);
}
+void bsg_job_put(struct bsg_job *job)
+{
+ kref_put(&job->kref, bsg_destroy_job);
+}
+EXPORT_SYMBOL_GPL(bsg_job_put);
+
+int bsg_job_get(struct bsg_job *job)
+{
+ return kref_get_unless_zero(&job->kref);
+}
+EXPORT_SYMBOL_GPL(bsg_job_get);
+
/**
* bsg_job_done - completion routine for bsg requests
* @job: bsg_job that is complete
@@ -83,8 +100,7 @@ static void bsg_softirq_done(struct request *rq)
{
struct bsg_job *job = rq->special;
- blk_end_request_all(rq, rq->errors);
- bsg_destroy_job(job);
+ bsg_job_put(job);
}
static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
@@ -142,6 +158,7 @@ static int bsg_create_job(struct device *dev, struct request *req)
job->dev = dev;
/* take a reference for the request */
get_device(job->dev);
+ kref_init(&job->kref);
return 0;
failjob_rls_rqst_payload:
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index c5f9cbe0ae21..83e5f7e1a20d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -104,7 +104,7 @@ config ACPI_PROCFS_POWER
Say N to delete power /proc/acpi/ directories that have moved to /sys/
config ACPI_REV_OVERRIDE_POSSIBLE
- bool "Allow supported ACPI revision to be overriden"
+ bool "Allow supported ACPI revision to be overridden"
depends on X86
default y
help
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 74f4c662f776..2fc52407306c 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -46,6 +46,8 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
+#include <linux/ahci-remap.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "ahci.h"
#define DRV_NAME "ahci"
@@ -1400,6 +1402,40 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
}
#endif
+static void ahci_remap_check(struct pci_dev *pdev, int bar,
+ struct ahci_host_priv *hpriv)
+{
+ int i, count = 0;
+ u32 cap;
+
+ /*
+ * Check if this device might have remapped nvme devices.
+ */
+ if (pdev->vendor != PCI_VENDOR_ID_INTEL ||
+ pci_resource_len(pdev, bar) < SZ_512K ||
+ bar != AHCI_PCI_BAR_STANDARD ||
+ !(readl(hpriv->mmio + AHCI_VSCAP) & 1))
+ return;
+
+ cap = readq(hpriv->mmio + AHCI_REMAP_CAP);
+ for (i = 0; i < AHCI_MAX_REMAP; i++) {
+ if ((cap & (1 << i)) == 0)
+ continue;
+ if (readl(hpriv->mmio + ahci_remap_dcc(i))
+ != PCI_CLASS_STORAGE_EXPRESS)
+ continue;
+
+ /* We've found a remapped device */
+ count++;
+ }
+
+ if (!count)
+ return;
+
+ dev_warn(&pdev->dev, "Found %d remapped NVMe devices.\n", count);
+ dev_warn(&pdev->dev, "Switch your BIOS from RAID to AHCI mode to use them.\n");
+}
+
static int ahci_get_irq_vector(struct ata_host *host, int port)
{
return pci_irq_vector(to_pci_dev(host->dev), port);
@@ -1541,6 +1577,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
+ /* detect remapped nvme devices */
+ ahci_remap_check(pdev, ahci_pci_bar, hpriv);
+
/* must set flag prior to save config in order to take effect */
if (ahci_broken_devslp(pdev))
hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 1eba8dff875e..9884c8c6e934 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -46,11 +46,13 @@
#define LS1021A_AXICC_ADDR 0xC0
#define SATA_ECC_DISABLE 0x00020000
+#define LS1046A_SATA_ECC_DIS 0x80000000
enum ahci_qoriq_type {
AHCI_LS1021A,
AHCI_LS1043A,
AHCI_LS2080A,
+ AHCI_LS1046A,
};
struct ahci_qoriq_priv {
@@ -63,6 +65,7 @@ static const struct of_device_id ahci_qoriq_of_match[] = {
{ .compatible = "fsl,ls1021a-ahci", .data = (void *)AHCI_LS1021A},
{ .compatible = "fsl,ls1043a-ahci", .data = (void *)AHCI_LS1043A},
{ .compatible = "fsl,ls2080a-ahci", .data = (void *)AHCI_LS2080A},
+ { .compatible = "fsl,ls1046a-ahci", .data = (void *)AHCI_LS1046A},
{},
};
MODULE_DEVICE_TABLE(of, ahci_qoriq_of_match);
@@ -175,6 +178,13 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
+
+ case AHCI_LS1046A:
+ writel(LS1046A_SATA_ECC_DIS, qpriv->ecc_addr);
+ writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
+ break;
}
return 0;
@@ -204,9 +214,9 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
qoriq_priv->type = (enum ahci_qoriq_type)of_id->data;
- if (qoriq_priv->type == AHCI_LS1021A) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "sata-ecc");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "sata-ecc");
+ if (res) {
qoriq_priv->ecc_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(qoriq_priv->ecc_addr))
return PTR_ERR(qoriq_priv->ecc_addr);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 0d028ead99e8..ee7db3119b18 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -140,6 +140,7 @@ EXPORT_SYMBOL_GPL(ahci_shost_attrs);
struct device_attribute *ahci_sdev_attrs[] = {
&dev_attr_sw_activity,
&dev_attr_unload_heads,
+ &dev_attr_ncq_prio_enable,
NULL
};
EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 59ce0dd50701..9cd0a2d41816 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -739,6 +739,7 @@ u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
* @n_block: Number of blocks
* @tf_flags: RW/FUA etc...
* @tag: tag
+ * @class: IO priority class
*
* LOCKING:
* None.
@@ -753,7 +754,7 @@ u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
*/
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
- unsigned int tag)
+ unsigned int tag, int class)
{
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf->flags |= tf_flags;
@@ -785,6 +786,12 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
tf->device = ATA_LBA;
if (tf->flags & ATA_TFLAG_FUA)
tf->device |= 1 << 7;
+
+ if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
+ if (class == IOPRIO_CLASS_RT)
+ tf->hob_nsect |= ATA_PRIO_HIGH <<
+ ATA_SHIFT_PRIO;
+ }
} else if (dev->flags & ATA_DFLAG_LBA) {
tf->flags |= ATA_TFLAG_LBA;
@@ -2156,6 +2163,37 @@ static void ata_dev_config_ncq_non_data(struct ata_device *dev)
}
}
+static void ata_dev_config_ncq_prio(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+ unsigned int err_mask;
+
+ if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) {
+ dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
+ return;
+ }
+
+ err_mask = ata_read_log_page(dev,
+ ATA_LOG_SATA_ID_DEV_DATA,
+ ATA_LOG_SATA_SETTINGS,
+ ap->sector_buf,
+ 1);
+ if (err_mask) {
+ ata_dev_dbg(dev,
+ "failed to get Identify Device data, Emask 0x%x\n",
+ err_mask);
+ return;
+ }
+
+ if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) {
+ dev->flags |= ATA_DFLAG_NCQ_PRIO;
+ } else {
+ dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
+ ata_dev_dbg(dev, "SATA page does not support priority\n");
+ }
+
+}
+
static int ata_dev_config_ncq(struct ata_device *dev,
char *desc, size_t desc_sz)
{
@@ -2205,6 +2243,8 @@ static int ata_dev_config_ncq(struct ata_device *dev,
ata_dev_config_ncq_send_recv(dev);
if (ata_id_has_ncq_non_data(dev->id))
ata_dev_config_ncq_non_data(dev);
+ if (ata_id_has_ncq_prio(dev->id))
+ ata_dev_config_ncq_prio(dev);
}
return 0;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 8e575fbdf31d..1f863e757ee4 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -50,6 +50,7 @@
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include <asm/unaligned.h>
+#include <linux/ioprio.h>
#include "libata.h"
#include "libata-transport.h"
@@ -270,6 +271,83 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
+static ssize_t ata_ncq_prio_enable_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap;
+ struct ata_device *dev;
+ bool ncq_prio_enable;
+ int rc = 0;
+
+ ap = ata_shost_to_port(sdev->host);
+
+ spin_lock_irq(ap->lock);
+ dev = ata_scsi_find_dev(ap, sdev);
+ if (!dev) {
+ rc = -ENODEV;
+ goto unlock;
+ }
+
+ ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE;
+
+unlock:
+ spin_unlock_irq(ap->lock);
+
+ return rc ? rc : snprintf(buf, 20, "%u\n", ncq_prio_enable);
+}
+
+static ssize_t ata_ncq_prio_enable_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap;
+ struct ata_device *dev;
+ long int input;
+ int rc;
+
+ rc = kstrtol(buf, 10, &input);
+ if (rc)
+ return rc;
+ if ((input < 0) || (input > 1))
+ return -EINVAL;
+
+ ap = ata_shost_to_port(sdev->host);
+ dev = ata_scsi_find_dev(ap, sdev);
+ if (unlikely(!dev))
+ return -ENODEV;
+
+ spin_lock_irq(ap->lock);
+ if (input)
+ dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLE;
+ else
+ dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
+
+ dev->link->eh_info.action |= ATA_EH_REVALIDATE;
+ dev->link->eh_info.flags |= ATA_EHI_QUIET;
+ ata_port_schedule_eh(ap);
+ spin_unlock_irq(ap->lock);
+
+ ata_port_wait_eh(ap);
+
+ if (input) {
+ spin_lock_irq(ap->lock);
+ if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
+ dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
+ rc = -EIO;
+ }
+ spin_unlock_irq(ap->lock);
+ }
+
+ return rc ? rc : len;
+}
+
+DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
+ ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
+EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
+
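
A user-space sketch of toggling the new attribute; the sysfs path is an assumption for illustration (it depends on the SCSI device topology), not something this patch defines:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical path; the attribute lives in the SCSI device's sysfs dir. */
		FILE *f = fopen("/sys/block/sda/device/ncq_prio_enable", "w");

		if (!f) {
			perror("open ncq_prio_enable");
			return 1;
		}
		fputs("1\n", f);	/* "0" disables, "1" enables (see the store handler) */
		return fclose(f) ? 1 : 0;
	}
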
void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
u8 sk, u8 asc, u8 ascq)
{
@@ -401,6 +479,7 @@ EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
struct device_attribute *ata_common_sdev_attrs[] = {
&dev_attr_unload_heads,
+ &dev_attr_ncq_prio_enable,
NULL
};
EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
@@ -1756,6 +1835,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
const u8 *cdb = scmd->cmnd;
+ struct request *rq = scmd->request;
+ int class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
unsigned int tf_flags = 0;
u64 block;
u32 n_block;
@@ -1822,7 +1903,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
qc->nbytes = n_block * scmd->device->sector_size;
rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
- qc->tag);
+ qc->tag, class);
+
if (likely(rc == 0))
return 0;
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 3b301a48007c..8f3a5596dd67 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -66,7 +66,7 @@ extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf);
extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
- unsigned int tag);
+ unsigned int tag, int class);
extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
struct ata_device *dev);
extern unsigned ata_exec_internal(struct ata_device *dev,
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 139d20778b29..d4caa23f5a88 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -11,19 +11,26 @@
*
* TODO:
* - dmaengine support
- * - check if timing stuff needed
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <scsi/scsi_host.h>
+
#include <linux/ata.h>
+#include <linux/clk.h>
#include <linux/libata.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/clk.h>
#define DRV_NAME "pata_imx"
+#define PATA_IMX_ATA_TIME_OFF 0x00
+#define PATA_IMX_ATA_TIME_ON 0x01
+#define PATA_IMX_ATA_TIME_1 0x02
+#define PATA_IMX_ATA_TIME_2W 0x03
+#define PATA_IMX_ATA_TIME_2R 0x04
+#define PATA_IMX_ATA_TIME_AX 0x05
+#define PATA_IMX_ATA_TIME_PIO_RDX 0x06
+#define PATA_IMX_ATA_TIME_4 0x07
+#define PATA_IMX_ATA_TIME_9 0x08
+
#define PATA_IMX_ATA_CONTROL 0x24
#define PATA_IMX_ATA_CTRL_FIFO_RST_B (1<<7)
#define PATA_IMX_ATA_CTRL_ATA_RST_B (1<<6)
@@ -33,6 +40,10 @@
#define PATA_IMX_DRIVE_DATA 0xA0
#define PATA_IMX_DRIVE_CONTROL 0xD8
+static u32 pio_t4[] = { 30, 20, 15, 10, 10 };
+static u32 pio_t9[] = { 20, 15, 10, 10, 10 };
+static u32 pio_tA[] = { 35, 35, 35, 35, 35 };
+
struct pata_imx_priv {
struct clk *clk;
/* timings/interrupt/control regs */
@@ -40,28 +51,49 @@ struct pata_imx_priv {
u32 ata_ctl;
};
-static int pata_imx_set_mode(struct ata_link *link, struct ata_device **unused)
+static void pata_imx_set_timing(struct ata_device *adev,
+ struct pata_imx_priv *priv)
+{
+ struct ata_timing timing;
+ unsigned long clkrate;
+ u32 T, mode;
+
+ clkrate = clk_get_rate(priv->clk);
+
+ if (adev->pio_mode < XFER_PIO_0 || adev->pio_mode > XFER_PIO_4 ||
+ !clkrate)
+ return;
+
+ T = 1000000000 / clkrate;
+ ata_timing_compute(adev, adev->pio_mode, &timing, T * 1000, 0);
+
+ mode = adev->pio_mode - XFER_PIO_0;
+
+ writeb(3, priv->host_regs + PATA_IMX_ATA_TIME_OFF);
+ writeb(3, priv->host_regs + PATA_IMX_ATA_TIME_ON);
+ writeb(timing.setup, priv->host_regs + PATA_IMX_ATA_TIME_1);
+ writeb(timing.act8b, priv->host_regs + PATA_IMX_ATA_TIME_2W);
+ writeb(timing.act8b, priv->host_regs + PATA_IMX_ATA_TIME_2R);
+ writeb(1, priv->host_regs + PATA_IMX_ATA_TIME_PIO_RDX);
+
+ writeb(pio_t4[mode] / T + 1, priv->host_regs + PATA_IMX_ATA_TIME_4);
+ writeb(pio_t9[mode] / T + 1, priv->host_regs + PATA_IMX_ATA_TIME_9);
+ writeb(pio_tA[mode] / T + 1, priv->host_regs + PATA_IMX_ATA_TIME_AX);
+}
+
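
To make the timing arithmetic above concrete, a standalone sketch with a hypothetical 66 MHz bus clock (the clock rate is illustrative, not taken from the patch); T is the clock period in nanoseconds and the driver rounds up by one cycle:

	#include <stdio.h>

	int main(void)
	{
		unsigned long clkrate = 66000000;        /* hypothetical clk_get_rate() result */
		unsigned int T = 1000000000 / clkrate;   /* clock period in ns (15 here) */
		unsigned int pio_t4_mode4 = 10;          /* t4 for PIO4, in ns, from the table above */

		/* Same rounding as the driver: cycles = t / T + 1 */
		printf("TIME_4 = %u cycle(s)\n", pio_t4_mode4 / T + 1);  /* 1 */
		return 0;
	}
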
+static void pata_imx_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
- struct ata_device *dev;
- struct ata_port *ap = link->ap;
struct pata_imx_priv *priv = ap->host->private_data;
u32 val;
- ata_for_each_dev(dev, link, ENABLED) {
- dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
- dev->xfer_shift = ATA_SHIFT_PIO;
- dev->flags |= ATA_DFLAG_PIO;
-
- val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
- if (ata_pio_need_iordy(dev))
- val |= PATA_IMX_ATA_CTRL_IORDY_EN;
- else
- val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
- __raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
+ pata_imx_set_timing(adev, priv);
- ata_dev_info(dev, "configured for PIO\n");
- }
- return 0;
+ val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
+ if (ata_pio_need_iordy(adev))
+ val |= PATA_IMX_ATA_CTRL_IORDY_EN;
+ else
+ val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
+ __raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
}
static struct scsi_host_template pata_imx_sht = {
@@ -72,7 +104,7 @@ static struct ata_port_operations pata_imx_port_ops = {
.inherits = &ata_sff_port_ops,
.sff_data_xfer = ata_sff_data_xfer_noirq,
.cable_detect = ata_cable_unknown,
- .set_mode = pata_imx_set_mode,
+ .set_piomode = pata_imx_set_piomode,
};
static void pata_imx_setup_port(struct ata_ioports *ioaddr)
@@ -128,7 +160,7 @@ static int pata_imx_probe(struct platform_device *pdev)
ap = host->ports[0];
ap->ops = &pata_imx_port_ops;
- ap->pio_mask = ATA_PIO0;
+ ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_SLAVE_POSS;
io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 10e1b9eee10e..4ef4c5caed4f 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -128,4 +128,17 @@ config IMG_ASCII_LCD
development boards such as the MIPS Boston, MIPS Malta & MIPS SEAD3
from Imagination Technologies.
+config HT16K33
+ tristate "Holtek Ht16K33 LED controller with keyscan"
+ depends on FB && OF && I2C && INPUT
+ select FB_SYS_FOPS
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select INPUT_MATRIXKMAP
+ select FB_BACKLIGHT
+ help
+ Say yes here to add support for Holtek HT16K33, RAM mapping 16*8
+ LED controller driver with keyscan.
+
endif # AUXDISPLAY
diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile
index 3127175c89df..cb3dd847713b 100644
--- a/drivers/auxdisplay/Makefile
+++ b/drivers/auxdisplay/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_KS0108) += ks0108.o
obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o
obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o
+obj-$(CONFIG_HT16K33) += ht16k33.o
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
new file mode 100644
index 000000000000..eeb323f56c07
--- /dev/null
+++ b/drivers/auxdisplay/ht16k33.c
@@ -0,0 +1,563 @@
+/*
+ * HT16K33 driver
+ *
+ * Author: Robin van der Gracht <robin@protonic.nl>
+ *
+ * Copyright: (C) 2016 Protonic Holland.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/fb.h>
+#include <linux/slab.h>
+#include <linux/backlight.h>
+#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/workqueue.h>
+#include <linux/mm.h>
+
+/* Registers */
+#define REG_SYSTEM_SETUP 0x20
+#define REG_SYSTEM_SETUP_OSC_ON BIT(0)
+
+#define REG_DISPLAY_SETUP 0x80
+#define REG_DISPLAY_SETUP_ON BIT(0)
+
+#define REG_ROWINT_SET 0xA0
+#define REG_ROWINT_SET_INT_EN BIT(0)
+#define REG_ROWINT_SET_INT_ACT_HIGH BIT(1)
+
+#define REG_BRIGHTNESS 0xE0
+
+/* Defines */
+#define DRIVER_NAME "ht16k33"
+
+#define MIN_BRIGHTNESS 0x1
+#define MAX_BRIGHTNESS 0x10
+
+#define HT16K33_MATRIX_LED_MAX_COLS 8
+#define HT16K33_MATRIX_LED_MAX_ROWS 16
+#define HT16K33_MATRIX_KEYPAD_MAX_COLS 3
+#define HT16K33_MATRIX_KEYPAD_MAX_ROWS 12
+
+#define BYTES_PER_ROW (HT16K33_MATRIX_LED_MAX_ROWS / 8)
+#define HT16K33_FB_SIZE (HT16K33_MATRIX_LED_MAX_COLS * BYTES_PER_ROW)
+
+struct ht16k33_keypad {
+ struct input_dev *dev;
+ spinlock_t lock;
+ struct delayed_work work;
+ uint32_t cols;
+ uint32_t rows;
+ uint32_t row_shift;
+ uint32_t debounce_ms;
+ uint16_t last_key_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
+};
+
+struct ht16k33_fbdev {
+ struct fb_info *info;
+ uint32_t refresh_rate;
+ uint8_t *buffer;
+ uint8_t *cache;
+ struct delayed_work work;
+};
+
+struct ht16k33_priv {
+ struct i2c_client *client;
+ struct ht16k33_keypad keypad;
+ struct ht16k33_fbdev fbdev;
+ struct workqueue_struct *workqueue;
+};
+
+static struct fb_fix_screeninfo ht16k33_fb_fix = {
+ .id = DRIVER_NAME,
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_MONO10,
+ .xpanstep = 0,
+ .ypanstep = 0,
+ .ywrapstep = 0,
+ .line_length = HT16K33_MATRIX_LED_MAX_ROWS,
+ .accel = FB_ACCEL_NONE,
+};
+
+static struct fb_var_screeninfo ht16k33_fb_var = {
+ .xres = HT16K33_MATRIX_LED_MAX_ROWS,
+ .yres = HT16K33_MATRIX_LED_MAX_COLS,
+ .xres_virtual = HT16K33_MATRIX_LED_MAX_ROWS,
+ .yres_virtual = HT16K33_MATRIX_LED_MAX_COLS,
+ .bits_per_pixel = 1,
+ .red = { 0, 1, 0 },
+ .green = { 0, 1, 0 },
+ .blue = { 0, 1, 0 },
+ .left_margin = 0,
+ .right_margin = 0,
+ .upper_margin = 0,
+ .lower_margin = 0,
+ .vmode = FB_VMODE_NONINTERLACED,
+};
+
+static int ht16k33_display_on(struct ht16k33_priv *priv)
+{
+ uint8_t data = REG_DISPLAY_SETUP | REG_DISPLAY_SETUP_ON;
+
+ return i2c_smbus_write_byte(priv->client, data);
+}
+
+static int ht16k33_display_off(struct ht16k33_priv *priv)
+{
+ return i2c_smbus_write_byte(priv->client, REG_DISPLAY_SETUP);
+}
+
+static void ht16k33_fb_queue(struct ht16k33_priv *priv)
+{
+ struct ht16k33_fbdev *fbdev = &priv->fbdev;
+
+ queue_delayed_work(priv->workqueue, &fbdev->work,
+ msecs_to_jiffies(HZ / fbdev->refresh_rate));
+}
+
+static void ht16k33_keypad_queue(struct ht16k33_priv *priv)
+{
+ struct ht16k33_keypad *keypad = &priv->keypad;
+
+ queue_delayed_work(priv->workqueue, &keypad->work,
+ msecs_to_jiffies(keypad->debounce_ms));
+}
+
+/*
+ * This gets the fb data from cache and copies it to ht16k33 display RAM
+ */
+static void ht16k33_fb_update(struct work_struct *work)
+{
+ struct ht16k33_fbdev *fbdev =
+ container_of(work, struct ht16k33_fbdev, work.work);
+ struct ht16k33_priv *priv =
+ container_of(fbdev, struct ht16k33_priv, fbdev);
+
+ uint8_t *p1, *p2;
+ int len, pos = 0, first = -1;
+
+ p1 = fbdev->cache;
+ p2 = fbdev->buffer;
+
+ /* Search for the first byte with changes */
+ while (pos < HT16K33_FB_SIZE && first < 0) {
+ if (*(p1++) - *(p2++))
+ first = pos;
+ pos++;
+ }
+
+ /* No changes found */
+ if (first < 0)
+ goto requeue;
+
+ len = HT16K33_FB_SIZE - first;
+ p1 = fbdev->cache + HT16K33_FB_SIZE - 1;
+ p2 = fbdev->buffer + HT16K33_FB_SIZE - 1;
+
+ /* Determine i2c transfer length */
+ while (len > 1) {
+ if (*(p1--) - *(p2--))
+ break;
+ len--;
+ }
+
+ p1 = fbdev->cache + first;
+ p2 = fbdev->buffer + first;
+ if (!i2c_smbus_write_i2c_block_data(priv->client, first, len, p2))
+ memcpy(p1, p2, len);
+requeue:
+ ht16k33_fb_queue(priv);
+}
+
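
The update routine above narrows each i2c transfer to the span between the first and last changed byte; a standalone sketch of that scan over two small, made-up buffers (the driver scans forward then backward, but the resulting span is the same):

	#include <stdio.h>

	#define FB_SIZE 16

	int main(void)
	{
		unsigned char cache[FB_SIZE]  = { 0 };
		unsigned char buffer[FB_SIZE] = { 0 };
		int first = -1, last = -1, i;

		buffer[3] = 0x0f;	/* two dirty bytes ... */
		buffer[9] = 0xf0;

		for (i = 0; i < FB_SIZE; i++) {
			if (cache[i] != buffer[i]) {
				if (first < 0)
					first = i;
				last = i;
			}
		}

		if (first >= 0)		/* ... give one 7-byte transfer at offset 3 */
			printf("write %d bytes at offset %d\n", last - first + 1, first);
		return 0;
	}
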
+static int ht16k33_keypad_start(struct input_dev *dev)
+{
+ struct ht16k33_priv *priv = input_get_drvdata(dev);
+ struct ht16k33_keypad *keypad = &priv->keypad;
+
+ /*
+ * Schedule an immediate key scan to capture current key state;
+	 * columns will be activated and IRQs will be enabled after the scan.
+ */
+ queue_delayed_work(priv->workqueue, &keypad->work, 0);
+ return 0;
+}
+
+static void ht16k33_keypad_stop(struct input_dev *dev)
+{
+ struct ht16k33_priv *priv = input_get_drvdata(dev);
+ struct ht16k33_keypad *keypad = &priv->keypad;
+
+ cancel_delayed_work(&keypad->work);
+ /*
+ * ht16k33_keypad_scan() will leave IRQs enabled;
+ * we should disable them now.
+ */
+ disable_irq_nosync(priv->client->irq);
+}
+
+static int ht16k33_initialize(struct ht16k33_priv *priv)
+{
+ uint8_t byte;
+ int err;
+ uint8_t data[HT16K33_MATRIX_LED_MAX_COLS * 2];
+
+ /* Clear RAM (8 * 16 bits) */
+ memset(data, 0, sizeof(data));
+ err = i2c_smbus_write_block_data(priv->client, 0, sizeof(data), data);
+ if (err)
+ return err;
+
+ /* Turn on internal oscillator */
+ byte = REG_SYSTEM_SETUP_OSC_ON | REG_SYSTEM_SETUP;
+ err = i2c_smbus_write_byte(priv->client, byte);
+ if (err)
+ return err;
+
+ /* Configure INT pin */
+ byte = REG_ROWINT_SET | REG_ROWINT_SET_INT_ACT_HIGH;
+ if (priv->client->irq > 0)
+ byte |= REG_ROWINT_SET_INT_EN;
+ return i2c_smbus_write_byte(priv->client, byte);
+}
+
+/*
+ * This reads the keypad state and reports key changes to the input subsystem
+ */
+static void ht16k33_keypad_scan(struct work_struct *work)
+{
+ struct ht16k33_keypad *keypad =
+ container_of(work, struct ht16k33_keypad, work.work);
+ struct ht16k33_priv *priv =
+ container_of(keypad, struct ht16k33_priv, keypad);
+ const unsigned short *keycodes = keypad->dev->keycode;
+ uint16_t bits_changed, new_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
+ uint8_t data[HT16K33_MATRIX_KEYPAD_MAX_COLS * 2];
+ int row, col, code;
+ bool reschedule = false;
+
+ if (i2c_smbus_read_i2c_block_data(priv->client, 0x40, 6, data) != 6) {
+ dev_err(&priv->client->dev, "Failed to read key data\n");
+ goto end;
+ }
+
+ for (col = 0; col < keypad->cols; col++) {
+ new_state[col] = (data[col * 2 + 1] << 8) | data[col * 2];
+ if (new_state[col])
+ reschedule = true;
+ bits_changed = keypad->last_key_state[col] ^ new_state[col];
+
+ while (bits_changed) {
+ row = ffs(bits_changed) - 1;
+ code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
+ input_event(keypad->dev, EV_MSC, MSC_SCAN, code);
+ input_report_key(keypad->dev, keycodes[code],
+ new_state[col] & BIT(row));
+ bits_changed &= ~BIT(row);
+ }
+ }
+ input_sync(keypad->dev);
+ memcpy(keypad->last_key_state, new_state, sizeof(new_state));
+
+end:
+ if (reschedule)
+ ht16k33_keypad_queue(priv);
+ else
+ enable_irq(priv->client->irq);
+}
+
+static irqreturn_t ht16k33_irq_thread(int irq, void *dev)
+{
+ struct ht16k33_priv *priv = dev;
+
+ disable_irq_nosync(priv->client->irq);
+ ht16k33_keypad_queue(priv);
+
+ return IRQ_HANDLED;
+}
+
+static int ht16k33_bl_update_status(struct backlight_device *bl)
+{
+ int brightness = bl->props.brightness;
+ struct ht16k33_priv *priv = bl_get_data(bl);
+
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.fb_blank != FB_BLANK_UNBLANK ||
+ bl->props.state & BL_CORE_FBBLANK || brightness == 0) {
+ return ht16k33_display_off(priv);
+ }
+
+ ht16k33_display_on(priv);
+ return i2c_smbus_write_byte(priv->client,
+ REG_BRIGHTNESS | (brightness - 1));
+}
+
+static int ht16k33_bl_check_fb(struct backlight_device *bl, struct fb_info *fi)
+{
+ struct ht16k33_priv *priv = bl_get_data(bl);
+
+ return (fi == NULL) || (fi->par == priv);
+}
+
+static const struct backlight_ops ht16k33_bl_ops = {
+ .update_status = ht16k33_bl_update_status,
+ .check_fb = ht16k33_bl_check_fb,
+};
+
+static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct ht16k33_priv *priv = info->par;
+
+ return vm_insert_page(vma, vma->vm_start,
+ virt_to_page(priv->fbdev.buffer));
+}
+
+static struct fb_ops ht16k33_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_read = fb_sys_read,
+ .fb_write = fb_sys_write,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_mmap = ht16k33_mmap,
+};
+
+static int ht16k33_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err;
+ uint32_t rows, cols, dft_brightness;
+ struct backlight_device *bl;
+ struct backlight_properties bl_props;
+ struct ht16k33_priv *priv;
+ struct ht16k33_keypad *keypad;
+ struct ht16k33_fbdev *fbdev;
+ struct device_node *node = client->dev.of_node;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "i2c_check_functionality error\n");
+ return -EIO;
+ }
+
+ if (client->irq <= 0) {
+ dev_err(&client->dev, "No IRQ specified\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+ i2c_set_clientdata(client, priv);
+ fbdev = &priv->fbdev;
+ keypad = &priv->keypad;
+
+ priv->workqueue = create_singlethread_workqueue(DRIVER_NAME "-wq");
+ if (priv->workqueue == NULL)
+ return -ENOMEM;
+
+ err = ht16k33_initialize(priv);
+ if (err)
+ goto err_destroy_wq;
+
+ /* Framebuffer (2 bytes per column) */
+ BUILD_BUG_ON(PAGE_SIZE < HT16K33_FB_SIZE);
+ fbdev->buffer = (unsigned char *) get_zeroed_page(GFP_KERNEL);
+ if (!fbdev->buffer) {
+ err = -ENOMEM;
+ goto err_free_fbdev;
+ }
+
+ fbdev->cache = devm_kmalloc(&client->dev, HT16K33_FB_SIZE, GFP_KERNEL);
+ if (!fbdev->cache) {
+ err = -ENOMEM;
+ goto err_fbdev_buffer;
+ }
+
+ fbdev->info = framebuffer_alloc(0, &client->dev);
+ if (!fbdev->info) {
+ err = -ENOMEM;
+ goto err_fbdev_buffer;
+ }
+
+ err = of_property_read_u32(node, "refresh-rate-hz",
+ &fbdev->refresh_rate);
+ if (err) {
+ dev_err(&client->dev, "refresh rate not specified\n");
+ goto err_fbdev_info;
+ }
+ fb_bl_default_curve(fbdev->info, 0, MIN_BRIGHTNESS, MAX_BRIGHTNESS);
+
+ INIT_DELAYED_WORK(&fbdev->work, ht16k33_fb_update);
+ fbdev->info->fbops = &ht16k33_fb_ops;
+ fbdev->info->screen_base = (char __iomem *) fbdev->buffer;
+ fbdev->info->screen_size = HT16K33_FB_SIZE;
+ fbdev->info->fix = ht16k33_fb_fix;
+ fbdev->info->var = ht16k33_fb_var;
+ fbdev->info->pseudo_palette = NULL;
+ fbdev->info->flags = FBINFO_FLAG_DEFAULT;
+ fbdev->info->par = priv;
+
+ err = register_framebuffer(fbdev->info);
+ if (err)
+ goto err_fbdev_info;
+
+ /* Keypad */
+ keypad->dev = devm_input_allocate_device(&client->dev);
+ if (!keypad->dev) {
+ err = -ENOMEM;
+ goto err_fbdev_unregister;
+ }
+
+ keypad->dev->name = DRIVER_NAME"-keypad";
+ keypad->dev->id.bustype = BUS_I2C;
+ keypad->dev->open = ht16k33_keypad_start;
+ keypad->dev->close = ht16k33_keypad_stop;
+
+ if (!of_get_property(node, "linux,no-autorepeat", NULL))
+ __set_bit(EV_REP, keypad->dev->evbit);
+
+ err = of_property_read_u32(node, "debounce-delay-ms",
+ &keypad->debounce_ms);
+ if (err) {
+ dev_err(&client->dev, "key debounce delay not specified\n");
+ goto err_fbdev_unregister;
+ }
+
+ err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ ht16k33_irq_thread,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ DRIVER_NAME, priv);
+ if (err) {
+ dev_err(&client->dev, "irq request failed %d, error %d\n",
+ client->irq, err);
+ goto err_fbdev_unregister;
+ }
+
+ disable_irq_nosync(client->irq);
+ rows = HT16K33_MATRIX_KEYPAD_MAX_ROWS;
+ cols = HT16K33_MATRIX_KEYPAD_MAX_COLS;
+ err = matrix_keypad_parse_of_params(&client->dev, &rows, &cols);
+ if (err)
+ goto err_fbdev_unregister;
+
+ err = matrix_keypad_build_keymap(NULL, NULL, rows, cols, NULL,
+ keypad->dev);
+ if (err) {
+ dev_err(&client->dev, "failed to build keymap\n");
+ goto err_fbdev_unregister;
+ }
+
+ input_set_drvdata(keypad->dev, priv);
+ keypad->rows = rows;
+ keypad->cols = cols;
+ keypad->row_shift = get_count_order(cols);
+ INIT_DELAYED_WORK(&keypad->work, ht16k33_keypad_scan);
+
+ err = input_register_device(keypad->dev);
+ if (err)
+ goto err_fbdev_unregister;
+
+ /* Backlight */
+ memset(&bl_props, 0, sizeof(struct backlight_properties));
+ bl_props.type = BACKLIGHT_RAW;
+ bl_props.max_brightness = MAX_BRIGHTNESS;
+
+ bl = devm_backlight_device_register(&client->dev, DRIVER_NAME"-bl",
+ &client->dev, priv,
+ &ht16k33_bl_ops, &bl_props);
+ if (IS_ERR(bl)) {
+ dev_err(&client->dev, "failed to register backlight\n");
+ err = PTR_ERR(bl);
+ goto err_keypad_unregister;
+ }
+
+ err = of_property_read_u32(node, "default-brightness-level",
+ &dft_brightness);
+ if (err) {
+ dft_brightness = MAX_BRIGHTNESS;
+ } else if (dft_brightness > MAX_BRIGHTNESS) {
+ dev_warn(&client->dev,
+ "invalid default brightness level: %u, using %u\n",
+ dft_brightness, MAX_BRIGHTNESS);
+ dft_brightness = MAX_BRIGHTNESS;
+ }
+
+ bl->props.brightness = dft_brightness;
+ ht16k33_bl_update_status(bl);
+
+ ht16k33_fb_queue(priv);
+ return 0;
+
+err_keypad_unregister:
+ input_unregister_device(keypad->dev);
+err_fbdev_unregister:
+ unregister_framebuffer(fbdev->info);
+err_fbdev_info:
+ framebuffer_release(fbdev->info);
+err_fbdev_buffer:
+ free_page((unsigned long) fbdev->buffer);
+err_free_fbdev:
+ kfree(fbdev);
+err_destroy_wq:
+ destroy_workqueue(priv->workqueue);
+
+ return err;
+}
+
+static int ht16k33_remove(struct i2c_client *client)
+{
+ struct ht16k33_priv *priv = i2c_get_clientdata(client);
+ struct ht16k33_keypad *keypad = &priv->keypad;
+ struct ht16k33_fbdev *fbdev = &priv->fbdev;
+
+ ht16k33_keypad_stop(keypad->dev);
+
+ cancel_delayed_work(&fbdev->work);
+ unregister_framebuffer(fbdev->info);
+ framebuffer_release(fbdev->info);
+ free_page((unsigned long) fbdev->buffer);
+
+ destroy_workqueue(priv->workqueue);
+ return 0;
+}
+
+static const struct i2c_device_id ht16k33_i2c_match[] = {
+ { "ht16k33", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ht16k33_i2c_match);
+
+static const struct of_device_id ht16k33_of_match[] = {
+ { .compatible = "holtek,ht16k33", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ht16k33_of_match);
+
+static struct i2c_driver ht16k33_driver = {
+ .probe = ht16k33_probe,
+ .remove = ht16k33_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(ht16k33_of_match),
+ },
+ .id_table = ht16k33_i2c_match,
+};
+module_i2c_driver(ht16k33_driver);
+
+MODULE_DESCRIPTION("Holtek HT16K33 driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robin van der Gracht <robin@protonic.nl>");
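The ht16k33_mmap() implementation above hands the single page backing fbdev->buffer straight to user space, so a client can poke the 1-bit-per-pixel framebuffer and let the delayed work flush the change. A minimal user-space sketch; the /dev/fb1 node and the choice of byte are assumptions for illustration, not part of the driver:

/*
 * Hypothetical test program: map the HT16K33 framebuffer page and set one bit.
 * The framebuffer node number is an assumption; the buffer occupies the first
 * HT16K33_FB_SIZE bytes of the mapped page (two bytes per column).
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        int fd = open("/dev/fb1", O_RDWR);      /* assumed device node */
        uint8_t *fb;

        if (fd < 0) {
                perror("open");
                return 1;
        }

        fb = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (fb == MAP_FAILED) {
                perror("mmap");
                close(fd);
                return 1;
        }

        fb[0] |= 0x01;          /* first row of the first column */

        munmap(fb, page);
        close(fd);
        return 0;
}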
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index afa67f9ed726..d718ae4b907a 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -224,6 +224,8 @@ config DEBUG_TEST_DRIVER_REMOVE
unusable. You should say N here unless you are explicitly looking to
test this functionality.
+source "drivers/base/test/Kconfig"
+
config SYS_HYPERVISOR
bool
default n
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 2609ba20b396..f2816f6ff76a 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -24,5 +24,7 @@ obj-$(CONFIG_PINCTRL) += pinctrl.o
obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
+obj-y += test/
+
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/base.h b/drivers/base/base.h
index e05db388bd1c..ada9dce34e6d 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -107,6 +107,9 @@ extern void bus_remove_device(struct device *dev);
extern int bus_add_driver(struct device_driver *drv);
extern void bus_remove_driver(struct device_driver *drv);
+extern void device_release_driver_internal(struct device *dev,
+ struct device_driver *drv,
+ struct device *parent);
extern void driver_detach(struct device_driver *drv);
extern int driver_probe_device(struct device_driver *drv, struct device *dev);
@@ -138,6 +141,8 @@ extern void device_unblock_probing(void);
extern struct kset *devices_kset;
extern void devices_kset_move_last(struct device *dev);
+extern struct device_attribute dev_attr_deferred_probe;
+
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
extern void module_add_driver(struct module *mod, struct device_driver *drv);
extern void module_remove_driver(struct device_driver *drv);
@@ -152,3 +157,13 @@ extern int devtmpfs_init(void);
#else
static inline int devtmpfs_init(void) { return 0; }
#endif
+
+/* Device links support */
+extern int device_links_read_lock(void);
+extern void device_links_read_unlock(int idx);
+extern int device_links_check_suppliers(struct device *dev);
+extern void device_links_driver_bound(struct device *dev);
+extern void device_links_driver_cleanup(struct device *dev);
+extern void device_links_no_driver(struct device *dev);
+extern bool device_links_busy(struct device *dev);
+extern void device_links_unbind_consumers(struct device *dev);
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 47983a2ba621..1e3903d0d994 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -16,6 +16,9 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
@@ -85,7 +88,120 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
{
return sib_leaf->of_node == this_leaf->of_node;
}
+
+/* OF properties to query for a given cache type */
+struct cache_type_info {
+ const char *size_prop;
+ const char *line_size_props[2];
+ const char *nr_sets_prop;
+};
+
+static const struct cache_type_info cache_type_info[] = {
+ {
+ .size_prop = "cache-size",
+ .line_size_props = { "cache-line-size",
+ "cache-block-size", },
+ .nr_sets_prop = "cache-sets",
+ }, {
+ .size_prop = "i-cache-size",
+ .line_size_props = { "i-cache-line-size",
+ "i-cache-block-size", },
+ .nr_sets_prop = "i-cache-sets",
+ }, {
+ .size_prop = "d-cache-size",
+ .line_size_props = { "d-cache-line-size",
+ "d-cache-block-size", },
+ .nr_sets_prop = "d-cache-sets",
+ },
+};
+
+static inline int get_cacheinfo_idx(enum cache_type type)
+{
+ if (type == CACHE_TYPE_UNIFIED)
+ return 0;
+ return type;
+}
+
+static void cache_size(struct cacheinfo *this_leaf)
+{
+ const char *propname;
+ const __be32 *cache_size;
+ int ct_idx;
+
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ propname = cache_type_info[ct_idx].size_prop;
+
+ cache_size = of_get_property(this_leaf->of_node, propname, NULL);
+ if (cache_size)
+ this_leaf->size = of_read_number(cache_size, 1);
+}
+
+/* not cache_line_size() because that's a macro in include/linux/cache.h */
+static void cache_get_line_size(struct cacheinfo *this_leaf)
+{
+ const __be32 *line_size;
+ int i, lim, ct_idx;
+
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);
+
+ for (i = 0; i < lim; i++) {
+ const char *propname;
+
+ propname = cache_type_info[ct_idx].line_size_props[i];
+ line_size = of_get_property(this_leaf->of_node, propname, NULL);
+ if (line_size)
+ break;
+ }
+
+ if (line_size)
+ this_leaf->coherency_line_size = of_read_number(line_size, 1);
+}
+
+static void cache_nr_sets(struct cacheinfo *this_leaf)
+{
+ const char *propname;
+ const __be32 *nr_sets;
+ int ct_idx;
+
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ propname = cache_type_info[ct_idx].nr_sets_prop;
+
+ nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
+ if (nr_sets)
+ this_leaf->number_of_sets = of_read_number(nr_sets, 1);
+}
+
+static void cache_associativity(struct cacheinfo *this_leaf)
+{
+ unsigned int line_size = this_leaf->coherency_line_size;
+ unsigned int nr_sets = this_leaf->number_of_sets;
+ unsigned int size = this_leaf->size;
+
+ /*
+ * If the cache is fully associative, there is no need to
+ * check the other properties.
+ */
+ if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
+ this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
+}
+
+static void cache_of_override_properties(unsigned int cpu)
+{
+ int index;
+ struct cacheinfo *this_leaf;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+ for (index = 0; index < cache_leaves(cpu); index++) {
+ this_leaf = this_cpu_ci->info_list + index;
+ cache_size(this_leaf);
+ cache_get_line_size(this_leaf);
+ cache_nr_sets(this_leaf);
+ cache_associativity(this_leaf);
+ }
+}
#else
+static void cache_of_override_properties(unsigned int cpu) { }
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
struct cacheinfo *sib_leaf)
@@ -104,9 +220,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf, *sib_leaf;
unsigned int index;
- int ret;
+ int ret = 0;
- ret = cache_setup_of_node(cpu);
+ if (this_cpu_ci->cpu_map_populated)
+ return 0;
+
+ if (of_have_populated_dt())
+ ret = cache_setup_of_node(cpu);
+ else if (!acpi_disabled)
+ /* No cache property/hierarchy support yet in ACPI */
+ ret = -ENOTSUPP;
if (ret)
return ret;
@@ -161,6 +284,12 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
}
}
+static void cache_override_properties(unsigned int cpu)
+{
+ if (of_have_populated_dt())
+ return cache_of_override_properties(cpu);
+}
+
static void free_cache_attributes(unsigned int cpu)
{
if (!per_cpu_cacheinfo(cpu))
@@ -203,10 +332,11 @@ static int detect_cache_attributes(unsigned int cpu)
*/
ret = cache_shared_cpu_map_setup(cpu);
if (ret) {
- pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
- cpu);
+ pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
goto free_ci;
}
+
+ cache_override_properties(cpu);
return 0;
free_ci:
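As a worked check of cache_associativity() above (numbers chosen purely for illustration, not taken from any particular DT): a cache described with cache-size = 32768, cache-sets = 128 and cache-line-size = 64 resolves to ways_of_associativity = (32768 / 128) / 64 = 4, while a node with cache-sets = 1 is treated as fully associative and the computation is skipped.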
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 71059e32bebc..a2b2896693d6 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -163,6 +163,18 @@ static void klist_class_dev_put(struct klist_node *n)
put_device(dev);
}
+static int class_add_groups(struct class *cls,
+ const struct attribute_group **groups)
+{
+ return sysfs_create_groups(&cls->p->subsys.kobj, groups);
+}
+
+static void class_remove_groups(struct class *cls,
+ const struct attribute_group **groups)
+{
+ return sysfs_remove_groups(&cls->p->subsys.kobj, groups);
+}
+
int __class_register(struct class *cls, struct lock_class_key *key)
{
struct subsys_private *cp;
@@ -203,6 +215,8 @@ int __class_register(struct class *cls, struct lock_class_key *key)
kfree(cp);
return error;
}
+ error = class_add_groups(class_get(cls), cls->class_groups);
+ class_put(cls);
error = add_class_attrs(class_get(cls));
class_put(cls);
return error;
@@ -213,6 +227,7 @@ void class_unregister(struct class *cls)
{
pr_debug("device class '%s': unregistering\n", cls->name);
remove_class_attrs(cls);
+ class_remove_groups(cls, cls->class_groups);
kset_unregister(&cls->p->subsys);
}
diff --git a/drivers/base/core.c b/drivers/base/core.c
index ce057a568673..020ea7f05520 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -44,6 +44,572 @@ static int __init sysfs_deprecated_setup(char *arg)
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
+/* Device links support. */
+
+#ifdef CONFIG_SRCU
+static DEFINE_MUTEX(device_links_lock);
+DEFINE_STATIC_SRCU(device_links_srcu);
+
+static inline void device_links_write_lock(void)
+{
+ mutex_lock(&device_links_lock);
+}
+
+static inline void device_links_write_unlock(void)
+{
+ mutex_unlock(&device_links_lock);
+}
+
+int device_links_read_lock(void)
+{
+ return srcu_read_lock(&device_links_srcu);
+}
+
+void device_links_read_unlock(int idx)
+{
+ srcu_read_unlock(&device_links_srcu, idx);
+}
+#else /* !CONFIG_SRCU */
+static DECLARE_RWSEM(device_links_lock);
+
+static inline void device_links_write_lock(void)
+{
+ down_write(&device_links_lock);
+}
+
+static inline void device_links_write_unlock(void)
+{
+ up_write(&device_links_lock);
+}
+
+int device_links_read_lock(void)
+{
+ down_read(&device_links_lock);
+ return 0;
+}
+
+void device_links_read_unlock(int not_used)
+{
+ up_read(&device_links_lock);
+}
+#endif /* !CONFIG_SRCU */
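The reader-side pattern these primitives are meant for, condensed from the list walkers added later in this series (the function name is illustrative, not part of the patch):

/*
 * Illustrative reader-side walk of a device's supplier links; the SRCU (or
 * rwsem) read lock pins the link objects while the RCU list is traversed.
 */
static void example_walk_suppliers(struct device *dev)
{
        struct device_link *link;
        int idx;

        idx = device_links_read_lock();

        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
                dev_dbg(dev, "supplier %s\n", dev_name(link->supplier));

        device_links_read_unlock(idx);
}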
+
+/**
+ * device_is_dependent - Check if one device depends on another one
+ * @dev: Device to check dependencies for.
+ * @target: Device to check against.
+ *
+ * Check if @target depends on @dev or on any device dependent on it (its child
+ * or its consumer, etc.). Return 1 if that is the case or 0 otherwise.
+ */
+static int device_is_dependent(struct device *dev, void *target)
+{
+ struct device_link *link;
+ int ret;
+
+ if (WARN_ON(dev == target))
+ return 1;
+
+ ret = device_for_each_child(dev, target, device_is_dependent);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (WARN_ON(link->consumer == target))
+ return 1;
+
+ ret = device_is_dependent(link->consumer, target);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int device_reorder_to_tail(struct device *dev, void *not_used)
+{
+ struct device_link *link;
+
+ /*
+ * Devices that have not been registered yet will be put to the ends
+ * of the lists during the registration, so skip them here.
+ */
+ if (device_is_registered(dev))
+ devices_kset_move_last(dev);
+
+ if (device_pm_initialized(dev))
+ device_pm_move_last(dev);
+
+ device_for_each_child(dev, NULL, device_reorder_to_tail);
+ list_for_each_entry(link, &dev->links.consumers, s_node)
+ device_reorder_to_tail(link->consumer, NULL);
+
+ return 0;
+}
+
+/**
+ * device_link_add - Create a link between two devices.
+ * @consumer: Consumer end of the link.
+ * @supplier: Supplier end of the link.
+ * @flags: Link flags.
+ *
+ * The caller is responsible for the proper synchronization of the link creation
+ * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
+ * runtime PM framework to take the link into account. Second, if the
+ * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
+ * be forced into the active metastate and reference-counted upon the creation
+ * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
+ * ignored.
+ *
+ * If the DL_FLAG_AUTOREMOVE is set, the link will be removed automatically
+ * when the consumer device driver unbinds from it. The combination of both
+ * DL_FLAG_AUTOREMOVE and DL_FLAG_STATELESS set is invalid and will cause NULL
+ * to be returned.
+ *
+ * A side effect of the link creation is re-ordering of dpm_list and the
+ * devices_kset list by moving the consumer device and all devices depending
+ * on it to the ends of these lists (that does not happen to devices that have
+ * not been registered when this function is called).
+ *
+ * The supplier device is required to be registered when this function is called
+ * and NULL will be returned if that is not the case. The consumer device need
+ * not be registered, however.
+ */
+struct device_link *device_link_add(struct device *consumer,
+ struct device *supplier, u32 flags)
+{
+ struct device_link *link;
+
+ if (!consumer || !supplier ||
+ ((flags & DL_FLAG_STATELESS) && (flags & DL_FLAG_AUTOREMOVE)))
+ return NULL;
+
+ device_links_write_lock();
+ device_pm_lock();
+
+ /*
+ * If the supplier has not been fully registered yet or there is a
+ * reverse dependency between the consumer and the supplier already in
+ * the graph, return NULL.
+ */
+ if (!device_pm_initialized(supplier)
+ || device_is_dependent(consumer, supplier)) {
+ link = NULL;
+ goto out;
+ }
+
+ list_for_each_entry(link, &supplier->links.consumers, s_node)
+ if (link->consumer == consumer)
+ goto out;
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ goto out;
+
+ if (flags & DL_FLAG_PM_RUNTIME) {
+ if (flags & DL_FLAG_RPM_ACTIVE) {
+ if (pm_runtime_get_sync(supplier) < 0) {
+ pm_runtime_put_noidle(supplier);
+ kfree(link);
+ link = NULL;
+ goto out;
+ }
+ link->rpm_active = true;
+ }
+ pm_runtime_new_link(consumer);
+ }
+ get_device(supplier);
+ link->supplier = supplier;
+ INIT_LIST_HEAD(&link->s_node);
+ get_device(consumer);
+ link->consumer = consumer;
+ INIT_LIST_HEAD(&link->c_node);
+ link->flags = flags;
+
+ /* Determine the initial link state. */
+ if (flags & DL_FLAG_STATELESS) {
+ link->status = DL_STATE_NONE;
+ } else {
+ switch (supplier->links.status) {
+ case DL_DEV_DRIVER_BOUND:
+ switch (consumer->links.status) {
+ case DL_DEV_PROBING:
+ /*
+ * Balance the decrementation of the supplier's
+ * runtime PM usage counter after consumer probe
+ * in driver_probe_device().
+ */
+ if (flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_get_sync(supplier);
+
+ link->status = DL_STATE_CONSUMER_PROBE;
+ break;
+ case DL_DEV_DRIVER_BOUND:
+ link->status = DL_STATE_ACTIVE;
+ break;
+ default:
+ link->status = DL_STATE_AVAILABLE;
+ break;
+ }
+ break;
+ case DL_DEV_UNBINDING:
+ link->status = DL_STATE_SUPPLIER_UNBIND;
+ break;
+ default:
+ link->status = DL_STATE_DORMANT;
+ break;
+ }
+ }
+
+ /*
+ * Move the consumer and all of the devices depending on it to the end
+ * of dpm_list and the devices_kset list.
+ *
+ * It is necessary to hold dpm_list locked throughout all that or else
+ * we may end up suspending with a wrong ordering of it.
+ */
+ device_reorder_to_tail(consumer, NULL);
+
+ list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
+ list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
+
+ dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
+
+ out:
+ device_pm_unlock();
+ device_links_write_unlock();
+ return link;
+}
+EXPORT_SYMBOL_GPL(device_link_add);
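A minimal sketch of a consumer driver using this interface from its probe path; the platform-device wrapper and the supplier lookup are assumptions for illustration, only the device_link_add() call and its flags come from the code above:

#include <linux/device.h>
#include <linux/platform_device.h>

/* Hypothetical lookup of the supplier's struct device; not part of the patch. */
struct device *example_find_supplier(struct platform_device *pdev);

static int example_consumer_probe(struct platform_device *pdev)
{
        struct device *supplier = example_find_supplier(pdev);
        struct device_link *link;

        if (!supplier)
                return -EPROBE_DEFER;

        /* Runtime PM follows the link; it is dropped when this driver unbinds. */
        link = device_link_add(&pdev->dev, supplier,
                               DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE);
        if (!link)
                return -ENODEV;

        return 0;
}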
+
+static void device_link_free(struct device_link *link)
+{
+ put_device(link->consumer);
+ put_device(link->supplier);
+ kfree(link);
+}
+
+#ifdef CONFIG_SRCU
+static void __device_link_free_srcu(struct rcu_head *rhead)
+{
+ device_link_free(container_of(rhead, struct device_link, rcu_head));
+}
+
+static void __device_link_del(struct device_link *link)
+{
+ dev_info(link->consumer, "Dropping the link to %s\n",
+ dev_name(link->supplier));
+
+ if (link->flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_drop_link(link->consumer);
+
+ list_del_rcu(&link->s_node);
+ list_del_rcu(&link->c_node);
+ call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
+}
+#else /* !CONFIG_SRCU */
+static void __device_link_del(struct device_link *link)
+{
+ dev_info(link->consumer, "Dropping the link to %s\n",
+ dev_name(link->supplier));
+
+ list_del(&link->s_node);
+ list_del(&link->c_node);
+ device_link_free(link);
+}
+#endif /* !CONFIG_SRCU */
+
+/**
+ * device_link_del - Delete a link between two devices.
+ * @link: Device link to delete.
+ *
+ * The caller must ensure proper synchronization of this function with runtime
+ * PM.
+ */
+void device_link_del(struct device_link *link)
+{
+ device_links_write_lock();
+ device_pm_lock();
+ __device_link_del(link);
+ device_pm_unlock();
+ device_links_write_unlock();
+}
+EXPORT_SYMBOL_GPL(device_link_del);
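Links created without DL_FLAG_AUTOREMOVE stay around until device_link_del() is called or one of the linked devices is deleted (see device_links_purge() below), so a consumer that manages the link itself pairs the calls. Continuing the hypothetical consumer from the previous sketch:

struct example_consumer {                       /* hypothetical driver state */
        struct device_link *link;               /* returned by device_link_add() */
};

static int example_consumer_remove(struct platform_device *pdev)
{
        struct example_consumer *priv = platform_get_drvdata(pdev);

        device_link_del(priv->link);            /* drop the link taken in probe */
        return 0;
}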
+
+static void device_links_missing_supplier(struct device *dev)
+{
+ struct device_link *link;
+
+ list_for_each_entry(link, &dev->links.suppliers, c_node)
+ if (link->status == DL_STATE_CONSUMER_PROBE)
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+}
+
+/**
+ * device_links_check_suppliers - Check presence of supplier drivers.
+ * @dev: Consumer device.
+ *
+ * Check links from this device to any suppliers. Walk the list of the device's
+ * links to suppliers and see if all of them are available. If not, simply
+ * return -EPROBE_DEFER.
+ *
+ * We need to guarantee that the supplier will not go away after the check has
+ * been positive here. It can only go away in __device_release_driver() and
+ * that function checks the device's links to consumers. This means we need to
+ * mark the link as "consumer probe in progress" to make the supplier removal
+ * wait for us to complete (or bad things may happen).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+int device_links_check_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int ret = 0;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.suppliers, c_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->status != DL_STATE_AVAILABLE) {
+ device_links_missing_supplier(dev);
+ ret = -EPROBE_DEFER;
+ break;
+ }
+ WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
+ }
+ dev->links.status = DL_DEV_PROBING;
+
+ device_links_write_unlock();
+ return ret;
+}
+
+/**
+ * device_links_driver_bound - Update device links after probing its driver.
+ * @dev: Device to update the links for.
+ *
+ * The probe has been successful, so update links from this device to any
+ * consumers by changing their status to "available".
+ *
+ * Also change the status of @dev's links to suppliers to "active".
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_driver_bound(struct device *dev)
+{
+ struct device_link *link;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ WARN_ON(link->status != DL_STATE_DORMANT);
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+ }
+
+ list_for_each_entry(link, &dev->links.suppliers, c_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
+ WRITE_ONCE(link->status, DL_STATE_ACTIVE);
+ }
+
+ dev->links.status = DL_DEV_DRIVER_BOUND;
+
+ device_links_write_unlock();
+}
+
+/**
+ * __device_links_no_driver - Update links of a device without a driver.
+ * @dev: Device without a driver.
+ *
+ * Delete all non-persistent links from this device to any suppliers.
+ *
+ * Persistent links stay around, but their status is changed to "available",
+ * unless they already are in the "supplier unbind in progress" state in which
+ * case they need not be updated.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+static void __device_links_no_driver(struct device *dev)
+{
+ struct device_link *link, *ln;
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->flags & DL_FLAG_AUTOREMOVE)
+ __device_link_del(link);
+ else if (link->status != DL_STATE_SUPPLIER_UNBIND)
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+ }
+
+ dev->links.status = DL_DEV_NO_DRIVER;
+}
+
+void device_links_no_driver(struct device *dev)
+{
+ device_links_write_lock();
+ __device_links_no_driver(dev);
+ device_links_write_unlock();
+}
+
+/**
+ * device_links_driver_cleanup - Update links after driver removal.
+ * @dev: Device whose driver has just gone away.
+ *
+ * Update links to consumers for @dev by changing their status to "dormant" and
+ * invoke %__device_links_no_driver() to update links to suppliers for it as
+ * appropriate.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_driver_cleanup(struct device *dev)
+{
+ struct device_link *link;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ WARN_ON(link->flags & DL_FLAG_AUTOREMOVE);
+ WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
+ WRITE_ONCE(link->status, DL_STATE_DORMANT);
+ }
+
+ __device_links_no_driver(dev);
+
+ device_links_write_unlock();
+}
+
+/**
+ * device_links_busy - Check if there are any busy links to consumers.
+ * @dev: Device to check.
+ *
+ * Check each consumer of the device and return 'true' if its link's status
+ * is one of "consumer probe" or "active" (meaning that the given consumer is
+ * probing right now or its driver is present). Otherwise, change the link
+ * state to "supplier unbind" to prevent the consumer from being probed
+ * successfully going forward.
+ *
+ * Return 'false' if there are no probing or active consumers.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+bool device_links_busy(struct device *dev)
+{
+ struct device_link *link;
+ bool ret = false;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->status == DL_STATE_CONSUMER_PROBE
+ || link->status == DL_STATE_ACTIVE) {
+ ret = true;
+ break;
+ }
+ WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
+ }
+
+ dev->links.status = DL_DEV_UNBINDING;
+
+ device_links_write_unlock();
+ return ret;
+}
+
+/**
+ * device_links_unbind_consumers - Force unbind consumers of the given device.
+ * @dev: Device to unbind the consumers of.
+ *
+ * Walk the list of links to consumers for @dev and if any of them is in the
+ * "consumer probe" state, wait for all device probes in progress to complete
+ * and start over.
+ *
+ * If that's not the case, change the status of the link to "supplier unbind"
+ * and check if the link was in the "active" state. If so, force the consumer
+ * driver to unbind and start over (the consumer will not re-probe as we have
+ * changed the state of the link already).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_unbind_consumers(struct device *dev)
+{
+ struct device_link *link;
+
+ start:
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ enum device_link_state status;
+
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ status = link->status;
+ if (status == DL_STATE_CONSUMER_PROBE) {
+ device_links_write_unlock();
+
+ wait_for_device_probe();
+ goto start;
+ }
+ WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
+ if (status == DL_STATE_ACTIVE) {
+ struct device *consumer = link->consumer;
+
+ get_device(consumer);
+
+ device_links_write_unlock();
+
+ device_release_driver_internal(consumer, NULL,
+ consumer->parent);
+ put_device(consumer);
+ goto start;
+ }
+ }
+
+ device_links_write_unlock();
+}
+
+/**
+ * device_links_purge - Delete existing links to other devices.
+ * @dev: Target device.
+ */
+static void device_links_purge(struct device *dev)
+{
+ struct device_link *link, *ln;
+
+ /*
+ * Delete all of the remaining links from this device to any other
+ * devices (either consumers or suppliers).
+ */
+ device_links_write_lock();
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
+ WARN_ON(link->status == DL_STATE_ACTIVE);
+ __device_link_del(link);
+ }
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
+ WARN_ON(link->status != DL_STATE_DORMANT &&
+ link->status != DL_STATE_NONE);
+ __device_link_del(link);
+ }
+
+ device_links_write_unlock();
+}
+
+/* Device links support end. */
+
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
static struct kobject *dev_kobj;
@@ -494,8 +1060,14 @@ static int device_add_attrs(struct device *dev)
goto err_remove_dev_groups;
}
+ error = device_create_file(dev, &dev_attr_deferred_probe);
+ if (error)
+ goto err_remove_online;
+
return 0;
+ err_remove_online:
+ device_remove_file(dev, &dev_attr_online);
err_remove_dev_groups:
device_remove_groups(dev, dev->groups);
err_remove_type_groups:
@@ -513,6 +1085,7 @@ static void device_remove_attrs(struct device *dev)
struct class *class = dev->class;
const struct device_type *type = dev->type;
+ device_remove_file(dev, &dev_attr_deferred_probe);
device_remove_file(dev, &dev_attr_online);
device_remove_groups(dev, dev->groups);
@@ -711,6 +1284,9 @@ void device_initialize(struct device *dev)
#ifdef CONFIG_GENERIC_MSI_IRQ
INIT_LIST_HEAD(&dev->msi_list);
#endif
+ INIT_LIST_HEAD(&dev->links.consumers);
+ INIT_LIST_HEAD(&dev->links.suppliers);
+ dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);
@@ -1258,6 +1834,8 @@ void device_del(struct device *dev)
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DEL_DEVICE, dev);
+
+ device_links_purge(dev);
dpm_sysfs_remove(dev);
if (parent)
klist_del(&dev->p->knode_parent);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d76cd97a98b6..a8b258e5407b 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -53,6 +53,19 @@ static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
+static ssize_t deferred_probe_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ bool value;
+
+ mutex_lock(&deferred_probe_mutex);
+ value = !list_empty(&dev->p->deferred_probe);
+ mutex_unlock(&deferred_probe_mutex);
+
+ return sprintf(buf, "%d\n", value);
+}
+DEVICE_ATTR_RO(deferred_probe);
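The attribute simply reports whether the device currently sits on the deferred probe list; a hypothetical user-space check (the sysfs path is an assumption, the file prints 1 while the probe is deferred and 0 otherwise):

#include <stdio.h>

int main(void)
{
        char buf[8] = "";
        /* Assumed path; every device directory under /sys/devices gains the file. */
        FILE *f = fopen("/sys/devices/platform/example.0/deferred_probe", "r");

        if (!f)
                return 1;
        if (fgets(buf, sizeof(buf), f))
                printf("deferred_probe: %s", buf);
        fclose(f);
        return 0;
}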
+
/*
 * In some cases, like suspend to RAM or hibernation, it might be reasonable
* to prohibit probing of devices as it could be unsafe.
@@ -244,6 +257,7 @@ static void driver_bound(struct device *dev)
__func__, dev_name(dev));
klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
+ device_links_driver_bound(dev);
device_pm_check_callbacks(dev);
@@ -338,6 +352,10 @@ static int really_probe(struct device *dev, struct device_driver *drv)
return ret;
}
+ ret = device_links_check_suppliers(dev);
+ if (ret)
+ return ret;
+
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
@@ -416,6 +434,7 @@ probe_failed:
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
pinctrl_bind_failed:
+ device_links_no_driver(dev);
devres_release_all(dev);
driver_sysfs_remove(dev);
dev->driver = NULL;
@@ -508,6 +527,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
drv->bus->name, __func__, dev_name(dev), drv->name);
+ pm_runtime_get_suppliers(dev);
if (dev->parent)
pm_runtime_get_sync(dev->parent);
@@ -518,6 +538,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
if (dev->parent)
pm_runtime_put(dev->parent);
+ pm_runtime_put_suppliers(dev);
return ret;
}
@@ -772,7 +793,7 @@ EXPORT_SYMBOL_GPL(driver_attach);
* __device_release_driver() must be called with @dev lock held.
* When called for a USB interface, @dev->parent lock must be held as well.
*/
-static void __device_release_driver(struct device *dev)
+static void __device_release_driver(struct device *dev, struct device *parent)
{
struct device_driver *drv;
@@ -781,7 +802,27 @@ static void __device_release_driver(struct device *dev)
if (driver_allows_async_probing(drv))
async_synchronize_full();
+ while (device_links_busy(dev)) {
+ device_unlock(dev);
+ if (parent)
+ device_unlock(parent);
+
+ device_links_unbind_consumers(dev);
+ if (parent)
+ device_lock(parent);
+
+ device_lock(dev);
+ /*
+ * A concurrent invocation of the same function might
+ * have released the driver successfully while this one
+ * was waiting, so check for that.
+ */
+ if (dev->driver != drv)
+ return;
+ }
+
pm_runtime_get_sync(dev);
+ pm_runtime_clean_up_links(dev);
driver_sysfs_remove(dev);
@@ -796,6 +837,8 @@ static void __device_release_driver(struct device *dev)
dev->bus->remove(dev);
else if (drv->remove)
drv->remove(dev);
+
+ device_links_driver_cleanup(dev);
devres_release_all(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
@@ -812,12 +855,32 @@ static void __device_release_driver(struct device *dev)
}
}
+void device_release_driver_internal(struct device *dev,
+ struct device_driver *drv,
+ struct device *parent)
+{
+ if (parent)
+ device_lock(parent);
+
+ device_lock(dev);
+ if (!drv || drv == dev->driver)
+ __device_release_driver(dev, parent);
+
+ device_unlock(dev);
+ if (parent)
+ device_unlock(parent);
+}
+
/**
* device_release_driver - manually detach device from driver.
* @dev: device.
*
* Manually detach device from driver.
* When called for a USB interface, @dev->parent lock must be held.
+ *
+ * If this function is to be called with @dev->parent lock held, ensure that
+ * the device's consumers are unbound in advance or that their locks can be
+ * acquired under the @dev->parent lock.
*/
void device_release_driver(struct device *dev)
{
@@ -826,9 +889,7 @@ void device_release_driver(struct device *dev)
* within their ->remove callback for the same device, they
* will deadlock right here.
*/
- device_lock(dev);
- __device_release_driver(dev);
- device_unlock(dev);
+ device_release_driver_internal(dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(device_release_driver);
@@ -853,15 +914,7 @@ void driver_detach(struct device_driver *drv)
dev = dev_prv->device;
get_device(dev);
spin_unlock(&drv->p->klist_devices.k_lock);
-
- if (dev->parent) /* Needed for USB */
- device_lock(dev->parent);
- device_lock(dev);
- if (dev->driver == drv)
- __device_release_driver(dev);
- device_unlock(dev);
- if (dev->parent)
- device_unlock(dev->parent);
+ device_release_driver_internal(dev, drv, dev->parent);
put_device(dev);
}
}
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 240374fd1838..7be310f7db73 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -160,18 +160,20 @@ static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
return count;
}
+static CLASS_ATTR_RW(disabled);
-static struct class_attribute devcd_class_attrs[] = {
- __ATTR_RW(disabled),
- __ATTR_NULL
+static struct attribute *devcd_class_attrs[] = {
+ &class_attr_disabled.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(devcd_class);
static struct class devcd_class = {
.name = "devcoredump",
.owner = THIS_MODULE,
.dev_release = devcd_dev_release,
.dev_groups = devcd_dev_groups,
- .class_attrs = devcd_class_attrs,
+ .class_groups = devcd_class_groups,
};
static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 8f8b68c80986..efd71cf4fdea 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -108,13 +108,13 @@ void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
EXPORT_SYMBOL(dmam_free_coherent);
/**
- * dmam_alloc_non_coherent - Managed dma_alloc_non_coherent()
+ * dmam_alloc_non_coherent - Managed dma_alloc_noncoherent()
* @dev: Device to allocate non_coherent memory for
* @size: Size of allocation
* @dma_handle: Out argument for allocated DMA handle
* @gfp: Allocation flags
*
- * Managed dma_alloc_non_coherent(). Memory allocated using this
+ * Managed dma_alloc_noncoherent(). Memory allocated using this
* function will be automatically released on driver detach.
*
* RETURNS:
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 22d1760a4278..4497d263209f 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -30,6 +30,7 @@
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>
+#include <linux/swait.h>
#include <generated/utsrelease.h>
@@ -91,10 +92,11 @@ static inline bool fw_is_builtin_firmware(const struct firmware *fw)
}
#endif
-enum {
+enum fw_status {
+ FW_STATUS_UNKNOWN,
FW_STATUS_LOADING,
FW_STATUS_DONE,
- FW_STATUS_ABORT,
+ FW_STATUS_ABORTED,
};
static int loading_timeout = 60; /* In seconds */
@@ -104,6 +106,82 @@ static inline long firmware_loading_timeout(void)
return loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET;
}
+/*
+ * Concurrent request_firmware() calls for the same firmware need to be
+ * serialized. struct fw_state is a simple state machine that holds the
+ * state of the firmware loading.
+ */
+struct fw_state {
+ struct swait_queue_head wq;
+ enum fw_status status;
+};
+
+static void fw_state_init(struct fw_state *fw_st)
+{
+ init_swait_queue_head(&fw_st->wq);
+ fw_st->status = FW_STATUS_UNKNOWN;
+}
+
+static inline bool __fw_state_is_done(enum fw_status status)
+{
+ return status == FW_STATUS_DONE || status == FW_STATUS_ABORTED;
+}
+
+static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
+{
+ long ret;
+
+ ret = swait_event_interruptible_timeout(fw_st->wq,
+ __fw_state_is_done(READ_ONCE(fw_st->status)),
+ timeout);
+ if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
+ return -ENOENT;
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return ret < 0 ? ret : 0;
+}
+
+static void __fw_state_set(struct fw_state *fw_st,
+ enum fw_status status)
+{
+ WRITE_ONCE(fw_st->status, status);
+
+ if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
+ swake_up(&fw_st->wq);
+}
+
+#define fw_state_start(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_LOADING)
+#define fw_state_done(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_DONE)
+#define fw_state_wait(fw_st) \
+ __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
+
+#ifndef CONFIG_FW_LOADER_USER_HELPER
+
+#define fw_state_is_aborted(fw_st) false
+
+#else /* CONFIG_FW_LOADER_USER_HELPER */
+
+static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
+{
+ return fw_st->status == status;
+}
+
+#define fw_state_aborted(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_ABORTED)
+#define fw_state_is_done(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_DONE)
+#define fw_state_is_loading(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_LOADING)
+#define fw_state_is_aborted(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_ABORTED)
+#define fw_state_wait_timeout(fw_st, timeout) \
+ __fw_state_wait_common(fw_st, timeout)
+
+#endif /* CONFIG_FW_LOADER_USER_HELPER */
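Condensing the call sites that follow, the intended lifecycle of a firmware_buf's fw_state looks roughly like this (illustration only, not additional patch code; fw_state_aborted() and the other helper-only macros exist just under CONFIG_FW_LOADER_USER_HELPER):

/*
 * Illustration of the transitions: UNKNOWN -> (LOADING) -> DONE or ABORTED,
 * with fw_state_wait() callers released on the two final states.
 */
static int example_fw_lifecycle(struct firmware_buf *buf, bool success)
{
        fw_state_init(&buf->fw_st);             /* done in __allocate_fw_buf() */
        fw_state_start(&buf->fw_st);            /* user wrote 1 to 'loading' */

        if (success)
                fw_state_done(&buf->fw_st);     /* wakes fw_state_wait() callers */
        else
                fw_state_aborted(&buf->fw_st);  /* waiters then see -ENOENT */

        return fw_state_wait(&buf->fw_st);      /* 0 on DONE, -ENOENT on ABORTED */
}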
+
/* firmware behavior options */
#define FW_OPT_UEVENT (1U << 0)
#define FW_OPT_NOWAIT (1U << 1)
@@ -145,9 +223,8 @@ struct firmware_cache {
struct firmware_buf {
struct kref ref;
struct list_head list;
- struct completion completion;
struct firmware_cache *fwc;
- unsigned long status;
+ struct fw_state fw_st;
void *data;
size_t size;
size_t allocated_size;
@@ -205,7 +282,7 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
buf->fwc = fwc;
buf->data = dbuf;
buf->allocated_size = size;
- init_completion(&buf->completion);
+ fw_state_init(&buf->fw_st);
#ifdef CONFIG_FW_LOADER_USER_HELPER
INIT_LIST_HEAD(&buf->pending_list);
#endif
@@ -305,15 +382,6 @@ static const char * const fw_path[] = {
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
-static void fw_finish_direct_load(struct device *device,
- struct firmware_buf *buf)
-{
- mutex_lock(&fw_lock);
- set_bit(FW_STATUS_DONE, &buf->status);
- complete_all(&buf->completion);
- mutex_unlock(&fw_lock);
-}
-
static int
fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
{
@@ -360,7 +428,7 @@ fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
}
dev_dbg(device, "direct-loading %s\n", buf->fw_id);
buf->size = size;
- fw_finish_direct_load(device, buf);
+ fw_state_done(&buf->fw_st);
break;
}
__putname(path);
@@ -478,12 +546,11 @@ static void __fw_load_abort(struct firmware_buf *buf)
 * There is a small window in which the user can write to 'loading'
* between loading done and disappearance of 'loading'
*/
- if (test_bit(FW_STATUS_DONE, &buf->status))
+ if (fw_state_is_done(&buf->fw_st))
return;
list_del_init(&buf->pending_list);
- set_bit(FW_STATUS_ABORT, &buf->status);
- complete_all(&buf->completion);
+ fw_state_aborted(&buf->fw_st);
}
static void fw_load_abort(struct firmware_priv *fw_priv)
@@ -496,9 +563,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
fw_priv->buf = NULL;
}
-#define is_fw_load_aborted(buf) \
- test_bit(FW_STATUS_ABORT, &(buf)->status)
-
static LIST_HEAD(pending_fw_head);
/* reboot notifier to avoid deadlock with usermode_lock */
@@ -546,11 +610,13 @@ static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
return count;
}
+static CLASS_ATTR_RW(timeout);
-static struct class_attribute firmware_class_attrs[] = {
- __ATTR_RW(timeout),
- __ATTR_NULL
+static struct attribute *firmware_class_attrs[] = {
+ &class_attr_timeout.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(firmware_class);
static void fw_dev_release(struct device *dev)
{
@@ -585,7 +651,7 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
static struct class firmware_class = {
.name = "firmware",
- .class_attrs = firmware_class_attrs,
+ .class_groups = firmware_class_groups,
.dev_uevent = firmware_uevent,
.dev_release = fw_dev_release,
};
@@ -598,7 +664,7 @@ static ssize_t firmware_loading_show(struct device *dev,
mutex_lock(&fw_lock);
if (fw_priv->buf)
- loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+ loading = fw_state_is_loading(&fw_priv->buf->fw_st);
mutex_unlock(&fw_lock);
return sprintf(buf, "%d\n", loading);
@@ -653,23 +719,20 @@ static ssize_t firmware_loading_store(struct device *dev,
switch (loading) {
case 1:
/* discarding any previous partial load */
- if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
+ if (!fw_state_is_done(&fw_buf->fw_st)) {
for (i = 0; i < fw_buf->nr_pages; i++)
__free_page(fw_buf->pages[i]);
vfree(fw_buf->pages);
fw_buf->pages = NULL;
fw_buf->page_array_size = 0;
fw_buf->nr_pages = 0;
- set_bit(FW_STATUS_LOADING, &fw_buf->status);
+ fw_state_start(&fw_buf->fw_st);
}
break;
case 0:
- if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
+ if (fw_state_is_loading(&fw_buf->fw_st)) {
int rc;
- set_bit(FW_STATUS_DONE, &fw_buf->status);
- clear_bit(FW_STATUS_LOADING, &fw_buf->status);
-
/*
* Several loading requests may be pending on
* one same firmware buf, so let all requests
@@ -691,10 +754,11 @@ static ssize_t firmware_loading_store(struct device *dev,
*/
list_del_init(&fw_buf->pending_list);
if (rc) {
- set_bit(FW_STATUS_ABORT, &fw_buf->status);
+ fw_state_aborted(&fw_buf->fw_st);
written = rc;
+ } else {
+ fw_state_done(&fw_buf->fw_st);
}
- complete_all(&fw_buf->completion);
break;
}
/* fallthrough */
@@ -755,7 +819,7 @@ static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
mutex_lock(&fw_lock);
buf = fw_priv->buf;
- if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
+ if (!buf || fw_state_is_done(&buf->fw_st)) {
ret_count = -ENODEV;
goto out;
}
@@ -842,7 +906,7 @@ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
mutex_lock(&fw_lock);
buf = fw_priv->buf;
- if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
+ if (!buf || fw_state_is_done(&buf->fw_st)) {
retval = -ENODEV;
goto out;
}
@@ -955,17 +1019,14 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
timeout = MAX_JIFFY_OFFSET;
}
- retval = wait_for_completion_interruptible_timeout(&buf->completion,
- timeout);
- if (retval == -ERESTARTSYS || !retval) {
+ retval = fw_state_wait_timeout(&buf->fw_st, timeout);
+ if (retval < 0) {
mutex_lock(&fw_lock);
fw_load_abort(fw_priv);
mutex_unlock(&fw_lock);
- } else if (retval > 0) {
- retval = 0;
}
- if (is_fw_load_aborted(buf))
+ if (fw_state_is_aborted(&buf->fw_st))
retval = -EAGAIN;
else if (buf->is_paged_buf && !buf->data)
retval = -ENOMEM;
@@ -1015,35 +1076,12 @@ fw_load_from_user_helper(struct firmware *firmware, const char *name,
return -ENOENT;
}
-/* No abort during direct loading */
-#define is_fw_load_aborted(buf) false
-
#ifdef CONFIG_PM_SLEEP
static inline void kill_requests_without_uevent(void) { }
#endif
#endif /* CONFIG_FW_LOADER_USER_HELPER */
-
-/* wait until the shared firmware_buf becomes ready (or error) */
-static int sync_cached_firmware_buf(struct firmware_buf *buf)
-{
- int ret = 0;
-
- mutex_lock(&fw_lock);
- while (!test_bit(FW_STATUS_DONE, &buf->status)) {
- if (is_fw_load_aborted(buf)) {
- ret = -ENOENT;
- break;
- }
- mutex_unlock(&fw_lock);
- ret = wait_for_completion_interruptible(&buf->completion);
- mutex_lock(&fw_lock);
- }
- mutex_unlock(&fw_lock);
- return ret;
-}
-
/* prepare firmware and firmware_buf structs;
* return 0 if a firmware is already assigned, 1 if need to load one,
* or a negative error code
@@ -1077,7 +1115,7 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
firmware->priv = buf;
if (ret > 0) {
- ret = sync_cached_firmware_buf(buf);
+ ret = fw_state_wait(&buf->fw_st);
if (!ret) {
fw_set_page_data(buf, firmware);
return 0; /* assigned */
@@ -1095,7 +1133,7 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
struct firmware_buf *buf = fw->priv;
mutex_lock(&fw_lock);
- if (!buf->size || is_fw_load_aborted(buf)) {
+ if (!buf->size || fw_state_is_aborted(&buf->fw_st)) {
mutex_unlock(&fw_lock);
return -ENOENT;
}
@@ -1345,9 +1383,9 @@ static void request_firmware_work_func(struct work_struct *work)
*
* Asynchronous variant of request_firmware() for user contexts:
* - sleep for as small periods as possible since it may
- * increase kernel boot time of built-in device drivers
- * requesting firmware in their ->probe() methods, if
- * @gfp is GFP_KERNEL.
+ * increase kernel boot time of built-in device drivers
+ * requesting firmware in their ->probe() methods, if
+ * @gfp is GFP_KERNEL.
*
* - can't sleep at all if @gfp is GFP_ATOMIC.
**/
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 62c63c0c5c22..bb69e58c29f3 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -226,11 +226,9 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
{
unsigned long start_pfn;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
- struct page *first_page;
int ret;
start_pfn = section_nr_to_pfn(phys_index);
- first_page = pfn_to_page(start_pfn);
switch (action) {
case MEM_ONLINE:
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index eb474c882ebe..48c6294e9c34 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -131,6 +131,7 @@ void device_pm_add(struct device *dev)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
+ dev->power.in_dpm_list = true;
mutex_unlock(&dpm_list_mtx);
}
@@ -145,6 +146,7 @@ void device_pm_remove(struct device *dev)
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
list_del_init(&dev->power.entry);
+ dev->power.in_dpm_list = false;
mutex_unlock(&dpm_list_mtx);
device_wakeup_disable(dev);
pm_runtime_remove(dev);
@@ -244,6 +246,62 @@ static void dpm_wait_for_children(struct device *dev, bool async)
device_for_each_child(dev, &async, dpm_wait_fn);
}
+static void dpm_wait_for_suppliers(struct device *dev, bool async)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ /*
+ * If the supplier goes away right after we've checked the link to it,
+ * we'll wait for its completion to change the state, but that's fine,
+ * because the only things that will block as a result are the SRCU
+ * callbacks freeing the link objects for the links in the list we're
+ * walking.
+ */
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_wait(link->supplier, async);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_wait_for_superior(struct device *dev, bool async)
+{
+ dpm_wait(dev->parent, async);
+ dpm_wait_for_suppliers(dev, async);
+}
+
+static void dpm_wait_for_consumers(struct device *dev, bool async)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ /*
+ * The status of a device link can only be changed from "dormant" by a
+ * probe, but that cannot happen during system suspend/resume. In
+ * theory it can change to "dormant" at that time, but then it is
+ * reasonable to wait for the target device anyway (e.g. if it goes
+ * away, it's better to wait for it to go away completely and then
+ * continue instead of trying to continue in parallel with its
+ * unregistration).
+ */
+ list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_wait(link->consumer, async);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_wait_for_subordinate(struct device *dev, bool async)
+{
+ dpm_wait_for_children(dev, async);
+ dpm_wait_for_consumers(dev, async);
+}
+
/**
* pm_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
@@ -488,7 +546,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_noirq_suspended)
goto Out;
- dpm_wait(dev->parent, async);
+ dpm_wait_for_superior(dev, async);
if (dev->pm_domain) {
info = "noirq power domain ";
@@ -618,7 +676,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_late_suspended)
goto Out;
- dpm_wait(dev->parent, async);
+ dpm_wait_for_superior(dev, async);
if (dev->pm_domain) {
info = "early power domain ";
@@ -750,7 +808,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
}
- dpm_wait(dev->parent, async);
+ dpm_wait_for_superior(dev, async);
dpm_watchdog_set(&wd, dev);
device_lock(dev);
@@ -1027,7 +1085,7 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
- dpm_wait_for_children(dev, async);
+ dpm_wait_for_subordinate(dev, async);
if (async_error)
goto Complete;
@@ -1174,7 +1232,7 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
__pm_runtime_disable(dev, false);
- dpm_wait_for_children(dev, async);
+ dpm_wait_for_subordinate(dev, async);
if (async_error)
goto Complete;
@@ -1342,6 +1400,22 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
return error;
}
+static void dpm_clear_suppliers_direct_complete(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ spin_lock_irq(&link->supplier->power.lock);
+ link->supplier->power.direct_complete = false;
+ spin_unlock_irq(&link->supplier->power.lock);
+ }
+
+ device_links_read_unlock(idx);
+}
+
/**
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
@@ -1358,7 +1432,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
- dpm_wait_for_children(dev, async);
+ dpm_wait_for_subordinate(dev, async);
if (async_error)
goto Complete;
@@ -1454,6 +1528,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
spin_unlock_irq(&parent->power.lock);
}
+ dpm_clear_suppliers_direct_complete(dev);
}
device_unlock(dev);
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index a84332aefc2d..a46e97e515c5 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -144,6 +144,11 @@ extern void device_pm_move_after(struct device *, struct device *);
extern void device_pm_move_last(struct device *);
extern void device_pm_check_callbacks(struct device *dev);
+static inline bool device_pm_initialized(struct device *dev)
+{
+ return dev->power.in_dpm_list;
+}
+
#else /* !CONFIG_PM_SLEEP */
static inline void device_pm_sleep_init(struct device *dev) {}
@@ -163,6 +168,11 @@ static inline void device_pm_move_last(struct device *dev) {}
static inline void device_pm_check_callbacks(struct device *dev) {}
+static inline bool device_pm_initialized(struct device *dev)
+{
+ return device_is_registered(dev);
+}
+
#endif /* !CONFIG_PM_SLEEP */
static inline void device_pm_init(struct device *dev)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 26856d050037..872eac4cb1df 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -12,6 +12,8 @@
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>
+
+#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);
@@ -259,6 +261,42 @@ static int rpm_check_suspend_allowed(struct device *dev)
return retval;
}
+static int rpm_get_suppliers(struct device *dev)
+{
+ struct device_link *link;
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ int retval;
+
+ if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ continue;
+
+ if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
+ link->rpm_active)
+ continue;
+
+ retval = pm_runtime_get_sync(link->supplier);
+ if (retval < 0) {
+ pm_runtime_put_noidle(link->supplier);
+ return retval;
+ }
+ link->rpm_active = true;
+ }
+ return 0;
+}
+
+static void rpm_put_suppliers(struct device *dev)
+{
+ struct device_link *link;
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (link->rpm_active &&
+ READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
+ pm_runtime_put(link->supplier);
+ link->rpm_active = false;
+ }
+}
+
/**
* __rpm_callback - Run a given runtime PM callback for a given device.
* @cb: Runtime PM callback to run.
@@ -267,19 +305,57 @@ static int rpm_check_suspend_allowed(struct device *dev)
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
- int retval;
+ int retval, idx;
+ bool use_links = dev->power.links_count > 0;
- if (dev->power.irq_safe)
+ if (dev->power.irq_safe) {
spin_unlock(&dev->power.lock);
- else
+ } else {
spin_unlock_irq(&dev->power.lock);
+ /*
+ * Resume suppliers if necessary.
+ *
+ * The device's runtime PM status cannot change until this
+ * routine returns, so it is safe to read the status outside of
+ * the lock.
+ */
+ if (use_links && dev->power.runtime_status == RPM_RESUMING) {
+ idx = device_links_read_lock();
+
+ retval = rpm_get_suppliers(dev);
+ if (retval)
+ goto fail;
+
+ device_links_read_unlock(idx);
+ }
+ }
+
retval = cb(dev);
- if (dev->power.irq_safe)
+ if (dev->power.irq_safe) {
spin_lock(&dev->power.lock);
- else
+ } else {
+ /*
+ * If the device is suspending and the callback has returned
+ * success, drop the usage counters of the suppliers that have
+ * been reference counted on its resume.
+ *
+ * Do that if resume fails too.
+ */
+ if (use_links
+ && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
+ || (dev->power.runtime_status == RPM_RESUMING && retval))) {
+ idx = device_links_read_lock();
+
+ fail:
+ rpm_put_suppliers(dev);
+
+ device_links_read_unlock(idx);
+ }
+
spin_lock_irq(&dev->power.lock);
+ }
return retval;
}
@@ -1458,6 +1534,94 @@ void pm_runtime_remove(struct device *dev)
}
/**
+ * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
+ * @dev: Device whose driver is going to be removed.
+ *
+ * Check links from this device to any consumers and if any of them have active
+ * runtime PM references to the device, drop the usage counter of the device
+ * (once per link).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ *
+ * Since the device is guaranteed to be runtime-active at the point this is
+ * called, nothing else needs to be done here.
+ *
+ * Moreover, this is called after device_links_busy() has returned 'false', so
+ * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
+ * therefore rpm_active can't be manipulated concurrently.
+ */
+void pm_runtime_clean_up_links(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->rpm_active) {
+ pm_runtime_put_noidle(dev);
+ link->rpm_active = false;
+ }
+ }
+
+ device_links_read_unlock(idx);
+}
+
+/**
+ * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
+ * @dev: Consumer device.
+ */
+void pm_runtime_get_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (link->flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_get_sync(link->supplier);
+
+ device_links_read_unlock(idx);
+}
+
+/**
+ * pm_runtime_put_suppliers - Drop references to supplier devices.
+ * @dev: Consumer device.
+ */
+void pm_runtime_put_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (link->flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_put(link->supplier);
+
+ device_links_read_unlock(idx);
+}
+
+void pm_runtime_new_link(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ dev->power.links_count++;
+ spin_unlock_irq(&dev->power.lock);
+}
+
+void pm_runtime_drop_link(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ WARN_ON(dev->power.links_count == 0);
+ dev->power.links_count--;
+ spin_unlock_irq(&dev->power.lock);
+}
+
+/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.
*
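
For the runtime-PM half of device links, no new calls are needed in consumer drivers: __rpm_callback() above takes references on DL_FLAG_PM_RUNTIME suppliers via rpm_get_suppliers() while the consumer resumes, and drops them via rpm_put_suppliers() once it has suspended. A minimal, hypothetical consumer relying on that behaviour (sketch only; the function name is made up):

#include <linux/pm_runtime.h>

/*
 * Hypothetical consumer I/O path: a plain pm_runtime_get_sync() on the
 * consumer is enough, because the core resumes every linked supplier
 * first while the consumer's resume callback is running.
 */
static int example_consumer_transfer(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... access the hardware; linked suppliers are powered up here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}
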
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
new file mode 100644
index 000000000000..9aa0d45a60db
--- /dev/null
+++ b/drivers/base/test/Kconfig
@@ -0,0 +1,9 @@
+config TEST_ASYNC_DRIVER_PROBE
+ tristate "Build kernel module to test asynchronous driver probing"
+ depends on m
+ help
+ Enabling this option produces a kernel module that allows
+ testing asynchronous driver probing by the device core.
+ The module name will be test_async_driver_probe.ko
+
+ If unsure, say N.
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
new file mode 100644
index 000000000000..90477c5fd9f9
--- /dev/null
+++ b/drivers/base/test/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
new file mode 100644
index 000000000000..304d5c2bd5e9
--- /dev/null
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+
+#define TEST_PROBE_DELAY (5 * 1000) /* 5 sec */
+#define TEST_PROBE_THRESHOLD (TEST_PROBE_DELAY / 2)
+
+static int test_probe(struct platform_device *pdev)
+{
+ dev_info(&pdev->dev, "sleeping for %d msecs in probe\n",
+ TEST_PROBE_DELAY);
+ msleep(TEST_PROBE_DELAY);
+ dev_info(&pdev->dev, "done sleeping\n");
+
+ return 0;
+}
+
+static struct platform_driver async_driver = {
+ .driver = {
+ .name = "test_async_driver",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = test_probe,
+};
+
+static struct platform_driver sync_driver = {
+ .driver = {
+ .name = "test_sync_driver",
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
+ },
+ .probe = test_probe,
+};
+
+static struct platform_device *async_dev_1, *async_dev_2;
+static struct platform_device *sync_dev_1;
+
+static int __init test_async_probe_init(void)
+{
+ ktime_t calltime, delta;
+ unsigned long long duration;
+ int error;
+
+ pr_info("registering first asynchronous device...\n");
+
+ async_dev_1 = platform_device_register_simple("test_async_driver", 1,
+ NULL, 0);
+ if (IS_ERR(async_dev_1)) {
+ error = PTR_ERR(async_dev_1);
+ pr_err("failed to create async_dev_1: %d", error);
+ return error;
+ }
+
+ pr_info("registering asynchronous driver...\n");
+ calltime = ktime_get();
+ error = platform_driver_register(&async_driver);
+ if (error) {
+ pr_err("Failed to register async_driver: %d\n", error);
+ goto err_unregister_async_dev_1;
+ }
+
+ delta = ktime_sub(ktime_get(), calltime);
+ duration = (unsigned long long) ktime_to_ms(delta);
+ pr_info("registration took %lld msecs\n", duration);
+ if (duration > TEST_PROBE_THRESHOLD) {
+ pr_err("test failed: probe took too long\n");
+ error = -ETIMEDOUT;
+ goto err_unregister_async_driver;
+ }
+
+ pr_info("registering second asynchronous device...\n");
+ calltime = ktime_get();
+ async_dev_2 = platform_device_register_simple("test_async_driver", 2,
+ NULL, 0);
+ if (IS_ERR(async_dev_2)) {
+ error = PTR_ERR(async_dev_2);
+ pr_err("failed to create async_dev_2: %d", error);
+ goto err_unregister_async_driver;
+ }
+
+ delta = ktime_sub(ktime_get(), calltime);
+ duration = (unsigned long long) ktime_to_ms(delta);
+ pr_info("registration took %lld msecs\n", duration);
+ if (duration > TEST_PROBE_THRESHOLD) {
+ pr_err("test failed: probe took too long\n");
+ error = -ETIMEDOUT;
+ goto err_unregister_async_dev_2;
+ }
+
+ pr_info("registering synchronous driver...\n");
+
+ error = platform_driver_register(&sync_driver);
+ if (error) {
+ pr_err("Failed to register async_driver: %d\n", error);
+ goto err_unregister_async_dev_2;
+ }
+
+ pr_info("registering synchronous device...\n");
+ calltime = ktime_get();
+ sync_dev_1 = platform_device_register_simple("test_sync_driver", 1,
+ NULL, 0);
+ if (IS_ERR(sync_dev_1)) {
+ error = PTR_ERR(sync_dev_1);
+ pr_err("failed to create sync_dev_1: %d", error);
+ goto err_unregister_sync_driver;
+ }
+
+ delta = ktime_sub(ktime_get(), calltime);
+ duration = (unsigned long long) ktime_to_ms(delta);
+ pr_info("registration took %lld msecs\n", duration);
+ if (duration < TEST_PROBE_THRESHOLD) {
+ pr_err("test failed: probe was too quick\n");
+ error = -ETIMEDOUT;
+ goto err_unregister_sync_dev_1;
+ }
+
+ pr_info("completed successfully");
+
+ return 0;
+
+err_unregister_sync_dev_1:
+ platform_device_unregister(sync_dev_1);
+
+err_unregister_sync_driver:
+ platform_driver_unregister(&sync_driver);
+
+err_unregister_async_dev_2:
+ platform_device_unregister(async_dev_2);
+
+err_unregister_async_driver:
+ platform_driver_unregister(&async_driver);
+
+err_unregister_async_dev_1:
+ platform_device_unregister(async_dev_1);
+
+ return error;
+}
+module_init(test_async_probe_init);
+
+static void __exit test_async_probe_exit(void)
+{
+ platform_driver_unregister(&async_driver);
+ platform_driver_unregister(&sync_driver);
+ platform_device_unregister(async_dev_1);
+ platform_device_unregister(async_dev_2);
+ platform_device_unregister(sync_dev_1);
+}
+module_exit(test_async_probe_exit);
+
+MODULE_DESCRIPTION("Test module for asynchronous driver probing");
+MODULE_AUTHOR("Dmitry Torokhov <dtor@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 1537302e56e3..a18de9d727b0 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -260,43 +260,6 @@ scsi_cmd_stack_free(ctlr_info_t *h)
}
#if 0
-static int xmargin=8;
-static int amargin=60;
-
-static void
-print_bytes (unsigned char *c, int len, int hex, int ascii)
-{
-
- int i;
- unsigned char *x;
-
- if (hex)
- {
- x = c;
- for (i=0;i<len;i++)
- {
- if ((i % xmargin) == 0 && i>0) printk("\n");
- if ((i % xmargin) == 0) printk("0x%04x:", i);
- printk(" %02x", *x);
- x++;
- }
- printk("\n");
- }
- if (ascii)
- {
- x = c;
- for (i=0;i<len;i++)
- {
- if ((i % amargin) == 0 && i>0) printk("\n");
- if ((i % amargin) == 0) printk("0x%04x:", i);
- if (*x > 26 && *x < 128) printk("%c", *x);
- else printk(".");
- x++;
- }
- printk("\n");
- }
-}
-
static void
print_cmd(CommandList_struct *cp)
{
@@ -305,30 +268,13 @@ print_cmd(CommandList_struct *cp)
printk("sgtot:%d\n", cp->Header.SGTotal);
printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper,
cp->Header.Tag.lower);
- printk("LUN:0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- cp->Header.LUN.LunAddrBytes[0],
- cp->Header.LUN.LunAddrBytes[1],
- cp->Header.LUN.LunAddrBytes[2],
- cp->Header.LUN.LunAddrBytes[3],
- cp->Header.LUN.LunAddrBytes[4],
- cp->Header.LUN.LunAddrBytes[5],
- cp->Header.LUN.LunAddrBytes[6],
- cp->Header.LUN.LunAddrBytes[7]);
+ printk("LUN:0x%8phN\n", cp->Header.LUN.LunAddrBytes);
printk("CDBLen:%d\n", cp->Request.CDBLen);
printk("Type:%d\n",cp->Request.Type.Type);
printk("Attr:%d\n",cp->Request.Type.Attribute);
printk(" Dir:%d\n",cp->Request.Type.Direction);
printk("Timeout:%d\n",cp->Request.Timeout);
- printk( "CDB: %02x %02x %02x %02x %02x %02x %02x %02x"
- " %02x %02x %02x %02x %02x %02x %02x %02x\n",
- cp->Request.CDB[0], cp->Request.CDB[1],
- cp->Request.CDB[2], cp->Request.CDB[3],
- cp->Request.CDB[4], cp->Request.CDB[5],
- cp->Request.CDB[6], cp->Request.CDB[7],
- cp->Request.CDB[8], cp->Request.CDB[9],
- cp->Request.CDB[10], cp->Request.CDB[11],
- cp->Request.CDB[12], cp->Request.CDB[13],
- cp->Request.CDB[14], cp->Request.CDB[15]),
+ printk("CDB: %16ph\n", cp->Request.CDB);
printk("edesc.Addr: 0x%08x/0%08x, Len = %d\n",
cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower,
cp->ErrDesc.Len);
@@ -340,9 +286,7 @@ print_cmd(CommandList_struct *cp)
printk("offense size:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_size);
printk("offense byte:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_num);
printk("offense value:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
-
}
-
#endif
static int
@@ -782,8 +726,10 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
"reported\n", c);
break;
case CMD_INVALID: {
- /* print_bytes(c, sizeof(*c), 1, 0);
- print_cmd(c); */
+ /*
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false);
+ print_cmd(c);
+ */
/* We get CMD_INVALID if you address a non-existent tape drive instead
of a selection timeout (no response). You will see this if you yank
out a tape drive, then try to access it. This is kind of a shame
@@ -985,8 +931,10 @@ cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c)
dev_warn(&h->pdev->dev,
"%p is reported invalid (probably means "
"target device no longer present)\n", c);
- /* print_bytes((unsigned char *) c, sizeof(*c), 1, 0);
- print_cmd(c); */
+ /*
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false);
+ print_cmd(c);
+ */
}
break;
case CMD_PROTOCOL_ERR:
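
The cciss cleanup above relies on the kernel's %ph printf extension, which dumps a short buffer as hex bytes and replaces the long hand-written format strings. A standalone illustration (not part of the patch; buffer names are made up):

#include <linux/printk.h>
#include <linux/types.h>

/* Demonstrates the two %ph variants used in the hunk above. */
static void example_dump(const u8 *cdb, const u8 *lun)
{
	/* fixed-length dump: 16 space-separated hex bytes */
	printk(KERN_INFO "CDB: %16ph\n", cdb);

	/* %phN omits the separators, as used for the 8-byte LUN */
	printk(KERN_INFO "LUN: 0x%8phN\n", lun);
}
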
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3cc6d1d86f1e..415e79b69d34 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -533,13 +533,11 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
struct xenbus_device *dev = be->dev;
struct xen_blkif *blkif = be->blkif;
int err;
- int state = 0, discard_enable;
+ int state = 0;
struct block_device *bdev = be->blkif->vbd.bdev;
struct request_queue *q = bdev_get_queue(bdev);
- err = xenbus_scanf(XBT_NIL, dev->nodename, "discard-enable", "%d",
- &discard_enable);
- if (err == 1 && !discard_enable)
+ if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
return;
if (blk_queue_discard(q)) {
@@ -1039,30 +1037,24 @@ static int connect_ring(struct backend_info *be)
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -ENOSYS;
}
- err = xenbus_scanf(XBT_NIL, dev->otherend,
- "feature-persistent", "%u", &pers_grants);
- if (err <= 0)
- pers_grants = 0;
-
+ pers_grants = xenbus_read_unsigned(dev->otherend, "feature-persistent",
+ 0);
be->blkif->vbd.feature_gnt_persistent = pers_grants;
be->blkif->vbd.overflow_max_grants = 0;
/*
* Read the number of hardware queues from frontend.
*/
- err = xenbus_scanf(XBT_NIL, dev->otherend, "multi-queue-num-queues",
- "%u", &requested_num_queues);
- if (err < 0) {
- requested_num_queues = 1;
- } else {
- if (requested_num_queues > xenblk_max_queues
- || requested_num_queues == 0) {
- /* Buggy or malicious guest. */
- xenbus_dev_fatal(dev, err,
- "guest requested %u queues, exceeding the maximum of %u.",
- requested_num_queues, xenblk_max_queues);
- return -ENOSYS;
- }
+ requested_num_queues = xenbus_read_unsigned(dev->otherend,
+ "multi-queue-num-queues",
+ 1);
+ if (requested_num_queues > xenblk_max_queues
+ || requested_num_queues == 0) {
+ /* Buggy or malicious guest. */
+ xenbus_dev_fatal(dev, err,
+ "guest requested %u queues, exceeding the maximum of %u.",
+ requested_num_queues, xenblk_max_queues);
+ return -ENOSYS;
}
be->blkif->nr_rings = requested_num_queues;
if (xen_blkif_alloc_rings(be->blkif))
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c000fdf048b2..b2bdfa81f929 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1758,17 +1758,13 @@ static int talk_to_blkback(struct xenbus_device *dev,
const char *message = NULL;
struct xenbus_transaction xbt;
int err;
- unsigned int i, max_page_order = 0;
- unsigned int ring_page_order = 0;
+ unsigned int i, max_page_order;
+ unsigned int ring_page_order;
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "max-ring-page-order", "%u", &max_page_order);
- if (err != 1)
- info->nr_ring_pages = 1;
- else {
- ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
- info->nr_ring_pages = 1 << ring_page_order;
- }
+ max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
+ "max-ring-page-order", 0);
+ ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
+ info->nr_ring_pages = 1 << ring_page_order;
for (i = 0; i < info->nr_rings; i++) {
struct blkfront_ring_info *rinfo = &info->rinfo[i];
@@ -1877,18 +1873,14 @@ again:
static int negotiate_mq(struct blkfront_info *info)
{
- unsigned int backend_max_queues = 0;
- int err;
+ unsigned int backend_max_queues;
unsigned int i;
BUG_ON(info->nr_rings);
/* Check if backend supports multiple queues. */
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "multi-queue-max-queues", "%u", &backend_max_queues);
- if (err < 0)
- backend_max_queues = 1;
-
+ backend_max_queues = xenbus_read_unsigned(info->xbdev->otherend,
+ "multi-queue-max-queues", 1);
info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
/* We need at least one ring. */
if (!info->nr_rings)
@@ -2196,7 +2188,6 @@ static void blkfront_setup_discard(struct blkfront_info *info)
int err;
unsigned int discard_granularity;
unsigned int discard_alignment;
- unsigned int discard_secure;
info->feature_discard = 1;
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
@@ -2207,10 +2198,9 @@ static void blkfront_setup_discard(struct blkfront_info *info)
info->discard_granularity = discard_granularity;
info->discard_alignment = discard_alignment;
}
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "discard-secure", "%u", &discard_secure);
- if (err > 0)
- info->feature_secdiscard = !!discard_secure;
+ info->feature_secdiscard =
+ !!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
+ 0);
}
static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
@@ -2302,16 +2292,11 @@ out_of_memory:
*/
static void blkfront_gather_backend_features(struct blkfront_info *info)
{
- int err;
- int barrier, flush, discard, persistent;
unsigned int indirect_segments;
info->feature_flush = 0;
info->feature_fua = 0;
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-barrier", "%d", &barrier);
-
/*
* If there's no "feature-barrier" defined, then it means
* we're dealing with a very old backend which writes
@@ -2319,7 +2304,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
*
* If there are barriers, then we use flush.
*/
- if (err > 0 && barrier) {
+ if (xenbus_read_unsigned(info->xbdev->otherend, "feature-barrier", 0)) {
info->feature_flush = 1;
info->feature_fua = 1;
}
@@ -2328,35 +2313,23 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
* And if there is "feature-flush-cache" use that above
* barriers.
*/
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-flush-cache", "%d", &flush);
-
- if (err > 0 && flush) {
+ if (xenbus_read_unsigned(info->xbdev->otherend, "feature-flush-cache",
+ 0)) {
info->feature_flush = 1;
info->feature_fua = 0;
}
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-discard", "%d", &discard);
-
- if (err > 0 && discard)
+ if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
blkfront_setup_discard(info);
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-persistent", "%d", &persistent);
- if (err <= 0)
- info->feature_persistent = 0;
- else
- info->feature_persistent = persistent;
+ info->feature_persistent =
+ xenbus_read_unsigned(info->xbdev->otherend,
+ "feature-persistent", 0);
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-max-indirect-segments", "%u",
- &indirect_segments);
- if (err <= 0)
- info->max_indirect_segments = 0;
- else
- info->max_indirect_segments = min(indirect_segments,
- xen_blkif_max_segments);
+ indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
+ "feature-max-indirect-segments", 0);
+ info->max_indirect_segments = min(indirect_segments,
+ xen_blkif_max_segments);
}
/*
@@ -2421,11 +2394,9 @@ static void blkfront_connect(struct blkfront_info *info)
* provide this. Assume physical sector size to be the same as
* sector_size in that case.
*/
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "physical-sector-size", "%u", &physical_sector_size);
- if (err != 1)
- physical_sector_size = sector_size;
-
+ physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
+ "physical-sector-size",
+ sector_size);
blkfront_gather_backend_features(info);
for (i = 0; i < info->nr_rings; i++) {
err = blkfront_setup_indirect(&info->rinfo[i]);
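
The xen-blkback and xen-blkfront hunks above (and the xen-tpmfront one further down) all replace open-coded xenbus_scanf() error handling with xenbus_read_unsigned(), which is introduced elsewhere in this series. Roughly, the helper's semantics boil down to the following sketch (reconstructed from the call sites it replaces, not a verbatim copy of its implementation):

#include <xen/xenbus.h>

/*
 * Sketch: read an unsigned int from xenstore, falling back to a
 * caller-supplied default if the node is absent or unparseable.
 */
static unsigned int example_read_unsigned(const char *dir, const char *node,
					  unsigned int default_val)
{
	unsigned int val;

	if (xenbus_scanf(XBT_NIL, dir, node, "%u", &val) <= 0)
		val = default_val;

	return val;
}
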
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 45ba878ae025..fde005ef9d36 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -17,7 +17,6 @@ config DEVMEM
config DEVKMEM
bool "/dev/kmem virtual device support"
- default y
help
Say Y here if you want to support the /dev/kmem device. The
/dev/kmem device is rarely used, but can be used for certain
@@ -579,7 +578,7 @@ config DEVPORT
source "drivers/s390/char/Kconfig"
config TILE_SROM
- bool "Character-device access via hypervisor to the Tilera SPI ROM"
+ tristate "Character-device access via hypervisor to the Tilera SPI ROM"
depends on TILE
default y
---help---
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
index 8d3dfb0c8a26..1d1e7da8ad27 100644
--- a/drivers/char/pcmcia/Kconfig
+++ b/drivers/char/pcmcia/Kconfig
@@ -43,6 +43,17 @@ config CARDMAN_4040
(http://www.omnikey.com/), or a current development version of OpenCT
(http://www.opensc-project.org/opensc).
+config SCR24X
+ tristate "SCR24x Chip Card Interface support"
+ depends on PCMCIA
+ help
+ Enable support for the SCR24x PCMCIA Chip Card Interface.
+
+ To compile this driver as a module, choose M here.
+ The module will be called scr24x_cs.
+
+ If unsure, say N.
+
config IPWIRELESS
tristate "IPWireless 3G UMTS PCMCIA card support"
depends on PCMCIA && NETDEVICES && TTY
diff --git a/drivers/char/pcmcia/Makefile b/drivers/char/pcmcia/Makefile
index 0aae20985d57..5b836bc21406 100644
--- a/drivers/char/pcmcia/Makefile
+++ b/drivers/char/pcmcia/Makefile
@@ -7,3 +7,4 @@
obj-$(CONFIG_SYNCLINK_CS) += synclink_cs.o
obj-$(CONFIG_CARDMAN_4000) += cm4000_cs.o
obj-$(CONFIG_CARDMAN_4040) += cm4040_cs.o
+obj-$(CONFIG_SCR24X) += scr24x_cs.o
diff --git a/drivers/char/pcmcia/scr24x_cs.c b/drivers/char/pcmcia/scr24x_cs.c
new file mode 100644
index 000000000000..f6b43d9350f0
--- /dev/null
+++ b/drivers/char/pcmcia/scr24x_cs.c
@@ -0,0 +1,373 @@
+/*
+ * SCR24x PCMCIA Smart Card Reader Driver
+ *
+ * Copyright (C) 2005-2006 TL Sudheendran
+ * Copyright (C) 2016 Lubomir Rintel
+ *
+ * Derived from "scr24x_v4.2.6_Release.tar.gz" driver by TL Sudheendran.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#define CCID_HEADER_SIZE 10
+#define CCID_LENGTH_OFFSET 1
+#define CCID_MAX_LEN 271
+
+#define SCR24X_DATA(n) (1 + n)
+#define SCR24X_CMD_STATUS 7
+#define CMD_START 0x40
+#define CMD_WRITE_BYTE 0x41
+#define CMD_READ_BYTE 0x42
+#define STATUS_BUSY 0x80
+
+struct scr24x_dev {
+ struct device *dev;
+ struct cdev c_dev;
+ unsigned char buf[CCID_MAX_LEN];
+ int devno;
+ struct mutex lock;
+ struct kref refcnt;
+ u8 __iomem *regs;
+};
+
+#define SCR24X_DEVS 8
+static DECLARE_BITMAP(scr24x_minors, SCR24X_DEVS);
+
+static struct class *scr24x_class;
+static dev_t scr24x_devt;
+
+static void scr24x_delete(struct kref *kref)
+{
+ struct scr24x_dev *dev = container_of(kref, struct scr24x_dev,
+ refcnt);
+
+ kfree(dev);
+}
+
+static int scr24x_wait_ready(struct scr24x_dev *dev)
+{
+ u_char status;
+ int timeout = 100;
+
+ do {
+ status = ioread8(dev->regs + SCR24X_CMD_STATUS);
+ if (!(status & STATUS_BUSY))
+ return 0;
+
+ msleep(20);
+ } while (--timeout);
+
+ return -EIO;
+}
+
+static int scr24x_open(struct inode *inode, struct file *filp)
+{
+ struct scr24x_dev *dev = container_of(inode->i_cdev,
+ struct scr24x_dev, c_dev);
+
+ kref_get(&dev->refcnt);
+ filp->private_data = dev;
+
+ return nonseekable_open(inode, filp);
+}
+
+static int scr24x_release(struct inode *inode, struct file *filp)
+{
+ struct scr24x_dev *dev = filp->private_data;
+
+ /* We must not take the dev->lock here as scr24x_delete()
+ * might be called to remove the dev structure altogether.
+ * We don't need the lock anyway, since after the reference
+ * acquired in probe() is released in remove() the chrdev
+ * is already unregistered and no one can possibly acquire
+ * a reference via open() anymore. */
+ kref_put(&dev->refcnt, scr24x_delete);
+ return 0;
+}
+
+static int read_chunk(struct scr24x_dev *dev, size_t offset, size_t limit)
+{
+ size_t i, y;
+ int ret;
+
+ for (i = offset; i < limit; i += 5) {
+ iowrite8(CMD_READ_BYTE, dev->regs + SCR24X_CMD_STATUS);
+ ret = scr24x_wait_ready(dev);
+ if (ret < 0)
+ return ret;
+
+ for (y = 0; y < 5 && i + y < limit; y++)
+ dev->buf[i + y] = ioread8(dev->regs + SCR24X_DATA(y));
+ }
+
+ return 0;
+}
+
+static ssize_t scr24x_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct scr24x_dev *dev = filp->private_data;
+ int ret;
+ int len;
+
+ if (count < CCID_HEADER_SIZE)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&dev->lock))
+ return -ERESTARTSYS;
+
+ if (!dev->dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = scr24x_wait_ready(dev);
+ if (ret < 0)
+ goto out;
+ len = CCID_HEADER_SIZE;
+ ret = read_chunk(dev, 0, len);
+ if (ret < 0)
+ goto out;
+
+ len += le32_to_cpu(*(__le32 *)(&dev->buf[CCID_LENGTH_OFFSET]));
+ if (len > sizeof(dev->buf)) {
+ ret = -EIO;
+ goto out;
+ }
+ ret = read_chunk(dev, CCID_HEADER_SIZE, len);
+ if (ret < 0)
+ goto out;
+
+ if (len < count)
+ count = len;
+
+ if (copy_to_user(buf, dev->buf, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = count;
+out:
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static ssize_t scr24x_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct scr24x_dev *dev = filp->private_data;
+ size_t i, y;
+ int ret;
+
+ if (mutex_lock_interruptible(&dev->lock))
+ return -ERESTARTSYS;
+
+ if (!dev->dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (count > sizeof(dev->buf)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (copy_from_user(dev->buf, buf, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = scr24x_wait_ready(dev);
+ if (ret < 0)
+ goto out;
+
+ iowrite8(CMD_START, dev->regs + SCR24X_CMD_STATUS);
+ ret = scr24x_wait_ready(dev);
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < count; i += 5) {
+ for (y = 0; y < 5 && i + y < count; y++)
+ iowrite8(dev->buf[i + y], dev->regs + SCR24X_DATA(y));
+
+ iowrite8(CMD_WRITE_BYTE, dev->regs + SCR24X_CMD_STATUS);
+ ret = scr24x_wait_ready(dev);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = count;
+out:
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static const struct file_operations scr24x_fops = {
+ .owner = THIS_MODULE,
+ .read = scr24x_read,
+ .write = scr24x_write,
+ .open = scr24x_open,
+ .release = scr24x_release,
+ .llseek = no_llseek,
+};
+
+static int scr24x_config_check(struct pcmcia_device *link, void *priv_data)
+{
+ if (resource_size(link->resource[PCMCIA_IOPORT_0]) != 0x11)
+ return -ENODEV;
+ return pcmcia_request_io(link);
+}
+
+static int scr24x_probe(struct pcmcia_device *link)
+{
+ struct scr24x_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->devno = find_first_zero_bit(scr24x_minors, SCR24X_DEVS);
+ if (dev->devno >= SCR24X_DEVS) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ mutex_init(&dev->lock);
+ kref_init(&dev->refcnt);
+
+ link->priv = dev;
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+
+ ret = pcmcia_loop_config(link, scr24x_config_check, NULL);
+ if (ret < 0)
+ goto err;
+
+ dev->dev = &link->dev;
+ dev->regs = devm_ioport_map(&link->dev,
+ link->resource[PCMCIA_IOPORT_0]->start,
+ resource_size(link->resource[PCMCIA_IOPORT_0]));
+ if (!dev->regs) {
+ ret = -EIO;
+ goto err;
+ }
+
+ cdev_init(&dev->c_dev, &scr24x_fops);
+ dev->c_dev.owner = THIS_MODULE;
+ dev->c_dev.ops = &scr24x_fops;
+ ret = cdev_add(&dev->c_dev, MKDEV(MAJOR(scr24x_devt), dev->devno), 1);
+ if (ret < 0)
+ goto err;
+
+ ret = pcmcia_enable_device(link);
+ if (ret < 0) {
+ pcmcia_disable_device(link);
+ goto err;
+ }
+
+ device_create(scr24x_class, NULL, MKDEV(MAJOR(scr24x_devt), dev->devno),
+ NULL, "scr24x%d", dev->devno);
+
+ dev_info(&link->dev, "SCR24x Chip Card Interface\n");
+ return 0;
+
+err:
+ if (dev->devno < SCR24X_DEVS)
+ clear_bit(dev->devno, scr24x_minors);
+ kfree(dev);
+ return ret;
+}
+
+static void scr24x_remove(struct pcmcia_device *link)
+{
+ struct scr24x_dev *dev = (struct scr24x_dev *)link->priv;
+
+ device_destroy(scr24x_class, MKDEV(MAJOR(scr24x_devt), dev->devno));
+ mutex_lock(&dev->lock);
+ pcmcia_disable_device(link);
+ cdev_del(&dev->c_dev);
+ clear_bit(dev->devno, scr24x_minors);
+ dev->dev = NULL;
+ mutex_unlock(&dev->lock);
+
+ kref_put(&dev->refcnt, scr24x_delete);
+}
+
+static const struct pcmcia_device_id scr24x_ids[] = {
+ PCMCIA_DEVICE_PROD_ID12("HP", "PC Card Smart Card Reader",
+ 0x53cb94f9, 0xbfdf89a5),
+ PCMCIA_DEVICE_PROD_ID1("SCR241 PCMCIA", 0x6271efa3),
+ PCMCIA_DEVICE_PROD_ID1("SCR243 PCMCIA", 0x2054e8de),
+ PCMCIA_DEVICE_PROD_ID1("SCR24x PCMCIA", 0x54a33665),
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, scr24x_ids);
+
+static struct pcmcia_driver scr24x_driver = {
+ .owner = THIS_MODULE,
+ .name = "scr24x_cs",
+ .probe = scr24x_probe,
+ .remove = scr24x_remove,
+ .id_table = scr24x_ids,
+};
+
+static int __init scr24x_init(void)
+{
+ int ret;
+
+ scr24x_class = class_create(THIS_MODULE, "scr24x");
+ if (IS_ERR(scr24x_class))
+ return PTR_ERR(scr24x_class);
+
+ ret = alloc_chrdev_region(&scr24x_devt, 0, SCR24X_DEVS, "scr24x");
+ if (ret < 0) {
+ class_destroy(scr24x_class);
+ return ret;
+ }
+
+ ret = pcmcia_register_driver(&scr24x_driver);
+ if (ret < 0) {
+ unregister_chrdev_region(scr24x_devt, SCR24X_DEVS);
+ class_destroy(scr24x_class);
+ }
+
+ return ret;
+}
+
+static void __exit scr24x_exit(void)
+{
+ pcmcia_unregister_driver(&scr24x_driver);
+ unregister_chrdev_region(scr24x_devt, SCR24X_DEVS);
+ class_destroy(scr24x_class);
+}
+
+module_init(scr24x_init);
+module_exit(scr24x_exit);
+
+MODULE_AUTHOR("Lubomir Rintel");
+MODULE_DESCRIPTION("SCR24x PCMCIA Smart Card Reader Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 6af1ce04b3da..02819e0703c8 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -86,6 +86,9 @@ struct pp_struct {
long default_inactivity;
};
+/* should we use PARDEVICE_MAX here? */
+static struct device *devices[PARPORT_MAX];
+
/* pp_struct.flags bitfields */
#define PP_CLAIMED (1<<0)
#define PP_EXCL (1<<1)
@@ -294,7 +297,7 @@ static int register_device(int minor, struct pp_struct *pp)
port = parport_find_number(minor);
if (!port) {
- printk(KERN_WARNING "%s: no associated port!\n", name);
+ pr_warn("%s: no associated port!\n", name);
kfree(name);
return -ENXIO;
}
@@ -305,10 +308,10 @@ static int register_device(int minor, struct pp_struct *pp)
ppdev_cb.private = pp;
pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
parport_put_port(port);
+ kfree(name);
if (!pdev) {
- printk(KERN_WARNING "%s: failed to register device!\n", name);
- kfree(name);
+ pr_warn("%s: failed to register device!\n", name);
return -ENXIO;
}
@@ -789,13 +792,29 @@ static const struct file_operations pp_fops = {
static void pp_attach(struct parport *port)
{
- device_create(ppdev_class, port->dev, MKDEV(PP_MAJOR, port->number),
- NULL, "parport%d", port->number);
+ struct device *ret;
+
+ if (devices[port->number])
+ return;
+
+ ret = device_create(ppdev_class, port->dev,
+ MKDEV(PP_MAJOR, port->number), NULL,
+ "parport%d", port->number);
+ if (IS_ERR(ret)) {
+ pr_err("Failed to create device parport%d\n",
+ port->number);
+ return;
+ }
+ devices[port->number] = ret;
}
static void pp_detach(struct parport *port)
{
+ if (!devices[port->number])
+ return;
+
device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number));
+ devices[port->number] = NULL;
}
static int pp_probe(struct pardevice *par_dev)
@@ -822,8 +841,7 @@ static int __init ppdev_init(void)
int err = 0;
if (register_chrdev(PP_MAJOR, CHRDEV, &pp_fops)) {
- printk(KERN_WARNING CHRDEV ": unable to get major %d\n",
- PP_MAJOR);
+ pr_warn(CHRDEV ": unable to get major %d\n", PP_MAJOR);
return -EIO;
}
ppdev_class = class_create(THIS_MODULE, CHRDEV);
@@ -833,11 +851,11 @@ static int __init ppdev_init(void)
}
err = parport_register_driver(&pp_driver);
if (err < 0) {
- printk(KERN_WARNING CHRDEV ": unable to register with parport\n");
+ pr_warn(CHRDEV ": unable to register with parport\n");
goto out_class;
}
- printk(KERN_INFO PP_VERSION "\n");
+ pr_info(PP_VERSION "\n");
goto out;
out_class:
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index 10e56323f390..ec07f0e99732 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -285,7 +285,7 @@ scdrv_write(struct file *file, const char __user *buf,
DECLARE_WAITQUEUE(wait, current);
if (file->f_flags & O_NONBLOCK) {
- spin_unlock(&sd->sd_wlock);
+ spin_unlock_irqrestore(&sd->sd_wlock, flags);
up(&sd->sd_wbs);
return -EAGAIN;
}
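
The snsc fix above is the classic irqsave/irqrestore pairing rule: a lock taken with spin_lock_irqsave() must be released with spin_unlock_irqrestore() on every exit path, including early returns. A standalone sketch of the pattern (illustration only, names made up):

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static int example_nonblock_path(spinlock_t *lock, bool nonblock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (nonblock) {
		/* must restore flags here, not plain spin_unlock() */
		spin_unlock_irqrestore(lock, flags);
		return -EAGAIN;
	}

	/* ... blocking path ... */

	spin_unlock_irqrestore(lock, flags);
	return 0;
}
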
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index 398800edb2cc..3d4cca64b2d4 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -312,7 +312,8 @@ ATTRIBUTE_GROUPS(srom_dev);
static char *srom_devnode(struct device *dev, umode_t *mode)
{
- *mode = S_IRUGO | S_IWUSR;
+ if (mode)
+ *mode = 0644;
return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev));
}
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 62028f483bba..50072cc4fe5c 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -337,18 +337,14 @@ static int tpmfront_resume(struct xenbus_device *dev)
static void backend_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
- int val;
-
switch (backend_state) {
case XenbusStateInitialised:
case XenbusStateConnected:
if (dev->state == XenbusStateConnected)
break;
- if (xenbus_scanf(XBT_NIL, dev->otherend,
- "feature-protocol-v2", "%d", &val) < 0)
- val = 0;
- if (!val) {
+ if (!xenbus_read_unsigned(dev->otherend, "feature-protocol-v2",
+ 0)) {
xenbus_dev_fatal(dev, -EINVAL,
"vTPM protocol 2 required");
return;
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 56e6c4c7c60d..d836d4ce5ee4 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -274,9 +274,10 @@ static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
struct arizona *arizona = info->arizona;
const char *widget = arizona_extcon_get_micbias(info);
struct snd_soc_dapm_context *dapm = arizona->dapm;
+ struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
int ret;
- ret = snd_soc_dapm_force_enable_pin(dapm, widget);
+ ret = snd_soc_component_force_enable_pin(component, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to enable %s: %d\n",
widget, ret);
@@ -284,7 +285,7 @@ static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
snd_soc_dapm_sync(dapm);
if (!arizona->pdata.micd_force_micbias) {
- ret = snd_soc_dapm_disable_pin(arizona->dapm, widget);
+ ret = snd_soc_component_disable_pin(component, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to disable %s: %d\n",
widget, ret);
@@ -349,6 +350,7 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
struct arizona *arizona = info->arizona;
const char *widget = arizona_extcon_get_micbias(info);
struct snd_soc_dapm_context *dapm = arizona->dapm;
+ struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
bool change;
int ret;
@@ -356,7 +358,7 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
ARIZONA_MICD_ENA, 0,
&change);
- ret = snd_soc_dapm_disable_pin(dapm, widget);
+ ret = snd_soc_component_disable_pin(component, widget);
if (ret != 0)
dev_warn(arizona->dev,
"Failed to disable %s: %d\n",
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index a27d350f69e3..d589c5feff3d 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -24,7 +24,6 @@
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
-#include <linux/pm_wakeirq.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/acpi.h>
@@ -36,7 +35,9 @@ struct usb_extcon_info {
struct extcon_dev *edev;
struct gpio_desc *id_gpiod;
+ struct gpio_desc *vbus_gpiod;
int id_irq;
+ int vbus_irq;
unsigned long debounce_jiffies;
struct delayed_work wq_detcable;
@@ -48,31 +49,47 @@ static const unsigned int usb_extcon_cable[] = {
EXTCON_NONE,
};
+/*
+ * "USB" = VBUS and "USB-HOST" = !ID, so we have:
+ * Both "USB" and "USB-HOST" can't be set as active at the
+ * same time so if "USB-HOST" is active (i.e. ID is 0) we keep "USB" inactive
+ * even if VBUS is on.
+ *
+ * State | ID | VBUS
+ * ----------------------------------------
+ * [1] USB | H | H
+ * [2] none | H | L
+ * [3] USB-HOST | L | H
+ * [4] USB-HOST | L | L
+ *
+ * In case we have only one of these signals:
+ * - VBUS only - we want to distinguish between [1] and [2], so ID is always 1.
+ * - ID only - we want to distinguish between [1] and [4], so VBUS = ID.
+ */
static void usb_extcon_detect_cable(struct work_struct *work)
{
- int id;
+ int id, vbus;
struct usb_extcon_info *info = container_of(to_delayed_work(work),
struct usb_extcon_info,
wq_detcable);
- /* check ID and update cable state */
- id = gpiod_get_value_cansleep(info->id_gpiod);
- if (id) {
- /*
- * ID = 1 means USB HOST cable detached.
- * As we don't have event for USB peripheral cable attached,
- * we simulate USB peripheral attach here.
- */
+ /* check ID and VBUS and update cable state */
+ id = info->id_gpiod ?
+ gpiod_get_value_cansleep(info->id_gpiod) : 1;
+ vbus = info->vbus_gpiod ?
+ gpiod_get_value_cansleep(info->vbus_gpiod) : id;
+
+ /* at first we clean states which are no longer active */
+ if (id)
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, false);
- extcon_set_state_sync(info->edev, EXTCON_USB, true);
- } else {
- /*
- * ID = 0 means USB HOST cable attached.
- * As we don't have event for USB peripheral cable detached,
- * we simulate USB peripheral detach here.
- */
+ if (!vbus)
extcon_set_state_sync(info->edev, EXTCON_USB, false);
+
+ if (!id) {
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, true);
+ } else {
+ if (vbus)
+ extcon_set_state_sync(info->edev, EXTCON_USB, true);
}
}
@@ -101,12 +118,21 @@ static int usb_extcon_probe(struct platform_device *pdev)
return -ENOMEM;
info->dev = dev;
- info->id_gpiod = devm_gpiod_get(&pdev->dev, "id", GPIOD_IN);
- if (IS_ERR(info->id_gpiod)) {
- dev_err(dev, "failed to get ID GPIO\n");
- return PTR_ERR(info->id_gpiod);
+ info->id_gpiod = devm_gpiod_get_optional(&pdev->dev, "id", GPIOD_IN);
+ info->vbus_gpiod = devm_gpiod_get_optional(&pdev->dev, "vbus",
+ GPIOD_IN);
+
+ if (!info->id_gpiod && !info->vbus_gpiod) {
+ dev_err(dev, "failed to get gpios\n");
+ return -ENODEV;
}
+ if (IS_ERR(info->id_gpiod))
+ return PTR_ERR(info->id_gpiod);
+
+ if (IS_ERR(info->vbus_gpiod))
+ return PTR_ERR(info->vbus_gpiod);
+
info->edev = devm_extcon_dev_allocate(dev, usb_extcon_cable);
if (IS_ERR(info->edev)) {
dev_err(dev, "failed to allocate extcon device\n");
@@ -119,32 +145,56 @@ static int usb_extcon_probe(struct platform_device *pdev)
return ret;
}
- ret = gpiod_set_debounce(info->id_gpiod,
- USB_GPIO_DEBOUNCE_MS * 1000);
+ if (info->id_gpiod)
+ ret = gpiod_set_debounce(info->id_gpiod,
+ USB_GPIO_DEBOUNCE_MS * 1000);
+ if (!ret && info->vbus_gpiod)
+ ret = gpiod_set_debounce(info->vbus_gpiod,
+ USB_GPIO_DEBOUNCE_MS * 1000);
+
if (ret < 0)
info->debounce_jiffies = msecs_to_jiffies(USB_GPIO_DEBOUNCE_MS);
INIT_DELAYED_WORK(&info->wq_detcable, usb_extcon_detect_cable);
- info->id_irq = gpiod_to_irq(info->id_gpiod);
- if (info->id_irq < 0) {
- dev_err(dev, "failed to get ID IRQ\n");
- return info->id_irq;
+ if (info->id_gpiod) {
+ info->id_irq = gpiod_to_irq(info->id_gpiod);
+ if (info->id_irq < 0) {
+ dev_err(dev, "failed to get ID IRQ\n");
+ return info->id_irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
+ usb_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ pdev->name, info);
+ if (ret < 0) {
+ dev_err(dev, "failed to request handler for ID IRQ\n");
+ return ret;
+ }
}
- ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
- usb_irq_handler,
- IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- pdev->name, info);
- if (ret < 0) {
- dev_err(dev, "failed to request handler for ID IRQ\n");
- return ret;
+ if (info->vbus_gpiod) {
+ info->vbus_irq = gpiod_to_irq(info->vbus_gpiod);
+ if (info->vbus_irq < 0) {
+ dev_err(dev, "failed to get VBUS IRQ\n");
+ return info->vbus_irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, info->vbus_irq, NULL,
+ usb_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ pdev->name, info);
+ if (ret < 0) {
+ dev_err(dev, "failed to request handler for VBUS IRQ\n");
+ return ret;
+ }
}
platform_set_drvdata(pdev, info);
device_init_wakeup(dev, true);
- dev_pm_set_wake_irq(dev, info->id_irq);
/* Perform initial detection */
usb_extcon_detect_cable(&info->wq_detcable.work);
@@ -157,8 +207,6 @@ static int usb_extcon_remove(struct platform_device *pdev)
struct usb_extcon_info *info = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&info->wq_detcable);
-
- dev_pm_clear_wake_irq(&pdev->dev);
device_init_wakeup(&pdev->dev, false);
return 0;
@@ -170,12 +218,32 @@ static int usb_extcon_suspend(struct device *dev)
struct usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
+ if (device_may_wakeup(dev)) {
+ if (info->id_gpiod) {
+ ret = enable_irq_wake(info->id_irq);
+ if (ret)
+ return ret;
+ }
+ if (info->vbus_gpiod) {
+ ret = enable_irq_wake(info->vbus_irq);
+ if (ret) {
+ if (info->id_gpiod)
+ disable_irq_wake(info->id_irq);
+
+ return ret;
+ }
+ }
+ }
+
/*
* We don't want to process any IRQs after this point
* as GPIOs used behind I2C subsystem might not be
* accessible until resume completes. So disable IRQ.
*/
- disable_irq(info->id_irq);
+ if (info->id_gpiod)
+ disable_irq(info->id_irq);
+ if (info->vbus_gpiod)
+ disable_irq(info->vbus_irq);
return ret;
}
@@ -185,7 +253,28 @@ static int usb_extcon_resume(struct device *dev)
struct usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
- enable_irq(info->id_irq);
+ if (device_may_wakeup(dev)) {
+ if (info->id_gpiod) {
+ ret = disable_irq_wake(info->id_irq);
+ if (ret)
+ return ret;
+ }
+ if (info->vbus_gpiod) {
+ ret = disable_irq_wake(info->vbus_irq);
+ if (ret) {
+ if (info->id_gpiod)
+ enable_irq_wake(info->id_irq);
+
+ return ret;
+ }
+ }
+ }
+
+ if (info->id_gpiod)
+ enable_irq(info->id_irq);
+ if (info->vbus_gpiod)
+ enable_irq(info->vbus_irq);
+
if (!device_may_wakeup(dev))
queue_delayed_work(system_power_efficient_wq,
&info->wq_detcable, 0);
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 7c75a8d9091a..349dc3e1e52e 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -39,7 +39,7 @@ static struct mm_struct efi_mm = {
.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
};
-#ifdef CONFIG_ARM64_PTDUMP
+#ifdef CONFIG_ARM64_PTDUMP_DEBUGFS
#include <asm/ptdump.h>
static struct ptdump_info efi_ptdump_info = {
@@ -53,7 +53,7 @@ static struct ptdump_info efi_ptdump_info = {
static int __init ptdump_init(void)
{
- return ptdump_register(&efi_ptdump_info, "efi_page_tables");
+ return ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables");
}
device_initcall(ptdump_init);
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 6621b13c370f..d564d25df8ab 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -6,7 +6,7 @@
#
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
-cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
+cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index cd84934774cc..ce861a2853a4 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -13,12 +13,26 @@ config FPGA
if FPGA
+config FPGA_REGION
+ tristate "FPGA Region"
+ depends on OF && FPGA_BRIDGE
+ help
+ FPGA Regions allow loading FPGA images under control of
+ the Device Tree.
+
config FPGA_MGR_SOCFPGA
tristate "Altera SOCFPGA FPGA Manager"
- depends on ARCH_SOCFPGA
+ depends on ARCH_SOCFPGA || COMPILE_TEST
help
FPGA manager driver support for Altera SOCFPGA.
+config FPGA_MGR_SOCFPGA_A10
+ tristate "Altera SoCFPGA Arria10"
+ depends on ARCH_SOCFPGA || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ FPGA manager driver support for Altera Arria10 SoCFPGA.
+
config FPGA_MGR_ZYNQ_FPGA
tristate "Xilinx Zynq FPGA"
depends on ARCH_ZYNQ || COMPILE_TEST
@@ -26,6 +40,29 @@ config FPGA_MGR_ZYNQ_FPGA
help
FPGA manager driver support for Xilinx Zynq FPGAs.
+config FPGA_BRIDGE
+ tristate "FPGA Bridge Framework"
+ depends on OF
+ help
+ Say Y here if you want to support bridges connected between host
+ processors and FPGAs or between FPGAs.
+
+config SOCFPGA_FPGA_BRIDGE
+ tristate "Altera SoCFPGA FPGA Bridges"
+ depends on ARCH_SOCFPGA && FPGA_BRIDGE
+ help
+ Say Y to enable drivers for FPGA bridges for Altera SOCFPGA
+ devices.
+
+config ALTERA_FREEZE_BRIDGE
+ tristate "Altera FPGA Freeze Bridge"
+ depends on ARCH_SOCFPGA && FPGA_BRIDGE
+ help
+ Say Y to enable drivers for Altera FPGA Freeze bridges. A
+ freeze bridge is a bridge that exists in the FPGA fabric to
+ isolate one region of the FPGA from the busses while that
+ region is being reprogrammed.
+
endif # FPGA
endmenu
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 8d83fc6b1613..8df07bcf42a6 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -7,4 +7,13 @@ obj-$(CONFIG_FPGA) += fpga-mgr.o
# FPGA Manager Drivers
obj-$(CONFIG_FPGA_MGR_SOCFPGA) += socfpga.o
+obj-$(CONFIG_FPGA_MGR_SOCFPGA_A10) += socfpga-a10.o
obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o
+
+# FPGA Bridge Drivers
+obj-$(CONFIG_FPGA_BRIDGE) += fpga-bridge.o
+obj-$(CONFIG_SOCFPGA_FPGA_BRIDGE) += altera-hps2fpga.o altera-fpga2sdram.o
+obj-$(CONFIG_ALTERA_FREEZE_BRIDGE) += altera-freeze-bridge.o
+
+# High Level Interfaces
+obj-$(CONFIG_FPGA_REGION) += fpga-region.o
diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c
new file mode 100644
index 000000000000..d4eeb74388da
--- /dev/null
+++ b/drivers/fpga/altera-fpga2sdram.c
@@ -0,0 +1,180 @@
+/*
+ * FPGA to SDRAM Bridge Driver for Altera SoCFPGA Devices
+ *
+ * Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * This driver manages a bridge between an FPGA and the SDRAM used by the ARM
+ * host processor system (HPS).
+ *
+ * The bridge contains 4 read ports, 4 write ports, and 6 command ports.
+ * Reconfiguring these ports requires that no SDRAM transactions occur during
+ * reconfiguration. The code reconfiguring the ports cannot run out of SDRAM
+ * nor can the FPGA access the SDRAM during reconfiguration. This driver does
+ * not support reconfiguring the ports. The ports are configured by code
+ * running out of on-chip RAM before Linux is started, and the configuration
+ * is passed in a handoff register in the system manager.
+ *
+ * This driver supports enabling and disabling of the configured ports, which
+ * allows for safe reprogramming of the FPGA, assuming that the new FPGA image
+ * uses the same port configuration. Bridges must be disabled before
+ * reprogramming the FPGA and re-enabled after the FPGA has been programmed.
+ */
+
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+
+#define ALT_SDR_CTL_FPGAPORTRST_OFST 0x80
+#define ALT_SDR_CTL_FPGAPORTRST_PORTRSTN_MSK 0x00003fff
+#define ALT_SDR_CTL_FPGAPORTRST_RD_SHIFT 0
+#define ALT_SDR_CTL_FPGAPORTRST_WR_SHIFT 4
+#define ALT_SDR_CTL_FPGAPORTRST_CTRL_SHIFT 8
+
+/*
+ * From the Cyclone V HPS Memory Map document:
+ * These registers are used to store handoff information between the
+ * preloader and the OS. These 8 registers can be used to store any
+ * information. The contents of these registers have no impact on
+ * the state of the HPS hardware.
+ */
+#define SYSMGR_ISWGRP_HANDOFF3 (0x8C)
+
+#define F2S_BRIDGE_NAME "fpga2sdram"
+
+struct alt_fpga2sdram_data {
+ struct device *dev;
+ struct regmap *sdrctl;
+ int mask;
+};
+
+static int alt_fpga2sdram_enable_show(struct fpga_bridge *bridge)
+{
+ struct alt_fpga2sdram_data *priv = bridge->priv;
+ int value;
+
+ regmap_read(priv->sdrctl, ALT_SDR_CTL_FPGAPORTRST_OFST, &value);
+
+ return (value & priv->mask) == priv->mask;
+}
+
+static inline int _alt_fpga2sdram_enable_set(struct alt_fpga2sdram_data *priv,
+ bool enable)
+{
+ return regmap_update_bits(priv->sdrctl, ALT_SDR_CTL_FPGAPORTRST_OFST,
+ priv->mask, enable ? priv->mask : 0);
+}
+
+static int alt_fpga2sdram_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ return _alt_fpga2sdram_enable_set(bridge->priv, enable);
+}
+
+struct prop_map {
+ char *prop_name;
+ u32 *prop_value;
+ u32 prop_max;
+};
+
+static const struct fpga_bridge_ops altera_fpga2sdram_br_ops = {
+ .enable_set = alt_fpga2sdram_enable_set,
+ .enable_show = alt_fpga2sdram_enable_show,
+};
+
+static const struct of_device_id altera_fpga_of_match[] = {
+ { .compatible = "altr,socfpga-fpga2sdram-bridge" },
+ {},
+};
+
+static int alt_fpga_bridge_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct alt_fpga2sdram_data *priv;
+ u32 enable;
+ struct regmap *sysmgr;
+ int ret = 0;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ priv->sdrctl = syscon_regmap_lookup_by_compatible("altr,sdr-ctl");
+ if (IS_ERR(priv->sdrctl)) {
+ dev_err(dev, "regmap for altr,sdr-ctl lookup failed.\n");
+ return PTR_ERR(priv->sdrctl);
+ }
+
+ sysmgr = syscon_regmap_lookup_by_compatible("altr,sys-mgr");
+ if (IS_ERR(sysmgr)) {
+ dev_err(dev, "regmap for altr,sys-mgr lookup failed.\n");
+ return PTR_ERR(sysmgr);
+ }
+
+ /* Get f2s bridge configuration saved in handoff register */
+ regmap_read(sysmgr, SYSMGR_ISWGRP_HANDOFF3, &priv->mask);
+
+ ret = fpga_bridge_register(dev, F2S_BRIDGE_NAME,
+ &altera_fpga2sdram_br_ops, priv);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "driver initialized with handoff %08x\n", priv->mask);
+
+ if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) {
+ if (enable > 1) {
+ dev_warn(dev, "invalid bridge-enable %u > 1\n", enable);
+ } else {
+ dev_info(dev, "%s bridge\n",
+ (enable ? "enabling" : "disabling"));
+ ret = _alt_fpga2sdram_enable_set(priv, enable);
+ if (ret) {
+ fpga_bridge_unregister(&pdev->dev);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int alt_fpga_bridge_remove(struct platform_device *pdev)
+{
+ fpga_bridge_unregister(&pdev->dev);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
+
+static struct platform_driver altera_fpga_driver = {
+ .probe = alt_fpga_bridge_probe,
+ .remove = alt_fpga_bridge_remove,
+ .driver = {
+ .name = "altera_fpga2sdram_bridge",
+ .of_match_table = of_match_ptr(altera_fpga_of_match),
+ },
+};
+
+module_platform_driver(altera_fpga_driver);
+
+MODULE_DESCRIPTION("Altera SoCFPGA FPGA to SDRAM Bridge");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c
new file mode 100644
index 000000000000..8dcd9fb22cb9
--- /dev/null
+++ b/drivers/fpga/altera-freeze-bridge.c
@@ -0,0 +1,273 @@
+/*
+ * FPGA Freeze Bridge Controller
+ *
+ * Copyright (C) 2016 Altera Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/fpga/fpga-bridge.h>
+
+#define FREEZE_CSR_STATUS_OFFSET 0
+#define FREEZE_CSR_CTRL_OFFSET 4
+#define FREEZE_CSR_ILLEGAL_REQ_OFFSET 8
+#define FREEZE_CSR_REG_VERSION 12
+
+#define FREEZE_CSR_SUPPORTED_VERSION 2
+
+#define FREEZE_CSR_STATUS_FREEZE_REQ_DONE BIT(0)
+#define FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE BIT(1)
+
+#define FREEZE_CSR_CTRL_FREEZE_REQ BIT(0)
+#define FREEZE_CSR_CTRL_RESET_REQ BIT(1)
+#define FREEZE_CSR_CTRL_UNFREEZE_REQ BIT(2)
+
+#define FREEZE_BRIDGE_NAME "freeze"
+
+struct altera_freeze_br_data {
+ struct device *dev;
+ void __iomem *base_addr;
+ bool enable;
+};
+
+/*
+ * Poll status until status bit is set or we have a timeout.
+ */
+static int altera_freeze_br_req_ack(struct altera_freeze_br_data *priv,
+ u32 timeout, u32 req_ack)
+{
+ struct device *dev = priv->dev;
+ void __iomem *csr_illegal_req_addr = priv->base_addr +
+ FREEZE_CSR_ILLEGAL_REQ_OFFSET;
+ u32 status, illegal, ctrl;
+ int ret = -ETIMEDOUT;
+
+ do {
+ illegal = readl(csr_illegal_req_addr);
+ if (illegal) {
+ dev_err(dev, "illegal request detected 0x%x", illegal);
+
+ writel(1, csr_illegal_req_addr);
+
+ illegal = readl(csr_illegal_req_addr);
+ if (illegal)
+ dev_err(dev, "illegal request not cleared 0x%x",
+ illegal);
+
+ ret = -EINVAL;
+ break;
+ }
+
+ status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+ dev_dbg(dev, "%s %x %x\n", __func__, status, req_ack);
+ status &= req_ack;
+ if (status) {
+ ctrl = readl(priv->base_addr + FREEZE_CSR_CTRL_OFFSET);
+ dev_dbg(dev, "%s request %x acknowledged %x %x\n",
+ __func__, req_ack, status, ctrl);
+ ret = 0;
+ break;
+ }
+
+ udelay(1);
+ } while (timeout--);
+
+ if (ret == -ETIMEDOUT)
+ dev_err(dev, "%s timeout waiting for 0x%x\n",
+ __func__, req_ack);
+
+ return ret;
+}
+
+static int altera_freeze_br_do_freeze(struct altera_freeze_br_data *priv,
+ u32 timeout)
+{
+ struct device *dev = priv->dev;
+ void __iomem *csr_ctrl_addr = priv->base_addr +
+ FREEZE_CSR_CTRL_OFFSET;
+ u32 status;
+ int ret;
+
+ status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+
+ dev_dbg(dev, "%s %d %d\n", __func__, status, readl(csr_ctrl_addr));
+
+ if (status & FREEZE_CSR_STATUS_FREEZE_REQ_DONE) {
+ dev_dbg(dev, "%s bridge already disabled %d\n",
+ __func__, status);
+ return 0;
+ } else if (!(status & FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE)) {
+ dev_err(dev, "%s bridge not enabled %d\n", __func__, status);
+ return -EINVAL;
+ }
+
+ writel(FREEZE_CSR_CTRL_FREEZE_REQ, csr_ctrl_addr);
+
+ ret = altera_freeze_br_req_ack(priv, timeout,
+ FREEZE_CSR_STATUS_FREEZE_REQ_DONE);
+
+ if (ret)
+ writel(0, csr_ctrl_addr);
+ else
+ writel(FREEZE_CSR_CTRL_RESET_REQ, csr_ctrl_addr);
+
+ return ret;
+}
+
+static int altera_freeze_br_do_unfreeze(struct altera_freeze_br_data *priv,
+ u32 timeout)
+{
+ struct device *dev = priv->dev;
+ void __iomem *csr_ctrl_addr = priv->base_addr +
+ FREEZE_CSR_CTRL_OFFSET;
+ u32 status;
+ int ret;
+
+ writel(0, csr_ctrl_addr);
+
+ status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+
+ dev_dbg(dev, "%s %d %d\n", __func__, status, readl(csr_ctrl_addr));
+
+ if (status & FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE) {
+ dev_dbg(dev, "%s bridge already enabled %d\n",
+ __func__, status);
+ return 0;
+ } else if (!(status & FREEZE_CSR_STATUS_FREEZE_REQ_DONE)) {
+ dev_err(dev, "%s bridge not frozen %d\n", __func__, status);
+ return -EINVAL;
+ }
+
+ writel(FREEZE_CSR_CTRL_UNFREEZE_REQ, csr_ctrl_addr);
+
+ ret = altera_freeze_br_req_ack(priv, timeout,
+ FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE);
+
+ status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+
+ dev_dbg(dev, "%s %d %d\n", __func__, status, readl(csr_ctrl_addr));
+
+ writel(0, csr_ctrl_addr);
+
+ return ret;
+}
+
+/*
+ * enable = 1 : allow traffic through the bridge
+ * enable = 0 : disable traffic through the bridge
+ */
+static int altera_freeze_br_enable_set(struct fpga_bridge *bridge,
+ bool enable)
+{
+ struct altera_freeze_br_data *priv = bridge->priv;
+ struct fpga_image_info *info = bridge->info;
+ u32 timeout = 0;
+ int ret;
+
+ if (enable) {
+ if (info)
+ timeout = info->enable_timeout_us;
+
+ ret = altera_freeze_br_do_unfreeze(bridge->priv, timeout);
+ } else {
+ if (info)
+ timeout = info->disable_timeout_us;
+
+ ret = altera_freeze_br_do_freeze(bridge->priv, timeout);
+ }
+
+ if (!ret)
+ priv->enable = enable;
+
+ return ret;
+}
+
+static int altera_freeze_br_enable_show(struct fpga_bridge *bridge)
+{
+ struct altera_freeze_br_data *priv = bridge->priv;
+
+ return priv->enable;
+}
+
+static struct fpga_bridge_ops altera_freeze_br_br_ops = {
+ .enable_set = altera_freeze_br_enable_set,
+ .enable_show = altera_freeze_br_enable_show,
+};
+
+static const struct of_device_id altera_freeze_br_of_match[] = {
+ { .compatible = "altr,freeze-bridge-controller", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altera_freeze_br_of_match);
+
+static int altera_freeze_br_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ struct altera_freeze_br_data *priv;
+ struct resource *res;
+ u32 status, revision;
+
+ if (!np)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base_addr))
+ return PTR_ERR(priv->base_addr);
+
+ status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+ if (status & FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE)
+ priv->enable = 1;
+
+ revision = readl(priv->base_addr + FREEZE_CSR_REG_VERSION);
+ if (revision != FREEZE_CSR_SUPPORTED_VERSION)
+ dev_warn(dev,
+ "%s Freeze Controller unexpected revision %d != %d\n",
+ __func__, revision, FREEZE_CSR_SUPPORTED_VERSION);
+
+ return fpga_bridge_register(dev, FREEZE_BRIDGE_NAME,
+ &altera_freeze_br_br_ops, priv);
+}
+
+static int altera_freeze_br_remove(struct platform_device *pdev)
+{
+ fpga_bridge_unregister(&pdev->dev);
+
+ return 0;
+}
+
+static struct platform_driver altera_freeze_br_driver = {
+ .probe = altera_freeze_br_probe,
+ .remove = altera_freeze_br_remove,
+ .driver = {
+ .name = "altera_freeze_br",
+ .of_match_table = of_match_ptr(altera_freeze_br_of_match),
+ },
+};
+
+module_platform_driver(altera_freeze_br_driver);
+
+MODULE_DESCRIPTION("Altera Freeze Bridge");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
new file mode 100644
index 000000000000..4b354c79be31
--- /dev/null
+++ b/drivers/fpga/altera-hps2fpga.c
@@ -0,0 +1,222 @@
+/*
+ * FPGA to/from HPS Bridge Driver for Altera SoCFPGA Devices
+ *
+ * Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved.
+ *
+ * Includes this patch from the mailing list:
+ * fpga: altera-hps2fpga: fix HPS2FPGA bridge visibility to L3 masters
+ * Signed-off-by: Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * This driver manages bridges on an Altera SoCFPGA between the ARM host
+ * processor system (HPS) and the embedded FPGA.
+ *
+ * This driver supports enabling and disabling of the configured ports, which
+ * allows for safe reprogramming of the FPGA, assuming that the new FPGA image
+ * uses the same port configuration. Bridges must be disabled before
+ * reprogramming the FPGA and re-enabled after the FPGA has been programmed.
+ */
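/*
 * Illustrative sketch, not part of this patch: the disable -> reprogram ->
 * re-enable sequence described above, expressed with the fpga-bridge and
 * fpga-mgr APIs added in this series.  "bridge", "mgr", "info" and
 * "my_image.rbf" are hypothetical, caller-provided placeholders.
 */
#include <linux/fpga/fpga-bridge.h>
#include <linux/fpga/fpga-mgr.h>

static int reprogram_behind_bridge(struct fpga_bridge *bridge,
				   struct fpga_manager *mgr,
				   struct fpga_image_info *info)
{
	int ret;

	/* Stop traffic between the HPS and the FPGA before touching the image */
	ret = fpga_bridge_disable(bridge);
	if (ret)
		return ret;

	/* Load the new image; it must use the same port configuration */
	ret = fpga_mgr_firmware_load(mgr, info, "my_image.rbf");
	if (ret)
		return ret;

	/* Let traffic flow again once the FPGA is back in user mode */
	return fpga_bridge_enable(bridge);
}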
+
+#include <linux/clk.h>
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define ALT_L3_REMAP_OFST 0x0
+#define ALT_L3_REMAP_MPUZERO_MSK 0x00000001
+#define ALT_L3_REMAP_H2F_MSK 0x00000008
+#define ALT_L3_REMAP_LWH2F_MSK 0x00000010
+
+#define HPS2FPGA_BRIDGE_NAME "hps2fpga"
+#define LWHPS2FPGA_BRIDGE_NAME "lwhps2fpga"
+#define FPGA2HPS_BRIDGE_NAME "fpga2hps"
+
+struct altera_hps2fpga_data {
+ const char *name;
+ struct reset_control *bridge_reset;
+ struct regmap *l3reg;
+ unsigned int remap_mask;
+ struct clk *clk;
+};
+
+static int alt_hps2fpga_enable_show(struct fpga_bridge *bridge)
+{
+ struct altera_hps2fpga_data *priv = bridge->priv;
+
+ return reset_control_status(priv->bridge_reset);
+}
+
+/* The L3 REMAP register is write only, so keep a cached value. */
+static unsigned int l3_remap_shadow;
+static spinlock_t l3_remap_lock;
+
+static int _alt_hps2fpga_enable_set(struct altera_hps2fpga_data *priv,
+ bool enable)
+{
+ unsigned long flags;
+ int ret;
+
+ /* bring bridge out of reset */
+ if (enable)
+ ret = reset_control_deassert(priv->bridge_reset);
+ else
+ ret = reset_control_assert(priv->bridge_reset);
+ if (ret)
+ return ret;
+
+ /* Allow bridge to be visible to L3 masters or not */
+ if (priv->remap_mask) {
+ spin_lock_irqsave(&l3_remap_lock, flags);
+ l3_remap_shadow |= ALT_L3_REMAP_MPUZERO_MSK;
+
+ if (enable)
+ l3_remap_shadow |= priv->remap_mask;
+ else
+ l3_remap_shadow &= ~priv->remap_mask;
+
+ ret = regmap_write(priv->l3reg, ALT_L3_REMAP_OFST,
+ l3_remap_shadow);
+ spin_unlock_irqrestore(&l3_remap_lock, flags);
+ }
+
+ return ret;
+}
+
+static int alt_hps2fpga_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ return _alt_hps2fpga_enable_set(bridge->priv, enable);
+}
+
+static const struct fpga_bridge_ops altera_hps2fpga_br_ops = {
+ .enable_set = alt_hps2fpga_enable_set,
+ .enable_show = alt_hps2fpga_enable_show,
+};
+
+static struct altera_hps2fpga_data hps2fpga_data = {
+ .name = HPS2FPGA_BRIDGE_NAME,
+ .remap_mask = ALT_L3_REMAP_H2F_MSK,
+};
+
+static struct altera_hps2fpga_data lwhps2fpga_data = {
+ .name = LWHPS2FPGA_BRIDGE_NAME,
+ .remap_mask = ALT_L3_REMAP_LWH2F_MSK,
+};
+
+static struct altera_hps2fpga_data fpga2hps_data = {
+ .name = FPGA2HPS_BRIDGE_NAME,
+};
+
+static const struct of_device_id altera_fpga_of_match[] = {
+ { .compatible = "altr,socfpga-hps2fpga-bridge",
+ .data = &hps2fpga_data },
+ { .compatible = "altr,socfpga-lwhps2fpga-bridge",
+ .data = &lwhps2fpga_data },
+ { .compatible = "altr,socfpga-fpga2hps-bridge",
+ .data = &fpga2hps_data },
+ {},
+};
+
+static int alt_fpga_bridge_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct altera_hps2fpga_data *priv;
+ const struct of_device_id *of_id;
+ u32 enable;
+ int ret;
+
+ of_id = of_match_device(altera_fpga_of_match, dev);
+ priv = (struct altera_hps2fpga_data *)of_id->data;
+
+ priv->bridge_reset = of_reset_control_get_by_index(dev->of_node, 0);
+ if (IS_ERR(priv->bridge_reset)) {
+ dev_err(dev, "Could not get %s reset control\n", priv->name);
+ return PTR_ERR(priv->bridge_reset);
+ }
+
+ if (priv->remap_mask) {
+ priv->l3reg = syscon_regmap_lookup_by_compatible("altr,l3regs");
+ if (IS_ERR(priv->l3reg)) {
+ dev_err(dev, "regmap for altr,l3regs lookup failed\n");
+ return PTR_ERR(priv->l3reg);
+ }
+ }
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "no clock specified\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "could not enable clock\n");
+ return -EBUSY;
+ }
+
+ spin_lock_init(&l3_remap_lock);
+
+ if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) {
+ if (enable > 1) {
+ dev_warn(dev, "invalid bridge-enable %u > 1\n", enable);
+ } else {
+ dev_info(dev, "%s bridge\n",
+ (enable ? "enabling" : "disabling"));
+
+ ret = _alt_hps2fpga_enable_set(priv, enable);
+ if (ret) {
+ fpga_bridge_unregister(&pdev->dev);
+ return ret;
+ }
+ }
+ }
+
+ return fpga_bridge_register(dev, priv->name, &altera_hps2fpga_br_ops,
+ priv);
+}
+
+static int alt_fpga_bridge_remove(struct platform_device *pdev)
+{
+ struct fpga_bridge *bridge = platform_get_drvdata(pdev);
+ struct altera_hps2fpga_data *priv = bridge->priv;
+
+ fpga_bridge_unregister(&pdev->dev);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
+
+static struct platform_driver alt_fpga_bridge_driver = {
+ .probe = alt_fpga_bridge_probe,
+ .remove = alt_fpga_bridge_remove,
+ .driver = {
+ .name = "altera_hps2fpga_bridge",
+ .of_match_table = of_match_ptr(altera_fpga_of_match),
+ },
+};
+
+module_platform_driver(alt_fpga_bridge_driver);
+
+MODULE_DESCRIPTION("Altera SoCFPGA HPS to FPGA Bridge");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
new file mode 100644
index 000000000000..33ee83e6373c
--- /dev/null
+++ b/drivers/fpga/fpga-bridge.c
@@ -0,0 +1,395 @@
+/*
+ * FPGA Bridge Framework Driver
+ *
+ * Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+static DEFINE_IDA(fpga_bridge_ida);
+static struct class *fpga_bridge_class;
+
+/* Lock for adding/removing bridges to linked lists */
+spinlock_t bridge_list_lock;
+
+static int fpga_bridge_of_node_match(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+/**
+ * fpga_bridge_enable - Enable transactions on the bridge
+ *
+ * @bridge: FPGA bridge
+ *
+ * Return: 0 for success, error code otherwise.
+ */
+int fpga_bridge_enable(struct fpga_bridge *bridge)
+{
+ dev_dbg(&bridge->dev, "enable\n");
+
+ if (bridge->br_ops && bridge->br_ops->enable_set)
+ return bridge->br_ops->enable_set(bridge, 1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_enable);
+
+/**
+ * fpga_bridge_disable - Disable transactions on the bridge
+ *
+ * @bridge: FPGA bridge
+ *
+ * Return: 0 for success, error code otherwise.
+ */
+int fpga_bridge_disable(struct fpga_bridge *bridge)
+{
+ dev_dbg(&bridge->dev, "disable\n");
+
+ if (bridge->br_ops && bridge->br_ops->enable_set)
+ return bridge->br_ops->enable_set(bridge, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_disable);
+
+/**
+ * of_fpga_bridge_get - get an exclusive reference to an FPGA bridge
+ *
+ * @np: node pointer of an FPGA bridge
+ * @info: fpga image specific information
+ *
+ * Return fpga_bridge struct if successful.
+ * Return -EBUSY if someone already has a reference to the bridge.
+ * Return -ENODEV if @np is not an FPGA bridge.
+ */
+struct fpga_bridge *of_fpga_bridge_get(struct device_node *np,
+ struct fpga_image_info *info)
+
+{
+ struct device *dev;
+ struct fpga_bridge *bridge;
+ int ret = -ENODEV;
+
+ dev = class_find_device(fpga_bridge_class, NULL, np,
+ fpga_bridge_of_node_match);
+ if (!dev)
+ goto err_dev;
+
+ bridge = to_fpga_bridge(dev);
+ if (!bridge)
+ goto err_dev;
+
+ bridge->info = info;
+
+ if (!mutex_trylock(&bridge->mutex)) {
+ ret = -EBUSY;
+ goto err_dev;
+ }
+
+ if (!try_module_get(dev->parent->driver->owner))
+ goto err_ll_mod;
+
+ dev_dbg(&bridge->dev, "get\n");
+
+ return bridge;
+
+err_ll_mod:
+ mutex_unlock(&bridge->mutex);
+err_dev:
+ put_device(dev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(of_fpga_bridge_get);
+
+/**
+ * fpga_bridge_put - release a reference to a bridge
+ *
+ * @bridge: FPGA bridge
+ */
+void fpga_bridge_put(struct fpga_bridge *bridge)
+{
+ dev_dbg(&bridge->dev, "put\n");
+
+ bridge->info = NULL;
+ module_put(bridge->dev.parent->driver->owner);
+ mutex_unlock(&bridge->mutex);
+ put_device(&bridge->dev);
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_put);
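/*
 * Illustrative sketch, not part of this patch: the get/put pattern for a
 * single bridge looked up by device-tree node.  "bridge_np" and "info" are
 * hypothetical, caller-provided placeholders.
 */
#include <linux/err.h>
#include <linux/fpga/fpga-bridge.h>

static int toggle_one_bridge(struct device_node *bridge_np,
			     struct fpga_image_info *info)
{
	struct fpga_bridge *bridge;
	int ret;

	/* Exclusive reference; fails with -EBUSY if someone else holds it */
	bridge = of_fpga_bridge_get(bridge_np, info);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	ret = fpga_bridge_disable(bridge);
	if (!ret)
		ret = fpga_bridge_enable(bridge);

	/* Drops the bridge mutex, module reference and device reference */
	fpga_bridge_put(bridge);

	return ret;
}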
+
+/**
+ * fpga_bridges_enable - enable bridges in a list
+ * @bridge_list: list of FPGA bridges
+ *
+ * Enable each bridge in the list. If list is empty, do nothing.
+ *
+ * Return 0 for success or empty bridge list; return error code otherwise.
+ */
+int fpga_bridges_enable(struct list_head *bridge_list)
+{
+ struct fpga_bridge *bridge;
+ struct list_head *node;
+ int ret;
+
+ list_for_each(node, bridge_list) {
+ bridge = list_entry(node, struct fpga_bridge, node);
+ ret = fpga_bridge_enable(bridge);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridges_enable);
+
+/**
+ * fpga_bridges_disable - disable bridges in a list
+ *
+ * @bridge_list: list of FPGA bridges
+ *
+ * Disable each bridge in the list. If list is empty, do nothing.
+ *
+ * Return 0 for success or empty bridge list; return error code otherwise.
+ */
+int fpga_bridges_disable(struct list_head *bridge_list)
+{
+ struct fpga_bridge *bridge;
+ struct list_head *node;
+ int ret;
+
+ list_for_each(node, bridge_list) {
+ bridge = list_entry(node, struct fpga_bridge, node);
+ ret = fpga_bridge_disable(bridge);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridges_disable);
+
+/**
+ * fpga_bridges_put - put bridges
+ *
+ * @bridge_list: list of FPGA bridges
+ *
+ * For each bridge in the list, put the bridge and remove it from the list.
+ * If list is empty, do nothing.
+ */
+void fpga_bridges_put(struct list_head *bridge_list)
+{
+ struct fpga_bridge *bridge;
+ struct list_head *node, *next;
+ unsigned long flags;
+
+ list_for_each_safe(node, next, bridge_list) {
+ bridge = list_entry(node, struct fpga_bridge, node);
+
+ fpga_bridge_put(bridge);
+
+ spin_lock_irqsave(&bridge_list_lock, flags);
+ list_del(&bridge->node);
+ spin_unlock_irqrestore(&bridge_list_lock, flags);
+ }
+}
+EXPORT_SYMBOL_GPL(fpga_bridges_put);
+
+/**
+ * fpga_bridge_get_to_list - get a bridge, add it to a list
+ *
+ * @np: node pointer of an FPGA bridge
+ * @info: fpga image specific information
+ * @bridge_list: list of FPGA bridges
+ *
+ * Get an exclusive reference to the bridge and add it to the list.
+ *
+ * Return 0 for success, error code from of_fpga_bridge_get() otherwise.
+ */
+int fpga_bridge_get_to_list(struct device_node *np,
+ struct fpga_image_info *info,
+ struct list_head *bridge_list)
+{
+ struct fpga_bridge *bridge;
+ unsigned long flags;
+
+ bridge = of_fpga_bridge_get(np, info);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+
+ spin_lock_irqsave(&bridge_list_lock, flags);
+ list_add(&bridge->node, bridge_list);
+ spin_unlock_irqrestore(&bridge_list_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_get_to_list);
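/*
 * Illustrative sketch, not part of this patch: collecting several bridges
 * on a local list and driving them as a group, the way fpga-region.c does.
 * "br_np", "n" and "info" are hypothetical, caller-provided placeholders.
 */
#include <linux/fpga/fpga-bridge.h>
#include <linux/list.h>

static int cycle_bridge_list(struct device_node **br_np, int n,
			     struct fpga_image_info *info)
{
	LIST_HEAD(bridge_list);
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = fpga_bridge_get_to_list(br_np[i], info, &bridge_list);
		if (ret)
			goto out;	/* e.g. -EBUSY if a bridge is taken */
	}

	ret = fpga_bridges_disable(&bridge_list);
	if (!ret)
		ret = fpga_bridges_enable(&bridge_list);

out:
	/* Releases every bridge and empties the list */
	fpga_bridges_put(&bridge_list);
	return ret;
}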
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_bridge *bridge = to_fpga_bridge(dev);
+
+ return sprintf(buf, "%s\n", bridge->name);
+}
+
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_bridge *bridge = to_fpga_bridge(dev);
+ int enable = 1;
+
+ if (bridge->br_ops && bridge->br_ops->enable_show)
+ enable = bridge->br_ops->enable_show(bridge);
+
+ return sprintf(buf, "%s\n", enable ? "enabled" : "disabled");
+}
+
+static DEVICE_ATTR_RO(name);
+static DEVICE_ATTR_RO(state);
+
+static struct attribute *fpga_bridge_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_state.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(fpga_bridge);
+
+/**
+ * fpga_bridge_register - register an FPGA bridge driver
+ * @dev: FPGA bridge device from pdev
+ * @name: FPGA bridge name
+ * @br_ops: pointer to structure of fpga bridge ops
+ * @priv: FPGA bridge private data
+ *
+ * Return: 0 for success, error code otherwise.
+ */
+int fpga_bridge_register(struct device *dev, const char *name,
+ const struct fpga_bridge_ops *br_ops, void *priv)
+{
+ struct fpga_bridge *bridge;
+ int id, ret = 0;
+
+ if (!name || !strlen(name)) {
+ dev_err(dev, "Attempt to register with no name!\n");
+ return -EINVAL;
+ }
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+
+ id = ida_simple_get(&fpga_bridge_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ ret = id;
+ goto error_kfree;
+ }
+
+ mutex_init(&bridge->mutex);
+ INIT_LIST_HEAD(&bridge->node);
+
+ bridge->name = name;
+ bridge->br_ops = br_ops;
+ bridge->priv = priv;
+
+ device_initialize(&bridge->dev);
+ bridge->dev.class = fpga_bridge_class;
+ bridge->dev.parent = dev;
+ bridge->dev.of_node = dev->of_node;
+ bridge->dev.id = id;
+ dev_set_drvdata(dev, bridge);
+
+ ret = dev_set_name(&bridge->dev, "br%d", id);
+ if (ret)
+ goto error_device;
+
+ ret = device_add(&bridge->dev);
+ if (ret)
+ goto error_device;
+
+ of_platform_populate(dev->of_node, NULL, NULL, dev);
+
+ dev_info(bridge->dev.parent, "fpga bridge [%s] registered\n",
+ bridge->name);
+
+ return 0;
+
+error_device:
+ ida_simple_remove(&fpga_bridge_ida, id);
+error_kfree:
+ kfree(bridge);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_register);
+
+/**
+ * fpga_bridge_unregister - unregister an FPGA bridge driver
+ * @dev: FPGA bridge device from pdev
+ */
+void fpga_bridge_unregister(struct device *dev)
+{
+ struct fpga_bridge *bridge = dev_get_drvdata(dev);
+
+ /*
+ * If the low level driver provides a method for putting bridge into
+ * a desired state upon unregister, do it.
+ */
+ if (bridge->br_ops && bridge->br_ops->fpga_bridge_remove)
+ bridge->br_ops->fpga_bridge_remove(bridge);
+
+ device_unregister(&bridge->dev);
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_unregister);
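/*
 * Illustrative sketch, not part of this patch: the minimum a low level
 * bridge driver needs in order to use this framework.  All "my_*" names
 * are hypothetical; the real users are the Altera drivers in this series.
 */
#include <linux/fpga/fpga-bridge.h>
#include <linux/platform_device.h>

static int my_br_enable_set(struct fpga_bridge *bridge, bool enable)
{
	/* Gate traffic in hardware here, typically via bridge->priv */
	return 0;
}

static const struct fpga_bridge_ops my_br_ops = {
	.enable_set = my_br_enable_set,
};

static int my_br_probe(struct platform_device *pdev)
{
	/* The name appears as /sys/class/fpga_bridge/brN/name */
	return fpga_bridge_register(&pdev->dev, "my-bridge", &my_br_ops, NULL);
}

static int my_br_remove(struct platform_device *pdev)
{
	fpga_bridge_unregister(&pdev->dev);
	return 0;
}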
+
+static void fpga_bridge_dev_release(struct device *dev)
+{
+ struct fpga_bridge *bridge = to_fpga_bridge(dev);
+
+ ida_simple_remove(&fpga_bridge_ida, bridge->dev.id);
+ kfree(bridge);
+}
+
+static int __init fpga_bridge_dev_init(void)
+{
+ spin_lock_init(&bridge_list_lock);
+
+ fpga_bridge_class = class_create(THIS_MODULE, "fpga_bridge");
+ if (IS_ERR(fpga_bridge_class))
+ return PTR_ERR(fpga_bridge_class);
+
+ fpga_bridge_class->dev_groups = fpga_bridge_groups;
+ fpga_bridge_class->dev_release = fpga_bridge_dev_release;
+
+ return 0;
+}
+
+static void __exit fpga_bridge_dev_exit(void)
+{
+ class_destroy(fpga_bridge_class);
+ ida_destroy(&fpga_bridge_ida);
+}
+
+MODULE_DESCRIPTION("FPGA Bridge Driver");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
+
+subsys_initcall(fpga_bridge_dev_init);
+module_exit(fpga_bridge_dev_exit);
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index 953dc9195937..f0a69d3e60a5 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -32,19 +32,20 @@ static struct class *fpga_mgr_class;
/**
* fpga_mgr_buf_load - load fpga from image in buffer
* @mgr: fpga manager
- * @flags: flags setting fpga confuration modes
+ * @info: fpga image specific information
* @buf: buffer contain fpga image
* @count: byte count of buf
*
* Step the low level fpga manager through the device-specific steps of getting
* an FPGA ready to be configured, writing the image to it, then doing whatever
* post-configuration steps necessary. This code assumes the caller got the
- * mgr pointer from of_fpga_mgr_get() and checked that it is not an error code.
+ * mgr pointer from of_fpga_mgr_get() or fpga_mgr_get() and checked that it is
+ * not an error code.
*
* Return: 0 on success, negative error code otherwise.
*/
-int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, const char *buf,
- size_t count)
+int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
+ const char *buf, size_t count)
{
struct device *dev = &mgr->dev;
int ret;
@@ -52,10 +53,12 @@ int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, const char *buf,
/*
* Call the low level driver's write_init function. This will do the
* device-specific things to get the FPGA into the state where it is
- * ready to receive an FPGA image.
+ * ready to receive an FPGA image. The low level driver only gets to
+ * see the first initial_header_size bytes in the buffer.
*/
mgr->state = FPGA_MGR_STATE_WRITE_INIT;
- ret = mgr->mops->write_init(mgr, flags, buf, count);
+ ret = mgr->mops->write_init(mgr, info, buf,
+ min(mgr->mops->initial_header_size, count));
if (ret) {
dev_err(dev, "Error preparing FPGA for writing\n");
mgr->state = FPGA_MGR_STATE_WRITE_INIT_ERR;
@@ -78,7 +81,7 @@ int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, const char *buf,
* steps to finish and set the FPGA into operating mode.
*/
mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE;
- ret = mgr->mops->write_complete(mgr, flags);
+ ret = mgr->mops->write_complete(mgr, info);
if (ret) {
dev_err(dev, "Error after writing image data to FPGA\n");
mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE_ERR;
@@ -93,17 +96,19 @@ EXPORT_SYMBOL_GPL(fpga_mgr_buf_load);
/**
* fpga_mgr_firmware_load - request firmware and load to fpga
* @mgr: fpga manager
- * @flags: flags setting fpga confuration modes
+ * @info: fpga image specific information
* @image_name: name of image file on the firmware search path
*
* Request an FPGA image using the firmware class, then write out to the FPGA.
* Update the state before each step to provide info on what step failed if
* there is a failure. This code assumes the caller got the mgr pointer
- * from of_fpga_mgr_get() and checked that it is not an error code.
+ * from of_fpga_mgr_get() or fpga_mgr_get() and checked that it is not an error
+ * code.
*
* Return: 0 on success, negative error code otherwise.
*/
-int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
+int fpga_mgr_firmware_load(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
const char *image_name)
{
struct device *dev = &mgr->dev;
@@ -121,7 +126,7 @@ int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
return ret;
}
- ret = fpga_mgr_buf_load(mgr, flags, fw->data, fw->size);
+ ret = fpga_mgr_buf_load(mgr, info, fw->data, fw->size);
release_firmware(fw);
@@ -181,30 +186,11 @@ static struct attribute *fpga_mgr_attrs[] = {
};
ATTRIBUTE_GROUPS(fpga_mgr);
-static int fpga_mgr_of_node_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
-/**
- * of_fpga_mgr_get - get an exclusive reference to a fpga mgr
- * @node: device node
- *
- * Given a device node, get an exclusive reference to a fpga mgr.
- *
- * Return: fpga manager struct or IS_ERR() condition containing error code.
- */
-struct fpga_manager *of_fpga_mgr_get(struct device_node *node)
+struct fpga_manager *__fpga_mgr_get(struct device *dev)
{
struct fpga_manager *mgr;
- struct device *dev;
int ret = -ENODEV;
- dev = class_find_device(fpga_mgr_class, NULL, node,
- fpga_mgr_of_node_match);
- if (!dev)
- return ERR_PTR(-ENODEV);
-
mgr = to_fpga_manager(dev);
if (!mgr)
goto err_dev;
@@ -226,6 +212,55 @@ err_dev:
put_device(dev);
return ERR_PTR(ret);
}
+
+static int fpga_mgr_dev_match(struct device *dev, const void *data)
+{
+ return dev->parent == data;
+}
+
+/**
+ * fpga_mgr_get - get an exclusive reference to an FPGA manager
+ * @dev: parent device that fpga mgr was registered with
+ *
+ * Given a device, get an exclusive reference to an FPGA manager.
+ *
+ * Return: fpga manager struct or IS_ERR() condition containing error code.
+ */
+struct fpga_manager *fpga_mgr_get(struct device *dev)
+{
+ struct device *mgr_dev = class_find_device(fpga_mgr_class, NULL, dev,
+ fpga_mgr_dev_match);
+ if (!mgr_dev)
+ return ERR_PTR(-ENODEV);
+
+ return __fpga_mgr_get(mgr_dev);
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_get);
+
+static int fpga_mgr_of_node_match(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+/**
+ * of_fpga_mgr_get - get an exclusive reference to an FPGA manager
+ * @node: device node
+ *
+ * Given a device node, get an exclusive reference to an FPGA manager.
+ *
+ * Return: fpga manager struct or IS_ERR() condition containing error code.
+ */
+struct fpga_manager *of_fpga_mgr_get(struct device_node *node)
+{
+ struct device *dev;
+
+ dev = class_find_device(fpga_mgr_class, NULL, node,
+ fpga_mgr_of_node_match);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return __fpga_mgr_get(dev);
+}
EXPORT_SYMBOL_GPL(of_fpga_mgr_get);
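/*
 * Illustrative sketch, not part of this patch: callers now pass a
 * struct fpga_image_info instead of raw flags, and the new fpga_mgr_get()
 * looks a manager up by its parent device where of_fpga_mgr_get() uses a
 * device-tree node.  "mgr_pdev" and "image.rbf" are hypothetical
 * placeholders.
 */
#include <linux/err.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/platform_device.h>

static int program_partial(struct platform_device *mgr_pdev)
{
	struct fpga_image_info info = {
		.flags = FPGA_MGR_PARTIAL_RECONFIG,
	};
	struct fpga_manager *mgr;
	int ret;

	mgr = fpga_mgr_get(&mgr_pdev->dev);
	if (IS_ERR(mgr))
		return PTR_ERR(mgr);

	ret = fpga_mgr_firmware_load(mgr, &info, "image.rbf");

	fpga_mgr_put(mgr);
	return ret;
}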
/**
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
new file mode 100644
index 000000000000..3222fdbad75a
--- /dev/null
+++ b/drivers/fpga/fpga-region.c
@@ -0,0 +1,603 @@
+/*
+ * FPGA Region - Device Tree support for FPGA programming under Linux
+ *
+ * Copyright (C) 2013-2016 Altera Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/**
+ * struct fpga_region - FPGA Region structure
+ * @dev: FPGA Region device
+ * @mutex: enforces exclusive reference to region
+ * @bridge_list: list of FPGA bridges specified in region
+ * @info: fpga image specific information
+ */
+struct fpga_region {
+ struct device dev;
+ struct mutex mutex; /* for exclusive reference to region */
+ struct list_head bridge_list;
+ struct fpga_image_info *info;
+};
+
+#define to_fpga_region(d) container_of(d, struct fpga_region, dev)
+
+static DEFINE_IDA(fpga_region_ida);
+static struct class *fpga_region_class;
+
+static const struct of_device_id fpga_region_of_match[] = {
+ { .compatible = "fpga-region", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fpga_region_of_match);
+
+static int fpga_region_of_node_match(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+/**
+ * fpga_region_find - find FPGA region
+ * @np: device node of FPGA Region
+ * Caller will need to put_device(&region->dev) when done.
+ * Returns FPGA Region struct or NULL
+ */
+static struct fpga_region *fpga_region_find(struct device_node *np)
+{
+ struct device *dev;
+
+ dev = class_find_device(fpga_region_class, NULL, np,
+ fpga_region_of_node_match);
+ if (!dev)
+ return NULL;
+
+ return to_fpga_region(dev);
+}
+
+/**
+ * fpga_region_get - get an exclusive reference to a fpga region
+ * @region: FPGA Region struct
+ *
+ * Caller should call fpga_region_put() when done with region.
+ *
+ * Return fpga_region struct if successful.
+ * Return -EBUSY if someone already has a reference to the region.
+ * Return -ENODEV if the parent driver's module cannot be taken.
+ */
+static struct fpga_region *fpga_region_get(struct fpga_region *region)
+{
+ struct device *dev = &region->dev;
+
+ if (!mutex_trylock(&region->mutex)) {
+ dev_dbg(dev, "%s: FPGA Region already in use\n", __func__);
+ return ERR_PTR(-EBUSY);
+ }
+
+ get_device(dev);
+ of_node_get(dev->of_node);
+ if (!try_module_get(dev->parent->driver->owner)) {
+ of_node_put(dev->of_node);
+ put_device(dev);
+ mutex_unlock(&region->mutex);
+ return ERR_PTR(-ENODEV);
+ }
+
+ dev_dbg(&region->dev, "get\n");
+
+ return region;
+}
+
+/**
+ * fpga_region_put - release a reference to a region
+ *
+ * @region: FPGA region
+ */
+static void fpga_region_put(struct fpga_region *region)
+{
+ struct device *dev = &region->dev;
+
+ dev_dbg(&region->dev, "put\n");
+
+ module_put(dev->parent->driver->owner);
+ of_node_put(dev->of_node);
+ put_device(dev);
+ mutex_unlock(&region->mutex);
+}
+
+/**
+ * fpga_region_get_manager - get exclusive reference for FPGA manager
+ * @region: FPGA region
+ *
+ * Get FPGA Manager from "fpga-mgr" property or from ancestor region.
+ *
+ * Caller should call fpga_mgr_put() when done with manager.
+ *
+ * Return: fpga manager struct or IS_ERR() condition containing error code.
+ */
+static struct fpga_manager *fpga_region_get_manager(struct fpga_region *region)
+{
+ struct device *dev = &region->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *mgr_node;
+ struct fpga_manager *mgr;
+
+ of_node_get(np);
+ while (np) {
+ if (of_device_is_compatible(np, "fpga-region")) {
+ mgr_node = of_parse_phandle(np, "fpga-mgr", 0);
+ if (mgr_node) {
+ mgr = of_fpga_mgr_get(mgr_node);
+ of_node_put(np);
+ return mgr;
+ }
+ }
+ np = of_get_next_parent(np);
+ }
+ of_node_put(np);
+
+ return ERR_PTR(-EINVAL);
+}
+
+/**
+ * fpga_region_get_bridges - create a list of bridges
+ * @region: FPGA region
+ * @overlay: device node of the overlay
+ *
+ * Create a list of bridges including the parent bridge and the bridges
+ * specified by "fpga-bridges" property. Note that the
+ * fpga_bridges_enable/disable/put functions are all fine with an empty list
+ * if that happens.
+ *
+ * Caller should call fpga_bridges_put(&region->bridge_list) when
+ * done with the bridges.
+ *
+ * Return 0 for success (even if there are no bridges specified)
+ * or -EBUSY if any of the bridges are in use.
+ */
+static int fpga_region_get_bridges(struct fpga_region *region,
+ struct device_node *overlay)
+{
+ struct device *dev = &region->dev;
+ struct device_node *region_np = dev->of_node;
+ struct device_node *br, *np, *parent_br = NULL;
+ int i, ret;
+
+ /* If parent is a bridge, add to list */
+ ret = fpga_bridge_get_to_list(region_np->parent, region->info,
+ &region->bridge_list);
+ if (ret == -EBUSY)
+ return ret;
+
+ if (!ret)
+ parent_br = region_np->parent;
+
+ /* If overlay has a list of bridges, use it. */
+ if (of_parse_phandle(overlay, "fpga-bridges", 0))
+ np = overlay;
+ else
+ np = region_np;
+
+ for (i = 0; ; i++) {
+ br = of_parse_phandle(np, "fpga-bridges", i);
+ if (!br)
+ break;
+
+ /* If parent bridge is in list, skip it. */
+ if (br == parent_br)
+ continue;
+
+ /* If node is a bridge, get it and add to list */
+ ret = fpga_bridge_get_to_list(br, region->info,
+ &region->bridge_list);
+
+ /* If any of the bridges are in use, give up */
+ if (ret == -EBUSY) {
+ fpga_bridges_put(&region->bridge_list);
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * fpga_region_program_fpga - program FPGA
+ * @region: FPGA region
+ * @firmware_name: name of FPGA image firmware file
+ * @overlay: device node of the overlay
+ * Program an FPGA using information in the device tree.
+ * Function assumes that there is a firmware-name property.
+ * Return 0 for success or negative error code.
+ */
+static int fpga_region_program_fpga(struct fpga_region *region,
+ const char *firmware_name,
+ struct device_node *overlay)
+{
+ struct fpga_manager *mgr;
+ int ret;
+
+ region = fpga_region_get(region);
+ if (IS_ERR(region)) {
+ pr_err("failed to get fpga region\n");
+ return PTR_ERR(region);
+ }
+
+ mgr = fpga_region_get_manager(region);
+ if (IS_ERR(mgr)) {
+ pr_err("failed to get fpga region manager\n");
+ return PTR_ERR(mgr);
+ }
+
+ ret = fpga_region_get_bridges(region, overlay);
+ if (ret) {
+ pr_err("failed to get fpga region bridges\n");
+ goto err_put_mgr;
+ }
+
+ ret = fpga_bridges_disable(&region->bridge_list);
+ if (ret) {
+ pr_err("failed to disable region bridges\n");
+ goto err_put_br;
+ }
+
+ ret = fpga_mgr_firmware_load(mgr, region->info, firmware_name);
+ if (ret) {
+ pr_err("failed to load fpga image\n");
+ goto err_put_br;
+ }
+
+ ret = fpga_bridges_enable(&region->bridge_list);
+ if (ret) {
+ pr_err("failed to enable region bridges\n");
+ goto err_put_br;
+ }
+
+ fpga_mgr_put(mgr);
+ fpga_region_put(region);
+
+ return 0;
+
+err_put_br:
+ fpga_bridges_put(&region->bridge_list);
+err_put_mgr:
+ fpga_mgr_put(mgr);
+ fpga_region_put(region);
+
+ return ret;
+}
+
+/**
+ * child_regions_with_firmware - check child regions for firmware-name
+ * @overlay: device node of the overlay
+ *
+ * If the overlay adds child FPGA regions, they are not allowed to have a
+ * firmware-name property.
+ *
+ * Return 0 for OK or -EINVAL if a child FPGA region adds firmware-name.
+ */
+static int child_regions_with_firmware(struct device_node *overlay)
+{
+ struct device_node *child_region;
+ const char *child_firmware_name;
+ int ret = 0;
+
+ of_node_get(overlay);
+
+ child_region = of_find_matching_node(overlay, fpga_region_of_match);
+ while (child_region) {
+ if (!of_property_read_string(child_region, "firmware-name",
+ &child_firmware_name)) {
+ ret = -EINVAL;
+ break;
+ }
+ child_region = of_find_matching_node(child_region,
+ fpga_region_of_match);
+ }
+
+ of_node_put(child_region);
+
+ if (ret)
+ pr_err("firmware-name not allowed in child FPGA region: %s",
+ child_region->full_name);
+
+ return ret;
+}
+
+/**
+ * fpga_region_notify_pre_apply - pre-apply overlay notification
+ *
+ * @region: FPGA region that the overlay was applied to
+ * @nd: overlay notification data
+ *
+ * Called when an overlay targeted at an FPGA Region is about to be
+ * applied. The function checks the properties that the overlay will add to
+ * the FPGA region and, if the checks pass, programs the FPGA.
+ *
+ * The checks are:
+ * The overlay must add either firmware-name or external-fpga-config property
+ * to the FPGA Region.
+ *
+ * firmware-name : program the FPGA
+ * external-fpga-config : FPGA is already programmed
+ *
+ * The overlay can add other FPGA regions, but child FPGA regions cannot have a
+ * firmware-name property since those regions don't exist yet.
+ *
+ * If the overlay breaks the rules, the notifier returns an error and the
+ * overlay is rejected before it goes into the main tree.
+ *
+ * Returns 0 for success or negative error code for failure.
+ */
+static int fpga_region_notify_pre_apply(struct fpga_region *region,
+ struct of_overlay_notify_data *nd)
+{
+ const char *firmware_name = NULL;
+ struct fpga_image_info *info;
+ int ret;
+
+ info = devm_kzalloc(&region->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ region->info = info;
+
+ /* Reject overlay if child FPGA Regions have firmware-name property */
+ ret = child_regions_with_firmware(nd->overlay);
+ if (ret)
+ return ret;
+
+ /* Read FPGA region properties from the overlay */
+ if (of_property_read_bool(nd->overlay, "partial-fpga-config"))
+ info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
+
+ if (of_property_read_bool(nd->overlay, "external-fpga-config"))
+ info->flags |= FPGA_MGR_EXTERNAL_CONFIG;
+
+ of_property_read_string(nd->overlay, "firmware-name", &firmware_name);
+
+ of_property_read_u32(nd->overlay, "region-unfreeze-timeout-us",
+ &info->enable_timeout_us);
+
+ of_property_read_u32(nd->overlay, "region-freeze-timeout-us",
+ &info->disable_timeout_us);
+
+ /* If FPGA was externally programmed, don't specify firmware */
+ if ((info->flags & FPGA_MGR_EXTERNAL_CONFIG) && firmware_name) {
+ pr_err("error: specified firmware and external-fpga-config");
+ return -EINVAL;
+ }
+
+ /* FPGA is already configured externally. We're done. */
+ if (info->flags & FPGA_MGR_EXTERNAL_CONFIG)
+ return 0;
+
+ /* If we got this far, we should be programming the FPGA */
+ if (!firmware_name) {
+ pr_err("should specify firmware-name or external-fpga-config\n");
+ return -EINVAL;
+ }
+
+ return fpga_region_program_fpga(region, firmware_name, nd->overlay);
+}
+
+/**
+ * fpga_region_notify_post_remove - post-remove overlay notification
+ *
+ * @region: FPGA region that was targeted by the overlay that was removed
+ * @nd: overlay notification data
+ *
+ * Called after an overlay has been removed if the overlay's target was a
+ * FPGA region.
+ */
+static void fpga_region_notify_post_remove(struct fpga_region *region,
+ struct of_overlay_notify_data *nd)
+{
+ fpga_bridges_disable(&region->bridge_list);
+ fpga_bridges_put(&region->bridge_list);
+ devm_kfree(&region->dev, region->info);
+ region->info = NULL;
+}
+
+/**
+ * of_fpga_region_notify - reconfig notifier for dynamic DT changes
+ * @nb: notifier block
+ * @action: notifier action
+ * @arg: reconfig data
+ *
+ * This notifier handles programming an FPGA when a "firmware-name" property is
+ * added to a fpga-region.
+ *
+ * Returns NOTIFY_OK or error if FPGA programming fails.
+ */
+static int of_fpga_region_notify(struct notifier_block *nb,
+ unsigned long action, void *arg)
+{
+ struct of_overlay_notify_data *nd = arg;
+ struct fpga_region *region;
+ int ret;
+
+ switch (action) {
+ case OF_OVERLAY_PRE_APPLY:
+ pr_debug("%s OF_OVERLAY_PRE_APPLY\n", __func__);
+ break;
+ case OF_OVERLAY_POST_APPLY:
+ pr_debug("%s OF_OVERLAY_POST_APPLY\n", __func__);
+ return NOTIFY_OK; /* not for us */
+ case OF_OVERLAY_PRE_REMOVE:
+ pr_debug("%s OF_OVERLAY_PRE_REMOVE\n", __func__);
+ return NOTIFY_OK; /* not for us */
+ case OF_OVERLAY_POST_REMOVE:
+ pr_debug("%s OF_OVERLAY_POST_REMOVE\n", __func__);
+ break;
+ default: /* should not happen */
+ return NOTIFY_OK;
+ }
+
+ region = fpga_region_find(nd->target);
+ if (!region)
+ return NOTIFY_OK;
+
+ ret = 0;
+ switch (action) {
+ case OF_OVERLAY_PRE_APPLY:
+ ret = fpga_region_notify_pre_apply(region, nd);
+ break;
+
+ case OF_OVERLAY_POST_REMOVE:
+ fpga_region_notify_post_remove(region, nd);
+ break;
+ }
+
+ put_device(&region->dev);
+
+ if (ret)
+ return notifier_from_errno(ret);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block fpga_region_of_nb = {
+ .notifier_call = of_fpga_region_notify,
+};
+
+static int fpga_region_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct fpga_region *region;
+ int id, ret = 0;
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ id = ida_simple_get(&fpga_region_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ ret = id;
+ goto err_kfree;
+ }
+
+ mutex_init(&region->mutex);
+ INIT_LIST_HEAD(&region->bridge_list);
+
+ device_initialize(&region->dev);
+ region->dev.class = fpga_region_class;
+ region->dev.parent = dev;
+ region->dev.of_node = np;
+ region->dev.id = id;
+ dev_set_drvdata(dev, region);
+
+ ret = dev_set_name(&region->dev, "region%d", id);
+ if (ret)
+ goto err_remove;
+
+ ret = device_add(&region->dev);
+ if (ret)
+ goto err_remove;
+
+ of_platform_populate(np, fpga_region_of_match, NULL, &region->dev);
+
+ dev_info(dev, "FPGA Region probed\n");
+
+ return 0;
+
+err_remove:
+ ida_simple_remove(&fpga_region_ida, id);
+err_kfree:
+ kfree(region);
+
+ return ret;
+}
+
+static int fpga_region_remove(struct platform_device *pdev)
+{
+ struct fpga_region *region = platform_get_drvdata(pdev);
+
+ device_unregister(&region->dev);
+
+ return 0;
+}
+
+static struct platform_driver fpga_region_driver = {
+ .probe = fpga_region_probe,
+ .remove = fpga_region_remove,
+ .driver = {
+ .name = "fpga-region",
+ .of_match_table = of_match_ptr(fpga_region_of_match),
+ },
+};
+
+static void fpga_region_dev_release(struct device *dev)
+{
+ struct fpga_region *region = to_fpga_region(dev);
+
+ ida_simple_remove(&fpga_region_ida, region->dev.id);
+ kfree(region);
+}
+
+/**
+ * fpga_region_init - init function for fpga_region class
+ * Creates the fpga_region class and registers a reconfig notifier.
+ */
+static int __init fpga_region_init(void)
+{
+ int ret;
+
+ fpga_region_class = class_create(THIS_MODULE, "fpga_region");
+ if (IS_ERR(fpga_region_class))
+ return PTR_ERR(fpga_region_class);
+
+ fpga_region_class->dev_release = fpga_region_dev_release;
+
+ ret = of_overlay_notifier_register(&fpga_region_of_nb);
+ if (ret)
+ goto err_class;
+
+ ret = platform_driver_register(&fpga_region_driver);
+ if (ret)
+ goto err_plat;
+
+ return 0;
+
+err_plat:
+ of_overlay_notifier_unregister(&fpga_region_of_nb);
+err_class:
+ class_destroy(fpga_region_class);
+ ida_destroy(&fpga_region_ida);
+ return ret;
+}
+
+static void __exit fpga_region_exit(void)
+{
+ platform_driver_unregister(&fpga_region_driver);
+ of_overlay_notifier_unregister(&fpga_region_of_nb);
+ class_destroy(fpga_region_class);
+ ida_destroy(&fpga_region_ida);
+}
+
+subsys_initcall(fpga_region_init);
+module_exit(fpga_region_exit);
+
+MODULE_DESCRIPTION("FPGA Region");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c
new file mode 100644
index 000000000000..f8770af0f6b5
--- /dev/null
+++ b/drivers/fpga/socfpga-a10.c
@@ -0,0 +1,557 @@
+/*
+ * FPGA Manager Driver for Altera Arria10 SoCFPGA
+ *
+ * Copyright (C) 2015-2016 Altera Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+
+#define A10_FPGAMGR_DCLKCNT_OFST 0x08
+#define A10_FPGAMGR_DCLKSTAT_OFST 0x0c
+#define A10_FPGAMGR_IMGCFG_CTL_00_OFST 0x70
+#define A10_FPGAMGR_IMGCFG_CTL_01_OFST 0x74
+#define A10_FPGAMGR_IMGCFG_CTL_02_OFST 0x78
+#define A10_FPGAMGR_IMGCFG_STAT_OFST 0x80
+
+#define A10_FPGAMGR_DCLKSTAT_DCLKDONE BIT(0)
+
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NCONFIG BIT(0)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NSTATUS BIT(1)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_CONDONE BIT(2)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NCONFIG BIT(8)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NSTATUS_OE BIT(16)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_CONDONE_OE BIT(24)
+
+#define A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG BIT(0)
+#define A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST BIT(16)
+#define A10_FPGAMGR_IMGCFG_CTL_01_S2F_NCE BIT(24)
+
+#define A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL BIT(0)
+#define A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_MASK (BIT(16) | BIT(17))
+#define A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SHIFT 16
+#define A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH BIT(24)
+#define A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SHIFT 24
+
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_CRC_ERROR BIT(0)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_EARLY_USERMODE BIT(1)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_USERMODE BIT(2)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN BIT(4)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN BIT(6)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_PR_READY BIT(9)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_PR_DONE BIT(10)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_PR_ERROR BIT(11)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_NCONFIG_PIN BIT(12)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_MASK (BIT(16) | BIT(17) | BIT(18))
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_SHIFT 16
+
+/* FPGA CD Ratio Value */
+#define CDRATIO_x1 0x0
+#define CDRATIO_x2 0x1
+#define CDRATIO_x4 0x2
+#define CDRATIO_x8 0x3
+
+/* Configuration width 16/32 bit */
+#define CFGWDTH_32 1
+#define CFGWDTH_16 0
+
+/*
+ * struct a10_fpga_priv - private data for fpga manager
+ * @regmap: regmap for register access
+ * @fpga_data_addr: iomap for single address data register to FPGA
+ * @clk: clock
+ */
+struct a10_fpga_priv {
+ struct regmap *regmap;
+ void __iomem *fpga_data_addr;
+ struct clk *clk;
+};
+
+static bool socfpga_a10_fpga_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case A10_FPGAMGR_DCLKCNT_OFST:
+ case A10_FPGAMGR_DCLKSTAT_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_00_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_01_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_02_OFST:
+ return true;
+ }
+ return false;
+}
+
+static bool socfpga_a10_fpga_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case A10_FPGAMGR_DCLKCNT_OFST:
+ case A10_FPGAMGR_DCLKSTAT_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_00_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_01_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_02_OFST:
+ case A10_FPGAMGR_IMGCFG_STAT_OFST:
+ return true;
+ }
+ return false;
+}
+
+static const struct regmap_config socfpga_a10_fpga_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .writeable_reg = socfpga_a10_fpga_writeable_reg,
+ .readable_reg = socfpga_a10_fpga_readable_reg,
+ .max_register = A10_FPGAMGR_IMGCFG_STAT_OFST,
+ .cache_type = REGCACHE_NONE,
+};
+
+/*
+ * from the register map description of cdratio in imgcfg_ctrl_02:
+ * Normal Configuration : 32bit Passive Parallel
+ * Partial Reconfiguration : 16bit Passive Parallel
+ */
+static void socfpga_a10_fpga_set_cfg_width(struct a10_fpga_priv *priv,
+ int width)
+{
+ width <<= A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SHIFT;
+
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH, width);
+}
+
+static void socfpga_a10_fpga_generate_dclks(struct a10_fpga_priv *priv,
+ u32 count)
+{
+ u32 val;
+
+ /* Clear any existing DONE status. */
+ regmap_write(priv->regmap, A10_FPGAMGR_DCLKSTAT_OFST,
+ A10_FPGAMGR_DCLKSTAT_DCLKDONE);
+
+ /* Issue the requested number of DCLKs. */
+ regmap_write(priv->regmap, A10_FPGAMGR_DCLKCNT_OFST, count);
+
+ /* Wait until the DCLK count is done. */
+ regmap_read_poll_timeout(priv->regmap, A10_FPGAMGR_DCLKSTAT_OFST, val,
+ val, 1, 100);
+
+ /* Clear DONE status. */
+ regmap_write(priv->regmap, A10_FPGAMGR_DCLKSTAT_OFST,
+ A10_FPGAMGR_DCLKSTAT_DCLKDONE);
+}
+
+#define RBF_ENCRYPTION_MODE_OFFSET 69
+#define RBF_DECOMPRESS_OFFSET 229
+
+static int socfpga_a10_fpga_encrypted(u32 *buf32, size_t buf32_size)
+{
+ if (buf32_size < RBF_ENCRYPTION_MODE_OFFSET + 1)
+ return -EINVAL;
+
+ /* Is the bitstream encrypted? */
+ return ((buf32[RBF_ENCRYPTION_MODE_OFFSET] >> 2) & 3) != 0;
+}
+
+static int socfpga_a10_fpga_compressed(u32 *buf32, size_t buf32_size)
+{
+ if (buf32_size < RBF_DECOMPRESS_OFFSET + 1)
+ return -EINVAL;
+
+ /* Is the bitstream compressed? */
+ return !((buf32[RBF_DECOMPRESS_OFFSET] >> 1) & 1);
+}
+
+static unsigned int socfpga_a10_fpga_get_cd_ratio(unsigned int cfg_width,
+ bool encrypt, bool compress)
+{
+ unsigned int cd_ratio;
+
+ /*
+ * cd ratio is dependent on cfg width and whether the bitstream
+ * is encrypted and/or compressed.
+ *
+ * | width | encr. | compr. | cd ratio |
+ * | 16 | 0 | 0 | 1 |
+ * | 16 | 0 | 1 | 4 |
+ * | 16 | 1 | 0 | 2 |
+ * | 16 | 1 | 1 | 4 |
+ * | 32 | 0 | 0 | 1 |
+ * | 32 | 0 | 1 | 8 |
+ * | 32 | 1 | 0 | 4 |
+ * | 32 | 1 | 1 | 8 |
+ */
+ if (!compress && !encrypt)
+ return CDRATIO_x1;
+
+ if (compress)
+ cd_ratio = CDRATIO_x4;
+ else
+ cd_ratio = CDRATIO_x2;
+
+ /* If 32 bit, double the cd ratio by incrementing the field */
+ if (cfg_width == CFGWDTH_32)
+ cd_ratio += 1;
+
+ return cd_ratio;
+}
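/*
 * Illustrative sketch, not part of this patch: worked examples of the cd
 * ratio table above, assuming this sits in the same file so the static
 * helper and the CDRATIO_x* / CFGWDTH_* macros are visible.
 */
static void __maybe_unused cd_ratio_examples(void)
{
	/* 16-bit, neither encrypted nor compressed: x1 */
	WARN_ON(socfpga_a10_fpga_get_cd_ratio(CFGWDTH_16, false, false) !=
		CDRATIO_x1);
	/* 16-bit, encrypted only: x2 */
	WARN_ON(socfpga_a10_fpga_get_cd_ratio(CFGWDTH_16, true, false) !=
		CDRATIO_x2);
	/* 32-bit, compressed: x4 bumped to x8 because of the 32-bit width */
	WARN_ON(socfpga_a10_fpga_get_cd_ratio(CFGWDTH_32, false, true) !=
		CDRATIO_x8);
}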
+
+static int socfpga_a10_fpga_set_cdratio(struct fpga_manager *mgr,
+ unsigned int cfg_width,
+ const char *buf, size_t count)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ unsigned int cd_ratio;
+ int encrypt, compress;
+
+ encrypt = socfpga_a10_fpga_encrypted((u32 *)buf, count / 4);
+ if (encrypt < 0)
+ return -EINVAL;
+
+ compress = socfpga_a10_fpga_compressed((u32 *)buf, count / 4);
+ if (compress < 0)
+ return -EINVAL;
+
+ cd_ratio = socfpga_a10_fpga_get_cd_ratio(cfg_width, encrypt, compress);
+
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_MASK,
+ cd_ratio << A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SHIFT);
+
+ return 0;
+}
+
+static u32 socfpga_a10_fpga_read_stat(struct a10_fpga_priv *priv)
+{
+ u32 val;
+
+ regmap_read(priv->regmap, A10_FPGAMGR_IMGCFG_STAT_OFST, &val);
+
+ return val;
+}
+
+static int socfpga_a10_fpga_wait_for_pr_ready(struct a10_fpga_priv *priv)
+{
+ u32 reg, i;
+
+ for (i = 0; i < 10 ; i++) {
+ reg = socfpga_a10_fpga_read_stat(priv);
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_ERROR)
+ return -EINVAL;
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_READY)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int socfpga_a10_fpga_wait_for_pr_done(struct a10_fpga_priv *priv)
+{
+ u32 reg, i;
+
+ for (i = 0; i < 10 ; i++) {
+ reg = socfpga_a10_fpga_read_stat(priv);
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_ERROR)
+ return -EINVAL;
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_DONE)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+/* Start the FPGA programming by initializing the FPGA Manager */
+static int socfpga_a10_fpga_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ unsigned int cfg_width;
+ u32 msel, stat, mask;
+ int ret;
+
+ if (info->flags & FPGA_MGR_PARTIAL_RECONFIG)
+ cfg_width = CFGWDTH_16;
+ else
+ return -EINVAL;
+
+ /* Check for passive parallel (msel == 000 or 001) */
+ msel = socfpga_a10_fpga_read_stat(priv);
+ msel &= A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_MASK;
+ msel >>= A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_SHIFT;
+ if ((msel != 0) && (msel != 1)) {
+ dev_dbg(&mgr->dev, "Fail: invalid msel=%d\n", msel);
+ return -EINVAL;
+ }
+
+ /* Make sure no external devices are interfering */
+ stat = socfpga_a10_fpga_read_stat(priv);
+ mask = A10_FPGAMGR_IMGCFG_STAT_F2S_NCONFIG_PIN |
+ A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN;
+ if ((stat & mask) != mask)
+ return -EINVAL;
+
+ /* Set cfg width */
+ socfpga_a10_fpga_set_cfg_width(priv, cfg_width);
+
+ /* Determine cd ratio from bitstream header and set cd ratio */
+ ret = socfpga_a10_fpga_set_cdratio(mgr, cfg_width, buf, count);
+ if (ret)
+ return ret;
+
+ /*
+ * Clear s2f_nce to enable chip select. Leave pr_request
+ * unasserted and override disabled.
+ */
+ regmap_write(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG);
+
+ /* Set cfg_ctrl to enable s2f dclk and data */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL,
+ A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL);
+
+ /*
+ * Disable overrides not needed for pr.
+ * s2f_config==1 leaves reset deasserted.
+ */
+ regmap_write(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_00_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NCONFIG |
+ A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NSTATUS |
+ A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_CONDONE |
+ A10_FPGAMGR_IMGCFG_CTL_00_S2F_NCONFIG);
+
+ /* Enable override for data, dclk, nce, and pr_request to CSS */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG, 0);
+
+ /* Send some clocks to clear out any errors */
+ socfpga_a10_fpga_generate_dclks(priv, 256);
+
+ /* Assert pr_request */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST);
+
+ /* Provide 2048 DCLKs before starting the config data streaming. */
+ socfpga_a10_fpga_generate_dclks(priv, 0x7ff);
+
+ /* Wait for pr_ready */
+ return socfpga_a10_fpga_wait_for_pr_ready(priv);
+}
+
+/*
+ * write data to the FPGA data register
+ */
+static int socfpga_a10_fpga_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ u32 *buffer_32 = (u32 *)buf;
+ size_t i = 0;
+
+ if (count <= 0)
+ return -EINVAL;
+
+ /* Write out the complete 32-bit chunks */
+ while (count >= sizeof(u32)) {
+ writel(buffer_32[i++], priv->fpga_data_addr);
+ count -= sizeof(u32);
+ }
+
+ /* Write out remaining non 32-bit chunks */
+ switch (count) {
+ case 3:
+ writel(buffer_32[i++] & 0x00ffffff, priv->fpga_data_addr);
+ break;
+ case 2:
+ writel(buffer_32[i++] & 0x0000ffff, priv->fpga_data_addr);
+ break;
+ case 1:
+ writel(buffer_32[i++] & 0x000000ff, priv->fpga_data_addr);
+ break;
+ case 0:
+ break;
+ default:
+ /* This will never happen */
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int socfpga_a10_fpga_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ u32 reg;
+ int ret;
+
+ /* Wait for pr_done */
+ ret = socfpga_a10_fpga_wait_for_pr_done(priv);
+
+ /* Clear pr_request */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST, 0);
+
+ /* Send some clocks to clear out any errors */
+ socfpga_a10_fpga_generate_dclks(priv, 256);
+
+ /* Disable s2f dclk and data */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL, 0);
+
+ /* Deassert chip select */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NCE,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NCE);
+
+ /* Disable data, dclk, nce, and pr_request override to CSS */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG);
+
+ /* Return any errors regarding pr_done or pr_error */
+ if (ret)
+ return ret;
+
+ /* Final check */
+ reg = socfpga_a10_fpga_read_stat(priv);
+
+ if (((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_USERMODE) == 0) ||
+ ((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN) == 0) ||
+ ((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN) == 0)) {
+ dev_dbg(&mgr->dev,
+ "Timeout in final check. Status=%08x\n", reg);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static enum fpga_mgr_states socfpga_a10_fpga_state(struct fpga_manager *mgr)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ u32 reg = socfpga_a10_fpga_read_stat(priv);
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_USERMODE)
+ return FPGA_MGR_STATE_OPERATING;
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_READY)
+ return FPGA_MGR_STATE_WRITE;
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_CRC_ERROR)
+ return FPGA_MGR_STATE_WRITE_COMPLETE_ERR;
+
+ if ((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN) == 0)
+ return FPGA_MGR_STATE_RESET;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static const struct fpga_manager_ops socfpga_a10_fpga_mgr_ops = {
+ .initial_header_size = (RBF_DECOMPRESS_OFFSET + 1) * 4,
+ .state = socfpga_a10_fpga_state,
+ .write_init = socfpga_a10_fpga_write_init,
+ .write = socfpga_a10_fpga_write,
+ .write_complete = socfpga_a10_fpga_write_complete,
+};
+
+static int socfpga_a10_fpga_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct a10_fpga_priv *priv;
+ void __iomem *reg_base;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* First mmio base is for register access */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
+
+ /* Second mmio base is for writing FPGA image data */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->fpga_data_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->fpga_data_addr))
+ return PTR_ERR(priv->fpga_data_addr);
+
+ /* regmap for register access */
+ priv->regmap = devm_regmap_init_mmio(dev, reg_base,
+ &socfpga_a10_fpga_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return -ENODEV;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "no clock specified\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "could not enable clock\n");
+ return -EBUSY;
+ }
+
+ return fpga_mgr_register(dev, "SoCFPGA Arria10 FPGA Manager",
+ &socfpga_a10_fpga_mgr_ops, priv);
+}
+
+static int socfpga_a10_fpga_remove(struct platform_device *pdev)
+{
+ struct fpga_manager *mgr = platform_get_drvdata(pdev);
+ struct a10_fpga_priv *priv = mgr->priv;
+
+ fpga_mgr_unregister(&pdev->dev);
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static const struct of_device_id socfpga_a10_fpga_of_match[] = {
+ { .compatible = "altr,socfpga-a10-fpga-mgr", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, socfpga_a10_fpga_of_match);
+
+static struct platform_driver socfpga_a10_fpga_driver = {
+ .probe = socfpga_a10_fpga_probe,
+ .remove = socfpga_a10_fpga_remove,
+ .driver = {
+ .name = "socfpga_a10_fpga_manager",
+ .of_match_table = socfpga_a10_fpga_of_match,
+ },
+};
+
+module_platform_driver(socfpga_a10_fpga_driver);
+
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_DESCRIPTION("SoCFPGA Arria10 FPGA Manager");
+MODULE_LICENSE("GPL v2");
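A note on the tail handling in socfpga_a10_fpga_write() above: the switch statement masks the last partial word so that bytes past the end of the image never reach the data register. Below is a minimal stand-alone sketch of that masking, not part of the patch, assuming a little-endian CPU as the driver does; it avoids reading past the end of the buffer by copying only the valid tail bytes.

#include <stdint.h>
#include <string.h>

/* Build the final word from the 1-3 trailing bytes of the image; bytes
 * beyond 'tail_len' stay zero, matching the masks (0x00ffffff, 0x0000ffff,
 * 0x000000ff) used in socfpga_a10_fpga_write(). */
static uint32_t a10_tail_word(const uint8_t *tail, size_t tail_len)
{
	uint32_t word = 0;

	memcpy(&word, tail, tail_len);	/* little endian: low bytes first */
	return word;
}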
diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c
index 27d2ff28132c..b6672e66cda6 100644
--- a/drivers/fpga/socfpga.c
+++ b/drivers/fpga/socfpga.c
@@ -407,13 +407,14 @@ static int socfpga_fpga_reset(struct fpga_manager *mgr)
/*
* Prepare the FPGA to receive the configuration data.
*/
-static int socfpga_fpga_ops_configure_init(struct fpga_manager *mgr, u32 flags,
+static int socfpga_fpga_ops_configure_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
const char *buf, size_t count)
{
struct socfpga_fpga_priv *priv = mgr->priv;
int ret;
- if (flags & FPGA_MGR_PARTIAL_RECONFIG) {
+ if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
return -EINVAL;
}
@@ -478,7 +479,7 @@ static int socfpga_fpga_ops_configure_write(struct fpga_manager *mgr,
}
static int socfpga_fpga_ops_configure_complete(struct fpga_manager *mgr,
- u32 flags)
+ struct fpga_image_info *info)
{
struct socfpga_fpga_priv *priv = mgr->priv;
u32 status;
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index c2fb4120bd62..1812bf7614e1 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -118,7 +118,6 @@
#define FPGA_RST_NONE_MASK 0x0
struct zynq_fpga_priv {
- struct device *dev;
int irq;
struct clk *clk;
@@ -175,7 +174,8 @@ static irqreturn_t zynq_fpga_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
+static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
const char *buf, size_t count)
{
struct zynq_fpga_priv *priv;
@@ -189,7 +189,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
return err;
/* don't globally reset PL if we're doing partial reconfig */
- if (!(flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+ if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
/* assert AXI interface resets */
regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
FPGA_RST_ALL_MASK);
@@ -217,7 +217,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
INIT_POLL_DELAY,
INIT_POLL_TIMEOUT);
if (err) {
- dev_err(priv->dev, "Timeout waiting for PCFG_INIT");
+ dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
goto out_err;
}
@@ -231,7 +231,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
INIT_POLL_DELAY,
INIT_POLL_TIMEOUT);
if (err) {
- dev_err(priv->dev, "Timeout waiting for !PCFG_INIT");
+ dev_err(&mgr->dev, "Timeout waiting for !PCFG_INIT\n");
goto out_err;
}
@@ -245,7 +245,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
INIT_POLL_DELAY,
INIT_POLL_TIMEOUT);
if (err) {
- dev_err(priv->dev, "Timeout waiting for PCFG_INIT");
+ dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
goto out_err;
}
}
@@ -262,7 +262,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
/* check that we have room in the command queue */
status = zynq_fpga_read(priv, STATUS_OFFSET);
if (status & STATUS_DMA_Q_F) {
- dev_err(priv->dev, "DMA command queue full");
+ dev_err(&mgr->dev, "DMA command queue full\n");
err = -EBUSY;
goto out_err;
}
@@ -295,7 +295,8 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr,
in_count = count;
priv = mgr->priv;
- kbuf = dma_alloc_coherent(priv->dev, count, &dma_addr, GFP_KERNEL);
+ kbuf =
+ dma_alloc_coherent(mgr->dev.parent, count, &dma_addr, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
@@ -331,19 +332,19 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr,
zynq_fpga_write(priv, INT_STS_OFFSET, intr_status);
if (!((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
- dev_err(priv->dev, "Error configuring FPGA");
+ dev_err(&mgr->dev, "Error configuring FPGA\n");
err = -EFAULT;
}
clk_disable(priv->clk);
out_free:
- dma_free_coherent(priv->dev, in_count, kbuf, dma_addr);
-
+ dma_free_coherent(mgr->dev.parent, count, kbuf, dma_addr);
return err;
}
-static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr, u32 flags)
+static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
{
struct zynq_fpga_priv *priv = mgr->priv;
int err;
@@ -364,7 +365,7 @@ static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr, u32 flags)
return err;
/* for the partial reconfig case we didn't touch the level shifters */
- if (!(flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+ if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
/* enable level shifters from PL to PS */
regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
LVL_SHFTR_ENABLE_PL_TO_PS);
@@ -416,8 +417,6 @@ static int zynq_fpga_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->dev = dev;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->io_base = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->io_base))
@@ -426,7 +425,7 @@ static int zynq_fpga_probe(struct platform_device *pdev)
priv->slcr = syscon_regmap_lookup_by_phandle(dev->of_node,
"syscon");
if (IS_ERR(priv->slcr)) {
- dev_err(dev, "unable to get zynq-slcr regmap");
+ dev_err(dev, "unable to get zynq-slcr regmap\n");
return PTR_ERR(priv->slcr);
}
@@ -434,38 +433,41 @@ static int zynq_fpga_probe(struct platform_device *pdev)
priv->irq = platform_get_irq(pdev, 0);
if (priv->irq < 0) {
- dev_err(dev, "No IRQ available");
+ dev_err(dev, "No IRQ available\n");
return priv->irq;
}
- err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0,
- dev_name(dev), priv);
- if (err) {
- dev_err(dev, "unable to request IRQ");
- return err;
- }
-
priv->clk = devm_clk_get(dev, "ref_clk");
if (IS_ERR(priv->clk)) {
- dev_err(dev, "input clock not found");
+ dev_err(dev, "input clock not found\n");
return PTR_ERR(priv->clk);
}
err = clk_prepare_enable(priv->clk);
if (err) {
- dev_err(dev, "unable to enable clock");
+ dev_err(dev, "unable to enable clock\n");
return err;
}
/* unlock the device */
zynq_fpga_write(priv, UNLOCK_OFFSET, UNLOCK_MASK);
+ zynq_fpga_write(priv, INT_MASK_OFFSET, 0xFFFFFFFF);
+ zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
+ err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0, dev_name(dev),
+ priv);
+ if (err) {
+ dev_err(dev, "unable to request IRQ\n");
+ clk_disable_unprepare(priv->clk);
+ return err;
+ }
+
clk_disable(priv->clk);
err = fpga_mgr_register(dev, "Xilinx Zynq FPGA Manager",
&zynq_fpga_ops, priv);
if (err) {
- dev_err(dev, "unable to register FPGA manager");
+ dev_err(dev, "unable to register FPGA manager\n");
clk_unprepare(priv->clk);
return err;
}
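Both the socfpga and zynq hunks above make the same interface change: the low-level callbacks now receive a struct fpga_image_info pointer instead of a bare flags word and read the flags from it. The following is a condensed, stand-alone illustration of that pattern; the struct and flag are simplified stand-ins, not the kernel definitions.

#include <stdio.h>

#define EXAMPLE_PARTIAL_RECONFIG	(1 << 0)	/* stand-in for FPGA_MGR_PARTIAL_RECONFIG */

struct example_image_info {
	unsigned int flags;		/* reduced stand-in for struct fpga_image_info */
};

/* What a write_init() callback does with the new argument: the flags that
 * used to be passed directly are now read from the info structure. */
static int example_write_init(const struct example_image_info *info)
{
	if (info->flags & EXAMPLE_PARTIAL_RECONFIG) {
		puts("partial reconfiguration: leave the rest of the fabric running");
		return 0;
	}

	puts("full reconfiguration: reset the whole device first");
	return 0;
}

int main(void)
{
	struct example_image_info info = { .flags = EXAMPLE_PARTIAL_RECONFIG };

	return example_write_init(&info);
}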
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index cd4599c0523b..4070b7386e9d 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -138,7 +138,7 @@ config HID_ASUS
tristate "Asus"
depends on I2C_HID
---help---
- Support for Asus notebook built-in keyboard via i2c.
+ Support for Asus notebook built-in keyboard and touchpad via i2c.
Supported devices:
- EeeBook X205TA
@@ -214,7 +214,7 @@ config HID_CMEDIA
config HID_CP2112
tristate "Silicon Labs CP2112 HID USB-to-SMBus Bridge support"
- depends on USB_HID && I2C && GPIOLIB
+ depends on USB_HID && I2C && GPIOLIB && GPIOLIB_IRQCHIP
---help---
Support for Silicon Labs CP2112 HID USB to SMBus Master Bridge.
This is a HID device driver which registers as an i2c adapter
@@ -512,6 +512,14 @@ config HID_MAGICMOUSE
Say Y here if you want support for the multi-touch features of the
Apple Wireless "Magic" Mouse and the Apple Wireless "Magic" Trackpad.
+config HID_MAYFLASH
+ tristate "Mayflash game controller adapter force feedback"
+ depends on HID
+ select INPUT_FF_MEMLESS
+ ---help---
+ Say Y here if you have HJZ Mayflash PS3 game controller adapters
+ and want to enable force feedback support.
+
config HID_MICROSOFT
tristate "Microsoft non-fully HID-compliant devices"
depends on HID
@@ -861,6 +869,13 @@ config THRUSTMASTER_FF
a THRUSTMASTER Dual Trigger 3-in-1 or a THRUSTMASTER Ferrari GT
Rumble Force or Force Feedback Wheel.
+config HID_UDRAW_PS3
+ tristate "THQ PS3 uDraw tablet"
+ depends on HID
+ ---help---
+ Say Y here if you want to use the THQ uDraw gaming tablet for
+ the PS3.
+
config HID_WACOM
tristate "Wacom Intuos/Graphire tablet support (USB)"
depends on HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 86b2b5785fd2..4d111f23e801 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
+obj-$(CONFIG_HID_MAYFLASH) += hid-mf.o
obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o
obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o
obj-$(CONFIG_HID_MULTITOUCH) += hid-multitouch.o
@@ -96,6 +97,7 @@ obj-$(CONFIG_HID_TIVO) += hid-tivo.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
obj-$(CONFIG_HID_UCLOGIC) += hid-uclogic.o
+obj-$(CONFIG_HID_UDRAW_PS3) += hid-udraw-ps3.o
obj-$(CONFIG_HID_LED) += hid-led.o
obj-$(CONFIG_HID_XINMO) += hid-xinmo.o
obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 7a811ec4f2e1..d40ed9fdf68d 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -11,6 +11,12 @@
* This module based on hid-ortek by
* Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com>
* Copyright (c) 2011 Jiri Kosina
+ *
+ * This module has been updated to add support for Asus i2c touchpad.
+ *
+ * Copyright (c) 2016 Brendan McGrath <redmcg@redmandi.dyndns.org>
+ * Copyright (c) 2016 Victor Vlasenko <victor.vlasenko@sysgears.com>
+ * Copyright (c) 2016 Frederik Wenigwieser <frederik.wenigwieser@gmail.com>
*/
/*
@@ -20,16 +26,287 @@
* any later version.
*/
-#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
+#include <linux/input/mt.h>
#include "hid-ids.h"
+MODULE_AUTHOR("Yusuke Fujimaki <usk.fujimaki@gmail.com>");
+MODULE_AUTHOR("Brendan McGrath <redmcg@redmandi.dyndns.org>");
+MODULE_AUTHOR("Victor Vlasenko <victor.vlasenko@sysgears.com>");
+MODULE_AUTHOR("Frederik Wenigwieser <frederik.wenigwieser@gmail.com>");
+MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
+
+#define FEATURE_REPORT_ID 0x0d
+#define INPUT_REPORT_ID 0x5d
+
+#define INPUT_REPORT_SIZE 28
+
+#define MAX_CONTACTS 5
+
+#define MAX_X 2794
+#define MAX_Y 1758
+#define MAX_TOUCH_MAJOR 8
+#define MAX_PRESSURE 128
+
+#define CONTACT_DATA_SIZE 5
+
+#define BTN_LEFT_MASK 0x01
+#define CONTACT_TOOL_TYPE_MASK 0x80
+#define CONTACT_X_MSB_MASK 0xf0
+#define CONTACT_Y_MSB_MASK 0x0f
+#define CONTACT_TOUCH_MAJOR_MASK 0x07
+#define CONTACT_PRESSURE_MASK 0x7f
+
+#define QUIRK_FIX_NOTEBOOK_REPORT BIT(0)
+#define QUIRK_NO_INIT_REPORTS BIT(1)
+#define QUIRK_SKIP_INPUT_MAPPING BIT(2)
+#define QUIRK_IS_MULTITOUCH BIT(3)
+
+#define NOTEBOOK_QUIRKS QUIRK_FIX_NOTEBOOK_REPORT
+#define TOUCHPAD_QUIRKS (QUIRK_NO_INIT_REPORTS | \
+ QUIRK_SKIP_INPUT_MAPPING | \
+ QUIRK_IS_MULTITOUCH)
+
+#define TRKID_SGN ((TRKID_MAX + 1) >> 1)
+
+struct asus_drvdata {
+ unsigned long quirks;
+ struct input_dev *input;
+};
+
+static void asus_report_contact_down(struct input_dev *input,
+ int toolType, u8 *data)
+{
+ int touch_major, pressure;
+ int x = (data[0] & CONTACT_X_MSB_MASK) << 4 | data[1];
+ int y = MAX_Y - ((data[0] & CONTACT_Y_MSB_MASK) << 8 | data[2]);
+
+ if (toolType == MT_TOOL_PALM) {
+ touch_major = MAX_TOUCH_MAJOR;
+ pressure = MAX_PRESSURE;
+ } else {
+ touch_major = (data[3] >> 4) & CONTACT_TOUCH_MAJOR_MASK;
+ pressure = data[4] & CONTACT_PRESSURE_MASK;
+ }
+
+ input_report_abs(input, ABS_MT_POSITION_X, x);
+ input_report_abs(input, ABS_MT_POSITION_Y, y);
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major);
+ input_report_abs(input, ABS_MT_PRESSURE, pressure);
+}
+
+/* Required for Synaptics Palm Detection */
+static void asus_report_tool_width(struct input_dev *input)
+{
+ struct input_mt *mt = input->mt;
+ struct input_mt_slot *oldest;
+ int oldid, count, i;
+
+ oldest = NULL;
+ oldid = mt->trkid;
+ count = 0;
+
+ for (i = 0; i < mt->num_slots; ++i) {
+ struct input_mt_slot *ps = &mt->slots[i];
+ int id = input_mt_get_value(ps, ABS_MT_TRACKING_ID);
+
+ if (id < 0)
+ continue;
+ if ((id - oldid) & TRKID_SGN) {
+ oldest = ps;
+ oldid = id;
+ }
+ count++;
+ }
+
+ if (oldest) {
+ input_report_abs(input, ABS_TOOL_WIDTH,
+ input_mt_get_value(oldest, ABS_MT_TOUCH_MAJOR));
+ }
+}
+
+static void asus_report_input(struct input_dev *input, u8 *data)
+{
+ int i;
+ u8 *contactData = data + 2;
+
+ for (i = 0; i < MAX_CONTACTS; i++) {
+ bool down = !!(data[1] & BIT(i+3));
+ int toolType = contactData[3] & CONTACT_TOOL_TYPE_MASK ?
+ MT_TOOL_PALM : MT_TOOL_FINGER;
+
+ input_mt_slot(input, i);
+ input_mt_report_slot_state(input, toolType, down);
+
+ if (down) {
+ asus_report_contact_down(input, toolType, contactData);
+ contactData += CONTACT_DATA_SIZE;
+ }
+ }
+
+ input_report_key(input, BTN_LEFT, data[1] & BTN_LEFT_MASK);
+ asus_report_tool_width(input);
+
+ input_mt_sync_frame(input);
+ input_sync(input);
+}
+
+static int asus_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH &&
+ data[0] == INPUT_REPORT_ID &&
+ size == INPUT_REPORT_SIZE) {
+ asus_report_input(drvdata->input, data);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+ int ret;
+ struct input_dev *input = hi->input;
+
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, MAX_X, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, MAX_Y, 0, 0);
+ input_set_abs_params(input, ABS_TOOL_WIDTH, 0, MAX_TOUCH_MAJOR, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, MAX_TOUCH_MAJOR, 0, 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, MAX_PRESSURE, 0, 0);
+
+ __set_bit(BTN_LEFT, input->keybit);
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+
+ ret = input_mt_init_slots(input, MAX_CONTACTS, INPUT_MT_POINTER);
+
+ if (ret) {
+ hid_err(hdev, "Asus input mt init slots failed: %d\n", ret);
+ return ret;
+ }
+
+ drvdata->input = input;
+ }
+
+ return 0;
+}
+
+static int asus_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit,
+ int *max)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_SKIP_INPUT_MAPPING) {
+ /* Don't map anything from the HID report.
+ * We do it all manually in asus_input_configured
+ */
+ return -1;
+ }
+
+ return 0;
+}
+
+static int asus_start_multitouch(struct hid_device *hdev)
+{
+ int ret;
+ const unsigned char buf[] = { FEATURE_REPORT_ID, 0x00, 0x03, 0x01, 0x00 };
+ unsigned char *dmabuf = kmemdup(buf, sizeof(buf), GFP_KERNEL);
+
+ if (!dmabuf) {
+ ret = -ENOMEM;
+ hid_err(hdev, "Asus failed to alloc dma buf: %d\n", ret);
+ return ret;
+ }
+
+ ret = hid_hw_raw_request(hdev, dmabuf[0], dmabuf, sizeof(buf),
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+
+ kfree(dmabuf);
+
+ if (ret != sizeof(buf)) {
+ hid_err(hdev, "Asus failed to start multitouch: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused asus_reset_resume(struct hid_device *hdev)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
+ return asus_start_multitouch(hdev);
+
+ return 0;
+}
+
+static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct asus_drvdata *drvdata;
+
+ drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (drvdata == NULL) {
+ hid_err(hdev, "Can't alloc Asus descriptor\n");
+ return -ENOMEM;
+ }
+
+ hid_set_drvdata(hdev, drvdata);
+
+ drvdata->quirks = id->driver_data;
+
+ if (drvdata->quirks & QUIRK_NO_INIT_REPORTS)
+ hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "Asus hid parse failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "Asus hw start failed: %d\n", ret);
+ return ret;
+ }
+
+ if (!drvdata->input) {
+ hid_err(hdev, "Asus input not registered\n");
+ ret = -ENOMEM;
+ goto err_stop_hw;
+ }
+
+ drvdata->input->name = "Asus TouchPad";
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+ ret = asus_start_multitouch(hdev);
+ if (ret)
+ goto err_stop_hw;
+ }
+
+ return 0;
+err_stop_hw:
+ hid_hw_stop(hdev);
+ return ret;
+}
+
static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x65) {
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_FIX_NOTEBOOK_REPORT &&
+ *rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x65) {
hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
rdesc[55] = 0xdd;
}
@@ -37,15 +314,25 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
static const struct hid_device_id asus_devices[] = {
- { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD), NOTEBOOK_QUIRKS},
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_TOUCHPAD), TOUCHPAD_QUIRKS },
{ }
};
MODULE_DEVICE_TABLE(hid, asus_devices);
static struct hid_driver asus_driver = {
- .name = "asus",
- .id_table = asus_devices,
- .report_fixup = asus_report_fixup
+ .name = "asus",
+ .id_table = asus_devices,
+ .report_fixup = asus_report_fixup,
+ .probe = asus_probe,
+ .input_mapping = asus_input_mapping,
+ .input_configured = asus_input_configured,
+#ifdef CONFIG_PM
+ .reset_resume = asus_reset_resume,
+#endif
+ .raw_event = asus_raw_event
};
module_hid_driver(asus_driver);
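The 5-byte contact records parsed by asus_report_contact_down() above pack two 12-bit coordinates into the first three bytes. Here is a stand-alone sketch of that unpacking, reusing the masks defined in the patch; the sample bytes are invented for illustration only.

#include <stdint.h>
#include <stdio.h>

#define CONTACT_X_MSB_MASK	0xf0
#define CONTACT_Y_MSB_MASK	0x0f
#define MAX_Y			1758

int main(void)
{
	/* hypothetical contact record: [0]=MSB nibbles, [1]=X LSB, [2]=Y LSB */
	const uint8_t data[3] = { 0x52, 0x34, 0x80 };
	int x = (data[0] & CONTACT_X_MSB_MASK) << 4 | data[1];
	int y = MAX_Y - ((data[0] & CONTACT_Y_MSB_MASK) << 8 | data[2]);

	printf("x=%d y=%d\n", x, y);	/* prints x=1332 y=1118 */
	return 0;
}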
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 2b89c701076f..cff060b56da9 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -727,8 +727,9 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
(hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP ||
+ hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4 ||
+ hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2 ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP ||
- hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 ||
hid->product == USB_DEVICE_ID_MS_POWER_COVER) &&
hid->group == HID_GROUP_MULTITOUCH)
hid->group = HID_GROUP_GENERIC;
@@ -1857,6 +1858,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_TOUCHPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
@@ -1883,6 +1885,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
+#if IS_ENABLED(CONFIG_HID_MAYFLASH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
+#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
@@ -1983,8 +1988,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
@@ -2059,6 +2065,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
@@ -2086,6 +2095,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 60d30203a5fa..f31a778b0851 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -24,6 +24,7 @@
* http://www.silabs.com/Support%20Documents/TechnicalDocs/AN495.pdf
*/
+#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/hid.h>
#include <linux/i2c.h>
@@ -168,6 +169,12 @@ struct cp2112_device {
struct gpio_chip gc;
u8 *in_out_buffer;
spinlock_t lock;
+
+ struct gpio_desc *desc[8];
+ bool gpio_poll;
+ struct delayed_work gpio_poll_worker;
+ unsigned long irq_mask;
+ u8 gpio_prev_state;
};
static int gpio_push_pull = 0xFF;
@@ -233,7 +240,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
spin_unlock_irqrestore(&dev->lock, flags);
}
-static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int cp2112_gpio_get_all(struct gpio_chip *chip)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
@@ -252,7 +259,7 @@ static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
goto exit;
}
- ret = (buf[1] >> offset) & 1;
+ ret = buf[1];
exit:
spin_unlock_irqrestore(&dev->lock, flags);
@@ -260,6 +267,17 @@ exit:
return ret;
}
+static int cp2112_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ int ret;
+
+ ret = cp2112_gpio_get_all(chip);
+ if (ret < 0)
+ return ret;
+
+ return (ret >> offset) & 1;
+}
+
static int cp2112_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
@@ -1041,6 +1059,166 @@ static void chmod_sysfs_attrs(struct hid_device *hdev)
}
}
+static void cp2112_gpio_irq_ack(struct irq_data *d)
+{
+}
+
+static void cp2112_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ __clear_bit(d->hwirq, &dev->irq_mask);
+}
+
+static void cp2112_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ __set_bit(d->hwirq, &dev->irq_mask);
+}
+
+static void cp2112_gpio_poll_callback(struct work_struct *work)
+{
+ struct cp2112_device *dev = container_of(work, struct cp2112_device,
+ gpio_poll_worker.work);
+ struct irq_data *d;
+ u8 gpio_mask;
+ u8 virqs = (u8)dev->irq_mask;
+ u32 irq_type;
+ int irq, virq, ret;
+
+ ret = cp2112_gpio_get_all(&dev->gc);
+ if (ret == -ENODEV) /* the hardware has been disconnected */
+ return;
+ if (ret < 0)
+ goto exit;
+
+ gpio_mask = ret;
+
+ while (virqs) {
+ virq = ffs(virqs) - 1;
+ virqs &= ~BIT(virq);
+
+ if (!dev->gc.to_irq)
+ break;
+
+ irq = dev->gc.to_irq(&dev->gc, virq);
+
+ d = irq_get_irq_data(irq);
+ if (!d)
+ continue;
+
+ irq_type = irqd_get_trigger_type(d);
+
+ if (gpio_mask & BIT(virq)) {
+ /* Level High */
+
+ if (irq_type & IRQ_TYPE_LEVEL_HIGH)
+ handle_nested_irq(irq);
+
+ if ((irq_type & IRQ_TYPE_EDGE_RISING) &&
+ !(dev->gpio_prev_state & BIT(virq)))
+ handle_nested_irq(irq);
+ } else {
+ /* Level Low */
+
+ if (irq_type & IRQ_TYPE_LEVEL_LOW)
+ handle_nested_irq(irq);
+
+ if ((irq_type & IRQ_TYPE_EDGE_FALLING) &&
+ (dev->gpio_prev_state & BIT(virq)))
+ handle_nested_irq(irq);
+ }
+ }
+
+ dev->gpio_prev_state = gpio_mask;
+
+exit:
+ if (dev->gpio_poll)
+ schedule_delayed_work(&dev->gpio_poll_worker, 10);
+}
+
+
+static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
+
+ cp2112_gpio_direction_input(gc, d->hwirq);
+
+ if (!dev->gpio_poll) {
+ dev->gpio_poll = true;
+ schedule_delayed_work(&dev->gpio_poll_worker, 0);
+ }
+
+ cp2112_gpio_irq_unmask(d);
+ return 0;
+}
+
+static void cp2112_gpio_irq_shutdown(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ cancel_delayed_work_sync(&dev->gpio_poll_worker);
+}
+
+static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
+{
+ return 0;
+}
+
+static struct irq_chip cp2112_gpio_irqchip = {
+ .name = "cp2112-gpio",
+ .irq_startup = cp2112_gpio_irq_startup,
+ .irq_shutdown = cp2112_gpio_irq_shutdown,
+ .irq_ack = cp2112_gpio_irq_ack,
+ .irq_mask = cp2112_gpio_irq_mask,
+ .irq_unmask = cp2112_gpio_irq_unmask,
+ .irq_set_type = cp2112_gpio_irq_type,
+};
+
+static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
+ int pin)
+{
+ int ret;
+
+ if (dev->desc[pin])
+ return -EINVAL;
+
+ dev->desc[pin] = gpiochip_request_own_desc(&dev->gc, pin,
+ "HID/I2C:Event");
+ if (IS_ERR(dev->desc[pin])) {
+ dev_err(dev->gc.parent, "Failed to request GPIO\n");
+ return PTR_ERR(dev->desc[pin]);
+ }
+
+ ret = gpiochip_lock_as_irq(&dev->gc, pin);
+ if (ret) {
+ dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
+ goto err_desc;
+ }
+
+ ret = gpiod_to_irq(dev->desc[pin]);
+ if (ret < 0) {
+ dev_err(dev->gc.parent, "Failed to translate GPIO to IRQ\n");
+ goto err_lock;
+ }
+
+ return ret;
+
+err_lock:
+ gpiochip_unlock_as_irq(&dev->gc, pin);
+err_desc:
+ gpiochip_free_own_desc(dev->desc[pin]);
+ dev->desc[pin] = NULL;
+ return ret;
+}
+
static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct cp2112_device *dev;
@@ -1163,8 +1341,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
chmod_sysfs_attrs(hdev);
hid_hw_power(hdev, PM_HINT_NORMAL);
+ ret = gpiochip_irqchip_add(&dev->gc, &cp2112_gpio_irqchip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(dev->gc.parent, "failed to add IRQ chip\n");
+ goto err_sysfs_remove;
+ }
+
return ret;
+err_sysfs_remove:
+ sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group);
err_gpiochip_remove:
gpiochip_remove(&dev->gc);
err_free_i2c:
@@ -1181,10 +1368,22 @@ err_hid_stop:
static void cp2112_remove(struct hid_device *hdev)
{
struct cp2112_device *dev = hid_get_drvdata(hdev);
+ int i;
sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group);
- gpiochip_remove(&dev->gc);
i2c_del_adapter(&dev->adap);
+
+ if (dev->gpio_poll) {
+ dev->gpio_poll = false;
+ cancel_delayed_work_sync(&dev->gpio_poll_worker);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev->desc); i++) {
+ gpiochip_unlock_as_irq(&dev->gc, i);
+ gpiochip_free_own_desc(dev->desc[i]);
+ }
+
+ gpiochip_remove(&dev->gc);
/* i2c_del_adapter has finished removing all i2c devices from our
* adapter. Well behaved devices should no longer call our cp2112_xfer
* and should have waited for any pending calls to finish. It has also
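The CP2112 has no interrupt line of its own, so cp2112_gpio_poll_callback() above emulates edge- and level-triggered interrupts by comparing each polled GPIO state with the previous one. Below is a condensed sketch of that decision for a single line; the trigger constants are simplified stand-ins for the kernel's IRQ_TYPE_* values, not the real definitions.

#include <stdbool.h>

enum trigger {				/* simplified stand-ins for IRQ_TYPE_* */
	TRIG_EDGE_RISING  = 1 << 0,
	TRIG_EDGE_FALLING = 1 << 1,
	TRIG_LEVEL_HIGH   = 1 << 2,
	TRIG_LEVEL_LOW    = 1 << 3,
};

/* Decide whether a polled transition from 'prev' to 'curr' should fire an
 * interrupt of the requested trigger type, as the polling worker does. */
static bool should_fire(unsigned int type, bool prev, bool curr)
{
	if (curr)
		return (type & TRIG_LEVEL_HIGH) ||
		       ((type & TRIG_EDGE_RISING) && !prev);

	return (type & TRIG_LEVEL_LOW) ||
	       ((type & TRIG_EDGE_FALLING) && prev);
}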
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 575aa65436d1..ec277b96eaa1 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -171,6 +171,7 @@
#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
#define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b
#define USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD 0x8585
+#define USB_DEVICE_ID_ASUSTEK_TOUCHPAD 0x0101
#define USB_VENDOR_ID_ATEN 0x0557
#define USB_DEVICE_ID_ATEN_UC100KM 0x2004
@@ -315,8 +316,10 @@
#define USB_VENDOR_ID_DMI 0x0c0b
#define USB_DEVICE_ID_DMI_ENC 0x5fab
-#define USB_VENDOR_ID_DRAGONRISE 0x0079
-#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
+#define USB_VENDOR_ID_DRAGONRISE 0x0079
+#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
+#define USB_DEVICE_ID_DRAGONRISE_PS3 0x1801
+#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE 0x1843
#define USB_VENDOR_ID_DWAV 0x0eef
#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
@@ -718,8 +721,9 @@
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 0x07dc
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 0x07e2
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP 0x07dd
+#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4 0x07e4
+#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2 0x07e8
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP 0x07e9
-#define USB_DEVICE_ID_MS_TYPE_COVER_3 0x07de
#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
#define USB_VENDOR_ID_MOJO 0x8282
@@ -903,6 +907,8 @@
#define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306
#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0
#define USB_DEVICE_ID_SONY_MOTION_CONTROLLER 0x03d5
#define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
#define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002
@@ -959,6 +965,9 @@
#define USB_VENDOR_ID_THINGM 0x27b8
#define USB_DEVICE_ID_BLINK1 0x01ed
+#define USB_VENDOR_ID_THQ 0x20d6
+#define USB_DEVICE_ID_THQ_PS3_UDRAW 0xcb17
+
#define USB_VENDOR_ID_THRUSTMASTER 0x044f
#define USB_VENDOR_ID_TIVO 0x150a
@@ -1034,6 +1043,10 @@
#define USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH 0x0500
#define USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET 0x0502
+#define USB_VENDOR_ID_WEIDA 0x2575
+#define USB_DEVICE_ID_WEIDA_8752 0xC300
+#define USB_DEVICE_ID_WEIDA_8755 0xC301
+
#define USB_VENDOR_ID_WISEGROUP 0x0925
#define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005
#define USB_DEVICE_ID_SUPER_JOY_BOX_3 0x8888
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index fb9ace1cef8b..d05f903c7614 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -253,6 +253,7 @@ __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
case ABS_RX:
case ABS_RY:
case ABS_RZ:
+ case ABS_WHEEL:
case ABS_TILT_X:
case ABS_TILT_Y:
if (field->unit == 0x14) { /* If degrees */
@@ -1468,6 +1469,31 @@ static void hidinput_cleanup_hidinput(struct hid_device *hid,
kfree(hidinput);
}
+static struct hid_input *hidinput_match(struct hid_report *report)
+{
+ struct hid_device *hid = report->device;
+ struct hid_input *hidinput;
+
+ list_for_each_entry(hidinput, &hid->inputs, list) {
+ if (hidinput->report &&
+ hidinput->report->id == report->id)
+ return hidinput;
+ }
+
+ return NULL;
+}
+
+static inline void hidinput_configure_usages(struct hid_input *hidinput,
+ struct hid_report *report)
+{
+ int i, j;
+
+ for (i = 0; i < report->maxfield; i++)
+ for (j = 0; j < report->field[i]->maxusage; j++)
+ hidinput_configure_usage(hidinput, report->field[i],
+ report->field[i]->usage + j);
+}
+
/*
* Register the input device; print a message.
* Configure the input layer interface
@@ -1478,8 +1504,8 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
{
struct hid_driver *drv = hid->driver;
struct hid_report *report;
- struct hid_input *hidinput = NULL;
- int i, j, k;
+ struct hid_input *next, *hidinput = NULL;
+ int i, k;
INIT_LIST_HEAD(&hid->inputs);
INIT_WORK(&hid->led_work, hidinput_led_worker);
@@ -1509,43 +1535,40 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
if (!report->maxfield)
continue;
+ /*
+ * Find the previous hidinput report attached
+ * to this report id.
+ */
+ if (hid->quirks & HID_QUIRK_MULTI_INPUT)
+ hidinput = hidinput_match(report);
+
if (!hidinput) {
hidinput = hidinput_allocate(hid);
if (!hidinput)
goto out_unwind;
}
- for (i = 0; i < report->maxfield; i++)
- for (j = 0; j < report->field[i]->maxusage; j++)
- hidinput_configure_usage(hidinput, report->field[i],
- report->field[i]->usage + j);
-
- if ((hid->quirks & HID_QUIRK_NO_EMPTY_INPUT) &&
- !hidinput_has_been_populated(hidinput))
- continue;
+ hidinput_configure_usages(hidinput, report);
- if (hid->quirks & HID_QUIRK_MULTI_INPUT) {
- /* This will leave hidinput NULL, so that it
- * allocates another one if we have more inputs on
- * the same interface. Some devices (e.g. Happ's
- * UGCI) cram a lot of unrelated inputs into the
- * same interface. */
+ if (hid->quirks & HID_QUIRK_MULTI_INPUT)
hidinput->report = report;
- if (drv->input_configured &&
- drv->input_configured(hid, hidinput))
- goto out_cleanup;
- if (input_register_device(hidinput->input))
- goto out_cleanup;
- hidinput = NULL;
- }
}
}
- if (hidinput && (hid->quirks & HID_QUIRK_NO_EMPTY_INPUT) &&
- !hidinput_has_been_populated(hidinput)) {
- /* no need to register an input device not populated */
- hidinput_cleanup_hidinput(hid, hidinput);
- hidinput = NULL;
+ list_for_each_entry_safe(hidinput, next, &hid->inputs, list) {
+ if ((hid->quirks & HID_QUIRK_NO_EMPTY_INPUT) &&
+ !hidinput_has_been_populated(hidinput)) {
+ /* no need to register an input device not populated */
+ hidinput_cleanup_hidinput(hid, hidinput);
+ continue;
+ }
+
+ if (drv->input_configured &&
+ drv->input_configured(hid, hidinput))
+ goto out_unwind;
+ if (input_register_device(hidinput->input))
+ goto out_unwind;
+ hidinput->registered = true;
}
if (list_empty(&hid->inputs)) {
@@ -1553,20 +1576,8 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
goto out_unwind;
}
- if (hidinput) {
- if (drv->input_configured &&
- drv->input_configured(hid, hidinput))
- goto out_cleanup;
- if (input_register_device(hidinput->input))
- goto out_cleanup;
- }
-
return 0;
-out_cleanup:
- list_del(&hidinput->list);
- input_free_device(hidinput->input);
- kfree(hidinput);
out_unwind:
/* unwind the ones we already registered */
hidinput_disconnect(hid);
@@ -1583,7 +1594,10 @@ void hidinput_disconnect(struct hid_device *hid)
list_for_each_entry_safe(hidinput, next, &hid->inputs, list) {
list_del(&hidinput->list);
- input_unregister_device(hidinput->input);
+ if (hidinput->registered)
+ input_unregister_device(hidinput->input);
+ else
+ input_free_device(hidinput->input);
kfree(hidinput);
}
diff --git a/drivers/hid/hid-mf.c b/drivers/hid/hid-mf.c
new file mode 100644
index 000000000000..d9090765a6e5
--- /dev/null
+++ b/drivers/hid/hid-mf.c
@@ -0,0 +1,166 @@
+/*
+ * Force feedback support for Mayflash game controller adapters.
+ *
+ * These devices are manufactured by Mayflash but identify themselves
+ * using the vendor ID of DragonRise Inc.
+ *
+ * Tested with:
+ * 0079:1801 "DragonRise Inc. Mayflash PS3 Game Controller Adapter"
+ *
+ * The following adapters probably work too, but need to be tested:
+ * 0079:1800 "DragonRise Inc. Mayflash WIIU Game Controller Adapter"
+ * 0079:1843 "DragonRise Inc. Mayflash GameCube Game Controller Adapter"
+ *
+ * Copyright (c) 2016 Marcel Hasler <mahasler@gmail.com>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+struct mf_device {
+ struct hid_report *report;
+};
+
+static int mf_play(struct input_dev *dev, void *data, struct ff_effect *effect)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct mf_device *mf = data;
+ int strong, weak;
+
+ strong = effect->u.rumble.strong_magnitude;
+ weak = effect->u.rumble.weak_magnitude;
+
+ dbg_hid("Called with 0x%04x 0x%04x.\n", strong, weak);
+
+ strong = strong * 0xff / 0xffff;
+ weak = weak * 0xff / 0xffff;
+
+ dbg_hid("Running with 0x%02x 0x%02x.\n", strong, weak);
+
+ mf->report->field[0]->value[0] = weak;
+ mf->report->field[0]->value[1] = strong;
+ hid_hw_request(hid, mf->report, HID_REQ_SET_REPORT);
+
+ return 0;
+}
+
+static int mf_init(struct hid_device *hid)
+{
+ struct mf_device *mf;
+
+ struct list_head *report_list =
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+
+ struct list_head *report_ptr;
+ struct hid_report *report;
+
+ struct list_head *input_ptr = &hid->inputs;
+ struct hid_input *input;
+
+ struct input_dev *dev;
+
+ int error;
+
+ /* Setup each of the four inputs */
+ list_for_each(report_ptr, report_list) {
+ report = list_entry(report_ptr, struct hid_report, list);
+
+ if (report->maxfield < 1 || report->field[0]->report_count < 2) {
+ hid_err(hid, "Invalid report, this should never happen!\n");
+ return -ENODEV;
+ }
+
+ if (list_is_last(input_ptr, &hid->inputs)) {
+ hid_err(hid, "Missing input, this should never happen!\n");
+ return -ENODEV;
+ }
+
+ input_ptr = input_ptr->next;
+ input = list_entry(input_ptr, struct hid_input, list);
+
+ mf = kzalloc(sizeof(struct mf_device), GFP_KERNEL);
+ if (!mf)
+ return -ENOMEM;
+
+ dev = input->input;
+ set_bit(FF_RUMBLE, dev->ffbit);
+
+ error = input_ff_create_memless(dev, mf, mf_play);
+ if (error) {
+ kfree(mf);
+ return error;
+ }
+
+ mf->report = report;
+ mf->report->field[0]->value[0] = 0x00;
+ mf->report->field[0]->value[1] = 0x00;
+ hid_hw_request(hid, mf->report, HID_REQ_SET_REPORT);
+ }
+
+ hid_info(hid, "Force feedback for HJZ Mayflash game controller "
+ "adapters by Marcel Hasler <mahasler@gmail.com>\n");
+
+ return 0;
+}
+
+static int mf_probe(struct hid_device *hid, const struct hid_device_id *id)
+{
+ int error;
+
+ dev_dbg(&hid->dev, "Mayflash HID hardware probe...\n");
+
+ /* Split device into four inputs */
+ hid->quirks |= HID_QUIRK_MULTI_INPUT;
+
+ error = hid_parse(hid);
+ if (error) {
+ hid_err(hid, "HID parse failed.\n");
+ return error;
+ }
+
+ error = hid_hw_start(hid, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+ if (error) {
+ hid_err(hid, "HID hw start failed\n");
+ return error;
+ }
+
+ error = mf_init(hid);
+ if (error) {
+ hid_err(hid, "Force feedback init failed.\n");
+ hid_hw_stop(hid);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct hid_device_id mf_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, mf_devices);
+
+static struct hid_driver mf_driver = {
+ .name = "hid_mf",
+ .id_table = mf_devices,
+ .probe = mf_probe,
+};
+module_hid_driver(mf_driver);
+
+MODULE_LICENSE("GPL");
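mf_play() above rescales the 16-bit rumble magnitudes delivered by the force-feedback core to the single byte the adapter's output report expects. A trivial stand-alone illustration of that scaling, with arbitrarily chosen test values:

#include <stdio.h>

/* Map a 0..0xffff force-feedback magnitude onto the 0..0xff range used by
 * the Mayflash output report, as mf_play() does. */
static unsigned int scale_magnitude(unsigned int m)
{
	return m * 0xff / 0xffff;
}

int main(void)
{
	printf("%u %u %u\n", scale_magnitude(0),
	       scale_magnitude(0x8000), scale_magnitude(0xffff));
	/* prints 0 127 255 */
	return 0;
}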
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index c6cd392e9f99..74b7b84a0420 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -280,9 +280,11 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP),
.driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP),
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4),
+ .driver_data = MS_HIDINPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2),
.driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3),
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP),
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
.driver_data = MS_HIDINPUT },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index fb6f1f447279..6dca66806844 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -108,6 +108,7 @@ struct mt_device {
int cc_value_index; /* contact count value index in the field */
unsigned last_slot_field; /* the last field of a slot */
unsigned mt_report_id; /* the report ID of the multitouch device */
+ unsigned long initial_quirks; /* initial quirks state */
__s16 inputmode; /* InputMode HID feature, -1 if non-existent */
__s16 inputmode_index; /* InputMode HID feature index in the report */
__s16 maxcontact_report_id; /* Maximum Contact Number HID feature,
@@ -318,13 +319,10 @@ static void mt_get_feature(struct hid_device *hdev, struct hid_report *report)
u8 *buf;
/*
- * Only fetch the feature report if initial reports are not already
- * been retrieved. Currently this is only done for Windows 8 touch
- * devices.
+ * Do not fetch the feature report if the device has been explicitly
+ * marked as non-capable.
*/
- if (!(hdev->quirks & HID_QUIRK_NO_INIT_REPORTS))
- return;
- if (td->mtclass.name != MT_CLS_WIN_8)
+ if (td->initial_quirks & HID_QUIRK_NO_INIT_REPORTS)
return;
buf = hid_alloc_report_buf(report, GFP_KERNEL);
@@ -567,6 +565,14 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case HID_UP_BUTTON:
code = BTN_MOUSE + ((usage->hid - 1) & HID_USAGE);
+ /*
+ * MS PTP spec says that external buttons left and right have
+ * usages 2 and 3.
+ */
+ if (cls->name == MT_CLS_WIN_8 &&
+ field->application == HID_DG_TOUCHPAD &&
+ (usage->hid & HID_USAGE) > 1)
+ code--;
hid_map_usage(hi, usage, bit, max, EV_KEY, code);
input_set_capability(hi->input, EV_KEY, code);
return 1;
@@ -842,7 +848,9 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
if (!td->mtclass.export_all_inputs &&
field->application != HID_DG_TOUCHSCREEN &&
field->application != HID_DG_PEN &&
- field->application != HID_DG_TOUCHPAD)
+ field->application != HID_DG_TOUCHPAD &&
+ field->application != HID_GD_KEYBOARD &&
+ field->application != HID_CP_CONSUMER_CONTROL)
return -1;
/*
@@ -1083,36 +1091,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
}
- /* This allows the driver to correctly support devices
- * that emit events over several HID messages.
- */
- hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
-
- /*
- * This allows the driver to handle different input sensors
- * that emits events through different reports on the same HID
- * device.
- */
- hdev->quirks |= HID_QUIRK_MULTI_INPUT;
- hdev->quirks |= HID_QUIRK_NO_EMPTY_INPUT;
-
- /*
- * Handle special quirks for Windows 8 certified devices.
- */
- if (id->group == HID_GROUP_MULTITOUCH_WIN_8)
- /*
- * Some multitouch screens do not like to be polled for input
- * reports. Fortunately, the Win8 spec says that all touches
- * should be sent during each report, making the initialization
- * of input reports unnecessary.
- *
- * In addition some touchpads do not behave well if we read
- * all feature reports from them. Instead we prevent
- * initial report fetching and then selectively fetch each
- * report we are interested in.
- */
- hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
-
td = devm_kzalloc(&hdev->dev, sizeof(struct mt_device), GFP_KERNEL);
if (!td) {
dev_err(&hdev->dev, "cannot allocate multitouch data\n");
@@ -1136,6 +1114,39 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (id->vendor == HID_ANY_ID && id->product == HID_ANY_ID)
td->serial_maybe = true;
+ /*
+ * Store the initial quirk state
+ */
+ td->initial_quirks = hdev->quirks;
+
+ /* This allows the driver to correctly support devices
+ * that emit events over several HID messages.
+ */
+ hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
+
+ /*
+ * This allows the driver to handle different input sensors
+ * that emits events through different reports on the same HID
+ * device.
+ */
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ hdev->quirks |= HID_QUIRK_NO_EMPTY_INPUT;
+
+ /*
+ * Some multitouch screens do not like to be polled for input
+ * reports. Fortunately, the Win8 spec says that all touches
+ * should be sent during each report, making the initialization
+ * of input reports unnecessary. For Win7 devices, well, let's hope
+ * they will still be happy (this is only a problem if a touch

+ * was already there while probing the device).
+ *
+ * In addition some touchpads do not behave well if we read
+ * all feature reports from them. Instead we prevent
+ * initial report fetching and then selectively fetch each
+ * report we are interested in.
+ */
+ hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
ret = hid_parse(hdev);
if (ret != 0)
return ret;
@@ -1204,8 +1215,11 @@ static int mt_resume(struct hid_device *hdev)
static void mt_remove(struct hid_device *hdev)
{
+ struct mt_device *td = hid_get_drvdata(hdev);
+
sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
hid_hw_stop(hdev);
+ hdev->quirks = td->initial_quirks;
}
/*
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 60875625cbdf..5c925228847c 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -795,6 +795,12 @@ static const struct hid_device_id sensor_hub_devices[] = {
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROSOFT,
USB_DEVICE_ID_MS_TYPE_COVER_2),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROSOFT,
+ 0x07bd), /* Microsoft Surface 3 */
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROCHIP,
+ 0x0f01), /* MM7150 */
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
USB_DEVICE_ID_STM_HID_SENSOR),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index b0bb99a821bd..7687c0875395 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -36,6 +36,8 @@
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/input/mt.h>
+#include <linux/crc32.h>
+#include <asm/unaligned.h>
#include "hid-ids.h"
@@ -374,7 +376,7 @@ static u8 dualshock4_usb_rdesc[] = {
0x65, 0x00, /* Unit, */
0x05, 0x09, /* Usage Page (Button), */
0x19, 0x01, /* Usage Minimum (01h), */
- 0x29, 0x0E, /* Usage Maximum (0Eh), */
+ 0x29, 0x0D, /* Usage Maximum (0Dh), */
0x15, 0x00, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x75, 0x01, /* Report Size (1), */
@@ -403,14 +405,14 @@ static u8 dualshock4_usb_rdesc[] = {
0x19, 0x40, /* Usage Minimum (40h), */
0x29, 0x42, /* Usage Maximum (42h), */
0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
- 0x26, 0x00, 0x7F, /* Logical Maximum (32767), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x75, 0x10, /* Report Size (16), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x19, 0x43, /* Usage Minimum (43h), */
0x29, 0x45, /* Usage Maximum (45h), */
- 0x16, 0x00, 0xE0, /* Logical Minimum (-8192), */
- 0x26, 0xFF, 0x1F, /* Logical Maximum (8191), */
+ 0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
@@ -687,7 +689,7 @@ static u8 dualshock4_bt_rdesc[] = {
0x81, 0x42, /* Input (Variable, Null State), */
0x05, 0x09, /* Usage Page (Button), */
0x19, 0x01, /* Usage Minimum (01h), */
- 0x29, 0x0E, /* Usage Maximum (0Eh), */
+ 0x29, 0x0D, /* Usage Maximum (0Dh), */
0x15, 0x00, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x75, 0x01, /* Report Size (1), */
@@ -712,14 +714,14 @@ static u8 dualshock4_bt_rdesc[] = {
0x19, 0x40, /* Usage Minimum (40h), */
0x29, 0x42, /* Usage Maximum (42h), */
0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
- 0x26, 0x00, 0x7F, /* Logical Maximum (32767), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x75, 0x10, /* Report Size (16), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x19, 0x43, /* Usage Minimum (43h), */
0x29, 0x45, /* Usage Maximum (45h), */
- 0x16, 0x00, 0xE0, /* Logical Minimum (-8192), */
- 0x26, 0xFF, 0x1F, /* Logical Maximum (8191), */
+ 0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
@@ -975,6 +977,32 @@ static const unsigned int buzz_keymap[] = {
[20] = BTN_TRIGGER_HAPPY20,
};
+static const unsigned int ds4_absmap[] = {
+ [0x30] = ABS_X,
+ [0x31] = ABS_Y,
+ [0x32] = ABS_RX, /* right stick X */
+ [0x33] = ABS_Z, /* L2 */
+ [0x34] = ABS_RZ, /* R2 */
+ [0x35] = ABS_RY, /* right stick Y */
+};
+
+static const unsigned int ds4_keymap[] = {
+ [0x1] = BTN_WEST, /* Square */
+ [0x2] = BTN_SOUTH, /* Cross */
+ [0x3] = BTN_EAST, /* Circle */
+ [0x4] = BTN_NORTH, /* Triangle */
+ [0x5] = BTN_TL, /* L1 */
+ [0x6] = BTN_TR, /* R1 */
+ [0x7] = BTN_TL2, /* L2 */
+ [0x8] = BTN_TR2, /* R2 */
+ [0x9] = BTN_SELECT, /* Share */
+ [0xa] = BTN_START, /* Options */
+ [0xb] = BTN_THUMBL, /* L3 */
+ [0xc] = BTN_THUMBR, /* R3 */
+ [0xd] = BTN_MODE, /* PS */
+};
+
+
static enum power_supply_property sony_battery_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_CAPACITY,
@@ -1019,14 +1047,24 @@ struct motion_output_report_02 {
u8 rumble;
};
-#define DS4_REPORT_0x02_SIZE 37
-#define DS4_REPORT_0x05_SIZE 32
-#define DS4_REPORT_0x11_SIZE 78
-#define DS4_REPORT_0x81_SIZE 7
+#define DS4_FEATURE_REPORT_0x02_SIZE 37
+#define DS4_FEATURE_REPORT_0x81_SIZE 7
+#define DS4_INPUT_REPORT_0x11_SIZE 78
+#define DS4_OUTPUT_REPORT_0x05_SIZE 32
+#define DS4_OUTPUT_REPORT_0x11_SIZE 78
#define SIXAXIS_REPORT_0xF2_SIZE 17
#define SIXAXIS_REPORT_0xF5_SIZE 8
#define MOTION_REPORT_0x02_SIZE 49
+/* Offsets relative to USB input report (0x1). Bluetooth (0x11) requires an
+ * additional +2.
+ */
+#define DS4_INPUT_REPORT_BUTTON_OFFSET 5
+#define DS4_INPUT_REPORT_BATTERY_OFFSET 30
+#define DS4_INPUT_REPORT_TOUCHPAD_OFFSET 33
+
+#define DS4_TOUCHPAD_SUFFIX " Touchpad"
+
static DEFINE_SPINLOCK(sony_dev_list_lock);
static LIST_HEAD(sony_device_list);
static DEFINE_IDA(sony_device_id_allocator);
@@ -1035,6 +1073,7 @@ struct sony_sc {
spinlock_t lock;
struct list_head list_node;
struct hid_device *hdev;
+ struct input_dev *touchpad;
struct led_classdev *leds[MAX_LEDS];
unsigned long quirks;
struct work_struct state_worker;
@@ -1130,6 +1169,37 @@ static int ps3remote_mapping(struct hid_device *hdev, struct hid_input *hi,
return 1;
}
+static int ds4_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) {
+ unsigned int key = usage->hid & HID_USAGE;
+
+ if (key >= ARRAY_SIZE(ds4_keymap))
+ return -1;
+
+ key = ds4_keymap[key];
+ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, key);
+ return 1;
+ } else if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK) {
+ unsigned int abs = usage->hid & HID_USAGE;
+
+ /* Let the HID parser deal with the HAT. */
+ if (usage->hid == HID_GD_HATSWITCH)
+ return 0;
+
+ if (abs >= ARRAY_SIZE(ds4_absmap))
+ return -1;
+
+ abs = ds4_absmap[abs];
+ hid_map_usage_clear(hi, usage, bit, max, EV_ABS, abs);
+ return 1;
+ }
+
+ return 0;
+}
+
static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
unsigned int *rsize)
{
@@ -1219,23 +1289,22 @@ static void sixaxis_parse_report(struct sony_sc *sc, u8 *rd, int size)
static void dualshock4_parse_report(struct sony_sc *sc, u8 *rd, int size)
{
- struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
- struct hid_input, list);
- struct input_dev *input_dev = hidinput->input;
unsigned long flags;
- int n, offset;
+ int n, m, offset, num_touch_data, max_touch_data;
u8 cable_state, battery_capacity, battery_charging;
- /*
- * Battery and touchpad data starts at byte 30 in the USB report and
- * 32 in Bluetooth report.
- */
- offset = (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ? 30 : 32;
+ /* When using Bluetooth the header is 2 bytes longer, so skip these. */
+ int data_offset = (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ? 0 : 2;
+
+ /* Second bit of third button byte is for the touchpad button. */
+ offset = data_offset + DS4_INPUT_REPORT_BUTTON_OFFSET;
+ input_report_key(sc->touchpad, BTN_LEFT, rd[offset+2] & 0x2);
/*
- * The lower 4 bits of byte 30 contain the battery level
+ * The lower 4 bits of byte 30 (or 32 for BT) contain the battery level
* and the 5th bit contains the USB cable state.
*/
+ offset = data_offset + DS4_INPUT_REPORT_BATTERY_OFFSET;
cable_state = (rd[offset] >> 4) & 0x01;
battery_capacity = rd[offset] & 0x0F;
@@ -1262,30 +1331,52 @@ static void dualshock4_parse_report(struct sony_sc *sc, u8 *rd, int size)
sc->battery_charging = battery_charging;
spin_unlock_irqrestore(&sc->lock, flags);
- offset += 5;
-
/*
- * The Dualshock 4 multi-touch trackpad data starts at offset 35 on USB
- * and 37 on Bluetooth.
- * The first 7 bits of the first byte is a counter and bit 8 is a touch
- * indicator that is 0 when pressed and 1 when not pressed.
- * The next 3 bytes are two 12 bit touch coordinates, X and Y.
- * The data for the second touch is in the same format and immediatly
- * follows the data for the first.
+ * The Dualshock 4 multi-touch trackpad data starts at offset 33 on USB
+ * and 35 on Bluetooth.
+ * The first byte indicates the number of touch data packets in the report.
+ * Trackpad data starts 2 bytes later (e.g. 35 for USB).
*/
- for (n = 0; n < 2; n++) {
- u16 x, y;
+ offset = data_offset + DS4_INPUT_REPORT_TOUCHPAD_OFFSET;
+ max_touch_data = (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ? 3 : 4;
+ if (rd[offset] > 0 && rd[offset] <= max_touch_data)
+ num_touch_data = rd[offset];
+ else
+ num_touch_data = 1;
+ offset += 1;
+
+ for (m = 0; m < num_touch_data; m++) {
+ /* Skip past timestamp */
+ offset += 1;
+
+ /*
+ * The first 7 bits of the first byte are a counter and bit 8 is
+ * a touch indicator that is 0 when pressed and 1 when not
+ * pressed.
+ * The next 3 bytes are two 12 bit touch coordinates, X and Y.
+ * The data for the second touch is in the same format and
+ * immediately follows the data for the first.
+ */
+ for (n = 0; n < 2; n++) {
+ u16 x, y;
+ bool active;
- x = rd[offset+1] | ((rd[offset+2] & 0xF) << 8);
- y = ((rd[offset+2] & 0xF0) >> 4) | (rd[offset+3] << 4);
+ x = rd[offset+1] | ((rd[offset+2] & 0xF) << 8);
+ y = ((rd[offset+2] & 0xF0) >> 4) | (rd[offset+3] << 4);
- input_mt_slot(input_dev, n);
- input_mt_report_slot_state(input_dev, MT_TOOL_FINGER,
- !(rd[offset] >> 7));
- input_report_abs(input_dev, ABS_MT_POSITION_X, x);
- input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+ active = !(rd[offset] >> 7);
+ input_mt_slot(sc->touchpad, n);
+ input_mt_report_slot_state(sc->touchpad, MT_TOOL_FINGER, active);
- offset += 4;
+ if (active) {
+ input_report_abs(sc->touchpad, ABS_MT_POSITION_X, x);
+ input_report_abs(sc->touchpad, ABS_MT_POSITION_Y, y);
+ }
+
+ offset += 4;
+ }
+ input_mt_sync_frame(sc->touchpad);
+ input_sync(sc->touchpad);
}
}
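For clarity, a standalone sketch of how one 4-byte touch record is decoded by the loop above; the sample bytes are made up:

	/* Hypothetical record: contact byte plus two packed 12-bit coords. */
	const u8 rec[4] = { 0x05, 0x34, 0x62, 0x1A };

	bool active = !(rec[0] >> 7);			/* bit 7: 0 = finger down */
	u8 counter  = rec[0] & 0x7F;			/* bits 0-6: touch counter */
	u16 x = rec[1] | ((rec[2] & 0x0F) << 8);	/* 0x234 = 564 */
	u16 y = ((rec[2] & 0xF0) >> 4) | (rec[3] << 4);	/* 0x1A6 = 422 */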
@@ -1324,6 +1415,21 @@ static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
} else if (((sc->quirks & DUALSHOCK4_CONTROLLER_USB) && rd[0] == 0x01 &&
size == 64) || ((sc->quirks & DUALSHOCK4_CONTROLLER_BT)
&& rd[0] == 0x11 && size == 78)) {
+ if (sc->quirks & DUALSHOCK4_CONTROLLER_BT) {
+ /* CRC check */
+ u8 bthdr = 0xA1;
+ u32 crc;
+ u32 report_crc;
+
+ crc = crc32_le(0xFFFFFFFF, &bthdr, 1);
+ crc = ~crc32_le(crc, rd, DS4_INPUT_REPORT_0x11_SIZE-4);
+ report_crc = get_unaligned_le32(&rd[DS4_INPUT_REPORT_0x11_SIZE-4]);
+ if (crc != report_crc) {
+ hid_dbg(sc->hdev, "DualShock 4 input report's CRC check failed, received crc 0x%0x != 0x%0x\n",
+ report_crc, crc);
+ return -EILSEQ;
+ }
+ }
dualshock4_parse_report(sc, rd, size);
}
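A possible standalone form of the Bluetooth CRC check added above, assuming crc32_le() and get_unaligned_le32() are available as used in the hunk; the helper name is hypothetical. Output reports are protected the same way, with a 0xA2 header byte and the CRC stored in bytes 74-77 (see the dualshock4_send_output_report() hunk further down):

static bool ds4_bt_input_crc_ok(const u8 *rd)
{
	u8 bthdr = 0xA1;	/* Bluetooth transport header for input reports */
	u32 crc;

	/* CRC-32 over the header byte followed by the first 74 report bytes. */
	crc = crc32_le(0xFFFFFFFF, &bthdr, 1);
	crc = ~crc32_le(crc, rd, 78 - 4);

	/* The controller stores the expected CRC little-endian at the end. */
	return crc == get_unaligned_le32(&rd[78 - 4]);
}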
@@ -1367,47 +1473,84 @@ static int sony_mapping(struct hid_device *hdev, struct hid_input *hi,
if (sc->quirks & PS3REMOTE)
return ps3remote_mapping(hdev, hi, field, usage, bit, max);
+
+ if (sc->quirks & DUALSHOCK4_CONTROLLER)
+ return ds4_mapping(hdev, hi, field, usage, bit, max);
+
/* Let hid-core decide for the others */
return 0;
}
-static int sony_register_touchpad(struct hid_input *hi, int touch_count,
+static int sony_register_touchpad(struct sony_sc *sc, int touch_count,
int w, int h)
{
- struct input_dev *input_dev = hi->input;
+ size_t name_sz;
+ char *name;
int ret;
- ret = input_mt_init_slots(input_dev, touch_count, 0);
+ sc->touchpad = input_allocate_device();
+ if (!sc->touchpad)
+ return -ENOMEM;
+
+ input_set_drvdata(sc->touchpad, sc);
+ sc->touchpad->dev.parent = &sc->hdev->dev;
+ sc->touchpad->phys = sc->hdev->phys;
+ sc->touchpad->uniq = sc->hdev->uniq;
+ sc->touchpad->id.bustype = sc->hdev->bus;
+ sc->touchpad->id.vendor = sc->hdev->vendor;
+ sc->touchpad->id.product = sc->hdev->product;
+ sc->touchpad->id.version = sc->hdev->version;
+
+ /* Append a suffix to the controller name as there are various
+ * DS4 compatible non-Sony devices with different names.
+ */
+ name_sz = strlen(sc->hdev->name) + sizeof(DS4_TOUCHPAD_SUFFIX);
+ name = kzalloc(name_sz, GFP_KERNEL);
+ if (!name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ snprintf(name, name_sz, "%s" DS4_TOUCHPAD_SUFFIX, sc->hdev->name);
+ sc->touchpad->name = name;
+
+ ret = input_mt_init_slots(sc->touchpad, touch_count, 0);
if (ret < 0)
- return ret;
+ goto err;
+
+ /* We map the button underneath the touchpad to BTN_LEFT. */
+ __set_bit(EV_KEY, sc->touchpad->evbit);
+ __set_bit(BTN_LEFT, sc->touchpad->keybit);
+ __set_bit(INPUT_PROP_BUTTONPAD, sc->touchpad->propbit);
- input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, w, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, h, 0, 0);
+ input_set_abs_params(sc->touchpad, ABS_MT_POSITION_X, 0, w, 0, 0);
+ input_set_abs_params(sc->touchpad, ABS_MT_POSITION_Y, 0, h, 0, 0);
+
+ ret = input_register_device(sc->touchpad);
+ if (ret < 0)
+ goto err;
return 0;
+
+err:
+ kfree(sc->touchpad->name);
+ sc->touchpad->name = NULL;
+
+ input_free_device(sc->touchpad);
+ sc->touchpad = NULL;
+
+ return ret;
}
-static int sony_input_configured(struct hid_device *hdev,
- struct hid_input *hidinput)
+static void sony_unregister_touchpad(struct sony_sc *sc)
{
- struct sony_sc *sc = hid_get_drvdata(hdev);
- int ret;
+ if (!sc->touchpad)
+ return;
- /*
- * The Dualshock 4 touchpad supports 2 touches and has a
- * resolution of 1920x942 (44.86 dots/mm).
- */
- if (sc->quirks & DUALSHOCK4_CONTROLLER) {
- ret = sony_register_touchpad(hidinput, 2, 1920, 942);
- if (ret) {
- hid_err(sc->hdev,
- "Unable to initialize multi-touch slots: %d\n",
- ret);
- return ret;
- }
- }
+ kfree(sc->touchpad->name);
+ sc->touchpad->name = NULL;
- return 0;
+ input_unregister_device(sc->touchpad);
+ sc->touchpad = NULL;
}
/*
@@ -1483,11 +1626,11 @@ static int dualshock4_set_operational_bt(struct hid_device *hdev)
u8 *buf;
int ret;
- buf = kmalloc(DS4_REPORT_0x02_SIZE, GFP_KERNEL);
+ buf = kmalloc(DS4_FEATURE_REPORT_0x02_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = hid_hw_raw_request(hdev, 0x02, buf, DS4_REPORT_0x02_SIZE,
+ ret = hid_hw_raw_request(hdev, 0x02, buf, DS4_FEATURE_REPORT_0x02_SIZE,
HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
kfree(buf);
@@ -1892,14 +2035,14 @@ static void dualshock4_send_output_report(struct sony_sc *sc)
* 0xD0 - 66hz
*/
if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
- memset(buf, 0, DS4_REPORT_0x05_SIZE);
+ memset(buf, 0, DS4_OUTPUT_REPORT_0x05_SIZE);
buf[0] = 0x05;
buf[1] = 0xFF;
offset = 4;
} else {
- memset(buf, 0, DS4_REPORT_0x11_SIZE);
+ memset(buf, 0, DS4_OUTPUT_REPORT_0x11_SIZE);
buf[0] = 0x11;
- buf[1] = 0x80;
+ buf[1] = 0xC0; /* HID + CRC */
buf[3] = 0x0F;
offset = 6;
}
@@ -1925,10 +2068,17 @@ static void dualshock4_send_output_report(struct sony_sc *sc)
buf[offset++] = sc->led_delay_off[3];
if (sc->quirks & DUALSHOCK4_CONTROLLER_USB)
- hid_hw_output_report(hdev, buf, DS4_REPORT_0x05_SIZE);
- else
- hid_hw_raw_request(hdev, 0x11, buf, DS4_REPORT_0x11_SIZE,
- HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+ hid_hw_output_report(hdev, buf, DS4_OUTPUT_REPORT_0x05_SIZE);
+ else {
+ /* CRC generation */
+ u8 bthdr = 0xA2;
+ u32 crc;
+
+ crc = crc32_le(0xFFFFFFFF, &bthdr, 1);
+ crc = ~crc32_le(crc, buf, DS4_OUTPUT_REPORT_0x11_SIZE-4);
+ put_unaligned_le32(crc, &buf[74]);
+ hid_hw_output_report(hdev, buf, DS4_OUTPUT_REPORT_0x11_SIZE);
+ }
}
static void motion_send_output_report(struct sony_sc *sc)
@@ -1972,10 +2122,10 @@ static int sony_allocate_output_report(struct sony_sc *sc)
kmalloc(sizeof(union sixaxis_output_report_01),
GFP_KERNEL);
else if (sc->quirks & DUALSHOCK4_CONTROLLER_BT)
- sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x11_SIZE,
+ sc->output_report_dmabuf = kmalloc(DS4_OUTPUT_REPORT_0x11_SIZE,
GFP_KERNEL);
else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB)
- sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x05_SIZE,
+ sc->output_report_dmabuf = kmalloc(DS4_OUTPUT_REPORT_0x05_SIZE,
GFP_KERNEL);
else if (sc->quirks & MOTION_CONTROLLER)
sc->output_report_dmabuf = kmalloc(MOTION_REPORT_0x02_SIZE,
@@ -2220,7 +2370,7 @@ static int sony_check_add(struct sony_sc *sc)
return 0;
}
} else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
- buf = kmalloc(DS4_REPORT_0x81_SIZE, GFP_KERNEL);
+ buf = kmalloc(DS4_FEATURE_REPORT_0x81_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -2230,10 +2380,10 @@ static int sony_check_add(struct sony_sc *sc)
* offset 1.
*/
ret = hid_hw_raw_request(sc->hdev, 0x81, buf,
- DS4_REPORT_0x81_SIZE, HID_FEATURE_REPORT,
+ DS4_FEATURE_REPORT_0x81_SIZE, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
- if (ret != DS4_REPORT_0x81_SIZE) {
+ if (ret != DS4_FEATURE_REPORT_0x81_SIZE) {
hid_err(sc->hdev, "failed to retrieve feature report 0x81 with the DualShock 4 MAC address\n");
ret = ret < 0 ? ret : -EINVAL;
goto out_free;
@@ -2329,45 +2479,12 @@ static inline void sony_cancel_work_sync(struct sony_sc *sc)
cancel_work_sync(&sc->state_worker);
}
-static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+static int sony_input_configured(struct hid_device *hdev,
+ struct hid_input *hidinput)
{
- int ret;
+ struct sony_sc *sc = hid_get_drvdata(hdev);
int append_dev_id;
- unsigned long quirks = id->driver_data;
- struct sony_sc *sc;
- unsigned int connect_mask = HID_CONNECT_DEFAULT;
-
- if (!strcmp(hdev->name, "FutureMax Dance Mat"))
- quirks |= FUTUREMAX_DANCE_MAT;
-
- sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL);
- if (sc == NULL) {
- hid_err(hdev, "can't alloc sony descriptor\n");
- return -ENOMEM;
- }
-
- spin_lock_init(&sc->lock);
-
- sc->quirks = quirks;
- hid_set_drvdata(hdev, sc);
- sc->hdev = hdev;
-
- ret = hid_parse(hdev);
- if (ret) {
- hid_err(hdev, "parse failed\n");
- return ret;
- }
-
- if (sc->quirks & VAIO_RDESC_CONSTANT)
- connect_mask |= HID_CONNECT_HIDDEV_FORCE;
- else if (sc->quirks & SIXAXIS_CONTROLLER)
- connect_mask |= HID_CONNECT_HIDDEV_FORCE;
-
- ret = hid_hw_start(hdev, connect_mask);
- if (ret) {
- hid_err(hdev, "hw start failed\n");
- return ret;
- }
+ int ret;
ret = sony_set_device_id(sc);
if (ret < 0) {
@@ -2415,11 +2532,6 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
sony_init_output_report(sc, sixaxis_send_output_report);
} else if (sc->quirks & DUALSHOCK4_CONTROLLER) {
if (sc->quirks & DUALSHOCK4_CONTROLLER_BT) {
- /*
- * The DualShock 4 wants output reports sent on the ctrl
- * endpoint when connected via Bluetooth.
- */
- hdev->quirks |= HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP;
ret = dualshock4_set_operational_bt(hdev);
if (ret < 0) {
hid_err(hdev, "failed to set the Dualshock 4 operational mode\n");
@@ -2427,6 +2539,18 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
}
+ /*
+ * The Dualshock 4 touchpad supports 2 touches and has a
+ * resolution of 1920x942 (44.86 dots/mm).
+ */
+ ret = sony_register_touchpad(sc, 2, 1920, 942);
+ if (ret) {
+ hid_err(sc->hdev,
+ "Unable to initialize multi-touch slots: %d\n",
+ ret);
+ return ret;
+ }
+
sony_init_output_report(sc, dualshock4_send_output_report);
} else if (sc->quirks & MOTION_CONTROLLER) {
sony_init_output_report(sc, motion_send_output_report);
@@ -2482,17 +2606,84 @@ err_stop:
return ret;
}
+static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ unsigned long quirks = id->driver_data;
+ struct sony_sc *sc;
+ unsigned int connect_mask = HID_CONNECT_DEFAULT;
+
+ if (!strcmp(hdev->name, "FutureMax Dance Mat"))
+ quirks |= FUTUREMAX_DANCE_MAT;
+
+ sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL);
+ if (sc == NULL) {
+ hid_err(hdev, "can't alloc sony descriptor\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&sc->lock);
+
+ sc->quirks = quirks;
+ hid_set_drvdata(hdev, sc);
+ sc->hdev = hdev;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ return ret;
+ }
+
+ if (sc->quirks & VAIO_RDESC_CONSTANT)
+ connect_mask |= HID_CONNECT_HIDDEV_FORCE;
+ else if (sc->quirks & SIXAXIS_CONTROLLER)
+ connect_mask |= HID_CONNECT_HIDDEV_FORCE;
+
+ /* Patch the hw version on DS4 compatible devices, so applications can
+ * distinguish between the default HID mappings and the mappings defined
+ * by the Linux game controller spec. This is important for the SDL2
+ * library, whose game controller database uses device ids combined
+ * with the version as a key.
+ */
+ if (sc->quirks & DUALSHOCK4_CONTROLLER)
+ hdev->version |= 0x8000;
+
+ ret = hid_hw_start(hdev, connect_mask);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ return ret;
+ }
+
+ /* sony_input_configured can fail, but by design this does not make
+ * hid_hw_start fail. Check whether the HID layer claimed the device
+ * and bail out if it did not.
+ * We don't know the actual reason for the failure; most likely it is
+ * EEXIST in the case of a double connection over USB and Bluetooth,
+ * but it could also have been ENOMEM or something else.
+ */
+ if (!(hdev->claimed & HID_CLAIMED_INPUT)) {
+ hid_err(hdev, "failed to claim input\n");
+ return -ENODEV;
+ }
+
+ return ret;
+}
+
static void sony_remove(struct hid_device *hdev)
{
struct sony_sc *sc = hid_get_drvdata(hdev);
+ hid_hw_close(hdev);
+
if (sc->quirks & SONY_LED_SUPPORT)
sony_leds_remove(sc);
- if (sc->quirks & SONY_BATTERY_SUPPORT) {
- hid_hw_close(hdev);
+ if (sc->quirks & SONY_BATTERY_SUPPORT)
sony_battery_remove(sc);
- }
+
+ if (sc->touchpad)
+ sony_unregister_touchpad(sc);
sony_cancel_work_sync(sc);
@@ -2596,6 +2787,12 @@ static const struct hid_device_id sony_devices[] = {
.driver_data = DUALSHOCK4_CONTROLLER_USB },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
.driver_data = DUALSHOCK4_CONTROLLER_BT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ .driver_data = DUALSHOCK4_CONTROLLER_USB },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ .driver_data = DUALSHOCK4_CONTROLLER_BT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
+ .driver_data = DUALSHOCK4_CONTROLLER_USB },
/* Nyko Core Controller for PS3 */
{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER),
.driver_data = SIXAXIS_CONTROLLER_USB | SINO_LITE_CONTROLLER },
diff --git a/drivers/hid/hid-udraw-ps3.c b/drivers/hid/hid-udraw-ps3.c
new file mode 100644
index 000000000000..88ea390c10ad
--- /dev/null
+++ b/drivers/hid/hid-udraw-ps3.c
@@ -0,0 +1,474 @@
+/*
+ * HID driver for THQ PS3 uDraw tablet
+ *
+ * Copyright (C) 2016 Red Hat Inc. All Rights Reserved
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include "hid-ids.h"
+
+MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
+MODULE_DESCRIPTION("PS3 uDraw tablet driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * Protocol information from:
+ * http://brandonw.net/udraw/
+ * and the source code of:
+ * https://vvvv.org/contribution/udraw-hid
+ */
+
+/*
+ * The device is set up with multiple input devices:
+ * - the touch area which works as a touchpad
+ * - the tablet area which works as a touchpad/drawing tablet
+ * - a joypad with a d-pad and 7 buttons
+ * - an accelerometer device
+ */
+
+enum {
+ TOUCH_NONE,
+ TOUCH_PEN,
+ TOUCH_FINGER,
+ TOUCH_TWOFINGER
+};
+
+enum {
+ AXIS_X,
+ AXIS_Y,
+ AXIS_Z
+};
+
+/*
+ * Accelerometer min/max values
+ * in order, X, Y and Z
+ */
+static struct {
+ int min;
+ int max;
+} accel_limits[] = {
+ [AXIS_X] = { 490, 534 },
+ [AXIS_Y] = { 490, 534 },
+ [AXIS_Z] = { 492, 536 }
+};
+
+#define DEVICE_NAME "THQ uDraw Game Tablet for PS3"
+/* resolution in pixels */
+#define RES_X 1920
+#define RES_Y 1080
+/* size in mm */
+#define WIDTH 160
+#define HEIGHT 90
+#define PRESSURE_OFFSET 113
+#define MAX_PRESSURE (255 - PRESSURE_OFFSET)
+
+struct udraw {
+ struct input_dev *joy_input_dev;
+ struct input_dev *touch_input_dev;
+ struct input_dev *pen_input_dev;
+ struct input_dev *accel_input_dev;
+ struct hid_device *hdev;
+
+ /*
+ * The device's two-finger support is pretty unreliable, as
+ * the device could report a single touch when the two fingers
+ * are too close together, and the distance between the fingers,
+ * though reported, is not in the same unit as the touches.
+ *
+ * We'll make do without it, and try to report the first touch
+ * as reliably as possible.
+ */
+ int last_one_finger_x;
+ int last_one_finger_y;
+ int last_two_finger_x;
+ int last_two_finger_y;
+};
+
+static int clamp_accel(int axis, int offset)
+{
+ axis = clamp(axis,
+ accel_limits[offset].min,
+ accel_limits[offset].max);
+ axis = (axis - accel_limits[offset].min) /
+ ((accel_limits[offset].max -
+ accel_limits[offset].min) * 0xFF);
+ return axis;
+}
+
+static int udraw_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int len)
+{
+ struct udraw *udraw = hid_get_drvdata(hdev);
+ int touch;
+ int x, y, z;
+
+ if (len != 27)
+ return 0;
+
+ if (data[11] == 0x00)
+ touch = TOUCH_NONE;
+ else if (data[11] == 0x40)
+ touch = TOUCH_PEN;
+ else if (data[11] == 0x80)
+ touch = TOUCH_FINGER;
+ else
+ touch = TOUCH_TWOFINGER;
+
+ /* joypad */
+ input_report_key(udraw->joy_input_dev, BTN_WEST, data[0] & 1);
+ input_report_key(udraw->joy_input_dev, BTN_SOUTH, !!(data[0] & 2));
+ input_report_key(udraw->joy_input_dev, BTN_EAST, !!(data[0] & 4));
+ input_report_key(udraw->joy_input_dev, BTN_NORTH, !!(data[0] & 8));
+
+ input_report_key(udraw->joy_input_dev, BTN_SELECT, !!(data[1] & 1));
+ input_report_key(udraw->joy_input_dev, BTN_START, !!(data[1] & 2));
+ input_report_key(udraw->joy_input_dev, BTN_MODE, !!(data[1] & 16));
+
+ x = y = 0;
+ switch (data[2]) {
+ case 0x0:
+ y = -127;
+ break;
+ case 0x1:
+ y = -127;
+ x = 127;
+ break;
+ case 0x2:
+ x = 127;
+ break;
+ case 0x3:
+ y = 127;
+ x = 127;
+ break;
+ case 0x4:
+ y = 127;
+ break;
+ case 0x5:
+ y = 127;
+ x = -127;
+ break;
+ case 0x6:
+ x = -127;
+ break;
+ case 0x7:
+ y = -127;
+ x = -127;
+ break;
+ default:
+ break;
+ }
+
+ input_report_abs(udraw->joy_input_dev, ABS_X, x);
+ input_report_abs(udraw->joy_input_dev, ABS_Y, y);
+
+ input_sync(udraw->joy_input_dev);
+
+ /* For pen and touchpad */
+ x = y = 0;
+ if (touch != TOUCH_NONE) {
+ if (data[15] != 0x0F)
+ x = data[15] * 256 + data[17];
+ if (data[16] != 0x0F)
+ y = data[16] * 256 + data[18];
+ }
+
+ if (touch == TOUCH_FINGER) {
+ /* Save the last one-finger touch */
+ udraw->last_one_finger_x = x;
+ udraw->last_one_finger_y = y;
+ udraw->last_two_finger_x = -1;
+ udraw->last_two_finger_y = -1;
+ } else if (touch == TOUCH_TWOFINGER) {
+ /*
+ * We have a problem here: x/y is the position of the
+ * second finger, but we want to keep reporting the first
+ * finger to user-space, otherwise it will look as if it jumped.
+ *
+ * See the udraw struct definition for why this was
+ * implemented this way.
+ */
+ if (udraw->last_two_finger_x == -1) {
+ /* Save the position of the 2nd finger */
+ udraw->last_two_finger_x = x;
+ udraw->last_two_finger_y = y;
+
+ x = udraw->last_one_finger_x;
+ y = udraw->last_one_finger_y;
+ } else {
+ /*
+ * Offset the 2-finger coords using the
+ * saved data from the first finger
+ */
+ x = x - (udraw->last_two_finger_x
+ - udraw->last_one_finger_x);
+ y = y - (udraw->last_two_finger_y
+ - udraw->last_one_finger_y);
+ }
+ }
+
+ /* touchpad */
+ if (touch == TOUCH_FINGER || touch == TOUCH_TWOFINGER) {
+ input_report_key(udraw->touch_input_dev, BTN_TOUCH, 1);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_FINGER,
+ touch == TOUCH_FINGER);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_DOUBLETAP,
+ touch == TOUCH_TWOFINGER);
+
+ input_report_abs(udraw->touch_input_dev, ABS_X, x);
+ input_report_abs(udraw->touch_input_dev, ABS_Y, y);
+ } else {
+ input_report_key(udraw->touch_input_dev, BTN_TOUCH, 0);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_FINGER, 0);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_DOUBLETAP, 0);
+ }
+ input_sync(udraw->touch_input_dev);
+
+ /* pen */
+ if (touch == TOUCH_PEN) {
+ int level;
+
+ level = clamp(data[13] - PRESSURE_OFFSET,
+ 0, MAX_PRESSURE);
+
+ input_report_key(udraw->pen_input_dev, BTN_TOUCH, (level != 0));
+ input_report_key(udraw->pen_input_dev, BTN_TOOL_PEN, 1);
+ input_report_abs(udraw->pen_input_dev, ABS_PRESSURE, level);
+ input_report_abs(udraw->pen_input_dev, ABS_X, x);
+ input_report_abs(udraw->pen_input_dev, ABS_Y, y);
+ } else {
+ input_report_key(udraw->pen_input_dev, BTN_TOUCH, 0);
+ input_report_key(udraw->pen_input_dev, BTN_TOOL_PEN, 0);
+ input_report_abs(udraw->pen_input_dev, ABS_PRESSURE, 0);
+ }
+ input_sync(udraw->pen_input_dev);
+
+ /* accel */
+ x = (data[19] + (data[20] << 8));
+ x = clamp_accel(x, AXIS_X);
+ y = (data[21] + (data[22] << 8));
+ y = clamp_accel(y, AXIS_Y);
+ z = (data[23] + (data[24] << 8));
+ z = clamp_accel(z, AXIS_Z);
+ input_report_abs(udraw->accel_input_dev, ABS_X, x);
+ input_report_abs(udraw->accel_input_dev, ABS_Y, y);
+ input_report_abs(udraw->accel_input_dev, ABS_Z, z);
+ input_sync(udraw->accel_input_dev);
+
+ /* let hidraw and hiddev handle the report */
+ return 0;
+}
+
+static int udraw_open(struct input_dev *dev)
+{
+ struct udraw *udraw = input_get_drvdata(dev);
+
+ return hid_hw_open(udraw->hdev);
+}
+
+static void udraw_close(struct input_dev *dev)
+{
+ struct udraw *udraw = input_get_drvdata(dev);
+
+ hid_hw_close(udraw->hdev);
+}
+
+static struct input_dev *allocate_and_setup(struct hid_device *hdev,
+ const char *name)
+{
+ struct input_dev *input_dev;
+
+ input_dev = devm_input_allocate_device(&hdev->dev);
+ if (!input_dev)
+ return NULL;
+
+ input_dev->name = name;
+ input_dev->phys = hdev->phys;
+ input_dev->dev.parent = &hdev->dev;
+ input_dev->open = udraw_open;
+ input_dev->close = udraw_close;
+ input_dev->uniq = hdev->uniq;
+ input_dev->id.bustype = hdev->bus;
+ input_dev->id.vendor = hdev->vendor;
+ input_dev->id.product = hdev->product;
+ input_dev->id.version = hdev->version;
+ input_set_drvdata(input_dev, hid_get_drvdata(hdev));
+
+ return input_dev;
+}
+
+static bool udraw_setup_touch(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Touchpad");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
+
+ input_set_abs_params(input_dev, ABS_X, 0, RES_X, 1, 0);
+ input_abs_set_res(input_dev, ABS_X, RES_X / WIDTH);
+ input_set_abs_params(input_dev, ABS_Y, 0, RES_Y, 1, 0);
+ input_abs_set_res(input_dev, ABS_Y, RES_Y / HEIGHT);
+
+ set_bit(BTN_TOUCH, input_dev->keybit);
+ set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+ set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+
+ set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
+ udraw->touch_input_dev = input_dev;
+
+ return true;
+}
+
+static bool udraw_setup_pen(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Pen");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
+
+ input_set_abs_params(input_dev, ABS_X, 0, RES_X, 1, 0);
+ input_abs_set_res(input_dev, ABS_X, RES_X / WIDTH);
+ input_set_abs_params(input_dev, ABS_Y, 0, RES_Y, 1, 0);
+ input_abs_set_res(input_dev, ABS_Y, RES_Y / HEIGHT);
+ input_set_abs_params(input_dev, ABS_PRESSURE,
+ 0, MAX_PRESSURE, 0, 0);
+
+ set_bit(BTN_TOUCH, input_dev->keybit);
+ set_bit(BTN_TOOL_PEN, input_dev->keybit);
+
+ set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
+ udraw->pen_input_dev = input_dev;
+
+ return true;
+}
+
+static bool udraw_setup_accel(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Accelerometer");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_ABS);
+
+ /* 1G accel is reported as ~256, so clamp to 2G */
+ input_set_abs_params(input_dev, ABS_X, -512, 512, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, -512, 512, 0, 0);
+ input_set_abs_params(input_dev, ABS_Z, -512, 512, 0, 0);
+
+ set_bit(INPUT_PROP_ACCELEROMETER, input_dev->propbit);
+
+ udraw->accel_input_dev = input_dev;
+
+ return true;
+}
+
+static bool udraw_setup_joypad(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Joypad");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
+
+ set_bit(BTN_SOUTH, input_dev->keybit);
+ set_bit(BTN_NORTH, input_dev->keybit);
+ set_bit(BTN_EAST, input_dev->keybit);
+ set_bit(BTN_WEST, input_dev->keybit);
+ set_bit(BTN_SELECT, input_dev->keybit);
+ set_bit(BTN_START, input_dev->keybit);
+ set_bit(BTN_MODE, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_X, -127, 127, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, -127, 127, 0, 0);
+
+ udraw->joy_input_dev = input_dev;
+
+ return true;
+}
+
+static int udraw_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct udraw *udraw;
+ int ret;
+
+ udraw = devm_kzalloc(&hdev->dev, sizeof(struct udraw), GFP_KERNEL);
+ if (!udraw)
+ return -ENOMEM;
+
+ udraw->hdev = hdev;
+ udraw->last_two_finger_x = -1;
+ udraw->last_two_finger_y = -1;
+
+ hid_set_drvdata(hdev, udraw);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ return ret;
+ }
+
+ if (!udraw_setup_joypad(udraw, hdev) ||
+ !udraw_setup_touch(udraw, hdev) ||
+ !udraw_setup_pen(udraw, hdev) ||
+ !udraw_setup_accel(udraw, hdev)) {
+ hid_err(hdev, "could not allocate interfaces\n");
+ return -ENOMEM;
+ }
+
+ ret = input_register_device(udraw->joy_input_dev) ||
+ input_register_device(udraw->touch_input_dev) ||
+ input_register_device(udraw->pen_input_dev) ||
+ input_register_device(udraw->accel_input_dev);
+ if (ret) {
+ hid_err(hdev, "failed to register interfaces\n");
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW | HID_CONNECT_DRIVER);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct hid_device_id udraw_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, udraw_devices);
+
+static struct hid_driver udraw_driver = {
+ .name = "hid-udraw",
+ .id_table = udraw_devices,
+ .raw_event = udraw_raw_event,
+ .probe = udraw_probe,
+};
+module_hid_driver(udraw_driver);
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index b3ec4f2de875..78fb32a7b103 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -22,6 +22,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/input.h>
+#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm.h>
@@ -37,10 +38,15 @@
#include <linux/mutex.h>
#include <linux/acpi.h>
#include <linux/of.h>
-#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
#include <linux/i2c/i2c-hid.h>
+#include "../hid-ids.h"
+
+/* quirks to control the device */
+#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
+
/* flags */
#define I2C_HID_STARTED 0
#define I2C_HID_RESET_PENDING 1
@@ -143,10 +149,9 @@ struct i2c_hid {
char *argsbuf; /* Command arguments buffer */
unsigned long flags; /* device flags */
+ unsigned long quirks; /* Various quirks */
wait_queue_head_t wait; /* For waiting the interrupt */
- struct gpio_desc *desc;
- int irq;
struct i2c_hid_platform_data pdata;
@@ -154,6 +159,39 @@ struct i2c_hid {
struct mutex reset_lock;
};
+static const struct i2c_hid_quirks {
+ __u16 idVendor;
+ __u16 idProduct;
+ __u32 quirks;
+} i2c_hid_quirks[] = {
+ { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8752,
+ I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+ { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
+ I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+ { 0, 0 }
+};
+
+/*
+ * i2c_hid_lookup_quirk: return any quirks associated with an I2C HID device
+ * @idVendor: the 16-bit vendor ID
+ * @idProduct: the 16-bit product ID
+ *
+ * Returns: a u32 quirks value.
+ */
+static u32 i2c_hid_lookup_quirk(const u16 idVendor, const u16 idProduct)
+{
+ u32 quirks = 0;
+ int n;
+
+ for (n = 0; i2c_hid_quirks[n].idVendor; n++)
+ if (i2c_hid_quirks[n].idVendor == idVendor &&
+ (i2c_hid_quirks[n].idProduct == (__u16)HID_ANY_ID ||
+ i2c_hid_quirks[n].idProduct == idProduct))
+ quirks = i2c_hid_quirks[n].quirks;
+
+ return quirks;
+}
+
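A quick usage sketch for the lookup above; the second call uses made-up IDs only to show the no-match case, and an entry with idProduct == HID_ANY_ID would match every product of that vendor:

	u32 q;

	/* Exact table entry: returns I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV. */
	q = i2c_hid_lookup_quirk(USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8752);

	/* Hypothetical device with no table entry: returns 0 (no quirks). */
	q = i2c_hid_lookup_quirk(0x1234, 0x5678);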
static int __i2c_hid_command(struct i2c_client *client,
const struct i2c_hid_cmd *command, u8 reportID,
u8 reportType, u8 *args, int args_len,
@@ -346,11 +384,27 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
i2c_hid_dbg(ihid, "%s\n", __func__);
+ /*
+ * Some devices require a wake-up command to be sent before powering on.
+ * The call may return an error (EREMOTEIO), but the device will be
+ * triggered and activated. After that, it behaves like a normal device.
+ */
+ if (power_state == I2C_HID_PWR_ON &&
+ ihid->quirks & I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV) {
+ ret = i2c_hid_command(client, &hid_set_power_cmd, NULL, 0);
+
+ /* Device was already activated */
+ if (!ret)
+ goto set_pwr_exit;
+ }
+
ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
0, NULL, 0, NULL, 0);
+
if (ret)
dev_err(&client->dev, "failed to change power setting.\n");
+set_pwr_exit:
return ret;
}
@@ -716,9 +770,11 @@ static int i2c_hid_start(struct hid_device *hid)
i2c_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize);
if (bufsize > ihid->bufsize) {
+ disable_irq(client->irq);
i2c_hid_free_buffers(ihid);
ret = i2c_hid_alloc_buffers(ihid, bufsize);
+ enable_irq(client->irq);
if (ret)
return ret;
@@ -806,18 +862,21 @@ static struct hid_ll_driver i2c_hid_ll_driver = {
static int i2c_hid_init_irq(struct i2c_client *client)
{
struct i2c_hid *ihid = i2c_get_clientdata(client);
+ unsigned long irqflags = 0;
int ret;
- dev_dbg(&client->dev, "Requesting IRQ: %d\n", ihid->irq);
+ dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
- ret = request_threaded_irq(ihid->irq, NULL, i2c_hid_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- client->name, ihid);
+ if (!irq_get_trigger_type(client->irq))
+ irqflags = IRQF_TRIGGER_LOW;
+
+ ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
+ irqflags | IRQF_ONESHOT, client->name, ihid);
if (ret < 0) {
dev_warn(&client->dev,
"Could not register for %s interrupt, irq = %d,"
" ret = %d\n",
- client->name, ihid->irq, ret);
+ client->name, client->irq, ret);
return ret;
}
@@ -864,14 +923,6 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
}
#ifdef CONFIG_ACPI
-
-/* Default GPIO mapping */
-static const struct acpi_gpio_params i2c_hid_irq_gpio = { 0, 0, true };
-static const struct acpi_gpio_mapping i2c_hid_acpi_gpios[] = {
- { "gpios", &i2c_hid_irq_gpio, 1 },
- { },
-};
-
static int i2c_hid_acpi_pdata(struct i2c_client *client,
struct i2c_hid_platform_data *pdata)
{
@@ -882,7 +933,6 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
union acpi_object *obj;
struct acpi_device *adev;
acpi_handle handle;
- int ret;
handle = ACPI_HANDLE(&client->dev);
if (!handle || acpi_bus_get_device(handle, &adev))
@@ -898,9 +948,7 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
pdata->hid_descriptor_address = obj->integer.value;
ACPI_FREE(obj);
- /* GPIOs are optional */
- ret = acpi_dev_add_driver_gpios(adev, i2c_hid_acpi_gpios);
- return ret < 0 && ret != -ENXIO ? ret : 0;
+ return 0;
}
static const struct acpi_device_id i2c_hid_acpi_match[] = {
@@ -964,6 +1012,19 @@ static int i2c_hid_probe(struct i2c_client *client,
dbg_hid("HID probe called for i2c 0x%02x\n", client->addr);
+ if (!client->irq) {
+ dev_err(&client->dev,
+ "HID over i2c has not been provided an Int IRQ\n");
+ return -EINVAL;
+ }
+
+ if (client->irq < 0) {
+ if (client->irq != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "HID over i2c doesn't have a valid IRQ\n");
+ return client->irq;
+ }
+
ihid = kzalloc(sizeof(struct i2c_hid), GFP_KERNEL);
if (!ihid)
return -ENOMEM;
@@ -983,23 +1044,6 @@ static int i2c_hid_probe(struct i2c_client *client,
ihid->pdata = *platform_data;
}
- if (client->irq > 0) {
- ihid->irq = client->irq;
- } else if (ACPI_COMPANION(&client->dev)) {
- ihid->desc = gpiod_get(&client->dev, NULL, GPIOD_IN);
- if (IS_ERR(ihid->desc)) {
- dev_err(&client->dev, "Failed to get GPIO interrupt\n");
- return PTR_ERR(ihid->desc);
- }
-
- ihid->irq = gpiod_to_irq(ihid->desc);
- if (ihid->irq < 0) {
- gpiod_put(ihid->desc);
- dev_err(&client->dev, "Failed to convert GPIO to IRQ\n");
- return ihid->irq;
- }
- }
-
i2c_set_clientdata(client, ihid);
ihid->client = client;
@@ -1050,6 +1094,8 @@ static int i2c_hid_probe(struct i2c_client *client,
client->name, hid->vendor, hid->product);
strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
+ ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
+
ret = hid_add_device(hid);
if (ret) {
if (ret != -ENODEV)
@@ -1064,16 +1110,13 @@ err_mem_free:
hid_destroy_device(hid);
err_irq:
- free_irq(ihid->irq, ihid);
+ free_irq(client->irq, ihid);
err_pm:
pm_runtime_put_noidle(&client->dev);
pm_runtime_disable(&client->dev);
err:
- if (ihid->desc)
- gpiod_put(ihid->desc);
-
i2c_hid_free_buffers(ihid);
kfree(ihid);
return ret;
@@ -1092,18 +1135,13 @@ static int i2c_hid_remove(struct i2c_client *client)
hid = ihid->hid;
hid_destroy_device(hid);
- free_irq(ihid->irq, ihid);
+ free_irq(client->irq, ihid);
if (ihid->bufsize)
i2c_hid_free_buffers(ihid);
- if (ihid->desc)
- gpiod_put(ihid->desc);
-
kfree(ihid);
- acpi_dev_remove_driver_gpios(ACPI_COMPANION(&client->dev));
-
return 0;
}
@@ -1142,11 +1180,11 @@ static int i2c_hid_suspend(struct device *dev)
/* Save some power */
i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
- disable_irq(ihid->irq);
+ disable_irq(client->irq);
}
if (device_may_wakeup(&client->dev)) {
- wake_status = enable_irq_wake(ihid->irq);
+ wake_status = enable_irq_wake(client->irq);
if (!wake_status)
ihid->irq_wake_enabled = true;
else
@@ -1166,7 +1204,7 @@ static int i2c_hid_resume(struct device *dev)
int wake_status;
if (device_may_wakeup(&client->dev) && ihid->irq_wake_enabled) {
- wake_status = disable_irq_wake(ihid->irq);
+ wake_status = disable_irq_wake(client->irq);
if (!wake_status)
ihid->irq_wake_enabled = false;
else
@@ -1179,7 +1217,7 @@ static int i2c_hid_resume(struct device *dev)
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
- enable_irq(ihid->irq);
+ enable_irq(client->irq);
ret = i2c_hid_hwreset(client);
if (ret)
return ret;
@@ -1197,19 +1235,17 @@ static int i2c_hid_resume(struct device *dev)
static int i2c_hid_runtime_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct i2c_hid *ihid = i2c_get_clientdata(client);
i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
- disable_irq(ihid->irq);
+ disable_irq(client->irq);
return 0;
}
static int i2c_hid_runtime_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct i2c_hid *ihid = i2c_get_clientdata(client);
- enable_irq(ihid->irq);
+ enable_irq(client->irq);
i2c_hid_set_power(client, I2C_HID_PWR_ON);
return 0;
}
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 0c9ac4d5d850..842d8416a7a6 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -19,7 +19,6 @@
#include <linux/jiffies.h>
#include "client.h"
#include "hw-ish.h"
-#include "utils.h"
#include "hbm.h"
/* For FW reset flow */
@@ -310,6 +309,7 @@ static int write_ipc_from_queue(struct ishtp_device *dev)
((uint32_t)tv_utc.tv_usec);
ts_format.ts1_source = HOST_SYSTEM_TIME_USEC;
ts_format.ts2_source = HOST_UTC_TIME_USEC;
+ ts_format.reserved = 0;
time_update.primary_host_time = usec_system;
time_update.secondary_host_time = usec_utc;
@@ -427,6 +427,59 @@ static int ipc_send_mng_msg(struct ishtp_device *dev, uint32_t msg_code,
sizeof(uint32_t) + size);
}
+#define WAIT_FOR_FW_RDY 0x1
+#define WAIT_FOR_INPUT_RDY 0x2
+
+/**
+ * timed_wait_for_timeout() - wait for a special event, with a timeout
+ * @dev: ISHTP device pointer
+ * @condition: the condition to wait for
+ * @timeinc: time slice for every wait cycle, in ms
+ * @timeout: time in ms for timeout
+ *
+ * This function polls in a loop until the specified event is ready; the
+ * loop period is specified in timeinc. Hitting the timeout is a failure.
+ *
+ * Return: 0 for success else failure code
+ */
+static int timed_wait_for_timeout(struct ishtp_device *dev, int condition,
+ unsigned int timeinc, unsigned int timeout)
+{
+ bool complete = false;
+ int ret;
+
+ do {
+ if (condition == WAIT_FOR_FW_RDY) {
+ complete = ishtp_fw_is_ready(dev);
+ } else if (condition == WAIT_FOR_INPUT_RDY) {
+ complete = ish_is_input_ready(dev);
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!complete) {
+ unsigned long left_time;
+
+ left_time = msleep_interruptible(timeinc);
+ timeout -= (timeinc - left_time);
+ }
+ } while (!complete && timeout > 0);
+
+ if (complete)
+ ret = 0;
+ else
+ ret = -EBUSY;
+
+out:
+ return ret;
+}
+
+#define TIME_SLICE_FOR_FW_RDY_MS 100
+#define TIME_SLICE_FOR_INPUT_RDY_MS 100
+#define TIMEOUT_FOR_FW_RDY_MS 2000
+#define TIMEOUT_FOR_INPUT_RDY_MS 2000
+
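For reference, a sketch of how a caller could use the helper and constants above; the error handling shown is hypothetical, and the actual call sites below re-check the ready flags instead of the return value:

	int ret;

	/* Poll every 100 ms, give up after 2000 ms (at most ~20 sleep cycles). */
	ret = timed_wait_for_timeout(dev, WAIT_FOR_FW_RDY,
				     TIME_SLICE_FOR_FW_RDY_MS,
				     TIMEOUT_FOR_FW_RDY_MS);
	if (ret)	/* -EBUSY on timeout, -EINVAL for an unknown condition */
		return ret;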
/**
* ish_fw_reset_handler() - FW reset handler
* @dev: ishtp device pointer
@@ -456,8 +509,8 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
ishtp_reset_handler(dev);
if (!ish_is_input_ready(dev))
- timed_wait_for_timeout(WAIT_FOR_SEND_SLICE,
- ish_is_input_ready(dev), (2 * HZ));
+ timed_wait_for_timeout(dev, WAIT_FOR_INPUT_RDY,
+ TIME_SLICE_FOR_INPUT_RDY_MS, TIMEOUT_FOR_INPUT_RDY_MS);
/* ISH FW is dead */
if (!ish_is_input_ready(dev))
@@ -472,8 +525,8 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
sizeof(uint32_t));
/* Wait for ISH FW'es ILUP and ISHTP_READY */
- timed_wait_for_timeout(WAIT_FOR_SEND_SLICE, ishtp_fw_is_ready(dev),
- (2 * HZ));
+ timed_wait_for_timeout(dev, WAIT_FOR_FW_RDY,
+ TIME_SLICE_FOR_FW_RDY_MS, TIMEOUT_FOR_FW_RDY_MS);
if (!ishtp_fw_is_ready(dev)) {
/* ISH FW is dead */
uint32_t ish_status;
@@ -487,6 +540,8 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
return 0;
}
+#define TIMEOUT_FOR_HW_RDY_MS 300
+
/**
* ish_fw_reset_work_fn() - FW reset worker function
* @unused: not used
@@ -500,7 +555,7 @@ static void fw_reset_work_fn(struct work_struct *unused)
rv = ish_fw_reset_handler(ishtp_dev);
if (!rv) {
/* ISH is ILUP & ISHTP-ready. Restart ISHTP */
- schedule_timeout(HZ / 3);
+ msleep_interruptible(TIMEOUT_FOR_HW_RDY_MS);
ishtp_dev->recvd_hw_ready = 1;
wake_up_interruptible(&ishtp_dev->wait_hw_ready);
diff --git a/drivers/hid/intel-ish-hid/ipc/utils.h b/drivers/hid/intel-ish-hid/ipc/utils.h
deleted file mode 100644
index 5a82123dc7b4..000000000000
--- a/drivers/hid/intel-ish-hid/ipc/utils.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Utility macros of ISH
- *
- * Copyright (c) 2014-2016, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-#ifndef UTILS__H
-#define UTILS__H
-
-#define WAIT_FOR_SEND_SLICE (HZ / 10)
-#define WAIT_FOR_CONNECT_SLICE (HZ / 10)
-
-/*
- * Waits for specified event when a thread that triggers event can't signal
- * Also, waits *at_least* `timeinc` after condition is satisfied
- */
-#define timed_wait_for(timeinc, condition) \
- do { \
- int completed = 0; \
- do { \
- unsigned long j; \
- int done = 0; \
- \
- completed = (condition); \
- for (j = jiffies, done = 0; !done; ) { \
- schedule_timeout(timeinc); \
- if (time_is_before_eq_jiffies(j + timeinc)) \
- done = 1; \
- } \
- } while (!(completed)); \
- } while (0)
-
-
-/*
- * Waits for specified event when a thread that triggers event
- * can't signal with timeout (use whenever we may hang)
- */
-#define timed_wait_for_timeout(timeinc, condition, timeout) \
- do { \
- int t = timeout; \
- do { \
- unsigned long j; \
- int done = 0; \
- \
- for (j = jiffies, done = 0; !done; ) { \
- schedule_timeout(timeinc); \
- if (time_is_before_eq_jiffies(j + timeinc)) \
- done = 1; \
- } \
- t -= timeinc; \
- if (t <= 0) \
- break; \
- } while (!(condition)); \
- } while (0)
-
-#endif /* UTILS__H */
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 256521509d20..f4cbc744e657 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -585,14 +585,7 @@ int ishtp_bus_new_client(struct ishtp_device *dev)
*/
i = dev->fw_client_presentation_num - 1;
device_uuid = dev->fw_clients[i].props.protocol_name;
- dev_name = kasprintf(GFP_KERNEL,
- "{%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X}",
- device_uuid.b[3], device_uuid.b[2], device_uuid.b[1],
- device_uuid.b[0], device_uuid.b[5], device_uuid.b[4],
- device_uuid.b[7], device_uuid.b[6], device_uuid.b[8],
- device_uuid.b[9], device_uuid.b[10], device_uuid.b[11],
- device_uuid.b[12], device_uuid.b[13], device_uuid.b[14],
- device_uuid.b[15]);
+ dev_name = kasprintf(GFP_KERNEL, "{%pUL}", device_uuid.b);
if (!dev_name)
return -ENOMEM;
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
index 74bffee60774..59460b66e689 100644
--- a/drivers/hid/intel-ish-hid/ishtp/hbm.c
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -378,11 +378,10 @@ static void ishtp_hbm_cl_disconnect_res(struct ishtp_device *dev,
list_for_each_entry(cl, &dev->cl_list, link) {
if (!rs->status && ishtp_hbm_cl_addr_equal(cl, rs)) {
cl->state = ISHTP_CL_DISCONNECTED;
+ wake_up_interruptible(&cl->wait_ctrl_res);
break;
}
}
- if (cl)
- wake_up_interruptible(&cl->wait_ctrl_res);
spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
@@ -431,11 +430,10 @@ static void ishtp_hbm_cl_connect_res(struct ishtp_device *dev,
cl->state = ISHTP_CL_DISCONNECTED;
cl->status = -ENODEV;
}
+ wake_up_interruptible(&cl->wait_ctrl_res);
break;
}
}
- if (cl)
- wake_up_interruptible(&cl->wait_ctrl_res);
spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ae83af649a60..333108ef18cf 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1459,7 +1459,7 @@ static int hid_post_reset(struct usb_interface *intf)
rdesc = kmalloc(hid->dev_rsize, GFP_KERNEL);
if (!rdesc) {
dbg_hid("couldn't allocate rdesc memory (post_reset)\n");
- return 1;
+ return -ENOMEM;
}
status = hid_get_class_descriptor(dev,
interface->desc.bInterfaceNumber,
@@ -1467,13 +1467,13 @@ static int hid_post_reset(struct usb_interface *intf)
if (status < 0) {
dbg_hid("reading report descriptor failed (post_reset)\n");
kfree(rdesc);
- return 1;
+ return status;
}
status = memcmp(rdesc, hid->dev_rdesc, hid->dev_rsize);
kfree(rdesc);
if (status != 0) {
dbg_hid("report descriptor changed\n");
- return 1;
+ return -EPERM;
}
/* No need to do another reset or clear a halted endpoint */
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index e6cfd323babc..b3e01c82af05 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -82,6 +82,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
@@ -101,8 +103,9 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index b4800ea891cb..d303e413306d 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -210,7 +210,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
struct wacom_wac *wacom_wac);
void wacom_wac_usage_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage);
-int wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
+void wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value);
void wacom_wac_report(struct hid_device *hdev, struct hid_report *report);
void wacom_battery_work(struct work_struct *work);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 5e7a5648e708..b9779bcbd140 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -122,6 +122,7 @@ static void wacom_feature_mapping(struct hid_device *hdev,
struct hid_data *hid_data = &wacom->wacom_wac.hid_data;
u8 *data;
int ret;
+ int n;
switch (usage->hid) {
case HID_DG_CONTACTMAX:
@@ -159,22 +160,48 @@ static void wacom_feature_mapping(struct hid_device *hdev,
case HID_UP_DIGITIZER:
if (field->report->id == 0x0B &&
- (field->application == WACOM_G9_DIGITIZER ||
- field->application == WACOM_G11_DIGITIZER)) {
+ (field->application == WACOM_HID_G9_PEN ||
+ field->application == WACOM_HID_G11_PEN)) {
wacom->wacom_wac.mode_report = field->report->id;
wacom->wacom_wac.mode_value = 0;
}
break;
- case WACOM_G9_PAGE:
- case WACOM_G11_PAGE:
+ case WACOM_HID_WD_DATAMODE:
+ wacom->wacom_wac.mode_report = field->report->id;
+ wacom->wacom_wac.mode_value = 2;
+ break;
+
+ case WACOM_HID_UP_G9:
+ case WACOM_HID_UP_G11:
if (field->report->id == 0x03 &&
- (field->application == WACOM_G9_TOUCHSCREEN ||
- field->application == WACOM_G11_TOUCHSCREEN)) {
+ (field->application == WACOM_HID_G9_TOUCHSCREEN ||
+ field->application == WACOM_HID_G11_TOUCHSCREEN)) {
wacom->wacom_wac.mode_report = field->report->id;
wacom->wacom_wac.mode_value = 0;
}
break;
+ case WACOM_HID_WD_OFFSETLEFT:
+ case WACOM_HID_WD_OFFSETTOP:
+ case WACOM_HID_WD_OFFSETRIGHT:
+ case WACOM_HID_WD_OFFSETBOTTOM:
+ /* read manually */
+ n = hid_report_len(field->report);
+ data = hid_alloc_report_buf(field->report, GFP_KERNEL);
+ if (!data)
+ break;
+ data[0] = field->report->id;
+ ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
+ data, n, WAC_CMD_RETRIES);
+ if (ret == n) {
+ ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT,
+ data, n, 0);
+ } else {
+ hid_warn(hdev, "%s: could not retrieve sensor offsets\n",
+ __func__);
+ }
+ kfree(data);
+ break;
}
}
@@ -240,6 +267,30 @@ static void wacom_usage_mapping(struct hid_device *hdev,
features->touch_max = 1;
}
+ /*
+ * ISDv4 devices which predate HID's adoption of the
+ * HID_DG_BARRELSWITCH2 usage use 0x000D0000 in its
+ * position instead. We can accurately detect if a
+ * usage with that value should be HID_DG_BARRELSWITCH2
+ * based on the surrounding usages, which have remained
+ * constant across generations.
+ */
+ if (features->type == HID_GENERIC &&
+ usage->hid == 0x000D0000 &&
+ field->application == HID_DG_PEN &&
+ field->physical == HID_DG_STYLUS) {
+ int i = usage->usage_index;
+
+ if (i-4 >= 0 && i+1 < field->maxusage &&
+ field->usage[i-4].hid == HID_DG_TIPSWITCH &&
+ field->usage[i-3].hid == HID_DG_BARRELSWITCH &&
+ field->usage[i-2].hid == HID_DG_ERASER &&
+ field->usage[i-1].hid == HID_DG_INVERT &&
+ field->usage[i+1].hid == HID_DG_INRANGE) {
+ usage->hid = HID_DG_BARRELSWITCH2;
+ }
+ }
+
switch (usage->hid) {
case HID_GD_X:
features->x_max = field->logical_maximum;
@@ -689,11 +740,6 @@ static int wacom_add_shared_data(struct hid_device *hdev)
return retval;
}
- if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
- wacom_wac->shared->touch = hdev;
- else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
- wacom_wac->shared->pen = hdev;
-
out:
mutex_unlock(&wacom_udev_list_lock);
return retval;
@@ -1916,6 +1962,19 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
/* shift everything including the terminator */
memmove(gap, gap+1, strlen(gap));
}
+
+ /* strip off excessive prefixing */
+ if (strstr(name, "Wacom Co.,Ltd. Wacom ") == name) {
+ int n = strlen(name);
+ int x = strlen("Wacom Co.,Ltd. ");
+ memmove(name, name+x, n-x+1);
+ }
+ if (strstr(name, "Wacom Co., Ltd. Wacom ") == name) {
+ int n = strlen(name);
+ int x = strlen("Wacom Co., Ltd. ");
+ memmove(name, name+x, n-x+1);
+ }
+
/* get rid of trailing whitespace */
if (name[strlen(name)-1] == ' ')
name[strlen(name)-1] = '\0';
@@ -1977,6 +2036,10 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
if (error)
goto fail;
+ error = wacom_add_shared_data(hdev);
+ if (error)
+ goto fail;
+
/*
* Bamboo Pad has a generic hid handling for the Pen, and we switch it
* into debug mode for the touch part.
@@ -2017,9 +2080,10 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
wacom_update_name(wacom, wireless ? " (WL)" : "");
- error = wacom_add_shared_data(hdev);
- if (error)
- goto fail;
+ if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
+ wacom_wac->shared->touch = hdev;
+ else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
+ wacom_wac->shared->pen = hdev;
if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) &&
(features->quirks & WACOM_QUIRK_BATTERY)) {
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 1cb79925730d..b1a9a3ca6d56 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -41,6 +41,8 @@ MODULE_PARM_DESC(touch_arbitration, " on (Y) off (N)");
static void wacom_report_numbered_buttons(struct input_dev *input_dev,
int button_count, int mask);
+static int wacom_numbered_button_to_key(int n);
+
/*
* Percent of battery capacity for Graphire.
* 8th value means AC online and show 100% capacity.
@@ -588,6 +590,11 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
return 1;
}
+static int wacom_intuos_id_mangle(int tool_id)
+{
+ return (tool_id & ~0xFFF) << 4 | (tool_id & 0xFFF);
+}
+
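Two worked examples of the mangle above, using the abbreviated tool IDs from the switch that follows; they recover the traditional 24-bit IDs that used to be listed there and are now reported through ABS_MISC:

/*
 *   0x10802 -> (0x10000 << 4) | 0x802 = 0x100802  (Intuos4/5 General Pen)
 *   0x1480a -> (0x14000 << 4) | 0x80a = 0x14080a  (Classic Pen Eraser)
 */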
static int wacom_intuos_get_tool_type(int tool_id)
{
int tool_type;
@@ -595,7 +602,7 @@ static int wacom_intuos_get_tool_type(int tool_id)
switch (tool_id) {
case 0x812: /* Inking pen */
case 0x801: /* Intuos3 Inking pen */
- case 0x120802: /* Intuos4/5 Inking Pen */
+ case 0x12802: /* Intuos4/5 Inking Pen */
case 0x012:
tool_type = BTN_TOOL_PENCIL;
break;
@@ -610,11 +617,11 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
case 0x8e2: /* IntuosHT2 pen */
case 0x022:
- case 0x100804: /* Intuos4/5 13HD/24HD Art Pen */
- case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
- case 0x160802: /* Cintiq 13HD Pro Pen */
- case 0x180802: /* DTH2242 Pen */
- case 0x100802: /* Intuos4/5 13HD/24HD General Pen */
+ case 0x10804: /* Intuos4/5 13HD/24HD Art Pen */
+ case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */
+ case 0x16802: /* Cintiq 13HD Pro Pen */
+ case 0x18802: /* DTH2242 Pen */
+ case 0x10802: /* Intuos4/5 13HD/24HD General Pen */
tool_type = BTN_TOOL_PEN;
break;
@@ -638,6 +645,7 @@ static int wacom_intuos_get_tool_type(int tool_id)
break;
case 0x82a: /* Eraser */
+ case 0x84a:
case 0x85a:
case 0x91a:
case 0xd1a:
@@ -648,12 +656,12 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x80c: /* Intuos4/5 13HD/24HD Marker Pen Eraser */
case 0x80a: /* Intuos4/5 13HD/24HD General Pen Eraser */
case 0x90a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
- case 0x14080a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
- case 0x10090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
- case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
- case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
- case 0x18080a: /* DTH2242 Eraser */
- case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
+ case 0x1480a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
+ case 0x1090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
+ case 0x1080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
+ case 0x1680a: /* Cintiq 13HD Pro Pen Eraser */
+ case 0x1880a: /* DTH2242 Eraser */
+ case 0x1080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
tool_type = BTN_TOOL_RUBBER;
break;
@@ -662,7 +670,7 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x112:
case 0x913: /* Intuos3 Airbrush */
case 0x902: /* Intuos4/5 13HD/24HD Airbrush */
- case 0x100902: /* Intuos4/5 13HD/24HD Airbrush */
+ case 0x10902: /* Intuos4/5 13HD/24HD Airbrush */
tool_type = BTN_TOOL_AIRBRUSH;
break;
@@ -693,7 +701,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
(data[6] << 4) + (data[7] >> 4);
wacom->id[idx] = (data[2] << 4) | (data[3] >> 4) |
- ((data[7] & 0x0f) << 20) | ((data[8] & 0xf0) << 12);
+ ((data[7] & 0x0f) << 16) | ((data[8] & 0xf0) << 8);
wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]);
@@ -923,7 +931,7 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
* don't report events for invalid data
*/
/* older I4 styli don't work with new Cintiqs */
- if ((!((wacom->id[idx] >> 20) & 0x01) &&
+ if ((!((wacom->id[idx] >> 16) & 0x01) &&
(features->type == WACOM_21UX2)) ||
/* Only large Intuos support Lense Cursor */
(wacom->tool[idx] == BTN_TOOL_LENS &&
@@ -1059,7 +1067,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
break;
}
- input_report_abs(input, ABS_MISC, wacom->id[idx]); /* report tool id */
+ input_report_abs(input, ABS_MISC,
+ wacom_intuos_id_mangle(wacom->id[idx])); /* report tool id */
input_report_key(input, wacom->tool[idx], 1);
input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
wacom->reporting_data = true;
@@ -1435,11 +1444,59 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
return 0;
}
+static int wacom_equivalent_usage(int usage)
+{
+ if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) {
+ int subpage = (usage & 0xFF00) << 8;
+ int subusage = (usage & 0xFF);
+
+ if (subpage == WACOM_HID_SP_PAD ||
+ subpage == WACOM_HID_SP_BUTTON ||
+ subpage == WACOM_HID_SP_DIGITIZER ||
+ subpage == WACOM_HID_SP_DIGITIZERINFO ||
+ usage == WACOM_HID_WD_SENSE ||
+ usage == WACOM_HID_WD_SERIALHI ||
+ usage == WACOM_HID_WD_TOOLTYPE ||
+ usage == WACOM_HID_WD_DISTANCE ||
+ usage == WACOM_HID_WD_TOUCHSTRIP ||
+ usage == WACOM_HID_WD_TOUCHSTRIP2 ||
+ usage == WACOM_HID_WD_TOUCHRING ||
+ usage == WACOM_HID_WD_TOUCHRINGSTATUS) {
+ return usage;
+ }
+
+ if (subpage == HID_UP_UNDEFINED)
+ subpage = HID_UP_DIGITIZER;
+
+ return subpage | subusage;
+ }
+
+ return usage;
+}
+
static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
struct hid_field *field, __u8 type, __u16 code, int fuzz)
{
+ struct wacom *wacom = input_get_drvdata(input);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
int fmin = field->logical_minimum;
int fmax = field->logical_maximum;
+ unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
+ int resolution_code = code;
+
+ if (equivalent_usage == HID_DG_TWIST) {
+ resolution_code = ABS_RZ;
+ }
+
+ if (equivalent_usage == HID_GD_X) {
+ fmin += features->offset_left;
+ fmax -= features->offset_right;
+ }
+ if (equivalent_usage == HID_GD_Y) {
+ fmin += features->offset_top;
+ fmax -= features->offset_bottom;
+ }
usage->type = type;
usage->code = code;
@@ -1450,7 +1507,7 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
case EV_ABS:
input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
input_abs_set_res(input, code,
- hidinput_calc_abs_res(field, code));
+ hidinput_calc_abs_res(field, resolution_code));
break;
case EV_KEY:
input_set_capability(input, EV_KEY, code);
@@ -1458,6 +1515,172 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
case EV_MSC:
input_set_capability(input, EV_MSC, code);
break;
+ case EV_SW:
+ input_set_capability(input, EV_SW, code);
+ break;
+ }
+}
+
+static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+ struct input_dev *input = wacom_wac->pad_input;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+ switch (equivalent_usage) {
+ case WACOM_HID_WD_BATTERY_LEVEL:
+ case WACOM_HID_WD_BATTERY_CHARGING:
+ features->quirks |= WACOM_QUIRK_BATTERY;
+ break;
+ case WACOM_HID_WD_ACCELEROMETER_X:
+ __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_ACCELEROMETER_Y:
+ __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_Y, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_ACCELEROMETER_Z:
+ __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_Z, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_BUTTONHOME:
+ case WACOM_HID_WD_BUTTONUP:
+ case WACOM_HID_WD_BUTTONDOWN:
+ case WACOM_HID_WD_BUTTONLEFT:
+ case WACOM_HID_WD_BUTTONRIGHT:
+ case WACOM_HID_WD_BUTTONCENTER:
+ wacom_map_usage(input, usage, field, EV_KEY,
+ wacom_numbered_button_to_key(features->numbered_buttons),
+ 0);
+ features->numbered_buttons++;
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHONOFF:
+ wacom_map_usage(input, usage, field, EV_SW, SW_MUTE_DEVICE, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHSTRIP:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_RX, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHSTRIP2:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_RY, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHRING:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ }
+
+ switch (equivalent_usage & 0xfffffff0) {
+ case WACOM_HID_WD_EXPRESSKEY00:
+ wacom_map_usage(input, usage, field, EV_KEY,
+ wacom_numbered_button_to_key(features->numbered_buttons),
+ 0);
+ features->numbered_buttons++;
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ }
+}
+
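
A quick standalone check (not part of the patch) of the masking used in the second switch above: ANDing with 0xfffffff0 collapses all sixteen expresskey usages, 0xff0d0910 through 0xff0d091f, onto the WACOM_HID_WD_EXPRESSKEY00 case.

/* Verify that the low-nibble mask folds EXPRESSKEY00..0F together. */
#include <assert.h>
#include <stdio.h>

#define WACOM_HID_UP_WACOMDIGITIZER	0xff0d0000u
#define WACOM_HID_WD_EXPRESSKEY00	(WACOM_HID_UP_WACOMDIGITIZER | 0x0910)

int main(void)
{
	unsigned int u;

	for (u = WACOM_HID_WD_EXPRESSKEY00;
	     u < WACOM_HID_WD_EXPRESSKEY00 + 16; u++)
		assert((u & 0xfffffff0) == WACOM_HID_WD_EXPRESSKEY00);

	printf("all 16 expresskey usages hit the EXPRESSKEY00 case\n");
	return 0;
}
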
+static void wacom_wac_pad_battery_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+ switch (equivalent_usage) {
+ case WACOM_HID_WD_BATTERY_LEVEL:
+ wacom_wac->hid_data.battery_capacity = value;
+ wacom_wac->hid_data.bat_connected = 1;
+ break;
+
+ case WACOM_HID_WD_BATTERY_CHARGING:
+ wacom_wac->hid_data.bat_charging = value;
+ wacom_wac->hid_data.ps_connected = value;
+ wacom_wac->hid_data.bat_connected = 1;
+ break;
+ }
+}
+
+static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct input_dev *input = wacom_wac->pad_input;
+ struct wacom_features *features = &wacom_wac->features;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+ if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) {
+ wacom_wac->hid_data.inrange_state |= value;
+ }
+
+ switch (equivalent_usage) {
+ case WACOM_HID_WD_TOUCHRINGSTATUS:
+ break;
+
+ default:
+ features->input_event_flag = true;
+ input_event(input, usage->type, usage->code, value);
+ break;
+ }
+}
+
+static void wacom_wac_pad_pre_report(struct hid_device *hdev,
+ struct hid_report *report)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
+ wacom_wac->hid_data.inrange_state = 0;
+}
+
+static void wacom_wac_pad_battery_report(struct hid_device *hdev,
+ struct hid_report *report)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+
+ if (features->quirks & WACOM_QUIRK_BATTERY) {
+ int capacity = wacom_wac->hid_data.battery_capacity;
+ bool charging = wacom_wac->hid_data.bat_charging;
+ bool connected = wacom_wac->hid_data.bat_connected;
+ bool powered = wacom_wac->hid_data.ps_connected;
+
+ wacom_notify_battery(wacom_wac, capacity, charging,
+ connected, powered);
+ }
+}
+
+static void wacom_wac_pad_report(struct hid_device *hdev,
+ struct hid_report *report)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+ struct input_dev *input = wacom_wac->pad_input;
+ bool active = wacom_wac->hid_data.inrange_state != 0;
+
+ /* report prox for expresskey events */
+ if (wacom_equivalent_usage(report->field[0]->physical) == HID_DG_TABLETFUNCTIONKEY) {
+ features->input_event_flag = true;
+ input_event(input, EV_ABS, ABS_MISC, active ? PAD_DEVICE_ID : 0);
+ }
+
+ if (features->input_event_flag) {
+ features->input_event_flag = false;
+ input_sync(input);
}
}
@@ -1466,25 +1689,43 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
struct input_dev *input = wacom_wac->pen_input;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_GD_X:
wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 4);
break;
case HID_GD_Y:
wacom_map_usage(input, usage, field, EV_ABS, ABS_Y, 4);
break;
+ case WACOM_HID_WD_DISTANCE:
+ case HID_GD_Z:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_DISTANCE, 0);
+ break;
case HID_DG_TIPPRESSURE:
wacom_map_usage(input, usage, field, EV_ABS, ABS_PRESSURE, 0);
break;
case HID_DG_INRANGE:
wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_PEN, 0);
break;
+ case HID_DG_BATTERYSTRENGTH:
+ features->quirks |= WACOM_QUIRK_BATTERY;
+ break;
case HID_DG_INVERT:
wacom_map_usage(input, usage, field, EV_KEY,
BTN_TOOL_RUBBER, 0);
break;
+ case HID_DG_TILT_X:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_TILT_X, 0);
+ break;
+ case HID_DG_TILT_Y:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_TILT_Y, 0);
+ break;
+ case HID_DG_TWIST:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_Z, 0);
+ break;
case HID_DG_ERASER:
case HID_DG_TIPSWITCH:
wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0);
@@ -1498,39 +1739,131 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
case HID_DG_TOOLSERIALNUMBER:
wacom_map_usage(input, usage, field, EV_MSC, MSC_SERIAL, 0);
break;
+ case WACOM_HID_WD_SENSE:
+ features->quirks |= WACOM_QUIRK_SENSE;
+ wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_PEN, 0);
+ break;
+ case WACOM_HID_WD_SERIALHI:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_MISC, 0);
+ set_bit(EV_KEY, input->evbit);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
+ input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER);
+ input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
+ input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
+ input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
+ input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+ break;
+ case WACOM_HID_WD_FINGERWHEEL:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
+ break;
}
}
-static int wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field,
+static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
struct input_dev *input = wacom_wac->pen_input;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- /* checking which Tool / tip switch to send */
- switch (usage->hid) {
+ switch (equivalent_usage) {
+ case HID_GD_Z:
+ /*
+ * HID_GD_Z "should increase as the control's position is
+ * moved from high to low", while ABS_DISTANCE instead
+ * increases in value as the tool moves from low to high.
+ */
+ value = field->logical_maximum - value;
+ break;
case HID_DG_INRANGE:
wacom_wac->hid_data.inrange_state = value;
- return 0;
+ if (!(features->quirks & WACOM_QUIRK_SENSE))
+ wacom_wac->hid_data.sense_state = value;
+ return;
+ case HID_DG_BATTERYSTRENGTH:
+ wacom_wac->hid_data.battery_capacity = value;
+ wacom_wac->hid_data.bat_connected = 1;
+ break;
case HID_DG_INVERT:
wacom_wac->hid_data.invert_state = value;
- return 0;
+ return;
case HID_DG_ERASER:
case HID_DG_TIPSWITCH:
wacom_wac->hid_data.tipswitch |= value;
- return 0;
+ return;
+ case HID_DG_TOOLSERIALNUMBER:
+ wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
+ wacom_wac->serial[0] |= value;
+ return;
+ case WACOM_HID_WD_SENSE:
+ wacom_wac->hid_data.sense_state = value;
+ return;
+ case WACOM_HID_WD_SERIALHI:
+ wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
+ wacom_wac->serial[0] |= ((__u64)value) << 32;
+ /*
+ * Non-USI EMR devices may contain additional tool type
+ * information here. See WACOM_HID_WD_TOOLTYPE case for
+ * more details.
+ */
+ if (value >> 20 == 1) {
+ wacom_wac->id[0] |= value & 0xFFFFF;
+ }
+ return;
+ case WACOM_HID_WD_TOOLTYPE:
+ /*
+ * Some devices (MobileStudio Pro, and possibly later
+ * devices as well) do not return the complete tool
+ * type in their WACOM_HID_WD_TOOLTYPE usage. Use a
+ * bitwise OR so the complete value can be built
+ * up over time :(
+ */
+ wacom_wac->id[0] |= value;
+ return;
+ case WACOM_HID_WD_OFFSETLEFT:
+ if (features->offset_left && value != features->offset_left)
+ hid_warn(hdev, "%s: overriding existing left offset "
+ "%d -> %d\n", __func__, value,
+ features->offset_left);
+ features->offset_left = value;
+ return;
+ case WACOM_HID_WD_OFFSETRIGHT:
+ if (features->offset_right && value != features->offset_right)
+ hid_warn(hdev, "%s: overriding existing right offset "
+ "%d -> %d\n", __func__, value,
+ features->offset_right);
+ features->offset_right = value;
+ return;
+ case WACOM_HID_WD_OFFSETTOP:
+ if (features->offset_top && value != features->offset_top)
+ hid_warn(hdev, "%s: overriding existing top offset "
+ "%d -> %d\n", __func__, value,
+ features->offset_top);
+ features->offset_top = value;
+ return;
+ case WACOM_HID_WD_OFFSETBOTTOM:
+ if (features->offset_bottom && value != features->offset_bottom)
+ hid_warn(hdev, "%s: overriding existing bottom offset "
+ "%d -> %d\n", __func__, value,
+ features->offset_bottom);
+ features->offset_bottom = value;
+ return;
}
/* send pen events only when touch is up or forced out
* or touch arbitration is off
*/
if (!usage->type || delay_pen_events(wacom_wac))
- return 0;
+ return;
- input_event(input, usage->type, usage->code, value);
+ /* send pen events only when the pen is in/entering/leaving proximity */
+ if (!wacom_wac->hid_data.inrange_state && !wacom_wac->tool[0])
+ return;
- return 0;
+ input_event(input, usage->type, usage->code, value);
}
static void wacom_wac_pen_pre_report(struct hid_device *hdev,
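
The serial handling above splits one logical 64-bit value across two HID fields. The sketch below (plain userspace C with a made-up SERIALHI value) shows how the halves are combined and why the "value >> 20 == 1" marker tested here is the same thing as the "serial[0] >> 52 == 1" test in wacom_wac_pen_report() further down:

/* Illustration only: assemble the 64-bit serial from its two fields. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t serial = 0;
	uint32_t serial_lo = 0xdeadbeef;  /* HID_DG_TOOLSERIALNUMBER (made up) */
	uint32_t serial_hi = 0x00108802;  /* WACOM_HID_WD_SERIALHI: EMR marker
					     in bits 20+, made-up tool bits below */

	/* HID_DG_TOOLSERIALNUMBER: replace the low 32 bits */
	serial = (serial & ~0xFFFFFFFFULL) | serial_lo;

	/* WACOM_HID_WD_SERIALHI: replace the upper half */
	serial = (serial & 0xFFFFFFFFULL) | ((uint64_t)serial_hi << 32);

	printf("serial     = 0x%016llx\n", (unsigned long long)serial);
	printf("EMR marker = %llu  (serial >> 52 == serial_hi >> 20)\n",
	       (unsigned long long)(serial >> 52));
	printf("tool bits  = 0x%05x (low 20 bits of SERIALHI)\n",
	       serial_hi & 0xFFFFF);
	return 0;
}
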
@@ -1546,24 +1879,53 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct input_dev *input = wacom_wac->pen_input;
bool prox = wacom_wac->hid_data.inrange_state;
+ bool range = wacom_wac->hid_data.sense_state;
- if (!wacom_wac->shared->stylus_in_proximity) /* first in prox */
+ if (!wacom_wac->tool[0] && prox) { /* first in prox */
/* Going into proximity select tool */
- wacom_wac->tool[0] = wacom_wac->hid_data.invert_state ?
- BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+ if (wacom_wac->hid_data.invert_state)
+ wacom_wac->tool[0] = BTN_TOOL_RUBBER;
+ else if (wacom_wac->id[0])
+ wacom_wac->tool[0] = wacom_intuos_get_tool_type(wacom_wac->id[0]);
+ else
+ wacom_wac->tool[0] = BTN_TOOL_PEN;
+ }
/* keep pen state for touch events */
- wacom_wac->shared->stylus_in_proximity = prox;
+ wacom_wac->shared->stylus_in_proximity = range;
- if (!delay_pen_events(wacom_wac)) {
+ if (!delay_pen_events(wacom_wac) && wacom_wac->tool[0]) {
+ int id = wacom_wac->id[0];
+
+ /*
+ * Non-USI EMR tools should have their IDs mangled to
+ * match the legacy behavior of wacom_intuos_general
+ */
+ if (wacom_wac->serial[0] >> 52 == 1)
+ id = wacom_intuos_id_mangle(id);
+
+ /*
+ * To ensure compatibility with xf86-input-wacom, we should
+ * report the BTN_TOOL_* event prior to the ABS_MISC or
+ * MSC_SERIAL events.
+ */
input_report_key(input, BTN_TOUCH,
wacom_wac->hid_data.tipswitch);
input_report_key(input, wacom_wac->tool[0], prox);
+ if (wacom_wac->serial[0]) {
+ input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
+ input_report_abs(input, ABS_MISC, id);
+ }
wacom_wac->hid_data.tipswitch = false;
input_sync(input);
}
+
+ if (!prox) {
+ wacom_wac->tool[0] = 0;
+ wacom_wac->id[0] = 0;
+ }
}
static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
@@ -1573,8 +1935,9 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct input_dev *input = wacom_wac->touch_input;
unsigned touch_max = wacom_wac->features.touch_max;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_GD_X:
if (touch_max == 1)
wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 4);
@@ -1644,13 +2007,14 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
}
}
-static int wacom_wac_finger_event(struct hid_device *hdev,
+static void wacom_wac_finger_event(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage, __s32 value)
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_GD_X:
wacom_wac->hid_data.x = value;
break;
@@ -1673,11 +2037,9 @@ static int wacom_wac_finger_event(struct hid_device *hdev,
if (usage->usage_index + 1 == field->report_count) {
- if (usage->hid == wacom_wac->hid_data.last_slot_field)
+ if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
}
-
- return 0;
}
static void wacom_wac_finger_pre_report(struct hid_device *hdev,
@@ -1762,28 +2124,30 @@ void wacom_wac_usage_mapping(struct hid_device *hdev,
/* currently, only direct devices have proper hid report descriptors */
features->device_type |= WACOM_DEVICETYPE_DIRECT;
- if (WACOM_PEN_FIELD(field))
- return wacom_wac_pen_usage_mapping(hdev, field, usage);
-
- if (WACOM_FINGER_FIELD(field))
- return wacom_wac_finger_usage_mapping(hdev, field, usage);
+ if (WACOM_PAD_FIELD(field))
+ wacom_wac_pad_usage_mapping(hdev, field, usage);
+ else if (WACOM_PEN_FIELD(field))
+ wacom_wac_pen_usage_mapping(hdev, field, usage);
+ else if (WACOM_FINGER_FIELD(field))
+ wacom_wac_finger_usage_mapping(hdev, field, usage);
}
-int wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
+void wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct wacom *wacom = hid_get_drvdata(hdev);
if (wacom->wacom_wac.features.type != HID_GENERIC)
- return 0;
-
- if (WACOM_PEN_FIELD(field))
- return wacom_wac_pen_event(hdev, field, usage, value);
-
- if (WACOM_FINGER_FIELD(field))
- return wacom_wac_finger_event(hdev, field, usage, value);
+ return;
- return 0;
+ if (WACOM_PAD_FIELD(field)) {
+ wacom_wac_pad_battery_event(hdev, field, usage, value);
+ if (wacom->wacom_wac.pad_input)
+ wacom_wac_pad_event(hdev, field, usage, value);
+ } else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+ wacom_wac_pen_event(hdev, field, usage, value);
+ else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
+ wacom_wac_finger_event(hdev, field, usage, value);
}
static void wacom_report_events(struct hid_device *hdev, struct hid_report *report)
@@ -1814,19 +2178,23 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
if (wacom_wac->features.type != HID_GENERIC)
return;
- if (WACOM_PEN_FIELD(field))
+ if (WACOM_PAD_FIELD(field) && wacom->wacom_wac.pad_input)
+ wacom_wac_pad_pre_report(hdev, report);
+ else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
wacom_wac_pen_pre_report(hdev, report);
-
- if (WACOM_FINGER_FIELD(field))
+ else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
wacom_wac_finger_pre_report(hdev, report);
wacom_report_events(hdev, report);
- if (WACOM_PEN_FIELD(field))
- return wacom_wac_pen_report(hdev, report);
-
- if (WACOM_FINGER_FIELD(field))
- return wacom_wac_finger_report(hdev, report);
+ if (WACOM_PAD_FIELD(field)) {
+ wacom_wac_pad_battery_report(hdev, report);
+ if (wacom->wacom_wac.pad_input)
+ wacom_wac_pad_report(hdev, report);
+ } else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+ wacom_wac_pen_report(hdev, report);
+ else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
+ wacom_wac_finger_report(hdev, report);
}
static int wacom_bpt_touch(struct wacom_wac *wacom)
@@ -2399,6 +2767,8 @@ void wacom_setup_device_quirks(struct wacom *wacom)
struct wacom_features *features = &wacom->wacom_wac.features;
/* The pen and pad share the same interface on most devices */
+ if (features->numbered_buttons > 0)
+ features->device_type |= WACOM_DEVICETYPE_PAD;
if (features->type == GRAPHIRE_BT || features->type == WACOM_G4 ||
features->type == DTUS ||
(features->type >= INTUOS3S && features->type <= WACOM_MO)) {
@@ -2448,7 +2818,7 @@ void wacom_setup_device_quirks(struct wacom *wacom)
/*
* Raw Wacom-mode pen and touch events both come from interface
* 0, whose HID descriptor has an application usage of 0xFF0D
- * (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
+ * (i.e., WACOM_HID_WD_DIGITIZER). We route pen packets back
* out through the HID_GENERIC device created for interface 1,
* so rewrite this one to be of type WACOM_DEVICETYPE_TOUCH.
*/
@@ -2530,10 +2900,12 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(ABS_MISC, input_dev->absbit);
- input_set_abs_params(input_dev, ABS_X, features->x_min,
- features->x_max, features->x_fuzz, 0);
- input_set_abs_params(input_dev, ABS_Y, features->y_min,
- features->y_max, features->y_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_X, 0 + features->offset_left,
+ features->x_max - features->offset_right,
+ features->x_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0 + features->offset_top,
+ features->y_max - features->offset_bottom,
+ features->y_fuzz, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0,
features->pressure_max, features->pressure_fuzz, 0);
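
As a concrete example of the ranges this sets up: for the Cintiq 24HD entry (0xF4) further down, x_max/y_max become 104480/65600 and all four offsets are WACOM_CINTIQ_OFFSET, which the uniform +400 adjustments in that table suggest is 400. ABS_X is then advertised as [0 + 400, 104480 - 400] = [400, 104080] and ABS_Y as [400, 65200], i.e. exactly the ranges the old x_min/x_max pairs produced.
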
@@ -2769,17 +3141,29 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
return 0;
}
+static int wacom_numbered_button_to_key(int n)
+{
+ if (n < 10)
+ return BTN_0 + n;
+ else if (n < 16)
+ return BTN_A + (n-10);
+ else if (n < 18)
+ return BTN_BASE + (n-16);
+ else
+ return 0;
+}
+
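
For reference, a self-contained userspace copy of the helper above (using the standard evdev codes from <linux/input-event-codes.h>) makes the mapping easy to eyeball: buttons 0-9 land on BTN_0.., 10-15 on BTN_A.., 16-17 on BTN_BASE/BTN_BASE2, and anything from 18 on is dropped.

/* Print the evdev key code assigned to each numbered pad button. */
#include <stdio.h>
#include <linux/input-event-codes.h>	/* BTN_0, BTN_A, BTN_BASE */

static int numbered_button_to_key(int n)
{
	if (n < 10)
		return BTN_0 + n;
	else if (n < 16)
		return BTN_A + (n - 10);
	else if (n < 18)
		return BTN_BASE + (n - 16);
	else
		return 0;		/* beyond 18 buttons: unmapped */
}

int main(void)
{
	int i;

	for (i = 0; i < 20; i++)
		printf("button %2d -> key 0x%03x\n", i, numbered_button_to_key(i));
	return 0;
}
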
static void wacom_setup_numbered_buttons(struct input_dev *input_dev,
int button_count)
{
int i;
- for (i = 0; i < button_count && i < 10; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
- for (i = 10; i < button_count && i < 16; i++)
- __set_bit(BTN_A + (i-10), input_dev->keybit);
- for (i = 16; i < button_count && i < 18; i++)
- __set_bit(BTN_BASE + (i-16), input_dev->keybit);
+ for (i = 0; i < button_count; i++) {
+ int key = wacom_numbered_button_to_key(i);
+
+ if (key)
+ __set_bit(key, input_dev->keybit);
+ }
}
static void wacom_24hd_update_leds(struct wacom *wacom, int mask, int group)
@@ -2881,12 +3265,12 @@ static void wacom_report_numbered_buttons(struct input_dev *input_dev,
for (i = 0; i < wacom->led.count; i++)
wacom_update_led(wacom, button_count, mask, i);
- for (i = 0; i < button_count && i < 10; i++)
- input_report_key(input_dev, BTN_0 + i, mask & (1 << i));
- for (i = 10; i < button_count && i < 16; i++)
- input_report_key(input_dev, BTN_A + (i-10), mask & (1 << i));
- for (i = 16; i < button_count && i < 18; i++)
- input_report_key(input_dev, BTN_BASE + (i-16), mask & (1 << i));
+ for (i = 0; i < button_count; i++) {
+ int key = wacom_numbered_button_to_key(i);
+
+ if (key)
+ input_report_key(input_dev, key, mask & (1 << i));
+ }
}
int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
@@ -2906,8 +3290,12 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
__set_bit(ABS_MISC, input_dev->absbit);
/* kept for making legacy xf86-input-wacom accepting the pad */
- input_set_abs_params(input_dev, ABS_X, 0, 1, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, 1, 0, 0);
+ if (!(input_dev->absinfo && (input_dev->absinfo[ABS_X].minimum ||
+ input_dev->absinfo[ABS_X].maximum)))
+ input_set_abs_params(input_dev, ABS_X, 0, 1, 0, 0);
+ if (!(input_dev->absinfo && (input_dev->absinfo[ABS_Y].minimum ||
+ input_dev->absinfo[ABS_Y].maximum)))
+ input_set_abs_params(input_dev, ABS_Y, 0, 1, 0, 0);
/* kept for making udev and libwacom accepting the pad */
__set_bit(BTN_STYLUS, input_dev->keybit);
@@ -3027,6 +3415,9 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
break;
+ case HID_GENERIC:
+ break;
+
default:
/* no pad supported */
return -ENODEV;
@@ -3233,26 +3624,30 @@ static const struct wacom_features wacom_features_0x317 =
INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 16,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0xF4 =
- { "Wacom Cintiq 24HD", 104080, 65200, 2047, 63,
+ { "Wacom Cintiq 24HD", 104480, 65600, 2047, 63,
WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 16,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0xF8 =
- { "Wacom Cintiq 24HD touch", 104080, 65200, 2047, 63, /* Pen */
+ { "Wacom Cintiq 24HD touch", 104480, 65600, 2047, 63, /* Pen */
WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 16,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
static const struct wacom_features wacom_features_0xF6 =
{ "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x32A =
- { "Wacom Cintiq 27QHD", 119740, 67520, 2047, 63,
+ { "Wacom Cintiq 27QHD", 120140, 67920, 2047, 63,
WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 0,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x32B =
- { "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63,
+ { "Wacom Cintiq 27QHD touch", 120140, 67920, 2047, 63,
WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 0,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x32C };
static const struct wacom_features wacom_features_0x32C =
{ "Wacom Cintiq 27QHD touch", .type = WACOM_27QHDT,
@@ -3267,13 +3662,15 @@ static const struct wacom_features wacom_features_0xC6 =
{ "Wacom Cintiq 12WX", 53020, 33440, 1023, 63,
WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 10 };
static const struct wacom_features wacom_features_0x304 =
- { "Wacom Cintiq 13HD", 59152, 33448, 1023, 63,
+ { "Wacom Cintiq 13HD", 59552, 33848, 1023, 63,
WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x333 =
- { "Wacom Cintiq 13HD touch", 59152, 33448, 2047, 63,
+ { "Wacom Cintiq 13HD touch", 59552, 33848, 2047, 63,
WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x335 };
static const struct wacom_features wacom_features_0x335 =
{ "Wacom Cintiq 13HD touch", .type = WACOM_24HDT, /* Touch */
@@ -3290,42 +3687,50 @@ static const struct wacom_features wacom_features_0xF0 =
{ "Wacom DTU1631", 34623, 19553, 511, 0,
DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xFB =
- { "Wacom DTU1031", 21896, 13760, 511, 0,
+ { "Wacom DTU1031", 22096, 13960, 511, 0,
DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x32F =
- { "Wacom DTU1031X", 22472, 12728, 511, 0,
+ { "Wacom DTU1031X", 22672, 12928, 511, 0,
DTUSX, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 0,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x336 =
- { "Wacom DTU1141", 23472, 13203, 1023, 0,
+ { "Wacom DTU1141", 23672, 13403, 1023, 0,
DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x57 =
- { "Wacom DTK2241", 95640, 54060, 2047, 63,
+ { "Wacom DTK2241", 95840, 54260, 2047, 63,
DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x59 = /* Pen */
- { "Wacom DTH2242", 95640, 54060, 2047, 63,
+ { "Wacom DTH2242", 95840, 54260, 2047, 63,
DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5D };
static const struct wacom_features wacom_features_0x5D = /* Touch */
{ "Wacom DTH2242", .type = WACOM_24HDT,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x59, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0xCC =
- { "Wacom Cintiq 21UX2", 86800, 65200, 2047, 63,
+ { "Wacom Cintiq 21UX2", 87200, 65600, 2047, 63,
WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0xFA =
- { "Wacom Cintiq 22HD", 95440, 53860, 2047, 63,
+ { "Wacom Cintiq 22HD", 95840, 54260, 2047, 63,
WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x5B =
- { "Wacom Cintiq 22HDT", 95440, 53860, 2047, 63,
+ { "Wacom Cintiq 22HDT", 95840, 54260, 2047, 63,
WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e };
static const struct wacom_features wacom_features_0x5E =
{ "Wacom Cintiq 22HDT", .type = WACOM_24HDT,
@@ -3469,18 +3874,20 @@ static const struct wacom_features wacom_features_0x6004 =
{ "ISD-V4", 12800, 8000, 255, 0,
TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x307 =
- { "Wacom ISDv5 307", 59152, 33448, 2047, 63,
+ { "Wacom ISDv5 307", 59552, 33848, 2047, 63,
CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x309 };
static const struct wacom_features wacom_features_0x309 =
{ "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x30A =
- { "Wacom ISDv5 30A", 59152, 33448, 2047, 63,
+ { "Wacom ISDv5 30A", 59552, 33848, 2047, 63,
CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30C };
static const struct wacom_features wacom_features_0x30C =
{ "Wacom ISDv5 30C", .type = WACOM_24HDT, /* Touch */
@@ -3496,6 +3903,7 @@ static const struct wacom_features wacom_features_0x325 =
{ "Wacom ISDv5 325", 59552, 33848, 2047, 63,
CINTIQ_COMPANION_2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 11,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x326 };
static const struct wacom_features wacom_features_0x326 = /* Touch */
{ "Wacom ISDv5 326", .type = HID_GENERIC, .oVid = USB_VENDOR_ID_WACOM,
@@ -3525,8 +3933,9 @@ static const struct wacom_features wacom_features_0x33E =
INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x343 =
- { "Wacom DTK1651", 34616, 19559, 1023, 0,
+ { "Wacom DTK1651", 34816, 19759, 1023, 0,
DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_HID_ANY_ID =
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 324c40b0c119..fb0e50acb10d 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -74,6 +74,7 @@
/* device quirks */
#define WACOM_QUIRK_BBTOUCH_LOWRES 0x0001
+#define WACOM_QUIRK_SENSE 0x0002
#define WACOM_QUIRK_BATTERY 0x0008
/* device types */
@@ -84,23 +85,66 @@
#define WACOM_DEVICETYPE_WL_MONITOR 0x0008
#define WACOM_DEVICETYPE_DIRECT 0x0010
-#define WACOM_VENDORDEFINED_PEN 0xff0d0001
-#define WACOM_G9_PAGE 0xff090000
-#define WACOM_G9_DIGITIZER (WACOM_G9_PAGE | 0x02)
-#define WACOM_G9_TOUCHSCREEN (WACOM_G9_PAGE | 0x11)
-#define WACOM_G11_PAGE 0xff110000
-#define WACOM_G11_DIGITIZER (WACOM_G11_PAGE | 0x02)
-#define WACOM_G11_TOUCHSCREEN (WACOM_G11_PAGE | 0x11)
+#define WACOM_HID_UP_WACOMDIGITIZER 0xff0d0000
+#define WACOM_HID_SP_PAD 0x00040000
+#define WACOM_HID_SP_BUTTON 0x00090000
+#define WACOM_HID_SP_DIGITIZER 0x000d0000
+#define WACOM_HID_SP_DIGITIZERINFO 0x00100000
+#define WACOM_HID_WD_DIGITIZER (WACOM_HID_UP_WACOMDIGITIZER | 0x01)
+#define WACOM_HID_WD_SENSE (WACOM_HID_UP_WACOMDIGITIZER | 0x36)
+#define WACOM_HID_WD_DIGITIZERFNKEYS (WACOM_HID_UP_WACOMDIGITIZER | 0x39)
+#define WACOM_HID_WD_SERIALHI (WACOM_HID_UP_WACOMDIGITIZER | 0x5c)
+#define WACOM_HID_WD_TOOLTYPE (WACOM_HID_UP_WACOMDIGITIZER | 0x77)
+#define WACOM_HID_WD_DISTANCE (WACOM_HID_UP_WACOMDIGITIZER | 0x0132)
+#define WACOM_HID_WD_TOUCHSTRIP (WACOM_HID_UP_WACOMDIGITIZER | 0x0136)
+#define WACOM_HID_WD_TOUCHSTRIP2 (WACOM_HID_UP_WACOMDIGITIZER | 0x0137)
+#define WACOM_HID_WD_TOUCHRING (WACOM_HID_UP_WACOMDIGITIZER | 0x0138)
+#define WACOM_HID_WD_TOUCHRINGSTATUS (WACOM_HID_UP_WACOMDIGITIZER | 0x0139)
+#define WACOM_HID_WD_ACCELEROMETER_X (WACOM_HID_UP_WACOMDIGITIZER | 0x0401)
+#define WACOM_HID_WD_ACCELEROMETER_Y (WACOM_HID_UP_WACOMDIGITIZER | 0x0402)
+#define WACOM_HID_WD_ACCELEROMETER_Z (WACOM_HID_UP_WACOMDIGITIZER | 0x0403)
+#define WACOM_HID_WD_BATTERY_CHARGING (WACOM_HID_UP_WACOMDIGITIZER | 0x0404)
+#define WACOM_HID_WD_BATTERY_LEVEL (WACOM_HID_UP_WACOMDIGITIZER | 0x043b)
+#define WACOM_HID_WD_EXPRESSKEY00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0910)
+#define WACOM_HID_WD_EXPRESSKEYCAP00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0950)
+#define WACOM_HID_WD_BUTTONHOME (WACOM_HID_UP_WACOMDIGITIZER | 0x0990)
+#define WACOM_HID_WD_BUTTONUP (WACOM_HID_UP_WACOMDIGITIZER | 0x0991)
+#define WACOM_HID_WD_BUTTONDOWN (WACOM_HID_UP_WACOMDIGITIZER | 0x0992)
+#define WACOM_HID_WD_BUTTONLEFT (WACOM_HID_UP_WACOMDIGITIZER | 0x0993)
+#define WACOM_HID_WD_BUTTONRIGHT (WACOM_HID_UP_WACOMDIGITIZER | 0x0994)
+#define WACOM_HID_WD_BUTTONCENTER (WACOM_HID_UP_WACOMDIGITIZER | 0x0995)
+#define WACOM_HID_WD_TOUCHONOFF (WACOM_HID_UP_WACOMDIGITIZER | 0x0996)
+#define WACOM_HID_WD_FINGERWHEEL (WACOM_HID_UP_WACOMDIGITIZER | 0x0d03)
+#define WACOM_HID_WD_OFFSETLEFT (WACOM_HID_UP_WACOMDIGITIZER | 0x0d30)
+#define WACOM_HID_WD_OFFSETTOP (WACOM_HID_UP_WACOMDIGITIZER | 0x0d31)
+#define WACOM_HID_WD_OFFSETRIGHT (WACOM_HID_UP_WACOMDIGITIZER | 0x0d32)
+#define WACOM_HID_WD_OFFSETBOTTOM (WACOM_HID_UP_WACOMDIGITIZER | 0x0d33)
+#define WACOM_HID_WD_DATAMODE (WACOM_HID_UP_WACOMDIGITIZER | 0x1002)
+#define WACOM_HID_WD_DIGITIZERINFO (WACOM_HID_UP_WACOMDIGITIZER | 0x1013)
+#define WACOM_HID_UP_G9 0xff090000
+#define WACOM_HID_G9_PEN (WACOM_HID_UP_G9 | 0x02)
+#define WACOM_HID_G9_TOUCHSCREEN (WACOM_HID_UP_G9 | 0x11)
+#define WACOM_HID_UP_G11 0xff110000
+#define WACOM_HID_G11_PEN (WACOM_HID_UP_G11 | 0x02)
+#define WACOM_HID_G11_TOUCHSCREEN (WACOM_HID_UP_G11 | 0x11)
+
+#define WACOM_PAD_FIELD(f) (((f)->physical == HID_DG_TABLETFUNCTIONKEY) || \
+ ((f)->physical == WACOM_HID_WD_DIGITIZERFNKEYS) || \
+ ((f)->physical == WACOM_HID_WD_DIGITIZERINFO))
#define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \
((f)->physical == HID_DG_STYLUS) || \
((f)->physical == HID_DG_PEN) || \
((f)->application == HID_DG_PEN) || \
((f)->application == HID_DG_DIGITIZER) || \
- ((f)->application == WACOM_VENDORDEFINED_PEN))
+ ((f)->application == WACOM_HID_WD_DIGITIZER) || \
+ ((f)->application == WACOM_HID_G9_PEN) || \
+ ((f)->application == WACOM_HID_G11_PEN))
#define WACOM_FINGER_FIELD(f) (((f)->logical == HID_DG_FINGER) || \
((f)->physical == HID_DG_FINGER) || \
- ((f)->application == HID_DG_TOUCHSCREEN))
+ ((f)->application == HID_DG_TOUCHSCREEN) || \
+ ((f)->application == WACOM_HID_G9_TOUCHSCREEN) || \
+ ((f)->application == WACOM_HID_G11_TOUCHSCREEN))
enum {
PENPARTNER = 0,
@@ -167,8 +211,10 @@ struct wacom_features {
int x_resolution;
int y_resolution;
int numbered_buttons;
- int x_min;
- int y_min;
+ int offset_left;
+ int offset_right;
+ int offset_top;
+ int offset_bottom;
int device_type;
int x_phy;
int y_phy;
@@ -186,6 +232,7 @@ struct wacom_features {
int pktlen;
bool check_for_hid_type;
int hid_type;
+ bool input_event_flag;
};
struct wacom_shared {
@@ -202,6 +249,7 @@ struct wacom_shared {
struct hid_data {
__s16 inputmode; /* InputMode HID feature, -1 if non-existent */
__s16 inputmode_index; /* InputMode HID feature index in the report */
+ bool sense_state;
bool inrange_state;
bool invert_state;
bool tipswitch;
@@ -217,6 +265,10 @@ struct hid_data {
int last_slot_field;
int num_expected;
int num_received;
+ int battery_capacity;
+ int bat_charging;
+ int bat_connected;
+ int ps_connected;
};
struct wacom_remote_data {
@@ -234,7 +286,7 @@ struct wacom_wac {
unsigned char data[WACOM_PKGLEN_MAX];
int tool[2];
int id[2];
- __u32 serial[2];
+ __u64 serial[2];
bool reporting_data;
struct wacom_features features;
struct wacom_shared *shared;
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 16f91c8490fe..5fb4c6d9209b 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -39,7 +39,7 @@
* vmbus_setevent- Trigger an event notification on the specified
* channel.
*/
-static void vmbus_setevent(struct vmbus_channel *channel)
+void vmbus_setevent(struct vmbus_channel *channel)
{
struct hv_monitor_page *monitorpage;
@@ -65,6 +65,7 @@ static void vmbus_setevent(struct vmbus_channel *channel)
vmbus_set_event(channel);
}
}
+EXPORT_SYMBOL_GPL(vmbus_setevent);
/*
* vmbus_open - Open the specified channel.
@@ -635,8 +636,6 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
struct kvec bufferlist[3];
u64 aligned_data = 0;
- int ret;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
int num_vecs = ((bufferlen != 0) ? 3 : 1);
@@ -656,33 +655,9 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
- &signal, lock, channel->signal_policy);
-
- /*
- * Signalling the host is conditional on many factors:
- * 1. The ring state changed from being empty to non-empty.
- * This is tracked by the variable "signal".
- * 2. The variable kick_q tracks if more data will be placed
- * on the ring. We will not signal if more data is
- * to be placed.
- *
- * Based on the channel signal state, we will decide
- * which signaling policy will be applied.
- *
- * If we cannot write to the ring-buffer; signal the host
- * even if we may not have written anything. This is a rare
- * enough condition that it should not matter.
- * NOTE: in this case, the hvsock channel is an exception, because
- * it looks the host side's hvsock implementation has a throttling
- * mechanism which can hurt the performance otherwise.
- */
-
- if (((ret == 0) && kick_q && signal) ||
- (ret && !is_hvsock_channel(channel)))
- vmbus_setevent(channel);
+ return hv_ringbuffer_write(channel, bufferlist, num_vecs,
+ lock, kick_q);
- return ret;
}
EXPORT_SYMBOL(vmbus_sendpacket_ctl);
@@ -723,7 +698,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
u32 flags,
bool kick_q)
{
- int ret;
int i;
struct vmbus_channel_packet_page_buffer desc;
u32 descsize;
@@ -731,7 +705,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
if (pagecount > MAX_PAGE_BUFFER_COUNT)
@@ -769,29 +742,8 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
- &signal, lock, channel->signal_policy);
-
- /*
- * Signalling the host is conditional on many factors:
- * 1. The ring state changed from being empty to non-empty.
- * This is tracked by the variable "signal".
- * 2. The variable kick_q tracks if more data will be placed
- * on the ring. We will not signal if more data is
- * to be placed.
- *
- * Based on the channel signal state, we will decide
- * which signaling policy will be applied.
- *
- * If we cannot write to the ring-buffer; signal the host
- * even if we may not have written anything. This is a rare
- * enough condition that it should not matter.
- */
-
- if (((ret == 0) && kick_q && signal) || (ret))
- vmbus_setevent(channel);
-
- return ret;
+ return hv_ringbuffer_write(channel, bufferlist, 3,
+ lock, kick_q);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
@@ -822,12 +774,10 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
u32 desc_size,
void *buffer, u32 bufferlen, u64 requestid)
{
- int ret;
u32 packetlen;
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
packetlen = desc_size + bufferlen;
@@ -848,13 +798,8 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
- &signal, lock, channel->signal_policy);
-
- if (ret == 0 && signal)
- vmbus_setevent(channel);
-
- return ret;
+ return hv_ringbuffer_write(channel, bufferlist, 3,
+ lock, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
@@ -866,14 +811,12 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
struct hv_multipage_buffer *multi_pagebuffer,
void *buffer, u32 bufferlen, u64 requestid)
{
- int ret;
struct vmbus_channel_packet_multipage_buffer desc;
u32 descsize;
u32 packetlen;
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
multi_pagebuffer->len);
@@ -913,13 +856,8 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
- &signal, lock, channel->signal_policy);
-
- if (ret == 0 && signal)
- vmbus_setevent(channel);
-
- return ret;
+ return hv_ringbuffer_write(channel, bufferlist, 3,
+ lock, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
@@ -941,16 +879,9 @@ __vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
bool raw)
{
- int ret;
- bool signal = false;
+ return hv_ringbuffer_read(channel, buffer, bufferlen,
+ buffer_actual_len, requestid, raw);
- ret = hv_ringbuffer_read(&channel->inbound, buffer, bufferlen,
- buffer_actual_len, requestid, &signal, raw);
-
- if (signal)
- vmbus_setevent(channel);
-
- return ret;
}
int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 96a85cd39580..26b419203f16 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -134,7 +134,7 @@ static const struct vmbus_device vmbus_devs[] = {
},
/* Unknown GUID */
- { .dev_type = HV_UNKOWN,
+ { .dev_type = HV_UNKNOWN,
.perf_device = false,
},
};
@@ -163,9 +163,9 @@ static u16 hv_get_dev_type(const struct vmbus_channel *channel)
u16 i;
if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
- return HV_UNKOWN;
+ return HV_UNKNOWN;
- for (i = HV_IDE; i < HV_UNKOWN; i++) {
+ for (i = HV_IDE; i < HV_UNKNOWN; i++) {
if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
return i;
}
@@ -389,6 +389,7 @@ void vmbus_free_channels(void)
{
struct vmbus_channel *channel, *tmp;
+ mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
listentry) {
/* hv_process_channel_removal() needs this */
@@ -396,6 +397,7 @@ void vmbus_free_channels(void)
vmbus_device_unregister(channel->device_obj);
}
+ mutex_unlock(&vmbus_connection.channel_mutex);
}
/*
@@ -447,8 +449,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
}
dev_type = hv_get_dev_type(newchannel);
- if (dev_type == HV_NIC)
- set_channel_signal_state(newchannel, HV_SIGNAL_POLICY_EXPLICIT);
init_vp_index(newchannel, dev_type);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 78e6368a4423..6ce8b874e833 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -39,6 +39,7 @@ struct vmbus_connection vmbus_connection = {
.conn_state = DISCONNECTED,
.next_gpadl_handle = ATOMIC_INIT(0xE1E10),
};
+EXPORT_SYMBOL_GPL(vmbus_connection);
/*
* Negotiated protocol version with the host.
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 60dbd6cb4640..446802ae8f1b 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -575,7 +575,7 @@ void hv_synic_clockevents_cleanup(void)
if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
return;
- for_each_online_cpu(cpu)
+ for_each_present_cpu(cpu)
clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
}
@@ -594,8 +594,10 @@ void hv_synic_cleanup(void *arg)
return;
/* Turn off clockevent device */
- if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
+ if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) {
+ clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
hv_ce_shutdown(hv_context.clk_evt[cpu]);
+ }
rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index fdf8da929cbe..14c3dc4bd23c 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -564,6 +564,11 @@ struct hv_dynmem_device {
* next version to try.
*/
__u32 next_version;
+
+ /*
+ * The version negotiated with the host.
+ */
+ __u32 version;
};
static struct hv_dynmem_device dm_device;
@@ -645,6 +650,7 @@ static void hv_bring_pgs_online(struct hv_hotadd_state *has,
{
int i;
+ pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
for (i = 0; i < size; i++)
hv_page_online_one(has, pfn_to_page(start_pfn + i));
}
@@ -685,7 +691,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
(HA_CHUNK << PAGE_SHIFT));
if (ret) {
- pr_info("hot_add memory failed error is %d\n", ret);
+ pr_warn("hot_add memory failed error is %d\n", ret);
if (ret == -EEXIST) {
/*
* This error indicates that the error
@@ -814,6 +820,9 @@ static unsigned long handle_pg_range(unsigned long pg_start,
unsigned long old_covered_state;
unsigned long res = 0, flags;
+ pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
+ pg_start);
+
spin_lock_irqsave(&dm_device.ha_lock, flags);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
/*
@@ -1025,8 +1034,13 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
switch (info_hdr->type) {
case INFO_TYPE_MAX_PAGE_CNT:
- pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
- pr_info("Data Size is %d\n", info_hdr->data_size);
+ if (info_hdr->data_size == sizeof(__u64)) {
+ __u64 *max_page_count = (__u64 *)&info_hdr[1];
+
+ pr_info("INFO_TYPE_MAX_PAGE_CNT = %llu\n",
+ *max_page_count);
+ }
+
break;
default:
pr_info("Received Unknown type: %d\n", info_hdr->type);
@@ -1196,8 +1210,6 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
return num_pages;
}
-
-
static void balloon_up(struct work_struct *dummy)
{
unsigned int num_pages = dm_device.balloon_wrk.num_pages;
@@ -1224,6 +1236,10 @@ static void balloon_up(struct work_struct *dummy)
/* Refuse to balloon below the floor, keep the 2M granularity. */
if (avail_pages < num_pages || avail_pages - num_pages < floor) {
+ pr_warn("Balloon request will be partially fulfilled. %s\n",
+ avail_pages < num_pages ? "Not enough memory." :
+ "Balloon floor reached.");
+
num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
num_pages -= num_pages % PAGES_IN_2M;
}
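
To make the trimming concrete: assuming PAGES_IN_2M is 512 (2 MiB worth of 4 KiB pages, as the name suggests), a request that leaves 1300 pages available above the floor is trimmed to 1300 - (1300 % 512) = 1024 pages, i.e. exactly 4 MiB, so the balloon always grows in whole 2 MiB steps.
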
@@ -1245,6 +1261,9 @@ static void balloon_up(struct work_struct *dummy)
}
if (num_ballooned == 0 || num_ballooned == num_pages) {
+ pr_debug("Ballooned %u out of %u requested pages.\n",
+ num_pages, dm_device.balloon_wrk.num_pages);
+
bl_resp->more_pages = 0;
done = true;
dm_device.state = DM_INITIALIZED;
@@ -1292,12 +1311,16 @@ static void balloon_down(struct hv_dynmem_device *dm,
int range_count = req->range_count;
struct dm_unballoon_response resp;
int i;
+ unsigned int prev_pages_ballooned = dm->num_pages_ballooned;
for (i = 0; i < range_count; i++) {
free_balloon_pages(dm, &range_array[i]);
complete(&dm_device.config_event);
}
+ pr_debug("Freed %u ballooned pages.\n",
+ prev_pages_ballooned - dm->num_pages_ballooned);
+
if (req->more_pages == 1)
return;
@@ -1365,6 +1388,7 @@ static void version_resp(struct hv_dynmem_device *dm,
version_req.hdr.size = sizeof(struct dm_version_request);
version_req.hdr.trans_id = atomic_inc_return(&trans_id);
version_req.version.version = dm->next_version;
+ dm->version = version_req.version.version;
/*
* Set the next version to try in case current version fails.
@@ -1501,7 +1525,11 @@ static int balloon_probe(struct hv_device *dev,
struct dm_version_request version_req;
struct dm_capabilities cap_msg;
+#ifdef CONFIG_MEMORY_HOTPLUG
do_hot_add = hot_add;
+#else
+ do_hot_add = false;
+#endif
/*
* First allocate a send buffer.
@@ -1553,6 +1581,7 @@ static int balloon_probe(struct hv_device *dev,
version_req.hdr.trans_id = atomic_inc_return(&trans_id);
version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
version_req.is_last_attempt = 0;
+ dm_device.version = version_req.version.version;
ret = vmbus_sendpacket(dev->channel, &version_req,
sizeof(struct dm_version_request),
@@ -1575,6 +1604,11 @@ static int balloon_probe(struct hv_device *dev,
ret = -ETIMEDOUT;
goto probe_error2;
}
+
+ pr_info("Using Dynamic Memory protocol version %u.%u\n",
+ DYNMEM_MAJOR_VERSION(dm_device.version),
+ DYNMEM_MINOR_VERSION(dm_device.version));
+
/*
* Now submit our capabilities to the host.
*/
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index a6707133c297..eee238cc60bd 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -31,7 +31,10 @@
#define VSS_MINOR 0
#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
-#define VSS_USERSPACE_TIMEOUT (msecs_to_jiffies(10 * 1000))
+/*
+ * Timeout values are based on expectations from the host
+ */
+#define VSS_FREEZE_TIMEOUT (15 * 60)
/*
* Global state maintained for transaction that is being processed. For a class
@@ -120,7 +123,7 @@ static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
default:
return -EINVAL;
}
- pr_debug("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
+ pr_info("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
return 0;
}
@@ -128,8 +131,10 @@ static int vss_on_msg(void *msg, int len)
{
struct hv_vss_msg *vss_msg = (struct hv_vss_msg *)msg;
- if (len != sizeof(*vss_msg))
+ if (len != sizeof(*vss_msg)) {
+ pr_debug("VSS: Message size does not match length\n");
return -EINVAL;
+ }
if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER ||
vss_msg->vss_hdr.operation == VSS_OP_REGISTER1) {
@@ -137,8 +142,11 @@ static int vss_on_msg(void *msg, int len)
* Don't process registration messages if we're in the middle
* of a transaction processing.
*/
- if (vss_transaction.state > HVUTIL_READY)
+ if (vss_transaction.state > HVUTIL_READY) {
+ pr_debug("VSS: Got unexpected registration request\n");
return -EINVAL;
+ }
+
return vss_handle_handshake(vss_msg);
} else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) {
vss_transaction.state = HVUTIL_USERSPACE_RECV;
@@ -155,7 +163,7 @@ static int vss_on_msg(void *msg, int len)
}
} else {
/* This is a spurious call! */
- pr_warn("VSS: Transaction not active\n");
+ pr_debug("VSS: Transaction not active\n");
return -EINVAL;
}
return 0;
@@ -168,8 +176,10 @@ static void vss_send_op(void)
struct hv_vss_msg *vss_msg;
/* The transaction state is wrong. */
- if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED)
+ if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED) {
+ pr_debug("VSS: Unexpected attempt to send to daemon\n");
return;
+ }
vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
if (!vss_msg)
@@ -179,7 +189,8 @@ static void vss_send_op(void)
vss_transaction.state = HVUTIL_USERSPACE_REQ;
- schedule_delayed_work(&vss_timeout_work, VSS_USERSPACE_TIMEOUT);
+ schedule_delayed_work(&vss_timeout_work, op == VSS_OP_FREEZE ?
+ VSS_FREEZE_TIMEOUT * HZ : HV_UTIL_TIMEOUT * HZ);
rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
if (rc) {
@@ -210,9 +221,13 @@ static void vss_handle_request(struct work_struct *dummy)
case VSS_OP_HOT_BACKUP:
if (vss_transaction.state < HVUTIL_READY) {
/* Userspace is not registered yet */
+ pr_debug("VSS: Not ready for request.\n");
vss_respond_to_host(HV_E_FAIL);
return;
}
+
+ pr_debug("VSS: Received request for op code: %d\n",
+ vss_transaction.msg->vss_hdr.operation);
vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
vss_send_op();
return;
@@ -353,8 +368,10 @@ hv_vss_init(struct hv_util_service *srv)
hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL,
vss_on_msg, vss_on_reset);
- if (!hvt)
+ if (!hvt) {
+ pr_warn("VSS: Failed to initialize transport\n");
return -EFAULT;
+ }
return 0;
}
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index bcd06306f3e8..e7707747f56d 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -389,16 +389,19 @@ static int util_probe(struct hv_device *dev,
ts_srv_version = TS_VERSION_1;
hb_srv_version = HB_VERSION_1;
break;
- case(VERSION_WIN10):
+ case VERSION_WIN7:
+ case VERSION_WIN8:
+ case VERSION_WIN8_1:
util_fw_version = UTIL_FW_VERSION;
sd_srv_version = SD_VERSION;
- ts_srv_version = TS_VERSION;
+ ts_srv_version = TS_VERSION_3;
hb_srv_version = HB_VERSION;
break;
+ case VERSION_WIN10:
default:
util_fw_version = UTIL_FW_VERSION;
sd_srv_version = SD_VERSION;
- ts_srv_version = TS_VERSION_3;
+ ts_srv_version = TS_VERSION;
hb_srv_version = HB_VERSION;
}
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index a5b4442433c8..0675b395ce5c 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -38,7 +38,7 @@
/*
* Timeout for guest-host handshake for services.
*/
-#define HV_UTIL_NEGO_TIMEOUT 60
+#define HV_UTIL_NEGO_TIMEOUT 55
/*
* The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
@@ -527,14 +527,14 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
-int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
+int hv_ringbuffer_write(struct vmbus_channel *channel,
struct kvec *kv_list,
- u32 kv_count, bool *signal, bool lock,
- enum hv_signal_policy policy);
+ u32 kv_count, bool lock,
+ bool kick_q);
-int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
+int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
- u64 *requestid, bool *signal, bool raw);
+ u64 *requestid, bool raw);
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 08043da1a61c..cd49cb17eb7f 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -66,21 +66,25 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
* once the ring buffer is empty, it will clear the
* interrupt_mask and re-check to see if new data has
* arrived.
+ *
+ * KYS: Oct. 30, 2016:
+ * It looks like Windows hosts have logic to deal with DOS attacks that
+ * can be triggered if they receive interrupts when they are not
+ * expecting them. The host expects interrupts only when the ring
+ * transitions from empty to non-empty (or full to non-full on the guest
+ * to host ring).
+ * So, base the signaling decision solely on the ring state until the
+ * host logic is fixed.
*/
-static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
- enum hv_signal_policy policy)
+static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
+ bool kick_q)
{
+ struct hv_ring_buffer_info *rbi = &channel->outbound;
+
virt_mb();
if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
- return false;
-
- /*
- * When the client wants to control signaling,
- * we only honour the host interrupt mask.
- */
- if (policy == HV_SIGNAL_POLICY_EXPLICIT)
- return true;
+ return;
/* check interrupt_mask before read_index */
virt_rmb();
@@ -89,9 +93,9 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
* ring transitions from being empty to non-empty.
*/
if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
- return true;
+ vmbus_setevent(channel);
- return false;
+ return;
}
/* Get the next write location for the specified ring buffer. */
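
The rule that hv_signal_on_write() now applies unconditionally can be modelled in a few lines of plain C (no barriers, simplified types, hypothetical numbers): signal only when interrupts are unmasked and the reader had already caught up to the position this write started at, i.e. the write took the ring from empty to non-empty.

/* Simplified model of the signal-on-write decision, not driver code. */
#include <stdbool.h>
#include <stdio.h>

struct ring_state {
	unsigned int read_index;	/* host's read cursor */
	unsigned int interrupt_mask;	/* set when the host masks interrupts */
};

static bool should_signal(const struct ring_state *r, unsigned int old_write)
{
	if (r->interrupt_mask)
		return false;
	/* Ring was empty iff the reader had caught up to the old write spot. */
	return r->read_index == old_write;
}

int main(void)
{
	struct ring_state r = { .read_index = 64, .interrupt_mask = 0 };

	/* This write started at offset 64, where the reader already was. */
	printf("empty -> non-empty write signals: %d\n", should_signal(&r, 64));
	/* A later write starts at 192 while the reader is still at 64. */
	printf("write to non-empty ring signals:  %d\n", should_signal(&r, 192));
	return 0;
}
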
@@ -280,9 +284,9 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
}
/* Write to the ring buffer. */
-int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
- struct kvec *kv_list, u32 kv_count, bool *signal, bool lock,
- enum hv_signal_policy policy)
+int hv_ringbuffer_write(struct vmbus_channel *channel,
+ struct kvec *kv_list, u32 kv_count, bool lock,
+ bool kick_q)
{
int i = 0;
u32 bytes_avail_towrite;
@@ -292,6 +296,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
u32 old_write;
u64 prev_indices = 0;
unsigned long flags = 0;
+ struct hv_ring_buffer_info *outring_info = &channel->outbound;
for (i = 0; i < kv_count; i++)
totalbytes_towrite += kv_list[i].iov_len;
@@ -344,13 +349,13 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
if (lock)
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
- *signal = hv_need_to_signal(old_write, outring_info, policy);
+ hv_signal_on_write(old_write, channel, kick_q);
return 0;
}
-int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
+int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
- u64 *requestid, bool *signal, bool raw)
+ u64 *requestid, bool raw)
{
u32 bytes_avail_toread;
u32 next_read_location = 0;
@@ -359,6 +364,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
u32 offset;
u32 packetlen;
int ret = 0;
+ struct hv_ring_buffer_info *inring_info = &channel->inbound;
if (buflen <= 0)
return -EINVAL;
@@ -416,7 +422,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
/* Update the read index */
hv_set_next_read_location(inring_info, next_read_location);
- *signal = hv_need_to_signal_on_read(inring_info);
+ hv_signal_on_read(channel);
return ret;
}
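
A note on the hv_signal_on_write() change above: the signaling decision now reduces to "kick the host only if interrupts are unmasked and this write took the ring from empty to non-empty", as the comment at the top of this hunk explains. The following is a minimal, self-contained sketch of that rule only; the struct and function names are illustrative and are not the kernel's ring-buffer API.

#include <stdbool.h>
#include <stdint.h>

struct ring_state {
	uint32_t read_index;     /* reader's (host's) current read cursor */
	uint32_t interrupt_mask; /* non-zero: reader does not want interrupts */
};

/* old_write is the write index observed before the new data was copied in. */
static bool need_to_signal(const struct ring_state *r, uint32_t old_write)
{
	if (r->interrupt_mask)
		return false;
	/*
	 * If the reader had already caught up to old_write, the ring was
	 * empty before this write, so this write is the empty-to-non-empty
	 * transition and the reader must be kicked.
	 */
	return old_write == r->read_index;
}
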
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 0276d2ef06ee..230c62e7f567 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -45,6 +45,11 @@
#include <linux/random.h>
#include "hyperv_vmbus.h"
+struct vmbus_dynid {
+ struct list_head node;
+ struct hv_vmbus_device_id id;
+};
+
static struct acpi_device *hv_acpi_dev;
static struct completion probe_event;
@@ -500,7 +505,7 @@ static ssize_t device_show(struct device *dev,
static DEVICE_ATTR_RO(device);
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
-static struct attribute *vmbus_attrs[] = {
+static struct attribute *vmbus_dev_attrs[] = {
&dev_attr_id.attr,
&dev_attr_state.attr,
&dev_attr_monitor_id.attr,
@@ -528,7 +533,7 @@ static struct attribute *vmbus_attrs[] = {
&dev_attr_device.attr,
NULL,
};
-ATTRIBUTE_GROUPS(vmbus);
+ATTRIBUTE_GROUPS(vmbus_dev);
/*
* vmbus_uevent - add uevent for our device
@@ -565,10 +570,29 @@ static inline bool is_null_guid(const uuid_le *guid)
* Return a matching hv_vmbus_device_id pointer.
* If there is no match, return NULL.
*/
-static const struct hv_vmbus_device_id *hv_vmbus_get_id(
- const struct hv_vmbus_device_id *id,
+static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
const uuid_le *guid)
{
+ const struct hv_vmbus_device_id *id = NULL;
+ struct vmbus_dynid *dynid;
+
+ /* Look at the dynamic ids first, before the static ones */
+ spin_lock(&drv->dynids.lock);
+ list_for_each_entry(dynid, &drv->dynids.list, node) {
+ if (!uuid_le_cmp(dynid->id.guid, *guid)) {
+ id = &dynid->id;
+ break;
+ }
+ }
+ spin_unlock(&drv->dynids.lock);
+
+ if (id)
+ return id;
+
+ id = drv->id_table;
+ if (id == NULL)
+ return NULL; /* empty device table */
+
for (; !is_null_guid(&id->guid); id++)
if (!uuid_le_cmp(id->guid, *guid))
return id;
@@ -576,6 +600,134 @@ static const struct hv_vmbus_device_id *hv_vmbus_get_id(
return NULL;
}
+/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
+static int vmbus_add_dynid(struct hv_driver *drv, uuid_le *guid)
+{
+ struct vmbus_dynid *dynid;
+
+ dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
+ if (!dynid)
+ return -ENOMEM;
+
+ dynid->id.guid = *guid;
+
+ spin_lock(&drv->dynids.lock);
+ list_add_tail(&dynid->node, &drv->dynids.list);
+ spin_unlock(&drv->dynids.lock);
+
+ return driver_attach(&drv->driver);
+}
+
+static void vmbus_free_dynids(struct hv_driver *drv)
+{
+ struct vmbus_dynid *dynid, *n;
+
+ spin_lock(&drv->dynids.lock);
+ list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
+ list_del(&dynid->node);
+ kfree(dynid);
+ }
+ spin_unlock(&drv->dynids.lock);
+}
+
+/* Parse a string of the form: 1b4e28ba-2fa1-11d2-883f-b9a761bde3f8 */
+static int get_uuid_le(const char *str, uuid_le *uu)
+{
+ unsigned int b[16];
+ int i;
+
+ if (strlen(str) < 37)
+ return -1;
+
+ for (i = 0; i < 36; i++) {
+ switch (i) {
+ case 8: case 13: case 18: case 23:
+ if (str[i] != '-')
+ return -1;
+ break;
+ default:
+ if (!isxdigit(str[i]))
+ return -1;
+ }
+ }
+
+ /* unparse little endian output byte order */
+ if (sscanf(str,
+ "%2x%2x%2x%2x-%2x%2x-%2x%2x-%2x%2x-%2x%2x%2x%2x%2x%2x",
+ &b[3], &b[2], &b[1], &b[0],
+ &b[5], &b[4], &b[7], &b[6], &b[8], &b[9],
+ &b[10], &b[11], &b[12], &b[13], &b[14], &b[15]) != 16)
+ return -1;
+
+ for (i = 0; i < 16; i++)
+ uu->b[i] = b[i];
+ return 0;
+}
+
+/*
+ * new_id_store - sysfs frontend to vmbus_add_dynid()
+ *
+ * Allow GUIDs to be added to an existing driver via sysfs.
+ */
+static ssize_t new_id_store(struct device_driver *driver, const char *buf,
+ size_t count)
+{
+ struct hv_driver *drv = drv_to_hv_drv(driver);
+ uuid_le guid = NULL_UUID_LE;
+ ssize_t retval;
+
+ if (get_uuid_le(buf, &guid) != 0)
+ return -EINVAL;
+
+ if (hv_vmbus_get_id(drv, &guid))
+ return -EEXIST;
+
+ retval = vmbus_add_dynid(drv, &guid);
+ if (retval)
+ return retval;
+ return count;
+}
+static DRIVER_ATTR_WO(new_id);
+
+/*
+ * remove_id_store - remove a vmbus device ID from this driver
+ *
+ * Removes a dynamic vmbus device ID from this driver.
+ */
+static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
+ size_t count)
+{
+ struct hv_driver *drv = drv_to_hv_drv(driver);
+ struct vmbus_dynid *dynid, *n;
+ uuid_le guid = NULL_UUID_LE;
+ ssize_t retval = -ENODEV;
+
+ if (get_uuid_le(buf, &guid))
+ return -EINVAL;
+
+ spin_lock(&drv->dynids.lock);
+ list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
+ struct hv_vmbus_device_id *id = &dynid->id;
+
+ if (!uuid_le_cmp(id->guid, guid)) {
+ list_del(&dynid->node);
+ kfree(dynid);
+ retval = count;
+ break;
+ }
+ }
+ spin_unlock(&drv->dynids.lock);
+
+ return retval;
+}
+static DRIVER_ATTR_WO(remove_id);
+
+static struct attribute *vmbus_drv_attrs[] = {
+ &driver_attr_new_id.attr,
+ &driver_attr_remove_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vmbus_drv);
/*
@@ -590,7 +742,7 @@ static int vmbus_match(struct device *device, struct device_driver *driver)
if (is_hvsock_channel(hv_dev->channel))
return drv->hvsock;
- if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type))
+ if (hv_vmbus_get_id(drv, &hv_dev->dev_type))
return 1;
return 0;
@@ -607,7 +759,7 @@ static int vmbus_probe(struct device *child_device)
struct hv_device *dev = device_to_hv_device(child_device);
const struct hv_vmbus_device_id *dev_id;
- dev_id = hv_vmbus_get_id(drv->id_table, &dev->dev_type);
+ dev_id = hv_vmbus_get_id(drv, &dev->dev_type);
if (drv->probe) {
ret = drv->probe(dev, dev_id);
if (ret != 0)
@@ -684,7 +836,8 @@ static struct bus_type hv_bus = {
.remove = vmbus_remove,
.probe = vmbus_probe,
.uevent = vmbus_uevent,
- .dev_groups = vmbus_groups,
+ .dev_groups = vmbus_dev_groups,
+ .drv_groups = vmbus_drv_groups,
};
struct onmessage_work_context {
@@ -905,6 +1058,9 @@ int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, c
hv_driver->driver.mod_name = mod_name;
hv_driver->driver.bus = &hv_bus;
+ spin_lock_init(&hv_driver->dynids.lock);
+ INIT_LIST_HEAD(&hv_driver->dynids.list);
+
ret = driver_register(&hv_driver->driver);
return ret;
@@ -923,8 +1079,10 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
pr_info("unregistering driver %s\n", hv_driver->name);
- if (!vmbus_exists())
+ if (!vmbus_exists()) {
driver_unregister(&hv_driver->driver);
+ vmbus_free_dynids(hv_driver);
+ }
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
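
The new_id/remove_id attributes added above show up under each driver's directory on the vmbus bus, so a GUID can be bound to (or unbound from) a driver at run time. A small illustrative user-space snippet follows; the driver directory and the GUID are assumptions made up for the example, and the string must use the dashed form that get_uuid_le() accepts.

#include <stdio.h>

int main(void)
{
	/* Hypothetical driver directory and device GUID. */
	const char *path = "/sys/bus/vmbus/drivers/hv_example/new_id";
	const char *guid = "1b4e28ba-2fa1-11d2-883f-b9a761bde3f8";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "%s\n", guid);
	return fclose(f) ? 1 : 0;
}

Writing the same GUID to the driver's remove_id attribute drops it from the dynamic list again.
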
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 45cef3d2c75c..190d270b20a2 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -907,6 +907,17 @@ config SENSORS_MCP3021
This driver can also be built as a module. If so, the module
will be called mcp3021.
+config SENSORS_TC654
+ tristate "Microchip TC654/TC655 and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for TC654 and TC655.
+ The TC654 and TC655 are PWM mode fan speed controllers with
+ FanSense technology for use with brushless DC fans.
+
+ This driver can also be built as a module. If so, the module
+ will be called tc654.
+
config SENSORS_MENF21BMC_HWMON
tristate "MEN 14F021P00 BMC Hardware Monitoring"
depends on MFD_MENF21BMC
@@ -1068,8 +1079,8 @@ config SENSORS_LM90
LM86, LM89 and LM99, Analog Devices ADM1032, ADT7461, and ADT7461A,
Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659,
MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, ON Semiconductor NCT1008,
- Winbond/Nuvoton W83L771W/G/AWG/ASG, Philips SA56004, and GMT G781
- sensor chips.
+ Winbond/Nuvoton W83L771W/G/AWG/ASG, Philips SA56004, GMT G781, and
+ Texas Instruments TMP451 sensor chips.
This driver can also be built as a module. If so, the module
will be called lm90.
@@ -1591,6 +1602,17 @@ config SENSORS_TMP103
This driver can also be built as a module. If so, the module
will be called tmp103.
+config SENSORS_TMP108
+ tristate "Texas Instruments TMP108"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for Texas Instruments TMP108
+ sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called tmp108.
+
config SENSORS_TMP401
tristate "Texas Instruments TMP401 and compatibles"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index aecf4ba17460..d2cb7e804a0f 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -122,6 +122,7 @@ obj-$(CONFIG_SENSORS_MAX6697) += max6697.o
obj-$(CONFIG_SENSORS_MAX31790) += max31790.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
+obj-$(CONFIG_SENSORS_TC654) += tc654.o
obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o
obj-$(CONFIG_SENSORS_NCT6683) += nct6683.o
obj-$(CONFIG_SENSORS_NCT6775) += nct6775.o
@@ -152,6 +153,7 @@ obj-$(CONFIG_SENSORS_TC74) += tc74.o
obj-$(CONFIG_SENSORS_THMC50) += thmc50.o
obj-$(CONFIG_SENSORS_TMP102) += tmp102.o
obj-$(CONFIG_SENSORS_TMP103) += tmp103.o
+obj-$(CONFIG_SENSORS_TMP108) += tmp108.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
obj-$(CONFIG_SENSORS_TWL4030_MADC)+= twl4030-madc-hwmon.o
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index d6c767ace916..1abb4609b412 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -93,7 +93,7 @@ static const int in_scale[6] = { 2500, 2250, 3300, 5000, 12000, 3300 };
#define IN_FROM_REG(reg, scale) (((reg) * (scale) + 96) / 192)
#define IN_TO_REG(val, scale) ((val) <= 0 ? 0 : \
- (val) * 192 >= (scale) * 255 ? 255 : \
+ (val) >= (scale) * 255 / 192 ? 255 : \
((val) * 192 + (scale) / 2) / (scale))
#define TEMP_FROM_REG(reg) ((reg) * 1000)
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index e67b9a50ac7c..b2a5d9e5c590 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -197,8 +197,9 @@ static int adm1026_scaling[] = { /* .001 Volts */
};
#define NEG12_OFFSET 16000
#define SCALE(val, from, to) (((val)*(to) + ((from)/2))/(from))
-#define INS_TO_REG(n, val) (clamp_val(SCALE(val, adm1026_scaling[n], 192),\
- 0, 255))
+#define INS_TO_REG(n, val) \
+ SCALE(clamp_val(val, 0, 255 * adm1026_scaling[n] / 192), \
+ adm1026_scaling[n], 192)
#define INS_FROM_REG(n, val) (SCALE(val, 192, adm1026_scaling[n]))
/*
@@ -215,11 +216,11 @@ static int adm1026_scaling[] = { /* .001 Volts */
#define DIV_TO_REG(val) ((val) >= 8 ? 3 : (val) >= 4 ? 2 : (val) >= 2 ? 1 : 0)
/* Temperature is reported in 1 degC increments */
-#define TEMP_TO_REG(val) (clamp_val(((val) + ((val) < 0 ? -500 : 500)) \
- / 1000, -127, 127))
+#define TEMP_TO_REG(val) DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), \
+ 1000)
#define TEMP_FROM_REG(val) ((val) * 1000)
-#define OFFSET_TO_REG(val) (clamp_val(((val) + ((val) < 0 ? -500 : 500)) \
- / 1000, -127, 127))
+#define OFFSET_TO_REG(val) DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), \
+ 1000)
#define OFFSET_FROM_REG(val) ((val) * 1000)
#define PWM_TO_REG(val) (clamp_val(val, 0, 255))
@@ -233,7 +234,8 @@ static int adm1026_scaling[] = { /* .001 Volts */
* indicates that the DAC could be used to drive the fans, but in our
* example board (Arima HDAMA) it isn't connected to the fans at all.
*/
-#define DAC_TO_REG(val) (clamp_val(((((val) * 255) + 500) / 2500), 0, 255))
+#define DAC_TO_REG(val) DIV_ROUND_CLOSEST(clamp_val(val, 0, 2500) * 255, \
+ 2500)
#define DAC_FROM_REG(val) (((val) * 2500) / 255)
/*
@@ -593,7 +595,10 @@ static ssize_t set_in16_min(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->in_min[16] = INS_TO_REG(16, val + NEG12_OFFSET);
+ data->in_min[16] = INS_TO_REG(16,
+ clamp_val(val, INT_MIN,
+ INT_MAX - NEG12_OFFSET) +
+ NEG12_OFFSET);
adm1026_write_value(client, ADM1026_REG_IN_MIN[16], data->in_min[16]);
mutex_unlock(&data->update_lock);
return count;
@@ -618,7 +623,10 @@ static ssize_t set_in16_max(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->in_max[16] = INS_TO_REG(16, val+NEG12_OFFSET);
+ data->in_max[16] = INS_TO_REG(16,
+ clamp_val(val, INT_MIN,
+ INT_MAX - NEG12_OFFSET) +
+ NEG12_OFFSET);
adm1026_write_value(client, ADM1026_REG_IN_MAX[16], data->in_max[16]);
mutex_unlock(&data->update_lock);
return count;
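
The adm1025 and adm1026 changes above (and the similar adm9240, adt7462, adt7470, ds620, emc2103, emc6w201, g762, lm85 and lm87 changes further down) all apply the same pattern: clamp the user-supplied value to the representable range before scaling, so the intermediate multiplication cannot overflow and out-of-range input saturates instead of wrapping. A small self-contained sketch of the idea, with illustrative names only:

static long clamp_long(long v, long lo, long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/*
 * Convert millivolts to an 8-bit register value where 192 register counts
 * correspond to "scale" millivolts, rounding to nearest.
 */
static unsigned char mv_to_reg(long mv, long scale)
{
	/* Saturate first: 255 * scale / 192 is the largest encodable input. */
	mv = clamp_long(mv, 0, scale * 255 / 192);
	return (unsigned char)((mv * 192 + scale / 2) / scale);
}
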
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 2fe1828bd10b..72bf2489511e 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -98,13 +98,15 @@ static inline unsigned int IN_FROM_REG(u8 reg, int n)
static inline u8 IN_TO_REG(unsigned long val, int n)
{
- return clamp_val(SCALE(val, 192, nom_mv[n]), 0, 255);
+ val = clamp_val(val, 0, nom_mv[n] * 255 / 192);
+ return SCALE(val, 192, nom_mv[n]);
}
/* temperature range: -40..125, 127 disables temperature alarm */
static inline s8 TEMP_TO_REG(long val)
{
- return clamp_val(SCALE(val, 1, 1000), -40, 127);
+ val = clamp_val(val, -40000, 127000);
+ return SCALE(val, 1, 1000);
}
/* two fans, each with low fan speed limit */
@@ -122,7 +124,8 @@ static inline unsigned int FAN_FROM_REG(u8 reg, u8 div)
/* analog out 0..1250mV */
static inline u8 AOUT_TO_REG(unsigned long val)
{
- return clamp_val(SCALE(val, 255, 1250), 0, 255);
+ val = clamp_val(val, 0, 1250);
+ return SCALE(val, 255, 1250);
}
static inline unsigned int AOUT_FROM_REG(u8 reg)
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index 812fbc00f693..bdeaece9641d 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -55,7 +55,7 @@ struct adt7411_data {
struct mutex device_lock; /* for "atomic" device accesses */
struct mutex update_lock;
unsigned long next_update;
- int vref_cached;
+ long vref_cached;
struct i2c_client *client;
bool use_ext_temp;
};
@@ -114,85 +114,6 @@ static int adt7411_modify_bit(struct i2c_client *client, u8 reg, u8 bit,
return ret;
}
-static ssize_t adt7411_show_vdd(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct adt7411_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int ret = adt7411_read_10_bit(client, ADT7411_REG_INT_TEMP_VDD_LSB,
- ADT7411_REG_VDD_MSB, 2);
-
- return ret < 0 ? ret : sprintf(buf, "%u\n", ret * 7000 / 1024);
-}
-
-static ssize_t adt7411_show_temp(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int nr = to_sensor_dev_attr(attr)->index;
- struct adt7411_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int val;
- struct {
- u8 low;
- u8 high;
- } reg[2] = {
- { ADT7411_REG_INT_TEMP_VDD_LSB, ADT7411_REG_INT_TEMP_MSB },
- { ADT7411_REG_EXT_TEMP_AIN14_LSB,
- ADT7411_REG_EXT_TEMP_AIN1_MSB },
- };
-
- val = adt7411_read_10_bit(client, reg[nr].low, reg[nr].high, 0);
- if (val < 0)
- return val;
-
- val = val & 0x200 ? val - 0x400 : val; /* 10 bit signed */
-
- return sprintf(buf, "%d\n", val * 250);
-}
-
-static ssize_t adt7411_show_input(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int nr = to_sensor_dev_attr(attr)->index;
- struct adt7411_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int val;
- u8 lsb_reg, lsb_shift;
-
- mutex_lock(&data->update_lock);
- if (time_after_eq(jiffies, data->next_update)) {
- val = i2c_smbus_read_byte_data(client, ADT7411_REG_CFG3);
- if (val < 0)
- goto exit_unlock;
-
- if (val & ADT7411_CFG3_REF_VDD) {
- val = adt7411_read_10_bit(client,
- ADT7411_REG_INT_TEMP_VDD_LSB,
- ADT7411_REG_VDD_MSB, 2);
- if (val < 0)
- goto exit_unlock;
-
- data->vref_cached = val * 7000 / 1024;
- } else {
- data->vref_cached = 2250;
- }
-
- data->next_update = jiffies + HZ;
- }
-
- lsb_reg = ADT7411_REG_EXT_TEMP_AIN14_LSB + (nr >> 2);
- lsb_shift = 2 * (nr & 0x03);
- val = adt7411_read_10_bit(client, lsb_reg,
- ADT7411_REG_EXT_TEMP_AIN1_MSB + nr, lsb_shift);
- if (val < 0)
- goto exit_unlock;
-
- val = sprintf(buf, "%u\n", val * data->vref_cached / 1024);
- exit_unlock:
- mutex_unlock(&data->update_lock);
- return val;
-}
-
static ssize_t adt7411_show_bit(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -228,65 +149,157 @@ static ssize_t adt7411_set_bit(struct device *dev,
return ret < 0 ? ret : count;
}
-
#define ADT7411_BIT_ATTR(__name, __reg, __bit) \
SENSOR_DEVICE_ATTR_2(__name, S_IRUGO | S_IWUSR, adt7411_show_bit, \
adt7411_set_bit, __bit, __reg)
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, adt7411_show_temp, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, adt7411_show_temp, NULL, 1);
-static DEVICE_ATTR(in0_input, S_IRUGO, adt7411_show_vdd, NULL);
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, adt7411_show_input, NULL, 0);
-static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, adt7411_show_input, NULL, 1);
-static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, adt7411_show_input, NULL, 2);
-static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, adt7411_show_input, NULL, 3);
-static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, adt7411_show_input, NULL, 4);
-static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, adt7411_show_input, NULL, 5);
-static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, adt7411_show_input, NULL, 6);
-static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, adt7411_show_input, NULL, 7);
static ADT7411_BIT_ATTR(no_average, ADT7411_REG_CFG2, ADT7411_CFG2_DISABLE_AVG);
static ADT7411_BIT_ATTR(fast_sampling, ADT7411_REG_CFG3, ADT7411_CFG3_ADC_CLK_225);
static ADT7411_BIT_ATTR(adc_ref_vdd, ADT7411_REG_CFG3, ADT7411_CFG3_REF_VDD);
static struct attribute *adt7411_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &dev_attr_in0_input.attr,
- &sensor_dev_attr_in1_input.dev_attr.attr,
- &sensor_dev_attr_in2_input.dev_attr.attr,
- &sensor_dev_attr_in3_input.dev_attr.attr,
- &sensor_dev_attr_in4_input.dev_attr.attr,
- &sensor_dev_attr_in5_input.dev_attr.attr,
- &sensor_dev_attr_in6_input.dev_attr.attr,
- &sensor_dev_attr_in7_input.dev_attr.attr,
- &sensor_dev_attr_in8_input.dev_attr.attr,
&sensor_dev_attr_no_average.dev_attr.attr,
&sensor_dev_attr_fast_sampling.dev_attr.attr,
&sensor_dev_attr_adc_ref_vdd.dev_attr.attr,
NULL
};
+ATTRIBUTE_GROUPS(adt7411);
-static umode_t adt7411_attrs_visible(struct kobject *kobj,
- struct attribute *attr, int index)
+static int adt7411_read_in_vdd(struct device *dev, u32 attr, long *val)
{
- struct device *dev = container_of(kobj, struct device, kobj);
struct adt7411_data *data = dev_get_drvdata(dev);
- bool visible = true;
+ struct i2c_client *client = data->client;
+ int ret;
- if (attr == &sensor_dev_attr_temp2_input.dev_attr.attr)
- visible = data->use_ext_temp;
- else if (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
- attr == &sensor_dev_attr_in2_input.dev_attr.attr)
- visible = !data->use_ext_temp;
+ switch (attr) {
+ case hwmon_in_input:
+ ret = adt7411_read_10_bit(client, ADT7411_REG_INT_TEMP_VDD_LSB,
+ ADT7411_REG_VDD_MSB, 2);
+ if (ret < 0)
+ return ret;
+ *val = ret * 7000 / 1024;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adt7411_read_in_chan(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct adt7411_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
- return visible ? attr->mode : 0;
+ int ret;
+ int lsb_reg, lsb_shift;
+ int nr = channel - 1;
+
+ mutex_lock(&data->update_lock);
+ if (time_after_eq(jiffies, data->next_update)) {
+ ret = i2c_smbus_read_byte_data(client, ADT7411_REG_CFG3);
+ if (ret < 0)
+ goto exit_unlock;
+
+ if (ret & ADT7411_CFG3_REF_VDD) {
+ ret = adt7411_read_in_vdd(dev, hwmon_in_input,
+ &data->vref_cached);
+ if (ret < 0)
+ goto exit_unlock;
+ } else {
+ data->vref_cached = 2250;
+ }
+
+ data->next_update = jiffies + HZ;
+ }
+
+ switch (attr) {
+ case hwmon_in_input:
+ lsb_reg = ADT7411_REG_EXT_TEMP_AIN14_LSB + (nr >> 2);
+ lsb_shift = 2 * (nr & 0x03);
+ ret = adt7411_read_10_bit(client, lsb_reg,
+ ADT7411_REG_EXT_TEMP_AIN1_MSB + nr,
+ lsb_shift);
+ if (ret < 0)
+ goto exit_unlock;
+ *val = ret * data->vref_cached / 1024;
+ ret = 0;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ exit_unlock:
+ mutex_unlock(&data->update_lock);
+ return ret;
}
-static const struct attribute_group adt7411_group = {
- .attrs = adt7411_attrs,
- .is_visible = adt7411_attrs_visible,
-};
-__ATTRIBUTE_GROUPS(adt7411);
+static int adt7411_read_in(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ if (channel == 0)
+ return adt7411_read_in_vdd(dev, attr, val);
+ else
+ return adt7411_read_in_chan(dev, attr, channel, val);
+}
+
+static int adt7411_read_temp(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct adt7411_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int ret, regl, regh;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ regl = channel ? ADT7411_REG_EXT_TEMP_AIN14_LSB :
+ ADT7411_REG_INT_TEMP_VDD_LSB;
+ regh = channel ? ADT7411_REG_EXT_TEMP_AIN1_MSB :
+ ADT7411_REG_INT_TEMP_MSB;
+ ret = adt7411_read_10_bit(client, regl, regh, 0);
+ if (ret < 0)
+ return ret;
+ ret = ret & 0x200 ? ret - 0x400 : ret; /* 10 bit signed */
+ *val = ret * 250;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adt7411_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_in:
+ return adt7411_read_in(dev, attr, channel, val);
+ case hwmon_temp:
+ return adt7411_read_temp(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t adt7411_is_visible(const void *_data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct adt7411_data *data = _data;
+
+ switch (type) {
+ case hwmon_in:
+ if (channel > 0 && channel < 3)
+ return data->use_ext_temp ? 0 : S_IRUGO;
+ else
+ return S_IRUGO;
+ case hwmon_temp:
+ if (channel == 1)
+ return data->use_ext_temp ? S_IRUGO : 0;
+ else
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+}
static int adt7411_detect(struct i2c_client *client,
struct i2c_board_info *info)
@@ -358,6 +371,51 @@ static int adt7411_init_device(struct adt7411_data *data)
return i2c_smbus_write_byte_data(data->client, ADT7411_REG_CFG1, val);
}
+static const u32 adt7411_in_config[] = {
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info adt7411_in = {
+ .type = hwmon_in,
+ .config = adt7411_in_config,
+};
+
+static const u32 adt7411_temp_config[] = {
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info adt7411_temp = {
+ .type = hwmon_temp,
+ .config = adt7411_temp_config,
+};
+
+static const struct hwmon_channel_info *adt7411_info[] = {
+ &adt7411_in,
+ &adt7411_temp,
+ NULL
+};
+
+static const struct hwmon_ops adt7411_hwmon_ops = {
+ .is_visible = adt7411_is_visible,
+ .read = adt7411_read,
+};
+
+static const struct hwmon_chip_info adt7411_chip_info = {
+ .ops = &adt7411_hwmon_ops,
+ .info = adt7411_info,
+};
+
static int adt7411_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -382,9 +440,10 @@ static int adt7411_probe(struct i2c_client *client,
/* force update on first occasion */
data->next_update = jiffies;
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- data,
- adt7411_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &adt7411_chip_info,
+ adt7411_groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index 5929e126da63..19f2a6d48bac 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -810,8 +810,8 @@ static ssize_t set_temp_min(struct device *dev,
if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
return -EINVAL;
+ temp = clamp_val(temp, -64000, 191000);
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
- temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->temp_min[attr->index] = temp;
@@ -848,8 +848,8 @@ static ssize_t set_temp_max(struct device *dev,
if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
return -EINVAL;
+ temp = clamp_val(temp, -64000, 191000);
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
- temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->temp_max[attr->index] = temp;
@@ -912,9 +912,9 @@ static ssize_t set_volt_max(struct device *dev,
if (kstrtol(buf, 10, &temp) || !x)
return -EINVAL;
+ temp = clamp_val(temp, 0, 255 * x / 1000);
temp *= 1000; /* convert mV to uV */
temp = DIV_ROUND_CLOSEST(temp, x);
- temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->volt_max[attr->index] = temp;
@@ -954,9 +954,9 @@ static ssize_t set_volt_min(struct device *dev,
if (kstrtol(buf, 10, &temp) || !x)
return -EINVAL;
+ temp = clamp_val(temp, 0, 255 * x / 1000);
temp *= 1000; /* convert mV to uV */
temp = DIV_ROUND_CLOSEST(temp, x);
- temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->volt_min[attr->index] = temp;
@@ -1220,8 +1220,8 @@ static ssize_t set_pwm_hyst(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
+ temp = clamp_val(temp, 0, 15000);
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = clamp_val(temp, 0, 15);
/* package things up */
temp &= ADT7462_PWM_HYST_MASK;
@@ -1306,8 +1306,8 @@ static ssize_t set_pwm_tmin(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
+ temp = clamp_val(temp, -64000, 191000);
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
- temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm_tmin[attr->index] = temp;
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 6e60ca53406e..c9a1d9c25572 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -483,8 +483,8 @@ static ssize_t set_temp_min(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
+ temp = clamp_val(temp, -128000, 127000);
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = clamp_val(temp, -128, 127);
mutex_lock(&data->lock);
data->temp_min[attr->index] = temp;
@@ -517,8 +517,8 @@ static ssize_t set_temp_max(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
+ temp = clamp_val(temp, -128000, 127000);
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = clamp_val(temp, -128, 127);
mutex_lock(&data->lock);
data->temp_max[attr->index] = temp;
@@ -880,8 +880,8 @@ static ssize_t set_pwm_tmin(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
+ temp = clamp_val(temp, -128000, 127000);
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = clamp_val(temp, -128, 127);
mutex_lock(&data->lock);
data->pwm_tmin[attr->index] = temp;
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 12e851a5af48..46b4e35fd555 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -188,8 +188,8 @@ static struct amc6821_data *amc6821_update_device(struct device *dev)
!data->valid) {
for (i = 0; i < TEMP_IDX_LEN; i++)
- data->temp[i] = i2c_smbus_read_byte_data(client,
- temp_reg[i]);
+ data->temp[i] = (int8_t)i2c_smbus_read_byte_data(
+ client, temp_reg[i]);
data->stat1 = i2c_smbus_read_byte_data(client,
AMC6821_REG_STAT1);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 6a27eb2fed17..3ac4c03ba77b 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -51,6 +51,7 @@ static int force_tjmax;
module_param_named(tjmax, force_tjmax, int, 0444);
MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+#define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES 128 /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */
@@ -58,7 +59,6 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
-#define TO_PHYS_ID(cpu) (cpu_data(cpu).phys_proc_id)
#define TO_CORE_ID(cpu) (cpu_data(cpu).cpu_core_id)
#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
@@ -102,20 +102,17 @@ struct temp_data {
/* Platform Data per Physical CPU */
struct platform_data {
- struct device *hwmon_dev;
- u16 phys_proc_id;
- struct temp_data *core_data[MAX_CORE_DATA];
+ struct device *hwmon_dev;
+ u16 pkg_id;
+ struct cpumask cpumask;
+ struct temp_data *core_data[MAX_CORE_DATA];
struct device_attribute name_attr;
};
-struct pdev_entry {
- struct list_head list;
- struct platform_device *pdev;
- u16 phys_proc_id;
-};
-
-static LIST_HEAD(pdev_list);
-static DEFINE_MUTEX(pdev_list_mutex);
+/* Keep track of how many package pointers we allocated in init() */
+static int max_packages __read_mostly;
+/* Array of package pointers. Serialized by cpu hotplug lock */
+static struct platform_device **pkg_devices;
static ssize_t show_label(struct device *dev,
struct device_attribute *devattr, char *buf)
@@ -125,7 +122,7 @@ static ssize_t show_label(struct device *dev,
struct temp_data *tdata = pdata->core_data[attr->index];
if (tdata->is_pkg_data)
- return sprintf(buf, "Physical id %u\n", pdata->phys_proc_id);
+ return sprintf(buf, "Package id %u\n", pdata->pkg_id);
return sprintf(buf, "Core %u\n", tdata->cpu_core_id);
}
@@ -138,7 +135,9 @@ static ssize_t show_crit_alarm(struct device *dev,
struct platform_data *pdata = dev_get_drvdata(dev);
struct temp_data *tdata = pdata->core_data[attr->index];
+ mutex_lock(&tdata->update_lock);
rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
+ mutex_unlock(&tdata->update_lock);
return sprintf(buf, "%d\n", (eax >> 5) & 1);
}
@@ -435,18 +434,10 @@ static int chk_ucode_version(unsigned int cpu)
static struct platform_device *coretemp_get_pdev(unsigned int cpu)
{
- u16 phys_proc_id = TO_PHYS_ID(cpu);
- struct pdev_entry *p;
-
- mutex_lock(&pdev_list_mutex);
+ int pkgid = topology_logical_package_id(cpu);
- list_for_each_entry(p, &pdev_list, list)
- if (p->phys_proc_id == phys_proc_id) {
- mutex_unlock(&pdev_list_mutex);
- return p->pdev;
- }
-
- mutex_unlock(&pdev_list_mutex);
+ if (pkgid >= 0 && pkgid < max_packages)
+ return pkg_devices[pkgid];
return NULL;
}
@@ -483,21 +474,11 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
* The attr number is always core id + 2
* The Pkgtemp will always show up as temp1_*, if available
*/
- attr_no = pkg_flag ? 1 : TO_ATTR_NO(cpu);
+ attr_no = pkg_flag ? PKG_SYSFS_ATTR_NO : TO_ATTR_NO(cpu);
if (attr_no > MAX_CORE_DATA - 1)
return -ERANGE;
- /*
- * Provide a single set of attributes for all HT siblings of a core
- * to avoid duplicate sensors (the processor ID and core ID of all
- * HT siblings of a core are the same).
- * Skip if a HT sibling of this core is already registered.
- * This is not an error.
- */
- if (pdata->core_data[attr_no] != NULL)
- return 0;
-
tdata = init_temp_data(cpu, pkg_flag);
if (!tdata)
return -ENOMEM;
@@ -539,21 +520,14 @@ exit_free:
return err;
}
-static void coretemp_add_core(unsigned int cpu, int pkg_flag)
+static void
+coretemp_add_core(struct platform_device *pdev, unsigned int cpu, int pkg_flag)
{
- struct platform_device *pdev = coretemp_get_pdev(cpu);
- int err;
-
- if (!pdev)
- return;
-
- err = create_core_data(pdev, cpu, pkg_flag);
- if (err)
+ if (create_core_data(pdev, cpu, pkg_flag))
dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
}
-static void coretemp_remove_core(struct platform_data *pdata,
- int indx)
+static void coretemp_remove_core(struct platform_data *pdata, int indx)
{
struct temp_data *tdata = pdata->core_data[indx];
@@ -574,7 +548,7 @@ static int coretemp_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
- pdata->phys_proc_id = pdev->id;
+ pdata->pkg_id = pdev->id;
platform_set_drvdata(pdev, pdata);
pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME,
@@ -602,85 +576,33 @@ static struct platform_driver coretemp_driver = {
.remove = coretemp_remove,
};
-static int coretemp_device_add(unsigned int cpu)
+static struct platform_device *coretemp_device_add(unsigned int cpu)
{
- int err;
+ int err, pkgid = topology_logical_package_id(cpu);
struct platform_device *pdev;
- struct pdev_entry *pdev_entry;
- mutex_lock(&pdev_list_mutex);
+ if (pkgid < 0)
+ return ERR_PTR(-ENOMEM);
- pdev = platform_device_alloc(DRVNAME, TO_PHYS_ID(cpu));
- if (!pdev) {
- err = -ENOMEM;
- pr_err("Device allocation failed\n");
- goto exit;
- }
-
- pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL);
- if (!pdev_entry) {
- err = -ENOMEM;
- goto exit_device_put;
- }
+ pdev = platform_device_alloc(DRVNAME, pkgid);
+ if (!pdev)
+ return ERR_PTR(-ENOMEM);
err = platform_device_add(pdev);
if (err) {
- pr_err("Device addition failed (%d)\n", err);
- goto exit_device_free;
- }
-
- pdev_entry->pdev = pdev;
- pdev_entry->phys_proc_id = pdev->id;
-
- list_add_tail(&pdev_entry->list, &pdev_list);
- mutex_unlock(&pdev_list_mutex);
-
- return 0;
-
-exit_device_free:
- kfree(pdev_entry);
-exit_device_put:
- platform_device_put(pdev);
-exit:
- mutex_unlock(&pdev_list_mutex);
- return err;
-}
-
-static void coretemp_device_remove(unsigned int cpu)
-{
- struct pdev_entry *p, *n;
- u16 phys_proc_id = TO_PHYS_ID(cpu);
-
- mutex_lock(&pdev_list_mutex);
- list_for_each_entry_safe(p, n, &pdev_list, list) {
- if (p->phys_proc_id != phys_proc_id)
- continue;
- platform_device_unregister(p->pdev);
- list_del(&p->list);
- kfree(p);
+ platform_device_put(pdev);
+ return ERR_PTR(err);
}
- mutex_unlock(&pdev_list_mutex);
-}
-
-static bool is_any_core_online(struct platform_data *pdata)
-{
- int i;
- /* Find online cores, except pkgtemp data */
- for (i = MAX_CORE_DATA - 1; i >= 0; --i) {
- if (pdata->core_data[i] &&
- !pdata->core_data[i]->is_pkg_data) {
- return true;
- }
- }
- return false;
+ pkg_devices[pkgid] = pdev;
+ return pdev;
}
-static void get_core_online(unsigned int cpu)
+static int coretemp_cpu_online(unsigned int cpu)
{
- struct cpuinfo_x86 *c = &cpu_data(cpu);
struct platform_device *pdev = coretemp_get_pdev(cpu);
- int err;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ struct platform_data *pdata;
/*
* CPUID.06H.EAX[0] indicates whether the CPU has thermal
@@ -688,12 +610,12 @@ static void get_core_online(unsigned int cpu)
* without thermal sensors will be filtered out.
*/
if (!cpu_has(c, X86_FEATURE_DTHERM))
- return;
+ return -ENODEV;
if (!pdev) {
/* Check the microcode version of the CPU */
if (chk_ucode_version(cpu))
- return;
+ return -EINVAL;
/*
* Alright, we have DTS support.
@@ -701,101 +623,100 @@ static void get_core_online(unsigned int cpu)
* online. So, initialize per-pkg data structures and
* then bring this core online.
*/
- err = coretemp_device_add(cpu);
- if (err)
- return;
+ pdev = coretemp_device_add(cpu);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
/*
* Check whether pkgtemp support is available.
* If so, add interfaces for pkgtemp.
*/
if (cpu_has(c, X86_FEATURE_PTS))
- coretemp_add_core(cpu, 1);
+ coretemp_add_core(pdev, cpu, 1);
}
+
+ pdata = platform_get_drvdata(pdev);
/*
- * Physical CPU device already exists.
- * So, just add interfaces for this core.
+ * Check whether a thread sibling is already online. If not, add the
+ * interface for this CPU core.
*/
- coretemp_add_core(cpu, 0);
+ if (!cpumask_intersects(&pdata->cpumask, topology_sibling_cpumask(cpu)))
+ coretemp_add_core(pdev, cpu, 0);
+
+ cpumask_set_cpu(cpu, &pdata->cpumask);
+ return 0;
}
-static void put_core_offline(unsigned int cpu)
+static int coretemp_cpu_offline(unsigned int cpu)
{
- int i, indx;
- struct platform_data *pdata;
struct platform_device *pdev = coretemp_get_pdev(cpu);
+ struct platform_data *pd;
+ struct temp_data *tdata;
+ int indx, target;
/* If the physical CPU device does not exist, just return */
if (!pdev)
- return;
-
- pdata = platform_get_drvdata(pdev);
-
- indx = TO_ATTR_NO(cpu);
+ return 0;
/* The core id is too big, just return */
+ indx = TO_ATTR_NO(cpu);
if (indx > MAX_CORE_DATA - 1)
- return;
+ return 0;
- if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
- coretemp_remove_core(pdata, indx);
+ pd = platform_get_drvdata(pdev);
+ tdata = pd->core_data[indx];
+
+ cpumask_clear_cpu(cpu, &pd->cpumask);
/*
- * If a HT sibling of a core is taken offline, but another HT sibling
- * of the same core is still online, register the alternate sibling.
- * This ensures that exactly one set of attributes is provided as long
- * as at least one HT sibling of a core is online.
+ * If this is the last thread sibling, remove the CPU core
+ * interface. If there is still a sibling online, transfer the
+ * target cpu of that core interface to it.
*/
- for_each_sibling(i, cpu) {
- if (i != cpu) {
- get_core_online(i);
- /*
- * Display temperature sensor data for one HT sibling
- * per core only, so abort the loop after one such
- * sibling has been found.
- */
- break;
- }
+ target = cpumask_any_and(&pd->cpumask, topology_sibling_cpumask(cpu));
+ if (target >= nr_cpu_ids) {
+ coretemp_remove_core(pd, indx);
+ } else if (tdata && tdata->cpu == cpu) {
+ mutex_lock(&tdata->update_lock);
+ tdata->cpu = target;
+ mutex_unlock(&tdata->update_lock);
}
+
/*
- * If all cores in this pkg are offline, remove the device.
- * coretemp_device_remove calls unregister_platform_device,
- * which in turn calls coretemp_remove. This removes the
- * pkgtemp entry and does other clean ups.
+ * If all cores in this pkg are offline, remove the device. This
+ * will invoke the platform driver remove function, which cleans up
+ * the rest.
*/
- if (!is_any_core_online(pdata))
- coretemp_device_remove(cpu);
-}
+ if (cpumask_empty(&pd->cpumask)) {
+ pkg_devices[topology_logical_package_id(cpu)] = NULL;
+ platform_device_unregister(pdev);
+ return 0;
+ }
-static int coretemp_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long) hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_DOWN_FAILED:
- get_core_online(cpu);
- break;
- case CPU_DOWN_PREPARE:
- put_core_offline(cpu);
- break;
+ /*
+ * Check whether this core is the target for the package
+ * interface. We need to assign it to some other cpu.
+ */
+ tdata = pd->core_data[PKG_SYSFS_ATTR_NO];
+ if (tdata && tdata->cpu == cpu) {
+ target = cpumask_first(&pd->cpumask);
+ mutex_lock(&tdata->update_lock);
+ tdata->cpu = target;
+ mutex_unlock(&tdata->update_lock);
}
- return NOTIFY_OK;
+ return 0;
}
-
-static struct notifier_block coretemp_cpu_notifier __refdata = {
- .notifier_call = coretemp_cpu_callback,
-};
-
static const struct x86_cpu_id __initconst coretemp_ids[] = {
{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
{}
};
MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);
+static enum cpuhp_state coretemp_hp_online;
+
static int __init coretemp_init(void)
{
- int i, err;
+ int err;
/*
* CPUID.06H.EAX[0] indicates whether the CPU has thermal
@@ -805,54 +726,38 @@ static int __init coretemp_init(void)
if (!x86_match_cpu(coretemp_ids))
return -ENODEV;
+ max_packages = topology_max_packages();
+ pkg_devices = kzalloc(max_packages * sizeof(struct platform_device *),
+ GFP_KERNEL);
+ if (!pkg_devices)
+ return -ENOMEM;
+
err = platform_driver_register(&coretemp_driver);
if (err)
- goto exit;
+ return err;
- cpu_notifier_register_begin();
- for_each_online_cpu(i)
- get_core_online(i);
-
-#ifndef CONFIG_HOTPLUG_CPU
- if (list_empty(&pdev_list)) {
- cpu_notifier_register_done();
- err = -ENODEV;
- goto exit_driver_unreg;
- }
-#endif
-
- __register_hotcpu_notifier(&coretemp_cpu_notifier);
- cpu_notifier_register_done();
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/coretemp:online",
+ coretemp_cpu_online, coretemp_cpu_offline);
+ if (err < 0)
+ goto outdrv;
+ coretemp_hp_online = err;
return 0;
-#ifndef CONFIG_HOTPLUG_CPU
-exit_driver_unreg:
+outdrv:
platform_driver_unregister(&coretemp_driver);
-#endif
-exit:
+ kfree(pkg_devices);
return err;
}
+module_init(coretemp_init)
static void __exit coretemp_exit(void)
{
- struct pdev_entry *p, *n;
-
- cpu_notifier_register_begin();
- __unregister_hotcpu_notifier(&coretemp_cpu_notifier);
- mutex_lock(&pdev_list_mutex);
- list_for_each_entry_safe(p, n, &pdev_list, list) {
- platform_device_unregister(p->pdev);
- list_del(&p->list);
- kfree(p);
- }
- mutex_unlock(&pdev_list_mutex);
- cpu_notifier_register_done();
+ cpuhp_remove_state(coretemp_hp_online);
platform_driver_unregister(&coretemp_driver);
+ kfree(pkg_devices);
}
+module_exit(coretemp_exit)
MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>");
MODULE_DESCRIPTION("Intel Core temperature monitor");
MODULE_LICENSE("GPL");
-
-module_init(coretemp_init)
-module_exit(coretemp_exit)
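
The coretemp rework above replaces the old CPU notifier with the cpuhp state machine: cpuhp_setup_state() registers an online and an offline callback, runs the online callback on every CPU that is already up, and (for CPUHP_AP_ONLINE_DYN) returns the dynamically allocated state number that is later handed to cpuhp_remove_state(). A minimal, hypothetical module skeleton showing just that pattern (names are made up; this is not coretemp code):

#include <linux/module.h>
#include <linux/cpuhotplug.h>

static enum cpuhp_state example_hp_online;

static int example_cpu_online(unsigned int cpu)
{
	pr_info("example: cpu %u came online\n", cpu);
	return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
	pr_info("example: cpu %u going offline\n", cpu);
	return 0;
}

static int __init example_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_cpu_online, example_cpu_offline);
	if (ret < 0)
		return ret;
	example_hp_online = ret;
	return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
	cpuhp_remove_state(example_hp_online);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");
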
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index edf550fc4eef..0043a4c02b85 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -166,7 +166,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
if (res)
return res;
- val = (val * 10 / 625) * 8;
+ val = (clamp_val(val, -128000, 128000) * 10 / 625) * 8;
mutex_lock(&data->update_lock);
data->temp[attr->index] = val;
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 24e395c5907d..4b870ee9b0d3 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -251,7 +251,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
if (result < 0)
return result;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, -63000, 127000), 1000);
mutex_lock(&data->update_lock);
data->temp_min[nr] = val;
@@ -273,7 +273,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
if (result < 0)
return result;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, -63000, 127000), 1000);
mutex_lock(&data->update_lock);
data->temp_max[nr] = val;
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index f37fe2011640..4aee5adf9ef2 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -215,12 +215,13 @@ static ssize_t set_in(struct device *dev, struct device_attribute *devattr,
if (err < 0)
return err;
- val = DIV_ROUND_CLOSEST(val * 0xC0, nominal_mv[nr]);
+ val = clamp_val(val, 0, 255 * nominal_mv[nr] / 192);
+ val = DIV_ROUND_CLOSEST(val * 192, nominal_mv[nr]);
reg = (sf == min) ? EMC6W201_REG_IN_LOW(nr)
: EMC6W201_REG_IN_HIGH(nr);
mutex_lock(&data->update_lock);
- data->in[sf][nr] = clamp_val(val, 0, 255);
+ data->in[sf][nr] = val;
err = emc6w201_write8(client, reg, data->in[sf][nr]);
mutex_unlock(&data->update_lock);
@@ -252,12 +253,13 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
if (err < 0)
return err;
+ val = clamp_val(val, -127000, 127000);
val = DIV_ROUND_CLOSEST(val, 1000);
reg = (sf == min) ? EMC6W201_REG_TEMP_LOW(nr)
: EMC6W201_REG_TEMP_HIGH(nr);
mutex_lock(&data->update_lock);
- data->temp[sf][nr] = clamp_val(val, -127, 127);
+ data->temp[sf][nr] = val;
err = emc6w201_write8(client, reg, data->temp[sf][nr]);
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index b96a2a9e4df7..628be9c95ff9 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -193,14 +193,17 @@ static inline unsigned int rpm_from_cnt(u8 cnt, u32 clk_freq, u16 p,
* Convert fan RPM value from sysfs into count value for fan controller
* register (FAN_SET_CNT).
*/
-static inline unsigned char cnt_from_rpm(u32 rpm, u32 clk_freq, u16 p,
+static inline unsigned char cnt_from_rpm(unsigned long rpm, u32 clk_freq, u16 p,
u8 clk_div, u8 gear_mult)
{
- if (!rpm) /* to stop the fan, set cnt to 255 */
+ unsigned long f1 = clk_freq * 30 * gear_mult;
+ unsigned long f2 = p * clk_div;
+
+ if (!rpm) /* to stop the fan, set cnt to 255 */
return 0xff;
- return clamp_val(((clk_freq * 30 * gear_mult) / (rpm * p * clk_div)),
- 0, 255);
+ rpm = clamp_val(rpm, f1 / (255 * f2), ULONG_MAX / f2);
+ return DIV_ROUND_CLOSEST(f1, rpm * f2);
}
/* helper to grab and cache data, at most one time per second */
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index a74c075a30ec..3932f9276c07 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -38,12 +38,15 @@ struct hwmon_device {
#define to_hwmon_device(d) container_of(d, struct hwmon_device, dev)
+#define MAX_SYSFS_ATTR_NAME_LENGTH 32
+
struct hwmon_device_attribute {
struct device_attribute dev_attr;
const struct hwmon_ops *ops;
enum hwmon_sensor_types type;
u32 attr;
int index;
+ char name[MAX_SYSFS_ATTR_NAME_LENGTH];
};
#define to_hwmon_attr(d) \
@@ -178,6 +181,22 @@ static ssize_t hwmon_attr_show(struct device *dev,
return sprintf(buf, "%ld\n", val);
}
+static ssize_t hwmon_attr_show_string(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct hwmon_device_attribute *hattr = to_hwmon_attr(devattr);
+ char *s;
+ int ret;
+
+ ret = hattr->ops->read_string(dev, hattr->type, hattr->attr,
+ hattr->index, &s);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%s\n", s);
+}
+
static ssize_t hwmon_attr_store(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
@@ -205,6 +224,17 @@ static int hwmon_attr_base(enum hwmon_sensor_types type)
return 1;
}
+static bool is_string_attr(enum hwmon_sensor_types type, u32 attr)
+{
+ return (type == hwmon_temp && attr == hwmon_temp_label) ||
+ (type == hwmon_in && attr == hwmon_in_label) ||
+ (type == hwmon_curr && attr == hwmon_curr_label) ||
+ (type == hwmon_power && attr == hwmon_power_label) ||
+ (type == hwmon_energy && attr == hwmon_energy_label) ||
+ (type == hwmon_humidity && attr == hwmon_humidity_label) ||
+ (type == hwmon_fan && attr == hwmon_fan_label);
+}
+
static struct attribute *hwmon_genattr(struct device *dev,
const void *drvdata,
enum hwmon_sensor_types type,
@@ -218,6 +248,7 @@ static struct attribute *hwmon_genattr(struct device *dev,
struct attribute *a;
umode_t mode;
char *name;
+ bool is_string = is_string_attr(type, attr);
/* The attribute is invisible if there is no template string */
if (!template)
@@ -227,32 +258,31 @@ static struct attribute *hwmon_genattr(struct device *dev,
if (!mode)
return ERR_PTR(-ENOENT);
- if ((mode & S_IRUGO) && !ops->read)
+ if ((mode & S_IRUGO) && ((is_string && !ops->read_string) ||
+ (!is_string && !ops->read)))
return ERR_PTR(-EINVAL);
if ((mode & S_IWUGO) && !ops->write)
return ERR_PTR(-EINVAL);
+ hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
+ if (!hattr)
+ return ERR_PTR(-ENOMEM);
+
if (type == hwmon_chip) {
name = (char *)template;
} else {
- name = devm_kzalloc(dev, strlen(template) + 16, GFP_KERNEL);
- if (!name)
- return ERR_PTR(-ENOMEM);
- scnprintf(name, strlen(template) + 16, template,
+ scnprintf(hattr->name, sizeof(hattr->name), template,
index + hwmon_attr_base(type));
+ name = hattr->name;
}
- hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
- if (!hattr)
- return ERR_PTR(-ENOMEM);
-
hattr->type = type;
hattr->attr = attr;
hattr->index = index;
hattr->ops = ops;
dattr = &hattr->dev_attr;
- dattr->show = hwmon_attr_show;
+ dattr->show = is_string ? hwmon_attr_show_string : hwmon_attr_show;
dattr->store = hwmon_attr_store;
a = &dattr->attr;
@@ -263,7 +293,11 @@ static struct attribute *hwmon_genattr(struct device *dev,
return a;
}
-static const char * const hwmon_chip_attr_templates[] = {
+/*
+ * Chip attributes are not attribute templates but actual sysfs attributes.
+ * See hwmon_genattr() for special handling.
+ */
+static const char * const hwmon_chip_attrs[] = {
[hwmon_chip_temp_reset_history] = "temp_reset_history",
[hwmon_chip_in_reset_history] = "in_reset_history",
[hwmon_chip_curr_reset_history] = "curr_reset_history",
@@ -400,7 +434,7 @@ static const char * const hwmon_pwm_attr_templates[] = {
};
static const char * const *__templates[] = {
- [hwmon_chip] = hwmon_chip_attr_templates,
+ [hwmon_chip] = hwmon_chip_attrs,
[hwmon_temp] = hwmon_temp_attr_templates,
[hwmon_in] = hwmon_in_attr_templates,
[hwmon_curr] = hwmon_curr_attr_templates,
@@ -412,7 +446,7 @@ static const char * const *__templates[] = {
};
static const int __templates_size[] = {
- [hwmon_chip] = ARRAY_SIZE(hwmon_chip_attr_templates),
+ [hwmon_chip] = ARRAY_SIZE(hwmon_chip_attrs),
[hwmon_temp] = ARRAY_SIZE(hwmon_temp_attr_templates),
[hwmon_in] = ARRAY_SIZE(hwmon_in_attr_templates),
[hwmon_curr] = ARRAY_SIZE(hwmon_curr_attr_templates),
@@ -526,9 +560,9 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
hdev = &hwdev->dev;
- if (chip && chip->ops->is_visible) {
+ if (chip) {
struct attribute **attrs;
- int ngroups = 2;
+ int ngroups = 2; /* terminating NULL plus &hwdev->groups */
if (groups)
for (i = 0; groups[i]; i++)
@@ -572,7 +606,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
if (err)
goto free_hwmon;
- if (chip && chip->ops->is_visible && chip->ops->read &&
+ if (chip && chip->ops->read &&
chip->info[0]->type == hwmon_chip &&
(chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
const struct hwmon_channel_info **info = chip->info;
@@ -626,8 +660,8 @@ EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
* @dev: the parent device
* @name: hwmon name attribute
* @drvdata: driver data to attach to created device
- * @info: Pointer to hwmon chip information
- * @groups - pointer to list of driver specific attribute groups
+ * @info: pointer to hwmon chip information
+ * @extra_groups: pointer to list of additional non-standard attribute groups
*
* hwmon_device_unregister() must be called when the device is no
* longer needed.
@@ -638,12 +672,12 @@ struct device *
hwmon_device_register_with_info(struct device *dev, const char *name,
void *drvdata,
const struct hwmon_chip_info *chip,
- const struct attribute_group **groups)
+ const struct attribute_group **extra_groups)
{
- if (chip && (!chip->ops || !chip->info))
+ if (chip && (!chip->ops || !chip->ops->is_visible || !chip->info))
return ERR_PTR(-EINVAL);
- return __hwmon_device_register(dev, name, drvdata, chip, groups);
+ return __hwmon_device_register(dev, name, drvdata, chip, extra_groups);
}
EXPORT_SYMBOL_GPL(hwmon_device_register_with_info);
@@ -658,6 +692,9 @@ EXPORT_SYMBOL_GPL(hwmon_device_register_with_info);
*/
struct device *hwmon_device_register(struct device *dev)
{
+ dev_warn(dev,
+ "hwmon_device_register() is deprecated. Please convert the driver to use hwmon_device_register_with_info().\n");
+
return hwmon_device_register_with_groups(dev, NULL, NULL, NULL);
}
EXPORT_SYMBOL_GPL(hwmon_device_register);
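
With the read_string support added to the hwmon core above, a driver using the _with_info interface can publish label attributes without hand-rolling extra sysfs attributes: it declares HWMON_T_LABEL (or the in/curr/power/fan equivalent) in its channel config and supplies a read_string callback. A rough driver-side sketch follows; the names, labels and stub callbacks are assumptions, not an existing driver, and registration would go through devm_hwmon_device_register_with_info() as in the adt7411 conversion earlier in this patch.

#include <linux/hwmon.h>
#include <linux/errno.h>

static const char * const example_temp_labels[] = { "cpu", "board" };

static umode_t example_is_visible(const void *data,
				  enum hwmon_sensor_types type,
				  u32 attr, int channel)
{
	return 0444;	/* everything read-only in this sketch */
}

static int example_read(struct device *dev, enum hwmon_sensor_types type,
			u32 attr, int channel, long *val)
{
	*val = 0;	/* a real driver would read the hardware here */
	return 0;
}

static int example_read_string(struct device *dev,
			       enum hwmon_sensor_types type,
			       u32 attr, int channel, char **str)
{
	if (type == hwmon_temp && attr == hwmon_temp_label) {
		*str = (char *)example_temp_labels[channel];
		return 0;
	}
	return -EOPNOTSUPP;
}

static const u32 example_temp_config[] = {
	HWMON_T_INPUT | HWMON_T_LABEL,
	HWMON_T_INPUT | HWMON_T_LABEL,
	0
};

static const struct hwmon_channel_info example_temp = {
	.type = hwmon_temp,
	.config = example_temp_config,
};

static const struct hwmon_channel_info *example_info[] = {
	&example_temp,
	NULL
};

static const struct hwmon_ops example_hwmon_ops = {
	.is_visible	= example_is_visible,
	.read		= example_read,
	.read_string	= example_read_string,
};

static const struct hwmon_chip_info example_chip_info = {
	.ops	= &example_hwmon_ops,
	.info	= example_info,
};
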
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 6ff773fcaefb..29c8136ce9c5 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -136,7 +136,8 @@ static const int lm85_scaling[] = { /* .001 Volts */
#define SCALE(val, from, to) (((val) * (to) + ((from) / 2)) / (from))
#define INS_TO_REG(n, val) \
- clamp_val(SCALE(val, lm85_scaling[n], 192), 0, 255)
+ SCALE(clamp_val(val, 0, 255 * lm85_scaling[n] / 192), \
+ lm85_scaling[n], 192)
#define INSEXT_FROM_REG(n, val, ext) \
SCALE(((val) << 4) + (ext), 192 << 4, lm85_scaling[n])
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index a5e295826aea..13cca3606e06 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -121,7 +121,7 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C };
#define IN_FROM_REG(reg, scale) (((reg) * (scale) + 96) / 192)
#define IN_TO_REG(val, scale) ((val) <= 0 ? 0 : \
- (val) * 192 >= (scale) * 255 ? 255 : \
+ (val) >= (scale) * 255 / 192 ? 255 : \
((val) * 192 + (scale) / 2) / (scale))
#define TEMP_FROM_REG(reg) ((reg) * 1000)
@@ -154,7 +154,6 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C };
*/
struct lm87_data {
- struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* In jiffies */
@@ -181,6 +180,8 @@ struct lm87_data {
u16 alarms; /* register values, combined */
u8 vid; /* register values, combined */
u8 vrm;
+
+ const struct attribute_group *attr_groups[6];
};
static inline int lm87_read_value(struct i2c_client *client, u8 reg)
@@ -195,7 +196,7 @@ static inline int lm87_write_value(struct i2c_client *client, u8 reg, u8 value)
static struct lm87_data *lm87_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
mutex_lock(&data->update_lock);
@@ -309,7 +310,7 @@ static ssize_t show_in_max(struct device *dev,
static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
long val;
@@ -330,7 +331,7 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
long val;
@@ -396,7 +397,7 @@ static ssize_t show_temp_high(struct device *dev,
static ssize_t set_temp_low(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
long val;
@@ -416,7 +417,7 @@ static ssize_t set_temp_low(struct device *dev, struct device_attribute *attr,
static ssize_t set_temp_high(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
long val;
@@ -495,7 +496,7 @@ static ssize_t show_fan_div(struct device *dev,
static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
long val;
@@ -522,7 +523,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
long val;
@@ -635,7 +636,7 @@ static ssize_t show_aout(struct device *dev, struct device_attribute *attr,
static ssize_t set_aout(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
long val;
int err;
@@ -841,23 +842,18 @@ static int lm87_detect(struct i2c_client *client, struct i2c_board_info *info)
return 0;
}
-static void lm87_remove_files(struct i2c_client *client)
+static void lm87_restore_config(void *arg)
{
- struct device *dev = &client->dev;
-
- sysfs_remove_group(&dev->kobj, &lm87_group);
- sysfs_remove_group(&dev->kobj, &lm87_group_in6);
- sysfs_remove_group(&dev->kobj, &lm87_group_fan1);
- sysfs_remove_group(&dev->kobj, &lm87_group_in7);
- sysfs_remove_group(&dev->kobj, &lm87_group_fan2);
- sysfs_remove_group(&dev->kobj, &lm87_group_temp3);
- sysfs_remove_group(&dev->kobj, &lm87_group_in0_5);
- sysfs_remove_group(&dev->kobj, &lm87_group_vid);
+ struct i2c_client *client = arg;
+ struct lm87_data *data = i2c_get_clientdata(client);
+
+ lm87_write_value(client, LM87_REG_CONFIG, data->config);
}
-static void lm87_init_client(struct i2c_client *client)
+static int lm87_init_client(struct i2c_client *client)
{
struct lm87_data *data = i2c_get_clientdata(client);
+ int rc;
if (dev_get_platdata(&client->dev)) {
data->channel = *(u8 *)dev_get_platdata(&client->dev);
@@ -868,6 +864,10 @@ static void lm87_init_client(struct i2c_client *client)
}
data->config = lm87_read_value(client, LM87_REG_CONFIG) & 0x6F;
+ rc = devm_add_action(&client->dev, lm87_restore_config, client);
+ if (rc)
+ return rc;
+
if (!(data->config & 0x01)) {
int i;
@@ -895,12 +895,15 @@ static void lm87_init_client(struct i2c_client *client)
if ((data->config & 0x09) != 0x01)
lm87_write_value(client, LM87_REG_CONFIG,
(data->config & 0x77) | 0x01);
+ return 0;
}
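The devm_add_action() registration above is what lets this patch drop the explicit remove() callback further down: the devres core restores the configuration register automatically on probe failure and on device removal. A minimal sketch of the same pattern, using hypothetical foo_* names and the devm_add_action_or_reset() variant (which additionally runs the callback if the registration itself fails):

#include <linux/device.h>
#include <linux/i2c.h>

#define FOO_REG_CONFIG	0x40	/* hypothetical configuration register */

static void foo_restore_config(void *arg)
{
	struct i2c_client *client = arg;

	/* undo whatever foo_init_client() changed on the chip */
	i2c_smbus_write_byte_data(client, FOO_REG_CONFIG, 0x00);
}

static int foo_init_client(struct i2c_client *client)
{
	/*
	 * Register the undo action before modifying the chip; devm runs
	 * registered actions in reverse order when the device goes away,
	 * so no explicit remove() handler is needed.
	 */
	return devm_add_action_or_reset(&client->dev, foo_restore_config,
					client);
}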
static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct lm87_data *data;
+ struct device *hwmon_dev;
int err;
+ unsigned int group_tail = 0;
data = devm_kzalloc(&client->dev, sizeof(struct lm87_data), GFP_KERNEL);
if (!data)
@@ -910,7 +913,9 @@ static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
mutex_init(&data->update_lock);
/* Initialize the LM87 chip */
- lm87_init_client(client);
+ err = lm87_init_client(client);
+ if (err)
+ return err;
data->in_scale[0] = 2500;
data->in_scale[1] = 2700;
@@ -921,72 +926,34 @@ static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
data->in_scale[6] = 1875;
data->in_scale[7] = 1875;
- /* Register sysfs hooks */
- err = sysfs_create_group(&client->dev.kobj, &lm87_group);
- if (err)
- goto exit_stop;
-
- if (data->channel & CHAN_NO_FAN(0)) {
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_in6);
- if (err)
- goto exit_remove;
- } else {
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_fan1);
- if (err)
- goto exit_remove;
- }
-
- if (data->channel & CHAN_NO_FAN(1)) {
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_in7);
- if (err)
- goto exit_remove;
- } else {
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_fan2);
- if (err)
- goto exit_remove;
- }
-
- if (data->channel & CHAN_TEMP3) {
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_temp3);
- if (err)
- goto exit_remove;
- } else {
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_in0_5);
- if (err)
- goto exit_remove;
- }
+ /*
+ * Construct the list of attribute groups; which groups are present
+ * depends on the configuration of the chip.
+ */
+ data->attr_groups[group_tail++] = &lm87_group;
+ if (data->channel & CHAN_NO_FAN(0))
+ data->attr_groups[group_tail++] = &lm87_group_in6;
+ else
+ data->attr_groups[group_tail++] = &lm87_group_fan1;
+
+ if (data->channel & CHAN_NO_FAN(1))
+ data->attr_groups[group_tail++] = &lm87_group_in7;
+ else
+ data->attr_groups[group_tail++] = &lm87_group_fan2;
+
+ if (data->channel & CHAN_TEMP3)
+ data->attr_groups[group_tail++] = &lm87_group_temp3;
+ else
+ data->attr_groups[group_tail++] = &lm87_group_in0_5;
if (!(data->channel & CHAN_NO_VID)) {
data->vrm = vid_which_vrm();
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_vid);
- if (err)
- goto exit_remove;
- }
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove;
+ data->attr_groups[group_tail++] = &lm87_group_vid;
}
- return 0;
-
-exit_remove:
- lm87_remove_files(client);
-exit_stop:
- lm87_write_value(client, LM87_REG_CONFIG, data->config);
- return err;
-}
-
-static int lm87_remove(struct i2c_client *client)
-{
- struct lm87_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- lm87_remove_files(client);
-
- lm87_write_value(client, LM87_REG_CONFIG, data->config);
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(
+ &client->dev, client->name, client, data->attr_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
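The error-unwinding labels disappear because devm_hwmon_device_register_with_groups() accepts a NULL-terminated array of attribute groups, so the optional groups are collected up front and registered in a single call (the attr_groups array is presumably added to struct lm87_data elsewhere in this patch, sized for every possible group plus the terminator). A minimal sketch of the idiom, assuming hypothetical foo_base_group and foo_fan_group definitions:

/* must outlive the hwmon device: room for two groups plus terminator */
static const struct attribute_group *foo_groups[3];

static int foo_register_hwmon(struct device *dev, void *drvdata, bool has_fan)
{
	struct device *hwmon_dev;
	int i = 0;

	foo_groups[i++] = &foo_base_group;		/* always present */
	if (has_fan)
		foo_groups[i++] = &foo_fan_group;	/* chip dependent */
	foo_groups[i] = NULL;		/* terminator expected by hwmon */

	hwmon_dev = devm_hwmon_device_register_with_groups(dev, "foo",
							    drvdata,
							    foo_groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}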
/*
@@ -1006,7 +973,6 @@ static struct i2c_driver lm87_driver = {
.name = "lm87",
},
.probe = lm87_probe,
- .remove = lm87_remove,
.id_table = lm87_id,
.detect = lm87_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c
index 972444a14cca..1929734c3b1d 100644
--- a/drivers/hwmon/mcp3021.c
+++ b/drivers/hwmon/mcp3021.c
@@ -4,6 +4,7 @@
* Copyright (C) 2008-2009, 2012 Freescale Semiconductor, Inc.
* Author: Mingkai Hu <Mingkai.hu@freescale.com>
* Reworked by Sven Schuchmann <schuchmann@schleissheimer.de>
+ * DT support added by Clemens Gruber <clemens.gruber@pqgruber.com>
*
* This driver exports the value of the analog input voltage to sysfs; the
* voltage unit is mV. Through the sysfs interface, the lm-sensors tool
@@ -22,11 +23,13 @@
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
-/* Vdd info */
-#define MCP3021_VDD_MAX 5500
-#define MCP3021_VDD_MIN 2700
-#define MCP3021_VDD_REF 3300
+/* Vdd / reference voltage in millivolt */
+#define MCP3021_VDD_REF_MAX 5500
+#define MCP3021_VDD_REF_MIN 2700
+#define MCP3021_VDD_REF_DEFAULT 3300
/* output format */
#define MCP3021_SAR_SHIFT 2
@@ -47,7 +50,7 @@ enum chips {
*/
struct mcp3021_data {
struct device *hwmon_dev;
- u32 vdd; /* device power supply */
+ u32 vdd; /* supply and reference voltage in millivolt */
u16 sar_shift;
u16 sar_mask;
u8 output_res;
@@ -99,13 +102,14 @@ static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", in_input);
}
-static DEVICE_ATTR(in0_input, S_IRUGO, show_in_input, NULL);
+static DEVICE_ATTR(in0_input, 0444, show_in_input, NULL);
static int mcp3021_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int err;
struct mcp3021_data *data = NULL;
+ struct device_node *np = client->dev.of_node;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
@@ -117,6 +121,21 @@ static int mcp3021_probe(struct i2c_client *client,
i2c_set_clientdata(client, data);
+ if (np) {
+ if (!of_property_read_u32(np, "reference-voltage-microvolt",
+ &data->vdd))
+ data->vdd /= 1000;
+ else
+ data->vdd = MCP3021_VDD_REF_DEFAULT;
+ } else {
+ u32 *pdata = dev_get_platdata(&client->dev);
+
+ if (pdata)
+ data->vdd = *pdata;
+ else
+ data->vdd = MCP3021_VDD_REF_DEFAULT;
+ }
+
switch (id->driver_data) {
case mcp3021:
data->sar_shift = MCP3021_SAR_SHIFT;
@@ -131,13 +150,8 @@ static int mcp3021_probe(struct i2c_client *client,
break;
}
- if (dev_get_platdata(&client->dev)) {
- data->vdd = *(u32 *)dev_get_platdata(&client->dev);
- if (data->vdd > MCP3021_VDD_MAX || data->vdd < MCP3021_VDD_MIN)
- return -EINVAL;
- } else {
- data->vdd = MCP3021_VDD_REF;
- }
+ if (data->vdd > MCP3021_VDD_REF_MAX || data->vdd < MCP3021_VDD_REF_MIN)
+ return -EINVAL;
err = sysfs_create_file(&client->dev.kobj, &dev_attr_in0_input.attr);
if (err)
@@ -173,9 +187,19 @@ static const struct i2c_device_id mcp3021_id[] = {
};
MODULE_DEVICE_TABLE(i2c, mcp3021_id);
+#ifdef CONFIG_OF
+static const struct of_device_id of_mcp3021_match[] = {
+ { .compatible = "microchip,mcp3021", .data = (void *)mcp3021 },
+ { .compatible = "microchip,mcp3221", .data = (void *)mcp3221 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_mcp3021_match);
+#endif
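The probe change reads the reference voltage from the device tree in microvolt and converts it to the driver's millivolt unit, falling back to platform data and finally to the 3.3 V default; the of_device_id table above then lets of_match_ptr() compile out when CONFIG_OF is disabled. A minimal sketch of the property-with-fallback idiom, using a hypothetical helper name:

#include <linux/device.h>
#include <linux/of.h>

static u32 example_get_vref_mv(struct device *dev)
{
	u32 uv;

	/* DT expresses the reference in microvolt; the driver wants mV */
	if (dev->of_node &&
	    !of_property_read_u32(dev->of_node,
				  "reference-voltage-microvolt", &uv))
		return uv / 1000;

	return 3300;	/* no DT node or no property: assume a 3.3 V rail */
}

A caller would then range-check the result against MCP3021_VDD_REF_MIN/MAX exactly as the probe above does.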
+
static struct i2c_driver mcp3021_driver = {
.driver = {
.name = "mcp3021",
+ .of_match_table = of_match_ptr(of_mcp3021_match),
},
.probe = mcp3021_probe,
.remove = mcp3021_remove,
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 3ce33d244cc0..12b94b094c0d 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -259,13 +259,15 @@ static int nct7802_read_fan_min(struct nct7802_data *data, u8 reg_fan_low,
ret = 0;
else if (ret)
ret = DIV_ROUND_CLOSEST(1350000U, ret);
+ else
+ ret = 1350000U;
abort:
mutex_unlock(&data->access_lock);
return ret;
}
static int nct7802_write_fan_min(struct nct7802_data *data, u8 reg_fan_low,
- u8 reg_fan_high, unsigned int limit)
+ u8 reg_fan_high, unsigned long limit)
{
int err;
@@ -326,8 +328,8 @@ static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index,
int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
int err;
+ voltage = clamp_val(voltage, 0, 0x3ff * nct7802_vmul[nr]);
voltage = DIV_ROUND_CLOSEST(voltage, nct7802_vmul[nr]);
- voltage = clamp_val(voltage, 0, 0x3ff);
mutex_lock(&data->access_lock);
err = regmap_write(data->regmap,
@@ -402,7 +404,7 @@ static ssize_t store_temp(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
err = regmap_write(data->regmap, nr, val & 0xff);
return err ? : count;
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 3baa4f4a8c5e..4ab5293c7bf0 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -499,15 +499,27 @@ static int adm1275_probe(struct i2c_client *client,
pindex = 2;
tindex = 3;
- info->func[0] |= PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT;
+ info->func[0] |= PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
+
+ /* Enable VOUT if not enabled (it is disabled by default) */
+ if (!(config & ADM1278_VOUT_EN)) {
+ config |= ADM1278_VOUT_EN;
+ ret = i2c_smbus_write_byte_data(client,
+ ADM1275_PMON_CONFIG,
+ config);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed to enable VOUT monitoring\n");
+ return -ENODEV;
+ }
+ }
+
if (config & ADM1278_TEMP1_EN)
info->func[0] |=
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
if (config & ADM1278_VIN_EN)
info->func[0] |= PMBUS_HAVE_VIN;
- if (config & ADM1278_VOUT_EN)
- info->func[0] |=
- PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
break;
case adm1293:
case adm1294:
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 559a3dcd64d8..094f948f99ff 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -251,6 +251,7 @@ static const struct of_device_id scpi_of_match[] = {
{.compatible = "arm,scpi-sensors"},
{},
};
+MODULE_DEVICE_TABLE(of, scpi_of_match);
static struct platform_driver scpi_hwmon_platdrv = {
.driver = {
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 6ac7cda72d4c..15650f247679 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -77,14 +77,15 @@ static inline unsigned int IN_FROM_REG(u8 reg, int n)
static inline u8 IN_TO_REG(unsigned long val, int n)
{
- return clamp_val(SCALE(val, 192, nom_mv[n]), 0, 255);
+ val = clamp_val(val, 0, nom_mv[n] * 255 / 192);
+ return SCALE(val, 192, nom_mv[n]);
}
/*
* TEMP: 0.001 degC units (-128C to +127C)
* REG: 1C/bit, two's complement
*/
-static inline s8 TEMP_TO_REG(int val)
+static inline s8 TEMP_TO_REG(long val)
{
return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
}
diff --git a/drivers/hwmon/tc654.c b/drivers/hwmon/tc654.c
new file mode 100644
index 000000000000..18136e1f95fd
--- /dev/null
+++ b/drivers/hwmon/tc654.c
@@ -0,0 +1,514 @@
+/*
+ * tc654.c - Linux kernel modules for fan speed controller
+ *
+ * Copyright (C) 2016 Allied Telesis Labs NZ
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/util_macros.h>
+
+enum tc654_regs {
+ TC654_REG_RPM1 = 0x00, /* RPM Output 1 */
+ TC654_REG_RPM2 = 0x01, /* RPM Output 2 */
+ TC654_REG_FAN_FAULT1 = 0x02, /* Fan Fault 1 Threshold */
+ TC654_REG_FAN_FAULT2 = 0x03, /* Fan Fault 2 Threshold */
+ TC654_REG_CONFIG = 0x04, /* Configuration */
+ TC654_REG_STATUS = 0x05, /* Status */
+ TC654_REG_DUTY_CYCLE = 0x06, /* Fan Speed Duty Cycle */
+ TC654_REG_MFR_ID = 0x07, /* Manufacturer Identification */
+ TC654_REG_VER_ID = 0x08, /* Version Identification */
+};
+
+/* Macros to easily index the registers */
+#define TC654_REG_RPM(idx) (TC654_REG_RPM1 + (idx))
+#define TC654_REG_FAN_FAULT(idx) (TC654_REG_FAN_FAULT1 + (idx))
+
+/* Config register bits */
+#define TC654_REG_CONFIG_RES BIT(6) /* Resolution Selection */
+#define TC654_REG_CONFIG_DUTYC BIT(5) /* Duty Cycle Control */
+#define TC654_REG_CONFIG_SDM BIT(0) /* Shutdown Mode */
+
+/* Status register bits */
+#define TC654_REG_STATUS_F2F BIT(1) /* Fan 2 Fault */
+#define TC654_REG_STATUS_F1F BIT(0) /* Fan 1 Fault */
+
+/* RPM resolution for RPM Output registers */
+#define TC654_HIGH_RPM_RESOLUTION 25 /* 25 RPM resolution */
+#define TC654_LOW_RPM_RESOLUTION 50 /* 50 RPM resolution */
+
+/* Convert a register value to the fan fault RPM threshold */
+#define TC654_FAN_FAULT_FROM_REG(val) ((val) * 50) /* 50 RPM resolution */
+
+/* Convert a fan fault RPM threshold to a register value */
+#define TC654_FAN_FAULT_TO_REG(val) (((val) / 50) & 0xff)
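These two macros simply encode the 50 RPM register granularity in both directions; values that are not a multiple of 50 RPM are truncated to the step below, and the result is masked to the 8-bit register width. A small illustration, not part of the driver:

	u8 reg;
	unsigned int rpm;

	reg = TC654_FAN_FAULT_TO_REG(1000);	/* 1000 / 50 = 20 */
	rpm = TC654_FAN_FAULT_FROM_REG(reg);	/* 20 * 50 = 1000 RPM */
	reg = TC654_FAN_FAULT_TO_REG(1049);	/* also 20: truncated to 1000 RPM */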
+
+/* Register data is read (and cached) at most once per second. */
+#define TC654_UPDATE_INTERVAL HZ
+
+struct tc654_data {
+ struct i2c_client *client;
+
+ /* update mutex */
+ struct mutex update_lock;
+
+ /* tc654 register cache */
+ bool valid;
+ unsigned long last_updated; /* in jiffies */
+
+ u8 rpm_output[2]; /* The fan RPM data for fans 1 and 2 is then
+ * written to registers RPM1 and RPM2
+ */
+ u8 fan_fault[2]; /* The Fan Fault Threshold Registers are used to
+ * set the fan fault threshold levels for fan 1
+ * and fan 2
+ */
+ u8 config; /* The Configuration Register is an 8-bit read/
+ * writable multi-function control register
+ * 7: Fan Fault Clear
+ * 1 = Clear Fan Fault
+ * 0 = Normal Operation (default)
+ * 6: Resolution Selection for RPM Output Registers
+ * RPM Output Registers (RPM1 and RPM2) will be
+ * set for
+ * 1 = 25 RPM (9-bit) resolution
+ * 0 = 50 RPM (8-bit) resolution (default)
+ * 5: Duty Cycle Control Method
+ * The V OUT duty cycle will be controlled via
+ * 1 = the SMBus interface.
+ * 0 = the V IN analog input pin. (default)
+ * 4,3: Fan 2 Pulses Per Rotation
+ * 00 = 1
+ * 01 = 2 (default)
+ * 10 = 4
+ * 11 = 8
+ * 2,1: Fan 1 Pulses Per Rotation
+ * 00 = 1
+ * 01 = 2 (default)
+ * 10 = 4
+ * 11 = 8
+ * 0: Shutdown Mode
+ * 1 = Shutdown mode.
+ * 0 = Normal operation. (default)
+ */
+ u8 status; /* The Status register provides all the information
+ * about what is going on within the TC654/TC655
+ * devices.
+ * 7,6: Unimplemented, Read as '0'
+ * 5: Over-Temperature Fault Condition
+ * 1 = Over-Temperature condition has occurred
+ * 0 = Normal operation. V IN is less than 2.6V
+ * 4: RPM2 Counter Overflow
+ * 1 = Fault condition
+ * 0 = Normal operation
+ * 3: RPM1 Counter Overflow
+ * 1 = Fault condition
+ * 0 = Normal operation
+ * 2: V IN Input Status
+ * 1 = V IN is open
+ * 0 = Normal operation. Voltage present at V IN
+ * 1: Fan 2 Fault
+ * 1 = Fault condition
+ * 0 = Normal operation
+ * 0: Fan 1 Fault
+ * 1 = Fault condition
+ * 0 = Normal operation
+ */
+ u8 duty_cycle; /* The DUTY_CYCLE register is a 4-bit read/
+ * writable register used to control the duty
+ * cycle of the V OUT output.
+ */
+};
+
+/* helper to grab and cache data, at most one time per second */
+static struct tc654_data *tc654_update_client(struct device *dev)
+{
+ struct tc654_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int ret = 0;
+
+ mutex_lock(&data->update_lock);
+ if (time_before(jiffies, data->last_updated + TC654_UPDATE_INTERVAL) &&
+ likely(data->valid))
+ goto out;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_RPM(0));
+ if (ret < 0)
+ goto out;
+ data->rpm_output[0] = ret;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_RPM(1));
+ if (ret < 0)
+ goto out;
+ data->rpm_output[1] = ret;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_FAN_FAULT(0));
+ if (ret < 0)
+ goto out;
+ data->fan_fault[0] = ret;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_FAN_FAULT(1));
+ if (ret < 0)
+ goto out;
+ data->fan_fault[1] = ret;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_CONFIG);
+ if (ret < 0)
+ goto out;
+ data->config = ret;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_STATUS);
+ if (ret < 0)
+ goto out;
+ data->status = ret;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_DUTY_CYCLE);
+ if (ret < 0)
+ goto out;
+ data->duty_cycle = ret & 0x0f;
+
+ data->last_updated = jiffies;
+ data->valid = true;
+out:
+ mutex_unlock(&data->update_lock);
+
+ if (ret < 0) /* upon error, encode it in return value */
+ data = ERR_PTR(ret);
+
+ return data;
+}
+
+/*
+ * sysfs attributes
+ */
+
+static ssize_t show_fan(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct tc654_data *data = tc654_update_client(dev);
+ int val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (data->config & TC654_REG_CONFIG_RES)
+ val = data->rpm_output[nr] * TC654_HIGH_RPM_RESOLUTION;
+ else
+ val = data->rpm_output[nr] * TC654_LOW_RPM_RESOLUTION;
+
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct tc654_data *data = tc654_update_client(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n",
+ TC654_FAN_FAULT_FROM_REG(data->fan_fault[nr]));
+}
+
+static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct tc654_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+
+ val = clamp_val(val, 0, 12750);
+
+ mutex_lock(&data->update_lock);
+
+ data->fan_fault[nr] = TC654_FAN_FAULT_TO_REG(val);
+ ret = i2c_smbus_write_byte_data(client, TC654_REG_FAN_FAULT(nr),
+ data->fan_fault[nr]);
+
+ mutex_unlock(&data->update_lock);
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t show_fan_alarm(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct tc654_data *data = tc654_update_client(dev);
+ int val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (nr == 0)
+ val = !!(data->status & TC654_REG_STATUS_F1F);
+ else
+ val = !!(data->status & TC654_REG_STATUS_F2F);
+
+ return sprintf(buf, "%d\n", val);
+}
+
+static const u8 TC654_FAN_PULSE_SHIFT[] = { 1, 3 };
+
+static ssize_t show_fan_pulses(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct tc654_data *data = tc654_update_client(dev);
+ u8 val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ val = BIT((data->config >> TC654_FAN_PULSE_SHIFT[nr]) & 0x03);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t set_fan_pulses(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct tc654_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ u8 config;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+
+ switch (val) {
+ case 1:
+ config = 0;
+ break;
+ case 2:
+ config = 1;
+ break;
+ case 4:
+ config = 2;
+ break;
+ case 8:
+ config = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&data->update_lock);
+
+ data->config &= ~(0x03 << TC654_FAN_PULSE_SHIFT[nr]);
+ data->config |= (config << TC654_FAN_PULSE_SHIFT[nr]);
+ ret = i2c_smbus_write_byte_data(client, TC654_REG_CONFIG, data->config);
+
+ mutex_unlock(&data->update_lock);
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t show_pwm_mode(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct tc654_data *data = tc654_update_client(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", !!(data->config & TC654_REG_CONFIG_DUTYC));
+}
+
+static ssize_t set_pwm_mode(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct tc654_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+
+ if (val)
+ data->config |= TC654_REG_CONFIG_DUTYC;
+ else
+ data->config &= ~TC654_REG_CONFIG_DUTYC;
+
+ ret = i2c_smbus_write_byte_data(client, TC654_REG_CONFIG, data->config);
+
+ mutex_unlock(&data->update_lock);
+ return ret < 0 ? ret : count;
+}
+
+static const int tc654_pwm_map[16] = { 77, 88, 102, 112, 124, 136, 148, 160,
+ 172, 184, 196, 207, 219, 231, 243, 255};
+
+static ssize_t show_pwm(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct tc654_data *data = tc654_update_client(dev);
+ int pwm;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (data->config & TC654_REG_CONFIG_SDM)
+ pwm = 0;
+ else
+ pwm = tc654_pwm_map[data->duty_cycle];
+
+ return sprintf(buf, "%d\n", pwm);
+}
+
+static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct tc654_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+ if (val > 255)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+
+ if (val == 0)
+ data->config |= TC654_REG_CONFIG_SDM;
+ else
+ data->config &= ~TC654_REG_CONFIG_SDM;
+
+ data->duty_cycle = find_closest(val, tc654_pwm_map,
+ ARRAY_SIZE(tc654_pwm_map));
+
+ ret = i2c_smbus_write_byte_data(client, TC654_REG_CONFIG, data->config);
+ if (ret < 0)
+ goto out;
+
+ ret = i2c_smbus_write_byte_data(client, TC654_REG_DUTY_CYCLE,
+ data->duty_cycle);
+
+out:
+ mutex_unlock(&data->update_lock);
+ return ret < 0 ? ret : count;
+}
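set_pwm() maps the requested 0..255 value onto the 16 duty-cycle steps the hardware actually supports using find_closest() from <linux/util_macros.h>, which returns the index of the array element nearest to the input; a request of 0 additionally drops the controller into shutdown mode via TC654_REG_CONFIG_SDM. A small illustration of the mapping, using the tc654_pwm_map table above:

	int idx;

	idx = find_closest(100, tc654_pwm_map, ARRAY_SIZE(tc654_pwm_map));
	/* idx == 2: tc654_pwm_map[2] == 102 is the entry closest to 100 */

	idx = find_closest(255, tc654_pwm_map, ARRAY_SIZE(tc654_pwm_map));
	/* idx == 15: full speed, the value written to TC654_REG_DUTY_CYCLE */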
+
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min,
+ set_fan_min, 0);
+static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min,
+ set_fan_min, 1);
+static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_fan_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_fan_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan1_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
+ set_fan_pulses, 0);
+static SENSOR_DEVICE_ATTR(fan2_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
+ set_fan_pulses, 1);
+static SENSOR_DEVICE_ATTR(pwm1_mode, S_IWUSR | S_IRUGO,
+ show_pwm_mode, set_pwm_mode, 0);
+static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm,
+ set_pwm, 0);
+
+/* Driver data */
+static struct attribute *tc654_attrs[] = {
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_min.dev_attr.attr,
+ &sensor_dev_attr_fan2_min.dev_attr.attr,
+ &sensor_dev_attr_fan1_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan2_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan1_pulses.dev_attr.attr,
+ &sensor_dev_attr_fan2_pulses.dev_attr.attr,
+ &sensor_dev_attr_pwm1_mode.dev_attr.attr,
+ &sensor_dev_attr_pwm1.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(tc654);
+
+/*
+ * device probe and removal
+ */
+
+static int tc654_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct tc654_data *data;
+ struct device *hwmon_dev;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(struct tc654_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ mutex_init(&data->update_lock);
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ data->config = ret;
+
+ hwmon_dev =
+ devm_hwmon_device_register_with_groups(dev, client->name, data,
+ tc654_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id tc654_id[] = {
+ {"tc654", 0},
+ {"tc655", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, tc654_id);
+
+static struct i2c_driver tc654_driver = {
+ .driver = {
+ .name = "tc654",
+ },
+ .probe = tc654_probe,
+ .id_table = tc654_id,
+};
+
+module_i2c_driver(tc654_driver);
+
+MODULE_AUTHOR("Allied Telesis Labs");
+MODULE_DESCRIPTION("Microchip TC654/TC655 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/tmp108.c b/drivers/hwmon/tmp108.c
new file mode 100644
index 000000000000..91bb94639286
--- /dev/null
+++ b/drivers/hwmon/tmp108.c
@@ -0,0 +1,469 @@
+/* Texas Instruments TMP108 SMBus temperature sensor driver
+ *
+ * Copyright (C) 2016 John Muir <john@jmuir.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define DRIVER_NAME "tmp108"
+
+#define TMP108_REG_TEMP 0x00
+#define TMP108_REG_CONF 0x01
+#define TMP108_REG_TLOW 0x02
+#define TMP108_REG_THIGH 0x03
+
+#define TMP108_TEMP_MIN_MC -50000 /* Minimum millicelsius. */
+#define TMP108_TEMP_MAX_MC 127937 /* Maximum millicelsius. */
+
+/* Configuration register bits.
+ * Note: these bit definitions are byte swapped.
+ */
+#define TMP108_CONF_M0 0x0100 /* Sensor mode. */
+#define TMP108_CONF_M1 0x0200
+#define TMP108_CONF_TM 0x0400 /* Thermostat mode. */
+#define TMP108_CONF_FL 0x0800 /* Watchdog flag - TLOW */
+#define TMP108_CONF_FH 0x1000 /* Watchdog flag - THIGH */
+#define TMP108_CONF_CR0 0x2000 /* Conversion rate. */
+#define TMP108_CONF_CR1 0x4000
+#define TMP108_CONF_ID 0x8000
+#define TMP108_CONF_HYS0 0x0010 /* Hysteresis. */
+#define TMP108_CONF_HYS1 0x0020
+#define TMP108_CONF_POL 0x0080 /* Polarity of alert. */
+
+/* Defaults set by the hardware upon reset. */
+#define TMP108_CONF_DEFAULTS (TMP108_CONF_CR0 | TMP108_CONF_TM |\
+ TMP108_CONF_HYS0 | TMP108_CONF_M1)
+/* These bits are read-only. */
+#define TMP108_CONF_READ_ONLY (TMP108_CONF_FL | TMP108_CONF_FH |\
+ TMP108_CONF_ID)
+
+#define TMP108_CONF_MODE_MASK (TMP108_CONF_M0|TMP108_CONF_M1)
+#define TMP108_MODE_SHUTDOWN 0x0000
+#define TMP108_MODE_ONE_SHOT TMP108_CONF_M0
+#define TMP108_MODE_CONTINUOUS TMP108_CONF_M1 /* Default */
+ /* When M1 is set, M0 is ignored. */
+
+#define TMP108_CONF_CONVRATE_MASK (TMP108_CONF_CR0|TMP108_CONF_CR1)
+#define TMP108_CONVRATE_0P25HZ 0x0000
+#define TMP108_CONVRATE_1HZ TMP108_CONF_CR0 /* Default */
+#define TMP108_CONVRATE_4HZ TMP108_CONF_CR1
+#define TMP108_CONVRATE_16HZ (TMP108_CONF_CR0|TMP108_CONF_CR1)
+
+#define TMP108_CONF_HYSTERESIS_MASK (TMP108_CONF_HYS0|TMP108_CONF_HYS1)
+#define TMP108_HYSTERESIS_0C 0x0000
+#define TMP108_HYSTERESIS_1C TMP108_CONF_HYS0 /* Default */
+#define TMP108_HYSTERESIS_2C TMP108_CONF_HYS1
+#define TMP108_HYSTERESIS_4C (TMP108_CONF_HYS0|TMP108_CONF_HYS1)
+
+#define TMP108_CONVERSION_TIME_MS 30 /* in milliseconds */
+
+struct tmp108 {
+ struct regmap *regmap;
+ u16 orig_config;
+ unsigned long ready_time;
+};
+
+/* convert 12-bit TMP108 register value to milliCelsius */
+static inline int tmp108_temp_reg_to_mC(s16 val)
+{
+ return (val & ~0x0f) * 1000 / 256;
+}
+
+/* convert milliCelsius to left adjusted 12-bit TMP108 register value */
+static inline u16 tmp108_mC_to_temp_reg(int val)
+{
+ return (val * 256) / 1000;
+}
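The TMP108 reports temperature left-justified in 1/256 degree steps with the low four bits unused, hence the mask and the *1000/256 scaling in the helpers above. A short worked example:

	/* +25 degrees C is 25 * 256 = 0x1900 in the register */
	int mc = tmp108_temp_reg_to_mC(0x1900);	/* 25000 millidegrees */
	u16 reg = tmp108_mC_to_temp_reg(25000);	/* 0x1900 */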
+
+static int tmp108_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *temp)
+{
+ struct tmp108 *tmp108 = dev_get_drvdata(dev);
+ unsigned int regval;
+ int err, hyst;
+
+ if (type == hwmon_chip) {
+ if (attr == hwmon_chip_update_interval) {
+ err = regmap_read(tmp108->regmap, TMP108_REG_CONF,
+ &regval);
+ if (err < 0)
+ return err;
+ switch (regval & TMP108_CONF_CONVRATE_MASK) {
+ case TMP108_CONVRATE_0P25HZ:
+ default:
+ *temp = 4000;
+ break;
+ case TMP108_CONVRATE_1HZ:
+ *temp = 1000;
+ break;
+ case TMP108_CONVRATE_4HZ:
+ *temp = 250;
+ break;
+ case TMP108_CONVRATE_16HZ:
+ *temp = 63;
+ break;
+ }
+ return 0;
+ }
+ return -EOPNOTSUPP;
+ }
+
+ switch (attr) {
+ case hwmon_temp_input:
+ /* Is it too early to return a conversion? */
+ if (time_before(jiffies, tmp108->ready_time)) {
+ dev_dbg(dev, "%s: Conversion not ready yet..\n",
+ __func__);
+ return -EAGAIN;
+ }
+ err = regmap_read(tmp108->regmap, TMP108_REG_TEMP, &regval);
+ if (err < 0)
+ return err;
+ *temp = tmp108_temp_reg_to_mC(regval);
+ break;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ err = regmap_read(tmp108->regmap, attr == hwmon_temp_min ?
+ TMP108_REG_TLOW : TMP108_REG_THIGH, &regval);
+ if (err < 0)
+ return err;
+ *temp = tmp108_temp_reg_to_mC(regval);
+ break;
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ err = regmap_read(tmp108->regmap, TMP108_REG_CONF, &regval);
+ if (err < 0)
+ return err;
+ *temp = !!(regval & (attr == hwmon_temp_min_alarm ?
+ TMP108_CONF_FL : TMP108_CONF_FH));
+ break;
+ case hwmon_temp_min_hyst:
+ case hwmon_temp_max_hyst:
+ err = regmap_read(tmp108->regmap, TMP108_REG_CONF, &regval);
+ if (err < 0)
+ return err;
+ switch (regval & TMP108_CONF_HYSTERESIS_MASK) {
+ case TMP108_HYSTERESIS_0C:
+ default:
+ hyst = 0;
+ break;
+ case TMP108_HYSTERESIS_1C:
+ hyst = 1000;
+ break;
+ case TMP108_HYSTERESIS_2C:
+ hyst = 2000;
+ break;
+ case TMP108_HYSTERESIS_4C:
+ hyst = 4000;
+ break;
+ }
+ err = regmap_read(tmp108->regmap, attr == hwmon_temp_min_hyst ?
+ TMP108_REG_TLOW : TMP108_REG_THIGH, &regval);
+ if (err < 0)
+ return err;
+ *temp = tmp108_temp_reg_to_mC(regval);
+ if (attr == hwmon_temp_min_hyst)
+ *temp += hyst;
+ else
+ *temp -= hyst;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int tmp108_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long temp)
+{
+ struct tmp108 *tmp108 = dev_get_drvdata(dev);
+ u32 regval, mask;
+ int err;
+
+ if (type == hwmon_chip) {
+ if (attr == hwmon_chip_update_interval) {
+ if (temp < 156)
+ mask = TMP108_CONVRATE_16HZ;
+ else if (temp < 625)
+ mask = TMP108_CONVRATE_4HZ;
+ else if (temp < 2500)
+ mask = TMP108_CONVRATE_1HZ;
+ else
+ mask = TMP108_CONVRATE_0P25HZ;
+ return regmap_update_bits(tmp108->regmap,
+ TMP108_REG_CONF,
+ TMP108_CONF_CONVRATE_MASK,
+ mask);
+ }
+ return -EOPNOTSUPP;
+ }
+
+ switch (attr) {
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ temp = clamp_val(temp, TMP108_TEMP_MIN_MC, TMP108_TEMP_MAX_MC);
+ return regmap_write(tmp108->regmap,
+ attr == hwmon_temp_min ?
+ TMP108_REG_TLOW : TMP108_REG_THIGH,
+ tmp108_mC_to_temp_reg(temp));
+ case hwmon_temp_min_hyst:
+ case hwmon_temp_max_hyst:
+ temp = clamp_val(temp, TMP108_TEMP_MIN_MC, TMP108_TEMP_MAX_MC);
+ err = regmap_read(tmp108->regmap,
+ attr == hwmon_temp_min_hyst ?
+ TMP108_REG_TLOW : TMP108_REG_THIGH,
+ &regval);
+ if (err < 0)
+ return err;
+ if (attr == hwmon_temp_min_hyst)
+ temp -= tmp108_temp_reg_to_mC(regval);
+ else
+ temp = tmp108_temp_reg_to_mC(regval) - temp;
+ if (temp < 500)
+ mask = TMP108_HYSTERESIS_0C;
+ else if (temp < 1500)
+ mask = TMP108_HYSTERESIS_1C;
+ else if (temp < 3000)
+ mask = TMP108_HYSTERESIS_2C;
+ else
+ mask = TMP108_HYSTERESIS_4C;
+ return regmap_update_bits(tmp108->regmap, TMP108_REG_CONF,
+ TMP108_CONF_HYSTERESIS_MASK, mask);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
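For update_interval the write path rounds the requested period to the nearest of the four supported conversion periods (63, 250, 1000 or 4000 ms) and programs the matching rate bits, and the read path reports the nominal period that was selected. A short example, assuming the usual hwmon plumbing provides dev and err:

	/* 156 <= 500 < 625, so TMP108_CONVRATE_4HZ is programmed and a
	 * subsequent read of the update interval reports 250 (ms)
	 */
	err = tmp108_write(dev, hwmon_chip, hwmon_chip_update_interval, 0, 500);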
+
+static umode_t tmp108_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type == hwmon_chip && attr == hwmon_chip_update_interval)
+ return 0644;
+
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ return 0444;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ case hwmon_temp_min_hyst:
+ case hwmon_temp_max_hyst:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static u32 tmp108_chip_config[] = {
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
+ 0
+};
+
+static const struct hwmon_channel_info tmp108_chip = {
+ .type = hwmon_chip,
+ .config = tmp108_chip_config,
+};
+
+static u32 tmp108_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_MIN_HYST
+ | HWMON_T_MAX_HYST | HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM,
+ 0
+};
+
+static const struct hwmon_channel_info tmp108_temp = {
+ .type = hwmon_temp,
+ .config = tmp108_temp_config,
+};
+
+static const struct hwmon_channel_info *tmp108_info[] = {
+ &tmp108_chip,
+ &tmp108_temp,
+ NULL
+};
+
+static const struct hwmon_ops tmp108_hwmon_ops = {
+ .is_visible = tmp108_is_visible,
+ .read = tmp108_read,
+ .write = tmp108_write,
+};
+
+static const struct hwmon_chip_info tmp108_chip_info = {
+ .ops = &tmp108_hwmon_ops,
+ .info = tmp108_info,
+};
+
+static void tmp108_restore_config(void *data)
+{
+ struct tmp108 *tmp108 = data;
+
+ regmap_write(tmp108->regmap, TMP108_REG_CONF, tmp108->orig_config);
+}
+
+static bool tmp108_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return reg != TMP108_REG_TEMP;
+}
+
+static bool tmp108_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ /* Configuration register must be volatile so the FL and FH flags are re-read. */
+ return reg == TMP108_REG_TEMP || reg == TMP108_REG_CONF;
+}
+
+static const struct regmap_config tmp108_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = TMP108_REG_THIGH,
+ .writeable_reg = tmp108_is_writeable_reg,
+ .volatile_reg = tmp108_is_volatile_reg,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .cache_type = REGCACHE_RBTREE,
+ .use_single_rw = true,
+};
+
+static int tmp108_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct tmp108 *tmp108;
+ int err;
+ u32 config;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA)) {
+ dev_err(dev,
+ "adapter doesn't support SMBus word transactions\n");
+ return -ENODEV;
+ }
+
+ tmp108 = devm_kzalloc(dev, sizeof(*tmp108), GFP_KERNEL);
+ if (!tmp108)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, tmp108);
+
+ tmp108->regmap = devm_regmap_init_i2c(client, &tmp108_regmap_config);
+ if (IS_ERR(tmp108->regmap)) {
+ err = PTR_ERR(tmp108->regmap);
+ dev_err(dev, "regmap init failed: %d", err);
+ return err;
+ }
+
+ err = regmap_read(tmp108->regmap, TMP108_REG_CONF, &config);
+ if (err < 0) {
+ dev_err(dev, "error reading config register: %d", err);
+ return err;
+ }
+ tmp108->orig_config = config;
+
+ /* Only continuous mode is supported. */
+ config &= ~TMP108_CONF_MODE_MASK;
+ config |= TMP108_MODE_CONTINUOUS;
+
+ /* Only comparator mode is supported. */
+ config &= ~TMP108_CONF_TM;
+
+ err = regmap_write(tmp108->regmap, TMP108_REG_CONF, config);
+ if (err < 0) {
+ dev_err(dev, "error writing config register: %d", err);
+ return err;
+ }
+
+ tmp108->ready_time = jiffies;
+ if ((tmp108->orig_config & TMP108_CONF_MODE_MASK) ==
+ TMP108_MODE_SHUTDOWN)
+ tmp108->ready_time +=
+ msecs_to_jiffies(TMP108_CONVERSION_TIME_MS);
+
+ err = devm_add_action_or_reset(dev, tmp108_restore_config, tmp108);
+ if (err) {
+ dev_err(dev, "add action or reset failed: %d", err);
+ return err;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ tmp108,
+ &tmp108_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static int __maybe_unused tmp108_suspend(struct device *dev)
+{
+ struct tmp108 *tmp108 = dev_get_drvdata(dev);
+
+ return regmap_update_bits(tmp108->regmap, TMP108_REG_CONF,
+ TMP108_CONF_MODE_MASK, TMP108_MODE_SHUTDOWN);
+}
+
+static int __maybe_unused tmp108_resume(struct device *dev)
+{
+ struct tmp108 *tmp108 = dev_get_drvdata(dev);
+ int err;
+
+ err = regmap_update_bits(tmp108->regmap, TMP108_REG_CONF,
+ TMP108_CONF_MODE_MASK, TMP108_MODE_CONTINUOUS);
+ tmp108->ready_time = jiffies +
+ msecs_to_jiffies(TMP108_CONVERSION_TIME_MS);
+ return err;
+}
+
+static SIMPLE_DEV_PM_OPS(tmp108_dev_pm_ops, tmp108_suspend, tmp108_resume);
+
+static const struct i2c_device_id tmp108_i2c_ids[] = {
+ { "tmp108", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tmp108_i2c_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id tmp108_of_ids[] = {
+ { .compatible = "ti,tmp108", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tmp108_of_ids);
+#endif
+
+static struct i2c_driver tmp108_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &tmp108_dev_pm_ops,
+ .of_match_table = of_match_ptr(tmp108_of_ids),
+ },
+ .probe = tmp108_probe,
+ .id_table = tmp108_i2c_ids,
+};
+
+module_i2c_driver(tmp108_driver);
+
+MODULE_AUTHOR("John Muir <john@jmuir.com>");
+MODULE_DESCRIPTION("Texas Instruments TMP108 temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index ac91c07e3f90..d1f209a5feac 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -220,7 +220,7 @@ struct pdev_entry {
static LIST_HEAD(pdev_list);
static DEFINE_MUTEX(pdev_list_mutex);
-static int via_cputemp_device_add(unsigned int cpu)
+static int via_cputemp_online(unsigned int cpu)
{
int err;
struct platform_device *pdev;
@@ -261,7 +261,7 @@ exit:
return err;
}
-static void via_cputemp_device_remove(unsigned int cpu)
+static int via_cputemp_down_prep(unsigned int cpu)
{
struct pdev_entry *p;
@@ -272,33 +272,13 @@ static void via_cputemp_device_remove(unsigned int cpu)
list_del(&p->list);
mutex_unlock(&pdev_list_mutex);
kfree(p);
- return;
+ return 0;
}
}
mutex_unlock(&pdev_list_mutex);
+ return 0;
}
-static int via_cputemp_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long) hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_DOWN_FAILED:
- via_cputemp_device_add(cpu);
- break;
- case CPU_DOWN_PREPARE:
- via_cputemp_device_remove(cpu);
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
- .notifier_call = via_cputemp_cpu_callback,
-};
-
static const struct x86_cpu_id __initconst cputemp_ids[] = {
{ X86_VENDOR_CENTAUR, 6, 0xa, }, /* C7 A */
{ X86_VENDOR_CENTAUR, 6, 0xd, }, /* C7 D */
@@ -307,9 +287,11 @@ static const struct x86_cpu_id __initconst cputemp_ids[] = {
};
MODULE_DEVICE_TABLE(x86cpu, cputemp_ids);
+static enum cpuhp_state via_temp_online;
+
static int __init via_cputemp_init(void)
{
- int i, err;
+ int err;
if (!x86_match_cpu(cputemp_ids))
return -ENODEV;
@@ -318,58 +300,33 @@ static int __init via_cputemp_init(void)
if (err)
goto exit;
- cpu_notifier_register_begin();
- for_each_online_cpu(i) {
- struct cpuinfo_x86 *c = &cpu_data(i);
-
- if (c->x86 != 6)
- continue;
-
- if (c->x86_model < 0x0a)
- continue;
-
- if (c->x86_model > 0x0f) {
- pr_warn("Unknown CPU model 0x%x\n", c->x86_model);
- continue;
- }
-
- via_cputemp_device_add(i);
- }
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/via:online",
+ via_cputemp_online, via_cputemp_down_prep);
+ if (err < 0)
+ goto exit_driver_unreg;
+ via_temp_online = err;
#ifndef CONFIG_HOTPLUG_CPU
if (list_empty(&pdev_list)) {
- cpu_notifier_register_done();
err = -ENODEV;
- goto exit_driver_unreg;
+ goto exit_hp_unreg;
}
#endif
-
- __register_hotcpu_notifier(&via_cputemp_cpu_notifier);
- cpu_notifier_register_done();
return 0;
#ifndef CONFIG_HOTPLUG_CPU
+exit_hp_unreg:
+ cpuhp_remove_state_nocalls(via_temp_online);
+#endif
exit_driver_unreg:
platform_driver_unregister(&via_cputemp_driver);
-#endif
exit:
return err;
}
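The init path now uses the CPU hotplug state machine instead of the removed notifier: cpuhp_setup_state() with CPUHP_AP_ONLINE_DYN registers an online/teardown callback pair, immediately invokes the online callback on every CPU that is already up, and returns a dynamic state number which cpuhp_remove_state() later uses to unregister (running the teardown callback on all online CPUs as it does so). A minimal sketch of the pattern, with hypothetical example_* names:

static enum cpuhp_state example_online_state;

static int example_online(unsigned int cpu)
{
	/* per-CPU setup; also called for CPUs already online at setup time */
	return 0;
}

static int example_prepare_down(unsigned int cpu)
{
	/* per-CPU teardown, called before the CPU is taken offline */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_online, example_prepare_down);
	if (ret < 0)
		return ret;
	example_online_state = ret;	/* remember the dynamic state id */
	return 0;
}

static void __exit example_exit(void)
{
	/* runs example_prepare_down() on every online CPU, then unregisters */
	cpuhp_remove_state(example_online_state);
}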
static void __exit via_cputemp_exit(void)
{
- struct pdev_entry *p, *n;
-
- cpu_notifier_register_begin();
- __unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
- mutex_lock(&pdev_list_mutex);
- list_for_each_entry_safe(p, n, &pdev_list, list) {
- platform_device_unregister(p->pdev);
- list_del(&p->list);
- kfree(p);
- }
- mutex_unlock(&pdev_list_mutex);
- cpu_notifier_register_done();
+ cpuhp_remove_state(via_temp_online);
platform_driver_unregister(&via_cputemp_driver);
}
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 2cd7c718198a..17741969026e 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -202,6 +202,21 @@ static void *etm_setup_aux(int event_cpu, void **pages,
if (!event_data)
return NULL;
+ /*
+ * In theory nothing prevents tracers in a trace session from being
+ * associated with different sinks, or from having a sink per tracer.
+ * But until we have HW with this kind of topology we need to assume
+ * all tracers in a trace session use the same sink. Therefore go
+ * through the coresight bus and pick the first enabled sink.
+ *
+ * When operated from sysFS, users are responsible for enabling the sink;
+ * from perf, the perf tools do it based on the choice made on the command
+ * line. As such the "enable_sink" flag in sysFS is reset.
+ */
+ sink = coresight_get_enabled_sink(true);
+ if (!sink)
+ goto err;
+
INIT_WORK(&event_data->work, free_event_data);
mask = &event_data->mask;
@@ -219,25 +234,11 @@ static void *etm_setup_aux(int event_cpu, void **pages,
* list of devices from source to sink that can be
* referenced later when the path is actually needed.
*/
- event_data->path[cpu] = coresight_build_path(csdev);
+ event_data->path[cpu] = coresight_build_path(csdev, sink);
if (IS_ERR(event_data->path[cpu]))
goto err;
}
- /*
- * In theory nothing prevent tracers in a trace session from being
- * associated with different sinks, nor having a sink per tracer. But
- * until we have HW with this kind of topology and a way to convey
- * sink assignement from the perf cmd line we need to assume tracers
- * in a trace session are using the same sink. Therefore pick the sink
- * found at the end of the first available path.
- */
- cpu = cpumask_first(mask);
- /* Grab the sink at the end of the path */
- sink = coresight_get_sink(event_data->path[cpu]);
- if (!sink)
- goto err;
-
if (!sink_ops(sink)->alloc_buffer)
goto err;
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index 4a18ee499965..ad063d7444e1 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -89,11 +89,13 @@
/* ETMCR - 0x00 */
#define ETMCR_PWD_DWN BIT(0)
#define ETMCR_STALL_MODE BIT(7)
+#define ETMCR_BRANCH_BROADCAST BIT(8)
#define ETMCR_ETM_PRG BIT(10)
#define ETMCR_ETM_EN BIT(11)
#define ETMCR_CYC_ACC BIT(12)
#define ETMCR_CTXID_SIZE (BIT(14)|BIT(15))
#define ETMCR_TIMESTAMP_EN BIT(28)
+#define ETMCR_RETURN_STACK BIT(29)
/* ETMCCR - 0x04 */
#define ETMCCR_FIFOFULL BIT(23)
/* ETMPDCR - 0x310 */
@@ -110,8 +112,11 @@
#define ETM_MODE_STALL BIT(2)
#define ETM_MODE_TIMESTAMP BIT(3)
#define ETM_MODE_CTXID BIT(4)
+#define ETM_MODE_BBROAD BIT(5)
+#define ETM_MODE_RET_STACK BIT(6)
#define ETM_MODE_ALL (ETM_MODE_EXCLUDE | ETM_MODE_CYCACC | \
ETM_MODE_STALL | ETM_MODE_TIMESTAMP | \
+ ETM_MODE_BBROAD | ETM_MODE_RET_STACK | \
ETM_MODE_CTXID | ETM_MODE_EXCL_KERN | \
ETM_MODE_EXCL_USER)
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index e9b071953f80..ca98ad13bb8c 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -146,7 +146,7 @@ static ssize_t mode_store(struct device *dev,
goto err_unlock;
}
config->ctrl |= ETMCR_STALL_MODE;
- } else
+ } else
config->ctrl &= ~ETMCR_STALL_MODE;
if (config->mode & ETM_MODE_TIMESTAMP) {
@@ -164,6 +164,16 @@ static ssize_t mode_store(struct device *dev,
else
config->ctrl &= ~ETMCR_CTXID_SIZE;
+ if (config->mode & ETM_MODE_BBROAD)
+ config->ctrl |= ETMCR_BRANCH_BROADCAST;
+ else
+ config->ctrl &= ~ETMCR_BRANCH_BROADCAST;
+
+ if (config->mode & ETM_MODE_RET_STACK)
+ config->ctrl |= ETMCR_RETURN_STACK;
+ else
+ config->ctrl &= ~ETMCR_RETURN_STACK;
+
if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
etm_config_trace_mode(config);
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 196a14be4b3d..ef9d8e93e3b2 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -111,7 +111,9 @@ static inline void CS_UNLOCK(void __iomem *addr)
void coresight_disable_path(struct list_head *path);
int coresight_enable_path(struct list_head *path, u32 mode);
struct coresight_device *coresight_get_sink(struct list_head *path);
-struct list_head *coresight_build_path(struct coresight_device *csdev);
+struct coresight_device *coresight_get_enabled_sink(bool reset);
+struct list_head *coresight_build_path(struct coresight_device *csdev,
+ struct coresight_device *sink);
void coresight_release_path(struct list_head *path);
#ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 49e0f1b925a5..944c17b48d23 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -419,10 +419,10 @@ static ssize_t stm_generic_packet(struct stm_data *stm_data,
struct stm_drvdata, stm);
if (!(drvdata && local_read(&drvdata->mode)))
- return 0;
+ return -EACCES;
if (channel >= drvdata->numsp)
- return 0;
+ return -EINVAL;
ch_addr = (unsigned long)stm_channel_addr(drvdata, channel);
@@ -920,6 +920,11 @@ static struct amba_id stm_ids[] = {
.mask = 0x0003ffff,
.data = "STM32",
},
+ {
+ .id = 0x0003b963,
+ .mask = 0x0003ffff,
+ .data = "STM500",
+ },
{ 0, 0},
};
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index d6941ea24d8d..1549436e2492 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -70,7 +70,7 @@ static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
* When operating in sysFS mode the content of the buffer needs to be
* read before the TMC is disabled.
*/
- if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
tmc_etb_dump_hw(drvdata);
tmc_disable_hw(drvdata);
@@ -103,19 +103,14 @@ static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode)
+static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
int ret = 0;
bool used = false;
char *buf = NULL;
- long val;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- /* This shouldn't be happening */
- if (WARN_ON(mode != CS_MODE_SYSFS))
- return -EINVAL;
-
/*
* If we don't have a buffer release the lock and allocate memory.
* Otherwise keep the lock and move along.
@@ -138,13 +133,12 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode)
goto out;
}
- val = local_xchg(&drvdata->mode, mode);
/*
* In sysFS mode we can have multiple writers per sink. Since this
* sink is already enabled no memory is needed and the HW need not be
* touched.
*/
- if (val == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
goto out;
/*
@@ -163,6 +157,7 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode)
drvdata->buf = buf;
}
+ drvdata->mode = CS_MODE_SYSFS;
tmc_etb_enable_hw(drvdata);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -177,34 +172,29 @@ out:
return ret;
}
-static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, u32 mode)
+static int tmc_enable_etf_sink_perf(struct coresight_device *csdev)
{
int ret = 0;
- long val;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- /* This shouldn't be happening */
- if (WARN_ON(mode != CS_MODE_PERF))
- return -EINVAL;
-
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
ret = -EINVAL;
goto out;
}
- val = local_xchg(&drvdata->mode, mode);
/*
* In Perf mode there can be only one writer per sink. There
* is also no need to continue if the ETB/ETR is already operated
* from sysFS.
*/
- if (val != CS_MODE_DISABLED) {
+ if (drvdata->mode != CS_MODE_DISABLED) {
ret = -EINVAL;
goto out;
}
+ drvdata->mode = CS_MODE_PERF;
tmc_etb_enable_hw(drvdata);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -216,9 +206,9 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
{
switch (mode) {
case CS_MODE_SYSFS:
- return tmc_enable_etf_sink_sysfs(csdev, mode);
+ return tmc_enable_etf_sink_sysfs(csdev);
case CS_MODE_PERF:
- return tmc_enable_etf_sink_perf(csdev, mode);
+ return tmc_enable_etf_sink_perf(csdev);
}
/* We shouldn't be here */
@@ -227,7 +217,6 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
static void tmc_disable_etf_sink(struct coresight_device *csdev)
{
- long val;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -237,10 +226,11 @@ static void tmc_disable_etf_sink(struct coresight_device *csdev)
return;
}
- val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
/* Disable the TMC only if it needs to */
- if (val != CS_MODE_DISABLED)
+ if (drvdata->mode != CS_MODE_DISABLED) {
tmc_etb_disable_hw(drvdata);
+ drvdata->mode = CS_MODE_DISABLED;
+ }
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -260,7 +250,7 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
}
tmc_etf_enable_hw(drvdata);
- local_set(&drvdata->mode, CS_MODE_SYSFS);
+ drvdata->mode = CS_MODE_SYSFS;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_info(drvdata->dev, "TMC-ETF enabled\n");
@@ -280,7 +270,7 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
}
tmc_etf_disable_hw(drvdata);
- local_set(&drvdata->mode, CS_MODE_DISABLED);
+ drvdata->mode = CS_MODE_DISABLED;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_info(drvdata->dev, "TMC disabled\n");
@@ -383,7 +373,7 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
return;
/* This shouldn't happen */
- if (WARN_ON_ONCE(local_read(&drvdata->mode) != CS_MODE_PERF))
+ if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
return;
CS_UNLOCK(drvdata->base);
@@ -504,7 +494,6 @@ const struct coresight_ops tmc_etf_cs_ops = {
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
- long val;
enum tmc_mode mode;
int ret = 0;
unsigned long flags;
@@ -528,9 +517,8 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
goto out;
}
- val = local_read(&drvdata->mode);
/* Don't interfere if operated from Perf */
- if (val == CS_MODE_PERF) {
+ if (drvdata->mode == CS_MODE_PERF) {
ret = -EINVAL;
goto out;
}
@@ -542,7 +530,7 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
}
/* Disable the TMC if need be */
- if (val == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
tmc_etb_disable_hw(drvdata);
drvdata->reading = true;
@@ -573,7 +561,7 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
}
/* Re-enable the TMC if need be */
- if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
+ if (drvdata->mode == CS_MODE_SYSFS) {
/*
* The trace run will continue with the same allocated trace
* buffer. As such zero-out the buffer so that we don't end
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 886ea83c68e0..5d312699b3b9 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -86,26 +86,22 @@ static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
* When operating in sysFS mode the content of the buffer needs to be
* read before the TMC is disabled.
*/
- if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
tmc_etr_dump_hw(drvdata);
tmc_disable_hw(drvdata);
CS_LOCK(drvdata->base);
}
-static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
+static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
int ret = 0;
bool used = false;
- long val;
unsigned long flags;
void __iomem *vaddr = NULL;
dma_addr_t paddr;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- /* This shouldn't be happening */
- if (WARN_ON(mode != CS_MODE_SYSFS))
- return -EINVAL;
/*
* If we don't have a buffer release the lock and allocate memory.
@@ -134,13 +130,12 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
goto out;
}
- val = local_xchg(&drvdata->mode, mode);
/*
* In sysFS mode we can have multiple writers per sink. Since this
* sink is already enabled no memory is needed and the HW need not be
* touched.
*/
- if (val == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
goto out;
/*
@@ -155,8 +150,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
drvdata->buf = drvdata->vaddr;
}
- memset(drvdata->vaddr, 0, drvdata->size);
-
+ drvdata->mode = CS_MODE_SYSFS;
tmc_etr_enable_hw(drvdata);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -171,34 +165,29 @@ out:
return ret;
}
-static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, u32 mode)
+static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
{
int ret = 0;
- long val;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- /* This shouldn't be happening */
- if (WARN_ON(mode != CS_MODE_PERF))
- return -EINVAL;
-
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
ret = -EINVAL;
goto out;
}
- val = local_xchg(&drvdata->mode, mode);
/*
* In Perf mode there can be only one writer per sink. There
* is also no need to continue if the ETR is already operated
* from sysFS.
*/
- if (val != CS_MODE_DISABLED) {
+ if (drvdata->mode != CS_MODE_DISABLED) {
ret = -EINVAL;
goto out;
}
+ drvdata->mode = CS_MODE_PERF;
tmc_etr_enable_hw(drvdata);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -210,9 +199,9 @@ static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
{
switch (mode) {
case CS_MODE_SYSFS:
- return tmc_enable_etr_sink_sysfs(csdev, mode);
+ return tmc_enable_etr_sink_sysfs(csdev);
case CS_MODE_PERF:
- return tmc_enable_etr_sink_perf(csdev, mode);
+ return tmc_enable_etr_sink_perf(csdev);
}
/* We shouldn't be here */
@@ -221,7 +210,6 @@ static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
static void tmc_disable_etr_sink(struct coresight_device *csdev)
{
- long val;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -231,10 +219,11 @@ static void tmc_disable_etr_sink(struct coresight_device *csdev)
return;
}
- val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
/* Disable the TMC only if it needs to */
- if (val != CS_MODE_DISABLED)
+ if (drvdata->mode != CS_MODE_DISABLED) {
tmc_etr_disable_hw(drvdata);
+ drvdata->mode = CS_MODE_DISABLED;
+ }
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -253,7 +242,6 @@ const struct coresight_ops tmc_etr_cs_ops = {
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
{
int ret = 0;
- long val;
unsigned long flags;
/* config types are set a boot time and never change */
@@ -266,9 +254,8 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
goto out;
}
- val = local_read(&drvdata->mode);
/* Don't interfere if operated from Perf */
- if (val == CS_MODE_PERF) {
+ if (drvdata->mode == CS_MODE_PERF) {
ret = -EINVAL;
goto out;
}
@@ -280,7 +267,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
}
/* Disable the TMC if need be */
- if (val == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
tmc_etr_disable_hw(drvdata);
drvdata->reading = true;
@@ -303,7 +290,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
spin_lock_irqsave(&drvdata->spinlock, flags);
/* RE-enable the TMC if need be */
- if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
+ if (drvdata->mode == CS_MODE_SYSFS) {
/*
* The trace run will continue with the same allocated trace
* buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 44b3ae346118..51c01851533e 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -117,7 +117,7 @@ struct tmc_drvdata {
void __iomem *vaddr;
u32 size;
u32 len;
- local_t mode;
+ u32 mode;
enum tmc_config_type config_type;
enum tmc_mem_intf_width memwidth;
u32 trigger_cntr;
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 7bf00a0beb6f..0c37356e417c 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -368,6 +368,52 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
return csdev;
}
+static int coresight_enabled_sink(struct device *dev, void *data)
+{
+ bool *reset = data;
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
+ csdev->activated) {
+ /*
+ * Now that we have a handle on the sink for this session,
+ * disable the sysFS "enable_sink" flag so that possible
+ * concurrent perf sessions that wish to use another sink don't
+ * trip on it. Doing so has no ramification for the current
+ * session.
+ */
+ if (*reset)
+ csdev->activated = false;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * coresight_get_enabled_sink - returns the first enabled sink found on the bus
+ * @deactivate: Whether the 'enable_sink' flag should be reset
+ *
+ * When operated from perf the deactivate parameter should be set to 'true'.
+ * That way the "enable_sink" flag of the sink that was selected can be reset,
+ * allowing for other concurrent perf sessions to choose a different sink.
+ *
+ * When operated from sysFS users have full control and as such the deactivate
+ * parameter should be set to 'false', hence mandating users to explicitly
+ * clear the flag.
+ */
+struct coresight_device *coresight_get_enabled_sink(bool deactivate)
+{
+ struct device *dev = NULL;
+
+ dev = bus_find_device(&coresight_bustype, NULL, &deactivate,
+ coresight_enabled_sink);
+
+ return dev ? to_coresight_device(dev) : NULL;
+}
+
/**
* _coresight_build_path - recursively build a path from a @csdev to a sink.
* @csdev: The device to start from.
@@ -380,6 +426,7 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
* last one.
*/
static int _coresight_build_path(struct coresight_device *csdev,
+ struct coresight_device *sink,
struct list_head *path)
{
int i;
@@ -387,15 +434,15 @@ static int _coresight_build_path(struct coresight_device *csdev,
struct coresight_node *node;
/* An activated sink has been found. Enqueue the element */
- if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
- csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && csdev->activated)
+ if (csdev == sink)
goto out;
/* Not a sink - recursively explore each port found on this element */
for (i = 0; i < csdev->nr_outport; i++) {
struct coresight_device *child_dev = csdev->conns[i].child_dev;
- if (child_dev && _coresight_build_path(child_dev, path) == 0) {
+ if (child_dev &&
+ _coresight_build_path(child_dev, sink, path) == 0) {
found = true;
break;
}
@@ -422,18 +469,22 @@ out:
return 0;
}
-struct list_head *coresight_build_path(struct coresight_device *csdev)
+struct list_head *coresight_build_path(struct coresight_device *source,
+ struct coresight_device *sink)
{
struct list_head *path;
int rc;
+ if (!sink)
+ return ERR_PTR(-EINVAL);
+
path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
if (!path)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(path);
- rc = _coresight_build_path(csdev, path);
+ rc = _coresight_build_path(source, sink, path);
if (rc) {
kfree(path);
return ERR_PTR(rc);
@@ -497,6 +548,7 @@ static int coresight_validate_source(struct coresight_device *csdev,
int coresight_enable(struct coresight_device *csdev)
{
int cpu, ret = 0;
+ struct coresight_device *sink;
struct list_head *path;
mutex_lock(&coresight_mutex);
@@ -508,7 +560,17 @@ int coresight_enable(struct coresight_device *csdev)
if (csdev->enable)
goto out;
- path = coresight_build_path(csdev);
+ /*
+ * Search for a valid sink for this session but don't reset the
+ * "enable_sink" flag in sysFS. Users get to do that explicitly.
+ */
+ sink = coresight_get_enabled_sink(false);
+ if (!sink) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ path = coresight_build_path(csdev, sink);
if (IS_ERR(path)) {
pr_err("building path(s) failed\n");
ret = PTR_ERR(path);
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 6f0a51a2c6ec..cdd9b3b26195 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -29,6 +29,9 @@
#include "intel_th.h"
#include "debug.h"
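+
+/*
+ * When set, the Trace Hub is assumed to be controlled by an external debug
+ * host: only SOURCE and SWITCH subdevices are created and the GTH refuses
+ * to assign output drivers to its ports (see intel_th_gth_assign()).
+ */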
+static bool host_mode __read_mostly;
+module_param(host_mode, bool, 0444);
+
static DEFINE_IDA(intel_th_ida);
static int intel_th_match(struct device *dev, struct device_driver *driver)
@@ -380,7 +383,7 @@ static void intel_th_device_free(struct intel_th_device *thdev)
/*
* Intel(R) Trace Hub subdevices
*/
-static struct intel_th_subdevice {
+static const struct intel_th_subdevice {
const char *name;
struct resource res[3];
unsigned nres;
@@ -527,14 +530,19 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
{
struct resource res[3];
unsigned int req = 0;
- int i, err;
+ int src, dst, err;
/* create devices for each intel_th_subdevice */
- for (i = 0; i < ARRAY_SIZE(intel_th_subdevices); i++) {
- struct intel_th_subdevice *subdev = &intel_th_subdevices[i];
+ for (src = 0, dst = 0; src < ARRAY_SIZE(intel_th_subdevices); src++) {
+ const struct intel_th_subdevice *subdev =
+ &intel_th_subdevices[src];
struct intel_th_device *thdev;
int r;
+ /* only allow SOURCE and SWITCH devices in host mode */
+ if (host_mode && subdev->type == INTEL_TH_OUTPUT)
+ continue;
+
thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
subdev->id);
if (!thdev) {
@@ -577,10 +585,12 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
}
if (subdev->type == INTEL_TH_OUTPUT) {
- thdev->dev.devt = MKDEV(th->major, i);
+ thdev->dev.devt = MKDEV(th->major, dst);
thdev->output.type = subdev->otype;
thdev->output.port = -1;
thdev->output.scratchpad = subdev->scrpd;
+ } else if (subdev->type == INTEL_TH_SWITCH) {
+ thdev->host_mode = host_mode;
}
err = device_add(&thdev->dev);
@@ -597,14 +607,14 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
req++;
}
- th->thdev[i] = thdev;
+ th->thdev[dst++] = thdev;
}
return 0;
kill_subdevs:
- for (i-- ; i >= 0; i--)
- intel_th_device_remove(th->thdev[i]);
+ for (; dst >= 0; dst--)
+ intel_th_device_remove(th->thdev[dst]);
return err;
}
@@ -717,7 +727,7 @@ void intel_th_free(struct intel_th *th)
intel_th_request_hub_module_flush(th);
for (i = 0; i < TH_SUBDEVICE_MAX; i++)
- if (th->thdev[i] != th->hub)
+ if (th->thdev[i] && th->thdev[i] != th->hub)
intel_th_device_remove(th->thdev[i]);
intel_th_device_remove(th->hub);
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index 33e09369a491..dd32d0bad687 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -564,6 +564,9 @@ static int intel_th_gth_assign(struct intel_th_device *thdev,
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
int i, id;
+ if (thdev->host_mode)
+ return -EBUSY;
+
if (othdev->type != INTEL_TH_OUTPUT)
return -EINVAL;
@@ -600,6 +603,9 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
int port = othdev->output.port;
+ if (thdev->host_mode)
+ return;
+
spin_lock(&gth->gth_lock);
othdev->output.port = -1;
othdev->output.active = false;
@@ -654,9 +660,24 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
gth->base = base;
spin_lock_init(&gth->gth_lock);
+ /*
+ * Host mode can be signalled via SW means or via SCRPD_DEBUGGER_IN_USE
+ * bit. Either way, don't reset HW in this case, and don't export any
+ * capture configuration attributes. Also, refuse to assign output
+ * drivers to ports, see intel_th_gth_assign().
+ */
+ if (thdev->host_mode)
+ goto done;
+
ret = intel_th_gth_reset(gth);
- if (ret)
- return ret;
+ if (ret) {
+ if (ret != -EBUSY)
+ return ret;
+
+ thdev->host_mode = true;
+
+ goto done;
+ }
for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++)
gth->master[i] = -1;
@@ -677,6 +698,7 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
return -ENOMEM;
}
+done:
dev_set_drvdata(dev, gth);
return 0;
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 4c195786bf1f..3096e7054f6d 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -54,6 +54,7 @@ struct intel_th_output {
* @num_resources: number of resources in @resource array
* @type: INTEL_TH_{SOURCE,OUTPUT,SWITCH}
* @id: device instance or -1
+ * @host_mode: Intel TH is controlled by an external debug host
* @output: output descriptor for INTEL_TH_OUTPUT devices
* @name: device name to match the driver
*/
@@ -64,6 +65,9 @@ struct intel_th_device {
unsigned int type;
int id;
+ /* INTEL_TH_SWITCH specific */
+ bool host_mode;
+
/* INTEL_TH_OUTPUT specific */
struct intel_th_output output;
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 51f81d64ca37..a6ea387b5b00 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -361,7 +361,7 @@ static int stm_char_open(struct inode *inode, struct file *file)
struct stm_file *stmf;
struct device *dev;
unsigned int major = imajor(inode);
- int err = -ENODEV;
+ int err = -ENOMEM;
dev = class_find_device(&stm_class, NULL, &major, major_match);
if (!dev)
@@ -369,8 +369,9 @@ static int stm_char_open(struct inode *inode, struct file *file)
stmf = kzalloc(sizeof(*stmf), GFP_KERNEL);
if (!stmf)
- return -ENOMEM;
+ goto err_put_device;
+ err = -ENODEV;
stm_output_init(&stmf->output);
stmf->stm = to_stm_device(dev);
@@ -382,9 +383,10 @@ static int stm_char_open(struct inode *inode, struct file *file)
return nonseekable_open(inode, file);
err_free:
+ kfree(stmf);
+err_put_device:
/* matches class_find_device() above */
put_device(dev);
- kfree(stmf);
return err;
}
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 6743b18194fb..a918270d6f54 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -73,6 +73,7 @@ source "drivers/iio/adc/Kconfig"
source "drivers/iio/amplifiers/Kconfig"
source "drivers/iio/chemical/Kconfig"
source "drivers/iio/common/Kconfig"
+source "drivers/iio/counter/Kconfig"
source "drivers/iio/dac/Kconfig"
source "drivers/iio/dummy/Kconfig"
source "drivers/iio/frequency/Kconfig"
@@ -87,6 +88,7 @@ if IIO_TRIGGER
source "drivers/iio/trigger/Kconfig"
endif #IIO_TRIGGER
source "drivers/iio/potentiometer/Kconfig"
+source "drivers/iio/potentiostat/Kconfig"
source "drivers/iio/pressure/Kconfig"
source "drivers/iio/proximity/Kconfig"
source "drivers/iio/temperature/Kconfig"
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index 87e4c4369e2f..33fa4026f92c 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -18,6 +18,7 @@ obj-y += amplifiers/
obj-y += buffer/
obj-y += chemical/
obj-y += common/
+obj-y += counter/
obj-y += dac/
obj-y += dummy/
obj-y += gyro/
@@ -29,6 +30,7 @@ obj-y += light/
obj-y += magnetometer/
obj-y += orientation/
obj-y += potentiometer/
+obj-y += potentiostat/
obj-y += pressure/
obj-y += proximity/
obj-y += temperature/
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 2b791fe1e2bc..c68bdb649005 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -52,6 +52,26 @@ config BMC150_ACCEL_SPI
tristate
select REGMAP_SPI
+config DA280
+ tristate "MiraMEMS DA280 3-axis 14-bit digital accelerometer driver"
+ depends on I2C
+ help
+ Say yes here to build support for the MiraMEMS DA280 3-axis 14-bit
+ digital accelerometer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called da280.
+
+config DA311
+ tristate "MiraMEMS DA311 3-axis 12-bit digital accelerometer driver"
+ depends on I2C
+ help
+ Say yes here to build support for the MiraMEMS DA311 3-axis 12-bit
+ digital accelerometer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called da311.
+
config DMARD06
tristate "Domintech DMARD06 Digital Accelerometer Driver"
depends on OF || COMPILE_TEST
@@ -73,6 +93,16 @@ config DMARD09
Choosing M will build the driver as a module. If so, the module
will be called dmard09.
+config DMARD10
+ tristate "Domintech DMARD10 3-axis Accelerometer Driver"
+ depends on I2C
+ help
+ Say yes here to get support for the Domintech DMARD10 3-axis
+ accelerometer.
+
+ Choosing M will build the driver as a module. If so, the module
+ will be called dmard10.
+
config HID_SENSOR_ACCEL_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
@@ -97,7 +127,8 @@ config IIO_ST_ACCEL_3AXIS
help
Say yes here to build support for STMicroelectronics accelerometers:
LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC,
- LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12, H3LIS331DL.
+ LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12, H3LIS331DL,
+ LNG2DM.
This driver can also be built as a module. If so, these modules
will be created:
@@ -273,6 +304,18 @@ config MXC6255
To compile this driver as a module, choose M here: the module will be
called mxc6255.
+config SCA3000
+ select IIO_BUFFER
+ select IIO_KFIFO_BUF
+ depends on SPI
+ tristate "VTI SCA3000 series accelerometers"
+ help
+ Say Y here to build support for the VTI SCA3000 series of SPI
+ accelerometers. These devices use a hardware ring buffer.
+
+ To compile this driver as a module, say M here: the module will be
+ called sca3000.
+
config STK8312
tristate "Sensortek STK8312 3-Axis Accelerometer Driver"
depends on I2C
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index f5d3ddee619e..69fe8edc57a2 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -8,8 +8,11 @@ obj-$(CONFIG_BMA220) += bma220_spi.o
obj-$(CONFIG_BMC150_ACCEL) += bmc150-accel-core.o
obj-$(CONFIG_BMC150_ACCEL_I2C) += bmc150-accel-i2c.o
obj-$(CONFIG_BMC150_ACCEL_SPI) += bmc150-accel-spi.o
+obj-$(CONFIG_DA280) += da280.o
+obj-$(CONFIG_DA311) += da311.o
obj-$(CONFIG_DMARD06) += dmard06.o
obj-$(CONFIG_DMARD09) += dmard09.o
+obj-$(CONFIG_DMARD10) += dmard10.o
obj-$(CONFIG_HID_SENSOR_ACCEL_3D) += hid-sensor-accel-3d.o
obj-$(CONFIG_KXCJK1013) += kxcjk-1013.o
obj-$(CONFIG_KXSD9) += kxsd9.o
@@ -32,6 +35,8 @@ obj-$(CONFIG_MMA9553) += mma9553.o
obj-$(CONFIG_MXC4005) += mxc4005.o
obj-$(CONFIG_MXC6255) += mxc6255.o
+obj-$(CONFIG_SCA3000) += sca3000.o
+
obj-$(CONFIG_STK8312) += stk8312.o
obj-$(CONFIG_STK8BA50) += stk8ba50.o
diff --git a/drivers/iio/accel/da280.c b/drivers/iio/accel/da280.c
new file mode 100644
index 000000000000..ed8343aeac9c
--- /dev/null
+++ b/drivers/iio/accel/da280.c
@@ -0,0 +1,183 @@
+/**
+ * IIO driver for the MiraMEMS DA280 3-axis accelerometer and
+ * the MiraMEMS DA226 2-axis accelerometer
+ *
+ * Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/byteorder/generic.h>
+
+#define DA280_REG_CHIP_ID 0x01
+#define DA280_REG_ACC_X_LSB 0x02
+#define DA280_REG_ACC_Y_LSB 0x04
+#define DA280_REG_ACC_Z_LSB 0x06
+#define DA280_REG_MODE_BW 0x11
+
+#define DA280_CHIP_ID 0x13
+#define DA280_MODE_ENABLE 0x1e
+#define DA280_MODE_DISABLE 0x9e
+
+enum { da226, da280 };
+
+/*
+ * a value of + or -4096 corresponds to + or - 1G
+ * scale = 9.81 / 4096 = 0.002395019
+ */
+
+static const int da280_nscale = 2395019;
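+
+/*
+ * Worked example (hypothetical reading, for illustration only): a raw value
+ * of 2048 is half of full scale, so it reads back as
+ * 2048 * 0.002395019 m/s^2 ~= 4.905 m/s^2, i.e. 0.5 G.
+ */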
+
+#define DA280_CHANNEL(reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec da280_channels[] = {
+ DA280_CHANNEL(DA280_REG_ACC_X_LSB, X),
+ DA280_CHANNEL(DA280_REG_ACC_Y_LSB, Y),
+ DA280_CHANNEL(DA280_REG_ACC_Z_LSB, Z),
+};
+
+struct da280_data {
+ struct i2c_client *client;
+};
+
+static int da280_enable(struct i2c_client *client, bool enable)
+{
+ u8 data = enable ? DA280_MODE_ENABLE : DA280_MODE_DISABLE;
+
+ return i2c_smbus_write_byte_data(client, DA280_REG_MODE_BW, data);
+}
+
+static int da280_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct da280_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = i2c_smbus_read_word_data(data->client, chan->address);
+ if (ret < 0)
+ return ret;
+ /*
+ * Values are 14 bits, stored as 16 bits with the 2
+ * least significant bits always 0.
+ */
+ *val = (short)ret >> 2;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = da280_nscale;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info da280_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = da280_read_raw,
+};
+
+static int da280_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct da280_data *data;
+
+ ret = i2c_smbus_read_byte_data(client, DA280_REG_CHIP_ID);
+ if (ret != DA280_CHIP_ID)
+ return (ret < 0) ? ret : -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ i2c_set_clientdata(client, indio_dev);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &da280_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = da280_channels;
+ if (id->driver_data == da226) {
+ indio_dev->name = "da226";
+ indio_dev->num_channels = 2;
+ } else {
+ indio_dev->name = "da280";
+ indio_dev->num_channels = 3;
+ }
+
+ ret = da280_enable(client, true);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "device_register failed\n");
+ da280_enable(client, false);
+ }
+
+ return ret;
+}
+
+static int da280_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+
+ return da280_enable(client, false);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int da280_suspend(struct device *dev)
+{
+ return da280_enable(to_i2c_client(dev), false);
+}
+
+static int da280_resume(struct device *dev)
+{
+ return da280_enable(to_i2c_client(dev), true);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(da280_pm_ops, da280_suspend, da280_resume);
+
+static const struct i2c_device_id da280_i2c_id[] = {
+ { "da226", da226 },
+ { "da280", da280 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, da280_i2c_id);
+
+static struct i2c_driver da280_driver = {
+ .driver = {
+ .name = "da280",
+ .pm = &da280_pm_ops,
+ },
+ .probe = da280_probe,
+ .remove = da280_remove,
+ .id_table = da280_i2c_id,
+};
+
+module_i2c_driver(da280_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("MiraMEMS DA280 3-Axis Accelerometer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/da311.c b/drivers/iio/accel/da311.c
new file mode 100644
index 000000000000..537cfa8b6edf
--- /dev/null
+++ b/drivers/iio/accel/da311.c
@@ -0,0 +1,305 @@
+/**
+ * IIO driver for the MiraMEMS DA311 3-axis accelerometer
+ *
+ * Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (c) 2011-2013 MiraMEMS Sensing Technology Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/byteorder/generic.h>
+
+#define DA311_CHIP_ID 0x13
+
+/*
+ * Note register addresses go from 0 - 0x3f and then wrap.
+ * For some reason there are 2 banks with 0 - 0x3f addresses,
+ * rather than a single 0 - 0x7f bank.
+ */
+
+/* Bank 0 regs */
+#define DA311_REG_BANK 0x0000
+#define DA311_REG_LDO_REG 0x0006
+#define DA311_REG_CHIP_ID 0x000f
+#define DA311_REG_TEMP_CFG_REG 0x001f
+#define DA311_REG_CTRL_REG1 0x0020
+#define DA311_REG_CTRL_REG3 0x0022
+#define DA311_REG_CTRL_REG4 0x0023
+#define DA311_REG_CTRL_REG5 0x0024
+#define DA311_REG_CTRL_REG6 0x0025
+#define DA311_REG_STATUS_REG 0x0027
+#define DA311_REG_OUT_X_L 0x0028
+#define DA311_REG_OUT_X_H 0x0029
+#define DA311_REG_OUT_Y_L 0x002a
+#define DA311_REG_OUT_Y_H 0x002b
+#define DA311_REG_OUT_Z_L 0x002c
+#define DA311_REG_OUT_Z_H 0x002d
+#define DA311_REG_INT1_CFG 0x0030
+#define DA311_REG_INT1_SRC 0x0031
+#define DA311_REG_INT1_THS 0x0032
+#define DA311_REG_INT1_DURATION 0x0033
+#define DA311_REG_INT2_CFG 0x0034
+#define DA311_REG_INT2_SRC 0x0035
+#define DA311_REG_INT2_THS 0x0036
+#define DA311_REG_INT2_DURATION 0x0037
+#define DA311_REG_CLICK_CFG 0x0038
+#define DA311_REG_CLICK_SRC 0x0039
+#define DA311_REG_CLICK_THS 0x003a
+#define DA311_REG_TIME_LIMIT 0x003b
+#define DA311_REG_TIME_LATENCY 0x003c
+#define DA311_REG_TIME_WINDOW 0x003d
+
+/* Bank 1 regs */
+#define DA311_REG_SOFT_RESET 0x0105
+#define DA311_REG_OTP_XOFF_L 0x0110
+#define DA311_REG_OTP_XOFF_H 0x0111
+#define DA311_REG_OTP_YOFF_L 0x0112
+#define DA311_REG_OTP_YOFF_H 0x0113
+#define DA311_REG_OTP_ZOFF_L 0x0114
+#define DA311_REG_OTP_ZOFF_H 0x0115
+#define DA311_REG_OTP_XSO 0x0116
+#define DA311_REG_OTP_YSO 0x0117
+#define DA311_REG_OTP_ZSO 0x0118
+#define DA311_REG_OTP_TRIM_OSC 0x011b
+#define DA311_REG_LPF_ABSOLUTE 0x011c
+#define DA311_REG_TEMP_OFF1 0x0127
+#define DA311_REG_TEMP_OFF2 0x0128
+#define DA311_REG_TEMP_OFF3 0x0129
+#define DA311_REG_OTP_TRIM_THERM_H 0x011a
+
+/*
+ * a value of + or -1024 corresponds to + or - 1G
+ * scale = 9.81 / 1024 = 0.009580078
+ */
+
+static const int da311_nscale = 9580078;
+
+#define DA311_CHANNEL(reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec da311_channels[] = {
+ /* | 0x80 comes from the android driver */
+ DA311_CHANNEL(DA311_REG_OUT_X_L | 0x80, X),
+ DA311_CHANNEL(DA311_REG_OUT_Y_L | 0x80, Y),
+ DA311_CHANNEL(DA311_REG_OUT_Z_L | 0x80, Z),
+};
+
+struct da311_data {
+ struct i2c_client *client;
+};
+
+static int da311_register_mask_write(struct i2c_client *client, u16 addr,
+ u8 mask, u8 data)
+{
+ int ret;
+ u8 tmp_data = 0;
+
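+ /*
+ * The high byte of addr selects the register bank: bank 1 registers are
+ * defined above with a 0x01xx address, bank 0 with 0x00xx. The low byte
+ * is the register offset within the selected bank.
+ */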
+ if (addr & 0xff00) {
+ /* Select bank 1 */
+ ret = i2c_smbus_write_byte_data(client, DA311_REG_BANK, 0x01);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (mask != 0xff) {
+ ret = i2c_smbus_read_byte_data(client, addr);
+ if (ret < 0)
+ return ret;
+ tmp_data = ret;
+ }
+
+ tmp_data &= ~mask;
+ tmp_data |= data & mask;
+ ret = i2c_smbus_write_byte_data(client, addr & 0xff, tmp_data);
+ if (ret < 0)
+ return ret;
+
+ if (addr & 0xff00) {
+ /* Back to bank 0 */
+ ret = i2c_smbus_write_byte_data(client, DA311_REG_BANK, 0x00);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Init sequence taken from the android driver */
+static int da311_reset(struct i2c_client *client)
+{
+ const struct {
+ u16 addr;
+ u8 mask;
+ u8 data;
+ } init_data[] = {
+ { DA311_REG_TEMP_CFG_REG, 0xff, 0x08 },
+ { DA311_REG_CTRL_REG5, 0xff, 0x80 },
+ { DA311_REG_CTRL_REG4, 0x30, 0x00 },
+ { DA311_REG_CTRL_REG1, 0xff, 0x6f },
+ { DA311_REG_TEMP_CFG_REG, 0xff, 0x88 },
+ { DA311_REG_LDO_REG, 0xff, 0x02 },
+ { DA311_REG_OTP_TRIM_OSC, 0xff, 0x27 },
+ { DA311_REG_LPF_ABSOLUTE, 0xff, 0x30 },
+ { DA311_REG_TEMP_OFF1, 0xff, 0x3f },
+ { DA311_REG_TEMP_OFF2, 0xff, 0xff },
+ { DA311_REG_TEMP_OFF3, 0xff, 0x0f },
+ };
+ int i, ret;
+
+ /* Reset */
+ ret = da311_register_mask_write(client, DA311_REG_SOFT_RESET,
+ 0xff, 0xaa);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(init_data); i++) {
+ ret = da311_register_mask_write(client,
+ init_data[i].addr,
+ init_data[i].mask,
+ init_data[i].data);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int da311_enable(struct i2c_client *client, bool enable)
+{
+ u8 data = enable ? 0x00 : 0x20;
+
+ return da311_register_mask_write(client, DA311_REG_TEMP_CFG_REG,
+ 0x20, data);
+}
+
+static int da311_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct da311_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = i2c_smbus_read_word_data(data->client, chan->address);
+ if (ret < 0)
+ return ret;
+ /*
+ * Values are 12 bits, stored as 16 bits with the 4
+ * least significant bits always 0.
+ */
+ *val = (short)ret >> 4;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = da311_nscale;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info da311_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = da311_read_raw,
+};
+
+static int da311_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct da311_data *data;
+
+ ret = i2c_smbus_read_byte_data(client, DA311_REG_CHIP_ID);
+ if (ret != DA311_CHIP_ID)
+ return (ret < 0) ? ret : -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ i2c_set_clientdata(client, indio_dev);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &da311_info;
+ indio_dev->name = "da311";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = da311_channels;
+ indio_dev->num_channels = ARRAY_SIZE(da311_channels);
+
+ ret = da311_reset(client);
+ if (ret < 0)
+ return ret;
+
+ ret = da311_enable(client, true);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "device_register failed\n");
+ da311_enable(client, false);
+ }
+
+ return ret;
+}
+
+static int da311_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+
+ return da311_enable(client, false);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int da311_suspend(struct device *dev)
+{
+ return da311_enable(to_i2c_client(dev), false);
+}
+
+static int da311_resume(struct device *dev)
+{
+ return da311_enable(to_i2c_client(dev), true);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(da311_pm_ops, da311_suspend, da311_resume);
+
+static const struct i2c_device_id da311_i2c_id[] = {
+ {"da311", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, da311_i2c_id);
+
+static struct i2c_driver da311_driver = {
+ .driver = {
+ .name = "da311",
+ .pm = &da311_pm_ops,
+ },
+ .probe = da311_probe,
+ .remove = da311_remove,
+ .id_table = da311_i2c_id,
+};
+
+module_i2c_driver(da311_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("MiraMEMS DA311 3-Axis Accelerometer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/dmard10.c b/drivers/iio/accel/dmard10.c
new file mode 100644
index 000000000000..b8736cc75656
--- /dev/null
+++ b/drivers/iio/accel/dmard10.c
@@ -0,0 +1,266 @@
+/**
+ * IIO driver for the 3-axis accelerometer Domintech ARD10.
+ *
+ * Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (c) 2012 Domintech Technology Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/byteorder/generic.h>
+
+#define DMARD10_REG_ACTR 0x00
+#define DMARD10_REG_AFEM 0x0c
+#define DMARD10_REG_STADR 0x12
+#define DMARD10_REG_STAINT 0x1c
+#define DMARD10_REG_MISC2 0x1f
+#define DMARD10_REG_PD 0x21
+
+#define DMARD10_MODE_OFF 0x00
+#define DMARD10_MODE_STANDBY 0x02
+#define DMARD10_MODE_ACTIVE 0x06
+#define DMARD10_MODE_READ_OTP 0x12
+#define DMARD10_MODE_RESET_DATA_PATH 0x82
+
+/* AFEN set 1, ATM[2:0]=b'000 (normal), EN_Z/Y/X/T=1 */
+#define DMARD10_VALUE_AFEM_AFEN_NORMAL 0x8f
+/* ODR[3:0]=b'0111 (100Hz), CCK[3:0]=b'0100 (204.8kHZ) */
+#define DMARD10_VALUE_CKSEL_ODR_100_204 0x74
+/* INTC[6:5]=b'00 */
+#define DMARD10_VALUE_INTC 0x00
+/* TAP1/TAP2 Average 2 */
+#define DMARD10_VALUE_TAPNS_AVE_2 0x11
+
+#define DMARD10_VALUE_STADR 0x55
+#define DMARD10_VALUE_STAINT 0xaa
+#define DMARD10_VALUE_MISC2_OSCA_EN 0x08
+#define DMARD10_VALUE_PD_RST 0x52
+
+/* Offsets into the buffer read in dmard10_read_raw() */
+#define DMARD10_X_OFFSET 1
+#define DMARD10_Y_OFFSET 2
+#define DMARD10_Z_OFFSET 3
+
+/*
+ * a value of + or -128 corresponds to + or - 1G
+ * scale = 9.81 / 128 = 0.076640625
+ */
+
+static const int dmard10_nscale = 76640625;
+
+#define DMARD10_CHANNEL(reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec dmard10_channels[] = {
+ DMARD10_CHANNEL(DMARD10_X_OFFSET, X),
+ DMARD10_CHANNEL(DMARD10_Y_OFFSET, Y),
+ DMARD10_CHANNEL(DMARD10_Z_OFFSET, Z),
+};
+
+struct dmard10_data {
+ struct i2c_client *client;
+};
+
+/* Init sequence taken from the android driver */
+static int dmard10_reset(struct i2c_client *client)
+{
+ unsigned char buffer[7];
+ int ret;
+
+ /* 1. Powerdown reset */
+ ret = i2c_smbus_write_byte_data(client, DMARD10_REG_PD,
+ DMARD10_VALUE_PD_RST);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * 2. ACTR => Standby mode => Download OTP to parameter reg =>
+ * Standby mode => Reset data path => Standby mode
+ */
+ buffer[0] = DMARD10_REG_ACTR;
+ buffer[1] = DMARD10_MODE_STANDBY;
+ buffer[2] = DMARD10_MODE_READ_OTP;
+ buffer[3] = DMARD10_MODE_STANDBY;
+ buffer[4] = DMARD10_MODE_RESET_DATA_PATH;
+ buffer[5] = DMARD10_MODE_STANDBY;
+ ret = i2c_master_send(client, buffer, 6);
+ if (ret < 0)
+ return ret;
+
+ /* 3. OSCA_EN = 1, TSTO = b'000 (INT1 = normal, TEST0 = normal) */
+ ret = i2c_smbus_write_byte_data(client, DMARD10_REG_MISC2,
+ DMARD10_VALUE_MISC2_OSCA_EN);
+ if (ret < 0)
+ return ret;
+
+ /* 4. AFEN = 1 (AFE will powerdown after ADC) */
+ buffer[0] = DMARD10_REG_AFEM;
+ buffer[1] = DMARD10_VALUE_AFEM_AFEN_NORMAL;
+ buffer[2] = DMARD10_VALUE_CKSEL_ODR_100_204;
+ buffer[3] = DMARD10_VALUE_INTC;
+ buffer[4] = DMARD10_VALUE_TAPNS_AVE_2;
+ buffer[5] = 0x00; /* DLYC, no delay timing */
+ buffer[6] = 0x07; /* INTD=1 push-pull, INTA=1 active high, AUTOT=1 */
+ ret = i2c_master_send(client, buffer, 7);
+ if (ret < 0)
+ return ret;
+
+ /* 5. Activation mode */
+ ret = i2c_smbus_write_byte_data(client, DMARD10_REG_ACTR,
+ DMARD10_MODE_ACTIVE);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* Shutdown sequence taken from the android driver */
+static int dmard10_shutdown(struct i2c_client *client)
+{
+ unsigned char buffer[3];
+
+ buffer[0] = DMARD10_REG_ACTR;
+ buffer[1] = DMARD10_MODE_STANDBY;
+ buffer[2] = DMARD10_MODE_OFF;
+
+ return i2c_master_send(client, buffer, 3);
+}
+
+static int dmard10_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct dmard10_data *data = iio_priv(indio_dev);
+ __le16 buf[4];
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ /*
+ * Read 8 bytes starting at the REG_STADR register; trying to
+ * read the individual X, Y, Z registers will always read 0.
+ */
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ DMARD10_REG_STADR,
+ sizeof(buf), (u8 *)buf);
+ if (ret < 0)
+ return ret;
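+ /*
+ * buf[0] is the word read back starting at REG_STADR itself and is
+ * not used; the X, Y and Z samples sit at the offsets defined above
+ * and are 13-bit signed values.
+ */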
+ ret = le16_to_cpu(buf[chan->address]);
+ *val = sign_extend32(ret, 12);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = dmard10_nscale;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info dmard10_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = dmard10_read_raw,
+};
+
+static int dmard10_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct dmard10_data *data;
+
+ /* These 2 registers have special POR reset values used for id */
+ ret = i2c_smbus_read_byte_data(client, DMARD10_REG_STADR);
+ if (ret != DMARD10_VALUE_STADR)
+ return (ret < 0) ? ret : -ENODEV;
+
+ ret = i2c_smbus_read_byte_data(client, DMARD10_REG_STAINT);
+ if (ret != DMARD10_VALUE_STAINT)
+ return (ret < 0) ? ret : -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev) {
+ dev_err(&client->dev, "iio allocation failed!\n");
+ return -ENOMEM;
+ }
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ i2c_set_clientdata(client, indio_dev);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &dmard10_info;
+ indio_dev->name = "dmard10";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = dmard10_channels;
+ indio_dev->num_channels = ARRAY_SIZE(dmard10_channels);
+
+ ret = dmard10_reset(client);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "device_register failed\n");
+ dmard10_shutdown(client);
+ }
+
+ return ret;
+}
+
+static int dmard10_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+
+ return dmard10_shutdown(client);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dmard10_suspend(struct device *dev)
+{
+ return dmard10_shutdown(to_i2c_client(dev));
+}
+
+static int dmard10_resume(struct device *dev)
+{
+ return dmard10_reset(to_i2c_client(dev));
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(dmard10_pm_ops, dmard10_suspend, dmard10_resume);
+
+static const struct i2c_device_id dmard10_i2c_id[] = {
+ {"dmard10", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, dmard10_i2c_id);
+
+static struct i2c_driver dmard10_driver = {
+ .driver = {
+ .name = "dmard10",
+ .pm = &dmard10_pm_ops,
+ },
+ .probe = dmard10_probe,
+ .remove = dmard10_remove,
+ .id_table = dmard10_i2c_id,
+};
+
+module_i2c_driver(dmard10_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Domintech ARD10 3-Axis Accelerometer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index 03beadf14ad3..3a40774cca74 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -39,7 +39,7 @@
#define MMA7660_SCALE_AVAIL "0.467142857"
-const int mma7660_nscale = 467142857;
+static const int mma7660_nscale = 467142857;
#define MMA7660_CHANNEL(reg, axis) { \
.type = IIO_ACCEL, \
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index d41e1b588e68..f418c588af6a 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -459,12 +459,14 @@ static int mma8452_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
mutex_lock(&data->lock);
ret = mma8452_read(data, buffer);
mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
@@ -664,37 +666,46 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
struct mma8452_data *data = iio_priv(indio_dev);
int i, ret;
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
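+
+ /*
+ * Direct mode is held for the whole function; the case branches below
+ * break out of the switch instead of returning so that
+ * iio_device_release_direct_mode() at the end is always reached.
+ */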
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
i = mma8452_get_samp_freq_index(data, val, val2);
- if (i < 0)
- return i;
-
+ if (i < 0) {
+ ret = i;
+ break;
+ }
data->ctrl_reg1 &= ~MMA8452_CTRL_DR_MASK;
data->ctrl_reg1 |= i << MMA8452_CTRL_DR_SHIFT;
- return mma8452_change_config(data, MMA8452_CTRL_REG1,
- data->ctrl_reg1);
+ ret = mma8452_change_config(data, MMA8452_CTRL_REG1,
+ data->ctrl_reg1);
+ break;
case IIO_CHAN_INFO_SCALE:
i = mma8452_get_scale_index(data, val, val2);
- if (i < 0)
- return i;
+ if (i < 0) {
+ ret = i;
+ break;
+ }
data->data_cfg &= ~MMA8452_DATA_CFG_FS_MASK;
data->data_cfg |= i;
- return mma8452_change_config(data, MMA8452_DATA_CFG,
- data->data_cfg);
+ ret = mma8452_change_config(data, MMA8452_DATA_CFG,
+ data->data_cfg);
+ break;
case IIO_CHAN_INFO_CALIBBIAS:
- if (val < -128 || val > 127)
- return -EINVAL;
+ if (val < -128 || val > 127) {
+ ret = -EINVAL;
+ break;
+ }
- return mma8452_change_config(data,
- MMA8452_OFF_X + chan->scan_index,
- val);
+ ret = mma8452_change_config(data,
+ MMA8452_OFF_X + chan->scan_index,
+ val);
+ break;
case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
if (val == 0 && val2 == 0) {
@@ -703,23 +714,30 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
data->data_cfg |= MMA8452_DATA_CFG_HPF_MASK;
ret = mma8452_set_hp_filter_frequency(data, val, val2);
if (ret < 0)
- return ret;
+ break;
}
- return mma8452_change_config(data, MMA8452_DATA_CFG,
+ ret = mma8452_change_config(data, MMA8452_DATA_CFG,
data->data_cfg);
+ break;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
ret = mma8452_get_odr_index(data);
for (i = 0; i < ARRAY_SIZE(mma8452_os_ratio); i++) {
- if (mma8452_os_ratio[i][ret] == val)
- return mma8452_set_power_mode(data, i);
+ if (mma8452_os_ratio[i][ret] == val) {
+ ret = mma8452_set_power_mode(data, i);
+ break;
+ }
}
-
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
}
static int mma8452_read_thresh(struct iio_dev *indio_dev,
@@ -1347,20 +1365,9 @@ static int mma8452_data_rdy_trigger_set_state(struct iio_trigger *trig,
return mma8452_change_config(data, MMA8452_CTRL_REG4, reg);
}
-static int mma8452_validate_device(struct iio_trigger *trig,
- struct iio_dev *indio_dev)
-{
- struct iio_dev *indio = iio_trigger_get_drvdata(trig);
-
- if (indio != indio_dev)
- return -EINVAL;
-
- return 0;
-}
-
static const struct iio_trigger_ops mma8452_trigger_ops = {
.set_trigger_state = mma8452_data_rdy_trigger_set_state,
- .validate_device = mma8452_validate_device,
+ .validate_device = iio_trigger_validate_own_device,
.owner = THIS_MODULE,
};
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
new file mode 100644
index 000000000000..cb1d83fa19a0
--- /dev/null
+++ b/drivers/iio/accel/sca3000.c
@@ -0,0 +1,1576 @@
+/*
+ * sca3000.c -- support VTI sca3000 series accelerometers via SPI
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
+ *
+ * See industrialio/accels/sca3000.h for comments.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/kfifo_buf.h>
+
+#define SCA3000_WRITE_REG(a) (((a) << 2) | 0x02)
+#define SCA3000_READ_REG(a) ((a) << 2)
+
+#define SCA3000_REG_REVID_ADDR 0x00
+#define SCA3000_REG_REVID_MAJOR_MASK GENMASK(8, 4)
+#define SCA3000_REG_REVID_MINOR_MASK GENMASK(3, 0)
+
+#define SCA3000_REG_STATUS_ADDR 0x02
+#define SCA3000_LOCKED BIT(5)
+#define SCA3000_EEPROM_CS_ERROR BIT(1)
+#define SCA3000_SPI_FRAME_ERROR BIT(0)
+
+/* All reads done using register decrement so no need to directly access LSBs */
+#define SCA3000_REG_X_MSB_ADDR 0x05
+#define SCA3000_REG_Y_MSB_ADDR 0x07
+#define SCA3000_REG_Z_MSB_ADDR 0x09
+
+#define SCA3000_REG_RING_OUT_ADDR 0x0f
+
+/* Temp read untested - the e05 doesn't have the sensor */
+#define SCA3000_REG_TEMP_MSB_ADDR 0x13
+
+#define SCA3000_REG_MODE_ADDR 0x14
+#define SCA3000_MODE_PROT_MASK 0x28
+#define SCA3000_REG_MODE_RING_BUF_ENABLE BIT(7)
+#define SCA3000_REG_MODE_RING_BUF_8BIT BIT(6)
+
+/*
+ * Free fall detection triggers an interrupt if the acceleration
+ * is below a threshold for equivalent of 25cm drop
+ */
+#define SCA3000_REG_MODE_FREE_FALL_DETECT BIT(4)
+#define SCA3000_REG_MODE_MEAS_MODE_NORMAL 0x00
+#define SCA3000_REG_MODE_MEAS_MODE_OP_1 0x01
+#define SCA3000_REG_MODE_MEAS_MODE_OP_2 0x02
+
+/*
+ * In motion detection mode the accelerations are band pass filtered
+ * (approx 1 - 25Hz) and then a programmable threshold is used to trigger
+ * an interrupt.
+ */
+#define SCA3000_REG_MODE_MEAS_MODE_MOT_DET 0x03
+#define SCA3000_REG_MODE_MODE_MASK 0x03
+
+#define SCA3000_REG_BUF_COUNT_ADDR 0x15
+
+#define SCA3000_REG_INT_STATUS_ADDR 0x16
+#define SCA3000_REG_INT_STATUS_THREE_QUARTERS BIT(7)
+#define SCA3000_REG_INT_STATUS_HALF BIT(6)
+
+#define SCA3000_INT_STATUS_FREE_FALL BIT(3)
+#define SCA3000_INT_STATUS_Y_TRIGGER BIT(2)
+#define SCA3000_INT_STATUS_X_TRIGGER BIT(1)
+#define SCA3000_INT_STATUS_Z_TRIGGER BIT(0)
+
+/* Used to allow access to multiplexed registers */
+#define SCA3000_REG_CTRL_SEL_ADDR 0x18
+/* Only available for SCA3000-D03 and SCA3000-D01 */
+#define SCA3000_REG_CTRL_SEL_I2C_DISABLE 0x01
+#define SCA3000_REG_CTRL_SEL_MD_CTRL 0x02
+#define SCA3000_REG_CTRL_SEL_MD_Y_TH 0x03
+#define SCA3000_REG_CTRL_SEL_MD_X_TH 0x04
+#define SCA3000_REG_CTRL_SEL_MD_Z_TH 0x05
+/*
+ * BE VERY CAREFUL WITH THIS, IF 3 BITS ARE NOT SET the device
+ * will not function
+ */
+#define SCA3000_REG_CTRL_SEL_OUT_CTRL 0x0B
+
+#define SCA3000_REG_OUT_CTRL_PROT_MASK 0xE0
+#define SCA3000_REG_OUT_CTRL_BUF_X_EN 0x10
+#define SCA3000_REG_OUT_CTRL_BUF_Y_EN 0x08
+#define SCA3000_REG_OUT_CTRL_BUF_Z_EN 0x04
+#define SCA3000_REG_OUT_CTRL_BUF_DIV_MASK 0x03
+#define SCA3000_REG_OUT_CTRL_BUF_DIV_4 0x02
+#define SCA3000_REG_OUT_CTRL_BUF_DIV_2 0x01
+
+
+/*
+ * Control which motion detector interrupts are on.
+ * For now only OR combinations are supported.
+ */
+#define SCA3000_MD_CTRL_PROT_MASK 0xC0
+#define SCA3000_MD_CTRL_OR_Y BIT(0)
+#define SCA3000_MD_CTRL_OR_X BIT(1)
+#define SCA3000_MD_CTRL_OR_Z BIT(2)
+/* Currently unsupported */
+#define SCA3000_MD_CTRL_AND_Y BIT(3)
+#define SCA3000_MD_CTRL_AND_X BIT(4)
+#define SAC3000_MD_CTRL_AND_Z BIT(5)
+
+/*
+ * Some control registers have complex access methods that require this
+ * register to be used to remove a lock.
+ */
+#define SCA3000_REG_UNLOCK_ADDR 0x1e
+
+#define SCA3000_REG_INT_MASK_ADDR 0x21
+#define SCA3000_REG_INT_MASK_PROT_MASK 0x1C
+
+#define SCA3000_REG_INT_MASK_RING_THREE_QUARTER BIT(7)
+#define SCA3000_REG_INT_MASK_RING_HALF BIT(6)
+
+#define SCA3000_REG_INT_MASK_ALL_INTS 0x02
+#define SCA3000_REG_INT_MASK_ACTIVE_HIGH 0x01
+#define SCA3000_REG_INT_MASK_ACTIVE_LOW 0x00
+/* Values of multiplexed registers (write to ctrl_data after select) */
+#define SCA3000_REG_CTRL_DATA_ADDR 0x22
+
+/*
+ * Measurement modes available on some sca3000 series chips. Code assumes others
+ * may become available in the future.
+ *
+ * Bypass - Bypass the low-pass filter in the signal channel so as to increase
+ * signal bandwidth.
+ *
+ * Narrow - Narrow low-pass filtering of the signal channel and half output
+ * data rate by decimation.
+ *
+ * Wide - Widen low-pass filtering of signal channel to increase bandwidth
+ */
+#define SCA3000_OP_MODE_BYPASS 0x01
+#define SCA3000_OP_MODE_NARROW 0x02
+#define SCA3000_OP_MODE_WIDE 0x04
+#define SCA3000_MAX_TX 6
+#define SCA3000_MAX_RX 2
+
+/**
+ * struct sca3000_state - device instance state information
+ * @us: the associated spi device
+ * @info: chip variant information
+ * @last_timestamp: the timestamp of the last event
+ * @mo_det_use_count: reference counter for the motion detection unit
+ * @lock: lock used to protect elements of sca3000_state
+ * and the underlying device state.
+ * @tx: dma-able transmit buffer
+ * @rx: dma-able receive buffer
+ **/
+struct sca3000_state {
+ struct spi_device *us;
+ const struct sca3000_chip_info *info;
+ s64 last_timestamp;
+ int mo_det_use_count;
+ struct mutex lock;
+ /* Can these share a cacheline ? */
+ u8 rx[384] ____cacheline_aligned;
+ u8 tx[6] ____cacheline_aligned;
+};
+
+/**
+ * struct sca3000_chip_info - model dependent parameters
+ * @scale: scale * 10^-6
+ * @temp_output: some devices have temperature sensors.
+ * @measurement_mode_freq: normal mode sampling frequency
+ * @measurement_mode_3db_freq: 3db cutoff frequency of the low pass filter for
+ * the normal measurement mode.
+ * @option_mode_1: first optional mode. Not all models have one
+ * @option_mode_1_freq: option mode 1 sampling frequency
+ * @option_mode_1_3db_freq: 3db cutoff frequency of the low pass filter for
+ * the first option mode.
+ * @option_mode_2: second optional mode. Not all chips have one
+ * @option_mode_2_freq: option mode 2 sampling frequency
+ * @option_mode_2_3db_freq: 3db cutoff frequency of the low pass filter for
+ * the second option mode.
+ * @mot_det_mult_xz: Bit wise multipliers to calculate the threshold
+ * for motion detection in the x and z axis.
+ * @mot_det_mult_y: Bit wise multipliers to calculate the threshold
+ * for motion detection in the y axis.
+ *
+ * This structure is used to hold information about the functionality of a given
+ * sca3000 variant.
+ **/
+struct sca3000_chip_info {
+ unsigned int scale;
+ bool temp_output;
+ int measurement_mode_freq;
+ int measurement_mode_3db_freq;
+ int option_mode_1;
+ int option_mode_1_freq;
+ int option_mode_1_3db_freq;
+ int option_mode_2;
+ int option_mode_2_freq;
+ int option_mode_2_3db_freq;
+ int mot_det_mult_xz[6];
+ int mot_det_mult_y[7];
+};
+
+enum sca3000_variant {
+ d01,
+ e02,
+ e04,
+ e05,
+};
+
+/*
+ * Note where option modes are not defined, the chip simply does not
+ * support any.
+ * Other chips in the sca3000 series use i2c and are not included here.
+ *
+ * Some of these devices are only listed in the family data sheet and
+ * do not actually appear to be available.
+ */
+static const struct sca3000_chip_info sca3000_spi_chip_info_tbl[] = {
+ [d01] = {
+ .scale = 7357,
+ .temp_output = true,
+ .measurement_mode_freq = 250,
+ .measurement_mode_3db_freq = 45,
+ .option_mode_1 = SCA3000_OP_MODE_BYPASS,
+ .option_mode_1_freq = 250,
+ .option_mode_1_3db_freq = 70,
+ .mot_det_mult_xz = {50, 100, 200, 350, 650, 1300},
+ .mot_det_mult_y = {50, 100, 150, 250, 450, 850, 1750},
+ },
+ [e02] = {
+ .scale = 9810,
+ .measurement_mode_freq = 125,
+ .measurement_mode_3db_freq = 40,
+ .option_mode_1 = SCA3000_OP_MODE_NARROW,
+ .option_mode_1_freq = 63,
+ .option_mode_1_3db_freq = 11,
+ .mot_det_mult_xz = {100, 150, 300, 550, 1050, 2050},
+ .mot_det_mult_y = {50, 100, 200, 350, 700, 1350, 2700},
+ },
+ [e04] = {
+ .scale = 19620,
+ .measurement_mode_freq = 100,
+ .measurement_mode_3db_freq = 38,
+ .option_mode_1 = SCA3000_OP_MODE_NARROW,
+ .option_mode_1_freq = 50,
+ .option_mode_1_3db_freq = 9,
+ .option_mode_2 = SCA3000_OP_MODE_WIDE,
+ .option_mode_2_freq = 400,
+ .option_mode_2_3db_freq = 70,
+ .mot_det_mult_xz = {200, 300, 600, 1100, 2100, 4100},
+ .mot_det_mult_y = {100, 200, 400, 7000, 1400, 2700, 54000},
+ },
+ [e05] = {
+ .scale = 61313,
+ .measurement_mode_freq = 200,
+ .measurement_mode_3db_freq = 60,
+ .option_mode_1 = SCA3000_OP_MODE_NARROW,
+ .option_mode_1_freq = 50,
+ .option_mode_1_3db_freq = 9,
+ .option_mode_2 = SCA3000_OP_MODE_WIDE,
+ .option_mode_2_freq = 400,
+ .option_mode_2_3db_freq = 75,
+ .mot_det_mult_xz = {600, 900, 1700, 3200, 6100, 11900},
+ .mot_det_mult_y = {300, 600, 1200, 2000, 4100, 7800, 15600},
+ },
+};
+
+static int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val)
+{
+ st->tx[0] = SCA3000_WRITE_REG(address);
+ st->tx[1] = val;
+ return spi_write(st->us, st->tx, 2);
+}
+
+static int sca3000_read_data_short(struct sca3000_state *st,
+ u8 reg_address_high,
+ int len)
+{
+ struct spi_transfer xfer[2] = {
+ {
+ .len = 1,
+ .tx_buf = st->tx,
+ }, {
+ .len = len,
+ .rx_buf = st->rx,
+ }
+ };
+ st->tx[0] = SCA3000_READ_REG(reg_address_high);
+
+ return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
+}
+
+/**
+ * sca3000_reg_lock_on() - test if the ctrl register lock is on
+ * @st: Driver specific device instance data.
+ *
+ * Lock must be held.
+ **/
+static int sca3000_reg_lock_on(struct sca3000_state *st)
+{
+ int ret;
+
+ ret = sca3000_read_data_short(st, SCA3000_REG_STATUS_ADDR, 1);
+ if (ret < 0)
+ return ret;
+
+ return !(st->rx[0] & SCA3000_LOCKED);
+}
+
+/**
+ * __sca3000_unlock_reg_lock() - unlock the control registers
+ * @st: Driver specific device instance data.
+ *
+ * Note the device does not appear to support doing this in a single transfer.
+ * This should only ever be used as part of ctrl reg read.
+ * Lock must be held before calling this
+ */
+static int __sca3000_unlock_reg_lock(struct sca3000_state *st)
+{
+ struct spi_transfer xfer[3] = {
+ {
+ .len = 2,
+ .cs_change = 1,
+ .tx_buf = st->tx,
+ }, {
+ .len = 2,
+ .cs_change = 1,
+ .tx_buf = st->tx + 2,
+ }, {
+ .len = 2,
+ .tx_buf = st->tx + 4,
+ },
+ };
+ st->tx[0] = SCA3000_WRITE_REG(SCA3000_REG_UNLOCK_ADDR);
+ st->tx[1] = 0x00;
+ st->tx[2] = SCA3000_WRITE_REG(SCA3000_REG_UNLOCK_ADDR);
+ st->tx[3] = 0x50;
+ st->tx[4] = SCA3000_WRITE_REG(SCA3000_REG_UNLOCK_ADDR);
+ st->tx[5] = 0xA0;
+
+ return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
+}
+
+/**
+ * sca3000_write_ctrl_reg() - write to a lock protected ctrl register
+ * @st: Driver specific device instance data.
+ * @sel: selects which registers we wish to write to
+ * @val: the value to be written
+ *
+ * Certain control registers are protected against overwriting by the lock
+ * register and use a shared write address. This function allows writing of
+ * these registers.
+ * Lock must be held.
+ */
+static int sca3000_write_ctrl_reg(struct sca3000_state *st,
+ u8 sel,
+ uint8_t val)
+{
+ int ret;
+
+ ret = sca3000_reg_lock_on(st);
+ if (ret < 0)
+ goto error_ret;
+ if (ret) {
+ ret = __sca3000_unlock_reg_lock(st);
+ if (ret)
+ goto error_ret;
+ }
+
+ /* Set the control select register */
+ ret = sca3000_write_reg(st, SCA3000_REG_CTRL_SEL_ADDR, sel);
+ if (ret)
+ goto error_ret;
+
+ /* Write the actual value into the register */
+ ret = sca3000_write_reg(st, SCA3000_REG_CTRL_DATA_ADDR, val);
+
+error_ret:
+ return ret;
+}
+
+/**
+ * sca3000_read_ctrl_reg() - read from a lock protected control register.
+ * @st: Driver specific device instance data.
+ * @ctrl_reg: Which ctrl register do we want to read.
+ *
+ * Lock must be held.
+ */
+static int sca3000_read_ctrl_reg(struct sca3000_state *st,
+ u8 ctrl_reg)
+{
+ int ret;
+
+ ret = sca3000_reg_lock_on(st);
+ if (ret < 0)
+ goto error_ret;
+ if (ret) {
+ ret = __sca3000_unlock_reg_lock(st);
+ if (ret)
+ goto error_ret;
+ }
+ /* Set the control select register */
+ ret = sca3000_write_reg(st, SCA3000_REG_CTRL_SEL_ADDR, ctrl_reg);
+ if (ret)
+ goto error_ret;
+ ret = sca3000_read_data_short(st, SCA3000_REG_CTRL_DATA_ADDR, 1);
+ if (ret)
+ goto error_ret;
+ return st->rx[0];
+error_ret:
+ return ret;
+}
+
+/**
+ * sca3000_print_rev() - read and log the chip revision number
+ * @indio_dev: Device instance specific generic IIO data.
+ * Driver specific device instance data can be obtained
+ * via iio_priv(indio_dev)
+ */
+static int sca3000_print_rev(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct sca3000_state *st = iio_priv(indio_dev);
+
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data_short(st, SCA3000_REG_REVID_ADDR, 1);
+ if (ret < 0)
+ goto error_ret;
+ dev_info(&indio_dev->dev,
+ "sca3000 revision major=%lu, minor=%lu\n",
+ st->rx[0] & SCA3000_REG_REVID_MAJOR_MASK,
+ st->rx[0] & SCA3000_REG_REVID_MINOR_MASK);
+error_ret:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static ssize_t
+sca3000_show_available_3db_freqs(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int len;
+
+ len = sprintf(buf, "%d", st->info->measurement_mode_3db_freq);
+ if (st->info->option_mode_1)
+ len += sprintf(buf + len, " %d",
+ st->info->option_mode_1_3db_freq);
+ if (st->info->option_mode_2)
+ len += sprintf(buf + len, " %d",
+ st->info->option_mode_2_3db_freq);
+ len += sprintf(buf + len, "\n");
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(in_accel_filter_low_pass_3db_frequency_available,
+ S_IRUGO, sca3000_show_available_3db_freqs,
+ NULL, 0);
+
+static const struct iio_event_spec sca3000_event = {
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
+};
+
+/*
+ * Note the hack in the number of bits to pretend we have 2 more than
+ * we do in the fifo.
+ */
+#define SCA3000_CHAN(index, mod) \
+ { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = mod, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |\
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),\
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .address = index, \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 13, \
+ .storagebits = 16, \
+ .shift = 3, \
+ .endianness = IIO_BE, \
+ }, \
+ .event_spec = &sca3000_event, \
+ .num_event_specs = 1, \
+ }
+
+static const struct iio_event_spec sca3000_freefall_event_spec = {
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_PERIOD),
+};
+
+static const struct iio_chan_spec sca3000_channels[] = {
+ SCA3000_CHAN(0, IIO_MOD_X),
+ SCA3000_CHAN(1, IIO_MOD_Y),
+ SCA3000_CHAN(2, IIO_MOD_Z),
+ {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X_AND_Y_AND_Z,
+ .scan_index = -1, /* Fake channel */
+ .event_spec = &sca3000_freefall_event_spec,
+ .num_event_specs = 1,
+ },
+};
+
+static const struct iio_chan_spec sca3000_channels_with_temp[] = {
+ SCA3000_CHAN(0, IIO_MOD_X),
+ SCA3000_CHAN(1, IIO_MOD_Y),
+ SCA3000_CHAN(2, IIO_MOD_Z),
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ /* No buffer support */
+ .scan_index = -1,
+ },
+ {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X_AND_Y_AND_Z,
+ .scan_index = -1, /* Fake channel */
+ .event_spec = &sca3000_freefall_event_spec,
+ .num_event_specs = 1,
+ },
+};
+
+static u8 sca3000_addresses[3][3] = {
+ [0] = {SCA3000_REG_X_MSB_ADDR, SCA3000_REG_CTRL_SEL_MD_X_TH,
+ SCA3000_MD_CTRL_OR_X},
+ [1] = {SCA3000_REG_Y_MSB_ADDR, SCA3000_REG_CTRL_SEL_MD_Y_TH,
+ SCA3000_MD_CTRL_OR_Y},
+ [2] = {SCA3000_REG_Z_MSB_ADDR, SCA3000_REG_CTRL_SEL_MD_Z_TH,
+ SCA3000_MD_CTRL_OR_Z},
+};
+
+/**
+ * __sca3000_get_base_freq() - obtain mode specific base frequency
+ * @st: Private driver specific device instance specific state.
+ * @info: chip type specific information.
+ * @base_freq: Base frequency for the current measurement mode.
+ *
+ * lock must be held
+ */
+static inline int __sca3000_get_base_freq(struct sca3000_state *st,
+ const struct sca3000_chip_info *info,
+ int *base_freq)
+{
+ int ret;
+
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ goto error_ret;
+ switch (SCA3000_REG_MODE_MODE_MASK & st->rx[0]) {
+ case SCA3000_REG_MODE_MEAS_MODE_NORMAL:
+ *base_freq = info->measurement_mode_freq;
+ break;
+ case SCA3000_REG_MODE_MEAS_MODE_OP_1:
+ *base_freq = info->option_mode_1_freq;
+ break;
+ case SCA3000_REG_MODE_MEAS_MODE_OP_2:
+ *base_freq = info->option_mode_2_freq;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+error_ret:
+ return ret;
+}
+
+/**
+ * sca3000_read_raw_samp_freq() - read_raw handler for IIO_CHAN_INFO_SAMP_FREQ
+ * @st: Private driver specific device instance specific state.
+ * @val: The frequency read back.
+ *
+ * lock must be held
+ **/
+static int sca3000_read_raw_samp_freq(struct sca3000_state *st, int *val)
+{
+ int ret;
+
+ ret = __sca3000_get_base_freq(st, st->info, val);
+ if (ret)
+ return ret;
+
+ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+ if (ret < 0)
+ return ret;
+
+ if (*val > 0) {
+ ret &= SCA3000_REG_OUT_CTRL_BUF_DIV_MASK;
+ switch (ret) {
+ case SCA3000_REG_OUT_CTRL_BUF_DIV_2:
+ *val /= 2;
+ break;
+ case SCA3000_REG_OUT_CTRL_BUF_DIV_4:
+ *val /= 4;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * sca3000_write_raw_samp_freq() - write_raw handler for IIO_CHAN_INFO_SAMP_FREQ
+ * @st: Private driver specific device instance specific state.
+ * @val: The frequency desired.
+ *
+ * lock must be held
+ */
+static int sca3000_write_raw_samp_freq(struct sca3000_state *st, int val)
+{
+ int ret, base_freq, ctrlval;
+
+ ret = __sca3000_get_base_freq(st, st->info, &base_freq);
+ if (ret)
+ return ret;
+
+ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+ if (ret < 0)
+ return ret;
+
+ ctrlval = ret & ~SCA3000_REG_OUT_CTRL_BUF_DIV_MASK;
+
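+ /*
+ * Only the base frequency of the current mode and its /2 and /4
+ * divisions can be selected here; any other value is rejected.
+ */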
+ if (val == base_freq / 2)
+ ctrlval |= SCA3000_REG_OUT_CTRL_BUF_DIV_2;
+ else if (val == base_freq / 4)
+ ctrlval |= SCA3000_REG_OUT_CTRL_BUF_DIV_4;
+ else if (val != base_freq)
+ return -EINVAL;
+
+ return sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
+ ctrlval);
+}
+
+static int sca3000_read_3db_freq(struct sca3000_state *st, int *val)
+{
+ int ret;
+
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ return ret;
+
+ /* mask bottom 2 bits - only ones that are relevant */
+ st->rx[0] &= SCA3000_REG_MODE_MODE_MASK;
+ switch (st->rx[0]) {
+ case SCA3000_REG_MODE_MEAS_MODE_NORMAL:
+ *val = st->info->measurement_mode_3db_freq;
+ return IIO_VAL_INT;
+ case SCA3000_REG_MODE_MEAS_MODE_MOT_DET:
+ return -EBUSY;
+ case SCA3000_REG_MODE_MEAS_MODE_OP_1:
+ *val = st->info->option_mode_1_3db_freq;
+ return IIO_VAL_INT;
+ case SCA3000_REG_MODE_MEAS_MODE_OP_2:
+ *val = st->info->option_mode_2_3db_freq;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sca3000_write_3db_freq(struct sca3000_state *st, int val)
+{
+ int ret;
+ int mode;
+
+ if (val == st->info->measurement_mode_3db_freq)
+ mode = SCA3000_REG_MODE_MEAS_MODE_NORMAL;
+ else if (st->info->option_mode_1 &&
+ (val == st->info->option_mode_1_3db_freq))
+ mode = SCA3000_REG_MODE_MEAS_MODE_OP_1;
+ else if (st->info->option_mode_2 &&
+ (val == st->info->option_mode_2_3db_freq))
+ mode = SCA3000_REG_MODE_MEAS_MODE_OP_2;
+ else
+ return -EINVAL;
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ return ret;
+
+ st->rx[0] &= ~SCA3000_REG_MODE_MODE_MASK;
+ st->rx[0] |= (mode & SCA3000_REG_MODE_MODE_MASK);
+
+ return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR, st->rx[0]);
+}
+
+static int sca3000_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+ u8 address;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&st->lock);
+ if (chan->type == IIO_ACCEL) {
+ if (st->mo_det_use_count) {
+ mutex_unlock(&st->lock);
+ return -EBUSY;
+ }
+ address = sca3000_addresses[chan->address][0];
+ ret = sca3000_read_data_short(st, address, 2);
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+ *val = (be16_to_cpup((__be16 *)st->rx) >> 3) & 0x1FFF;
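+ /* sign extend the 13 bit twos complement sample */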
+ *val = ((*val) << (sizeof(*val) * 8 - 13)) >>
+ (sizeof(*val) * 8 - 13);
+ } else {
+ /* get the temperature when available */
+ ret = sca3000_read_data_short(st,
+ SCA3000_REG_TEMP_MSB_ADDR,
+ 2);
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
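+ /* 9 bit temperature: low 6 bits of the MSB register, top 3 bits of the LSB register */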
+ *val = ((st->rx[0] & 0x3F) << 3) |
+ ((st->rx[1] & 0xE0) >> 5);
+ }
+ mutex_unlock(&st->lock);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ if (chan->type == IIO_ACCEL)
+ *val2 = st->info->scale;
+ else /* temperature */
+ *val2 = 555556;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = -214;
+ *val2 = 600000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ mutex_lock(&st->lock);
+ ret = sca3000_read_raw_samp_freq(st, val);
+ mutex_unlock(&st->lock);
+ return ret ? ret : IIO_VAL_INT;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ mutex_lock(&st->lock);
+ ret = sca3000_read_3db_freq(st, val);
+ mutex_unlock(&st->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sca3000_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val2)
+ return -EINVAL;
+ mutex_lock(&st->lock);
+ ret = sca3000_write_raw_samp_freq(st, val);
+ mutex_unlock(&st->lock);
+ return ret;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ if (val2)
+ return -EINVAL;
+ mutex_lock(&st->lock);
+ ret = sca3000_write_3db_freq(st, val);
+ mutex_unlock(&st->lock);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * sca3000_read_av_freq() - sysfs function to get available frequencies
+ * @dev: Device structure for this device.
+ * @attr: Description of the attribute.
+ * @buf: Output buffer for the available frequencies.
+ *
+ * The divided-down rates are only relevant to the ring buffer and depend on the
+ * current mode. Note that the data sheet gives rather wide tolerances for these,
+ * so integer division gives a good enough answer, and not all chips have them
+ * specified at all.
+ **/
+static ssize_t sca3000_read_av_freq(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int len = 0, ret, val;
+
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ val = st->rx[0];
+ mutex_unlock(&st->lock);
+ if (ret)
+ goto error_ret;
+
+ switch (val & SCA3000_REG_MODE_MODE_MASK) {
+ case SCA3000_REG_MODE_MEAS_MODE_NORMAL:
+ len += sprintf(buf + len, "%d %d %d\n",
+ st->info->measurement_mode_freq,
+ st->info->measurement_mode_freq / 2,
+ st->info->measurement_mode_freq / 4);
+ break;
+ case SCA3000_REG_MODE_MEAS_MODE_OP_1:
+ len += sprintf(buf + len, "%d %d %d\n",
+ st->info->option_mode_1_freq,
+ st->info->option_mode_1_freq / 2,
+ st->info->option_mode_1_freq / 4);
+ break;
+ case SCA3000_REG_MODE_MEAS_MODE_OP_2:
+ len += sprintf(buf + len, "%d %d %d\n",
+ st->info->option_mode_2_freq,
+ st->info->option_mode_2_freq / 2,
+ st->info->option_mode_2_freq / 4);
+ break;
+ }
+ return len;
+error_ret:
+ return ret;
+}
+
+/*
+ * Should only really be registered if ring buffer support is compiled in.
+ * It does no harm, however, and doing it right would add a fair bit of complexity.
+ */
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(sca3000_read_av_freq);
+
+/**
+ * sca3000_read_event_value() - query of a threshold or period
+ **/
+static int sca3000_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ int ret, i;
+ struct sca3000_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ mutex_lock(&st->lock);
+ ret = sca3000_read_ctrl_reg(st,
+ sca3000_addresses[chan->address][1]);
+ mutex_unlock(&st->lock);
+ if (ret < 0)
+ return ret;
+ *val = 0;
+ if (chan->channel2 == IIO_MOD_Y)
+ for_each_set_bit(i, (unsigned long *)&ret,
+ ARRAY_SIZE(st->info->mot_det_mult_y))
+ *val += st->info->mot_det_mult_y[i];
+ else
+ for_each_set_bit(i, (unsigned long *)&ret,
+ ARRAY_SIZE(st->info->mot_det_mult_xz))
+ *val += st->info->mot_det_mult_xz[i];
+
+ return IIO_VAL_INT;
+ case IIO_EV_INFO_PERIOD:
+ *val = 0;
+ *val2 = 226000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * sca3000_write_event_value() - control of threshold and period
+ * @indio_dev: Device instance specific IIO information.
+ * @chan: Description of the channel for which the event is being
+ * configured.
+ * @type: The type of event being configured, here magnitude rising
+ * as everything else is read only.
+ * @dir: Direction of the event (here rising)
+ * @info: What information about the event are we configuring.
+ * Here the threshold only.
+ * @val: Integer part of the value being written.
+ * @val2: Non integer part of the value being written. Here always 0.
+ */
+static int sca3000_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+ int i;
+ u8 nonlinear = 0;
+
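+ /* decompose val into the per-bit threshold multipliers, highest index first */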
+ if (chan->channel2 == IIO_MOD_Y) {
+ i = ARRAY_SIZE(st->info->mot_det_mult_y);
+ while (i > 0)
+ if (val >= st->info->mot_det_mult_y[--i]) {
+ nonlinear |= (1 << i);
+ val -= st->info->mot_det_mult_y[i];
+ }
+ } else {
+ i = ARRAY_SIZE(st->info->mot_det_mult_xz);
+ while (i > 0)
+ if (val >= st->info->mot_det_mult_xz[--i]) {
+ nonlinear |= (1 << i);
+ val -= st->info->mot_det_mult_xz[i];
+ }
+ }
+
+ mutex_lock(&st->lock);
+ ret = sca3000_write_ctrl_reg(st,
+ sca3000_addresses[chan->address][1],
+ nonlinear);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static struct attribute *sca3000_attributes[] = {
+ &iio_dev_attr_in_accel_filter_low_pass_3db_frequency_available.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group sca3000_attribute_group = {
+ .attrs = sca3000_attributes,
+};
+
+static int sca3000_read_data(struct sca3000_state *st,
+ u8 reg_address_high,
+ u8 *rx,
+ int len)
+{
+ int ret;
+ struct spi_transfer xfer[2] = {
+ {
+ .len = 1,
+ .tx_buf = st->tx,
+ }, {
+ .len = len,
+ .rx_buf = rx,
+ }
+ };
+
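+ /* first transfer sends the read address byte, second clocks back len bytes */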
+ st->tx[0] = SCA3000_READ_REG(reg_address_high);
+ ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
+ if (ret) {
+ dev_err(&st->us->dev, "problem reading register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * sca3000_ring_int_process() - ring specific interrupt handling.
+ * @val: Value of the interrupt status register.
+ * @indio_dev: Device instance specific IIO device structure.
+ */
+static void sca3000_ring_int_process(u8 val, struct iio_dev *indio_dev)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret, i, num_available;
+
+ mutex_lock(&st->lock);
+
+ if (val & SCA3000_REG_INT_STATUS_HALF) {
+ ret = sca3000_read_data_short(st, SCA3000_REG_BUF_COUNT_ADDR,
+ 1);
+ if (ret)
+ goto error_ret;
+ num_available = st->rx[0];
+ /*
+ * num_available is the total number of samples available
+ * i.e. number of time points * number of channels.
+ */
+ ret = sca3000_read_data(st, SCA3000_REG_RING_OUT_ADDR, st->rx,
+ num_available * 2);
+ if (ret)
+ goto error_ret;
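+ /* each time point comprises 3 channels of 2 bytes each */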
+ for (i = 0; i < num_available / 3; i++) {
+ /*
+ * Dirty hack to cover for 11 bit in fifo, 13 bit
+ * direct reading.
+ *
+ * In theory the bottom two bits are undefined.
+ * In reality they appear to always be 0.
+ */
+ iio_push_to_buffers(indio_dev, st->rx + i * 3 * 2);
+ }
+ }
+error_ret:
+ mutex_unlock(&st->lock);
+}
+
+/**
+ * sca3000_event_handler() - handling ring and non ring events
+ * @irq: The irq being handled.
+ * @private: struct iio_device pointer for the device.
+ *
+ * Interrupt handler for both ring and non ring events. Depending on the
+ * event, data is pushed to the ring buffer or an event to the event chrdev.
+ *
+ * This function is complicated by the fact that the devices can signify ring
+ * and non ring events via the same interrupt line and they can only
+ * be distinguished via a read of the relevant status register.
+ */
+static irqreturn_t sca3000_event_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret, val;
+ s64 last_timestamp = iio_get_time_ns(indio_dev);
+
+ /*
+ * If badly timed, this could lead to an extra read of the status
+ * register, but it ensures no interrupt is missed.
+ */
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data_short(st, SCA3000_REG_INT_STATUS_ADDR, 1);
+ val = st->rx[0];
+ mutex_unlock(&st->lock);
+ if (ret)
+ goto done;
+
+ sca3000_ring_int_process(val, indio_dev);
+
+ if (val & SCA3000_INT_STATUS_FREE_FALL)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_FALLING),
+ last_timestamp);
+
+ if (val & SCA3000_INT_STATUS_Y_TRIGGER)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Y,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_RISING),
+ last_timestamp);
+
+ if (val & SCA3000_INT_STATUS_X_TRIGGER)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_X,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_RISING),
+ last_timestamp);
+
+ if (val & SCA3000_INT_STATUS_Z_TRIGGER)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Z,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_RISING),
+ last_timestamp);
+
+done:
+ return IRQ_HANDLED;
+}
+
+/**
+ * sca3000_read_event_config() - query which events are enabled
+ **/
+static int sca3000_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+ /* read current value of mode register */
+ mutex_lock(&st->lock);
+
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ goto error_ret;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X_AND_Y_AND_Z:
+ ret = !!(st->rx[0] & SCA3000_REG_MODE_FREE_FALL_DETECT);
+ break;
+ case IIO_MOD_X:
+ case IIO_MOD_Y:
+ case IIO_MOD_Z:
+ /*
+ * Motion detection mode cannot run at the same time as
+ * acceleration data being read.
+ */
+ if ((st->rx[0] & SCA3000_REG_MODE_MODE_MASK)
+ != SCA3000_REG_MODE_MEAS_MODE_MOT_DET) {
+ ret = 0;
+ } else {
+ ret = sca3000_read_ctrl_reg(st,
+ SCA3000_REG_CTRL_SEL_MD_CTRL);
+ if (ret < 0)
+ goto error_ret;
+ /* only supporting logical or's for now */
+ ret = !!(ret & sca3000_addresses[chan->address][2]);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+error_ret:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int sca3000_freefall_set_state(struct iio_dev *indio_dev, int state)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+
+ /* read current value of mode register */
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ return ret;
+
+ /* if off and should be on */
+ if (state && !(st->rx[0] & SCA3000_REG_MODE_FREE_FALL_DETECT))
+ return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+ st->rx[0] | SCA3000_REG_MODE_FREE_FALL_DETECT);
+ /* if on and should be off */
+ else if (!state && (st->rx[0] & SCA3000_REG_MODE_FREE_FALL_DETECT))
+ return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+ st->rx[0] & ~SCA3000_REG_MODE_FREE_FALL_DETECT);
+ else
+ return 0;
+}
+
+static int sca3000_motion_detect_set_state(struct iio_dev *indio_dev, int axis,
+ int state)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret, ctrlval;
+
+ /*
+ * First read the motion detector config to find out if
+ * this axis is on
+ */
+ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
+ if (ret < 0)
+ return ret;
+ ctrlval = ret;
+ /* if off and should be on */
+ if (state && !(ctrlval & sca3000_addresses[axis][2])) {
+ ret = sca3000_write_ctrl_reg(st,
+ SCA3000_REG_CTRL_SEL_MD_CTRL,
+ ctrlval |
+ sca3000_addresses[axis][2]);
+ if (ret)
+ return ret;
+ st->mo_det_use_count++;
+ } else if (!state && (ctrlval & sca3000_addresses[axis][2])) {
+ ret = sca3000_write_ctrl_reg(st,
+ SCA3000_REG_CTRL_SEL_MD_CTRL,
+ ctrlval &
+ ~(sca3000_addresses[axis][2]));
+ if (ret)
+ return ret;
+ st->mo_det_use_count--;
+ }
+
+ /* read current value of mode register */
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ return ret;
+ /* if off and should be on */
+ if ((st->mo_det_use_count) &&
+ ((st->rx[0] & SCA3000_REG_MODE_MODE_MASK)
+ != SCA3000_REG_MODE_MEAS_MODE_MOT_DET))
+ return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+ (st->rx[0] & ~SCA3000_REG_MODE_MODE_MASK)
+ | SCA3000_REG_MODE_MEAS_MODE_MOT_DET);
+ /* if on and should be off */
+ else if (!(st->mo_det_use_count) &&
+ ((st->rx[0] & SCA3000_REG_MODE_MODE_MASK)
+ == SCA3000_REG_MODE_MEAS_MODE_MOT_DET))
+ return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+ st->rx[0] & ~SCA3000_REG_MODE_MODE_MASK);
+ else
+ return 0;
+}
+
+/**
+ * sca3000_write_event_config() - simple on off control for motion detector
+ * @indio_dev: IIO device instance specific structure. Data specific to this
+ * particular driver may be accessed via iio_priv(indio_dev).
+ * @chan: Description of the channel whose event we are configuring.
+ * @type: The type of event.
+ * @dir: The direction of the event.
+ * @state: Desired state of event being configured.
+ *
+ * This is a per axis control, but enabling any will result in the
+ * motion detector unit being enabled.
+ * N.B. enabling motion detector stops normal data acquisition.
+ * There is a complexity in knowing which mode to return to when
+ * this mode is disabled. Currently normal mode is assumed.
+ **/
+static int sca3000_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->lock);
+ switch (chan->channel2) {
+ case IIO_MOD_X_AND_Y_AND_Z:
+ ret = sca3000_freefall_set_state(indio_dev, state);
+ break;
+
+ case IIO_MOD_X:
+ case IIO_MOD_Y:
+ case IIO_MOD_Z:
+ ret = sca3000_motion_detect_set_state(indio_dev,
+ chan->address,
+ state);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int sca3000_configure_ring(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *buffer;
+
+ buffer = iio_kfifo_allocate();
+ if (!buffer)
+ return -ENOMEM;
+
+ iio_device_attach_buffer(indio_dev, buffer);
+ indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
+
+ return 0;
+}
+
+static void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
+{
+ iio_kfifo_free(indio_dev->buffer);
+}
+
+static inline
+int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ goto error_ret;
+ if (state) {
+ dev_info(&indio_dev->dev, "supposedly enabling ring buffer\n");
+ ret = sca3000_write_reg(st,
+ SCA3000_REG_MODE_ADDR,
+ (st->rx[0] | SCA3000_REG_MODE_RING_BUF_ENABLE));
+ } else
+ ret = sca3000_write_reg(st,
+ SCA3000_REG_MODE_ADDR,
+ (st->rx[0] & ~SCA3000_REG_MODE_RING_BUF_ENABLE));
+error_ret:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+/**
+ * sca3000_hw_ring_preenable() - hw ring buffer preenable function
+ * @indio_dev: structure representing the IIO device. Device instance
+ * specific state can be accessed via iio_priv(indio_dev).
+ *
+ * Very simple enable function as the chip allows normal reads
+ * during ring buffer operation so as long as it is indeed running
+ * before we notify the core, the precise ordering does not matter.
+ */
+static int sca3000_hw_ring_preenable(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct sca3000_state *st = iio_priv(indio_dev);
+
+ mutex_lock(&st->lock);
+
+ /* Enable the 50% full interrupt */
+ ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
+ if (ret)
+ goto error_unlock;
+ ret = sca3000_write_reg(st,
+ SCA3000_REG_INT_MASK_ADDR,
+ st->rx[0] | SCA3000_REG_INT_MASK_RING_HALF);
+ if (ret)
+ goto error_unlock;
+
+ mutex_unlock(&st->lock);
+
+ return __sca3000_hw_ring_state_set(indio_dev, 1);
+
+error_unlock:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int sca3000_hw_ring_postdisable(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct sca3000_state *st = iio_priv(indio_dev);
+
+ ret = __sca3000_hw_ring_state_set(indio_dev, 0);
+ if (ret)
+ return ret;
+
+ /* Disable the 50% full interrupt */
+ mutex_lock(&st->lock);
+
+ ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
+ if (ret)
+ goto unlock;
+ ret = sca3000_write_reg(st,
+ SCA3000_REG_INT_MASK_ADDR,
+ st->rx[0] & ~SCA3000_REG_INT_MASK_RING_HALF);
+unlock:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static const struct iio_buffer_setup_ops sca3000_ring_setup_ops = {
+ .preenable = &sca3000_hw_ring_preenable,
+ .postdisable = &sca3000_hw_ring_postdisable,
+};
+
+/**
+ * sca3000_clean_setup() - get the device into a predictable state
+ * @st: Device instance specific private data structure
+ *
+ * Devices use flash memory to store many of the register values
+ * and hence can come up in somewhat unpredictable states.
+ * We therefore reset everything on driver load.
+ */
+static int sca3000_clean_setup(struct sca3000_state *st)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ /* Ensure all interrupts have been acknowledged */
+ ret = sca3000_read_data_short(st, SCA3000_REG_INT_STATUS_ADDR, 1);
+ if (ret)
+ goto error_ret;
+
+ /* Turn off all motion detection channels */
+ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
+ if (ret < 0)
+ goto error_ret;
+ ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL,
+ ret & SCA3000_MD_CTRL_PROT_MASK);
+ if (ret)
+ goto error_ret;
+
+ /* Disable ring buffer */
+ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+ if (ret < 0)
+ goto error_ret;
+ ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
+ (ret & SCA3000_REG_OUT_CTRL_PROT_MASK)
+ | SCA3000_REG_OUT_CTRL_BUF_X_EN
+ | SCA3000_REG_OUT_CTRL_BUF_Y_EN
+ | SCA3000_REG_OUT_CTRL_BUF_Z_EN
+ | SCA3000_REG_OUT_CTRL_BUF_DIV_4);
+ if (ret)
+ goto error_ret;
+ /* Enable interrupts, relevant to mode and set up as active low */
+ ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
+ if (ret)
+ goto error_ret;
+ ret = sca3000_write_reg(st,
+ SCA3000_REG_INT_MASK_ADDR,
+ (st->rx[0] & SCA3000_REG_INT_MASK_PROT_MASK)
+ | SCA3000_REG_INT_MASK_ACTIVE_LOW);
+ if (ret)
+ goto error_ret;
+ /*
+ * Select normal measurement mode, free fall off, ring off
+ * Ring in 12 bit mode - it is fine to overwrite reserved bits 3,5
+ * as that occurs in one of the examples in the datasheet
+ */
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ goto error_ret;
+ ret = sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+ (st->rx[0] & SCA3000_MODE_PROT_MASK));
+
+error_ret:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static const struct iio_info sca3000_info = {
+ .attrs = &sca3000_attribute_group,
+ .read_raw = &sca3000_read_raw,
+ .write_raw = &sca3000_write_raw,
+ .read_event_value = &sca3000_read_event_value,
+ .write_event_value = &sca3000_write_event_value,
+ .read_event_config = &sca3000_read_event_config,
+ .write_event_config = &sca3000_write_event_config,
+ .driver_module = THIS_MODULE,
+};
+
+static int sca3000_probe(struct spi_device *spi)
+{
+ int ret;
+ struct sca3000_state *st;
+ struct iio_dev *indio_dev;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+ st->us = spi;
+ mutex_init(&st->lock);
+ st->info = &sca3000_spi_chip_info_tbl[spi_get_device_id(spi)
+ ->driver_data];
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &sca3000_info;
+ if (st->info->temp_output) {
+ indio_dev->channels = sca3000_channels_with_temp;
+ indio_dev->num_channels =
+ ARRAY_SIZE(sca3000_channels_with_temp);
+ } else {
+ indio_dev->channels = sca3000_channels;
+ indio_dev->num_channels = ARRAY_SIZE(sca3000_channels);
+ }
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = sca3000_configure_ring(indio_dev);
+ if (ret)
+ return ret;
+
+ if (spi->irq) {
+ ret = request_threaded_irq(spi->irq,
+ NULL,
+ &sca3000_event_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "sca3000",
+ indio_dev);
+ if (ret)
+ return ret;
+ }
+ indio_dev->setup_ops = &sca3000_ring_setup_ops;
+ ret = sca3000_clean_setup(st);
+ if (ret)
+ goto error_free_irq;
+
+ ret = sca3000_print_rev(indio_dev);
+ if (ret)
+ goto error_free_irq;
+
+ return iio_device_register(indio_dev);
+
+error_free_irq:
+ if (spi->irq)
+ free_irq(spi->irq, indio_dev);
+
+ return ret;
+}
+
+static int sca3000_stop_all_interrupts(struct sca3000_state *st)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
+ if (ret)
+ goto error_ret;
+ ret = sca3000_write_reg(st, SCA3000_REG_INT_MASK_ADDR,
+ (st->rx[0] &
+ ~(SCA3000_REG_INT_MASK_RING_THREE_QUARTER |
+ SCA3000_REG_INT_MASK_RING_HALF |
+ SCA3000_REG_INT_MASK_ALL_INTS)));
+error_ret:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static int sca3000_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct sca3000_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ /* Must ensure no interrupts can be generated after this! */
+ sca3000_stop_all_interrupts(st);
+ if (spi->irq)
+ free_irq(spi->irq, indio_dev);
+
+ sca3000_unconfigure_ring(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id sca3000_id[] = {
+ {"sca3000_d01", d01},
+ {"sca3000_e02", e02},
+ {"sca3000_e04", e04},
+ {"sca3000_e05", e05},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, sca3000_id);
+
+static struct spi_driver sca3000_driver = {
+ .driver = {
+ .name = "sca3000",
+ },
+ .probe = sca3000_probe,
+ .remove = sca3000_remove,
+ .id_table = sca3000_id,
+};
+module_spi_driver(sca3000_driver);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
+MODULE_DESCRIPTION("VTI SCA3000 Series Accelerometers SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index f8dfdb690563..7c231687109a 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -30,6 +30,7 @@
#define LSM303AGR_ACCEL_DEV_NAME "lsm303agr_accel"
#define LIS2DH12_ACCEL_DEV_NAME "lis2dh12_accel"
#define LIS3L02DQ_ACCEL_DEV_NAME "lis3l02dq"
+#define LNG2DM_ACCEL_DEV_NAME "lng2dm"
/**
* struct st_sensors_platform_data - default accel platform data
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index ce69048c88e9..f6b6d42385e1 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -43,194 +43,6 @@
#define ST_ACCEL_FS_AVL_200G 200
#define ST_ACCEL_FS_AVL_400G 400
-/* CUSTOM VALUES FOR SENSOR 1 */
-#define ST_ACCEL_1_WAI_EXP 0x33
-#define ST_ACCEL_1_ODR_ADDR 0x20
-#define ST_ACCEL_1_ODR_MASK 0xf0
-#define ST_ACCEL_1_ODR_AVL_1HZ_VAL 0x01
-#define ST_ACCEL_1_ODR_AVL_10HZ_VAL 0x02
-#define ST_ACCEL_1_ODR_AVL_25HZ_VAL 0x03
-#define ST_ACCEL_1_ODR_AVL_50HZ_VAL 0x04
-#define ST_ACCEL_1_ODR_AVL_100HZ_VAL 0x05
-#define ST_ACCEL_1_ODR_AVL_200HZ_VAL 0x06
-#define ST_ACCEL_1_ODR_AVL_400HZ_VAL 0x07
-#define ST_ACCEL_1_ODR_AVL_1600HZ_VAL 0x08
-#define ST_ACCEL_1_FS_ADDR 0x23
-#define ST_ACCEL_1_FS_MASK 0x30
-#define ST_ACCEL_1_FS_AVL_2_VAL 0x00
-#define ST_ACCEL_1_FS_AVL_4_VAL 0x01
-#define ST_ACCEL_1_FS_AVL_8_VAL 0x02
-#define ST_ACCEL_1_FS_AVL_16_VAL 0x03
-#define ST_ACCEL_1_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1000)
-#define ST_ACCEL_1_FS_AVL_4_GAIN IIO_G_TO_M_S_2(2000)
-#define ST_ACCEL_1_FS_AVL_8_GAIN IIO_G_TO_M_S_2(4000)
-#define ST_ACCEL_1_FS_AVL_16_GAIN IIO_G_TO_M_S_2(12000)
-#define ST_ACCEL_1_BDU_ADDR 0x23
-#define ST_ACCEL_1_BDU_MASK 0x80
-#define ST_ACCEL_1_DRDY_IRQ_ADDR 0x22
-#define ST_ACCEL_1_DRDY_IRQ_INT1_MASK 0x10
-#define ST_ACCEL_1_DRDY_IRQ_INT2_MASK 0x08
-#define ST_ACCEL_1_IHL_IRQ_ADDR 0x25
-#define ST_ACCEL_1_IHL_IRQ_MASK 0x02
-#define ST_ACCEL_1_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 2 */
-#define ST_ACCEL_2_WAI_EXP 0x32
-#define ST_ACCEL_2_ODR_ADDR 0x20
-#define ST_ACCEL_2_ODR_MASK 0x18
-#define ST_ACCEL_2_ODR_AVL_50HZ_VAL 0x00
-#define ST_ACCEL_2_ODR_AVL_100HZ_VAL 0x01
-#define ST_ACCEL_2_ODR_AVL_400HZ_VAL 0x02
-#define ST_ACCEL_2_ODR_AVL_1000HZ_VAL 0x03
-#define ST_ACCEL_2_PW_ADDR 0x20
-#define ST_ACCEL_2_PW_MASK 0xe0
-#define ST_ACCEL_2_FS_ADDR 0x23
-#define ST_ACCEL_2_FS_MASK 0x30
-#define ST_ACCEL_2_FS_AVL_2_VAL 0X00
-#define ST_ACCEL_2_FS_AVL_4_VAL 0X01
-#define ST_ACCEL_2_FS_AVL_8_VAL 0x03
-#define ST_ACCEL_2_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1000)
-#define ST_ACCEL_2_FS_AVL_4_GAIN IIO_G_TO_M_S_2(2000)
-#define ST_ACCEL_2_FS_AVL_8_GAIN IIO_G_TO_M_S_2(3900)
-#define ST_ACCEL_2_BDU_ADDR 0x23
-#define ST_ACCEL_2_BDU_MASK 0x80
-#define ST_ACCEL_2_DRDY_IRQ_ADDR 0x22
-#define ST_ACCEL_2_DRDY_IRQ_INT1_MASK 0x02
-#define ST_ACCEL_2_DRDY_IRQ_INT2_MASK 0x10
-#define ST_ACCEL_2_IHL_IRQ_ADDR 0x22
-#define ST_ACCEL_2_IHL_IRQ_MASK 0x80
-#define ST_ACCEL_2_OD_IRQ_ADDR 0x22
-#define ST_ACCEL_2_OD_IRQ_MASK 0x40
-#define ST_ACCEL_2_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 3 */
-#define ST_ACCEL_3_WAI_EXP 0x40
-#define ST_ACCEL_3_ODR_ADDR 0x20
-#define ST_ACCEL_3_ODR_MASK 0xf0
-#define ST_ACCEL_3_ODR_AVL_3HZ_VAL 0x01
-#define ST_ACCEL_3_ODR_AVL_6HZ_VAL 0x02
-#define ST_ACCEL_3_ODR_AVL_12HZ_VAL 0x03
-#define ST_ACCEL_3_ODR_AVL_25HZ_VAL 0x04
-#define ST_ACCEL_3_ODR_AVL_50HZ_VAL 0x05
-#define ST_ACCEL_3_ODR_AVL_100HZ_VAL 0x06
-#define ST_ACCEL_3_ODR_AVL_200HZ_VAL 0x07
-#define ST_ACCEL_3_ODR_AVL_400HZ_VAL 0x08
-#define ST_ACCEL_3_ODR_AVL_800HZ_VAL 0x09
-#define ST_ACCEL_3_ODR_AVL_1600HZ_VAL 0x0a
-#define ST_ACCEL_3_FS_ADDR 0x24
-#define ST_ACCEL_3_FS_MASK 0x38
-#define ST_ACCEL_3_FS_AVL_2_VAL 0X00
-#define ST_ACCEL_3_FS_AVL_4_VAL 0X01
-#define ST_ACCEL_3_FS_AVL_6_VAL 0x02
-#define ST_ACCEL_3_FS_AVL_8_VAL 0x03
-#define ST_ACCEL_3_FS_AVL_16_VAL 0x04
-#define ST_ACCEL_3_FS_AVL_2_GAIN IIO_G_TO_M_S_2(61)
-#define ST_ACCEL_3_FS_AVL_4_GAIN IIO_G_TO_M_S_2(122)
-#define ST_ACCEL_3_FS_AVL_6_GAIN IIO_G_TO_M_S_2(183)
-#define ST_ACCEL_3_FS_AVL_8_GAIN IIO_G_TO_M_S_2(244)
-#define ST_ACCEL_3_FS_AVL_16_GAIN IIO_G_TO_M_S_2(732)
-#define ST_ACCEL_3_BDU_ADDR 0x20
-#define ST_ACCEL_3_BDU_MASK 0x08
-#define ST_ACCEL_3_DRDY_IRQ_ADDR 0x23
-#define ST_ACCEL_3_DRDY_IRQ_INT1_MASK 0x80
-#define ST_ACCEL_3_DRDY_IRQ_INT2_MASK 0x00
-#define ST_ACCEL_3_IHL_IRQ_ADDR 0x23
-#define ST_ACCEL_3_IHL_IRQ_MASK 0x40
-#define ST_ACCEL_3_IG1_EN_ADDR 0x23
-#define ST_ACCEL_3_IG1_EN_MASK 0x08
-#define ST_ACCEL_3_MULTIREAD_BIT false
-
-/* CUSTOM VALUES FOR SENSOR 4 */
-#define ST_ACCEL_4_WAI_EXP 0x3a
-#define ST_ACCEL_4_ODR_ADDR 0x20
-#define ST_ACCEL_4_ODR_MASK 0x30 /* DF1 and DF0 */
-#define ST_ACCEL_4_ODR_AVL_40HZ_VAL 0x00
-#define ST_ACCEL_4_ODR_AVL_160HZ_VAL 0x01
-#define ST_ACCEL_4_ODR_AVL_640HZ_VAL 0x02
-#define ST_ACCEL_4_ODR_AVL_2560HZ_VAL 0x03
-#define ST_ACCEL_4_PW_ADDR 0x20
-#define ST_ACCEL_4_PW_MASK 0xc0
-#define ST_ACCEL_4_FS_ADDR 0x21
-#define ST_ACCEL_4_FS_MASK 0x80
-#define ST_ACCEL_4_FS_AVL_2_VAL 0X00
-#define ST_ACCEL_4_FS_AVL_6_VAL 0X01
-#define ST_ACCEL_4_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1024)
-#define ST_ACCEL_4_FS_AVL_6_GAIN IIO_G_TO_M_S_2(340)
-#define ST_ACCEL_4_BDU_ADDR 0x21
-#define ST_ACCEL_4_BDU_MASK 0x40
-#define ST_ACCEL_4_DRDY_IRQ_ADDR 0x21
-#define ST_ACCEL_4_DRDY_IRQ_INT1_MASK 0x04
-#define ST_ACCEL_4_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 5 */
-#define ST_ACCEL_5_WAI_EXP 0x3b
-#define ST_ACCEL_5_ODR_ADDR 0x20
-#define ST_ACCEL_5_ODR_MASK 0x80
-#define ST_ACCEL_5_ODR_AVL_100HZ_VAL 0x00
-#define ST_ACCEL_5_ODR_AVL_400HZ_VAL 0x01
-#define ST_ACCEL_5_PW_ADDR 0x20
-#define ST_ACCEL_5_PW_MASK 0x40
-#define ST_ACCEL_5_FS_ADDR 0x20
-#define ST_ACCEL_5_FS_MASK 0x20
-#define ST_ACCEL_5_FS_AVL_2_VAL 0X00
-#define ST_ACCEL_5_FS_AVL_8_VAL 0X01
-/* TODO: check these resulting gain settings, these are not in the datsheet */
-#define ST_ACCEL_5_FS_AVL_2_GAIN IIO_G_TO_M_S_2(18000)
-#define ST_ACCEL_5_FS_AVL_8_GAIN IIO_G_TO_M_S_2(72000)
-#define ST_ACCEL_5_DRDY_IRQ_ADDR 0x22
-#define ST_ACCEL_5_DRDY_IRQ_INT1_MASK 0x04
-#define ST_ACCEL_5_DRDY_IRQ_INT2_MASK 0x20
-#define ST_ACCEL_5_IHL_IRQ_ADDR 0x22
-#define ST_ACCEL_5_IHL_IRQ_MASK 0x80
-#define ST_ACCEL_5_OD_IRQ_ADDR 0x22
-#define ST_ACCEL_5_OD_IRQ_MASK 0x40
-#define ST_ACCEL_5_IG1_EN_ADDR 0x21
-#define ST_ACCEL_5_IG1_EN_MASK 0x08
-#define ST_ACCEL_5_MULTIREAD_BIT false
-
-/* CUSTOM VALUES FOR SENSOR 6 */
-#define ST_ACCEL_6_WAI_EXP 0x32
-#define ST_ACCEL_6_ODR_ADDR 0x20
-#define ST_ACCEL_6_ODR_MASK 0x18
-#define ST_ACCEL_6_ODR_AVL_50HZ_VAL 0x00
-#define ST_ACCEL_6_ODR_AVL_100HZ_VAL 0x01
-#define ST_ACCEL_6_ODR_AVL_400HZ_VAL 0x02
-#define ST_ACCEL_6_ODR_AVL_1000HZ_VAL 0x03
-#define ST_ACCEL_6_PW_ADDR 0x20
-#define ST_ACCEL_6_PW_MASK 0x20
-#define ST_ACCEL_6_FS_ADDR 0x23
-#define ST_ACCEL_6_FS_MASK 0x30
-#define ST_ACCEL_6_FS_AVL_100_VAL 0x00
-#define ST_ACCEL_6_FS_AVL_200_VAL 0x01
-#define ST_ACCEL_6_FS_AVL_400_VAL 0x03
-#define ST_ACCEL_6_FS_AVL_100_GAIN IIO_G_TO_M_S_2(49000)
-#define ST_ACCEL_6_FS_AVL_200_GAIN IIO_G_TO_M_S_2(98000)
-#define ST_ACCEL_6_FS_AVL_400_GAIN IIO_G_TO_M_S_2(195000)
-#define ST_ACCEL_6_BDU_ADDR 0x23
-#define ST_ACCEL_6_BDU_MASK 0x80
-#define ST_ACCEL_6_DRDY_IRQ_ADDR 0x22
-#define ST_ACCEL_6_DRDY_IRQ_INT1_MASK 0x02
-#define ST_ACCEL_6_DRDY_IRQ_INT2_MASK 0x10
-#define ST_ACCEL_6_IHL_IRQ_ADDR 0x22
-#define ST_ACCEL_6_IHL_IRQ_MASK 0x80
-#define ST_ACCEL_6_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 7 */
-#define ST_ACCEL_7_ODR_ADDR 0x20
-#define ST_ACCEL_7_ODR_MASK 0x30
-#define ST_ACCEL_7_ODR_AVL_280HZ_VAL 0x00
-#define ST_ACCEL_7_ODR_AVL_560HZ_VAL 0x01
-#define ST_ACCEL_7_ODR_AVL_1120HZ_VAL 0x02
-#define ST_ACCEL_7_ODR_AVL_4480HZ_VAL 0x03
-#define ST_ACCEL_7_PW_ADDR 0x20
-#define ST_ACCEL_7_PW_MASK 0xc0
-#define ST_ACCEL_7_FS_AVL_2_GAIN IIO_G_TO_M_S_2(488)
-#define ST_ACCEL_7_BDU_ADDR 0x21
-#define ST_ACCEL_7_BDU_MASK 0x40
-#define ST_ACCEL_7_DRDY_IRQ_ADDR 0x21
-#define ST_ACCEL_7_DRDY_IRQ_INT1_MASK 0x04
-#define ST_ACCEL_7_MULTIREAD_BIT false
-
static const struct iio_chan_spec st_accel_8bit_channels[] = {
ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
@@ -281,7 +93,7 @@ static const struct iio_chan_spec st_accel_16bit_channels[] = {
static const struct st_sensor_settings st_accel_sensors_settings[] = {
{
- .wai = ST_ACCEL_1_WAI_EXP,
+ .wai = 0x33,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS3DH_ACCEL_DEV_NAME,
@@ -294,22 +106,22 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
.odr = {
- .addr = ST_ACCEL_1_ODR_ADDR,
- .mask = ST_ACCEL_1_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xf0,
.odr_avl = {
- { 1, ST_ACCEL_1_ODR_AVL_1HZ_VAL, },
- { 10, ST_ACCEL_1_ODR_AVL_10HZ_VAL, },
- { 25, ST_ACCEL_1_ODR_AVL_25HZ_VAL, },
- { 50, ST_ACCEL_1_ODR_AVL_50HZ_VAL, },
- { 100, ST_ACCEL_1_ODR_AVL_100HZ_VAL, },
- { 200, ST_ACCEL_1_ODR_AVL_200HZ_VAL, },
- { 400, ST_ACCEL_1_ODR_AVL_400HZ_VAL, },
- { 1600, ST_ACCEL_1_ODR_AVL_1600HZ_VAL, },
+ { .hz = 1, .value = 0x01, },
+ { .hz = 10, .value = 0x02, },
+ { .hz = 25, .value = 0x03, },
+ { .hz = 50, .value = 0x04, },
+ { .hz = 100, .value = 0x05, },
+ { .hz = 200, .value = 0x06, },
+ { .hz = 400, .value = 0x07, },
+ { .hz = 1600, .value = 0x08, },
},
},
.pw = {
- .addr = ST_ACCEL_1_ODR_ADDR,
- .mask = ST_ACCEL_1_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xf0,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
.enable_axis = {
@@ -317,48 +129,48 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_ACCEL_1_FS_ADDR,
- .mask = ST_ACCEL_1_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
- .value = ST_ACCEL_1_FS_AVL_2_VAL,
- .gain = ST_ACCEL_1_FS_AVL_2_GAIN,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(1000),
},
[1] = {
.num = ST_ACCEL_FS_AVL_4G,
- .value = ST_ACCEL_1_FS_AVL_4_VAL,
- .gain = ST_ACCEL_1_FS_AVL_4_GAIN,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(2000),
},
[2] = {
.num = ST_ACCEL_FS_AVL_8G,
- .value = ST_ACCEL_1_FS_AVL_8_VAL,
- .gain = ST_ACCEL_1_FS_AVL_8_GAIN,
+ .value = 0x02,
+ .gain = IIO_G_TO_M_S_2(4000),
},
[3] = {
.num = ST_ACCEL_FS_AVL_16G,
- .value = ST_ACCEL_1_FS_AVL_16_VAL,
- .gain = ST_ACCEL_1_FS_AVL_16_GAIN,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(12000),
},
},
},
.bdu = {
- .addr = ST_ACCEL_1_BDU_ADDR,
- .mask = ST_ACCEL_1_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_ACCEL_1_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_1_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_ACCEL_1_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_ACCEL_1_IHL_IRQ_ADDR,
- .mask_ihl = ST_ACCEL_1_IHL_IRQ_MASK,
+ .addr = 0x22,
+ .mask_int1 = 0x10,
+ .mask_int2 = 0x08,
+ .addr_ihl = 0x25,
+ .mask_ihl = 0x02,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_ACCEL_1_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_ACCEL_2_WAI_EXP,
+ .wai = 0x32,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS331DLH_ACCEL_DEV_NAME,
@@ -368,18 +180,18 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
.odr = {
- .addr = ST_ACCEL_2_ODR_ADDR,
- .mask = ST_ACCEL_2_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x18,
.odr_avl = {
- { 50, ST_ACCEL_2_ODR_AVL_50HZ_VAL, },
- { 100, ST_ACCEL_2_ODR_AVL_100HZ_VAL, },
- { 400, ST_ACCEL_2_ODR_AVL_400HZ_VAL, },
- { 1000, ST_ACCEL_2_ODR_AVL_1000HZ_VAL, },
+ { .hz = 50, .value = 0x00, },
+ { .hz = 100, .value = 0x01, },
+ { .hz = 400, .value = 0x02, },
+ { .hz = 1000, .value = 0x03, },
},
},
.pw = {
- .addr = ST_ACCEL_2_PW_ADDR,
- .mask = ST_ACCEL_2_PW_MASK,
+ .addr = 0x20,
+ .mask = 0xe0,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -388,69 +200,69 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_ACCEL_2_FS_ADDR,
- .mask = ST_ACCEL_2_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
- .value = ST_ACCEL_2_FS_AVL_2_VAL,
- .gain = ST_ACCEL_2_FS_AVL_2_GAIN,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(1000),
},
[1] = {
.num = ST_ACCEL_FS_AVL_4G,
- .value = ST_ACCEL_2_FS_AVL_4_VAL,
- .gain = ST_ACCEL_2_FS_AVL_4_GAIN,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(2000),
},
[2] = {
.num = ST_ACCEL_FS_AVL_8G,
- .value = ST_ACCEL_2_FS_AVL_8_VAL,
- .gain = ST_ACCEL_2_FS_AVL_8_GAIN,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(3900),
},
},
},
.bdu = {
- .addr = ST_ACCEL_2_BDU_ADDR,
- .mask = ST_ACCEL_2_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_ACCEL_2_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_2_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_ACCEL_2_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_ACCEL_2_IHL_IRQ_ADDR,
- .mask_ihl = ST_ACCEL_2_IHL_IRQ_MASK,
- .addr_od = ST_ACCEL_2_OD_IRQ_ADDR,
- .mask_od = ST_ACCEL_2_OD_IRQ_MASK,
+ .addr = 0x22,
+ .mask_int1 = 0x02,
+ .mask_int2 = 0x10,
+ .addr_ihl = 0x22,
+ .mask_ihl = 0x80,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_ACCEL_2_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_ACCEL_3_WAI_EXP,
+ .wai = 0x40,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LSM330_ACCEL_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_accel_16bit_channels,
.odr = {
- .addr = ST_ACCEL_3_ODR_ADDR,
- .mask = ST_ACCEL_3_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xf0,
.odr_avl = {
- { 3, ST_ACCEL_3_ODR_AVL_3HZ_VAL },
- { 6, ST_ACCEL_3_ODR_AVL_6HZ_VAL, },
- { 12, ST_ACCEL_3_ODR_AVL_12HZ_VAL, },
- { 25, ST_ACCEL_3_ODR_AVL_25HZ_VAL, },
- { 50, ST_ACCEL_3_ODR_AVL_50HZ_VAL, },
- { 100, ST_ACCEL_3_ODR_AVL_100HZ_VAL, },
- { 200, ST_ACCEL_3_ODR_AVL_200HZ_VAL, },
- { 400, ST_ACCEL_3_ODR_AVL_400HZ_VAL, },
- { 800, ST_ACCEL_3_ODR_AVL_800HZ_VAL, },
- { 1600, ST_ACCEL_3_ODR_AVL_1600HZ_VAL, },
+ { .hz = 3, .value = 0x01, },
+ { .hz = 6, .value = 0x02, },
+ { .hz = 12, .value = 0x03, },
+ { .hz = 25, .value = 0x04, },
+ { .hz = 50, .value = 0x05, },
+ { .hz = 100, .value = 0x06, },
+ { .hz = 200, .value = 0x07, },
+ { .hz = 400, .value = 0x08, },
+ { .hz = 800, .value = 0x09, },
+ { .hz = 1600, .value = 0x0a, },
},
},
.pw = {
- .addr = ST_ACCEL_3_ODR_ADDR,
- .mask = ST_ACCEL_3_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xf0,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
.enable_axis = {
@@ -458,75 +270,75 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_ACCEL_3_FS_ADDR,
- .mask = ST_ACCEL_3_FS_MASK,
+ .addr = 0x24,
+ .mask = 0x38,
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
- .value = ST_ACCEL_3_FS_AVL_2_VAL,
- .gain = ST_ACCEL_3_FS_AVL_2_GAIN,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(61),
},
[1] = {
.num = ST_ACCEL_FS_AVL_4G,
- .value = ST_ACCEL_3_FS_AVL_4_VAL,
- .gain = ST_ACCEL_3_FS_AVL_4_GAIN,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(122),
},
[2] = {
.num = ST_ACCEL_FS_AVL_6G,
- .value = ST_ACCEL_3_FS_AVL_6_VAL,
- .gain = ST_ACCEL_3_FS_AVL_6_GAIN,
+ .value = 0x02,
+ .gain = IIO_G_TO_M_S_2(183),
},
[3] = {
.num = ST_ACCEL_FS_AVL_8G,
- .value = ST_ACCEL_3_FS_AVL_8_VAL,
- .gain = ST_ACCEL_3_FS_AVL_8_GAIN,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(244),
},
[4] = {
.num = ST_ACCEL_FS_AVL_16G,
- .value = ST_ACCEL_3_FS_AVL_16_VAL,
- .gain = ST_ACCEL_3_FS_AVL_16_GAIN,
+ .value = 0x04,
+ .gain = IIO_G_TO_M_S_2(732),
},
},
},
.bdu = {
- .addr = ST_ACCEL_3_BDU_ADDR,
- .mask = ST_ACCEL_3_BDU_MASK,
+ .addr = 0x20,
+ .mask = 0x08,
},
.drdy_irq = {
- .addr = ST_ACCEL_3_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_3_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_ACCEL_3_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_ACCEL_3_IHL_IRQ_ADDR,
- .mask_ihl = ST_ACCEL_3_IHL_IRQ_MASK,
+ .addr = 0x23,
+ .mask_int1 = 0x80,
+ .mask_int2 = 0x00,
+ .addr_ihl = 0x23,
+ .mask_ihl = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
.ig1 = {
- .en_addr = ST_ACCEL_3_IG1_EN_ADDR,
- .en_mask = ST_ACCEL_3_IG1_EN_MASK,
+ .en_addr = 0x23,
+ .en_mask = 0x08,
},
},
- .multi_read_bit = ST_ACCEL_3_MULTIREAD_BIT,
+ .multi_read_bit = false,
.bootime = 2,
},
{
- .wai = ST_ACCEL_4_WAI_EXP,
+ .wai = 0x3a,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS3LV02DL_ACCEL_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
.odr = {
- .addr = ST_ACCEL_4_ODR_ADDR,
- .mask = ST_ACCEL_4_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x30, /* DF1 and DF0 */
.odr_avl = {
- { 40, ST_ACCEL_4_ODR_AVL_40HZ_VAL },
- { 160, ST_ACCEL_4_ODR_AVL_160HZ_VAL, },
- { 640, ST_ACCEL_4_ODR_AVL_640HZ_VAL, },
- { 2560, ST_ACCEL_4_ODR_AVL_2560HZ_VAL, },
+ { .hz = 40, .value = 0x00, },
+ { .hz = 160, .value = 0x01, },
+ { .hz = 640, .value = 0x02, },
+ { .hz = 2560, .value = 0x03, },
},
},
.pw = {
- .addr = ST_ACCEL_4_PW_ADDR,
- .mask = ST_ACCEL_4_PW_MASK,
+ .addr = 0x20,
+ .mask = 0xc0,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -535,51 +347,51 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_ACCEL_4_FS_ADDR,
- .mask = ST_ACCEL_4_FS_MASK,
+ .addr = 0x21,
+ .mask = 0x80,
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
- .value = ST_ACCEL_4_FS_AVL_2_VAL,
- .gain = ST_ACCEL_4_FS_AVL_2_GAIN,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(1024),
},
[1] = {
.num = ST_ACCEL_FS_AVL_6G,
- .value = ST_ACCEL_4_FS_AVL_6_VAL,
- .gain = ST_ACCEL_4_FS_AVL_6_GAIN,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(340),
},
},
},
.bdu = {
- .addr = ST_ACCEL_4_BDU_ADDR,
- .mask = ST_ACCEL_4_BDU_MASK,
+ .addr = 0x21,
+ .mask = 0x40,
},
.drdy_irq = {
- .addr = ST_ACCEL_4_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_4_DRDY_IRQ_INT1_MASK,
+ .addr = 0x21,
+ .mask_int1 = 0x04,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_ACCEL_4_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2, /* guess */
},
{
- .wai = ST_ACCEL_5_WAI_EXP,
+ .wai = 0x3b,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS331DL_ACCEL_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_accel_8bit_channels,
.odr = {
- .addr = ST_ACCEL_5_ODR_ADDR,
- .mask = ST_ACCEL_5_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x80,
.odr_avl = {
- { 100, ST_ACCEL_5_ODR_AVL_100HZ_VAL },
- { 400, ST_ACCEL_5_ODR_AVL_400HZ_VAL, },
+ { .hz = 100, .value = 0x00, },
+ { .hz = 400, .value = 0x01, },
},
},
.pw = {
- .addr = ST_ACCEL_5_PW_ADDR,
- .mask = ST_ACCEL_5_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x40,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -588,54 +400,58 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_ACCEL_5_FS_ADDR,
- .mask = ST_ACCEL_5_FS_MASK,
+ .addr = 0x20,
+ .mask = 0x20,
+ /*
+ * TODO: check these resulting gain settings, these are
+ * not in the datasheet
+ */
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
- .value = ST_ACCEL_5_FS_AVL_2_VAL,
- .gain = ST_ACCEL_5_FS_AVL_2_GAIN,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(18000),
},
[1] = {
.num = ST_ACCEL_FS_AVL_8G,
- .value = ST_ACCEL_5_FS_AVL_8_VAL,
- .gain = ST_ACCEL_5_FS_AVL_8_GAIN,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(72000),
},
},
},
.drdy_irq = {
- .addr = ST_ACCEL_5_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_5_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_ACCEL_5_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_ACCEL_5_IHL_IRQ_ADDR,
- .mask_ihl = ST_ACCEL_5_IHL_IRQ_MASK,
- .addr_od = ST_ACCEL_5_OD_IRQ_ADDR,
- .mask_od = ST_ACCEL_5_OD_IRQ_MASK,
+ .addr = 0x22,
+ .mask_int1 = 0x04,
+ .mask_int2 = 0x20,
+ .addr_ihl = 0x22,
+ .mask_ihl = 0x80,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_ACCEL_5_MULTIREAD_BIT,
+ .multi_read_bit = false,
.bootime = 2, /* guess */
},
{
- .wai = ST_ACCEL_6_WAI_EXP,
+ .wai = 0x32,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = H3LIS331DL_DRIVER_NAME,
},
.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
.odr = {
- .addr = ST_ACCEL_6_ODR_ADDR,
- .mask = ST_ACCEL_6_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x18,
.odr_avl = {
- { 50, ST_ACCEL_6_ODR_AVL_50HZ_VAL },
- { 100, ST_ACCEL_6_ODR_AVL_100HZ_VAL, },
- { 400, ST_ACCEL_6_ODR_AVL_400HZ_VAL, },
- { 1000, ST_ACCEL_6_ODR_AVL_1000HZ_VAL, },
+ { .hz = 50, .value = 0x00, },
+ { .hz = 100, .value = 0x01, },
+ { .hz = 400, .value = 0x02, },
+ { .hz = 1000, .value = 0x03, },
},
},
.pw = {
- .addr = ST_ACCEL_6_PW_ADDR,
- .mask = ST_ACCEL_6_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x20,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -644,38 +460,38 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_ACCEL_6_FS_ADDR,
- .mask = ST_ACCEL_6_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_100G,
- .value = ST_ACCEL_6_FS_AVL_100_VAL,
- .gain = ST_ACCEL_6_FS_AVL_100_GAIN,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(49000),
},
[1] = {
.num = ST_ACCEL_FS_AVL_200G,
- .value = ST_ACCEL_6_FS_AVL_200_VAL,
- .gain = ST_ACCEL_6_FS_AVL_200_GAIN,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(98000),
},
[2] = {
.num = ST_ACCEL_FS_AVL_400G,
- .value = ST_ACCEL_6_FS_AVL_400_VAL,
- .gain = ST_ACCEL_6_FS_AVL_400_GAIN,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(195000),
},
},
},
.bdu = {
- .addr = ST_ACCEL_6_BDU_ADDR,
- .mask = ST_ACCEL_6_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_ACCEL_6_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_6_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_ACCEL_6_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_ACCEL_6_IHL_IRQ_ADDR,
- .mask_ihl = ST_ACCEL_6_IHL_IRQ_MASK,
+ .addr = 0x22,
+ .mask_int1 = 0x02,
+ .mask_int2 = 0x10,
+ .addr_ihl = 0x22,
+ .mask_ihl = 0x80,
},
- .multi_read_bit = ST_ACCEL_6_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
@@ -685,18 +501,18 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
.odr = {
- .addr = ST_ACCEL_7_ODR_ADDR,
- .mask = ST_ACCEL_7_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x30,
.odr_avl = {
- { 280, ST_ACCEL_7_ODR_AVL_280HZ_VAL, },
- { 560, ST_ACCEL_7_ODR_AVL_560HZ_VAL, },
- { 1120, ST_ACCEL_7_ODR_AVL_1120HZ_VAL, },
- { 4480, ST_ACCEL_7_ODR_AVL_4480HZ_VAL, },
+ { .hz = 280, .value = 0x00, },
+ { .hz = 560, .value = 0x01, },
+ { .hz = 1120, .value = 0x02, },
+ { .hz = 4480, .value = 0x03, },
},
},
.pw = {
- .addr = ST_ACCEL_7_PW_ADDR,
- .mask = ST_ACCEL_7_PW_MASK,
+ .addr = 0x20,
+ .mask = 0xc0,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -708,7 +524,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
- .gain = ST_ACCEL_7_FS_AVL_2_GAIN,
+ .gain = IIO_G_TO_M_S_2(488),
},
},
},
@@ -719,11 +535,78 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.bdu = {
},
.drdy_irq = {
- .addr = ST_ACCEL_7_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_7_DRDY_IRQ_INT1_MASK,
+ .addr = 0x21,
+ .mask_int1 = 0x04,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ },
+ .multi_read_bit = false,
+ .bootime = 2,
+ },
+ {
+ .wai = 0x33,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LNG2DM_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_8bit_channels,
+ .odr = {
+ .addr = 0x20,
+ .mask = 0xf0,
+ .odr_avl = {
+ { .hz = 1, .value = 0x01, },
+ { .hz = 10, .value = 0x02, },
+ { .hz = 25, .value = 0x03, },
+ { .hz = 50, .value = 0x04, },
+ { .hz = 100, .value = 0x05, },
+ { .hz = 200, .value = 0x06, },
+ { .hz = 400, .value = 0x07, },
+ { .hz = 1600, .value = 0x08, },
+ },
+ },
+ .pw = {
+ .addr = 0x20,
+ .mask = 0xf0,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = 0x23,
+ .mask = 0x30,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(15600),
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(31200),
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = 0x02,
+ .gain = IIO_G_TO_M_S_2(62500),
+ },
+ [3] = {
+ .num = ST_ACCEL_FS_AVL_16G,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(187500),
+ },
+ },
+ },
+ .drdy_irq = {
+ .addr = 0x22,
+ .mask_int1 = 0x10,
+ .mask_int2 = 0x08,
+ .addr_ihl = 0x25,
+ .mask_ihl = 0x02,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_ACCEL_7_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
};
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index e9d427a5df7c..c0f8867aa1ea 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -84,6 +84,10 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,lis3l02dq",
.data = LIS3L02DQ_ACCEL_DEV_NAME,
},
+ {
+ .compatible = "st,lng2dm-accel",
+ .data = LNG2DM_ACCEL_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -135,6 +139,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
{ LSM303AGR_ACCEL_DEV_NAME },
{ LIS2DH12_ACCEL_DEV_NAME },
{ LIS3L02DQ_ACCEL_DEV_NAME },
+ { LNG2DM_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index efd43941d45d..c25ac50d4600 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -60,6 +60,7 @@ static const struct spi_device_id st_accel_id_table[] = {
{ LSM303AGR_ACCEL_DEV_NAME },
{ LIS2DH12_ACCEL_DEV_NAME },
{ LIS3L02DQ_ACCEL_DEV_NAME },
+ { LNG2DM_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_accel_id_table);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 99c051490eff..38bc319904c4 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -58,6 +58,18 @@ config AD7476
To compile this driver as a module, choose M here: the
module will be called ad7476.
+config AD7766
+ tristate "Analog Devices AD7766/AD7767 ADC driver"
+ depends on SPI_MASTER
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Analog Devices AD7766, AD7766-1,
+ AD7766-2, AD7767, AD7767-1, AD7767-2 SPI analog to digital converters.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ad7766.
+
config AD7791
tristate "Analog Devices AD7791 ADC driver"
depends on SPI
@@ -195,6 +207,16 @@ config DA9150_GPADC
To compile this driver as a module, choose M here: the module will be
called berlin2-adc.
+config ENVELOPE_DETECTOR
+ tristate "Envelope detector using a DAC and a comparator"
+ depends on OF
+ help
+ Say yes here to build support for an envelope detector using a DAC
+ and a comparator.
+
+ To compile this driver as a module, choose M here: the module will be
+ called envelope-detector.
+
config EXYNOS_ADC
tristate "Exynos ADC driver support"
depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || (OF && COMPILE_TEST)
@@ -419,6 +441,28 @@ config ROCKCHIP_SARADC
To compile this driver as a module, choose M here: the
module will be called rockchip_saradc.
+config STM32_ADC_CORE
+ tristate "STMicroelectronics STM32 adc core"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on OF
+ depends on REGULATOR
+ help
+ Select this option to enable the core driver for STMicroelectronics
+ STM32 analog-to-digital converter (ADC).
+
+ This driver can also be built as a module. If so, the module
+ will be called stm32-adc-core.
+
+config STM32_ADC
+ tristate "STMicroelectronics STM32 adc"
+ depends on STM32_ADC_CORE
+ help
+ Say yes here to build support for the STMicroelectronics STM32 Analog
+ to Digital Converter (ADC).
+
+ This driver can also be built as a module. If so, the module
+ will be called stm32-adc.
+
config STX104
tristate "Apex Embedded Systems STX104 driver"
depends on X86 && ISA_BUS_API
@@ -449,6 +493,8 @@ config TI_ADC081C
config TI_ADC0832
tristate "Texas Instruments ADC0831/ADC0832/ADC0834/ADC0838"
depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
If you say yes here you get support for Texas Instruments ADC0831,
ADC0832, ADC0834, ADC0838 ADC chips.
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 7a40c04c311f..d36c4be8d1fc 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_AD7291) += ad7291.o
obj-$(CONFIG_AD7298) += ad7298.o
obj-$(CONFIG_AD7923) += ad7923.o
obj-$(CONFIG_AD7476) += ad7476.o
+obj-$(CONFIG_AD7766) += ad7766.o
obj-$(CONFIG_AD7791) += ad7791.o
obj-$(CONFIG_AD7793) += ad7793.o
obj-$(CONFIG_AD7887) += ad7887.o
@@ -20,6 +21,7 @@ obj-$(CONFIG_BCM_IPROC_ADC) += bcm_iproc_adc.o
obj-$(CONFIG_BERLIN2_ADC) += berlin2-adc.o
obj-$(CONFIG_CC10001_ADC) += cc10001_adc.o
obj-$(CONFIG_DA9150_GPADC) += da9150-gpadc.o
+obj-$(CONFIG_ENVELOPE_DETECTOR) += envelope-detector.o
obj-$(CONFIG_EXYNOS_ADC) += exynos_adc.o
obj-$(CONFIG_FSL_MX25_ADC) += fsl-imx25-gcq.o
obj-$(CONFIG_HI8435) += hi8435.o
@@ -41,6 +43,8 @@ obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
obj-$(CONFIG_STX104) += stx104.o
+obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
+obj-$(CONFIG_STM32_ADC) += stm32-adc.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
obj-$(CONFIG_TI_ADC0832) += ti-adc0832.o
obj-$(CONFIG_TI_ADC12138) += ti-adc12138.o
diff --git a/drivers/iio/adc/ad7766.c b/drivers/iio/adc/ad7766.c
new file mode 100644
index 000000000000..75cca42b6e70
--- /dev/null
+++ b/drivers/iio/adc/ad7766.c
@@ -0,0 +1,330 @@
+/*
+ * AD7766/AD7767 SPI ADC driver
+ *
+ * Copyright 2016 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+struct ad7766_chip_info {
+ unsigned int decimation_factor;
+};
+
+enum {
+ AD7766_SUPPLY_AVDD = 0,
+ AD7766_SUPPLY_DVDD = 1,
+ AD7766_SUPPLY_VREF = 2,
+ AD7766_NUM_SUPPLIES = 3
+};
+
+struct ad7766 {
+ const struct ad7766_chip_info *chip_info;
+ struct spi_device *spi;
+ struct clk *mclk;
+ struct gpio_desc *pd_gpio;
+ struct regulator_bulk_data reg[AD7766_NUM_SUPPLIES];
+
+ struct iio_trigger *trig;
+
+ struct spi_transfer xfer;
+ struct spi_message msg;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ * Make the buffer large enough for one 24 bit sample and one 64 bit
+ * aligned 64 bit timestamp.
+ */
+ unsigned char data[ALIGN(3, sizeof(s64)) + sizeof(s64)]
+ ____cacheline_aligned;
+};
+
+/*
+ * The AD7766 and AD7767 variants are interface compatible; the main difference
+ * is analog performance. Both parts use the same IDs.
+ */
+enum ad7766_device_ids {
+ ID_AD7766,
+ ID_AD7766_1,
+ ID_AD7766_2,
+};
+
+static irqreturn_t ad7766_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad7766 *ad7766 = iio_priv(indio_dev);
+ int ret;
+
+ ret = spi_sync(ad7766->spi, &ad7766->msg);
+ if (ret < 0)
+ goto done;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, ad7766->data,
+ pf->timestamp);
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int ad7766_preenable(struct iio_dev *indio_dev)
+{
+ struct ad7766 *ad7766 = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ad7766->reg), ad7766->reg);
+ if (ret < 0) {
+ dev_err(&ad7766->spi->dev, "Failed to enable supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ad7766->mclk);
+ if (ret < 0) {
+ dev_err(&ad7766->spi->dev, "Failed to enable MCLK: %d\n", ret);
+ regulator_bulk_disable(ARRAY_SIZE(ad7766->reg), ad7766->reg);
+ return ret;
+ }
+
+ if (ad7766->pd_gpio)
+ gpiod_set_value(ad7766->pd_gpio, 0);
+
+ return 0;
+}
+
+static int ad7766_postdisable(struct iio_dev *indio_dev)
+{
+ struct ad7766 *ad7766 = iio_priv(indio_dev);
+
+ if (ad7766->pd_gpio)
+ gpiod_set_value(ad7766->pd_gpio, 1);
+
+ /*
+ * The PD pin is synchronous to the clock, so give it some time to
+ * notice the change before we disable the clock.
+ */
+ msleep(20);
+
+ clk_disable_unprepare(ad7766->mclk);
+ regulator_bulk_disable(ARRAY_SIZE(ad7766->reg), ad7766->reg);
+
+ return 0;
+}
+
+static int ad7766_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val, int *val2, long info)
+{
+ struct ad7766 *ad7766 = iio_priv(indio_dev);
+ struct regulator *vref = ad7766->reg[AD7766_SUPPLY_VREF].consumer;
+ int scale_uv;
+
+ switch (info) {
+ case IIO_CHAN_INFO_SCALE:
+ scale_uv = regulator_get_voltage(vref);
+ if (scale_uv < 0)
+ return scale_uv;
+ *val = scale_uv / 1000;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = clk_get_rate(ad7766->mclk) /
+ ad7766->chip_info->decimation_factor;
+ return IIO_VAL_INT;
+ }
+ return -EINVAL;
+}
+
+static const struct iio_chan_spec ad7766_channels[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .scan_type = {
+ .sign = 's',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7766_chip_info ad7766_chip_info[] = {
+ [ID_AD7766] = {
+ .decimation_factor = 8,
+ },
+ [ID_AD7766_1] = {
+ .decimation_factor = 16,
+ },
+ [ID_AD7766_2] = {
+ .decimation_factor = 32,
+ },
+};
+
+static const struct iio_buffer_setup_ops ad7766_buffer_setup_ops = {
+ .preenable = &ad7766_preenable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
+ .postdisable = &ad7766_postdisable,
+};
+
+static const struct iio_info ad7766_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &ad7766_read_raw,
+};
+
+static irqreturn_t ad7766_irq(int irq, void *private)
+{
+ iio_trigger_poll(private);
+ return IRQ_HANDLED;
+}
+
+static int ad7766_set_trigger_state(struct iio_trigger *trig, bool enable)
+{
+ struct ad7766 *ad7766 = iio_trigger_get_drvdata(trig);
+
+ if (enable)
+ enable_irq(ad7766->spi->irq);
+ else
+ disable_irq(ad7766->spi->irq);
+
+ return 0;
+}
+
+static const struct iio_trigger_ops ad7766_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = ad7766_set_trigger_state,
+ .validate_device = iio_trigger_validate_own_device,
+};
+
+static int ad7766_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct iio_dev *indio_dev;
+ struct ad7766 *ad7766;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*ad7766));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ ad7766 = iio_priv(indio_dev);
+ ad7766->chip_info = &ad7766_chip_info[id->driver_data];
+
+ ad7766->mclk = devm_clk_get(&spi->dev, "mclk");
+ if (IS_ERR(ad7766->mclk))
+ return PTR_ERR(ad7766->mclk);
+
+ ad7766->reg[AD7766_SUPPLY_AVDD].supply = "avdd";
+ ad7766->reg[AD7766_SUPPLY_DVDD].supply = "dvdd";
+ ad7766->reg[AD7766_SUPPLY_VREF].supply = "vref";
+
+ ret = devm_regulator_bulk_get(&spi->dev, ARRAY_SIZE(ad7766->reg),
+ ad7766->reg);
+ if (ret)
+ return ret;
+
+ ad7766->pd_gpio = devm_gpiod_get_optional(&spi->dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ad7766->pd_gpio))
+ return PTR_ERR(ad7766->pd_gpio);
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad7766_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7766_channels);
+ indio_dev->info = &ad7766_info;
+
+ if (spi->irq > 0) {
+ ad7766->trig = devm_iio_trigger_alloc(&spi->dev, "%s-dev%d",
+ indio_dev->name, indio_dev->id);
+ if (!ad7766->trig)
+ return -ENOMEM;
+
+ ad7766->trig->ops = &ad7766_trigger_ops;
+ ad7766->trig->dev.parent = &spi->dev;
+ iio_trigger_set_drvdata(ad7766->trig, ad7766);
+
+ ret = devm_request_irq(&spi->dev, spi->irq, ad7766_irq,
+ IRQF_TRIGGER_FALLING, dev_name(&spi->dev),
+ ad7766->trig);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The device generates interrupts as long as it is powered up.
+		 * Some platforms might not allow powering it down, so
+		 * disable the interrupt to avoid extra load on the system.
+ */
+ disable_irq(spi->irq);
+
+ ret = devm_iio_trigger_register(&spi->dev, ad7766->trig);
+ if (ret)
+ return ret;
+ }
+
+ spi_set_drvdata(spi, indio_dev);
+
+ ad7766->spi = spi;
+
+ /* First byte always 0 */
+ ad7766->xfer.rx_buf = &ad7766->data[1];
+ ad7766->xfer.len = 3;
+
+ spi_message_init(&ad7766->msg);
+ spi_message_add_tail(&ad7766->xfer, &ad7766->msg);
+
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
+ &iio_pollfunc_store_time, &ad7766_trigger_handler,
+ &ad7766_buffer_setup_ops);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static const struct spi_device_id ad7766_id[] = {
+ {"ad7766", ID_AD7766},
+ {"ad7766-1", ID_AD7766_1},
+ {"ad7766-2", ID_AD7766_2},
+ {"ad7767", ID_AD7766},
+ {"ad7767-1", ID_AD7766_1},
+ {"ad7767-2", ID_AD7766_2},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad7766_id);
+
+static struct spi_driver ad7766_driver = {
+ .driver = {
+ .name = "ad7766",
+ },
+ .probe = ad7766_probe,
+ .id_table = ad7766_id,
+};
+module_spi_driver(ad7766_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD7766 and AD7767 ADCs driver support");
+MODULE_LICENSE("GPL v2");
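A note on how the scale reported by ad7766_read_raw() above is meant to be used: with IIO_VAL_FRACTIONAL_LOG2 the driver reports vref (in mV) over 2^realbits, so the raw value is scaled as raw * vref_mV / 2^24. A minimal user-space sketch of that arithmetic follows; the 2.5 V reference is an assumed example, not something this patch defines.

/*
 * Illustration only: how a raw AD7766/AD7767 sample maps to millivolts
 * given the IIO_VAL_FRACTIONAL_LOG2 scale (vref_mV / 2^realbits) that
 * ad7766_read_raw() reports. The 2500 mV reference is an assumed example.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int vref_mv = 2500;	/* assumed "vref" supply voltage */
	const int realbits = 24;	/* AD7766/AD7767 resolution */
	int32_t raw = 0x400000;		/* example 24-bit two's complement sample */

	/* sign-extend the 24-bit sample to 32 bits */
	raw = (int32_t)((uint32_t)raw << 8) >> 8;

	double scale_mv = (double)vref_mv / (1 << realbits);
	printf("raw=%d -> %.6f mV\n", raw, raw * scale_mv);
	return 0;
}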
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index bbdac07f4aaa..34b928cefeed 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -30,6 +30,7 @@
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
+#include <linux/pinctrl/consumer.h>
/* Registers */
#define AT91_ADC_CR 0x00 /* Control Register */
@@ -1347,6 +1348,32 @@ static int at91_adc_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int at91_adc_suspend(struct device *dev)
+{
+ struct iio_dev *idev = platform_get_drvdata(to_platform_device(dev));
+ struct at91_adc_state *st = iio_priv(idev);
+
+ pinctrl_pm_select_sleep_state(dev);
+ clk_disable_unprepare(st->clk);
+
+ return 0;
+}
+
+static int at91_adc_resume(struct device *dev)
+{
+ struct iio_dev *idev = platform_get_drvdata(to_platform_device(dev));
+ struct at91_adc_state *st = iio_priv(idev);
+
+ clk_prepare_enable(st->clk);
+ pinctrl_pm_select_default_state(dev);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(at91_adc_pm_ops, at91_adc_suspend, at91_adc_resume);
+
static struct at91_adc_caps at91sam9260_caps = {
.calc_startup_ticks = calc_startup_ticks_9260,
.num_channels = 4,
@@ -1441,6 +1468,7 @@ static struct platform_driver at91_adc_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(at91_adc_dt_ids),
+ .pm = &at91_adc_pm_ops,
},
};
diff --git a/drivers/iio/adc/envelope-detector.c b/drivers/iio/adc/envelope-detector.c
new file mode 100644
index 000000000000..fef15c0d7c9c
--- /dev/null
+++ b/drivers/iio/adc/envelope-detector.c
@@ -0,0 +1,422 @@
+/*
+ * Driver for an envelope detector using a DAC and a comparator
+ *
+ * Copyright (C) 2016 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * The DAC is used to find the peak level of an alternating voltage input
+ * signal by a binary search using the output of a comparator wired to
+ * an interrupt pin. Like so:
+ * _
+ * | \
+ * input +------>-------|+ \
+ * | \
+ * .-------. | }---.
+ * | | | / |
+ * | dac|-->--|- / |
+ * | | |_/ |
+ * | | |
+ * | | |
+ * | irq|------<-------'
+ * | |
+ * '-------'
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+struct envelope {
+ spinlock_t comp_lock; /* protects comp */
+ int comp;
+
+ struct mutex read_lock; /* protects everything else */
+
+ int comp_irq;
+ u32 comp_irq_trigger;
+ u32 comp_irq_trigger_inv;
+
+ struct iio_channel *dac;
+ struct delayed_work comp_timeout;
+
+ unsigned int comp_interval;
+ bool invert;
+ u32 dac_max;
+
+ int high;
+ int level;
+ int low;
+
+ struct completion done;
+};
+
+/*
+ * The envelope_detector_comp_latch function works together with the compare
+ * interrupt service routine below (envelope_detector_comp_isr) as a latch
+ * (one-bit memory) recording whether the interrupt has triggered since this
+ * function was last called.
+ * The ..._comp_isr function disables the interrupt so that the cpu does not
+ * need to service a possible interrupt flood from the comparator when no-one
+ * cares anyway, and this ..._comp_latch function reenables it again when
+ * needed.
+ */
+static int envelope_detector_comp_latch(struct envelope *env)
+{
+ int comp;
+
+ spin_lock_irq(&env->comp_lock);
+ comp = env->comp;
+ env->comp = 0;
+ spin_unlock_irq(&env->comp_lock);
+
+ if (!comp)
+ return 0;
+
+ /*
+	 * The irq was disabled and has just been reenabled.
+	 * But an irq that arrived while the irq was disabled
+	 * may still be pending, and may fire just as the irq
+	 * is reenabled. That is not what is desired.
+ */
+ enable_irq(env->comp_irq);
+
+ /* So, synchronize this possibly pending irq... */
+ synchronize_irq(env->comp_irq);
+
+ /* ...and redo the whole dance. */
+ spin_lock_irq(&env->comp_lock);
+ comp = env->comp;
+ env->comp = 0;
+ spin_unlock_irq(&env->comp_lock);
+
+ if (comp)
+ enable_irq(env->comp_irq);
+
+ return 1;
+}
+
+static irqreturn_t envelope_detector_comp_isr(int irq, void *ctx)
+{
+ struct envelope *env = ctx;
+
+ spin_lock(&env->comp_lock);
+ env->comp = 1;
+ disable_irq_nosync(env->comp_irq);
+ spin_unlock(&env->comp_lock);
+
+ return IRQ_HANDLED;
+}
+
+static void envelope_detector_setup_compare(struct envelope *env)
+{
+ int ret;
+
+ /*
+ * Do a binary search for the peak input level, and stop
+ * when that level is "trapped" between two adjacent DAC
+ * values.
+ * When invert is active, use the midpoint floor so that
+ * env->level ends up as env->low when the termination
+	 * criterion below is met, and use the midpoint
+ * ceiling when invert is not active so that env->level
+ * ends up as env->high in that case.
+ */
+ env->level = (env->high + env->low + !env->invert) / 2;
+
+ if (env->high == env->low + 1) {
+ complete(&env->done);
+ return;
+ }
+
+ /* Set a "safe" DAC level (if there is such a thing)... */
+ ret = iio_write_channel_raw(env->dac, env->invert ? 0 : env->dac_max);
+ if (ret < 0)
+ goto err;
+
+ /* ...clear the comparison result... */
+ envelope_detector_comp_latch(env);
+
+ /* ...set the real DAC level... */
+ ret = iio_write_channel_raw(env->dac, env->level);
+ if (ret < 0)
+ goto err;
+
+ /* ...and wait for a bit to see if the latch catches anything. */
+ schedule_delayed_work(&env->comp_timeout,
+ msecs_to_jiffies(env->comp_interval));
+ return;
+
+err:
+ env->level = ret;
+ complete(&env->done);
+}
+
+static void envelope_detector_timeout(struct work_struct *work)
+{
+ struct envelope *env = container_of(work, struct envelope,
+ comp_timeout.work);
+
+ /* Adjust low/high depending on the latch content... */
+ if (!envelope_detector_comp_latch(env) ^ !env->invert)
+ env->low = env->level;
+ else
+ env->high = env->level;
+
+ /* ...and continue the search. */
+ envelope_detector_setup_compare(env);
+}
+
+static int envelope_detector_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct envelope *env = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ /*
+ * When invert is active, start with high=max+1 and low=0
+ * since we will end up with the low value when the
+		 * termination criterion is met (rounding down). And
+ * start with high=max and low=-1 when invert is not active
+ * since we will end up with the high value in that case.
+		 * This ensures that, in both cases, the returned value is
+		 * in the same range as the DAC and is a value that has not
+ * triggered the comparator.
+ */
+ mutex_lock(&env->read_lock);
+ env->high = env->dac_max + env->invert;
+ env->low = -1 + env->invert;
+ envelope_detector_setup_compare(env);
+ wait_for_completion(&env->done);
+ if (env->level < 0) {
+ ret = env->level;
+ goto err_unlock;
+ }
+ *val = env->invert ? env->dac_max - env->level : env->level;
+ mutex_unlock(&env->read_lock);
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ return iio_read_channel_scale(env->dac, val, val2);
+ }
+
+ return -EINVAL;
+
+err_unlock:
+ mutex_unlock(&env->read_lock);
+ return ret;
+}
+
+static ssize_t envelope_show_invert(struct iio_dev *indio_dev,
+ uintptr_t private,
+ struct iio_chan_spec const *ch, char *buf)
+{
+ struct envelope *env = iio_priv(indio_dev);
+
+ return sprintf(buf, "%u\n", env->invert);
+}
+
+static ssize_t envelope_store_invert(struct iio_dev *indio_dev,
+ uintptr_t private,
+ struct iio_chan_spec const *ch,
+ const char *buf, size_t len)
+{
+ struct envelope *env = iio_priv(indio_dev);
+ unsigned long invert;
+ int ret;
+ u32 trigger;
+
+ ret = kstrtoul(buf, 0, &invert);
+ if (ret < 0)
+ return ret;
+ if (invert > 1)
+ return -EINVAL;
+
+ trigger = invert ? env->comp_irq_trigger_inv : env->comp_irq_trigger;
+
+ mutex_lock(&env->read_lock);
+ if (invert != env->invert)
+ ret = irq_set_irq_type(env->comp_irq, trigger);
+ if (!ret) {
+ env->invert = invert;
+ ret = len;
+ }
+ mutex_unlock(&env->read_lock);
+
+ return ret;
+}
+
+static ssize_t envelope_show_comp_interval(struct iio_dev *indio_dev,
+ uintptr_t private,
+ struct iio_chan_spec const *ch,
+ char *buf)
+{
+ struct envelope *env = iio_priv(indio_dev);
+
+ return sprintf(buf, "%u\n", env->comp_interval);
+}
+
+static ssize_t envelope_store_comp_interval(struct iio_dev *indio_dev,
+ uintptr_t private,
+ struct iio_chan_spec const *ch,
+ const char *buf, size_t len)
+{
+ struct envelope *env = iio_priv(indio_dev);
+ unsigned long interval;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &interval);
+ if (ret < 0)
+ return ret;
+ if (interval > 1000)
+ return -EINVAL;
+
+ mutex_lock(&env->read_lock);
+ env->comp_interval = interval;
+ mutex_unlock(&env->read_lock);
+
+ return len;
+}
+
+static const struct iio_chan_spec_ext_info envelope_detector_ext_info[] = {
+ { .name = "invert",
+ .read = envelope_show_invert,
+ .write = envelope_store_invert, },
+ { .name = "compare_interval",
+ .read = envelope_show_comp_interval,
+ .write = envelope_store_comp_interval, },
+ { /* sentinel */ }
+};
+
+static const struct iio_chan_spec envelope_detector_iio_channel = {
+ .type = IIO_ALTVOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
+ | BIT(IIO_CHAN_INFO_SCALE),
+ .ext_info = envelope_detector_ext_info,
+ .indexed = 1,
+};
+
+static const struct iio_info envelope_detector_info = {
+ .read_raw = &envelope_detector_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int envelope_detector_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iio_dev *indio_dev;
+ struct envelope *env;
+ enum iio_chan_type type;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*env));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+ env = iio_priv(indio_dev);
+ env->comp_interval = 50; /* some sensible default? */
+
+ spin_lock_init(&env->comp_lock);
+ mutex_init(&env->read_lock);
+ init_completion(&env->done);
+ INIT_DELAYED_WORK(&env->comp_timeout, envelope_detector_timeout);
+
+ indio_dev->name = dev_name(dev);
+ indio_dev->dev.parent = dev;
+ indio_dev->dev.of_node = dev->of_node;
+ indio_dev->info = &envelope_detector_info;
+ indio_dev->channels = &envelope_detector_iio_channel;
+ indio_dev->num_channels = 1;
+
+ env->dac = devm_iio_channel_get(dev, "dac");
+ if (IS_ERR(env->dac)) {
+ if (PTR_ERR(env->dac) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get dac input channel\n");
+ return PTR_ERR(env->dac);
+ }
+
+ env->comp_irq = platform_get_irq_byname(pdev, "comp");
+ if (env->comp_irq < 0) {
+ if (env->comp_irq != -EPROBE_DEFER)
+ dev_err(dev, "failed to get compare interrupt\n");
+ return env->comp_irq;
+ }
+
+ ret = devm_request_irq(dev, env->comp_irq, envelope_detector_comp_isr,
+ 0, "envelope-detector", env);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to request interrupt\n");
+ return ret;
+ }
+ env->comp_irq_trigger = irq_get_trigger_type(env->comp_irq);
+ if (env->comp_irq_trigger & IRQF_TRIGGER_RISING)
+ env->comp_irq_trigger_inv |= IRQF_TRIGGER_FALLING;
+ if (env->comp_irq_trigger & IRQF_TRIGGER_FALLING)
+ env->comp_irq_trigger_inv |= IRQF_TRIGGER_RISING;
+ if (env->comp_irq_trigger & IRQF_TRIGGER_HIGH)
+ env->comp_irq_trigger_inv |= IRQF_TRIGGER_LOW;
+ if (env->comp_irq_trigger & IRQF_TRIGGER_LOW)
+ env->comp_irq_trigger_inv |= IRQF_TRIGGER_HIGH;
+
+ ret = iio_get_channel_type(env->dac, &type);
+ if (ret < 0)
+ return ret;
+
+ if (type != IIO_VOLTAGE) {
+ dev_err(dev, "dac is of the wrong type\n");
+ return -EINVAL;
+ }
+
+ ret = iio_read_max_channel_raw(env->dac, &env->dac_max);
+ if (ret < 0) {
+ dev_err(dev, "dac does not indicate its raw maximum value\n");
+ return ret;
+ }
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id envelope_detector_match[] = {
+ { .compatible = "axentia,tse850-envelope-detector", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, envelope_detector_match);
+
+static struct platform_driver envelope_detector_driver = {
+ .probe = envelope_detector_probe,
+ .driver = {
+ .name = "iio-envelope-detector",
+ .of_match_table = envelope_detector_match,
+ },
+};
+module_platform_driver(envelope_detector_driver);
+
+MODULE_DESCRIPTION("Envelope detector using a DAC and a comparator");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
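To make the search in envelope_detector_setup_compare() easier to follow, here is a small, self-contained sketch of the same binary search with the DAC/comparator pair replaced by a plain threshold test. It covers the non-inverted case only; the dac_max and peak values are made-up examples.

/*
 * Illustration only: the binary search performed by the envelope detector,
 * with the DAC/comparator pair simulated by comparing against a fixed
 * "true" peak. Mirrors the high/low/level bookkeeping of the driver for
 * the non-inverted case; dac_max and true_peak are arbitrary examples.
 */
#include <stdio.h>

static int comparator_fired(int dac_level, int true_peak)
{
	/* the comparator trips while the DAC output is below the input peak */
	return dac_level < true_peak;
}

int main(void)
{
	const int dac_max = 1023;	/* assumed 10-bit DAC */
	const int true_peak = 700;	/* assumed input peak level */
	int high = dac_max;		/* non-inverted: high = max, low = -1 */
	int low = -1;
	int level;

	for (;;) {
		/* midpoint ceiling, as in the non-inverted driver case */
		level = (high + low + 1) / 2;
		if (high == low + 1)
			break;	/* peak trapped between adjacent DAC codes */
		if (comparator_fired(level, true_peak))
			low = level;
		else
			high = level;
	}
	printf("measured peak: %d (true peak %d)\n", level, true_peak);
	return 0;
}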
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index 712fbd2b1f16..3b7c4f78f37a 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -238,7 +238,9 @@ static int max1027_read_single_value(struct iio_dev *indio_dev,
/* Configure conversion register with the requested chan */
st->reg = MAX1027_CONV_REG | MAX1027_CHAN(chan->channel) |
- MAX1027_NOSCAN | !!(chan->type == IIO_TEMP);
+ MAX1027_NOSCAN;
+ if (chan->type == IIO_TEMP)
+ st->reg |= MAX1027_TEMP;
ret = spi_write(st->spi, &st->reg, 1);
if (ret < 0) {
dev_err(&indio_dev->dev,
@@ -360,17 +362,6 @@ static int max1027_set_trigger_state(struct iio_trigger *trig, bool state)
return 0;
}
-static int max1027_validate_device(struct iio_trigger *trig,
- struct iio_dev *indio_dev)
-{
- struct iio_dev *indio = iio_trigger_get_drvdata(trig);
-
- if (indio != indio_dev)
- return -EINVAL;
-
- return 0;
-}
-
static irqreturn_t max1027_trigger_handler(int irq, void *private)
{
struct iio_poll_func *pf = (struct iio_poll_func *)private;
@@ -391,7 +382,7 @@ static irqreturn_t max1027_trigger_handler(int irq, void *private)
static const struct iio_trigger_ops max1027_trigger_ops = {
.owner = THIS_MODULE,
- .validate_device = &max1027_validate_device,
+ .validate_device = &iio_trigger_validate_own_device,
.set_trigger_state = &max1027_set_trigger_state,
};
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
new file mode 100644
index 000000000000..4214b0cd6b1b
--- /dev/null
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -0,0 +1,303 @@
+/*
+ * This file is part of STM32 ADC driver
+ *
+ * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
+ *
+ * Inspired from: fsl-imx25-tsadc
+ *
+ * License type: GPLv2
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdesc.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include "stm32-adc-core.h"
+
+/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
+#define STM32F4_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00)
+#define STM32F4_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x04)
+
+/* STM32F4_ADC_CSR - bit fields */
+#define STM32F4_EOC3 BIT(17)
+#define STM32F4_EOC2 BIT(9)
+#define STM32F4_EOC1 BIT(1)
+
+/* STM32F4_ADC_CCR - bit fields */
+#define STM32F4_ADC_ADCPRE_SHIFT 16
+#define STM32F4_ADC_ADCPRE_MASK GENMASK(17, 16)
+
+/* STM32 F4 maximum analog clock rate (from datasheet) */
+#define STM32F4_ADC_MAX_CLK_RATE 36000000
+
+/**
+ * struct stm32_adc_priv - stm32 ADC core private data
+ * @irq: irq for ADC block
+ * @domain: irq domain reference
+ * @aclk: clock reference for the analog circuitry
+ * @vref: regulator reference
+ * @common: common data for all ADC instances
+ */
+struct stm32_adc_priv {
+ int irq;
+ struct irq_domain *domain;
+ struct clk *aclk;
+ struct regulator *vref;
+ struct stm32_adc_common common;
+};
+
+static struct stm32_adc_priv *to_stm32_adc_priv(struct stm32_adc_common *com)
+{
+ return container_of(com, struct stm32_adc_priv, common);
+}
+
+/* STM32F4 ADC internal common clock prescaler division ratios */
+static int stm32f4_pclk_div[] = {2, 4, 6, 8};
+
+/**
+ * stm32f4_adc_clk_sel() - Select stm32f4 ADC common clock prescaler
+ * @pdev: platform device
+ * @priv: stm32 ADC core private data
+ *
+ * Select the clock prescaler used for analog conversions, before using the ADC.
+ */
+static int stm32f4_adc_clk_sel(struct platform_device *pdev,
+ struct stm32_adc_priv *priv)
+{
+ unsigned long rate;
+ u32 val;
+ int i;
+
+ rate = clk_get_rate(priv->aclk);
+ for (i = 0; i < ARRAY_SIZE(stm32f4_pclk_div); i++) {
+ if ((rate / stm32f4_pclk_div[i]) <= STM32F4_ADC_MAX_CLK_RATE)
+ break;
+ }
+ if (i >= ARRAY_SIZE(stm32f4_pclk_div))
+ return -EINVAL;
+
+ val = readl_relaxed(priv->common.base + STM32F4_ADC_CCR);
+ val &= ~STM32F4_ADC_ADCPRE_MASK;
+ val |= i << STM32F4_ADC_ADCPRE_SHIFT;
+ writel_relaxed(val, priv->common.base + STM32F4_ADC_CCR);
+
+ dev_dbg(&pdev->dev, "Using analog clock source at %ld kHz\n",
+ rate / (stm32f4_pclk_div[i] * 1000));
+
+ return 0;
+}
+
+/* ADC common interrupt for all instances */
+static void stm32_adc_irq_handler(struct irq_desc *desc)
+{
+ struct stm32_adc_priv *priv = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 status;
+
+ chained_irq_enter(chip, desc);
+ status = readl_relaxed(priv->common.base + STM32F4_ADC_CSR);
+
+ if (status & STM32F4_EOC1)
+ generic_handle_irq(irq_find_mapping(priv->domain, 0));
+
+ if (status & STM32F4_EOC2)
+ generic_handle_irq(irq_find_mapping(priv->domain, 1));
+
+ if (status & STM32F4_EOC3)
+ generic_handle_irq(irq_find_mapping(priv->domain, 2));
+
+ chained_irq_exit(chip, desc);
+};
+
+static int stm32_adc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq);
+
+ return 0;
+}
+
+static void stm32_adc_domain_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops stm32_adc_domain_ops = {
+ .map = stm32_adc_domain_map,
+ .unmap = stm32_adc_domain_unmap,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int stm32_adc_irq_probe(struct platform_device *pdev,
+ struct stm32_adc_priv *priv)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return priv->irq;
+ }
+
+ priv->domain = irq_domain_add_simple(np, STM32_ADC_MAX_ADCS, 0,
+ &stm32_adc_domain_ops,
+ priv);
+ if (!priv->domain) {
+ dev_err(&pdev->dev, "Failed to add irq domain\n");
+ return -ENOMEM;
+ }
+
+ irq_set_chained_handler(priv->irq, stm32_adc_irq_handler);
+ irq_set_handler_data(priv->irq, priv);
+
+ return 0;
+}
+
+static void stm32_adc_irq_remove(struct platform_device *pdev,
+ struct stm32_adc_priv *priv)
+{
+ int hwirq;
+
+ for (hwirq = 0; hwirq < STM32_ADC_MAX_ADCS; hwirq++)
+ irq_dispose_mapping(irq_find_mapping(priv->domain, hwirq));
+ irq_domain_remove(priv->domain);
+ irq_set_chained_handler(priv->irq, NULL);
+}
+
+static int stm32_adc_probe(struct platform_device *pdev)
+{
+ struct stm32_adc_priv *priv;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+ int ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->common.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->common.base))
+ return PTR_ERR(priv->common.base);
+
+ priv->vref = devm_regulator_get(&pdev->dev, "vref");
+ if (IS_ERR(priv->vref)) {
+ ret = PTR_ERR(priv->vref);
+ dev_err(&pdev->dev, "vref get failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(priv->vref);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "vref enable failed\n");
+ return ret;
+ }
+
+ ret = regulator_get_voltage(priv->vref);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "vref get voltage failed, %d\n", ret);
+ goto err_regulator_disable;
+ }
+ priv->common.vref_mv = ret / 1000;
+ dev_dbg(&pdev->dev, "vref+=%dmV\n", priv->common.vref_mv);
+
+ priv->aclk = devm_clk_get(&pdev->dev, "adc");
+ if (IS_ERR(priv->aclk)) {
+ ret = PTR_ERR(priv->aclk);
+ dev_err(&pdev->dev, "Can't get 'adc' clock\n");
+ goto err_regulator_disable;
+ }
+
+ ret = clk_prepare_enable(priv->aclk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "adc clk enable failed\n");
+ goto err_regulator_disable;
+ }
+
+ ret = stm32f4_adc_clk_sel(pdev, priv);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "adc clk selection failed\n");
+ goto err_clk_disable;
+ }
+
+ ret = stm32_adc_irq_probe(pdev, priv);
+ if (ret < 0)
+ goto err_clk_disable;
+
+ platform_set_drvdata(pdev, &priv->common);
+
+ ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to populate DT children\n");
+ goto err_irq_remove;
+ }
+
+ return 0;
+
+err_irq_remove:
+ stm32_adc_irq_remove(pdev, priv);
+
+err_clk_disable:
+ clk_disable_unprepare(priv->aclk);
+
+err_regulator_disable:
+ regulator_disable(priv->vref);
+
+ return ret;
+}
+
+static int stm32_adc_remove(struct platform_device *pdev)
+{
+ struct stm32_adc_common *common = platform_get_drvdata(pdev);
+ struct stm32_adc_priv *priv = to_stm32_adc_priv(common);
+
+ of_platform_depopulate(&pdev->dev);
+ stm32_adc_irq_remove(pdev, priv);
+ clk_disable_unprepare(priv->aclk);
+ regulator_disable(priv->vref);
+
+ return 0;
+}
+
+static const struct of_device_id stm32_adc_of_match[] = {
+ { .compatible = "st,stm32f4-adc-core" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_adc_of_match);
+
+static struct platform_driver stm32_adc_driver = {
+ .probe = stm32_adc_probe,
+ .remove = stm32_adc_remove,
+ .driver = {
+ .name = "stm32-adc-core",
+ .of_match_table = stm32_adc_of_match,
+ },
+};
+module_platform_driver(stm32_adc_driver);
+
+MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 ADC core driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:stm32-adc-core");
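The prescaler chosen by stm32f4_adc_clk_sel() is simply the first entry of {2, 4, 6, 8} that brings the analog clock to 36 MHz or below. A standalone sketch of that search follows; the 90 MHz input rate is an assumed example value.

/*
 * Illustration only: the prescaler search done by stm32f4_adc_clk_sel().
 * It picks the first divider from {2, 4, 6, 8} that keeps the analog
 * clock at or below 36 MHz. The 90 MHz input rate is an assumed example.
 */
#include <stdio.h>

#define STM32F4_ADC_MAX_CLK_RATE 36000000UL

int main(void)
{
	static const unsigned long pclk_div[] = { 2, 4, 6, 8 };
	const unsigned long rate = 90000000UL;	/* assumed "adc" clock rate */
	unsigned int i;

	for (i = 0; i < sizeof(pclk_div) / sizeof(pclk_div[0]); i++) {
		if (rate / pclk_div[i] <= STM32F4_ADC_MAX_CLK_RATE) {
			printf("ADCPRE=%u: %lu Hz / %lu = %lu Hz\n", i, rate,
			       pclk_div[i], rate / pclk_div[i]);
			return 0;
		}
	}
	printf("no suitable prescaler for %lu Hz\n", rate);
	return 1;
}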
diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h
new file mode 100644
index 000000000000..081fa5f55015
--- /dev/null
+++ b/drivers/iio/adc/stm32-adc-core.h
@@ -0,0 +1,52 @@
+/*
+ * This file is part of STM32 ADC driver
+ *
+ * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
+ *
+ * License type: GPLv2
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __STM32_ADC_H
+#define __STM32_ADC_H
+
+/*
+ * STM32 - ADC global register map
+ * ________________________________________________________
+ * | Offset | Register |
+ * --------------------------------------------------------
+ * | 0x000 | Master ADC1 |
+ * --------------------------------------------------------
+ * | 0x100 | Slave ADC2 |
+ * --------------------------------------------------------
+ * | 0x200 | Slave ADC3 |
+ * --------------------------------------------------------
+ * | 0x300 | Master & Slave common regs |
+ * --------------------------------------------------------
+ */
+#define STM32_ADC_MAX_ADCS 3
+#define STM32_ADCX_COMN_OFFSET 0x300
+
+/**
+ * struct stm32_adc_common - stm32 ADC driver common data (for all instances)
+ * @base: control registers base cpu addr
+ * @vref_mv: vref voltage (mV)
+ */
+struct stm32_adc_common {
+ void __iomem *base;
+ int vref_mv;
+};
+
+#endif
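The register map documented above boils down to base + instance_offset + reg for the per-instance registers and base + 0x300 + reg for the common ones, which is the arithmetic the per-instance driver later uses in its register accessors. A small sketch of that addressing; the base address is an arbitrary example, not a real MMIO mapping.

/*
 * Illustration only: deriving register addresses from the map above.
 * Instance registers live at base + {0x000, 0x100, 0x200} + reg, the
 * common registers at base + 0x300 + reg.
 */
#include <stdio.h>

#define STM32_ADCX_COMN_OFFSET	0x300

int main(void)
{
	const unsigned long base = 0x40012000UL;	/* example base */
	const unsigned long instance_offset[] = { 0x000, 0x100, 0x200 };
	const unsigned long cr2 = 0x08;			/* per-instance CR2 */
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("ADC%u CR2 at 0x%lx\n", i + 1,
		       base + instance_offset[i] + cr2);
	printf("common CCR at 0x%lx\n",
	       base + STM32_ADCX_COMN_OFFSET + 0x04);
	return 0;
}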
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
new file mode 100644
index 000000000000..5715e79f4935
--- /dev/null
+++ b/drivers/iio/adc/stm32-adc.c
@@ -0,0 +1,518 @@
+/*
+ * This file is part of STM32 ADC driver
+ *
+ * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
+ *
+ * License type: GPLv2
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include "stm32-adc-core.h"
+
+/* STM32F4 - Registers for each ADC instance */
+#define STM32F4_ADC_SR 0x00
+#define STM32F4_ADC_CR1 0x04
+#define STM32F4_ADC_CR2 0x08
+#define STM32F4_ADC_SMPR1 0x0C
+#define STM32F4_ADC_SMPR2 0x10
+#define STM32F4_ADC_HTR 0x24
+#define STM32F4_ADC_LTR 0x28
+#define STM32F4_ADC_SQR1 0x2C
+#define STM32F4_ADC_SQR2 0x30
+#define STM32F4_ADC_SQR3 0x34
+#define STM32F4_ADC_JSQR 0x38
+#define STM32F4_ADC_JDR1 0x3C
+#define STM32F4_ADC_JDR2 0x40
+#define STM32F4_ADC_JDR3 0x44
+#define STM32F4_ADC_JDR4 0x48
+#define STM32F4_ADC_DR 0x4C
+
+/* STM32F4_ADC_SR - bit fields */
+#define STM32F4_STRT BIT(4)
+#define STM32F4_EOC BIT(1)
+
+/* STM32F4_ADC_CR1 - bit fields */
+#define STM32F4_SCAN BIT(8)
+#define STM32F4_EOCIE BIT(5)
+
+/* STM32F4_ADC_CR2 - bit fields */
+#define STM32F4_SWSTART BIT(30)
+#define STM32F4_EXTEN_MASK GENMASK(29, 28)
+#define STM32F4_EOCS BIT(10)
+#define STM32F4_ADON BIT(0)
+
+/* STM32F4_ADC_SQR1 - bit fields */
+#define STM32F4_L_SHIFT 20
+#define STM32F4_L_MASK GENMASK(23, 20)
+
+/* STM32F4_ADC_SQR3 - bit fields */
+#define STM32F4_SQ1_SHIFT 0
+#define STM32F4_SQ1_MASK GENMASK(4, 0)
+
+#define STM32_ADC_TIMEOUT_US 100000
+#define STM32_ADC_TIMEOUT (msecs_to_jiffies(STM32_ADC_TIMEOUT_US / 1000))
+
+/**
+ * struct stm32_adc - private data of each ADC IIO instance
+ * @common: reference to ADC block common data
+ * @offset: ADC instance register offset in ADC block
+ * @completion: end of single conversion completion
+ * @buffer: data buffer
+ * @clk: clock for this adc instance
+ * @irq: interrupt for this adc instance
+ * @lock: spinlock
+ */
+struct stm32_adc {
+ struct stm32_adc_common *common;
+ u32 offset;
+ struct completion completion;
+ u16 *buffer;
+ struct clk *clk;
+ int irq;
+ spinlock_t lock; /* interrupt lock */
+};
+
+/**
+ * struct stm32_adc_chan_spec - specification of stm32 adc channel
+ * @type: IIO channel type
+ * @channel: channel number (single ended)
+ * @name: channel name (single ended)
+ */
+struct stm32_adc_chan_spec {
+ enum iio_chan_type type;
+ int channel;
+ const char *name;
+};
+
+/* Input definitions common for all STM32F4 instances */
+static const struct stm32_adc_chan_spec stm32f4_adc123_channels[] = {
+ { IIO_VOLTAGE, 0, "in0" },
+ { IIO_VOLTAGE, 1, "in1" },
+ { IIO_VOLTAGE, 2, "in2" },
+ { IIO_VOLTAGE, 3, "in3" },
+ { IIO_VOLTAGE, 4, "in4" },
+ { IIO_VOLTAGE, 5, "in5" },
+ { IIO_VOLTAGE, 6, "in6" },
+ { IIO_VOLTAGE, 7, "in7" },
+ { IIO_VOLTAGE, 8, "in8" },
+ { IIO_VOLTAGE, 9, "in9" },
+ { IIO_VOLTAGE, 10, "in10" },
+ { IIO_VOLTAGE, 11, "in11" },
+ { IIO_VOLTAGE, 12, "in12" },
+ { IIO_VOLTAGE, 13, "in13" },
+ { IIO_VOLTAGE, 14, "in14" },
+ { IIO_VOLTAGE, 15, "in15" },
+};
+
+/**
+ * STM32 ADC register access routines
+ * @adc: stm32 adc instance
+ * @reg: reg offset in adc instance
+ *
+ * Note: All instances share the same base, with a 0x0, 0x100 or 0x200 offset
+ * respectively for adc1, adc2 and adc3.
+ */
+static u32 stm32_adc_readl(struct stm32_adc *adc, u32 reg)
+{
+ return readl_relaxed(adc->common->base + adc->offset + reg);
+}
+
+static u16 stm32_adc_readw(struct stm32_adc *adc, u32 reg)
+{
+ return readw_relaxed(adc->common->base + adc->offset + reg);
+}
+
+static void stm32_adc_writel(struct stm32_adc *adc, u32 reg, u32 val)
+{
+ writel_relaxed(val, adc->common->base + adc->offset + reg);
+}
+
+static void stm32_adc_set_bits(struct stm32_adc *adc, u32 reg, u32 bits)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adc->lock, flags);
+ stm32_adc_writel(adc, reg, stm32_adc_readl(adc, reg) | bits);
+ spin_unlock_irqrestore(&adc->lock, flags);
+}
+
+static void stm32_adc_clr_bits(struct stm32_adc *adc, u32 reg, u32 bits)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adc->lock, flags);
+ stm32_adc_writel(adc, reg, stm32_adc_readl(adc, reg) & ~bits);
+ spin_unlock_irqrestore(&adc->lock, flags);
+}
+
+/**
+ * stm32_adc_conv_irq_enable() - Enable end of conversion interrupt
+ * @adc: stm32 adc instance
+ */
+static void stm32_adc_conv_irq_enable(struct stm32_adc *adc)
+{
+ stm32_adc_set_bits(adc, STM32F4_ADC_CR1, STM32F4_EOCIE);
+};
+
+/**
+ * stm32_adc_conv_irq_disable() - Disable end of conversion interrupt
+ * @adc: stm32 adc instance
+ */
+static void stm32_adc_conv_irq_disable(struct stm32_adc *adc)
+{
+ stm32_adc_clr_bits(adc, STM32F4_ADC_CR1, STM32F4_EOCIE);
+}
+
+/**
+ * stm32_adc_start_conv() - Start conversions for regular channels.
+ * @adc: stm32 adc instance
+ */
+static void stm32_adc_start_conv(struct stm32_adc *adc)
+{
+ stm32_adc_set_bits(adc, STM32F4_ADC_CR1, STM32F4_SCAN);
+ stm32_adc_set_bits(adc, STM32F4_ADC_CR2, STM32F4_EOCS | STM32F4_ADON);
+
+ /* Wait for Power-up time (tSTAB from datasheet) */
+ usleep_range(2, 3);
+
+	/* Software start? (e.g. trigger detection disabled?) */
+ if (!(stm32_adc_readl(adc, STM32F4_ADC_CR2) & STM32F4_EXTEN_MASK))
+ stm32_adc_set_bits(adc, STM32F4_ADC_CR2, STM32F4_SWSTART);
+}
+
+static void stm32_adc_stop_conv(struct stm32_adc *adc)
+{
+ stm32_adc_clr_bits(adc, STM32F4_ADC_CR2, STM32F4_EXTEN_MASK);
+ stm32_adc_clr_bits(adc, STM32F4_ADC_SR, STM32F4_STRT);
+
+ stm32_adc_clr_bits(adc, STM32F4_ADC_CR1, STM32F4_SCAN);
+ stm32_adc_clr_bits(adc, STM32F4_ADC_CR2, STM32F4_ADON);
+}
+
+/**
+ * stm32_adc_single_conv() - Performs a single conversion
+ * @indio_dev: IIO device
+ * @chan: IIO channel
+ * @res: conversion result
+ *
+ * The function performs a single conversion on a given channel:
+ * - Program sequencer with one channel (e.g. in SQ1 with len = 1)
+ * - Use SW trigger
+ * - Start conversion, then wait for interrupt completion.
+ */
+static int stm32_adc_single_conv(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *res)
+{
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ long timeout;
+ u32 val;
+ u16 result;
+ int ret;
+
+ reinit_completion(&adc->completion);
+
+ adc->buffer = &result;
+
+ /* Program chan number in regular sequence */
+ val = stm32_adc_readl(adc, STM32F4_ADC_SQR3);
+ val &= ~STM32F4_SQ1_MASK;
+ val |= chan->channel << STM32F4_SQ1_SHIFT;
+ stm32_adc_writel(adc, STM32F4_ADC_SQR3, val);
+
+ /* Set regular sequence len (0 for 1 conversion) */
+ stm32_adc_clr_bits(adc, STM32F4_ADC_SQR1, STM32F4_L_MASK);
+
+ /* Trigger detection disabled (conversion can be launched in SW) */
+ stm32_adc_clr_bits(adc, STM32F4_ADC_CR2, STM32F4_EXTEN_MASK);
+
+ stm32_adc_conv_irq_enable(adc);
+
+ stm32_adc_start_conv(adc);
+
+ timeout = wait_for_completion_interruptible_timeout(
+ &adc->completion, STM32_ADC_TIMEOUT);
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ } else if (timeout < 0) {
+ ret = timeout;
+ } else {
+ *res = result;
+ ret = IIO_VAL_INT;
+ }
+
+ stm32_adc_stop_conv(adc);
+
+ stm32_adc_conv_irq_disable(adc);
+
+ return ret;
+}
+
+static int stm32_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ if (chan->type == IIO_VOLTAGE)
+ ret = stm32_adc_single_conv(indio_dev, chan, val);
+ else
+ ret = -EINVAL;
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = adc->common->vref_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static irqreturn_t stm32_adc_isr(int irq, void *data)
+{
+ struct stm32_adc *adc = data;
+ u32 status = stm32_adc_readl(adc, STM32F4_ADC_SR);
+
+ if (status & STM32F4_EOC) {
+ *adc->buffer = stm32_adc_readw(adc, STM32F4_ADC_DR);
+ complete(&adc->completion);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int stm32_adc_of_xlate(struct iio_dev *indio_dev,
+ const struct of_phandle_args *iiospec)
+{
+ int i;
+
+ for (i = 0; i < indio_dev->num_channels; i++)
+ if (indio_dev->channels[i].channel == iiospec->args[0])
+ return i;
+
+ return -EINVAL;
+}
+
+/**
+ * stm32_adc_debugfs_reg_access - read or write register value
+ *
+ * To read a value from an ADC register:
+ * echo [ADC reg offset] > direct_reg_access
+ * cat direct_reg_access
+ *
+ * To write a value in an ADC register:
+ * echo [ADC_reg_offset] [value] > direct_reg_access
+ */
+static int stm32_adc_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned reg, unsigned writeval,
+ unsigned *readval)
+{
+ struct stm32_adc *adc = iio_priv(indio_dev);
+
+ if (!readval)
+ stm32_adc_writel(adc, reg, writeval);
+ else
+ *readval = stm32_adc_readl(adc, reg);
+
+ return 0;
+}
+
+static const struct iio_info stm32_adc_iio_info = {
+ .read_raw = stm32_adc_read_raw,
+ .debugfs_reg_access = stm32_adc_debugfs_reg_access,
+ .of_xlate = stm32_adc_of_xlate,
+ .driver_module = THIS_MODULE,
+};
+
+static void stm32_adc_chan_init_one(struct iio_dev *indio_dev,
+ struct iio_chan_spec *chan,
+ const struct stm32_adc_chan_spec *channel,
+ int scan_index)
+{
+ chan->type = channel->type;
+ chan->channel = channel->channel;
+ chan->datasheet_name = channel->name;
+ chan->scan_index = scan_index;
+ chan->indexed = 1;
+ chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
+ chan->scan_type.sign = 'u';
+ chan->scan_type.realbits = 12;
+ chan->scan_type.storagebits = 16;
+}
+
+static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
+{
+ struct device_node *node = indio_dev->dev.of_node;
+ struct property *prop;
+ const __be32 *cur;
+ struct iio_chan_spec *channels;
+ int scan_index = 0, num_channels;
+ u32 val;
+
+ num_channels = of_property_count_u32_elems(node, "st,adc-channels");
+ if (num_channels < 0 ||
+ num_channels >= ARRAY_SIZE(stm32f4_adc123_channels)) {
+ dev_err(&indio_dev->dev, "Bad st,adc-channels?\n");
+ return num_channels < 0 ? num_channels : -EINVAL;
+ }
+
+ channels = devm_kcalloc(&indio_dev->dev, num_channels,
+ sizeof(struct iio_chan_spec), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ of_property_for_each_u32(node, "st,adc-channels", prop, cur, val) {
+ if (val >= ARRAY_SIZE(stm32f4_adc123_channels)) {
+ dev_err(&indio_dev->dev, "Invalid channel %d\n", val);
+ return -EINVAL;
+ }
+ stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
+ &stm32f4_adc123_channels[val],
+ scan_index);
+ scan_index++;
+ }
+
+ indio_dev->num_channels = scan_index;
+ indio_dev->channels = channels;
+
+ return 0;
+}
+
+static int stm32_adc_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct stm32_adc *adc;
+ int ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+ adc->common = dev_get_drvdata(pdev->dev.parent);
+ spin_lock_init(&adc->lock);
+ init_completion(&adc->completion);
+
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->info = &stm32_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ platform_set_drvdata(pdev, adc);
+
+ ret = of_property_read_u32(pdev->dev.of_node, "reg", &adc->offset);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "missing reg property\n");
+ return -EINVAL;
+ }
+
+ adc->irq = platform_get_irq(pdev, 0);
+ if (adc->irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return adc->irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, adc->irq, stm32_adc_isr,
+ 0, pdev->name, adc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ return ret;
+ }
+
+ adc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(adc->clk)) {
+ dev_err(&pdev->dev, "Can't get clock\n");
+ return PTR_ERR(adc->clk);
+ }
+
+ ret = clk_prepare_enable(adc->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "clk enable failed\n");
+ return ret;
+ }
+
+ ret = stm32_adc_chan_of_init(indio_dev);
+ if (ret < 0)
+ goto err_clk_disable;
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "iio dev register failed\n");
+ goto err_clk_disable;
+ }
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(adc->clk);
+
+ return ret;
+}
+
+static int stm32_adc_remove(struct platform_device *pdev)
+{
+ struct stm32_adc *adc = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+
+ iio_device_unregister(indio_dev);
+ clk_disable_unprepare(adc->clk);
+
+ return 0;
+}
+
+static const struct of_device_id stm32_adc_of_match[] = {
+ { .compatible = "st,stm32f4-adc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_adc_of_match);
+
+static struct platform_driver stm32_adc_driver = {
+ .probe = stm32_adc_probe,
+ .remove = stm32_adc_remove,
+ .driver = {
+ .name = "stm32-adc",
+ .of_match_table = stm32_adc_of_match,
+ },
+};
+module_platform_driver(stm32_adc_driver);
+
+MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 ADC IIO driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:stm32-adc");
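stm32_adc_single_conv() selects the channel by packing it into the SQ1 field of SQR3 (and clearing the L field in SQR1 for a one-element sequence). A short sketch of that read-modify-write using the same mask and shift; the previous register contents and the channel number are made-up examples.

/*
 * Illustration only: the SQR3 read-modify-write that
 * stm32_adc_single_conv() performs to select one channel.
 */
#include <stdio.h>

#define STM32F4_SQ1_SHIFT	0
#define STM32F4_SQ1_MASK	0x1fU	/* GENMASK(4, 0) */

int main(void)
{
	unsigned int sqr3 = 0x00000010;	/* example previous contents */
	const unsigned int channel = 7;	/* example channel ("in7") */

	sqr3 &= ~STM32F4_SQ1_MASK;
	sqr3 |= channel << STM32F4_SQ1_SHIFT;
	printf("SQR3 = 0x%08x (SQ1 = %u)\n", sqr3, sqr3 & STM32F4_SQ1_MASK);
	return 0;
}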
diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
index f4ba23effe9a..e952e94a14af 100644
--- a/drivers/iio/adc/ti-adc0832.c
+++ b/drivers/iio/adc/ti-adc0832.c
@@ -14,6 +14,10 @@
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
#include <linux/regulator/consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
enum {
adc0831,
@@ -38,10 +42,16 @@ struct adc0832 {
.indexed = 1, \
.channel = chan, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = chan, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 8, \
+ .storagebits = 8, \
+ }, \
}
-#define ADC0832_VOLTAGE_CHANNEL_DIFF(chan1, chan2) \
+#define ADC0832_VOLTAGE_CHANNEL_DIFF(chan1, chan2, si) \
{ \
.type = IIO_VOLTAGE, \
.indexed = 1, \
@@ -49,18 +59,26 @@ struct adc0832 {
.channel2 = (chan2), \
.differential = 1, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = si, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 8, \
+ .storagebits = 8, \
+ }, \
}
static const struct iio_chan_spec adc0831_channels[] = {
- ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 0),
+ IIO_CHAN_SOFT_TIMESTAMP(1),
};
static const struct iio_chan_spec adc0832_channels[] = {
ADC0832_VOLTAGE_CHANNEL(0),
ADC0832_VOLTAGE_CHANNEL(1),
- ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
- ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 2),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0, 3),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
};
static const struct iio_chan_spec adc0834_channels[] = {
@@ -68,10 +86,11 @@ static const struct iio_chan_spec adc0834_channels[] = {
ADC0832_VOLTAGE_CHANNEL(1),
ADC0832_VOLTAGE_CHANNEL(2),
ADC0832_VOLTAGE_CHANNEL(3),
- ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
- ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
- ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3),
- ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 4),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0, 5),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3, 6),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2, 7),
+ IIO_CHAN_SOFT_TIMESTAMP(8),
};
static const struct iio_chan_spec adc0838_channels[] = {
@@ -83,14 +102,15 @@ static const struct iio_chan_spec adc0838_channels[] = {
ADC0832_VOLTAGE_CHANNEL(5),
ADC0832_VOLTAGE_CHANNEL(6),
ADC0832_VOLTAGE_CHANNEL(7),
- ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
- ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
- ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3),
- ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2),
- ADC0832_VOLTAGE_CHANNEL_DIFF(4, 5),
- ADC0832_VOLTAGE_CHANNEL_DIFF(5, 4),
- ADC0832_VOLTAGE_CHANNEL_DIFF(6, 7),
- ADC0832_VOLTAGE_CHANNEL_DIFF(7, 6),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 8),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0, 9),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3, 10),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2, 11),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(4, 5, 12),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(5, 4, 13),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(6, 7, 14),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(7, 6, 15),
+ IIO_CHAN_SOFT_TIMESTAMP(16),
};
static int adc0831_adc_conversion(struct adc0832 *adc)
@@ -178,6 +198,42 @@ static const struct iio_info adc0832_info = {
.driver_module = THIS_MODULE,
};
+static irqreturn_t adc0832_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adc0832 *adc = iio_priv(indio_dev);
+ u8 data[24] = { }; /* 16x 1 byte ADC data + 8 bytes timestamp */
+ int scan_index;
+ int i = 0;
+
+ mutex_lock(&adc->lock);
+
+ for_each_set_bit(scan_index, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ const struct iio_chan_spec *scan_chan =
+ &indio_dev->channels[scan_index];
+ int ret = adc0832_adc_conversion(adc, scan_chan->channel,
+ scan_chan->differential);
+ if (ret < 0) {
+ dev_warn(&adc->spi->dev,
+ "failed to get conversion data\n");
+ goto out;
+ }
+
+ data[i] = ret;
+ i++;
+ }
+ iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_get_time_ns(indio_dev));
+out:
+ mutex_unlock(&adc->lock);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static int adc0832_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -233,9 +289,20 @@ static int adc0832_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ adc0832_trigger_handler, NULL);
+ if (ret)
+ goto err_reg_disable;
+
ret = iio_device_register(indio_dev);
if (ret)
- regulator_disable(adc->reg);
+ goto err_buffer_cleanup;
+
+ return 0;
+err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+err_reg_disable:
+ regulator_disable(adc->reg);
return ret;
}
@@ -246,6 +313,7 @@ static int adc0832_remove(struct spi_device *spi)
struct adc0832 *adc = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
regulator_disable(adc->reg);
return 0;
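The 24-byte scan buffer in adc0832_trigger_handler() above is sized for up to sixteen one-byte samples followed by the 8-byte timestamp that iio_push_to_buffers_with_timestamp() places on an 8-byte boundary. A small sketch of that layout arithmetic; only the sample count and buffer size come from the patch, the rest is illustrative.

/*
 * Illustration only: sizing the adc0832 scan buffer. With up to 16
 * one-byte samples, the 8-byte timestamp appended by
 * iio_push_to_buffers_with_timestamp() must start on an 8-byte boundary,
 * i.e. at offset 16, giving a 24-byte buffer.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int max_samples = 16;	/* adc0838 worst case */
	const unsigned int ts_align = 8;	/* s64 timestamp alignment */
	unsigned int ts_offset =
		(max_samples + ts_align - 1) / ts_align * ts_align;

	printf("timestamp at offset %u, buffer size %u bytes\n",
	       ts_offset, ts_offset + 8);
	return 0;
}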
diff --git a/drivers/iio/adc/ti-adc161s626.c b/drivers/iio/adc/ti-adc161s626.c
index f94b69f9c288..4836a0d7aef5 100644
--- a/drivers/iio/adc/ti-adc161s626.c
+++ b/drivers/iio/adc/ti-adc161s626.c
@@ -27,6 +27,7 @@
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
+#include <linux/regulator/consumer.h>
#define TI_ADC_DRV_NAME "ti-adc161s626"
@@ -39,7 +40,9 @@ static const struct iio_chan_spec ti_adc141s626_channels[] = {
{
.type = IIO_VOLTAGE,
.channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
.scan_index = 0,
.scan_type = {
.sign = 's',
@@ -54,7 +57,9 @@ static const struct iio_chan_spec ti_adc161s626_channels[] = {
{
.type = IIO_VOLTAGE,
.channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
.scan_index = 0,
.scan_type = {
.sign = 's',
@@ -68,6 +73,8 @@ static const struct iio_chan_spec ti_adc161s626_channels[] = {
struct ti_adc_data {
struct iio_dev *indio_dev;
struct spi_device *spi;
+ struct regulator *ref;
+
u8 read_size;
u8 shift;
@@ -135,18 +142,32 @@ static int ti_adc_read_raw(struct iio_dev *indio_dev,
struct ti_adc_data *data = iio_priv(indio_dev);
int ret;
- if (mask != IIO_CHAN_INFO_RAW)
- return -EINVAL;
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ ret = ti_adc_read_measurement(data, chan, val);
+ iio_device_release_direct_mode(indio_dev);
- ret = ti_adc_read_measurement(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
+ if (ret)
+ return ret;
- if (!ret)
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ ret = regulator_get_voltage(data->ref);
+ if (ret < 0)
+ return ret;
+
+ *val = ret / 1000;
+ *val2 = chan->scan_type.realbits;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = 1 << (chan->scan_type.realbits - 1);
+ return IIO_VAL_INT;
+ }
return 0;
}
@@ -191,10 +212,17 @@ static int ti_adc_probe(struct spi_device *spi)
break;
}
+ data->ref = devm_regulator_get(&spi->dev, "vdda");
+ if (!IS_ERR(data->ref)) {
+ ret = regulator_enable(data->ref);
+ if (ret < 0)
+ return ret;
+ }
+
ret = iio_triggered_buffer_setup(indio_dev, NULL,
ti_adc_trigger_handler, NULL);
if (ret)
- return ret;
+ goto error_regulator_disable;
ret = iio_device_register(indio_dev);
if (ret)
@@ -205,15 +233,20 @@ static int ti_adc_probe(struct spi_device *spi)
error_unreg_buffer:
iio_triggered_buffer_cleanup(indio_dev);
+error_regulator_disable:
+ regulator_disable(data->ref);
+
return ret;
}
static int ti_adc_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ti_adc_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
+ regulator_disable(data->ref);
return 0;
}
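The SCALE and OFFSET values added above combine in the usual IIO way, value = (raw + offset) * scale. A minimal sketch of that conversion; the 3.0 V vdda supply is an assumed example, not something the patch fixes.

/*
 * Illustration only: converting a ti-adc161s626 raw sample with the
 * OFFSET (2^(realbits - 1)) and SCALE (vdda_mV / 2^realbits) values the
 * driver now reports. The 3000 mV supply is an assumed example.
 */
#include <stdio.h>

int main(void)
{
	const int vdda_mv = 3000;		/* assumed "vdda" voltage */
	const int realbits = 16;		/* 16-bit ADC161S626 */
	const long offset = 1L << (realbits - 1);
	const double scale_mv = (double)vdda_mv / (1L << realbits);
	const int raw = -1234;			/* example signed sample */

	printf("raw=%d -> %.3f mV\n", raw, (raw + offset) * scale_mv);
	return 0;
}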
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index c3cfacca2541..ad9dec30bb30 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -30,10 +30,28 @@
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+
+#define DMA_BUFFER_SIZE SZ_2K
+
+struct tiadc_dma {
+ struct dma_slave_config conf;
+ struct dma_chan *chan;
+ dma_addr_t addr;
+ dma_cookie_t cookie;
+ u8 *buf;
+ int current_period;
+ int period_size;
+ u8 fifo_thresh;
+};
+
struct tiadc_device {
struct ti_tscadc_dev *mfd_tscadc;
+ struct tiadc_dma dma;
struct mutex fifo1_lock; /* to protect fifo access */
int channels;
+ int total_ch_enabled;
u8 channel_line[8];
u8 channel_step[8];
int buffer_en_ch_steps;
@@ -198,6 +216,67 @@ static irqreturn_t tiadc_worker_h(int irq, void *private)
return IRQ_HANDLED;
}
+static void tiadc_dma_rx_complete(void *param)
+{
+ struct iio_dev *indio_dev = param;
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct tiadc_dma *dma = &adc_dev->dma;
+ u8 *data;
+ int i;
+
+ data = dma->buf + dma->current_period * dma->period_size;
+ dma->current_period = 1 - dma->current_period; /* swap the buffer ID */
+
+ for (i = 0; i < dma->period_size; i += indio_dev->scan_bytes) {
+ iio_push_to_buffers(indio_dev, data);
+ data += indio_dev->scan_bytes;
+ }
+}
+
+static int tiadc_start_dma(struct iio_dev *indio_dev)
+{
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct tiadc_dma *dma = &adc_dev->dma;
+ struct dma_async_tx_descriptor *desc;
+
+ dma->current_period = 0; /* We start to fill period 0 */
+ /*
+ * Make the fifo thresh as the multiple of total number of
+	 * Make the FIFO threshold a multiple of the total number of
+	 * enabled channels, and make sure that the cyclic DMA period
+	 * length is also a multiple of that number.
+	 * This ensures that no invalid data is reported
+ */
+ dma->fifo_thresh = rounddown(FIFO1_THRESHOLD + 1,
+ adc_dev->total_ch_enabled) - 1;
+ /* Make sure that period length is multiple of fifo thresh level */
+ dma->period_size = rounddown(DMA_BUFFER_SIZE / 2,
+ (dma->fifo_thresh + 1) * sizeof(u16));
+
+ dma->conf.src_maxburst = dma->fifo_thresh + 1;
+ dmaengine_slave_config(dma->chan, &dma->conf);
+
+ desc = dmaengine_prep_dma_cyclic(dma->chan, dma->addr,
+ dma->period_size * 2,
+ dma->period_size, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc)
+ return -EBUSY;
+
+ desc->callback = tiadc_dma_rx_complete;
+ desc->callback_param = indio_dev;
+
+ dma->cookie = dmaengine_submit(desc);
+
+ dma_async_issue_pending(dma->chan);
+
+ tiadc_writel(adc_dev, REG_FIFO1THR, dma->fifo_thresh);
+ tiadc_writel(adc_dev, REG_DMA1REQ, dma->fifo_thresh);
+ tiadc_writel(adc_dev, REG_DMAENABLE_SET, DMA_FIFO1);
+
+ return 0;
+}
+
static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
{
struct tiadc_device *adc_dev = iio_priv(indio_dev);
@@ -218,20 +297,30 @@ static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
{
struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct tiadc_dma *dma = &adc_dev->dma;
+ unsigned int irq_enable;
unsigned int enb = 0;
u8 bit;
tiadc_step_config(indio_dev);
- for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels)
+ for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels) {
enb |= (get_adc_step_bit(adc_dev, bit) << 1);
+ adc_dev->total_ch_enabled++;
+ }
adc_dev->buffer_en_ch_steps = enb;
+ if (dma->chan)
+ tiadc_start_dma(indio_dev);
+
am335x_tsc_se_set_cache(adc_dev->mfd_tscadc, enb);
tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1THRES
| IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW);
- tiadc_writel(adc_dev, REG_IRQENABLE, IRQENB_FIFO1THRES
- | IRQENB_FIFO1OVRRUN);
+
+ irq_enable = IRQENB_FIFO1OVRRUN;
+ if (!dma->chan)
+ irq_enable |= IRQENB_FIFO1THRES;
+ tiadc_writel(adc_dev, REG_IRQENABLE, irq_enable);
return 0;
}
@@ -239,12 +328,18 @@ static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
static int tiadc_buffer_predisable(struct iio_dev *indio_dev)
{
struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct tiadc_dma *dma = &adc_dev->dma;
int fifo1count, i, read;
tiadc_writel(adc_dev, REG_IRQCLR, (IRQENB_FIFO1THRES |
IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW));
am335x_tsc_se_clr(adc_dev->mfd_tscadc, adc_dev->buffer_en_ch_steps);
adc_dev->buffer_en_ch_steps = 0;
+ adc_dev->total_ch_enabled = 0;
+ if (dma->chan) {
+ tiadc_writel(adc_dev, REG_DMAENABLE_CLEAR, 0x2);
+ dmaengine_terminate_async(dma->chan);
+ }
/* Flush FIFO of leftover data in the time it takes to disable adc */
fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
@@ -430,6 +525,41 @@ static const struct iio_info tiadc_info = {
.driver_module = THIS_MODULE,
};
+static int tiadc_request_dma(struct platform_device *pdev,
+ struct tiadc_device *adc_dev)
+{
+ struct tiadc_dma *dma = &adc_dev->dma;
+ dma_cap_mask_t mask;
+
+ /* Default slave configuration parameters */
+ dma->conf.direction = DMA_DEV_TO_MEM;
+ dma->conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ dma->conf.src_addr = adc_dev->mfd_tscadc->tscadc_phys_base + REG_FIFO1;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_CYCLIC, mask);
+
+ /* Get a channel for RX */
+ dma->chan = dma_request_chan(adc_dev->mfd_tscadc->dev, "fifo1");
+ if (IS_ERR(dma->chan)) {
+ int ret = PTR_ERR(dma->chan);
+
+ dma->chan = NULL;
+ return ret;
+ }
+
+ /* RX buffer */
+ dma->buf = dma_alloc_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
+ &dma->addr, GFP_KERNEL);
+ if (!dma->buf)
+ goto err;
+
+ return 0;
+err:
+ dma_release_channel(dma->chan);
+ return -ENOMEM;
+}
+
static int tiadc_parse_dt(struct platform_device *pdev,
struct tiadc_device *adc_dev)
{
@@ -512,8 +642,14 @@ static int tiadc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, indio_dev);
+ err = tiadc_request_dma(pdev, adc_dev);
+ if (err && err == -EPROBE_DEFER)
+ goto err_dma;
+
return 0;
+err_dma:
+ iio_device_unregister(indio_dev);
err_buffer_unregister:
tiadc_iio_buffered_hardware_remove(indio_dev);
err_free_channels:
@@ -525,8 +661,14 @@ static int tiadc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct tiadc_dma *dma = &adc_dev->dma;
u32 step_en;
+ if (dma->chan) {
+ dma_free_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
+ dma->buf, dma->addr);
+ dma_release_channel(dma->chan);
+ }
iio_device_unregister(indio_dev);
tiadc_iio_buffered_hardware_remove(indio_dev);
tiadc_channels_remove(indio_dev);
diff --git a/drivers/iio/common/Kconfig b/drivers/iio/common/Kconfig
index 26a6026de614..e108996a9627 100644
--- a/drivers/iio/common/Kconfig
+++ b/drivers/iio/common/Kconfig
@@ -2,6 +2,7 @@
# IIO common modules
#
+source "drivers/iio/common/cros_ec_sensors/Kconfig"
source "drivers/iio/common/hid-sensors/Kconfig"
source "drivers/iio/common/ms_sensors/Kconfig"
source "drivers/iio/common/ssp_sensors/Kconfig"
diff --git a/drivers/iio/common/Makefile b/drivers/iio/common/Makefile
index 585da6a1b188..6fa760e1bdd5 100644
--- a/drivers/iio/common/Makefile
+++ b/drivers/iio/common/Makefile
@@ -7,6 +7,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-y += cros_ec_sensors/
obj-y += hid-sensors/
obj-y += ms_sensors/
obj-y += ssp_sensors/
diff --git a/drivers/iio/common/cros_ec_sensors/Kconfig b/drivers/iio/common/cros_ec_sensors/Kconfig
new file mode 100644
index 000000000000..135f6825903f
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/Kconfig
@@ -0,0 +1,22 @@
+#
+# Chrome OS Embedded Controller managed sensors library
+#
+config IIO_CROS_EC_SENSORS_CORE
+ tristate "ChromeOS EC Sensors Core"
+ depends on SYSFS && MFD_CROS_EC
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Base module for the ChromeOS EC Sensors module.
+ Contains core functions used by other IIO CrosEC sensor
+ drivers.
+ Defines common attributes and a sysfs interrupt handler.
+
+config IIO_CROS_EC_SENSORS
+ tristate "ChromeOS EC Contiguous Sensors"
+ depends on IIO_CROS_EC_SENSORS_CORE
+ help
+ Module to handle 3D contiguous sensors like
+ accelerometers, gyroscopes and magnetometers that are
+ presented by the ChromeOS EC sensor hub.
+ Creates an IIO device for each function.
diff --git a/drivers/iio/common/cros_ec_sensors/Makefile b/drivers/iio/common/cros_ec_sensors/Makefile
new file mode 100644
index 000000000000..ec716ff2a775
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for sensors seen through the ChromeOS EC sensor hub.
+#
+
+obj-$(CONFIG_IIO_CROS_EC_SENSORS_CORE) += cros_ec_sensors_core.o
+obj-$(CONFIG_IIO_CROS_EC_SENSORS) += cros_ec_sensors.o
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
new file mode 100644
index 000000000000..d6c372bb433b
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -0,0 +1,322 @@
+/*
+ * cros_ec_sensors - Driver for Chrome OS Embedded Controller sensors.
+ *
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver uses the cros-ec interface to communicate with the Chrome OS
+ * EC about sensor data. Data access is presented through iio sysfs.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "cros_ec_sensors_core.h"
+
+#define CROS_EC_SENSORS_MAX_CHANNELS 4
+
+/* State data for ec_sensors iio driver. */
+struct cros_ec_sensors_state {
+ /* Shared by all sensors */
+ struct cros_ec_sensors_core_state core;
+
+ struct iio_chan_spec channels[CROS_EC_SENSORS_MAX_CHANNELS];
+};
+
+static int cros_ec_sensors_read(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct cros_ec_sensors_state *st = iio_priv(indio_dev);
+ s16 data = 0;
+ s64 val64;
+ int i;
+ int ret;
+ int idx = chan->scan_index;
+
+ mutex_lock(&st->core.cmd_lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = st->core.read_ec_sensors_data(indio_dev, 1 << idx, &data);
+ if (ret < 0)
+ break;
+
+ *val = data;
+ break;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_OFFSET;
+ st->core.param.sensor_offset.flags = 0;
+
+ ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+ if (ret < 0)
+ break;
+
+ /* Save values */
+ for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
+ st->core.calib[i] =
+ st->core.resp->sensor_offset.offset[i];
+
+ *val = st->core.calib[idx];
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
+ st->core.param.sensor_range.data = EC_MOTION_SENSE_NO_VALUE;
+
+ ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+ if (ret < 0)
+ break;
+
+ val64 = st->core.resp->sensor_range.ret;
+ switch (st->core.type) {
+ case MOTIONSENSE_TYPE_ACCEL:
+ /*
+ * EC returns data in g, iio expects m/s^2.
+ * Do not use IIO_G_TO_M_S_2 to avoid precision loss.
+ */
+ *val = div_s64(val64 * 980665, 10);
+ *val2 = 10000 << (CROS_EC_SENSOR_BITS - 1);
+ ret = IIO_VAL_FRACTIONAL;
+ break;
+ case MOTIONSENSE_TYPE_GYRO:
+ /*
+ * EC returns data in dps, iio expects rad/s.
+ * Do not use IIO_DEGREE_TO_RAD to avoid precision
+ * loss. Round to the nearest integer.
+ */
+ *val = div_s64(val64 * 314159 + 9000000ULL, 1000);
+ *val2 = 18000 << (CROS_EC_SENSOR_BITS - 1);
+ ret = IIO_VAL_FRACTIONAL;
+ break;
+ case MOTIONSENSE_TYPE_MAG:
+ /*
+ * EC returns data in 16LSB / uT,
+ * iio expects Gauss
+ */
+ *val = val64;
+ *val2 = 100 << (CROS_EC_SENSOR_BITS - 1);
+ ret = IIO_VAL_FRACTIONAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ ret = cros_ec_sensors_core_read(&st->core, chan, val, val2,
+ mask);
+ break;
+ }
+ mutex_unlock(&st->core.cmd_lock);
+
+ return ret;
+}
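
The accelerometer branch above reports the scale as a fraction to avoid precision loss. A standalone computation shows what user space ends up with; the +/-2 g range below is an assumed example, not a value from the patch.

    #include <stdio.h>

    int main(void)
    {
            long long range_g = 2;                  /* EC-reported range, assumed +/-2 g */
            int bits = 16;                          /* CROS_EC_SENSOR_BITS */
            long long num = range_g * 980665 / 10;  /* numerator, as computed above */
            long long den = 10000LL << (bits - 1);  /* denominator */

            /* IIO_VAL_FRACTIONAL: reported scale is num/den */
            printf("scale = %lld/%lld = %.9f m/s^2 per LSB\n",
                   num, den, (double)num / den);    /* ~0.000599 m/s^2 */
            return 0;
    }
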
+
+static int cros_ec_sensors_write(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct cros_ec_sensors_state *st = iio_priv(indio_dev);
+ int i;
+ int ret;
+ int idx = chan->scan_index;
+
+ mutex_lock(&st->core.cmd_lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ st->core.calib[idx] = val;
+
+ /* Send to EC for each axis, even if not complete */
+ st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_OFFSET;
+ st->core.param.sensor_offset.flags =
+ MOTION_SENSE_SET_OFFSET;
+ for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
+ st->core.param.sensor_offset.offset[i] =
+ st->core.calib[i];
+ st->core.param.sensor_offset.temp =
+ EC_MOTION_SENSE_INVALID_CALIB_TEMP;
+
+ ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ if (st->core.type == MOTIONSENSE_TYPE_MAG) {
+ ret = -EINVAL;
+ break;
+ }
+ st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
+ st->core.param.sensor_range.data = val;
+
+ /* Always roundup, so caller gets at least what it asks for. */
+ st->core.param.sensor_range.roundup = 1;
+
+ ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+ break;
+ default:
+ ret = cros_ec_sensors_core_write(
+ &st->core, chan, val, val2, mask);
+ break;
+ }
+
+ mutex_unlock(&st->core.cmd_lock);
+
+ return ret;
+}
+
+static const struct iio_info ec_sensors_info = {
+ .read_raw = &cros_ec_sensors_read,
+ .write_raw = &cros_ec_sensors_write,
+ .driver_module = THIS_MODULE,
+};
+
+static int cros_ec_sensors_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
+ struct cros_ec_device *ec_device;
+ struct iio_dev *indio_dev;
+ struct cros_ec_sensors_state *state;
+ struct iio_chan_spec *channel;
+ int ret, i;
+
+ if (!ec_dev || !ec_dev->ec_dev) {
+ dev_warn(&pdev->dev, "No CROS EC device found.\n");
+ return -EINVAL;
+ }
+ ec_device = ec_dev->ec_dev;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*state));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ ret = cros_ec_sensors_core_init(pdev, indio_dev, true);
+ if (ret)
+ return ret;
+
+ indio_dev->info = &ec_sensors_info;
+ state = iio_priv(indio_dev);
+ for (channel = state->channels, i = CROS_EC_SENSOR_X;
+ i < CROS_EC_SENSOR_MAX_AXIS; i++, channel++) {
+ /* Common part */
+ channel->info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS);
+ channel->info_mask_shared_by_all =
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_FREQUENCY) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ);
+ channel->scan_type.realbits = CROS_EC_SENSOR_BITS;
+ channel->scan_type.storagebits = CROS_EC_SENSOR_BITS;
+ channel->scan_index = i;
+ channel->ext_info = cros_ec_sensors_ext_info;
+ channel->modified = 1;
+ channel->channel2 = IIO_MOD_X + i;
+ channel->scan_type.sign = 's';
+
+ /* Sensor specific */
+ switch (state->core.type) {
+ case MOTIONSENSE_TYPE_ACCEL:
+ channel->type = IIO_ACCEL;
+ break;
+ case MOTIONSENSE_TYPE_GYRO:
+ channel->type = IIO_ANGL_VEL;
+ break;
+ case MOTIONSENSE_TYPE_MAG:
+ channel->type = IIO_MAGN;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unknown motion sensor\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Timestamp */
+ channel->type = IIO_TIMESTAMP;
+ channel->channel = -1;
+ channel->scan_index = CROS_EC_SENSOR_MAX_AXIS;
+ channel->scan_type.sign = 's';
+ channel->scan_type.realbits = 64;
+ channel->scan_type.storagebits = 64;
+
+ indio_dev->channels = state->channels;
+ indio_dev->num_channels = CROS_EC_SENSORS_MAX_CHANNELS;
+
+ /* There is only enough room for accel and gyro in the io space */
+ if ((state->core.ec->cmd_readmem != NULL) &&
+ (state->core.type != MOTIONSENSE_TYPE_MAG))
+ state->core.read_ec_sensors_data = cros_ec_sensors_read_lpc;
+ else
+ state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ cros_ec_sensors_capture, NULL);
+ if (ret)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_uninit_buffer;
+
+ return 0;
+
+error_uninit_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return ret;
+}
+
+static int cros_ec_sensors_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return 0;
+}
+
+static const struct platform_device_id cros_ec_sensors_ids[] = {
+ {
+ .name = "cros-ec-accel",
+ },
+ {
+ .name = "cros-ec-gyro",
+ },
+ {
+ .name = "cros-ec-mag",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_sensors_ids);
+
+static struct platform_driver cros_ec_sensors_platform_driver = {
+ .driver = {
+ .name = "cros-ec-sensors",
+ },
+ .probe = cros_ec_sensors_probe,
+ .remove = cros_ec_sensors_remove,
+ .id_table = cros_ec_sensors_ids,
+};
+module_platform_driver(cros_ec_sensors_platform_driver);
+
+MODULE_DESCRIPTION("ChromeOS EC 3-axis sensors driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
new file mode 100644
index 000000000000..416cae5ebbd0
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -0,0 +1,450 @@
+/*
+ * cros_ec_sensors_core - Common function for Chrome OS EC sensor driver.
+ *
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/platform_device.h>
+
+#include "cros_ec_sensors_core.h"
+
+static char *cros_ec_loc[] = {
+ [MOTIONSENSE_LOC_BASE] = "base",
+ [MOTIONSENSE_LOC_LID] = "lid",
+ [MOTIONSENSE_LOC_MAX] = "unknown",
+};
+
+int cros_ec_sensors_core_init(struct platform_device *pdev,
+ struct iio_dev *indio_dev,
+ bool physical_device)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_sensors_core_state *state = iio_priv(indio_dev);
+ struct cros_ec_dev *ec = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ state->ec = ec->ec_dev;
+ state->msg = devm_kzalloc(&pdev->dev,
+ max((u16)sizeof(struct ec_params_motion_sense),
+ state->ec->max_response), GFP_KERNEL);
+ if (!state->msg)
+ return -ENOMEM;
+
+ state->resp = (struct ec_response_motion_sense *)state->msg->data;
+
+ mutex_init(&state->cmd_lock);
+
+ /* Set up the host command structure. */
+ state->msg->version = 2;
+ state->msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+ state->msg->outsize = sizeof(struct ec_params_motion_sense);
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = pdev->name;
+
+ if (physical_device) {
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ state->param.cmd = MOTIONSENSE_CMD_INFO;
+ state->param.info.sensor_num = sensor_platform->sensor_num;
+ if (cros_ec_motion_send_host_cmd(state, 0)) {
+ dev_warn(dev, "Can not access sensor info\n");
+ return -EIO;
+ }
+ state->type = state->resp->info.type;
+ state->loc = state->resp->info.location;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_core_init);
+
+int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *state,
+ u16 opt_length)
+{
+ int ret;
+
+ if (opt_length)
+ state->msg->insize = min(opt_length, state->ec->max_response);
+ else
+ state->msg->insize = state->ec->max_response;
+
+ memcpy(state->msg->data, &state->param, sizeof(state->param));
+
+ ret = cros_ec_cmd_xfer_status(state->ec, state->msg);
+ if (ret < 0)
+ return -EIO;
+
+ if (ret &&
+ state->resp != (struct ec_response_motion_sense *)state->msg->data)
+ memcpy(state->resp, state->msg->data, ret);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_motion_send_host_cmd);
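
For reference, a caller of this helper typically sets param.cmd under cmd_lock, sends the command, and picks the answer out of resp. The sketch below is illustrative only and not part of the patch; it reuses identifiers introduced in this file and its header, mirroring the read path in cros_ec_sensors.c.

    /* Usage sketch only -- not part of the patch. */
    static int example_get_sensor_range(struct cros_ec_sensors_core_state *st,
                                        int *range)
    {
            int ret;

            mutex_lock(&st->cmd_lock);
            st->param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
            st->param.sensor_range.data = EC_MOTION_SENSE_NO_VALUE;
            ret = cros_ec_motion_send_host_cmd(st, 0);
            if (!ret)
                    *range = st->resp->sensor_range.ret;
            mutex_unlock(&st->cmd_lock);

            return ret;
    }
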
+
+static ssize_t cros_ec_sensors_calibrate(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ int ret, i;
+ bool calibrate;
+
+ ret = strtobool(buf, &calibrate);
+ if (ret < 0)
+ return ret;
+ if (!calibrate)
+ return -EINVAL;
+
+ mutex_lock(&st->cmd_lock);
+ st->param.cmd = MOTIONSENSE_CMD_PERFORM_CALIB;
+ ret = cros_ec_motion_send_host_cmd(st, 0);
+ if (ret != 0) {
+ dev_warn(&indio_dev->dev, "Unable to calibrate sensor\n");
+ } else {
+ /* Save values */
+ for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
+ st->calib[i] = st->resp->perform_calib.offset[i];
+ }
+ mutex_unlock(&st->cmd_lock);
+
+ return ret ? ret : len;
+}
+
+static ssize_t cros_ec_sensors_loc(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", cros_ec_loc[st->loc]);
+}
+
+const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[] = {
+ {
+ .name = "calibrate",
+ .shared = IIO_SHARED_BY_ALL,
+ .write = cros_ec_sensors_calibrate
+ },
+ {
+ .name = "location",
+ .shared = IIO_SHARED_BY_ALL,
+ .read = cros_ec_sensors_loc
+ },
+ { },
+};
+EXPORT_SYMBOL_GPL(cros_ec_sensors_ext_info);
+
+/**
+ * cros_ec_sensors_idx_to_reg - convert index into offset in shared memory
+ * @st: pointer to state information for device
+ * @idx: sensor index (should be element of enum sensor_index)
+ *
+ * Return: address to read at
+ */
+static unsigned int cros_ec_sensors_idx_to_reg(
+ struct cros_ec_sensors_core_state *st,
+ unsigned int idx)
+{
+ /*
+ * When using the LPC interface, there is only space for two
+ * accelerometers and one gyroscope. The first halfword of the
+ * accelerometer data area is used for the angle value.
+ */
+ if (st->type == MOTIONSENSE_TYPE_ACCEL)
+ return EC_MEMMAP_ACC_DATA + sizeof(u16) *
+ (1 + idx + st->param.info.sensor_num *
+ CROS_EC_SENSOR_MAX_AXIS);
+
+ return EC_MEMMAP_GYRO_DATA + sizeof(u16) * idx;
+}
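
A quick worked example of the address arithmetic above; the 0x50 base offset is an assumed value for illustration, not taken from the patch.

    #include <stdio.h>

    int main(void)
    {
            unsigned int acc_data = 0x50;   /* assumed EC_MEMMAP_ACC_DATA offset */
            unsigned int sensor_num = 1;    /* second accelerometer */
            unsigned int axis = 2;          /* Z, i.e. CROS_EC_SENSOR_Z */

            /* skip the leading angle halfword, then three axes per sensor */
            unsigned int reg = acc_data + 2 * (1 + axis + sensor_num * 3);

            printf("Z axis of accel 1 lives at memmap offset 0x%x\n", reg); /* 0x5c */
            return 0;
    }
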
+
+static int cros_ec_sensors_cmd_read_u8(struct cros_ec_device *ec,
+ unsigned int offset, u8 *dest)
+{
+ return ec->cmd_readmem(ec, offset, 1, dest);
+}
+
+static int cros_ec_sensors_cmd_read_u16(struct cros_ec_device *ec,
+ unsigned int offset, u16 *dest)
+{
+ __le16 tmp;
+ int ret = ec->cmd_readmem(ec, offset, 2, &tmp);
+
+ if (ret >= 0)
+ *dest = le16_to_cpu(tmp);
+
+ return ret;
+}
+
+/**
+ * cros_ec_sensors_read_until_not_busy() - read the status until the EC is not busy
+ *
+ * @st: pointer to state information for device
+ *
+ * Read the EC status byte until it no longer reports busy.
+ * Return: 8-bit status if ok, -errno on failure.
+ */
+static int cros_ec_sensors_read_until_not_busy(
+ struct cros_ec_sensors_core_state *st)
+{
+ struct cros_ec_device *ec = st->ec;
+ u8 status;
+ int ret, attempts = 0;
+
+ ret = cros_ec_sensors_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ while (status & EC_MEMMAP_ACC_STATUS_BUSY_BIT) {
+ /* Give up after enough attempts, return error. */
+ if (attempts++ >= 50)
+ return -EIO;
+
+ /* Small delay every so often. */
+ if (attempts % 5 == 0)
+ msleep(25);
+
+ ret = cros_ec_sensors_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS,
+ &status);
+ if (ret < 0)
+ return ret;
+ }
+
+ return status;
+}
+
+/**
+ * read_ec_sensors_data_unsafe() - read acceleration data from EC shared memory
+ * @indio_dev: pointer to IIO device
+ * @scan_mask: bitmap of the sensor indices to scan
+ * @data: location to store data
+ *
+ * This is the unsafe function for reading the EC data. It does not guarantee
+ * that the EC will not modify the data as it is being read in.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int cros_ec_sensors_read_data_unsafe(struct iio_dev *indio_dev,
+ unsigned long scan_mask, s16 *data)
+{
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ struct cros_ec_device *ec = st->ec;
+ unsigned int i;
+ int ret;
+
+ /* Read all sensors enabled in scan_mask. Each value is 2 bytes. */
+ for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
+ ret = cros_ec_sensors_cmd_read_u16(ec,
+ cros_ec_sensors_idx_to_reg(st, i),
+ data);
+ if (ret < 0)
+ return ret;
+
+ data++;
+ }
+
+ return 0;
+}
+
+int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev,
+ unsigned long scan_mask, s16 *data)
+{
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ struct cros_ec_device *ec = st->ec;
+ u8 samp_id = 0xff, status = 0;
+ int ret, attempts = 0;
+
+ /*
+ * Continually read all data from EC until the status byte after
+ * all reads reflects that the EC is not busy and the sample id
+ * matches the sample id from before all reads. This guarantees
+ * that data read in was not modified by the EC while reading.
+ */
+ while ((status & (EC_MEMMAP_ACC_STATUS_BUSY_BIT |
+ EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK)) != samp_id) {
+ /* If we have tried to read too many times, return error. */
+ if (attempts++ >= 5)
+ return -EIO;
+
+ /* Read status byte until EC is not busy. */
+ ret = cros_ec_sensors_read_until_not_busy(st);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Store the current sample id so that we can compare to the
+ * sample id after reading the data.
+ */
+ samp_id = ret & EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK;
+
+ /* Read all EC data, format it, and store it into data. */
+ ret = cros_ec_sensors_read_data_unsafe(indio_dev, scan_mask,
+ data);
+ if (ret < 0)
+ return ret;
+
+ /* Read status byte. */
+ ret = cros_ec_sensors_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS,
+ &status);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_read_lpc);
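
The retry loop above is a seqlock-style consistency check: remember the sample id, copy the data, and accept the copy only if the id did not change. A simplified standalone sketch of the same pattern follows; the names and the 0x0f mask are illustrative, and the real code additionally waits for the EC_MEMMAP_ACC_STATUS busy bit to clear.

    /* Copy n values from src, retrying while the producer updates them. */
    static int read_consistent(volatile const unsigned char *status,
                               const short *src, short *dst, int n)
    {
            unsigned char id_before, id_after;
            int attempts, i;

            for (attempts = 0; attempts < 5; attempts++) {
                    id_before = *status & 0x0f;     /* sample-id bits (assumed mask) */
                    for (i = 0; i < n; i++)
                            dst[i] = src[i];
                    id_after = *status & 0x0f;
                    if (id_after == id_before)
                            return 0;               /* no update raced the copy */
            }

            return -1;                              /* gave up; data unreliable */
    }
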
+
+int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev,
+ unsigned long scan_mask, s16 *data)
+{
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ int ret;
+ unsigned int i;
+
+ /* Read all sensor data through a command. */
+ st->param.cmd = MOTIONSENSE_CMD_DATA;
+ ret = cros_ec_motion_send_host_cmd(st, sizeof(st->resp->data));
+ if (ret != 0) {
+ dev_warn(&indio_dev->dev, "Unable to read sensor data\n");
+ return ret;
+ }
+
+ for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
+ *data = st->resp->data.data[i];
+ data++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_read_cmd);
+
+irqreturn_t cros_ec_sensors_capture(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->cmd_lock);
+
+ /* Clear capture data. */
+ memset(st->samples, 0, indio_dev->scan_bytes);
+
+ /* Read data based on which channels are enabled in scan mask. */
+ ret = st->read_ec_sensors_data(indio_dev,
+ *(indio_dev->active_scan_mask),
+ (s16 *)st->samples);
+ if (ret < 0)
+ goto done;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, st->samples,
+ iio_get_time_ns(indio_dev));
+
+done:
+ /*
+ * Tell the core we are done with this trigger and ready for the
+ * next one.
+ */
+ iio_trigger_notify_done(indio_dev->trig);
+
+ mutex_unlock(&st->cmd_lock);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_capture);
+
+int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret = IIO_VAL_INT;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
+ st->param.ec_rate.data =
+ EC_MOTION_SENSE_NO_VALUE;
+
+ if (cros_ec_motion_send_host_cmd(st, 0))
+ ret = -EIO;
+ else
+ *val = st->resp->ec_rate.ret;
+ break;
+ case IIO_CHAN_INFO_FREQUENCY:
+ st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR;
+ st->param.sensor_odr.data =
+ EC_MOTION_SENSE_NO_VALUE;
+
+ if (cros_ec_motion_send_host_cmd(st, 0))
+ ret = -EIO;
+ else
+ *val = st->resp->sensor_odr.ret;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_core_read);
+
+int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret = 0;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_FREQUENCY:
+ st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR;
+ st->param.sensor_odr.data = val;
+
+ /* Always roundup, so caller gets at least what it asks for. */
+ st->param.sensor_odr.roundup = 1;
+
+ if (cros_ec_motion_send_host_cmd(st, 0))
+ ret = -EIO;
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
+ st->param.ec_rate.data = val;
+
+ if (cros_ec_motion_send_host_cmd(st, 0))
+ ret = -EIO;
+ else
+ st->curr_sampl_freq = val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_core_write);
+
+MODULE_DESCRIPTION("ChromeOS EC sensor hub core functions");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h
new file mode 100644
index 000000000000..8bc2ca3c2e2e
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h
@@ -0,0 +1,175 @@
+/*
+ * ChromeOS EC sensor hub
+ *
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CROS_EC_SENSORS_CORE_H
+#define __CROS_EC_SENSORS_CORE_H
+
+#include <linux/irqreturn.h>
+
+enum {
+ CROS_EC_SENSOR_X,
+ CROS_EC_SENSOR_Y,
+ CROS_EC_SENSOR_Z,
+ CROS_EC_SENSOR_MAX_AXIS,
+};
+
+/* EC returns sensor values using signed 16 bit registers */
+#define CROS_EC_SENSOR_BITS 16
+
+/*
+ * Four 16-bit channels are allowed.
+ * Good enough for current sensors, which use at most three 16-bit values.
+ */
+#define CROS_EC_SAMPLE_SIZE (sizeof(s64) * 2)
+
+/* Minimum sampling period to use when device is suspending */
+#define CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY 1000 /* 1 second */
+
+/**
+ * struct cros_ec_sensors_core_state - state data for EC sensors IIO driver
+ * @ec: cros EC device structure
+ * @cmd_lock: lock used to prevent simultaneous access to the
+ * commands.
+ * @msg: cros EC command structure
+ * @param: motion sensor parameters structure
+ * @resp: motion sensor response structure
+ * @type: type of motion sensor
+ * @loc: location where the motion sensor is placed
+ * @calib: calibration parameters. Note that trigger
+ * captured data will always provide the calibrated
+ * data
+ * @samples: static array to hold data from a single capture.
+ * For each channel we need 2 bytes, except for
+ * the timestamp. The timestamp is always last and
+ * is always 8-byte aligned.
+ * @read_ec_sensors_data: function used for accessing sensors values
+ * @curr_sampl_freq: current sampling period
+ */
+struct cros_ec_sensors_core_state {
+ struct cros_ec_device *ec;
+ struct mutex cmd_lock;
+
+ struct cros_ec_command *msg;
+ struct ec_params_motion_sense param;
+ struct ec_response_motion_sense *resp;
+
+ enum motionsensor_type type;
+ enum motionsensor_location loc;
+
+ s16 calib[CROS_EC_SENSOR_MAX_AXIS];
+
+ u8 samples[CROS_EC_SAMPLE_SIZE];
+
+ int (*read_ec_sensors_data)(struct iio_dev *indio_dev,
+ unsigned long scan_mask, s16 *data);
+
+ int curr_sampl_freq;
+};
+
+/**
+ * cros_ec_sensors_read_lpc() - retrieve data from EC shared memory
+ * @indio_dev: pointer to IIO device
+ * @scan_mask: bitmap of the sensor indices to scan
+ * @data: location to store data
+ *
+ * This is the safe function for reading the EC data. It guarantees that the
+ * data sampled was not modified by the EC while being read.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev, unsigned long scan_mask,
+ s16 *data);
+
+/**
+ * cros_ec_sensors_read_cmd() - retrieve data using the EC command protocol
+ * @indio_dev: pointer to IIO device
+ * @scan_mask: bitmap of the sensor indices to scan
+ * @data: location to store data
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask,
+ s16 *data);
+
+/**
+ * cros_ec_sensors_core_init() - basic initialization of the core structure
+ * @pdev: platform device created for the sensors
+ * @indio_dev: iio device structure of the device
+ * @physical_device: true if the device refers to a physical device
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_sensors_core_init(struct platform_device *pdev,
+ struct iio_dev *indio_dev, bool physical_device);
+
+/**
+ * cros_ec_sensors_capture() - the trigger handler function
+ * @irq: the interrupt number.
+ * @p: a pointer to the poll function.
+ *
+ * On a trigger event occurring, if the pollfunc is attached then this
+ * handler is called as a threaded interrupt (and hence may sleep). It
+ * is responsible for grabbing data from the device and pushing it into
+ * the associated buffer.
+ *
+ * Return: IRQ_HANDLED
+ */
+irqreturn_t cros_ec_sensors_capture(int irq, void *p);
+
+/**
+ * cros_ec_motion_send_host_cmd() - send motion sense host command
+ * @st: pointer to state information for device
+ * @opt_length: optional length to reduce the response size, useful on the data
+ * path. Otherwise, the maximal allowed response size is used
+ *
+ * When called, the sub-command is assumed to be set in param->cmd.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *st,
+ u16 opt_length);
+
+/**
+ * cros_ec_sensors_core_read() - function to request a value from the sensor
+ * @st: pointer to state information for device
+ * @chan: channel specification structure table
+ * @val: will contain one element making up the returned value
+ * @val2: will contain another element making up the returned value
+ * @mask: specifies which values to be requested
+ *
+ * Return: the type of value returned by the device
+ */
+int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask);
+
+/**
+ * cros_ec_sensors_core_write() - function to write a value to the sensor
+ * @st: pointer to state information for device
+ * @chan: channel specification structure table
+ * @val: first part of value to write
+ * @val2: second part of value to write
+ * @mask: specifies which values to write
+ *
+ * Return: the type of value returned by the device
+ */
+int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask);
+
+/* List of extended channel specification for all sensors */
+extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[];
+
+#endif /* __CROS_EC_SENSORS_CORE_H */
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index b5beea53d6f6..7ef94a90ecf7 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -201,7 +201,7 @@ int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
int ret;
if (val1 < 0 || val2 < 0)
- ret = -EINVAL;
+ return -EINVAL;
value = val1 * pow_10(6) + val2;
if (value) {
@@ -250,6 +250,9 @@ int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
s32 value;
int ret;
+ if (val1 < 0 || val2 < 0)
+ return -EINVAL;
+
value = convert_to_vtf_format(st->sensitivity.size,
st->sensitivity.unit_expo,
val1, val2);
diff --git a/drivers/iio/counter/104-quad-8.c b/drivers/iio/counter/104-quad-8.c
new file mode 100644
index 000000000000..2d2ee353dde7
--- /dev/null
+++ b/drivers/iio/counter/104-quad-8.c
@@ -0,0 +1,593 @@
+/*
+ * IIO driver for the ACCES 104-QUAD-8
+ * Copyright (C) 2016 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * This driver supports the ACCES 104-QUAD-8 and ACCES 104-QUAD-4.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+
+#define QUAD8_EXTENT 32
+
+static unsigned int base[max_num_isa_dev(QUAD8_EXTENT)];
+static unsigned int num_quad8;
+module_param_array(base, uint, &num_quad8, 0);
+MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
+
+#define QUAD8_NUM_COUNTERS 8
+
+/**
+ * struct quad8_iio - IIO device private data structure
+ * @preset: array of preset values
+ * @count_mode: array of count mode configurations
+ * @quadrature_mode: array of quadrature mode configurations
+ * @quadrature_scale: array of quadrature mode scale configurations
+ * @ab_enable: array of A and B inputs enable configurations
+ * @preset_enable: array of set_to_preset_on_index attribute configurations
+ * @synchronous_mode: array of index function synchronous mode configurations
+ * @index_polarity: array of index function polarity configurations
+ * @base: base port address of the IIO device
+ */
+struct quad8_iio {
+ unsigned int preset[QUAD8_NUM_COUNTERS];
+ unsigned int count_mode[QUAD8_NUM_COUNTERS];
+ unsigned int quadrature_mode[QUAD8_NUM_COUNTERS];
+ unsigned int quadrature_scale[QUAD8_NUM_COUNTERS];
+ unsigned int ab_enable[QUAD8_NUM_COUNTERS];
+ unsigned int preset_enable[QUAD8_NUM_COUNTERS];
+ unsigned int synchronous_mode[QUAD8_NUM_COUNTERS];
+ unsigned int index_polarity[QUAD8_NUM_COUNTERS];
+ unsigned int base;
+};
+
+static int quad8_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel;
+ unsigned int flags;
+ unsigned int borrow;
+ unsigned int carry;
+ int i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_INDEX) {
+ *val = !!(inb(priv->base + 0x16) & BIT(chan->channel));
+ return IIO_VAL_INT;
+ }
+
+ flags = inb(base_offset);
+ borrow = flags & BIT(0);
+ carry = !!(flags & BIT(1));
+
+ /* Borrow XOR Carry effectively doubles count range */
+ *val = (borrow ^ carry) << 24;
+
+ /* Reset Byte Pointer; transfer Counter to Output Latch */
+ outb(0x11, base_offset + 1);
+
+ for (i = 0; i < 3; i++)
+ *val |= (unsigned int)inb(base_offset) << (8 * i);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_ENABLE:
+ *val = priv->ab_enable[chan->channel];
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1;
+ *val2 = priv->quadrature_scale[chan->channel];
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+
+ return -EINVAL;
+}
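
The BORROW ^ CARRY trick above effectively doubles the 24-bit count range to 25 bits. A small standalone illustration, with made-up flag values:

    #include <stdio.h>

    int main(void)
    {
            unsigned int latch = 0x000005;  /* 24-bit output latch value (example) */
            unsigned int borrow = 0;        /* flag register bit 0 (example) */
            unsigned int carry = 1;         /* flag register bit 1 (example) */

            /* bit 24 is set when exactly one of borrow/carry is set */
            unsigned int val = ((borrow ^ carry) << 24) | latch;

            printf("count = %u\n", val);    /* 16777221: the counter wrapped past 2^24 */
            return 0;
    }
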
+
+static int quad8_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel;
+ int i;
+ unsigned int ior_cfg;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_INDEX)
+ return -EINVAL;
+
+ /* Only 24-bit values are supported */
+ if ((unsigned int)val > 0xFFFFFF)
+ return -EINVAL;
+
+ /* Reset Byte Pointer */
+ outb(0x01, base_offset + 1);
+
+ /* Counter can only be set via Preset Register */
+ for (i = 0; i < 3; i++)
+ outb(val >> (8 * i), base_offset);
+
+ /* Transfer Preset Register to Counter */
+ outb(0x08, base_offset + 1);
+
+ /* Reset Byte Pointer */
+ outb(0x01, base_offset + 1);
+
+ /* Set Preset Register back to original value */
+ val = priv->preset[chan->channel];
+ for (i = 0; i < 3; i++)
+ outb(val >> (8 * i), base_offset);
+
+ /* Reset Borrow, Carry, Compare, and Sign flags */
+ outb(0x02, base_offset + 1);
+ /* Reset Error flag */
+ outb(0x06, base_offset + 1);
+
+ return 0;
+ case IIO_CHAN_INFO_ENABLE:
+ /* only boolean values accepted */
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ priv->ab_enable[chan->channel] = val;
+
+ ior_cfg = val | priv->preset_enable[chan->channel] << 1;
+
+ /* Load I/O control configuration */
+ outb(0x40 | ior_cfg, base_offset);
+
+ return 0;
+ case IIO_CHAN_INFO_SCALE:
+ /* Quadrature scaling only available in quadrature mode */
+ if (!priv->quadrature_mode[chan->channel] && (val2 || val != 1))
+ return -EINVAL;
+
+ /* Only three gain states (1, 0.5, 0.25) */
+ if (val == 1 && !val2)
+ priv->quadrature_scale[chan->channel] = 0;
+ else if (!val)
+ switch (val2) {
+ case 500000:
+ priv->quadrature_scale[chan->channel] = 1;
+ break;
+ case 250000:
+ priv->quadrature_scale[chan->channel] = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else
+ return -EINVAL;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info quad8_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = quad8_read_raw,
+ .write_raw = quad8_write_raw
+};
+
+static ssize_t quad8_read_preset(struct iio_dev *indio_dev, uintptr_t private,
+ const struct iio_chan_spec *chan, char *buf)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", priv->preset[chan->channel]);
+}
+
+static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private,
+ const struct iio_chan_spec *chan, const char *buf, size_t len)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel;
+ unsigned int preset;
+ int ret;
+ int i;
+
+ ret = kstrtouint(buf, 0, &preset);
+ if (ret)
+ return ret;
+
+ /* Only 24-bit values are supported */
+ if (preset > 0xFFFFFF)
+ return -EINVAL;
+
+ priv->preset[chan->channel] = preset;
+
+ /* Reset Byte Pointer */
+ outb(0x01, base_offset + 1);
+
+ /* Set Preset Register */
+ for (i = 0; i < 3; i++)
+ outb(preset >> (8 * i), base_offset);
+
+ return len;
+}
+
+static ssize_t quad8_read_set_to_preset_on_index(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, char *buf)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ priv->preset_enable[chan->channel]);
+}
+
+static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
+ size_t len)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel;
+ bool preset_enable;
+ int ret;
+ unsigned int ior_cfg;
+
+ ret = kstrtobool(buf, &preset_enable);
+ if (ret)
+ return ret;
+
+ priv->preset_enable[chan->channel] = preset_enable;
+
+ ior_cfg = priv->ab_enable[chan->channel] |
+ (unsigned int)preset_enable << 1;
+
+ /* Load I/O control configuration to Input / Output Control Register */
+ outb(0x40 | ior_cfg, base_offset);
+
+ return len;
+}
+
+static const char *const quad8_noise_error_states[] = {
+ "No excessive noise is present at the count inputs",
+ "Excessive noise is present at the count inputs"
+};
+
+static int quad8_get_noise_error(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ return !!(inb(base_offset) & BIT(4));
+}
+
+static const struct iio_enum quad8_noise_error_enum = {
+ .items = quad8_noise_error_states,
+ .num_items = ARRAY_SIZE(quad8_noise_error_states),
+ .get = quad8_get_noise_error
+};
+
+static const char *const quad8_count_direction_states[] = {
+ "down",
+ "up"
+};
+
+static int quad8_get_count_direction(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ return !!(inb(base_offset) & BIT(5));
+}
+
+static const struct iio_enum quad8_count_direction_enum = {
+ .items = quad8_count_direction_states,
+ .num_items = ARRAY_SIZE(quad8_count_direction_states),
+ .get = quad8_get_count_direction
+};
+
+static const char *const quad8_count_modes[] = {
+ "normal",
+ "range limit",
+ "non-recycle",
+ "modulo-n"
+};
+
+static int quad8_set_count_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int count_mode)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ unsigned int mode_cfg = count_mode << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ priv->count_mode[chan->channel] = count_mode;
+
+ /* Add quadrature mode configuration */
+ if (priv->quadrature_mode[chan->channel])
+ mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
+
+ /* Load mode configuration to Counter Mode Register */
+ outb(0x20 | mode_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_count_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->count_mode[chan->channel];
+}
+
+static const struct iio_enum quad8_count_mode_enum = {
+ .items = quad8_count_modes,
+ .num_items = ARRAY_SIZE(quad8_count_modes),
+ .set = quad8_set_count_mode,
+ .get = quad8_get_count_mode
+};
+
+static const char *const quad8_synchronous_modes[] = {
+ "non-synchronous",
+ "synchronous"
+};
+
+static int quad8_set_synchronous_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int synchronous_mode)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const unsigned int idr_cfg = synchronous_mode |
+ priv->index_polarity[chan->channel] << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ /* Index function must be non-synchronous in non-quadrature mode */
+ if (synchronous_mode && !priv->quadrature_mode[chan->channel])
+ return -EINVAL;
+
+ priv->synchronous_mode[chan->channel] = synchronous_mode;
+
+ /* Load Index Control configuration to Index Control Register */
+ outb(0x40 | idr_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_synchronous_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->synchronous_mode[chan->channel];
+}
+
+static const struct iio_enum quad8_synchronous_mode_enum = {
+ .items = quad8_synchronous_modes,
+ .num_items = ARRAY_SIZE(quad8_synchronous_modes),
+ .set = quad8_set_synchronous_mode,
+ .get = quad8_get_synchronous_mode
+};
+
+static const char *const quad8_quadrature_modes[] = {
+ "non-quadrature",
+ "quadrature"
+};
+
+static int quad8_set_quadrature_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int quadrature_mode)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ unsigned int mode_cfg = priv->count_mode[chan->channel] << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ if (quadrature_mode)
+ mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
+ else {
+ /* Quadrature scaling only available in quadrature mode */
+ priv->quadrature_scale[chan->channel] = 0;
+
+ /* Synchronous function not supported in non-quadrature mode */
+ if (priv->synchronous_mode[chan->channel])
+ quad8_set_synchronous_mode(indio_dev, chan, 0);
+ }
+
+ priv->quadrature_mode[chan->channel] = quadrature_mode;
+
+ /* Load mode configuration to Counter Mode Register */
+ outb(0x20 | mode_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_quadrature_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->quadrature_mode[chan->channel];
+}
+
+static const struct iio_enum quad8_quadrature_mode_enum = {
+ .items = quad8_quadrature_modes,
+ .num_items = ARRAY_SIZE(quad8_quadrature_modes),
+ .set = quad8_set_quadrature_mode,
+ .get = quad8_get_quadrature_mode
+};
+
+static const char *const quad8_index_polarity_modes[] = {
+ "negative",
+ "positive"
+};
+
+static int quad8_set_index_polarity(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int index_polarity)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const unsigned int idr_cfg = priv->synchronous_mode[chan->channel] |
+ index_polarity << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ priv->index_polarity[chan->channel] = index_polarity;
+
+ /* Load Index Control configuration to Index Control Register */
+ outb(0x40 | idr_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_index_polarity(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->index_polarity[chan->channel];
+}
+
+static const struct iio_enum quad8_index_polarity_enum = {
+ .items = quad8_index_polarity_modes,
+ .num_items = ARRAY_SIZE(quad8_index_polarity_modes),
+ .set = quad8_set_index_polarity,
+ .get = quad8_get_index_polarity
+};
+
+static const struct iio_chan_spec_ext_info quad8_count_ext_info[] = {
+ {
+ .name = "preset",
+ .shared = IIO_SEPARATE,
+ .read = quad8_read_preset,
+ .write = quad8_write_preset
+ },
+ {
+ .name = "set_to_preset_on_index",
+ .shared = IIO_SEPARATE,
+ .read = quad8_read_set_to_preset_on_index,
+ .write = quad8_write_set_to_preset_on_index
+ },
+ IIO_ENUM("noise_error", IIO_SEPARATE, &quad8_noise_error_enum),
+ IIO_ENUM_AVAILABLE("noise_error", &quad8_noise_error_enum),
+ IIO_ENUM("count_direction", IIO_SEPARATE, &quad8_count_direction_enum),
+ IIO_ENUM_AVAILABLE("count_direction", &quad8_count_direction_enum),
+ IIO_ENUM("count_mode", IIO_SEPARATE, &quad8_count_mode_enum),
+ IIO_ENUM_AVAILABLE("count_mode", &quad8_count_mode_enum),
+ IIO_ENUM("quadrature_mode", IIO_SEPARATE, &quad8_quadrature_mode_enum),
+ IIO_ENUM_AVAILABLE("quadrature_mode", &quad8_quadrature_mode_enum),
+ {}
+};
+
+static const struct iio_chan_spec_ext_info quad8_index_ext_info[] = {
+ IIO_ENUM("synchronous_mode", IIO_SEPARATE,
+ &quad8_synchronous_mode_enum),
+ IIO_ENUM_AVAILABLE("synchronous_mode", &quad8_synchronous_mode_enum),
+ IIO_ENUM("index_polarity", IIO_SEPARATE, &quad8_index_polarity_enum),
+ IIO_ENUM_AVAILABLE("index_polarity", &quad8_index_polarity_enum),
+ {}
+};
+
+#define QUAD8_COUNT_CHAN(_chan) { \
+ .type = IIO_COUNT, \
+ .channel = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_ENABLE) | BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = quad8_count_ext_info, \
+ .indexed = 1 \
+}
+
+#define QUAD8_INDEX_CHAN(_chan) { \
+ .type = IIO_INDEX, \
+ .channel = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .ext_info = quad8_index_ext_info, \
+ .indexed = 1 \
+}
+
+static const struct iio_chan_spec quad8_channels[] = {
+ QUAD8_COUNT_CHAN(0), QUAD8_INDEX_CHAN(0),
+ QUAD8_COUNT_CHAN(1), QUAD8_INDEX_CHAN(1),
+ QUAD8_COUNT_CHAN(2), QUAD8_INDEX_CHAN(2),
+ QUAD8_COUNT_CHAN(3), QUAD8_INDEX_CHAN(3),
+ QUAD8_COUNT_CHAN(4), QUAD8_INDEX_CHAN(4),
+ QUAD8_COUNT_CHAN(5), QUAD8_INDEX_CHAN(5),
+ QUAD8_COUNT_CHAN(6), QUAD8_INDEX_CHAN(6),
+ QUAD8_COUNT_CHAN(7), QUAD8_INDEX_CHAN(7)
+};
+
+static int quad8_probe(struct device *dev, unsigned int id)
+{
+ struct iio_dev *indio_dev;
+ struct quad8_iio *priv;
+ int i, j;
+ unsigned int base_offset;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ if (!devm_request_region(dev, base[id], QUAD8_EXTENT,
+ dev_name(dev))) {
+ dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+ base[id], base[id] + QUAD8_EXTENT);
+ return -EBUSY;
+ }
+
+ indio_dev->info = &quad8_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = ARRAY_SIZE(quad8_channels);
+ indio_dev->channels = quad8_channels;
+ indio_dev->name = dev_name(dev);
+
+ priv = iio_priv(indio_dev);
+ priv->base = base[id];
+
+ /* Reset all counters and disable interrupt function */
+ outb(0x01, base[id] + 0x11);
+ /* Set initial configuration for all counters */
+ for (i = 0; i < QUAD8_NUM_COUNTERS; i++) {
+ base_offset = base[id] + 2 * i;
+ /* Reset Byte Pointer */
+ outb(0x01, base_offset + 1);
+ /* Reset Preset Register */
+ for (j = 0; j < 3; j++)
+ outb(0x00, base_offset);
+ /* Reset Borrow, Carry, Compare, and Sign flags */
+ outb(0x04, base_offset + 1);
+ /* Reset Error flag */
+ outb(0x06, base_offset + 1);
+ /* Binary encoding; Normal count; non-quadrature mode */
+ outb(0x20, base_offset + 1);
+ /* Disable A and B inputs; preset on index; FLG1 as Carry */
+ outb(0x40, base_offset + 1);
+ /* Disable index function; negative index polarity */
+ outb(0x60, base_offset + 1);
+ }
+ /* Enable all counters */
+ outb(0x00, base[id] + 0x11);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static struct isa_driver quad8_driver = {
+ .probe = quad8_probe,
+ .driver = {
+ .name = "104-quad-8"
+ }
+};
+
+module_isa_driver(quad8_driver, num_quad8);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("ACCES 104-QUAD-8 IIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/counter/Kconfig b/drivers/iio/counter/Kconfig
new file mode 100644
index 000000000000..44627f6e4861
--- /dev/null
+++ b/drivers/iio/counter/Kconfig
@@ -0,0 +1,24 @@
+#
+# Counter devices
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Counters"
+
+config 104_QUAD_8
+ tristate "ACCES 104-QUAD-8 driver"
+ depends on X86 && ISA_BUS_API
+ help
+ Say yes here to build support for the ACCES 104-QUAD-8 quadrature
+ encoder counter/interface device family (104-QUAD-8, 104-QUAD-4).
+
+ Performing a write to a counter's IIO_CHAN_INFO_RAW sets the counter and
+ also clears the counter's respective error flag. Although the counters
+ have a 25-bit range, only the lower 24 bits may be set, either directly
+ or via a counter's preset attribute. Interrupts are not supported by
+ this driver.
+
+ The base port addresses for the devices may be configured via the base
+ array module parameter.
+
+endmenu
diff --git a/drivers/iio/counter/Makefile b/drivers/iio/counter/Makefile
new file mode 100644
index 000000000000..007e88411648
--- /dev/null
+++ b/drivers/iio/counter/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for IIO counter devices
+#
+
+# When adding new entries keep the list in alphabetical order
+
+obj-$(CONFIG_104_QUAD_8) += 104-quad-8.o
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 120b24478469..d3084028905b 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -200,6 +200,16 @@ config AD8801
To compile this driver as a module choose M here: the module will be called
ad8801.
+config DPOT_DAC
+ tristate "DAC emulation using a DPOT"
+ depends on OF
+ help
+ Say yes here to build support for DAC emulation using a digital
+ potentiometer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called dpot-dac.
+
config LPC18XX_DAC
tristate "NXP LPC18xx DAC driver"
depends on ARCH_LPC18XX || COMPILE_TEST
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 27642bbf75f2..f01bf4a99867 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_AD5686) += ad5686.o
obj-$(CONFIG_AD7303) += ad7303.o
obj-$(CONFIG_AD8801) += ad8801.o
obj-$(CONFIG_CIO_DAC) += cio-dac.o
+obj-$(CONFIG_DPOT_DAC) += dpot-dac.o
obj-$(CONFIG_LPC18XX_DAC) += lpc18xx_dac.o
obj-$(CONFIG_M62332) += m62332.o
obj-$(CONFIG_MAX517) += max517.o
diff --git a/drivers/iio/dac/ad5592r.c b/drivers/iio/dac/ad5592r.c
index 0b235a2c7359..6eed5b7729be 100644
--- a/drivers/iio/dac/ad5592r.c
+++ b/drivers/iio/dac/ad5592r.c
@@ -17,7 +17,7 @@
#define AD5592R_GPIO_READBACK_EN BIT(10)
#define AD5592R_LDAC_READBACK_EN BIT(6)
-static int ad5592r_spi_wnop_r16(struct ad5592r_state *st, u16 *buf)
+static int ad5592r_spi_wnop_r16(struct ad5592r_state *st, __be16 *buf)
{
struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
struct spi_transfer t = {
diff --git a/drivers/iio/dac/dpot-dac.c b/drivers/iio/dac/dpot-dac.c
new file mode 100644
index 000000000000..960a2b430480
--- /dev/null
+++ b/drivers/iio/dac/dpot-dac.c
@@ -0,0 +1,266 @@
+/*
+ * IIO DAC emulation driver using a digital potentiometer
+ *
+ * Copyright (C) 2016 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * It is assumed that the dpot is used as a voltage divider between the
+ * current dpot wiper setting and the maximum resistance of the dpot. The
+ * divided voltage is provided by a vref regulator.
+ *
+ * .------.
+ * .-----------. | |
+ * | vref |--' .---.
+ * | regulator |--. | |
+ * '-----------' | | d |
+ * | | p |
+ * | | o | wiper
+ * | | t |<---------+
+ * | | |
+ * | '---' dac output voltage
+ * | |
+ * '------+------------+
+ */
+
+#include <linux/err.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+struct dpot_dac {
+ struct regulator *vref;
+ struct iio_channel *dpot;
+ u32 max_ohms;
+};
+
+static const struct iio_chan_spec dpot_dac_iio_channel = {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
+ | BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW),
+ .output = 1,
+ .indexed = 1,
+};
+
+static int dpot_dac_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct dpot_dac *dac = iio_priv(indio_dev);
+ int ret;
+ unsigned long long tmp;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return iio_read_channel_raw(dac->dpot, val);
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = iio_read_channel_scale(dac->dpot, val, val2);
+ switch (ret) {
+ case IIO_VAL_FRACTIONAL_LOG2:
+ tmp = *val * 1000000000LL;
+ do_div(tmp, dac->max_ohms);
+ tmp *= regulator_get_voltage(dac->vref) / 1000;
+ do_div(tmp, 1000000000LL);
+ *val = tmp;
+ return ret;
+ case IIO_VAL_INT:
+ /*
+ * Convert integer scale to fractional scale by
+ * setting the denominator (val2) to one...
+ */
+ *val2 = 1;
+ ret = IIO_VAL_FRACTIONAL;
+ /* ...and fall through. */
+ case IIO_VAL_FRACTIONAL:
+ *val *= regulator_get_voltage(dac->vref) / 1000;
+ *val2 *= dac->max_ohms;
+ break;
+ }
+
+ return ret;
+ }
+
+ return -EINVAL;
+}
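
The IIO_VAL_FRACTIONAL branch above turns the dpot's ohms-per-step scale into millivolts-per-step for the emulated DAC. A standalone computation with assumed numbers (3.3 V vref, 100 kohm dpot reporting a 390/1 ohm scale) shows the result:

    #include <stdio.h>

    int main(void)
    {
            long vref_uV = 3300000;         /* regulator_get_voltage(), assumed 3.3 V */
            long scale_num = 390;           /* dpot scale numerator (ohms), assumed */
            long scale_den = 1;             /* dpot scale denominator, assumed */
            long max_ohms = 100000;         /* dpot_dac_channel_max_ohms(), assumed */

            /* as above: val *= vref_mV, val2 *= max_ohms */
            long num = scale_num * (vref_uV / 1000);
            long den = scale_den * max_ohms;

            printf("dac scale = %ld/%ld mV per step (~%.2f mV)\n",
                   num, den, (double)num / den);    /* ~12.87 mV per wiper step */
            return 0;
    }
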
+
+static int dpot_dac_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct dpot_dac *dac = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ *type = IIO_VAL_INT;
+ return iio_read_avail_channel_raw(dac->dpot, vals, length);
+ }
+
+ return -EINVAL;
+}
+
+static int dpot_dac_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct dpot_dac *dac = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return iio_write_channel_raw(dac->dpot, val);
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info dpot_dac_info = {
+ .read_raw = dpot_dac_read_raw,
+ .read_avail = dpot_dac_read_avail,
+ .write_raw = dpot_dac_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int dpot_dac_channel_max_ohms(struct iio_dev *indio_dev)
+{
+ struct device *dev = &indio_dev->dev;
+ struct dpot_dac *dac = iio_priv(indio_dev);
+ unsigned long long tmp;
+ int ret;
+ int val;
+ int val2;
+ int max;
+
+ ret = iio_read_max_channel_raw(dac->dpot, &max);
+ if (ret < 0) {
+ dev_err(dev, "dpot does not indicate its raw maximum value\n");
+ return ret;
+ }
+
+ switch (iio_read_channel_scale(dac->dpot, &val, &val2)) {
+ case IIO_VAL_INT:
+ return max * val;
+ case IIO_VAL_FRACTIONAL:
+ tmp = (unsigned long long)max * val;
+ do_div(tmp, val2);
+ return tmp;
+ case IIO_VAL_FRACTIONAL_LOG2:
+ tmp = val * 1000000000LL * max >> val2;
+ do_div(tmp, 1000000000LL);
+ return tmp;
+ default:
+ dev_err(dev, "dpot has a scale that is too weird\n");
+ }
+
+ return -EINVAL;
+}
+
+static int dpot_dac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iio_dev *indio_dev;
+ struct dpot_dac *dac;
+ enum iio_chan_type type;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*dac));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+ dac = iio_priv(indio_dev);
+
+ indio_dev->name = dev_name(dev);
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &dpot_dac_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = &dpot_dac_iio_channel;
+ indio_dev->num_channels = 1;
+
+ dac->vref = devm_regulator_get(dev, "vref");
+ if (IS_ERR(dac->vref)) {
+ if (PTR_ERR(dac->vref) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get vref regulator\n");
+ return PTR_ERR(dac->vref);
+ }
+
+ dac->dpot = devm_iio_channel_get(dev, "dpot");
+ if (IS_ERR(dac->dpot)) {
+ if (PTR_ERR(dac->dpot) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get dpot input channel\n");
+ return PTR_ERR(dac->dpot);
+ }
+
+ ret = iio_get_channel_type(dac->dpot, &type);
+ if (ret < 0)
+ return ret;
+
+ if (type != IIO_RESISTANCE) {
+ dev_err(dev, "dpot is of the wrong type\n");
+ return -EINVAL;
+ }
+
+ ret = dpot_dac_channel_max_ohms(indio_dev);
+ if (ret < 0)
+ return ret;
+ dac->max_ohms = ret;
+
+ ret = regulator_enable(dac->vref);
+ if (ret) {
+ dev_err(dev, "failed to enable the vref regulator\n");
+ return ret;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(dev, "failed to register iio device\n");
+ goto disable_reg;
+ }
+
+ return 0;
+
+disable_reg:
+ regulator_disable(dac->vref);
+ return ret;
+}
+
+static int dpot_dac_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct dpot_dac *dac = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ regulator_disable(dac->vref);
+
+ return 0;
+}
+
+static const struct of_device_id dpot_dac_match[] = {
+ { .compatible = "dpot-dac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dpot_dac_match);
+
+static struct platform_driver dpot_dac_driver = {
+ .probe = dpot_dac_probe,
+ .remove = dpot_dac_remove,
+ .driver = {
+ .name = "iio-dpot-dac",
+ .of_match_table = dpot_dac_match,
+ },
+};
+module_platform_driver(dpot_dac_driver);
+
+MODULE_DESCRIPTION("DAC emulation driver using a digital potentiometer");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index cca935c06f2b..db109f0cdd8c 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -18,6 +18,8 @@
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -26,12 +28,20 @@
#define MCP4725_DRV_NAME "mcp4725"
+#define MCP472X_REF_VDD 0x00
+#define MCP472X_REF_VREF_UNBUFFERED 0x02
+#define MCP472X_REF_VREF_BUFFERED 0x03
+
struct mcp4725_data {
struct i2c_client *client;
- u16 vref_mv;
+ int id;
+ unsigned ref_mode;
+ bool vref_buffered;
u16 dac_value;
bool powerdown;
unsigned powerdown_mode;
+ struct regulator *vdd_reg;
+ struct regulator *vref_reg;
};
static int mcp4725_suspend(struct device *dev)
@@ -86,6 +96,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev,
return 0;
inoutbuf[0] = 0x60; /* write EEPROM */
+ inoutbuf[0] |= data->ref_mode << 3;
inoutbuf[1] = data->dac_value >> 4;
inoutbuf[2] = (data->dac_value & 0xf) << 4;
@@ -278,18 +289,49 @@ static int mcp4725_set_value(struct iio_dev *indio_dev, int val)
return 0;
}
+static int mcp4726_set_cfg(struct iio_dev *indio_dev)
+{
+ struct mcp4725_data *data = iio_priv(indio_dev);
+ u8 outbuf[3];
+ int ret;
+
+ outbuf[0] = 0x40;
+ outbuf[0] |= data->ref_mode << 3;
+ if (data->powerdown)
+ outbuf[0] |= data->powerdown << 1;
+ outbuf[1] = data->dac_value >> 4;
+ outbuf[2] = (data->dac_value & 0xf) << 4;
+
+ ret = i2c_master_send(data->client, outbuf, 3);
+ if (ret < 0)
+ return ret;
+ else if (ret != 3)
+ return -EIO;
+ else
+ return 0;
+}
+
static int mcp4725_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct mcp4725_data *data = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
*val = data->dac_value;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = data->vref_mv;
+ if (data->ref_mode == MCP472X_REF_VDD)
+ ret = regulator_get_voltage(data->vdd_reg);
+ else
+ ret = regulator_get_voltage(data->vref_reg);
+
+ if (ret < 0)
+ return ret;
+
+ *val = ret / 1000;
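+	/*
+	 * The resulting scale is vref_mV / 2^12 for this 12-bit DAC; for
+	 * example (illustrative supply value), a 3.3 V reference gives
+	 * 3300 / 4096 ~= 0.81 mV per LSB.
+	 */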
*val2 = 12;
return IIO_VAL_FRACTIONAL_LOG2;
}
@@ -323,27 +365,98 @@ static const struct iio_info mcp4725_info = {
.driver_module = THIS_MODULE,
};
+#ifdef CONFIG_OF
+static int mcp4725_probe_dt(struct device *dev,
+ struct mcp4725_platform_data *pdata)
+{
+ struct device_node *np = dev->of_node;
+
+ if (!np)
+ return -ENODEV;
+
+	/* check if the vref-supply is defined */
+ pdata->use_vref = of_property_read_bool(np, "vref-supply");
+ pdata->vref_buffered =
+ of_property_read_bool(np, "microchip,vref-buffered");
+
+ return 0;
+}
+#else
+static int mcp4725_probe_dt(struct device *dev,
+ struct mcp4725_platform_data *platform_data)
+{
+ return -ENODEV;
+}
+#endif
+
static int mcp4725_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct mcp4725_data *data;
struct iio_dev *indio_dev;
- struct mcp4725_platform_data *platform_data = client->dev.platform_data;
- u8 inbuf[3];
+ struct mcp4725_platform_data *pdata, pdata_dt;
+ u8 inbuf[4];
u8 pd;
+ u8 ref;
int err;
- if (!platform_data || !platform_data->vref_mv) {
- dev_err(&client->dev, "invalid platform data");
- return -EINVAL;
- }
-
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (indio_dev == NULL)
return -ENOMEM;
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
+ data->id = id->driver_data;
+ pdata = dev_get_platdata(&client->dev);
+
+ if (!pdata) {
+ err = mcp4725_probe_dt(&client->dev, &pdata_dt);
+ if (err) {
+ dev_err(&client->dev,
+ "invalid platform or devicetree data");
+ return err;
+ }
+ pdata = &pdata_dt;
+ }
+
+ if (data->id == MCP4725 && pdata->use_vref) {
+ dev_err(&client->dev,
+ "external reference is unavailable on MCP4725");
+ return -EINVAL;
+ }
+
+ if (!pdata->use_vref && pdata->vref_buffered) {
+ dev_err(&client->dev,
+ "buffering is unavailable on the internal reference");
+ return -EINVAL;
+ }
+
+ if (!pdata->use_vref)
+ data->ref_mode = MCP472X_REF_VDD;
+ else
+ data->ref_mode = pdata->vref_buffered ?
+ MCP472X_REF_VREF_BUFFERED :
+ MCP472X_REF_VREF_UNBUFFERED;
+
+ data->vdd_reg = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(data->vdd_reg))
+ return PTR_ERR(data->vdd_reg);
+
+ err = regulator_enable(data->vdd_reg);
+ if (err)
+ return err;
+
+ if (pdata->use_vref) {
+ data->vref_reg = devm_regulator_get(&client->dev, "vref");
+ if (IS_ERR(data->vref_reg)) {
+ err = PTR_ERR(data->vref_reg);
+ goto err_disable_vdd_reg;
+ }
+
+ err = regulator_enable(data->vref_reg);
+ if (err)
+ goto err_disable_vdd_reg;
+ }
indio_dev->dev.parent = &client->dev;
indio_dev->name = id->name;
@@ -352,25 +465,56 @@ static int mcp4725_probe(struct i2c_client *client,
indio_dev->num_channels = 1;
indio_dev->modes = INDIO_DIRECT_MODE;
- data->vref_mv = platform_data->vref_mv;
+ /* read current DAC value and settings */
+ err = i2c_master_recv(client, inbuf, data->id == MCP4725 ? 3 : 4);
- /* read current DAC value */
- err = i2c_master_recv(client, inbuf, 3);
if (err < 0) {
dev_err(&client->dev, "failed to read DAC value");
- return err;
+ goto err_disable_vref_reg;
}
pd = (inbuf[0] >> 1) & 0x3;
data->powerdown = pd > 0 ? true : false;
- data->powerdown_mode = pd ? pd - 1 : 2; /* largest register to gnd */
+ data->powerdown_mode = pd ? pd - 1 : 2; /* largest resistor to gnd */
data->dac_value = (inbuf[1] << 4) | (inbuf[2] >> 4);
+ if (data->id == MCP4726)
+ ref = (inbuf[3] >> 3) & 0x3;
+
+ if (data->id == MCP4726 && ref != data->ref_mode) {
+ dev_info(&client->dev,
+ "voltage reference mode differs (conf: %u, eeprom: %u), setting %u",
+ data->ref_mode, ref, data->ref_mode);
+ err = mcp4726_set_cfg(indio_dev);
+ if (err < 0)
+ goto err_disable_vref_reg;
+ }
+
+ err = iio_device_register(indio_dev);
+ if (err)
+ goto err_disable_vref_reg;
+
+ return 0;
+
+err_disable_vref_reg:
+ if (data->vref_reg)
+ regulator_disable(data->vref_reg);
- return iio_device_register(indio_dev);
+err_disable_vdd_reg:
+ regulator_disable(data->vdd_reg);
+
+ return err;
}
static int mcp4725_remove(struct i2c_client *client)
{
- iio_device_unregister(i2c_get_clientdata(client));
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct mcp4725_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ if (data->vref_reg)
+ regulator_disable(data->vref_reg);
+ regulator_disable(data->vdd_reg);
+
return 0;
}
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 205a84420ae9..3126cf05e6b9 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -84,6 +84,24 @@ config HID_SENSOR_GYRO_3D
Say yes here to build support for the HID SENSOR
Gyroscope 3D.
+config MPU3050
+ tristate
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select REGMAP
+
+config MPU3050_I2C
+ tristate "Invensense MPU3050 devices on I2C"
+ depends on !(INPUT_MPU3050=y || INPUT_MPU3050=m)
+ depends on I2C
+ select MPU3050
+ select REGMAP_I2C
+ select I2C_MUX
+ help
+ This driver supports the Invensense MPU3050 gyroscope over I2C.
+ This driver can be built as a module. The module will be called
+ inv-mpu3050-i2c.
+
config IIO_ST_GYRO_3AXIS
tristate "STMicroelectronics gyroscopes 3-Axis Driver"
depends on (I2C || SPI_MASTER) && SYSFS
diff --git a/drivers/iio/gyro/Makefile b/drivers/iio/gyro/Makefile
index f866a4be0667..f0e149a606b0 100644
--- a/drivers/iio/gyro/Makefile
+++ b/drivers/iio/gyro/Makefile
@@ -14,6 +14,11 @@ obj-$(CONFIG_BMG160_SPI) += bmg160_spi.o
obj-$(CONFIG_HID_SENSOR_GYRO_3D) += hid-sensor-gyro-3d.o
+# Currently this is rolled into one module, split it if
+# we ever create a separate SPI interface for MPU-3050
+obj-$(CONFIG_MPU3050) += mpu3050.o
+mpu3050-objs := mpu3050-core.o mpu3050-i2c.o
+
itg3200-y := itg3200_core.o
itg3200-$(CONFIG_IIO_BUFFER) += itg3200_buffer.o
obj-$(CONFIG_ITG3200) += itg3200.o
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
new file mode 100644
index 000000000000..2be2a5d287e6
--- /dev/null
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -0,0 +1,1306 @@
+/*
+ * MPU3050 gyroscope driver
+ *
+ * Copyright (C) 2016 Linaro Ltd.
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the input subsystem driver, Copyright (C) 2011 Wistron Co.Ltd
+ * Joseph Lai <joseph_lai@wistron.com> and trimmed down by
+ * Alan Cox <alan@linux.intel.com> in turn based on bma023.c.
+ * Device behaviour based on a misc driver posted by Nathan Royer in 2011.
+ *
+ * TODO: add support for setting up the low pass 3dB frequency.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+
+#include "mpu3050.h"
+
+#define MPU3050_CHIP_ID 0x69
+
+/*
+ * Register map: anything suffixed *_H is a big-endian high byte and always
+ * followed by the corresponding low byte (*_L) even though these are not
+ * explicitly included in the register definitions.
+ */
+#define MPU3050_CHIP_ID_REG 0x00
+#define MPU3050_PRODUCT_ID_REG 0x01
+#define MPU3050_XG_OFFS_TC 0x05
+#define MPU3050_YG_OFFS_TC 0x08
+#define MPU3050_ZG_OFFS_TC 0x0B
+#define MPU3050_X_OFFS_USR_H 0x0C
+#define MPU3050_Y_OFFS_USR_H 0x0E
+#define MPU3050_Z_OFFS_USR_H 0x10
+#define MPU3050_FIFO_EN 0x12
+#define MPU3050_AUX_VDDIO 0x13
+#define MPU3050_SLV_ADDR 0x14
+#define MPU3050_SMPLRT_DIV 0x15
+#define MPU3050_DLPF_FS_SYNC 0x16
+#define MPU3050_INT_CFG 0x17
+#define MPU3050_AUX_ADDR 0x18
+#define MPU3050_INT_STATUS 0x1A
+#define MPU3050_TEMP_H 0x1B
+#define MPU3050_XOUT_H 0x1D
+#define MPU3050_YOUT_H 0x1F
+#define MPU3050_ZOUT_H 0x21
+#define MPU3050_DMP_CFG1 0x35
+#define MPU3050_DMP_CFG2 0x36
+#define MPU3050_BANK_SEL 0x37
+#define MPU3050_MEM_START_ADDR 0x38
+#define MPU3050_MEM_R_W 0x39
+#define MPU3050_FIFO_COUNT_H 0x3A
+#define MPU3050_FIFO_R 0x3C
+#define MPU3050_USR_CTRL 0x3D
+#define MPU3050_PWR_MGM 0x3E
+
+/* MPU memory bank read options */
+#define MPU3050_MEM_PRFTCH BIT(5)
+#define MPU3050_MEM_USER_BANK BIT(4)
+/* Bits 8-11 select memory bank */
+#define MPU3050_MEM_RAM_BANK_0 0
+#define MPU3050_MEM_RAM_BANK_1 1
+#define MPU3050_MEM_RAM_BANK_2 2
+#define MPU3050_MEM_RAM_BANK_3 3
+#define MPU3050_MEM_OTP_BANK_0 4
+
+#define MPU3050_AXIS_REGS(axis) (MPU3050_XOUT_H + (axis * 2))
+
+/* Register bits */
+
+/* FIFO Enable */
+#define MPU3050_FIFO_EN_FOOTER BIT(0)
+#define MPU3050_FIFO_EN_AUX_ZOUT BIT(1)
+#define MPU3050_FIFO_EN_AUX_YOUT BIT(2)
+#define MPU3050_FIFO_EN_AUX_XOUT BIT(3)
+#define MPU3050_FIFO_EN_GYRO_ZOUT BIT(4)
+#define MPU3050_FIFO_EN_GYRO_YOUT BIT(5)
+#define MPU3050_FIFO_EN_GYRO_XOUT BIT(6)
+#define MPU3050_FIFO_EN_TEMP_OUT BIT(7)
+
+/*
+ * Digital Low Pass filter (DLPF)
+ * Full Scale (FS)
+ * and Synchronization
+ */
+#define MPU3050_EXT_SYNC_NONE 0x00
+#define MPU3050_EXT_SYNC_TEMP 0x20
+#define MPU3050_EXT_SYNC_GYROX 0x40
+#define MPU3050_EXT_SYNC_GYROY 0x60
+#define MPU3050_EXT_SYNC_GYROZ 0x80
+#define MPU3050_EXT_SYNC_ACCELX 0xA0
+#define MPU3050_EXT_SYNC_ACCELY 0xC0
+#define MPU3050_EXT_SYNC_ACCELZ 0xE0
+#define MPU3050_EXT_SYNC_MASK 0xE0
+#define MPU3050_EXT_SYNC_SHIFT 5
+
+#define MPU3050_FS_250DPS 0x00
+#define MPU3050_FS_500DPS 0x08
+#define MPU3050_FS_1000DPS 0x10
+#define MPU3050_FS_2000DPS 0x18
+#define MPU3050_FS_MASK 0x18
+#define MPU3050_FS_SHIFT 3
+
+#define MPU3050_DLPF_CFG_256HZ_NOLPF2 0x00
+#define MPU3050_DLPF_CFG_188HZ 0x01
+#define MPU3050_DLPF_CFG_98HZ 0x02
+#define MPU3050_DLPF_CFG_42HZ 0x03
+#define MPU3050_DLPF_CFG_20HZ 0x04
+#define MPU3050_DLPF_CFG_10HZ 0x05
+#define MPU3050_DLPF_CFG_5HZ 0x06
+#define MPU3050_DLPF_CFG_2100HZ_NOLPF 0x07
+#define MPU3050_DLPF_CFG_MASK 0x07
+#define MPU3050_DLPF_CFG_SHIFT 0
+
+/* Interrupt config */
+#define MPU3050_INT_RAW_RDY_EN BIT(0)
+#define MPU3050_INT_DMP_DONE_EN BIT(1)
+#define MPU3050_INT_MPU_RDY_EN BIT(2)
+#define MPU3050_INT_ANYRD_2CLEAR BIT(4)
+#define MPU3050_INT_LATCH_EN BIT(5)
+#define MPU3050_INT_OPEN BIT(6)
+#define MPU3050_INT_ACTL BIT(7)
+/* Interrupt status */
+#define MPU3050_INT_STATUS_RAW_RDY BIT(0)
+#define MPU3050_INT_STATUS_DMP_DONE BIT(1)
+#define MPU3050_INT_STATUS_MPU_RDY BIT(2)
+#define MPU3050_INT_STATUS_FIFO_OVFLW BIT(7)
+/* USR_CTRL */
+#define MPU3050_USR_CTRL_FIFO_EN BIT(6)
+#define MPU3050_USR_CTRL_AUX_IF_EN BIT(5)
+#define MPU3050_USR_CTRL_AUX_IF_RST BIT(3)
+#define MPU3050_USR_CTRL_FIFO_RST BIT(1)
+#define MPU3050_USR_CTRL_GYRO_RST BIT(0)
+/* PWR_MGM */
+#define MPU3050_PWR_MGM_PLL_X 0x01
+#define MPU3050_PWR_MGM_PLL_Y 0x02
+#define MPU3050_PWR_MGM_PLL_Z 0x03
+#define MPU3050_PWR_MGM_CLKSEL_MASK 0x07
+#define MPU3050_PWR_MGM_STBY_ZG BIT(3)
+#define MPU3050_PWR_MGM_STBY_YG BIT(4)
+#define MPU3050_PWR_MGM_STBY_XG BIT(5)
+#define MPU3050_PWR_MGM_SLEEP BIT(6)
+#define MPU3050_PWR_MGM_RESET BIT(7)
+#define MPU3050_PWR_MGM_MASK 0xff
+
+/*
+ * Fullscale precision is (for finest precision) +/- 250 deg/s, so the full
+ * scale is actually 500 deg/s. All 16 bits are then used to cover this scale,
+ * in two's complement.
+ */
+static unsigned int mpu3050_fs_precision[] = {
+ IIO_DEGREE_TO_RAD(250),
+ IIO_DEGREE_TO_RAD(500),
+ IIO_DEGREE_TO_RAD(1000),
+ IIO_DEGREE_TO_RAD(2000)
+};
+
+/*
+ * Regulator names
+ */
+static const char mpu3050_reg_vdd[] = "vdd";
+static const char mpu3050_reg_vlogic[] = "vlogic";
+
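+/*
+ * The resulting sample rate is the base frequency (8 kHz with the low pass
+ * filter bypassed, otherwise 1 kHz) divided by (divisor + 1). For example,
+ * the driver defaults of LPF 188 Hz and divisor 99 give 1000 / 100 = 10 Hz.
+ */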
+static unsigned int mpu3050_get_freq(struct mpu3050 *mpu3050)
+{
+ unsigned int freq;
+
+ if (mpu3050->lpf == MPU3050_DLPF_CFG_256HZ_NOLPF2)
+ freq = 8000;
+ else
+ freq = 1000;
+ freq /= (mpu3050->divisor + 1);
+
+ return freq;
+}
+
+static int mpu3050_start_sampling(struct mpu3050 *mpu3050)
+{
+ __be16 raw_val[3];
+ int ret;
+ int i;
+
+ /* Reset */
+ ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_RESET, MPU3050_PWR_MGM_RESET);
+ if (ret)
+ return ret;
+
+ /* Turn on the Z-axis PLL */
+ ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_CLKSEL_MASK,
+ MPU3050_PWR_MGM_PLL_Z);
+ if (ret)
+ return ret;
+
+ /* Write calibration offset registers */
+ for (i = 0; i < 3; i++)
+ raw_val[i] = cpu_to_be16(mpu3050->calibration[i]);
+
+ ret = regmap_bulk_write(mpu3050->map, MPU3050_X_OFFS_USR_H, raw_val,
+ sizeof(raw_val));
+ if (ret)
+ return ret;
+
+ /* Set low pass filter (sample rate), sync and full scale */
+ ret = regmap_write(mpu3050->map, MPU3050_DLPF_FS_SYNC,
+ MPU3050_EXT_SYNC_NONE << MPU3050_EXT_SYNC_SHIFT |
+ mpu3050->fullscale << MPU3050_FS_SHIFT |
+ mpu3050->lpf << MPU3050_DLPF_CFG_SHIFT);
+ if (ret)
+ return ret;
+
+ /* Set up sampling frequency */
+ ret = regmap_write(mpu3050->map, MPU3050_SMPLRT_DIV, mpu3050->divisor);
+ if (ret)
+ return ret;
+
+ /*
+ * Max 50 ms start-up time after setting DLPF_FS_SYNC
+ * according to the data sheet, then wait for the next sample
+ * at this frequency T = 1000/f ms.
+ */
+ msleep(50 + 1000 / mpu3050_get_freq(mpu3050));
+
+ return 0;
+}
+
+static int mpu3050_set_8khz_samplerate(struct mpu3050 *mpu3050)
+{
+ int ret;
+ u8 divisor;
+ enum mpu3050_lpf lpf;
+
+ lpf = mpu3050->lpf;
+ divisor = mpu3050->divisor;
+
+ mpu3050->lpf = LPF_256_HZ_NOLPF; /* 8 kHz base frequency */
+ mpu3050->divisor = 0; /* Divide by 1 */
+ ret = mpu3050_start_sampling(mpu3050);
+
+ mpu3050->lpf = lpf;
+ mpu3050->divisor = divisor;
+
+ return ret;
+}
+
+static int mpu3050_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ int ret;
+ __be16 raw_val;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_TEMP:
+ /* The temperature scaling is (x+23000)/280 Celsius */
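+			/*
+			 * For example, a raw reading of -13200 gives
+			 * (-13200 + 23000) / 280 = 35 degrees Celsius.
+			 */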
+ *val = 23000;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = mpu3050->calibration[chan->scan_index-1];
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = mpu3050_get_freq(mpu3050);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_TEMP:
+			/* Millidegrees; see the temperature scaling comment above */
+ *val = 1000;
+ *val2 = 280;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_ANGL_VEL:
+ /*
+			 * Convert to the corresponding full scale in
+			 * radians. All 16 bits are used, with sign, to
+			 * span the available scale; to account for the
+			 * one value missing when multiplying by 1/S16_MAX,
+			 * multiply by 2/U16_MAX instead.
+ */
+ *val = mpu3050_fs_precision[mpu3050->fullscale] * 2;
+ *val2 = U16_MAX;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_RAW:
+ /* Resume device */
+ pm_runtime_get_sync(mpu3050->dev);
+ mutex_lock(&mpu3050->lock);
+
+ ret = mpu3050_set_8khz_samplerate(mpu3050);
+ if (ret)
+ goto out_read_raw_unlock;
+
+ switch (chan->type) {
+ case IIO_TEMP:
+ ret = regmap_bulk_read(mpu3050->map, MPU3050_TEMP_H,
+ &raw_val, sizeof(raw_val));
+ if (ret) {
+ dev_err(mpu3050->dev,
+ "error reading temperature\n");
+ goto out_read_raw_unlock;
+ }
+
+ *val = be16_to_cpu(raw_val);
+ ret = IIO_VAL_INT;
+
+ goto out_read_raw_unlock;
+ case IIO_ANGL_VEL:
+ ret = regmap_bulk_read(mpu3050->map,
+ MPU3050_AXIS_REGS(chan->scan_index-1),
+ &raw_val,
+ sizeof(raw_val));
+ if (ret) {
+ dev_err(mpu3050->dev,
+ "error reading axis data\n");
+ goto out_read_raw_unlock;
+ }
+
+ *val = be16_to_cpu(raw_val);
+ ret = IIO_VAL_INT;
+
+ goto out_read_raw_unlock;
+ default:
+ ret = -EINVAL;
+ goto out_read_raw_unlock;
+ }
+ default:
+ break;
+ }
+
+ return -EINVAL;
+
+out_read_raw_unlock:
+ mutex_unlock(&mpu3050->lock);
+ pm_runtime_mark_last_busy(mpu3050->dev);
+ pm_runtime_put_autosuspend(mpu3050->dev);
+
+ return ret;
+}
+
+static int mpu3050_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int val, int val2, long mask)
+{
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ /*
+ * Couldn't figure out a way to precalculate these at compile time.
+ */
+ unsigned int fs250 =
+ DIV_ROUND_CLOSEST(mpu3050_fs_precision[0] * 1000000 * 2,
+ U16_MAX);
+ unsigned int fs500 =
+ DIV_ROUND_CLOSEST(mpu3050_fs_precision[1] * 1000000 * 2,
+ U16_MAX);
+ unsigned int fs1000 =
+ DIV_ROUND_CLOSEST(mpu3050_fs_precision[2] * 1000000 * 2,
+ U16_MAX);
+ unsigned int fs2000 =
+ DIV_ROUND_CLOSEST(mpu3050_fs_precision[3] * 1000000 * 2,
+ U16_MAX);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (chan->type != IIO_ANGL_VEL)
+ return -EINVAL;
+ mpu3050->calibration[chan->scan_index-1] = val;
+ return 0;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ /*
+ * The max samplerate is 8000 Hz, the minimum
+ * 1000 / 256 ~= 4 Hz
+ */
+ if (val < 4 || val > 8000)
+ return -EINVAL;
+
+ /*
+ * Above 1000 Hz we must turn off the digital low pass filter
+ * so we get a base frequency of 8kHz to the divider
+ */
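+		/*
+		 * For example, a requested 4000 Hz gives a divisor of
+		 * 8000 / 4000 - 1 = 1, while a requested 200 Hz (handled
+		 * below) gives 1000 / 200 - 1 = 4.
+		 */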
+ if (val > 1000) {
+ mpu3050->lpf = LPF_256_HZ_NOLPF;
+ mpu3050->divisor = DIV_ROUND_CLOSEST(8000, val) - 1;
+ return 0;
+ }
+
+ mpu3050->lpf = LPF_188_HZ;
+ mpu3050->divisor = DIV_ROUND_CLOSEST(1000, val) - 1;
+ return 0;
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type != IIO_ANGL_VEL)
+ return -EINVAL;
+ /*
+		 * We support +/-250, +/-500, +/-1000 and +/-2000 deg/s,
+		 * which round to roughly +/-4.3, +/-8.7, +/-17.5 and
+		 * +/-35 rad/s. With the 16 bits used to cover the full
+		 * range, the scale is then 2/(2^16) of that.
+ */
+
+ /* Just too large, set the max range */
+ if (val != 0) {
+ mpu3050->fullscale = FS_2000_DPS;
+ return 0;
+ }
+
+ /*
+		 * Now we're dealing with fractional values below 1 rad/s,
+		 * expressed here in microradians per second: do some integer
+		 * interpolation and match with the closest fullscale in
+		 * the table.
+ */
+ if (val2 <= fs250 ||
+ val2 < ((fs500 + fs250) / 2))
+ mpu3050->fullscale = FS_250_DPS;
+ else if (val2 <= fs500 ||
+ val2 < ((fs1000 + fs500) / 2))
+ mpu3050->fullscale = FS_500_DPS;
+ else if (val2 <= fs1000 ||
+ val2 < ((fs2000 + fs1000) / 2))
+ mpu3050->fullscale = FS_1000_DPS;
+ else
+ /* Catch-all */
+ mpu3050->fullscale = FS_2000_DPS;
+ return 0;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
+{
+ const struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ int ret;
+ /*
+ * Temperature 1*16 bits
+ * Three axes 3*16 bits
+ * Timestamp 64 bits (4*16 bits)
+ * Sum total 8*16 bits
+ */
+ __be16 hw_values[8];
+ s64 timestamp;
+ unsigned int datums_from_fifo = 0;
+
+ /*
+ * If we're using the hardware trigger, get the precise timestamp from
+ * the top half of the threaded IRQ handler. Otherwise get the
+ * timestamp here so it will be close in time to the actual values
+ * read from the registers.
+ */
+ if (iio_trigger_using_own(indio_dev))
+ timestamp = mpu3050->hw_timestamp;
+ else
+ timestamp = iio_get_time_ns(indio_dev);
+
+ mutex_lock(&mpu3050->lock);
+
+ /* Using the hardware IRQ trigger? Check the buffer then. */
+ if (mpu3050->hw_irq_trigger) {
+ __be16 raw_fifocnt;
+ u16 fifocnt;
+ /* X, Y, Z + temperature */
+ unsigned int bytes_per_datum = 8;
+ bool fifo_overflow = false;
+
+ ret = regmap_bulk_read(mpu3050->map,
+ MPU3050_FIFO_COUNT_H,
+ &raw_fifocnt,
+ sizeof(raw_fifocnt));
+ if (ret)
+ goto out_trigger_unlock;
+ fifocnt = be16_to_cpu(raw_fifocnt);
+
+ if (fifocnt == 512) {
+ dev_info(mpu3050->dev,
+ "FIFO overflow! Emptying and resetting FIFO\n");
+ fifo_overflow = true;
+ /* Reset and enable the FIFO */
+ ret = regmap_update_bits(mpu3050->map,
+ MPU3050_USR_CTRL,
+ MPU3050_USR_CTRL_FIFO_EN |
+ MPU3050_USR_CTRL_FIFO_RST,
+ MPU3050_USR_CTRL_FIFO_EN |
+ MPU3050_USR_CTRL_FIFO_RST);
+ if (ret) {
+ dev_info(mpu3050->dev, "error resetting FIFO\n");
+ goto out_trigger_unlock;
+ }
+ mpu3050->pending_fifo_footer = false;
+ }
+
+ if (fifocnt)
+ dev_dbg(mpu3050->dev,
+ "%d bytes in the FIFO\n",
+ fifocnt);
+
+ while (!fifo_overflow && fifocnt > bytes_per_datum) {
+ unsigned int toread;
+ unsigned int offset;
+ __be16 fifo_values[5];
+
+ /*
+ * If there is a FIFO footer in the pipe, first clear
+ * that out. This follows the complex algorithm in the
+ * datasheet that states that you may never leave the
+ * FIFO empty after the first reading: you have to
+ * always leave two footer bytes in it. The footer is
+ * in practice just two zero bytes.
+ */
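+			/*
+			 * In practice this means the first datum is read
+			 * as 8 bytes (temp + X + Y + Z) and every following
+			 * one as 10 bytes, where the two leading bytes are
+			 * the footer left over from the previous read.
+			 */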
+ if (mpu3050->pending_fifo_footer) {
+ toread = bytes_per_datum + 2;
+ offset = 0;
+ } else {
+ toread = bytes_per_datum;
+ offset = 1;
+ /* Put in some dummy value */
+ fifo_values[0] = 0xAAAA;
+ }
+
+ ret = regmap_bulk_read(mpu3050->map,
+ MPU3050_FIFO_R,
+ &fifo_values[offset],
+ toread);
+
+ dev_dbg(mpu3050->dev,
+ "%04x %04x %04x %04x %04x\n",
+ fifo_values[0],
+ fifo_values[1],
+ fifo_values[2],
+ fifo_values[3],
+ fifo_values[4]);
+
+ /* Index past the footer (fifo_values[0]) and push */
+ iio_push_to_buffers_with_timestamp(indio_dev,
+ &fifo_values[1],
+ timestamp);
+
+ fifocnt -= toread;
+ datums_from_fifo++;
+ mpu3050->pending_fifo_footer = true;
+
+ /*
+ * If we're emptying the FIFO, just make sure to
+ * check if something new appeared.
+ */
+ if (fifocnt < bytes_per_datum) {
+ ret = regmap_bulk_read(mpu3050->map,
+ MPU3050_FIFO_COUNT_H,
+ &raw_fifocnt,
+ sizeof(raw_fifocnt));
+ if (ret)
+ goto out_trigger_unlock;
+ fifocnt = be16_to_cpu(raw_fifocnt);
+ }
+
+ if (fifocnt < bytes_per_datum)
+ dev_dbg(mpu3050->dev,
+ "%d bytes left in the FIFO\n",
+ fifocnt);
+
+ /*
+ * At this point, the timestamp that triggered the
+ * hardware interrupt is no longer valid for what
+ * we are reading (the interrupt likely fired for
+ * the value on the top of the FIFO), so set the
+ * timestamp to zero and let userspace deal with it.
+ */
+ timestamp = 0;
+ }
+ }
+
+ /*
+ * If we picked some datums from the FIFO that's enough, else
+ * fall through and just read from the current value registers.
+ * This happens in two cases:
+ *
+ * - We are using some other trigger (external, like an HRTimer)
+ * than the sensor's own sample generator. In this case the
+ * sensor is just set to the max sampling frequency and we give
+ * the trigger a copy of the latest value every time we get here.
+ *
+	 * - The hardware trigger is active but unused and we actually use
+	 *   another trigger which calls here at a higher frequency than
+	 *   the device provides new data. We will then just read
+ * duplicate values directly from the hardware registers.
+ */
+ if (datums_from_fifo) {
+ dev_dbg(mpu3050->dev,
+ "read %d datums from the FIFO\n",
+ datums_from_fifo);
+ goto out_trigger_unlock;
+ }
+
+ ret = regmap_bulk_read(mpu3050->map, MPU3050_TEMP_H, &hw_values,
+ sizeof(hw_values));
+ if (ret) {
+ dev_err(mpu3050->dev,
+ "error reading axis data\n");
+ goto out_trigger_unlock;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, hw_values, timestamp);
+
+out_trigger_unlock:
+ mutex_unlock(&mpu3050->lock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int mpu3050_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+ pm_runtime_get_sync(mpu3050->dev);
+
+ /* Unless we have OUR trigger active, run at full speed */
+ if (!mpu3050->hw_irq_trigger)
+ return mpu3050_set_8khz_samplerate(mpu3050);
+
+ return 0;
+}
+
+static int mpu3050_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+ pm_runtime_mark_last_busy(mpu3050->dev);
+ pm_runtime_put_autosuspend(mpu3050->dev);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops mpu3050_buffer_setup_ops = {
+ .preenable = mpu3050_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = iio_triggered_buffer_predisable,
+ .postdisable = mpu3050_buffer_postdisable,
+};
+
+static const struct iio_mount_matrix *
+mpu3050_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+ return &mpu3050->orientation;
+}
+
+static const struct iio_chan_spec_ext_info mpu3050_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, mpu3050_get_mount_matrix),
+ { },
+};
+
+#define MPU3050_AXIS_CHANNEL(axis, index) \
+ { \
+ .type = IIO_ANGL_VEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .ext_info = mpu3050_ext_info, \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+static const struct iio_chan_spec mpu3050_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
+ },
+ MPU3050_AXIS_CHANNEL(X, 1),
+ MPU3050_AXIS_CHANNEL(Y, 2),
+ MPU3050_AXIS_CHANNEL(Z, 3),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+/* Four channels apart from timestamp, scan mask = 0x0f */
+static const unsigned long mpu3050_scan_masks[] = { 0xf, 0 };
+
+/*
+ * These are just the hardcoded factors resulting from the more elaborate
+ * calculations done with fractions in the scale raw get/set functions.
+ */
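+/*
+ * For example, the +/-250 deg/s range corresponds to roughly 4 rad/s after
+ * the integer conversion, and with all 16 bits spanning twice that range the
+ * scale becomes about 2 * 4 / 65535 ~= 0.000122 rad/s per LSB, i.e. the
+ * first entry below.
+ */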
+static IIO_CONST_ATTR(anglevel_scale_available,
+ "0.000122070 "
+ "0.000274658 "
+ "0.000518798 "
+ "0.001068115");
+
+static struct attribute *mpu3050_attributes[] = {
+ &iio_const_attr_anglevel_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group mpu3050_attribute_group = {
+ .attrs = mpu3050_attributes,
+};
+
+static const struct iio_info mpu3050_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = mpu3050_read_raw,
+ .write_raw = mpu3050_write_raw,
+ .attrs = &mpu3050_attribute_group,
+};
+
+/**
+ * mpu3050_read_mem() - read MPU-3050 internal memory
+ * @mpu3050: device to read from
+ * @bank: target bank
+ * @addr: target address
+ * @len: number of bytes
+ * @buf: the buffer to store the read bytes in
+ */
+static int mpu3050_read_mem(struct mpu3050 *mpu3050,
+ u8 bank,
+ u8 addr,
+ u8 len,
+ u8 *buf)
+{
+ int ret;
+
+ ret = regmap_write(mpu3050->map,
+ MPU3050_BANK_SEL,
+ bank);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(mpu3050->map,
+ MPU3050_MEM_START_ADDR,
+ addr);
+ if (ret)
+ return ret;
+
+ return regmap_bulk_read(mpu3050->map,
+ MPU3050_MEM_R_W,
+ buf,
+ len);
+}
+
+static int mpu3050_hw_init(struct mpu3050 *mpu3050)
+{
+ int ret;
+ u8 otp[8];
+
+ /* Reset */
+ ret = regmap_update_bits(mpu3050->map,
+ MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_RESET,
+ MPU3050_PWR_MGM_RESET);
+ if (ret)
+ return ret;
+
+ /* Turn on the PLL */
+ ret = regmap_update_bits(mpu3050->map,
+ MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_CLKSEL_MASK,
+ MPU3050_PWR_MGM_PLL_Z);
+ if (ret)
+ return ret;
+
+ /* Disable IRQs */
+ ret = regmap_write(mpu3050->map,
+ MPU3050_INT_CFG,
+ 0);
+ if (ret)
+ return ret;
+
+ /* Read out the 8 bytes of OTP (one-time-programmable) memory */
+ ret = mpu3050_read_mem(mpu3050,
+ (MPU3050_MEM_PRFTCH |
+ MPU3050_MEM_USER_BANK |
+ MPU3050_MEM_OTP_BANK_0),
+ 0,
+ sizeof(otp),
+ otp);
+ if (ret)
+ return ret;
+
+ /* This is device-unique data so it goes into the entropy pool */
+ add_device_randomness(otp, sizeof(otp));
+
+ dev_info(mpu3050->dev,
+ "die ID: %04X, wafer ID: %02X, A lot ID: %04X, "
+ "W lot ID: %03X, WP ID: %01X, rev ID: %02X\n",
+ /* Die ID, bits 0-12 */
+ (otp[1] << 8 | otp[0]) & 0x1fff,
+ /* Wafer ID, bits 13-17 */
+ ((otp[2] << 8 | otp[1]) & 0x03e0) >> 5,
+ /* A lot ID, bits 18-33 */
+ ((otp[4] << 16 | otp[3] << 8 | otp[2]) & 0x3fffc) >> 2,
+ /* W lot ID, bits 34-45 */
+ ((otp[5] << 8 | otp[4]) & 0x3ffc) >> 2,
+ /* WP ID, bits 47-49 */
+ ((otp[6] << 8 | otp[5]) & 0x0380) >> 7,
+ /* rev ID, bits 50-55 */
+ otp[6] >> 2);
+
+ return 0;
+}
+
+static int mpu3050_power_up(struct mpu3050 *mpu3050)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs);
+ if (ret) {
+ dev_err(mpu3050->dev, "cannot enable regulators\n");
+ return ret;
+ }
+ /*
+ * 20-100 ms start-up time for register read/write according to
+ * the datasheet, be on the safe side and wait 200 ms.
+ */
+ msleep(200);
+
+ /* Take device out of sleep mode */
+ ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_SLEEP, 0);
+ if (ret) {
+ dev_err(mpu3050->dev, "error setting power mode\n");
+ return ret;
+ }
+ msleep(10);
+
+ return 0;
+}
+
+static int mpu3050_power_down(struct mpu3050 *mpu3050)
+{
+ int ret;
+
+ /*
+ * Put MPU-3050 into sleep mode before cutting regulators.
+ * This is important, because we may not be the sole user
+ * of the regulator so the power may stay on after this, and
+ * then we would be wasting power unless we go to sleep mode
+ * first.
+ */
+ ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_SLEEP, MPU3050_PWR_MGM_SLEEP);
+ if (ret)
+ dev_err(mpu3050->dev, "error putting to sleep\n");
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs);
+ if (ret)
+ dev_err(mpu3050->dev, "error disabling regulators\n");
+
+ return 0;
+}
+
+static irqreturn_t mpu3050_irq_handler(int irq, void *p)
+{
+ struct iio_trigger *trig = p;
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+ if (!mpu3050->hw_irq_trigger)
+ return IRQ_NONE;
+
+ /* Get the time stamp as close in time as possible */
+ mpu3050->hw_timestamp = iio_get_time_ns(indio_dev);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t mpu3050_irq_thread(int irq, void *p)
+{
+ struct iio_trigger *trig = p;
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ unsigned int val;
+ int ret;
+
+ /* ACK IRQ and check if it was from us */
+ ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val);
+ if (ret) {
+ dev_err(mpu3050->dev, "error reading IRQ status\n");
+ return IRQ_HANDLED;
+ }
+ if (!(val & MPU3050_INT_STATUS_RAW_RDY))
+ return IRQ_NONE;
+
+ iio_trigger_poll_chained(p);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * mpu3050_drdy_trigger_set_state() - set data ready interrupt state
+ * @trig: trigger instance
+ * @enable: true if trigger should be enabled, false to disable
+ */
+static int mpu3050_drdy_trigger_set_state(struct iio_trigger *trig,
+ bool enable)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ unsigned int val;
+ int ret;
+
+ /* Disabling trigger: disable interrupt and return */
+ if (!enable) {
+ /* Disable all interrupts */
+ ret = regmap_write(mpu3050->map,
+ MPU3050_INT_CFG,
+ 0);
+ if (ret)
+ dev_err(mpu3050->dev, "error disabling IRQ\n");
+
+ /* Clear IRQ flag */
+ ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val);
+ if (ret)
+ dev_err(mpu3050->dev, "error clearing IRQ status\n");
+
+ /* Disable all things in the FIFO and reset it */
+ ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN, 0);
+ if (ret)
+ dev_err(mpu3050->dev, "error disabling FIFO\n");
+
+ ret = regmap_write(mpu3050->map, MPU3050_USR_CTRL,
+ MPU3050_USR_CTRL_FIFO_RST);
+ if (ret)
+ dev_err(mpu3050->dev, "error resetting FIFO\n");
+
+ pm_runtime_mark_last_busy(mpu3050->dev);
+ pm_runtime_put_autosuspend(mpu3050->dev);
+ mpu3050->hw_irq_trigger = false;
+
+ return 0;
+ } else {
+ /* Else we're enabling the trigger from this point */
+ pm_runtime_get_sync(mpu3050->dev);
+ mpu3050->hw_irq_trigger = true;
+
+ /* Disable all things in the FIFO */
+ ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN, 0);
+ if (ret)
+ return ret;
+
+ /* Reset and enable the FIFO */
+ ret = regmap_update_bits(mpu3050->map, MPU3050_USR_CTRL,
+ MPU3050_USR_CTRL_FIFO_EN |
+ MPU3050_USR_CTRL_FIFO_RST,
+ MPU3050_USR_CTRL_FIFO_EN |
+ MPU3050_USR_CTRL_FIFO_RST);
+ if (ret)
+ return ret;
+
+ mpu3050->pending_fifo_footer = false;
+
+ /* Turn on the FIFO for temp+X+Y+Z */
+ ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN,
+ MPU3050_FIFO_EN_TEMP_OUT |
+ MPU3050_FIFO_EN_GYRO_XOUT |
+ MPU3050_FIFO_EN_GYRO_YOUT |
+ MPU3050_FIFO_EN_GYRO_ZOUT |
+ MPU3050_FIFO_EN_FOOTER);
+ if (ret)
+ return ret;
+
+ /* Configure the sample engine */
+ ret = mpu3050_start_sampling(mpu3050);
+ if (ret)
+ return ret;
+
+ /* Clear IRQ flag */
+ ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val);
+ if (ret)
+ dev_err(mpu3050->dev, "error clearing IRQ status\n");
+
+ /* Give us interrupts whenever there is new data ready */
+ val = MPU3050_INT_RAW_RDY_EN;
+
+ if (mpu3050->irq_actl)
+ val |= MPU3050_INT_ACTL;
+ if (mpu3050->irq_latch)
+ val |= MPU3050_INT_LATCH_EN;
+ if (mpu3050->irq_opendrain)
+ val |= MPU3050_INT_OPEN;
+
+ ret = regmap_write(mpu3050->map, MPU3050_INT_CFG, val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct iio_trigger_ops mpu3050_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = mpu3050_drdy_trigger_set_state,
+};
+
+static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq)
+{
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ unsigned long irq_trig;
+ int ret;
+
+ mpu3050->trig = devm_iio_trigger_alloc(&indio_dev->dev,
+ "%s-dev%d",
+ indio_dev->name,
+ indio_dev->id);
+ if (!mpu3050->trig)
+ return -ENOMEM;
+
+ /* Check if IRQ is open drain */
+ if (of_property_read_bool(mpu3050->dev->of_node, "drive-open-drain"))
+ mpu3050->irq_opendrain = true;
+
+ irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ /*
+	 * Configure the interrupt generator hardware to match whatever
+	 * the interrupt line is configured for: rising/falling edge or
+	 * high/low level, we can provide them all.
+ */
+ switch (irq_trig) {
+ case IRQF_TRIGGER_RISING:
+ dev_info(&indio_dev->dev,
+ "pulse interrupts on the rising edge\n");
+ if (mpu3050->irq_opendrain) {
+ dev_info(&indio_dev->dev,
+ "rising edge incompatible with open drain\n");
+ mpu3050->irq_opendrain = false;
+ }
+ break;
+ case IRQF_TRIGGER_FALLING:
+ mpu3050->irq_actl = true;
+ dev_info(&indio_dev->dev,
+ "pulse interrupts on the falling edge\n");
+ break;
+ case IRQF_TRIGGER_HIGH:
+ mpu3050->irq_latch = true;
+ dev_info(&indio_dev->dev,
+ "interrupts active high level\n");
+ if (mpu3050->irq_opendrain) {
+ dev_info(&indio_dev->dev,
+ "active high incompatible with open drain\n");
+ mpu3050->irq_opendrain = false;
+ }
+ /*
+ * With level IRQs, we mask the IRQ until it is processed,
+ * but with edge IRQs (pulses) we can queue several interrupts
+ * in the top half.
+ */
+ irq_trig |= IRQF_ONESHOT;
+ break;
+ case IRQF_TRIGGER_LOW:
+ mpu3050->irq_latch = true;
+ mpu3050->irq_actl = true;
+ irq_trig |= IRQF_ONESHOT;
+ dev_info(&indio_dev->dev,
+ "interrupts active low level\n");
+ break;
+ default:
+		/* Fall back to rising edge, the preferred mode */
+ dev_err(&indio_dev->dev,
+ "unsupported IRQ trigger specified (%lx), enforce "
+ "rising edge\n", irq_trig);
+ irq_trig = IRQF_TRIGGER_RISING;
+ break;
+ }
+
+ /* An open drain line can be shared with several devices */
+ if (mpu3050->irq_opendrain)
+ irq_trig |= IRQF_SHARED;
+
+ ret = request_threaded_irq(irq,
+ mpu3050_irq_handler,
+ mpu3050_irq_thread,
+ irq_trig,
+ mpu3050->trig->name,
+ mpu3050->trig);
+ if (ret) {
+ dev_err(mpu3050->dev,
+ "can't get IRQ %d, error %d\n", irq, ret);
+ return ret;
+ }
+
+ mpu3050->irq = irq;
+ mpu3050->trig->dev.parent = mpu3050->dev;
+ mpu3050->trig->ops = &mpu3050_trigger_ops;
+ iio_trigger_set_drvdata(mpu3050->trig, indio_dev);
+
+ ret = iio_trigger_register(mpu3050->trig);
+ if (ret)
+ return ret;
+
+ indio_dev->trig = iio_trigger_get(mpu3050->trig);
+
+ return 0;
+}
+
+int mpu3050_common_probe(struct device *dev,
+ struct regmap *map,
+ int irq,
+ const char *name)
+{
+ struct iio_dev *indio_dev;
+ struct mpu3050 *mpu3050;
+ unsigned int val;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*mpu3050));
+ if (!indio_dev)
+ return -ENOMEM;
+ mpu3050 = iio_priv(indio_dev);
+
+ mpu3050->dev = dev;
+ mpu3050->map = map;
+ mutex_init(&mpu3050->lock);
+ /* Default fullscale: 2000 degrees per second */
+ mpu3050->fullscale = FS_2000_DPS;
+ /* 1 kHz, divide by 100, default frequency = 10 Hz */
+ mpu3050->lpf = MPU3050_DLPF_CFG_188HZ;
+ mpu3050->divisor = 99;
+
+ /* Read the mounting matrix, if present */
+ ret = of_iio_read_mount_matrix(dev, "mount-matrix",
+ &mpu3050->orientation);
+ if (ret)
+ return ret;
+
+ /* Fetch and turn on regulators */
+ mpu3050->regs[0].supply = mpu3050_reg_vdd;
+ mpu3050->regs[1].supply = mpu3050_reg_vlogic;
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(mpu3050->regs),
+ mpu3050->regs);
+ if (ret) {
+ dev_err(dev, "Cannot get regulators\n");
+ return ret;
+ }
+
+ ret = mpu3050_power_up(mpu3050);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(map, MPU3050_CHIP_ID_REG, &val);
+ if (ret) {
+ dev_err(dev, "could not read device ID\n");
+ ret = -ENODEV;
+
+ goto err_power_down;
+ }
+
+ if (val != MPU3050_CHIP_ID) {
+ dev_err(dev, "unsupported chip id %02x\n", (u8)val);
+ ret = -ENODEV;
+ goto err_power_down;
+ }
+
+ ret = regmap_read(map, MPU3050_PRODUCT_ID_REG, &val);
+ if (ret) {
+ dev_err(dev, "could not read device ID\n");
+ ret = -ENODEV;
+
+ goto err_power_down;
+ }
+ dev_info(dev, "found MPU-3050 part no: %d, version: %d\n",
+ ((val >> 4) & 0xf), (val & 0xf));
+
+ ret = mpu3050_hw_init(mpu3050);
+ if (ret)
+ goto err_power_down;
+
+ indio_dev->dev.parent = dev;
+ indio_dev->channels = mpu3050_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mpu3050_channels);
+ indio_dev->info = &mpu3050_info;
+ indio_dev->available_scan_masks = mpu3050_scan_masks;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = name;
+
+ ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
+ mpu3050_trigger_handler,
+ &mpu3050_buffer_setup_ops);
+ if (ret) {
+ dev_err(dev, "triggered buffer setup failed\n");
+ goto err_power_down;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(dev, "device register failed\n");
+ goto err_cleanup_buffer;
+ }
+
+ dev_set_drvdata(dev, indio_dev);
+
+ /* Check if we have an assigned IRQ to use as trigger */
+ if (irq) {
+ ret = mpu3050_trigger_probe(indio_dev, irq);
+ if (ret)
+ dev_err(dev, "failed to register trigger\n");
+ }
+
+ /* Enable runtime PM */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ /*
+ * Set autosuspend to two orders of magnitude larger than the
+ * start-up time. 100ms start-up time means 10000ms autosuspend,
+ * i.e. 10 seconds.
+ */
+ pm_runtime_set_autosuspend_delay(dev, 10000);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_put(dev);
+
+ return 0;
+
+err_cleanup_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+err_power_down:
+ mpu3050_power_down(mpu3050);
+
+ return ret;
+}
+EXPORT_SYMBOL(mpu3050_common_probe);
+
+int mpu3050_common_remove(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+ pm_runtime_get_sync(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ if (mpu3050->irq)
+ free_irq(mpu3050->irq, mpu3050);
+ iio_device_unregister(indio_dev);
+ mpu3050_power_down(mpu3050);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpu3050_common_remove);
+
+#ifdef CONFIG_PM
+static int mpu3050_runtime_suspend(struct device *dev)
+{
+ return mpu3050_power_down(iio_priv(dev_get_drvdata(dev)));
+}
+
+static int mpu3050_runtime_resume(struct device *dev)
+{
+ return mpu3050_power_up(iio_priv(dev_get_drvdata(dev)));
+}
+#endif /* CONFIG_PM */
+
+const struct dev_pm_ops mpu3050_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(mpu3050_runtime_suspend,
+ mpu3050_runtime_resume, NULL)
+};
+EXPORT_SYMBOL(mpu3050_dev_pm_ops);
+
+MODULE_AUTHOR("Linus Walleij");
+MODULE_DESCRIPTION("MPU3050 gyroscope driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
new file mode 100644
index 000000000000..06007200bf49
--- /dev/null
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -0,0 +1,124 @@
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
+
+#include "mpu3050.h"
+
+static const struct regmap_config mpu3050_i2c_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int mpu3050_i2c_bypass_select(struct i2c_mux_core *mux, u32 chan_id)
+{
+ struct mpu3050 *mpu3050 = i2c_mux_priv(mux);
+
+ /* Just power up the device, that is all that is needed */
+ pm_runtime_get_sync(mpu3050->dev);
+ return 0;
+}
+
+static int mpu3050_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id)
+{
+ struct mpu3050 *mpu3050 = i2c_mux_priv(mux);
+
+ pm_runtime_mark_last_busy(mpu3050->dev);
+ pm_runtime_put_autosuspend(mpu3050->dev);
+ return 0;
+}
+
+static int mpu3050_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+ const char *name;
+ struct mpu3050 *mpu3050;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -EOPNOTSUPP;
+
+ if (id)
+ name = id->name;
+ else
+ return -ENODEV;
+
+ regmap = devm_regmap_init_i2c(client, &mpu3050_i2c_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to register i2c regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ ret = mpu3050_common_probe(&client->dev, regmap, client->irq, name);
+ if (ret)
+ return ret;
+
+ /* The main driver is up, now register the I2C mux */
+ mpu3050 = iio_priv(dev_get_drvdata(&client->dev));
+ mpu3050->i2cmux = i2c_mux_alloc(client->adapter, &client->dev,
+ 1, 0, I2C_MUX_LOCKED | I2C_MUX_GATE,
+ mpu3050_i2c_bypass_select,
+ mpu3050_i2c_bypass_deselect);
+ /* Just fail the mux, there is no point in killing the driver */
+ if (!mpu3050->i2cmux)
+ dev_err(&client->dev, "failed to allocate I2C mux\n");
+ else {
+ mpu3050->i2cmux->priv = mpu3050;
+ ret = i2c_mux_add_adapter(mpu3050->i2cmux, 0, 0, 0);
+ if (ret)
+ dev_err(&client->dev, "failed to add I2C mux\n");
+ }
+
+ return 0;
+}
+
+static int mpu3050_i2c_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(&client->dev);
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+ if (mpu3050->i2cmux)
+ i2c_mux_del_adapters(mpu3050->i2cmux);
+
+ return mpu3050_common_remove(&client->dev);
+}
+
+/*
+ * The device ID table identifies which devices are
+ * supported by this driver.
+ */
+static const struct i2c_device_id mpu3050_i2c_id[] = {
+ { "mpu3050" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mpu3050_i2c_id);
+
+static const struct of_device_id mpu3050_i2c_of_match[] = {
+ { .compatible = "invensense,mpu3050", .data = "mpu3050" },
+ /* Deprecated vendor ID from the Input driver */
+ { .compatible = "invn,mpu3050", .data = "mpu3050" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mpu3050_i2c_of_match);
+
+static struct i2c_driver mpu3050_i2c_driver = {
+ .probe = mpu3050_i2c_probe,
+ .remove = mpu3050_i2c_remove,
+ .id_table = mpu3050_i2c_id,
+ .driver = {
+ .of_match_table = mpu3050_i2c_of_match,
+ .name = "mpu3050-i2c",
+ .pm = &mpu3050_dev_pm_ops,
+ },
+};
+module_i2c_driver(mpu3050_i2c_driver);
+
+MODULE_AUTHOR("Linus Walleij");
+MODULE_DESCRIPTION("Invensense MPU3050 gyroscope driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/gyro/mpu3050.h b/drivers/iio/gyro/mpu3050.h
new file mode 100644
index 000000000000..bef87a714dc5
--- /dev/null
+++ b/drivers/iio/gyro/mpu3050.h
@@ -0,0 +1,96 @@
+#include <linux/iio/iio.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+
+/**
+ * enum mpu3050_fullscale - indicates the full range of the sensor in deg/sec
+ */
+enum mpu3050_fullscale {
+ FS_250_DPS = 0,
+ FS_500_DPS,
+ FS_1000_DPS,
+ FS_2000_DPS,
+};
+
+/**
+ * enum mpu3050_lpf - indicates the low pass filter width
+ */
+enum mpu3050_lpf {
+	/* This implicitly sets the sample frequency to 8 kHz */
+ LPF_256_HZ_NOLPF = 0,
+	/* All others set the sample frequency to 1 kHz */
+ LPF_188_HZ,
+ LPF_98_HZ,
+ LPF_42_HZ,
+ LPF_20_HZ,
+ LPF_10_HZ,
+ LPF_5_HZ,
+ LPF_2100_HZ_NOLPF,
+};
+
+enum mpu3050_axis {
+ AXIS_X = 0,
+ AXIS_Y,
+ AXIS_Z,
+ AXIS_MAX,
+};
+
+/**
+ * struct mpu3050 - instance state container for the device
+ * @dev: parent device for this instance
+ * @orientation: mounting matrix, flipped axis etc
+ * @map: regmap to reach the registers
+ * @lock: serialization lock to marshal all requests
+ * @irq: the IRQ used for this device
+ * @regs: the regulators to power this device
+ * @fullscale: the current fullscale setting for the device
+ * @lpf: digital low pass filter setting for the device
+ * @divisor: base frequency divider: divides 8 or 1 kHz
+ * @calibration: the three signed 16-bit calibration settings that
+ * get written into the offset registers for each axis to compensate
+ * for DC offsets
+ * @trig: trigger for the MPU-3050 interrupt, if present
+ * @hw_irq_trigger: hardware interrupt trigger is in use
+ * @irq_actl: interrupt is active low
+ * @irq_latch: latched IRQ, this means that it is a level IRQ
+ * @irq_opendrain: the interrupt line shall be configured open drain
+ * @pending_fifo_footer: tells us if there is a pending footer in the FIFO
+ * that we have to read out first when handling the FIFO
+ * @hw_timestamp: latest hardware timestamp from the trigger IRQ, when in
+ * use
+ * @i2cmux: an I2C mux reflecting the fact that this sensor is a hub with
+ * a pass-through I2C interface coming out of it: this device needs to be
+ * powered up in order to reach devices on the other side of this mux
+ */
+struct mpu3050 {
+ struct device *dev;
+ struct iio_mount_matrix orientation;
+ struct regmap *map;
+ struct mutex lock;
+ int irq;
+ struct regulator_bulk_data regs[2];
+ enum mpu3050_fullscale fullscale;
+ enum mpu3050_lpf lpf;
+ u8 divisor;
+ s16 calibration[3];
+ struct iio_trigger *trig;
+ bool hw_irq_trigger;
+ bool irq_actl;
+ bool irq_latch;
+ bool irq_opendrain;
+ bool pending_fifo_footer;
+ s64 hw_timestamp;
+ struct i2c_mux_core *i2cmux;
+};
+
+/* Probe called from different transports */
+int mpu3050_common_probe(struct device *dev,
+ struct regmap *map,
+ int irq,
+ const char *name);
+int mpu3050_common_remove(struct device *dev);
+
+/* PM ops */
+extern const struct dev_pm_ops mpu3050_dev_pm_ops;
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index aea034d8fe0f..2a42b3d583e8 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -39,79 +39,6 @@
#define ST_GYRO_FS_AVL_500DPS 500
#define ST_GYRO_FS_AVL_2000DPS 2000
-/* CUSTOM VALUES FOR SENSOR 1 */
-#define ST_GYRO_1_WAI_EXP 0xd3
-#define ST_GYRO_1_ODR_ADDR 0x20
-#define ST_GYRO_1_ODR_MASK 0xc0
-#define ST_GYRO_1_ODR_AVL_100HZ_VAL 0x00
-#define ST_GYRO_1_ODR_AVL_200HZ_VAL 0x01
-#define ST_GYRO_1_ODR_AVL_400HZ_VAL 0x02
-#define ST_GYRO_1_ODR_AVL_800HZ_VAL 0x03
-#define ST_GYRO_1_PW_ADDR 0x20
-#define ST_GYRO_1_PW_MASK 0x08
-#define ST_GYRO_1_FS_ADDR 0x23
-#define ST_GYRO_1_FS_MASK 0x30
-#define ST_GYRO_1_FS_AVL_250_VAL 0x00
-#define ST_GYRO_1_FS_AVL_500_VAL 0x01
-#define ST_GYRO_1_FS_AVL_2000_VAL 0x02
-#define ST_GYRO_1_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
-#define ST_GYRO_1_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
-#define ST_GYRO_1_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
-#define ST_GYRO_1_BDU_ADDR 0x23
-#define ST_GYRO_1_BDU_MASK 0x80
-#define ST_GYRO_1_DRDY_IRQ_ADDR 0x22
-#define ST_GYRO_1_DRDY_IRQ_INT2_MASK 0x08
-#define ST_GYRO_1_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 2 */
-#define ST_GYRO_2_WAI_EXP 0xd4
-#define ST_GYRO_2_ODR_ADDR 0x20
-#define ST_GYRO_2_ODR_MASK 0xc0
-#define ST_GYRO_2_ODR_AVL_95HZ_VAL 0x00
-#define ST_GYRO_2_ODR_AVL_190HZ_VAL 0x01
-#define ST_GYRO_2_ODR_AVL_380HZ_VAL 0x02
-#define ST_GYRO_2_ODR_AVL_760HZ_VAL 0x03
-#define ST_GYRO_2_PW_ADDR 0x20
-#define ST_GYRO_2_PW_MASK 0x08
-#define ST_GYRO_2_FS_ADDR 0x23
-#define ST_GYRO_2_FS_MASK 0x30
-#define ST_GYRO_2_FS_AVL_250_VAL 0x00
-#define ST_GYRO_2_FS_AVL_500_VAL 0x01
-#define ST_GYRO_2_FS_AVL_2000_VAL 0x02
-#define ST_GYRO_2_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
-#define ST_GYRO_2_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
-#define ST_GYRO_2_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
-#define ST_GYRO_2_BDU_ADDR 0x23
-#define ST_GYRO_2_BDU_MASK 0x80
-#define ST_GYRO_2_DRDY_IRQ_ADDR 0x22
-#define ST_GYRO_2_DRDY_IRQ_INT2_MASK 0x08
-#define ST_GYRO_2_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 3 */
-#define ST_GYRO_3_WAI_EXP 0xd7
-#define ST_GYRO_3_ODR_ADDR 0x20
-#define ST_GYRO_3_ODR_MASK 0xc0
-#define ST_GYRO_3_ODR_AVL_95HZ_VAL 0x00
-#define ST_GYRO_3_ODR_AVL_190HZ_VAL 0x01
-#define ST_GYRO_3_ODR_AVL_380HZ_VAL 0x02
-#define ST_GYRO_3_ODR_AVL_760HZ_VAL 0x03
-#define ST_GYRO_3_PW_ADDR 0x20
-#define ST_GYRO_3_PW_MASK 0x08
-#define ST_GYRO_3_FS_ADDR 0x23
-#define ST_GYRO_3_FS_MASK 0x30
-#define ST_GYRO_3_FS_AVL_250_VAL 0x00
-#define ST_GYRO_3_FS_AVL_500_VAL 0x01
-#define ST_GYRO_3_FS_AVL_2000_VAL 0x02
-#define ST_GYRO_3_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
-#define ST_GYRO_3_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
-#define ST_GYRO_3_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
-#define ST_GYRO_3_BDU_ADDR 0x23
-#define ST_GYRO_3_BDU_MASK 0x80
-#define ST_GYRO_3_DRDY_IRQ_ADDR 0x22
-#define ST_GYRO_3_DRDY_IRQ_INT2_MASK 0x08
-#define ST_GYRO_3_MULTIREAD_BIT true
-
-
static const struct iio_chan_spec st_gyro_16bit_channels[] = {
ST_SENSORS_LSM_CHANNELS(IIO_ANGL_VEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
@@ -130,7 +57,7 @@ static const struct iio_chan_spec st_gyro_16bit_channels[] = {
static const struct st_sensor_settings st_gyro_sensors_settings[] = {
{
- .wai = ST_GYRO_1_WAI_EXP,
+ .wai = 0xd3,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = L3G4200D_GYRO_DEV_NAME,
@@ -138,18 +65,18 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
- .addr = ST_GYRO_1_ODR_ADDR,
- .mask = ST_GYRO_1_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xc0,
.odr_avl = {
- { 100, ST_GYRO_1_ODR_AVL_100HZ_VAL, },
- { 200, ST_GYRO_1_ODR_AVL_200HZ_VAL, },
- { 400, ST_GYRO_1_ODR_AVL_400HZ_VAL, },
- { 800, ST_GYRO_1_ODR_AVL_800HZ_VAL, },
+ { .hz = 100, .value = 0x00, },
+ { .hz = 200, .value = 0x01, },
+ { .hz = 400, .value = 0x02, },
+ { .hz = 800, .value = 0x03, },
},
},
.pw = {
- .addr = ST_GYRO_1_PW_ADDR,
- .mask = ST_GYRO_1_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x08,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -158,33 +85,33 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_GYRO_1_FS_ADDR,
- .mask = ST_GYRO_1_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_GYRO_FS_AVL_250DPS,
- .value = ST_GYRO_1_FS_AVL_250_VAL,
- .gain = ST_GYRO_1_FS_AVL_250_GAIN,
+ .value = 0x00,
+ .gain = IIO_DEGREE_TO_RAD(8750),
},
[1] = {
.num = ST_GYRO_FS_AVL_500DPS,
- .value = ST_GYRO_1_FS_AVL_500_VAL,
- .gain = ST_GYRO_1_FS_AVL_500_GAIN,
+ .value = 0x01,
+ .gain = IIO_DEGREE_TO_RAD(17500),
},
[2] = {
.num = ST_GYRO_FS_AVL_2000DPS,
- .value = ST_GYRO_1_FS_AVL_2000_VAL,
- .gain = ST_GYRO_1_FS_AVL_2000_GAIN,
+ .value = 0x02,
+ .gain = IIO_DEGREE_TO_RAD(70000),
},
},
},
.bdu = {
- .addr = ST_GYRO_1_BDU_ADDR,
- .mask = ST_GYRO_1_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_GYRO_1_DRDY_IRQ_ADDR,
- .mask_int2 = ST_GYRO_1_DRDY_IRQ_INT2_MASK,
+ .addr = 0x22,
+ .mask_int2 = 0x08,
/*
* The sensor has IHL (active low) and open
* drain settings, but only for INT1 and not
@@ -192,11 +119,11 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
*/
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_GYRO_1_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_GYRO_2_WAI_EXP,
+ .wai = 0xd4,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = L3GD20_GYRO_DEV_NAME,
@@ -208,18 +135,18 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
- .addr = ST_GYRO_2_ODR_ADDR,
- .mask = ST_GYRO_2_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xc0,
.odr_avl = {
- { 95, ST_GYRO_2_ODR_AVL_95HZ_VAL, },
- { 190, ST_GYRO_2_ODR_AVL_190HZ_VAL, },
- { 380, ST_GYRO_2_ODR_AVL_380HZ_VAL, },
- { 760, ST_GYRO_2_ODR_AVL_760HZ_VAL, },
+ { .hz = 95, .value = 0x00, },
+ { .hz = 190, .value = 0x01, },
+ { .hz = 380, .value = 0x02, },
+ { .hz = 760, .value = 0x03, },
},
},
.pw = {
- .addr = ST_GYRO_2_PW_ADDR,
- .mask = ST_GYRO_2_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x08,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -228,33 +155,33 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_GYRO_2_FS_ADDR,
- .mask = ST_GYRO_2_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_GYRO_FS_AVL_250DPS,
- .value = ST_GYRO_2_FS_AVL_250_VAL,
- .gain = ST_GYRO_2_FS_AVL_250_GAIN,
+ .value = 0x00,
+ .gain = IIO_DEGREE_TO_RAD(8750),
},
[1] = {
.num = ST_GYRO_FS_AVL_500DPS,
- .value = ST_GYRO_2_FS_AVL_500_VAL,
- .gain = ST_GYRO_2_FS_AVL_500_GAIN,
+ .value = 0x01,
+ .gain = IIO_DEGREE_TO_RAD(17500),
},
[2] = {
.num = ST_GYRO_FS_AVL_2000DPS,
- .value = ST_GYRO_2_FS_AVL_2000_VAL,
- .gain = ST_GYRO_2_FS_AVL_2000_GAIN,
+ .value = 0x02,
+ .gain = IIO_DEGREE_TO_RAD(70000),
},
},
},
.bdu = {
- .addr = ST_GYRO_2_BDU_ADDR,
- .mask = ST_GYRO_2_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_GYRO_2_DRDY_IRQ_ADDR,
- .mask_int2 = ST_GYRO_2_DRDY_IRQ_INT2_MASK,
+ .addr = 0x22,
+ .mask_int2 = 0x08,
/*
* The sensor has IHL (active low) and open
* drain settings, but only for INT1 and not
@@ -262,29 +189,29 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
*/
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_GYRO_2_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_GYRO_3_WAI_EXP,
+ .wai = 0xd7,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = L3GD20_GYRO_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
- .addr = ST_GYRO_3_ODR_ADDR,
- .mask = ST_GYRO_3_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xc0,
.odr_avl = {
- { 95, ST_GYRO_3_ODR_AVL_95HZ_VAL, },
- { 190, ST_GYRO_3_ODR_AVL_190HZ_VAL, },
- { 380, ST_GYRO_3_ODR_AVL_380HZ_VAL, },
- { 760, ST_GYRO_3_ODR_AVL_760HZ_VAL, },
+ { .hz = 95, .value = 0x00, },
+ { .hz = 190, .value = 0x01, },
+ { .hz = 380, .value = 0x02, },
+ { .hz = 760, .value = 0x03, },
},
},
.pw = {
- .addr = ST_GYRO_3_PW_ADDR,
- .mask = ST_GYRO_3_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x08,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -293,33 +220,33 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_GYRO_3_FS_ADDR,
- .mask = ST_GYRO_3_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_GYRO_FS_AVL_250DPS,
- .value = ST_GYRO_3_FS_AVL_250_VAL,
- .gain = ST_GYRO_3_FS_AVL_250_GAIN,
+ .value = 0x00,
+ .gain = IIO_DEGREE_TO_RAD(8750),
},
[1] = {
.num = ST_GYRO_FS_AVL_500DPS,
- .value = ST_GYRO_3_FS_AVL_500_VAL,
- .gain = ST_GYRO_3_FS_AVL_500_GAIN,
+ .value = 0x01,
+ .gain = IIO_DEGREE_TO_RAD(17500),
},
[2] = {
.num = ST_GYRO_FS_AVL_2000DPS,
- .value = ST_GYRO_3_FS_AVL_2000_VAL,
- .gain = ST_GYRO_3_FS_AVL_2000_GAIN,
+ .value = 0x02,
+ .gain = IIO_DEGREE_TO_RAD(70000),
},
},
},
.bdu = {
- .addr = ST_GYRO_3_BDU_ADDR,
- .mask = ST_GYRO_3_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_GYRO_3_DRDY_IRQ_ADDR,
- .mask_int2 = ST_GYRO_3_DRDY_IRQ_INT2_MASK,
+ .addr = 0x22,
+ .mask_int2 = 0x08,
/*
* The sensor has IHL (active low) and open
* drain settings, but only for INT1 and not
@@ -327,7 +254,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
*/
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_GYRO_3_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
};
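
The cleanup above drops the per-variant ST_GYRO_[123]_* register macros in favour of literal values plus designated initializers, which makes it visible that all three variants share one CTRL register layout (ODR/power at 0x20, full scale and BDU at 0x23, DRDY routing at 0x22). A minimal sketch of the table-entry shape the initializers assume; the field names are taken from the .hz/.value initializers above, the real definitions live in the st_sensors common header (include/linux/iio/common/st_sensors.h):

	/* sketch only: mirrors the .hz/.value designated initializers above */
	struct st_sensor_odr_avl {
		unsigned int hz;
		u8 value;			/* register bits selecting this rate */
	};

	static const struct st_sensor_odr_avl l3g4200d_odr_sketch[] = {
		{ .hz = 100, .value = 0x00 },	/* reads as "100 Hz -> 0x00" with no macro lookup */
		{ .hz = 200, .value = 0x01 },
	};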
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index b17e2e2bd4f5..912477d54be2 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -27,6 +27,8 @@ config DHT11
config HDC100X
tristate "TI HDC100x relative humidity and temperature sensor"
depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for the Texas Instruments
HDC1000 and HDC1008 relative humidity and temperature sensors.
@@ -34,6 +36,28 @@ config HDC100X
To compile this driver as a module, choose M here: the module
will be called hdc100x.
+config HTS221
+	tristate "STMicroelectronics HTS221 sensor driver"
+ depends on (I2C || SPI)
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HTS221_I2C if (I2C)
+ select HTS221_SPI if (SPI_MASTER)
+ help
+	  Say yes here to build support for the STMicroelectronics HTS221
+	  temperature-humidity sensor.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hts221.
+
+config HTS221_I2C
+ tristate
+ depends on HTS221
+
+config HTS221_SPI
+ tristate
+ depends on HTS221
+
config HTU21
tristate "Measurement Specialties HTU21 humidity & temperature sensor"
depends on I2C
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index 4a73442fcd9c..a6850e47c100 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -5,6 +5,13 @@
obj-$(CONFIG_AM2315) += am2315.o
obj-$(CONFIG_DHT11) += dht11.o
obj-$(CONFIG_HDC100X) += hdc100x.o
+
+hts221-y := hts221_core.o \
+ hts221_buffer.o
+obj-$(CONFIG_HTS221) += hts221.o
+obj-$(CONFIG_HTS221_I2C) += hts221_i2c.o
+obj-$(CONFIG_HTS221_SPI) += hts221_spi.o
+
obj-$(CONFIG_HTU21) += htu21.o
obj-$(CONFIG_SI7005) += si7005.o
obj-$(CONFIG_SI7020) += si7020.o
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index e0c9c70c2a4a..265c34da52d1 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -22,11 +22,15 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#define HDC100X_REG_TEMP 0x00
#define HDC100X_REG_HUMIDITY 0x01
#define HDC100X_REG_CONFIG 0x02
+#define HDC100X_REG_CONFIG_ACQ_MODE BIT(12)
#define HDC100X_REG_CONFIG_HEATER_EN BIT(13)
struct hdc100x_data {
@@ -87,22 +91,40 @@ static const struct iio_chan_spec hdc100x_channels[] = {
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_INT_TIME) |
BIT(IIO_CHAN_INFO_OFFSET),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
},
{
.type = IIO_HUMIDITYRELATIVE,
.address = HDC100X_REG_HUMIDITY,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_INT_TIME)
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
},
{
.type = IIO_CURRENT,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.extend_name = "heater",
.output = 1,
+ .scan_index = -1,
},
+ IIO_CHAN_SOFT_TIMESTAMP(2),
};
+static const unsigned long hdc100x_scan_masks[] = {0x3, 0};
+
static int hdc100x_update_config(struct hdc100x_data *data, int mask, int val)
{
int tmp = (~mask & data->config) | val;
@@ -183,7 +205,14 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev,
*val = hdc100x_get_heater_status(data);
ret = IIO_VAL_INT;
} else {
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+
ret = hdc100x_get_measurement(data, chan);
+ iio_device_release_direct_mode(indio_dev);
if (ret >= 0) {
*val = ret;
ret = IIO_VAL_INT;
@@ -246,6 +275,78 @@ static int hdc100x_write_raw(struct iio_dev *indio_dev,
}
}
+static int hdc100x_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct hdc100x_data *data = iio_priv(indio_dev);
+ int ret;
+
+ /* Buffer is enabled. First set ACQ Mode, then attach poll func */
+ mutex_lock(&data->lock);
+ ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE,
+ HDC100X_REG_CONFIG_ACQ_MODE);
+ mutex_unlock(&data->lock);
+ if (ret)
+ return ret;
+
+ return iio_triggered_buffer_postenable(indio_dev);
+}
+
+static int hdc100x_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct hdc100x_data *data = iio_priv(indio_dev);
+ int ret;
+
+ /* First detach poll func, then reset ACQ mode. OK to disable buffer */
+ ret = iio_triggered_buffer_predisable(indio_dev);
+ if (ret)
+ return ret;
+
+ mutex_lock(&data->lock);
+ ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 0);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static const struct iio_buffer_setup_ops hdc_buffer_setup_ops = {
+ .postenable = hdc100x_buffer_postenable,
+ .predisable = hdc100x_buffer_predisable,
+};
+
+static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct hdc100x_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+ int delay = data->adc_int_us[0] + data->adc_int_us[1];
+ int ret;
+ s16 buf[8]; /* 2x s16 + padding + 8 byte timestamp */
+
+ /* dual read starts at temp register */
+ mutex_lock(&data->lock);
+ ret = i2c_smbus_write_byte(client, HDC100X_REG_TEMP);
+ if (ret < 0) {
+ dev_err(&client->dev, "cannot start measurement\n");
+ goto err;
+ }
+ usleep_range(delay, delay + 1000);
+
+ ret = i2c_master_recv(client, (u8 *)buf, 4);
+ if (ret < 0) {
+ dev_err(&client->dev, "cannot read sensor data\n");
+ goto err;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
+err:
+ mutex_unlock(&data->lock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static const struct iio_info hdc100x_info = {
.read_raw = hdc100x_read_raw,
.write_raw = hdc100x_write_raw,
@@ -258,6 +359,7 @@ static int hdc100x_probe(struct i2c_client *client,
{
struct iio_dev *indio_dev;
struct hdc100x_data *data;
+ int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
@@ -279,12 +381,35 @@ static int hdc100x_probe(struct i2c_client *client,
indio_dev->channels = hdc100x_channels;
indio_dev->num_channels = ARRAY_SIZE(hdc100x_channels);
+ indio_dev->available_scan_masks = hdc100x_scan_masks;
/* be sure we are in a known state */
hdc100x_set_it_time(data, 0, hdc100x_int_time[0][0]);
hdc100x_set_it_time(data, 1, hdc100x_int_time[1][0]);
+ hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 0);
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ hdc100x_trigger_handler,
+ &hdc_buffer_setup_ops);
+ if (ret < 0) {
+ dev_err(&client->dev, "iio triggered buffer setup failed\n");
+ return ret;
+ }
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return ret;
+}
+
+static int hdc100x_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
- return devm_iio_device_register(&client->dev, indio_dev);
+ return 0;
}
static const struct i2c_device_id hdc100x_id[] = {
@@ -298,6 +423,7 @@ static struct i2c_driver hdc100x_driver = {
.name = "hdc100x",
},
.probe = hdc100x_probe,
+ .remove = hdc100x_remove,
.id_table = hdc100x_id,
};
module_i2c_driver(hdc100x_driver);
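
The hdc100x changes above add triggered-buffer support: the ACQ_MODE bit switches the part into a combined temperature+humidity acquisition, and the pollfunc reads both results in a single transfer before pushing them with a timestamp. The "s16 buf[8]" sizing in the handler follows the usual IIO scan layout; a sketch of what that 16-byte buffer holds (the struct and its field names are illustrative only, the driver keeps it as a bare array):

	#include <linux/types.h>

	/* illustrative layout of the handler's 16-byte scan buffer */
	struct hdc100x_scan {
		__be16 temp;		/* scan_index 0, from HDC100X_REG_TEMP */
		__be16 humidity;	/* scan_index 1, from HDC100X_REG_HUMIDITY */
		s16 padding[2];		/* pad so the timestamp lands on an 8-byte boundary */
		s64 timestamp;		/* appended by iio_push_to_buffers_with_timestamp() */
	};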
diff --git a/drivers/iio/humidity/hts221.h b/drivers/iio/humidity/hts221.h
new file mode 100644
index 000000000000..c7154665512e
--- /dev/null
+++ b/drivers/iio/humidity/hts221.h
@@ -0,0 +1,73 @@
+/*
+ * STMicroelectronics hts221 sensor driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef HTS221_H
+#define HTS221_H
+
+#define HTS221_DEV_NAME "hts221"
+
+#include <linux/iio/iio.h>
+
+#define HTS221_RX_MAX_LENGTH 8
+#define HTS221_TX_MAX_LENGTH 8
+
+#define HTS221_DATA_SIZE 2
+
+struct hts221_transfer_buffer {
+ u8 rx_buf[HTS221_RX_MAX_LENGTH];
+ u8 tx_buf[HTS221_TX_MAX_LENGTH] ____cacheline_aligned;
+};
+
+struct hts221_transfer_function {
+ int (*read)(struct device *dev, u8 addr, int len, u8 *data);
+ int (*write)(struct device *dev, u8 addr, int len, u8 *data);
+};
+
+#define HTS221_AVG_DEPTH 8
+struct hts221_avg_avl {
+ u16 avg;
+ u8 val;
+};
+
+enum hts221_sensor_type {
+ HTS221_SENSOR_H,
+ HTS221_SENSOR_T,
+ HTS221_SENSOR_MAX,
+};
+
+struct hts221_sensor {
+ u8 cur_avg_idx;
+ int slope, b_gen;
+};
+
+struct hts221_hw {
+ const char *name;
+ struct device *dev;
+
+ struct mutex lock;
+ struct iio_trigger *trig;
+ int irq;
+
+ struct hts221_sensor sensors[HTS221_SENSOR_MAX];
+
+ u8 odr;
+
+ const struct hts221_transfer_function *tf;
+ struct hts221_transfer_buffer tb;
+};
+
+int hts221_config_drdy(struct hts221_hw *hw, bool enable);
+int hts221_probe(struct iio_dev *iio_dev);
+int hts221_power_on(struct hts221_hw *hw);
+int hts221_power_off(struct hts221_hw *hw);
+int hts221_allocate_buffers(struct hts221_hw *hw);
+int hts221_allocate_trigger(struct hts221_hw *hw);
+
+#endif /* HTS221_H */
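
The header above keeps the core bus-agnostic: each front-end (I2C or SPI) fills in a hts221_transfer_function and the core only ever goes through hw->tf. A minimal sketch of a register read through that indirection (the helper name is made up for illustration):

	static int hts221_read_reg(struct hts221_hw *hw, u8 addr, u8 *val)
	{
		int err;

		/* dispatches to hts221_i2c_read() or hts221_spi_read() */
		err = hw->tf->read(hw->dev, addr, sizeof(*val), val);

		return err < 0 ? err : 0;
	}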
diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c
new file mode 100644
index 000000000000..72ddcdac21a2
--- /dev/null
+++ b/drivers/iio/humidity/hts221_buffer.c
@@ -0,0 +1,168 @@
+/*
+ * STMicroelectronics hts221 sensor driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/events.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/buffer.h>
+
+#include "hts221.h"
+
+#define HTS221_REG_STATUS_ADDR 0x27
+#define HTS221_RH_DRDY_MASK BIT(1)
+#define HTS221_TEMP_DRDY_MASK BIT(0)
+
+static int hts221_trig_set_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *iio_dev = iio_trigger_get_drvdata(trig);
+ struct hts221_hw *hw = iio_priv(iio_dev);
+
+ return hts221_config_drdy(hw, state);
+}
+
+static const struct iio_trigger_ops hts221_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = hts221_trig_set_state,
+};
+
+static irqreturn_t hts221_trigger_handler_thread(int irq, void *private)
+{
+ struct hts221_hw *hw = (struct hts221_hw *)private;
+ u8 status;
+ int err;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_STATUS_ADDR, sizeof(status),
+ &status);
+ if (err < 0)
+ return IRQ_HANDLED;
+
+	/*
+	 * The H_DA bit (humidity data available) is routed to the DRDY line.
+	 * The humidity sample is computed after the temperature one, so both
+	 * data channels can be assumed valid once H_DA is set in the status
+	 * register.
+	 */
+ if (!(status & HTS221_RH_DRDY_MASK))
+ return IRQ_NONE;
+
+ iio_trigger_poll_chained(hw->trig);
+
+ return IRQ_HANDLED;
+}
+
+int hts221_allocate_trigger(struct hts221_hw *hw)
+{
+ struct iio_dev *iio_dev = iio_priv_to_dev(hw);
+ unsigned long irq_type;
+ int err;
+
+ irq_type = irqd_get_trigger_type(irq_get_irq_data(hw->irq));
+
+ switch (irq_type) {
+ case IRQF_TRIGGER_HIGH:
+ case IRQF_TRIGGER_RISING:
+ break;
+ default:
+ dev_info(hw->dev,
+ "mode %lx unsupported, using IRQF_TRIGGER_RISING\n",
+ irq_type);
+ irq_type = IRQF_TRIGGER_RISING;
+ break;
+ }
+
+ err = devm_request_threaded_irq(hw->dev, hw->irq, NULL,
+ hts221_trigger_handler_thread,
+ irq_type | IRQF_ONESHOT,
+ hw->name, hw);
+ if (err) {
+ dev_err(hw->dev, "failed to request trigger irq %d\n",
+ hw->irq);
+ return err;
+ }
+
+ hw->trig = devm_iio_trigger_alloc(hw->dev, "%s-trigger",
+ iio_dev->name);
+ if (!hw->trig)
+ return -ENOMEM;
+
+ iio_trigger_set_drvdata(hw->trig, iio_dev);
+ hw->trig->ops = &hts221_trigger_ops;
+ hw->trig->dev.parent = hw->dev;
+ iio_dev->trig = iio_trigger_get(hw->trig);
+
+ return devm_iio_trigger_register(hw->dev, hw->trig);
+}
+
+static int hts221_buffer_preenable(struct iio_dev *iio_dev)
+{
+ return hts221_power_on(iio_priv(iio_dev));
+}
+
+static int hts221_buffer_postdisable(struct iio_dev *iio_dev)
+{
+ return hts221_power_off(iio_priv(iio_dev));
+}
+
+static const struct iio_buffer_setup_ops hts221_buffer_ops = {
+ .preenable = hts221_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = iio_triggered_buffer_predisable,
+ .postdisable = hts221_buffer_postdisable,
+};
+
+static irqreturn_t hts221_buffer_handler_thread(int irq, void *p)
+{
+ u8 buffer[ALIGN(2 * HTS221_DATA_SIZE, sizeof(s64)) + sizeof(s64)];
+ struct iio_poll_func *pf = p;
+ struct iio_dev *iio_dev = pf->indio_dev;
+ struct hts221_hw *hw = iio_priv(iio_dev);
+ struct iio_chan_spec const *ch;
+ int err;
+
+ /* humidity data */
+ ch = &iio_dev->channels[HTS221_SENSOR_H];
+ err = hw->tf->read(hw->dev, ch->address, HTS221_DATA_SIZE,
+ buffer);
+ if (err < 0)
+ goto out;
+
+ /* temperature data */
+ ch = &iio_dev->channels[HTS221_SENSOR_T];
+ err = hw->tf->read(hw->dev, ch->address, HTS221_DATA_SIZE,
+ buffer + HTS221_DATA_SIZE);
+ if (err < 0)
+ goto out;
+
+ iio_push_to_buffers_with_timestamp(iio_dev, buffer,
+ iio_get_time_ns(iio_dev));
+
+out:
+ iio_trigger_notify_done(hw->trig);
+
+ return IRQ_HANDLED;
+}
+
+int hts221_allocate_buffers(struct hts221_hw *hw)
+{
+ return devm_iio_triggered_buffer_setup(hw->dev, iio_priv_to_dev(hw),
+ NULL, hts221_buffer_handler_thread,
+ &hts221_buffer_ops);
+}
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics hts221 buffer driver");
+MODULE_LICENSE("GPL v2");
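
In the buffer path above the threaded IRQ handler only checks H_DA and chains into the trigger; the pollfunc then reads both output registers into a stack buffer sized for the IIO core's alignment rules. The arithmetic behind that sizing, spelled out as a sketch (values assume HTS221_DATA_SIZE == 2):

	#include <linux/kernel.h>	/* ALIGN() */
	#include <linux/types.h>

	#define HTS221_DATA_SIZE	2	/* bytes per output register pair */

	/* 2 samples = 4 bytes, rounded up to s64 alignment (8 bytes), plus the
	 * 8-byte timestamp appended by iio_push_to_buffers_with_timestamp() */
	static const size_t hts221_scan_bytes =
		ALIGN(2 * HTS221_DATA_SIZE, sizeof(s64)) + sizeof(s64);	/* 16 */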
diff --git a/drivers/iio/humidity/hts221_core.c b/drivers/iio/humidity/hts221_core.c
new file mode 100644
index 000000000000..3f3ef4a1a474
--- /dev/null
+++ b/drivers/iio/humidity/hts221_core.c
@@ -0,0 +1,687 @@
+/*
+ * STMicroelectronics hts221 sensor driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/iio/sysfs.h>
+#include <linux/delay.h>
+#include <asm/unaligned.h>
+
+#include "hts221.h"
+
+#define HTS221_REG_WHOAMI_ADDR 0x0f
+#define HTS221_REG_WHOAMI_VAL 0xbc
+
+#define HTS221_REG_CNTRL1_ADDR 0x20
+#define HTS221_REG_CNTRL2_ADDR 0x21
+#define HTS221_REG_CNTRL3_ADDR 0x22
+
+#define HTS221_REG_AVG_ADDR 0x10
+#define HTS221_REG_H_OUT_L 0x28
+#define HTS221_REG_T_OUT_L 0x2a
+
+#define HTS221_HUMIDITY_AVG_MASK 0x07
+#define HTS221_TEMP_AVG_MASK 0x38
+
+#define HTS221_ODR_MASK 0x87
+#define HTS221_BDU_MASK BIT(2)
+
+#define HTS221_DRDY_MASK BIT(2)
+
+#define HTS221_ENABLE_SENSOR BIT(7)
+
+#define HTS221_HUMIDITY_AVG_4 0x00 /* 0.4 %RH */
+#define HTS221_HUMIDITY_AVG_8 0x01 /* 0.3 %RH */
+#define HTS221_HUMIDITY_AVG_16 0x02 /* 0.2 %RH */
+#define HTS221_HUMIDITY_AVG_32 0x03 /* 0.15 %RH */
+#define HTS221_HUMIDITY_AVG_64 0x04 /* 0.1 %RH */
+#define HTS221_HUMIDITY_AVG_128 0x05 /* 0.07 %RH */
+#define HTS221_HUMIDITY_AVG_256 0x06 /* 0.05 %RH */
+#define HTS221_HUMIDITY_AVG_512 0x07 /* 0.03 %RH */
+
+#define HTS221_TEMP_AVG_2 0x00 /* 0.08 degC */
+#define HTS221_TEMP_AVG_4 0x08 /* 0.05 degC */
+#define HTS221_TEMP_AVG_8 0x10 /* 0.04 degC */
+#define HTS221_TEMP_AVG_16 0x18 /* 0.03 degC */
+#define HTS221_TEMP_AVG_32 0x20 /* 0.02 degC */
+#define HTS221_TEMP_AVG_64 0x28 /* 0.015 degC */
+#define HTS221_TEMP_AVG_128 0x30 /* 0.01 degC */
+#define HTS221_TEMP_AVG_256 0x38 /* 0.007 degC */
+
+/* calibration registers */
+#define HTS221_REG_0RH_CAL_X_H 0x36
+#define HTS221_REG_1RH_CAL_X_H 0x3a
+#define HTS221_REG_0RH_CAL_Y_H 0x30
+#define HTS221_REG_1RH_CAL_Y_H 0x31
+#define HTS221_REG_0T_CAL_X_L 0x3c
+#define HTS221_REG_1T_CAL_X_L 0x3e
+#define HTS221_REG_0T_CAL_Y_H 0x32
+#define HTS221_REG_1T_CAL_Y_H 0x33
+#define HTS221_REG_T1_T0_CAL_Y_H 0x35
+
+struct hts221_odr {
+ u8 hz;
+ u8 val;
+};
+
+struct hts221_avg {
+ u8 addr;
+ u8 mask;
+ struct hts221_avg_avl avg_avl[HTS221_AVG_DEPTH];
+};
+
+static const struct hts221_odr hts221_odr_table[] = {
+ { 1, 0x01 }, /* 1Hz */
+ { 7, 0x02 }, /* 7Hz */
+ { 13, 0x03 }, /* 12.5Hz */
+};
+
+static const struct hts221_avg hts221_avg_list[] = {
+ {
+ .addr = HTS221_REG_AVG_ADDR,
+ .mask = HTS221_HUMIDITY_AVG_MASK,
+ .avg_avl = {
+ { 4, HTS221_HUMIDITY_AVG_4 },
+ { 8, HTS221_HUMIDITY_AVG_8 },
+ { 16, HTS221_HUMIDITY_AVG_16 },
+ { 32, HTS221_HUMIDITY_AVG_32 },
+ { 64, HTS221_HUMIDITY_AVG_64 },
+ { 128, HTS221_HUMIDITY_AVG_128 },
+ { 256, HTS221_HUMIDITY_AVG_256 },
+ { 512, HTS221_HUMIDITY_AVG_512 },
+ },
+ },
+ {
+ .addr = HTS221_REG_AVG_ADDR,
+ .mask = HTS221_TEMP_AVG_MASK,
+ .avg_avl = {
+ { 2, HTS221_TEMP_AVG_2 },
+ { 4, HTS221_TEMP_AVG_4 },
+ { 8, HTS221_TEMP_AVG_8 },
+ { 16, HTS221_TEMP_AVG_16 },
+ { 32, HTS221_TEMP_AVG_32 },
+ { 64, HTS221_TEMP_AVG_64 },
+ { 128, HTS221_TEMP_AVG_128 },
+ { 256, HTS221_TEMP_AVG_256 },
+ },
+ },
+};
+
+static const struct iio_chan_spec hts221_channels[] = {
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .address = HTS221_REG_H_OUT_L,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ },
+ {
+ .type = IIO_TEMP,
+ .address = HTS221_REG_T_OUT_L,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+static int hts221_write_with_mask(struct hts221_hw *hw, u8 addr, u8 mask,
+ u8 val)
+{
+ u8 data;
+ int err;
+
+ mutex_lock(&hw->lock);
+
+ err = hw->tf->read(hw->dev, addr, sizeof(data), &data);
+ if (err < 0) {
+ dev_err(hw->dev, "failed to read %02x register\n", addr);
+ goto unlock;
+ }
+
+ data = (data & ~mask) | (val & mask);
+
+ err = hw->tf->write(hw->dev, addr, sizeof(data), &data);
+ if (err < 0)
+ dev_err(hw->dev, "failed to write %02x register\n", addr);
+
+unlock:
+ mutex_unlock(&hw->lock);
+
+ return err;
+}
+
+static int hts221_check_whoami(struct hts221_hw *hw)
+{
+ u8 data;
+ int err;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_WHOAMI_ADDR, sizeof(data),
+ &data);
+ if (err < 0) {
+ dev_err(hw->dev, "failed to read whoami register\n");
+ return err;
+ }
+
+ if (data != HTS221_REG_WHOAMI_VAL) {
+ dev_err(hw->dev, "wrong whoami {%02x vs %02x}\n",
+ data, HTS221_REG_WHOAMI_VAL);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int hts221_config_drdy(struct hts221_hw *hw, bool enable)
+{
+ u8 val = enable ? BIT(2) : 0;
+ int err;
+
+ err = hts221_write_with_mask(hw, HTS221_REG_CNTRL3_ADDR,
+ HTS221_DRDY_MASK, val);
+
+ return err < 0 ? err : 0;
+}
+
+static int hts221_update_odr(struct hts221_hw *hw, u8 odr)
+{
+ int i, err;
+ u8 val;
+
+ for (i = 0; i < ARRAY_SIZE(hts221_odr_table); i++)
+ if (hts221_odr_table[i].hz == odr)
+ break;
+
+ if (i == ARRAY_SIZE(hts221_odr_table))
+ return -EINVAL;
+
+ val = HTS221_ENABLE_SENSOR | HTS221_BDU_MASK | hts221_odr_table[i].val;
+ err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR,
+ HTS221_ODR_MASK, val);
+ if (err < 0)
+ return err;
+
+ hw->odr = odr;
+
+ return 0;
+}
+
+static int hts221_update_avg(struct hts221_hw *hw,
+ enum hts221_sensor_type type,
+ u16 val)
+{
+ int i, err;
+ const struct hts221_avg *avg = &hts221_avg_list[type];
+
+ for (i = 0; i < HTS221_AVG_DEPTH; i++)
+ if (avg->avg_avl[i].avg == val)
+ break;
+
+ if (i == HTS221_AVG_DEPTH)
+ return -EINVAL;
+
+ err = hts221_write_with_mask(hw, avg->addr, avg->mask,
+ avg->avg_avl[i].val);
+ if (err < 0)
+ return err;
+
+ hw->sensors[type].cur_avg_idx = i;
+
+ return 0;
+}
+
+static ssize_t hts221_sysfs_sampling_freq(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i;
+ ssize_t len = 0;
+
+ for (i = 0; i < ARRAY_SIZE(hts221_odr_table); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
+ hts221_odr_table[i].hz);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t
+hts221_sysfs_rh_oversampling_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct hts221_avg *avg = &hts221_avg_list[HTS221_SENSOR_H];
+ ssize_t len = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(avg->avg_avl); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
+ avg->avg_avl[i].avg);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t
+hts221_sysfs_temp_oversampling_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct hts221_avg *avg = &hts221_avg_list[HTS221_SENSOR_T];
+ ssize_t len = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(avg->avg_avl); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
+ avg->avg_avl[i].avg);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+int hts221_power_on(struct hts221_hw *hw)
+{
+ return hts221_update_odr(hw, hw->odr);
+}
+
+int hts221_power_off(struct hts221_hw *hw)
+{
+ u8 data[] = {0x00, 0x00};
+
+ return hw->tf->write(hw->dev, HTS221_REG_CNTRL1_ADDR, sizeof(data),
+ data);
+}
+
+static int hts221_parse_temp_caldata(struct hts221_hw *hw)
+{
+ int err, *slope, *b_gen;
+ s16 cal_x0, cal_x1, cal_y0, cal_y1;
+ u8 cal0, cal1;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_0T_CAL_Y_H,
+ sizeof(cal0), &cal0);
+ if (err < 0)
+ return err;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_T1_T0_CAL_Y_H,
+ sizeof(cal1), &cal1);
+ if (err < 0)
+ return err;
+	cal_y0 = ((cal1 & 0x3) << 8) | cal0;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_1T_CAL_Y_H,
+ sizeof(cal0), &cal0);
+ if (err < 0)
+ return err;
+ cal_y1 = (((cal1 & 0xc) >> 2) << 8) | cal0;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_0T_CAL_X_L, sizeof(cal_x0),
+ (u8 *)&cal_x0);
+ if (err < 0)
+ return err;
+ cal_x0 = le16_to_cpu(cal_x0);
+
+ err = hw->tf->read(hw->dev, HTS221_REG_1T_CAL_X_L, sizeof(cal_x1),
+ (u8 *)&cal_x1);
+ if (err < 0)
+ return err;
+ cal_x1 = le16_to_cpu(cal_x1);
+
+ slope = &hw->sensors[HTS221_SENSOR_T].slope;
+ b_gen = &hw->sensors[HTS221_SENSOR_T].b_gen;
+
+ *slope = ((cal_y1 - cal_y0) * 8000) / (cal_x1 - cal_x0);
+ *b_gen = (((s32)cal_x1 * cal_y0 - (s32)cal_x0 * cal_y1) * 1000) /
+ (cal_x1 - cal_x0);
+ *b_gen *= 8;
+
+ return 0;
+}
+
+static int hts221_parse_rh_caldata(struct hts221_hw *hw)
+{
+ int err, *slope, *b_gen;
+ s16 cal_x0, cal_x1, cal_y0, cal_y1;
+ u8 data;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_0RH_CAL_Y_H, sizeof(data),
+ &data);
+ if (err < 0)
+ return err;
+ cal_y0 = data;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_1RH_CAL_Y_H, sizeof(data),
+ &data);
+ if (err < 0)
+ return err;
+ cal_y1 = data;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_0RH_CAL_X_H, sizeof(cal_x0),
+ (u8 *)&cal_x0);
+ if (err < 0)
+ return err;
+ cal_x0 = le16_to_cpu(cal_x0);
+
+ err = hw->tf->read(hw->dev, HTS221_REG_1RH_CAL_X_H, sizeof(cal_x1),
+ (u8 *)&cal_x1);
+ if (err < 0)
+ return err;
+ cal_x1 = le16_to_cpu(cal_x1);
+
+ slope = &hw->sensors[HTS221_SENSOR_H].slope;
+ b_gen = &hw->sensors[HTS221_SENSOR_H].b_gen;
+
+ *slope = ((cal_y1 - cal_y0) * 8000) / (cal_x1 - cal_x0);
+ *b_gen = (((s32)cal_x1 * cal_y0 - (s32)cal_x0 * cal_y1) * 1000) /
+ (cal_x1 - cal_x0);
+ *b_gen *= 8;
+
+ return 0;
+}
+
+static int hts221_get_sensor_scale(struct hts221_hw *hw,
+ enum iio_chan_type ch_type,
+ int *val, int *val2)
+{
+ s64 tmp;
+ s32 rem, div, data;
+
+ switch (ch_type) {
+ case IIO_HUMIDITYRELATIVE:
+ data = hw->sensors[HTS221_SENSOR_H].slope;
+ div = (1 << 4) * 1000;
+ break;
+ case IIO_TEMP:
+ data = hw->sensors[HTS221_SENSOR_T].slope;
+ div = (1 << 6) * 1000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tmp = div_s64(data * 1000000000LL, div);
+ tmp = div_s64_rem(tmp, 1000000000LL, &rem);
+
+ *val = tmp;
+ *val2 = rem;
+
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static int hts221_get_sensor_offset(struct hts221_hw *hw,
+ enum iio_chan_type ch_type,
+ int *val, int *val2)
+{
+ s64 tmp;
+ s32 rem, div, data;
+
+ switch (ch_type) {
+ case IIO_HUMIDITYRELATIVE:
+ data = hw->sensors[HTS221_SENSOR_H].b_gen;
+ div = hw->sensors[HTS221_SENSOR_H].slope;
+ break;
+ case IIO_TEMP:
+ data = hw->sensors[HTS221_SENSOR_T].b_gen;
+ div = hw->sensors[HTS221_SENSOR_T].slope;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tmp = div_s64(data * 1000000000LL, div);
+ tmp = div_s64_rem(tmp, 1000000000LL, &rem);
+
+ *val = tmp;
+ *val2 = rem;
+
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static int hts221_read_oneshot(struct hts221_hw *hw, u8 addr, int *val)
+{
+ u8 data[HTS221_DATA_SIZE];
+ int err;
+
+ err = hts221_power_on(hw);
+ if (err < 0)
+ return err;
+
+ msleep(50);
+
+ err = hw->tf->read(hw->dev, addr, sizeof(data), data);
+ if (err < 0)
+ return err;
+
+ hts221_power_off(hw);
+
+ *val = (s16)get_unaligned_le16(data);
+
+ return IIO_VAL_INT;
+}
+
+static int hts221_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *ch,
+ int *val, int *val2, long mask)
+{
+ struct hts221_hw *hw = iio_priv(iio_dev);
+ int ret;
+
+ ret = iio_device_claim_direct_mode(iio_dev);
+ if (ret)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = hts221_read_oneshot(hw, ch->address, val);
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ ret = hts221_get_sensor_scale(hw, ch->type, val, val2);
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ ret = hts221_get_sensor_offset(hw, ch->type, val, val2);
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = hw->odr;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO: {
+ u8 idx;
+ const struct hts221_avg *avg;
+
+ switch (ch->type) {
+ case IIO_HUMIDITYRELATIVE:
+ avg = &hts221_avg_list[HTS221_SENSOR_H];
+ idx = hw->sensors[HTS221_SENSOR_H].cur_avg_idx;
+ *val = avg->avg_avl[idx].avg;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_TEMP:
+ avg = &hts221_avg_list[HTS221_SENSOR_T];
+ idx = hw->sensors[HTS221_SENSOR_T].cur_avg_idx;
+ *val = avg->avg_avl[idx].avg;
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ iio_device_release_direct_mode(iio_dev);
+
+ return ret;
+}
+
+static int hts221_write_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct hts221_hw *hw = iio_priv(iio_dev);
+ int ret;
+
+ ret = iio_device_claim_direct_mode(iio_dev);
+ if (ret)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = hts221_update_odr(hw, val);
+ break;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_HUMIDITYRELATIVE:
+ ret = hts221_update_avg(hw, HTS221_SENSOR_H, val);
+ break;
+ case IIO_TEMP:
+ ret = hts221_update_avg(hw, HTS221_SENSOR_T, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ iio_device_release_direct_mode(iio_dev);
+
+ return ret;
+}
+
+static int hts221_validate_trigger(struct iio_dev *iio_dev,
+ struct iio_trigger *trig)
+{
+ struct hts221_hw *hw = iio_priv(iio_dev);
+
+ return hw->trig == trig ? 0 : -EINVAL;
+}
+
+static IIO_DEVICE_ATTR(in_humidity_oversampling_ratio_available, S_IRUGO,
+ hts221_sysfs_rh_oversampling_avail, NULL, 0);
+static IIO_DEVICE_ATTR(in_temp_oversampling_ratio_available, S_IRUGO,
+ hts221_sysfs_temp_oversampling_avail, NULL, 0);
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(hts221_sysfs_sampling_freq);
+
+static struct attribute *hts221_attributes[] = {
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_humidity_oversampling_ratio_available.dev_attr.attr,
+ &iio_dev_attr_in_temp_oversampling_ratio_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group hts221_attribute_group = {
+ .attrs = hts221_attributes,
+};
+
+static const struct iio_info hts221_info = {
+ .driver_module = THIS_MODULE,
+ .attrs = &hts221_attribute_group,
+ .read_raw = hts221_read_raw,
+ .write_raw = hts221_write_raw,
+ .validate_trigger = hts221_validate_trigger,
+};
+
+static const unsigned long hts221_scan_masks[] = {0x3, 0x0};
+
+int hts221_probe(struct iio_dev *iio_dev)
+{
+ struct hts221_hw *hw = iio_priv(iio_dev);
+ int err;
+ u8 data;
+
+ mutex_init(&hw->lock);
+
+ err = hts221_check_whoami(hw);
+ if (err < 0)
+ return err;
+
+ hw->odr = hts221_odr_table[0].hz;
+
+ iio_dev->modes = INDIO_DIRECT_MODE;
+ iio_dev->dev.parent = hw->dev;
+ iio_dev->available_scan_masks = hts221_scan_masks;
+ iio_dev->channels = hts221_channels;
+ iio_dev->num_channels = ARRAY_SIZE(hts221_channels);
+ iio_dev->name = HTS221_DEV_NAME;
+ iio_dev->info = &hts221_info;
+
+ /* configure humidity sensor */
+ err = hts221_parse_rh_caldata(hw);
+ if (err < 0) {
+ dev_err(hw->dev, "failed to get rh calibration data\n");
+ return err;
+ }
+
+ data = hts221_avg_list[HTS221_SENSOR_H].avg_avl[3].avg;
+ err = hts221_update_avg(hw, HTS221_SENSOR_H, data);
+ if (err < 0) {
+ dev_err(hw->dev, "failed to set rh oversampling ratio\n");
+ return err;
+ }
+
+ /* configure temperature sensor */
+ err = hts221_parse_temp_caldata(hw);
+ if (err < 0) {
+ dev_err(hw->dev,
+ "failed to get temperature calibration data\n");
+ return err;
+ }
+
+ data = hts221_avg_list[HTS221_SENSOR_T].avg_avl[3].avg;
+ err = hts221_update_avg(hw, HTS221_SENSOR_T, data);
+ if (err < 0) {
+ dev_err(hw->dev,
+ "failed to set temperature oversampling ratio\n");
+ return err;
+ }
+
+ if (hw->irq > 0) {
+ err = hts221_allocate_buffers(hw);
+ if (err < 0)
+ return err;
+
+ err = hts221_allocate_trigger(hw);
+ if (err)
+ return err;
+ }
+
+ return devm_iio_device_register(hw->dev, iio_dev);
+}
+EXPORT_SYMBOL(hts221_probe);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics hts221 sensor driver");
+MODULE_LICENSE("GPL v2");
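
The calibration code above fits a line through two factory points per channel and exposes it as SCALE and OFFSET (IIO_VAL_INT_PLUS_NANO), so the processed value is recovered as (raw + offset) * scale. A user-space worked example of the humidity math with made-up calibration values (not taken from a real part):

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical factory calibration: (-8000 LSB, 20 %RH) and (8000 LSB, 80 %RH) */
		int cal_x0 = -8000, cal_x1 = 8000;	/* H0_T0_OUT, H1_T0_OUT */
		int cal_y0 = 40, cal_y1 = 160;		/* H0_rH_x2, H1_rH_x2 (2 * %RH) */

		int slope = ((cal_y1 - cal_y0) * 8000) / (cal_x1 - cal_x0);	/* 60 */
		int b_gen = (((cal_x1 * cal_y0 - cal_x0 * cal_y1) * 1000) /
			     (cal_x1 - cal_x0)) * 8;				/* 800000 */

		double scale = (double)slope / ((1 << 4) * 1000);	/* 0.00375 %RH per LSB */
		double offset = (double)b_gen / slope;			/* ~13333.3 LSB */
		short raw = 0;						/* sample read from H_OUT */

		printf("%.2f %%RH\n", ((double)raw + offset) * scale);	/* prints 50.00 %RH */
		return 0;
	}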
diff --git a/drivers/iio/humidity/hts221_i2c.c b/drivers/iio/humidity/hts221_i2c.c
new file mode 100644
index 000000000000..367ecd509f31
--- /dev/null
+++ b/drivers/iio/humidity/hts221_i2c.c
@@ -0,0 +1,110 @@
+/*
+ * STMicroelectronics hts221 i2c driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include "hts221.h"
+
+#define I2C_AUTO_INCREMENT 0x80
+
+static int hts221_i2c_read(struct device *dev, u8 addr, int len, u8 *data)
+{
+ struct i2c_msg msg[2];
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (len > 1)
+ addr |= I2C_AUTO_INCREMENT;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = client->flags;
+ msg[0].len = 1;
+ msg[0].buf = &addr;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = client->flags | I2C_M_RD;
+ msg[1].len = len;
+ msg[1].buf = data;
+
+ return i2c_transfer(client->adapter, msg, 2);
+}
+
+static int hts221_i2c_write(struct device *dev, u8 addr, int len, u8 *data)
+{
+ u8 send[len + 1];
+ struct i2c_msg msg;
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (len > 1)
+ addr |= I2C_AUTO_INCREMENT;
+
+ send[0] = addr;
+ memcpy(&send[1], data, len * sizeof(u8));
+
+ msg.addr = client->addr;
+ msg.flags = client->flags;
+ msg.len = len + 1;
+ msg.buf = send;
+
+ return i2c_transfer(client->adapter, &msg, 1);
+}
+
+static const struct hts221_transfer_function hts221_transfer_fn = {
+ .read = hts221_i2c_read,
+ .write = hts221_i2c_write,
+};
+
+static int hts221_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct hts221_hw *hw;
+ struct iio_dev *iio_dev;
+
+ iio_dev = devm_iio_device_alloc(&client->dev, sizeof(*hw));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, iio_dev);
+
+ hw = iio_priv(iio_dev);
+ hw->name = client->name;
+ hw->dev = &client->dev;
+ hw->irq = client->irq;
+ hw->tf = &hts221_transfer_fn;
+
+ return hts221_probe(iio_dev);
+}
+
+static const struct of_device_id hts221_i2c_of_match[] = {
+ { .compatible = "st,hts221", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hts221_i2c_of_match);
+
+static const struct i2c_device_id hts221_i2c_id_table[] = {
+ { HTS221_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, hts221_i2c_id_table);
+
+static struct i2c_driver hts221_driver = {
+ .driver = {
+ .name = "hts221_i2c",
+ .of_match_table = of_match_ptr(hts221_i2c_of_match),
+ },
+ .probe = hts221_i2c_probe,
+ .id_table = hts221_i2c_id_table,
+};
+module_i2c_driver(hts221_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics hts221 i2c driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/hts221_spi.c b/drivers/iio/humidity/hts221_spi.c
new file mode 100644
index 000000000000..70df5e7150c1
--- /dev/null
+++ b/drivers/iio/humidity/hts221_spi.c
@@ -0,0 +1,125 @@
+/*
+ * STMicroelectronics hts221 spi driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include "hts221.h"
+
+#define SENSORS_SPI_READ 0x80
+#define SPI_AUTO_INCREMENT 0x40
+
+static int hts221_spi_read(struct device *dev, u8 addr, int len, u8 *data)
+{
+ int err;
+ struct spi_device *spi = to_spi_device(dev);
+ struct iio_dev *iio_dev = spi_get_drvdata(spi);
+ struct hts221_hw *hw = iio_priv(iio_dev);
+
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = hw->tb.tx_buf,
+ .bits_per_word = 8,
+ .len = 1,
+ },
+ {
+ .rx_buf = hw->tb.rx_buf,
+ .bits_per_word = 8,
+ .len = len,
+ }
+ };
+
+ if (len > 1)
+ addr |= SPI_AUTO_INCREMENT;
+ hw->tb.tx_buf[0] = addr | SENSORS_SPI_READ;
+
+ err = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
+ if (err < 0)
+ return err;
+
+ memcpy(data, hw->tb.rx_buf, len * sizeof(u8));
+
+ return len;
+}
+
+static int hts221_spi_write(struct device *dev, u8 addr, int len, u8 *data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct iio_dev *iio_dev = spi_get_drvdata(spi);
+ struct hts221_hw *hw = iio_priv(iio_dev);
+
+ struct spi_transfer xfers = {
+ .tx_buf = hw->tb.tx_buf,
+ .bits_per_word = 8,
+ .len = len + 1,
+ };
+
+ if (len >= HTS221_TX_MAX_LENGTH)
+ return -ENOMEM;
+
+ if (len > 1)
+ addr |= SPI_AUTO_INCREMENT;
+ hw->tb.tx_buf[0] = addr;
+ memcpy(&hw->tb.tx_buf[1], data, len);
+
+ return spi_sync_transfer(spi, &xfers, 1);
+}
+
+static const struct hts221_transfer_function hts221_transfer_fn = {
+ .read = hts221_spi_read,
+ .write = hts221_spi_write,
+};
+
+static int hts221_spi_probe(struct spi_device *spi)
+{
+ struct hts221_hw *hw;
+ struct iio_dev *iio_dev;
+
+ iio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*hw));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, iio_dev);
+
+ hw = iio_priv(iio_dev);
+ hw->name = spi->modalias;
+ hw->dev = &spi->dev;
+ hw->irq = spi->irq;
+ hw->tf = &hts221_transfer_fn;
+
+ return hts221_probe(iio_dev);
+}
+
+static const struct of_device_id hts221_spi_of_match[] = {
+ { .compatible = "st,hts221", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hts221_spi_of_match);
+
+static const struct spi_device_id hts221_spi_id_table[] = {
+ { HTS221_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, hts221_spi_id_table);
+
+static struct spi_driver hts221_driver = {
+ .driver = {
+ .name = "hts221_spi",
+ .of_match_table = of_match_ptr(hts221_spi_of_match),
+ },
+ .probe = hts221_spi_probe,
+ .id_table = hts221_spi_id_table,
+};
+module_spi_driver(hts221_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics hts221 spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index ffc2ccf6374e..345a7656c5ef 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -154,8 +154,17 @@ static const struct i2c_device_id si7020_id[] = {
};
MODULE_DEVICE_TABLE(i2c, si7020_id);
+static const struct of_device_id si7020_dt_ids[] = {
+ { .compatible = "silabs,si7020" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, si7020_dt_ids);
+
static struct i2c_driver si7020_driver = {
- .driver.name = "si7020",
+ .driver = {
+ .name = "si7020",
+ .of_match_table = of_match_ptr(si7020_dt_ids),
+ },
.probe = si7020_probe,
.id_table = si7020_id,
};
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index e0251b8c1a52..5355507f8fa1 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -398,7 +398,8 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct bmi160_data *data = iio_priv(indio_dev);
- s16 buf[16]; /* 3 sens x 3 axis x s16 + 3 x s16 pad + 4 x s16 tstamp */
+ __le16 buf[16];
+ /* 3 sens x 3 axis x __le16 + 3 x __le16 pad + 4 x __le16 tstamp */
int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L;
__le16 sample;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 19580d1db597..2c3f8964a3ea 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -126,7 +126,7 @@ static int inv_mpu_probe(struct i2c_client *client,
st = iio_priv(dev_get_drvdata(&client->dev));
st->muxc = i2c_mux_alloc(client->adapter, &client->dev,
- 1, 0, I2C_MUX_LOCKED,
+ 1, 0, I2C_MUX_LOCKED | I2C_MUX_GATE,
inv_mpu6050_select_bypass,
inv_mpu6050_deselect_bypass);
if (!st->muxc) {
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 158aaf44dd95..b12830b09c7d 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -307,10 +307,9 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
const unsigned long *mask;
unsigned long *trialmask;
- trialmask = kmalloc(sizeof(*trialmask)*
- BITS_TO_LONGS(indio_dev->masklength),
- GFP_KERNEL);
-
+ trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
+ sizeof(*trialmask),
+ GFP_KERNEL);
if (trialmask == NULL)
return -ENOMEM;
if (!indio_dev->masklength) {
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index fc340ed3dca1..aaca42862389 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -81,6 +81,8 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_PH] = "ph",
[IIO_UVINDEX] = "uvindex",
[IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
+ [IIO_COUNT] = "count",
+ [IIO_INDEX] = "index",
};
static const char * const iio_modifier_names[] = {
@@ -575,66 +577,82 @@ int of_iio_read_mount_matrix(const struct device *dev,
#endif
EXPORT_SYMBOL(of_iio_read_mount_matrix);
-/**
- * iio_format_value() - Formats a IIO value into its string representation
- * @buf: The buffer to which the formatted value gets written
- * @type: One of the IIO_VAL_... constants. This decides how the val
- * and val2 parameters are formatted.
- * @size: Number of IIO value entries contained in vals
- * @vals: Pointer to the values, exact meaning depends on the
- * type parameter.
- *
- * Return: 0 by default, a negative number on failure or the
- * total number of characters written for a type that belongs
- * to the IIO_VAL_... constant.
- */
-ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
+static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
+ int size, const int *vals)
{
unsigned long long tmp;
+ int tmp0, tmp1;
bool scale_db = false;
switch (type) {
case IIO_VAL_INT:
- return sprintf(buf, "%d\n", vals[0]);
+ return snprintf(buf, len, "%d", vals[0]);
case IIO_VAL_INT_PLUS_MICRO_DB:
scale_db = true;
case IIO_VAL_INT_PLUS_MICRO:
if (vals[1] < 0)
- return sprintf(buf, "-%d.%06u%s\n", abs(vals[0]),
- -vals[1], scale_db ? " dB" : "");
+ return snprintf(buf, len, "-%d.%06u%s", abs(vals[0]),
+ -vals[1], scale_db ? " dB" : "");
else
- return sprintf(buf, "%d.%06u%s\n", vals[0], vals[1],
- scale_db ? " dB" : "");
+ return snprintf(buf, len, "%d.%06u%s", vals[0], vals[1],
+ scale_db ? " dB" : "");
case IIO_VAL_INT_PLUS_NANO:
if (vals[1] < 0)
- return sprintf(buf, "-%d.%09u\n", abs(vals[0]),
- -vals[1]);
+ return snprintf(buf, len, "-%d.%09u", abs(vals[0]),
+ -vals[1]);
else
- return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
+ return snprintf(buf, len, "%d.%09u", vals[0], vals[1]);
case IIO_VAL_FRACTIONAL:
tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
- vals[0] = (int)div_s64_rem(tmp, 1000000000, &vals[1]);
- return sprintf(buf, "%d.%09u\n", vals[0], abs(vals[1]));
+ tmp1 = vals[1];
+ tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1);
+ return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
case IIO_VAL_FRACTIONAL_LOG2:
tmp = (s64)vals[0] * 1000000000LL >> vals[1];
- vals[1] = do_div(tmp, 1000000000LL);
- vals[0] = tmp;
- return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
+ tmp1 = do_div(tmp, 1000000000LL);
+ tmp0 = tmp;
+ return snprintf(buf, len, "%d.%09u", tmp0, tmp1);
case IIO_VAL_INT_MULTIPLE:
{
int i;
- int len = 0;
+ int l = 0;
- for (i = 0; i < size; ++i)
- len += snprintf(&buf[len], PAGE_SIZE - len, "%d ",
- vals[i]);
- len += snprintf(&buf[len], PAGE_SIZE - len, "\n");
- return len;
+ for (i = 0; i < size; ++i) {
+ l += snprintf(&buf[l], len - l, "%d ", vals[i]);
+ if (l >= len)
+ break;
+ }
+ return l;
}
default:
return 0;
}
}
+
+/**
+ * iio_format_value() - Formats a IIO value into its string representation
+ * @buf: The buffer to which the formatted value gets written
+ * which is assumed to be big enough (i.e. PAGE_SIZE).
+ * @type: One of the IIO_VAL_... constants. This decides how the val
+ * and val2 parameters are formatted.
+ * @size: Number of IIO value entries contained in vals
+ * @vals: Pointer to the values, exact meaning depends on the
+ * type parameter.
+ *
+ * Return: 0 by default, a negative number on failure or the
+ * total number of characters written for a type that belongs
+ * to the IIO_VAL_... constant.
+ */
+ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
+{
+ ssize_t len;
+
+ len = __iio_format_value(buf, PAGE_SIZE, type, size, vals);
+ if (len >= PAGE_SIZE - 1)
+ return -EFBIG;
+
+ return len + sprintf(buf + len, "\n");
+}
EXPORT_SYMBOL_GPL(iio_format_value);
static ssize_t iio_read_channel_info(struct device *dev,
@@ -662,6 +680,119 @@ static ssize_t iio_read_channel_info(struct device *dev,
return iio_format_value(buf, ret, val_len, vals);
}
+static ssize_t iio_format_avail_list(char *buf, const int *vals,
+ int type, int length)
+{
+ int i;
+ ssize_t len = 0;
+
+ switch (type) {
+ case IIO_VAL_INT:
+ for (i = 0; i < length; i++) {
+ len += __iio_format_value(buf + len, PAGE_SIZE - len,
+ type, 1, &vals[i]);
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ if (i < length - 1)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ " ");
+ else
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "\n");
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ }
+ break;
+ default:
+ for (i = 0; i < length / 2; i++) {
+ len += __iio_format_value(buf + len, PAGE_SIZE - len,
+ type, 2, &vals[i * 2]);
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ if (i < length / 2 - 1)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ " ");
+ else
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "\n");
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ }
+ }
+
+ return len;
+}
+
+static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
+{
+ int i;
+ ssize_t len;
+
+ len = snprintf(buf, PAGE_SIZE, "[");
+ switch (type) {
+ case IIO_VAL_INT:
+ for (i = 0; i < 3; i++) {
+ len += __iio_format_value(buf + len, PAGE_SIZE - len,
+ type, 1, &vals[i]);
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ if (i < 2)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ " ");
+ else
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "]\n");
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ }
+ break;
+ default:
+ for (i = 0; i < 3; i++) {
+ len += __iio_format_value(buf + len, PAGE_SIZE - len,
+ type, 2, &vals[i * 2]);
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ if (i < 2)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ " ");
+ else
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "]\n");
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ }
+ }
+
+ return len;
+}
+
+static ssize_t iio_read_channel_info_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ const int *vals;
+ int ret;
+ int length;
+ int type;
+
+ ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
+ &vals, &type, &length,
+ this_attr->address);
+
+ if (ret < 0)
+ return ret;
+ switch (ret) {
+ case IIO_AVAIL_LIST:
+ return iio_format_avail_list(buf, vals, type, length);
+ case IIO_AVAIL_RANGE:
+ return iio_format_avail_range(buf, vals, type);
+ default:
+ return -EINVAL;
+ }
+}
+
/**
* iio_str_to_fixpoint() - Parse a fixed-point number from a string
* @str: The string to parse
@@ -978,6 +1109,40 @@ static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
return attrcount;
}
+static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ enum iio_shared_by shared_by,
+ const long *infomask)
+{
+ int i, ret, attrcount = 0;
+ char *avail_postfix;
+
+	for_each_set_bit(i, infomask, sizeof(*infomask) * 8) {
+ avail_postfix = kasprintf(GFP_KERNEL,
+ "%s_available",
+ iio_chan_info_postfix[i]);
+ if (!avail_postfix)
+ return -ENOMEM;
+
+ ret = __iio_add_chan_devattr(avail_postfix,
+ chan,
+ &iio_read_channel_info_avail,
+ NULL,
+ i,
+ shared_by,
+ &indio_dev->dev,
+ &indio_dev->channel_attr_list);
+ kfree(avail_postfix);
+ if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
+ continue;
+ else if (ret < 0)
+ return ret;
+ attrcount++;
+ }
+
+ return attrcount;
+}
+
static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
@@ -993,6 +1158,14 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
return ret;
attrcount += ret;
+ ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
+ IIO_SEPARATE,
+ &chan->
+ info_mask_separate_available);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
ret = iio_device_add_info_mask_type(indio_dev, chan,
IIO_SHARED_BY_TYPE,
&chan->info_mask_shared_by_type);
@@ -1000,6 +1173,14 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
return ret;
attrcount += ret;
+ ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
+ IIO_SHARED_BY_TYPE,
+ &chan->
+ info_mask_shared_by_type_available);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
ret = iio_device_add_info_mask_type(indio_dev, chan,
IIO_SHARED_BY_DIR,
&chan->info_mask_shared_by_dir);
@@ -1007,6 +1188,13 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
return ret;
attrcount += ret;
+ ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
+ IIO_SHARED_BY_DIR,
+ &chan->info_mask_shared_by_dir_available);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
ret = iio_device_add_info_mask_type(indio_dev, chan,
IIO_SHARED_BY_ALL,
&chan->info_mask_shared_by_all);
@@ -1014,6 +1202,13 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
return ret;
attrcount += ret;
+ ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
+ IIO_SHARED_BY_ALL,
+ &chan->info_mask_shared_by_all_available);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
if (chan->ext_info) {
unsigned int i = 0;
for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
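
The core changes above add *_available companions to the info_mask attributes: when a driver implements read_avail(), the new iio_read_channel_info_avail() formats an IIO_AVAIL_LIST as a space-separated list and an IIO_AVAIL_RANGE as "[min step max]". A driver-side sketch of the callback this plumbing expects (hypothetical foo_ driver, values chosen only for illustration; the channel would also set .info_mask_separate_available = BIT(IIO_CHAN_INFO_SCALE)):

	static const int foo_scale_avail[] = { 1, 8, 64, 128 };

	static int foo_read_avail(struct iio_dev *indio_dev,
				  struct iio_chan_spec const *chan,
				  const int **vals, int *type, int *length,
				  long mask)
	{
		switch (mask) {
		case IIO_CHAN_INFO_SCALE:
			*vals = foo_scale_avail;
			*type = IIO_VAL_INT;
			*length = ARRAY_SIZE(foo_scale_avail);
			return IIO_AVAIL_LIST;	/* shown as "1 8 64 128" in sysfs */
		default:
			return -EINVAL;
		}
	}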
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index e1e104845e38..978729f6d7c4 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -717,6 +717,27 @@ bool iio_trigger_using_own(struct iio_dev *indio_dev)
}
EXPORT_SYMBOL(iio_trigger_using_own);
+/**
+ * iio_trigger_validate_own_device - Check if a trigger and IIO device belong to
+ * the same device
+ * @trig: The IIO trigger to check
+ * @indio_dev: the IIO device to check
+ *
+ * This function can be used as the validate_device callback for triggers that
+ * can only be attached to their own device.
+ *
+ * Return: 0 if both the trigger and the IIO device belong to the same
+ * device, -EINVAL otherwise.
+ */
+int iio_trigger_validate_own_device(struct iio_trigger *trig,
+ struct iio_dev *indio_dev)
+{
+ if (indio_dev->dev.parent != trig->dev.parent)
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL(iio_trigger_validate_own_device);
+
void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
indio_dev->groups[indio_dev->groupcounter++] =
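
iio_trigger_validate_own_device() above factors out a check several drivers open-code; a typical (hypothetical) use wires it into the trigger ops so the trigger can only be attached to the device that registered it:

	static const struct iio_trigger_ops foo_trigger_ops = {
		.owner = THIS_MODULE,
		.set_trigger_state = foo_set_trigger_state,	/* driver-specific, assumed */
		.validate_device = iio_trigger_validate_own_device,
	};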
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index c4757e6367e7..b0f4630a163f 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -658,6 +658,31 @@ err_unlock:
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
+static int iio_read_channel_attribute(struct iio_channel *chan,
+ int *val, int *val2,
+ enum iio_chan_info_enum attribute)
+{
+ int ret;
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (chan->indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ ret = iio_channel_read(chan, val, val2, attribute);
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+
+int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
+{
+ return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
+}
+EXPORT_SYMBOL_GPL(iio_read_channel_offset);
+
int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
int ret;
@@ -687,21 +712,113 @@ EXPORT_SYMBOL_GPL(iio_read_channel_processed);
int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
+ return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
+}
+EXPORT_SYMBOL_GPL(iio_read_channel_scale);
+
+static int iio_channel_read_avail(struct iio_channel *chan,
+ const int **vals, int *type, int *length,
+ enum iio_chan_info_enum info)
+{
+ if (!iio_channel_has_available(chan->channel, info))
+ return -EINVAL;
+
+ return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
+ vals, type, length, info);
+}
+
+int iio_read_avail_channel_raw(struct iio_channel *chan,
+ const int **vals, int *length)
+{
int ret;
+ int type;
mutex_lock(&chan->indio_dev->info_exist_lock);
- if (chan->indio_dev->info == NULL) {
+ if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
- ret = iio_channel_read(chan, val, val2, IIO_CHAN_INFO_SCALE);
+ ret = iio_channel_read_avail(chan,
+ vals, &type, length, IIO_CHAN_INFO_RAW);
err_unlock:
mutex_unlock(&chan->indio_dev->info_exist_lock);
+	if (ret >= 0 && type != IIO_VAL_INT)
+		/* raw values are assumed to be IIO_VAL_INT */
+		ret = -EINVAL;
+
return ret;
}
-EXPORT_SYMBOL_GPL(iio_read_channel_scale);
+EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
+
+static int iio_channel_read_max(struct iio_channel *chan,
+ int *val, int *val2, int *type,
+ enum iio_chan_info_enum info)
+{
+ int unused;
+ const int *vals;
+ int length;
+ int ret;
+
+ if (!val2)
+ val2 = &unused;
+
+ ret = iio_channel_read_avail(chan, &vals, type, &length, info);
+ switch (ret) {
+ case IIO_AVAIL_RANGE:
+ switch (*type) {
+ case IIO_VAL_INT:
+ *val = vals[2];
+ break;
+ default:
+ *val = vals[4];
+ *val2 = vals[5];
+ }
+ return 0;
+
+ case IIO_AVAIL_LIST:
+ if (length <= 0)
+ return -EINVAL;
+ switch (*type) {
+ case IIO_VAL_INT:
+ *val = vals[--length];
+ while (length) {
+ if (vals[--length] > *val)
+ *val = vals[length];
+ }
+ break;
+ default:
+ /* FIXME: learn about max for other iio values */
+ return -EINVAL;
+ }
+ return 0;
+
+ default:
+ return ret;
+ }
+}
+
+int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
+{
+ int ret;
+ int type;
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (!chan->indio_dev->info) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);
int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
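
The inkern additions above give in-kernel consumers access to a channel's offset and to the raw range it advertises. A consumer-side sketch (the channel mapping name and helper are made up) showing how the new calls combine with the existing scale helper:

	static int foo_setup(struct device *dev)
	{
		struct iio_channel *chan;
		int scale, scale2, offset, offset2, max_raw;
		int ret;

		chan = devm_iio_channel_get(dev, "dac");	/* assumed io-channel-names entry */
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		ret = iio_read_channel_offset(chan, &offset, &offset2);
		if (ret < 0)
			return ret;

		ret = iio_read_channel_scale(chan, &scale, &scale2);
		if (ret < 0)
			return ret;

		/* largest raw code the channel advertises via read_avail(RAW) */
		return iio_read_max_channel_raw(chan, &max_raw);
	}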
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index ba2e64d7ee58..298ea5081a96 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -140,6 +140,18 @@ config GP2AP020A00F
To compile this driver as a module, choose M here: the
module will be called gp2ap020a00f.
+config SENSORS_ISL29018
+ tristate "Intersil 29018 light and proximity sensor"
+ depends on I2C
+ select REGMAP_I2C
+ default n
+ help
+	  If you say yes here you get support for the ambient light and
+	  proximity infrared sensing of the Intersil ISL29018.
+	  The driver provides measurements of ambient light intensity in
+	  lux as well as proximity and normal infrared sensing.
+	  Data from the sensor is accessible via sysfs.
+
config ISL29125
tristate "Intersil ISL29125 digital color light sensor"
depends on I2C
@@ -326,6 +338,13 @@ config SENSORS_TSL2563
This driver can also be built as a module. If so, the module
will be called tsl2563.
+config TSL2583
+ tristate "TAOS TSL2580, TSL2581 and TSL2583 light-to-digital converters"
+ depends on I2C
+ help
+ Provides support for the TAOS tsl2580, tsl2581 and tsl2583 devices.
+ ALS data is accessible via the iio sysfs interface.
+
config TSL4531
tristate "TAOS TSL4531 ambient light sensors"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index c5768df87a17..4de520036e6e 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_CM36651) += cm36651.o
obj-$(CONFIG_GP2AP020A00F) += gp2ap020a00f.o
obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o
obj-$(CONFIG_HID_SENSOR_PROX) += hid-sensor-prox.o
+obj-$(CONFIG_SENSORS_ISL29018) += isl29018.o
obj-$(CONFIG_ISL29125) += isl29125.o
obj-$(CONFIG_JSA1212) += jsa1212.o
obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
@@ -30,6 +31,7 @@ obj-$(CONFIG_SI1145) += si1145.o
obj-$(CONFIG_STK3310) += stk3310.o
obj-$(CONFIG_TCS3414) += tcs3414.o
obj-$(CONFIG_TCS3472) += tcs3472.o
+obj-$(CONFIG_TSL2583) += tsl2583.o
obj-$(CONFIG_TSL4531) += tsl4531.o
obj-$(CONFIG_US5182D) += us5182d.o
obj-$(CONFIG_VCNL4000) += vcnl4000.o
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/iio/light/isl29018.c
index a767a43c995c..917dd8b43e72 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/iio/light/isl29018.c
@@ -62,16 +62,6 @@
#define ISL29035_BOUT_SHIFT 0x07
#define ISL29035_BOUT_MASK (0x01 << ISL29035_BOUT_SHIFT)
-#define ISL29018_INT_TIME_AVAIL "0.090000 0.005630 0.000351 0.000021"
-#define ISL29023_INT_TIME_AVAIL "0.090000 0.005600 0.000352 0.000022"
-#define ISL29035_INT_TIME_AVAIL "0.105000 0.006500 0.000410 0.000025"
-
-static const char * const int_time_avail[] = {
- ISL29018_INT_TIME_AVAIL,
- ISL29023_INT_TIME_AVAIL,
- ISL29035_INT_TIME_AVAIL,
-};
-
enum isl29018_int_time {
ISL29018_INT_TIME_16,
ISL29018_INT_TIME_12,
@@ -110,7 +100,8 @@ struct isl29018_chip {
static int isl29018_set_integration_time(struct isl29018_chip *chip,
unsigned int utime)
{
- int i, ret;
+ unsigned int i;
+ int ret;
unsigned int int_time, new_int_time;
for (i = 0; i < ARRAY_SIZE(isl29018_int_utimes[chip->type]); ++i) {
@@ -145,7 +136,8 @@ static int isl29018_set_integration_time(struct isl29018_chip *chip,
static int isl29018_set_scale(struct isl29018_chip *chip, int scale, int uscale)
{
- int i, ret;
+ unsigned int i;
+ int ret;
struct isl29018_scale new_scale;
for (i = 0; i < ARRAY_SIZE(isl29018_scales[chip->int_time]); ++i) {
@@ -276,29 +268,35 @@ static int isl29018_read_proximity_ir(struct isl29018_chip *chip, int scheme,
return 0;
}
-static ssize_t isl29018_show_scale_available(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t in_illuminance_scale_available_show
+ (struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct isl29018_chip *chip = iio_priv(indio_dev);
- int i, len = 0;
+ unsigned int i;
+ int len = 0;
+ mutex_lock(&chip->lock);
for (i = 0; i < ARRAY_SIZE(isl29018_scales[chip->int_time]); ++i)
len += sprintf(buf + len, "%d.%06d ",
isl29018_scales[chip->int_time][i].scale,
isl29018_scales[chip->int_time][i].uscale);
+ mutex_unlock(&chip->lock);
buf[len - 1] = '\n';
return len;
}
-static ssize_t isl29018_show_int_time_available(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t in_illuminance_integration_time_available_show
+ (struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct isl29018_chip *chip = iio_priv(indio_dev);
- int i, len = 0;
+ unsigned int i;
+ int len = 0;
for (i = 0; i < ARRAY_SIZE(isl29018_int_utimes[chip->type]); ++i)
len += sprintf(buf + len, "0.%06d ",
@@ -309,9 +307,27 @@ static ssize_t isl29018_show_int_time_available(struct device *dev,
return len;
}
-static ssize_t isl29018_show_prox_infrared_suppression(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+/*
+ * From ISL29018 Data Sheet (FN6619.4, Oct 8, 2012) regarding the
+ * infrared suppression:
+ *
+ * Proximity Sensing Scheme: Bit 7. This bit programs the function
+ * of the proximity detection. Logic 0 of this bit, Scheme 0, makes
+ * full n (4, 8, 12, 16) bits (unsigned) proximity detection. The range
+ * of Scheme 0 proximity count is from 0 to 2^n. Logic 1 of this bit,
+ * Scheme 1, makes n-1 (3, 7, 11, 15) bits (2's complementary)
+ * proximity_less_ambient detection. The range of Scheme 1
+ * proximity count is from -2^(n-1) to 2^(n-1). The sign bit is extended
+ * for resolutions less than 16. While Scheme 0 has wider dynamic
+ * range, Scheme 1 proximity detection is less affected by the
+ * ambient IR noise variation.
+ *
+ * 0 Sensing IR from LED and ambient
+ * 1 Sensing IR from LED with ambient IR rejection
+ */
+static ssize_t proximity_on_chip_ambient_infrared_suppression_show
+ (struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct isl29018_chip *chip = iio_priv(indio_dev);
@@ -323,9 +339,9 @@ static ssize_t isl29018_show_prox_infrared_suppression(struct device *dev,
return sprintf(buf, "%d\n", chip->prox_scheme);
}
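The Scheme 1 description quoted above implies that a reading taken at less than 16-bit resolution is an (n-1)-bit two's complement count whose sign bit must be extended by whoever consumes the raw value. Purely as an illustration (this is not code from the driver; the raw variable and the 12-bit resolution are assumed), the generic kernel helper handles that step:

/* Scheme 1 at 12-bit ADC resolution: an 11-bit two's complement count. */
s32 prox = sign_extend32(raw, 10);	/* bit 10 holds the sign */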
-static ssize_t isl29018_store_prox_infrared_suppression(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t proximity_on_chip_ambient_infrared_suppression_store
+ (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct isl29018_chip *chip = iio_priv(indio_dev);
@@ -357,6 +373,10 @@ static int isl29018_write_raw(struct iio_dev *indio_dev,
int ret = -EINVAL;
mutex_lock(&chip->lock);
+ if (chip->suspended) {
+ ret = -EBUSY;
+ goto write_done;
+ }
switch (mask) {
case IIO_CHAN_INFO_CALIBSCALE:
if (chan->type == IIO_LIGHT) {
@@ -366,13 +386,8 @@ static int isl29018_write_raw(struct iio_dev *indio_dev,
}
break;
case IIO_CHAN_INFO_INT_TIME:
- if (chan->type == IIO_LIGHT) {
- if (val) {
- mutex_unlock(&chip->lock);
- return -EINVAL;
- }
+ if (chan->type == IIO_LIGHT && !val)
ret = isl29018_set_integration_time(chip, val2);
- }
break;
case IIO_CHAN_INFO_SCALE:
if (chan->type == IIO_LIGHT)
@@ -381,6 +396,8 @@ static int isl29018_write_raw(struct iio_dev *indio_dev,
default:
break;
}
+
+write_done:
mutex_unlock(&chip->lock);
return ret;
@@ -397,8 +414,8 @@ static int isl29018_read_raw(struct iio_dev *indio_dev,
mutex_lock(&chip->lock);
if (chip->suspended) {
- mutex_unlock(&chip->lock);
- return -EBUSY;
+ ret = -EBUSY;
+ goto read_done;
}
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -445,7 +462,10 @@ static int isl29018_read_raw(struct iio_dev *indio_dev,
default:
break;
}
+
+read_done:
mutex_unlock(&chip->lock);
+
return ret;
}
@@ -482,14 +502,9 @@ static const struct iio_chan_spec isl29023_channels[] = {
ISL29018_IR_CHANNEL,
};
-static IIO_DEVICE_ATTR(in_illuminance_integration_time_available, S_IRUGO,
- isl29018_show_int_time_available, NULL, 0);
-static IIO_DEVICE_ATTR(in_illuminance_scale_available, S_IRUGO,
- isl29018_show_scale_available, NULL, 0);
-static IIO_DEVICE_ATTR(proximity_on_chip_ambient_infrared_suppression,
- S_IRUGO | S_IWUSR,
- isl29018_show_prox_infrared_suppression,
- isl29018_store_prox_infrared_suppression, 0);
+static IIO_DEVICE_ATTR_RO(in_illuminance_integration_time_available, 0);
+static IIO_DEVICE_ATTR_RO(in_illuminance_scale_available, 0);
+static IIO_DEVICE_ATTR_RW(proximity_on_chip_ambient_infrared_suppression, 0);
#define ISL29018_DEV_ATTR(name) (&iio_dev_attr_##name.dev_attr.attr)
@@ -514,30 +529,6 @@ static const struct attribute_group isl29023_group = {
.attrs = isl29023_attributes,
};
-static int isl29035_detect(struct isl29018_chip *chip)
-{
- int status;
- unsigned int id;
- struct device *dev = regmap_get_device(chip->regmap);
-
- status = regmap_read(chip->regmap, ISL29035_REG_DEVICE_ID, &id);
- if (status < 0) {
- dev_err(dev,
- "Error reading ID register with error %d\n",
- status);
- return status;
- }
-
- id = (id & ISL29035_DEVICE_ID_MASK) >> ISL29035_DEVICE_ID_SHIFT;
-
- if (id != ISL29035_DEVICE_ID)
- return -ENODEV;
-
- /* Clear brownout bit */
- return regmap_update_bits(chip->regmap, ISL29035_REG_DEVICE_ID,
- ISL29035_BOUT_MASK, 0);
-}
-
enum {
isl29018,
isl29023,
@@ -550,12 +541,31 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
struct device *dev = regmap_get_device(chip->regmap);
if (chip->type == isl29035) {
- status = isl29035_detect(chip);
+ unsigned int id;
+
+ status = regmap_read(chip->regmap, ISL29035_REG_DEVICE_ID, &id);
+ if (status < 0) {
+ dev_err(dev,
+ "Error reading ID register with error %d\n",
+ status);
+ return status;
+ }
+
+ id = (id & ISL29035_DEVICE_ID_MASK) >> ISL29035_DEVICE_ID_SHIFT;
+
+ if (id != ISL29035_DEVICE_ID)
+ return -ENODEV;
+
+ /* Clear brownout bit */
+ status = regmap_update_bits(chip->regmap,
+ ISL29035_REG_DEVICE_ID,
+ ISL29035_BOUT_MASK, 0);
if (status < 0)
return status;
}
- /* Code added per Intersil Application Note 1534:
+ /*
+ * Code added per Intersil Application Note 1534:
* When VDD sinks to approximately 1.8V or below, some of
* the part's registers may change their state. When VDD
* recovers to 2.25V (or greater), the part may thus be in an
@@ -582,7 +592,8 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
return status;
}
- /* See Intersil AN1534 comments above.
+ /*
+ * See Intersil AN1534 comments above.
* "Operating Mode" (COMMAND1) register is reprogrammed when
* data is read from the device.
*/
@@ -605,12 +616,10 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
status = isl29018_set_integration_time(chip,
isl29018_int_utimes[chip->type][chip->int_time]);
- if (status < 0) {
+ if (status < 0)
dev_err(dev, "Init of isl29018 fails\n");
- return status;
- }
- return 0;
+ return status;
}
static const struct iio_info isl29018_info = {
@@ -713,6 +722,7 @@ static int isl29018_probe(struct i2c_client *client,
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
if (!indio_dev)
return -ENOMEM;
+
chip = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
@@ -752,6 +762,7 @@ static int isl29018_probe(struct i2c_client *client,
indio_dev->name = name;
indio_dev->dev.parent = &client->dev;
indio_dev->modes = INDIO_DIRECT_MODE;
+
return devm_iio_device_register(&client->dev, indio_dev);
}
@@ -762,13 +773,15 @@ static int isl29018_suspend(struct device *dev)
mutex_lock(&chip->lock);
- /* Since this driver uses only polling commands, we are by default in
+ /*
+ * Since this driver uses only polling commands, we are by default in
* auto shutdown (ie, power-down) mode.
* So we do not have much to do here.
*/
chip->suspended = true;
mutex_unlock(&chip->lock);
+
return 0;
}
@@ -784,6 +797,7 @@ static int isl29018_resume(struct device *dev)
chip->suspended = false;
mutex_unlock(&chip->lock);
+
return err;
}
@@ -807,7 +821,6 @@ static const struct i2c_device_id isl29018_id[] = {
{"isl29035", isl29035},
{}
};
-
MODULE_DEVICE_TABLE(i2c, isl29018_id);
static const struct of_device_id isl29018_of_match[] = {
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 3afc53a3d0b6..b30e0c1c6cc4 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -631,14 +631,16 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_PROCESSED:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
-
switch (chan->type) {
case IIO_LIGHT:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
mutex_lock(&data->lock_als);
ret = ltr501_read_als(data, buf);
mutex_unlock(&data->lock_als);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
*val = ltr501_calculate_lux(le16_to_cpu(buf[1]),
@@ -648,8 +650,9 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch (chan->type) {
case IIO_INTENSITY:
@@ -657,21 +660,28 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
ret = ltr501_read_als(data, buf);
mutex_unlock(&data->lock_als);
if (ret < 0)
- return ret;
+ break;
*val = le16_to_cpu(chan->address == LTR501_ALS_DATA1 ?
buf[0] : buf[1]);
- return IIO_VAL_INT;
+ ret = IIO_VAL_INT;
+ break;
case IIO_PROXIMITY:
mutex_lock(&data->lock_ps);
ret = ltr501_read_ps(data);
mutex_unlock(&data->lock_ps);
if (ret < 0)
- return ret;
+ break;
*val = ret & LTR501_PS_DATA_MASK;
- return IIO_VAL_INT;
+ ret = IIO_VAL_INT;
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_INTENSITY:
@@ -729,8 +739,9 @@ static int ltr501_write_raw(struct iio_dev *indio_dev,
int i, ret, freq_val, freq_val2;
struct ltr501_chip_info *info = data->chip_info;
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
@@ -739,85 +750,105 @@ static int ltr501_write_raw(struct iio_dev *indio_dev,
i = ltr501_get_gain_index(info->als_gain,
info->als_gain_tbl_size,
val, val2);
- if (i < 0)
- return -EINVAL;
+ if (i < 0) {
+ ret = -EINVAL;
+ break;
+ }
data->als_contr &= ~info->als_gain_mask;
data->als_contr |= i << info->als_gain_shift;
- return regmap_write(data->regmap, LTR501_ALS_CONTR,
- data->als_contr);
+ ret = regmap_write(data->regmap, LTR501_ALS_CONTR,
+ data->als_contr);
+ break;
case IIO_PROXIMITY:
i = ltr501_get_gain_index(info->ps_gain,
info->ps_gain_tbl_size,
val, val2);
- if (i < 0)
- return -EINVAL;
+ if (i < 0) {
+ ret = -EINVAL;
+ break;
+ }
data->ps_contr &= ~LTR501_CONTR_PS_GAIN_MASK;
data->ps_contr |= i << LTR501_CONTR_PS_GAIN_SHIFT;
- return regmap_write(data->regmap, LTR501_PS_CONTR,
- data->ps_contr);
+ ret = regmap_write(data->regmap, LTR501_PS_CONTR,
+ data->ps_contr);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+ break;
+
case IIO_CHAN_INFO_INT_TIME:
switch (chan->type) {
case IIO_INTENSITY:
- if (val != 0)
- return -EINVAL;
+ if (val != 0) {
+ ret = -EINVAL;
+ break;
+ }
mutex_lock(&data->lock_als);
- i = ltr501_set_it_time(data, val2);
+ ret = ltr501_set_it_time(data, val2);
mutex_unlock(&data->lock_als);
- return i;
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+ break;
+
case IIO_CHAN_INFO_SAMP_FREQ:
switch (chan->type) {
case IIO_INTENSITY:
ret = ltr501_als_read_samp_freq(data, &freq_val,
&freq_val2);
if (ret < 0)
- return ret;
+ break;
ret = ltr501_als_write_samp_freq(data, val, val2);
if (ret < 0)
- return ret;
+ break;
/* update persistence count when changing frequency */
ret = ltr501_write_intr_prst(data, chan->type,
0, data->als_period);
if (ret < 0)
- return ltr501_als_write_samp_freq(data,
- freq_val,
- freq_val2);
- return ret;
+ ret = ltr501_als_write_samp_freq(data, freq_val,
+ freq_val2);
+ break;
case IIO_PROXIMITY:
ret = ltr501_ps_read_samp_freq(data, &freq_val,
&freq_val2);
if (ret < 0)
- return ret;
+ break;
ret = ltr501_ps_write_samp_freq(data, val, val2);
if (ret < 0)
- return ret;
+ break;
/* update persistence count when changing frequency */
ret = ltr501_write_intr_prst(data, chan->type,
0, data->ps_period);
if (ret < 0)
- return ltr501_ps_write_samp_freq(data,
- freq_val,
- freq_val2);
- return ret;
+ ret = ltr501_ps_write_samp_freq(data, freq_val,
+ freq_val2);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
}
- return -EINVAL;
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
}
static int ltr501_read_thresh(struct iio_dev *indio_dev,
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index 6511b20a2a29..a144ca3461fc 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -204,17 +204,18 @@ static int max44000_write_alspga(struct max44000_data *data, int val)
static int max44000_read_alsval(struct max44000_data *data)
{
u16 regval;
+ __be16 val;
int alstim, ret;
ret = regmap_bulk_read(data->regmap, MAX44000_REG_ALS_DATA_HI,
- &regval, sizeof(regval));
+ &val, sizeof(val));
if (ret < 0)
return ret;
alstim = ret = max44000_read_alstim(data);
if (ret < 0)
return ret;
- regval = be16_to_cpu(regval);
+ regval = be16_to_cpu(val);
/*
* Overflow is explained on datasheet page 17.
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
new file mode 100644
index 000000000000..a78b6025c465
--- /dev/null
+++ b/drivers/iio/light/tsl2583.c
@@ -0,0 +1,913 @@
+/*
+ * Device driver for monitoring ambient light intensity (lux)
+ * within the TAOS tsl258x family of devices (tsl2580, tsl2581, tsl2583).
+ *
+ * Copyright (c) 2011, TAOS Corporation.
+ * Copyright (c) 2016 Brian Masney <masneyb@onstation.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/* Device Registers and Masks */
+#define TSL2583_CNTRL 0x00
+#define TSL2583_ALS_TIME 0x01
+#define TSL2583_INTERRUPT 0x02
+#define TSL2583_GAIN 0x07
+#define TSL2583_REVID 0x11
+#define TSL2583_CHIPID 0x12
+#define TSL2583_ALS_CHAN0LO 0x14
+#define TSL2583_ALS_CHAN0HI 0x15
+#define TSL2583_ALS_CHAN1LO 0x16
+#define TSL2583_ALS_CHAN1HI 0x17
+#define TSL2583_TMR_LO 0x18
+#define TSL2583_TMR_HI 0x19
+
+/* tsl2583 cmd reg masks */
+#define TSL2583_CMD_REG 0x80
+#define TSL2583_CMD_SPL_FN 0x60
+#define TSL2583_CMD_ALS_INT_CLR 0x01
+
+/* tsl2583 cntrl reg masks */
+#define TSL2583_CNTL_ADC_ENBL 0x02
+#define TSL2583_CNTL_PWR_OFF 0x00
+#define TSL2583_CNTL_PWR_ON 0x01
+
+/* tsl2583 status reg masks */
+#define TSL2583_STA_ADC_VALID 0x01
+#define TSL2583_STA_ADC_INTR 0x10
+
+/* Lux calculation constants */
+#define TSL2583_LUX_CALC_OVER_FLOW 65535
+
+#define TSL2583_INTERRUPT_DISABLED 0x00
+
+#define TSL2583_CHIP_ID 0x90
+#define TSL2583_CHIP_ID_MASK 0xf0
+
+/* Per-device data */
+struct tsl2583_als_info {
+ u16 als_ch0;
+ u16 als_ch1;
+ u16 lux;
+};
+
+struct tsl2583_lux {
+ unsigned int ratio;
+ unsigned int ch0;
+ unsigned int ch1;
+};
+
+static const struct tsl2583_lux tsl2583_default_lux[] = {
+ { 9830, 8520, 15729 },
+ { 12452, 10807, 23344 },
+ { 14746, 6383, 11705 },
+ { 17695, 4063, 6554 },
+ { 0, 0, 0 } /* Termination segment */
+};
+
+#define TSL2583_MAX_LUX_TABLE_ENTRIES 11
+
+struct tsl2583_settings {
+ int als_time;
+ int als_gain;
+ int als_gain_trim;
+ int als_cal_target;
+
+ /*
+ * This structure is intentionally large to accommodate updates via
+ * sysfs. Sized to 11 = max 10 segments + 1 termination segment.
+ * The assumption is that one and only one type of glass is used.
+ */
+ struct tsl2583_lux als_device_lux[TSL2583_MAX_LUX_TABLE_ENTRIES];
+};
+
+struct tsl2583_chip {
+ struct mutex als_mutex;
+ struct i2c_client *client;
+ struct tsl2583_als_info als_cur_info;
+ struct tsl2583_settings als_settings;
+ int als_time_scale;
+ int als_saturation;
+ bool suspended;
+};
+
+struct gainadj {
+ s16 ch0;
+ s16 ch1;
+ s16 mean;
+};
+
+/* Index = (0 - 3) Used to validate the gain selection index */
+static const struct gainadj gainadj[] = {
+ { 1, 1, 1 },
+ { 8, 8, 8 },
+ { 16, 16, 16 },
+ { 107, 115, 111 }
+};
+
+/*
+ * Provides initial operational parameter defaults.
+ * These defaults may be changed through the device's sysfs files.
+ */
+static void tsl2583_defaults(struct tsl2583_chip *chip)
+{
+ /*
+ * The integration time must be a multiple of 50 ms and within the
+ * range [50, 650] ms (the range accepted by tsl2583_write_raw()).
+ */
+ chip->als_settings.als_time = 100;
+
+ /*
+ * This is an index into the gainadj table. Assume clear glass as the
+ * default.
+ */
+ chip->als_settings.als_gain = 0;
+
+ /* Default gain trim to account for aperture effects */
+ chip->als_settings.als_gain_trim = 1000;
+
+ /* Known external ALS reading used for calibration */
+ chip->als_settings.als_cal_target = 130;
+
+ /* Default lux table. */
+ memcpy(chip->als_settings.als_device_lux, tsl2583_default_lux,
+ sizeof(tsl2583_default_lux));
+}
+
+/*
+ * Reads and calculates the current lux value.
+ * The raw ch0 and ch1 values of the ambient light sensed in the last
+ * integration cycle are read from the device.
+ * The time scale factor array values are adjusted based on the integration
+ * time. The raw values are multiplied by a scale factor, and the device gain
+ * is obtained using the gain index. Limit checks are done next, then the
+ * ratio of a multiple of the ch1 value to the ch0 value is calculated. The
+ * array als_device_lux[] declared above is then scanned to find the first
+ * ratio value that is just above the calculated ratio. The ch0 and ch1
+ * multiplier constants in the array are then used, along with the time scale
+ * factor array values, to calculate the lux.
+ */
+static int tsl2583_get_lux(struct iio_dev *indio_dev)
+{
+ u16 ch0, ch1; /* separated ch0/ch1 data from device */
+ u32 lux; /* raw lux calculated from device data */
+ u64 lux64;
+ u32 ratio;
+ u8 buf[5];
+ struct tsl2583_lux *p;
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int i, ret;
+
+ ret = i2c_smbus_read_byte_data(chip->client, TSL2583_CMD_REG);
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "%s: failed to read CMD_REG register\n",
+ __func__);
+ goto done;
+ }
+
+ /* is data new & valid */
+ if (!(ret & TSL2583_STA_ADC_INTR)) {
+ dev_err(&chip->client->dev, "%s: data not valid; returning last value\n",
+ __func__);
+ ret = chip->als_cur_info.lux; /* return LAST VALUE */
+ goto done;
+ }
+
+ for (i = 0; i < 4; i++) {
+ int reg = TSL2583_CMD_REG | (TSL2583_ALS_CHAN0LO + i);
+
+ ret = i2c_smbus_read_byte_data(chip->client, reg);
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "%s: failed to read register %x\n",
+ __func__, reg);
+ goto done;
+ }
+ buf[i] = ret;
+ }
+
+ /*
+ * Clear the pending interrupt status bit on the chip to allow the next
+ * integration cycle to start. This has to be done even though this
+ * driver currently does not support interrupts.
+ */
+ ret = i2c_smbus_write_byte(chip->client,
+ (TSL2583_CMD_REG | TSL2583_CMD_SPL_FN |
+ TSL2583_CMD_ALS_INT_CLR));
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "%s: failed to clear the interrupt bit\n",
+ __func__);
+ goto done; /* have no data, so return failure */
+ }
+
+ /* extract ALS/lux data */
+ ch0 = le16_to_cpup((const __le16 *)&buf[0]);
+ ch1 = le16_to_cpup((const __le16 *)&buf[2]);
+
+ chip->als_cur_info.als_ch0 = ch0;
+ chip->als_cur_info.als_ch1 = ch1;
+
+ if ((ch0 >= chip->als_saturation) || (ch1 >= chip->als_saturation))
+ goto return_max;
+
+ if (!ch0) {
+ /*
+ * The sensor appears to be in total darkness so set the
+ * calculated lux to 0 and return early to avoid a division by
+ * zero below when calculating the ratio.
+ */
+ ret = 0;
+ chip->als_cur_info.lux = 0;
+ goto done;
+ }
+
+ /* calculate ratio */
+ ratio = (ch1 << 15) / ch0;
+
+ /* convert to unscaled lux using the pointer to the table */
+ for (p = (struct tsl2583_lux *)chip->als_settings.als_device_lux;
+ p->ratio != 0 && p->ratio < ratio; p++)
+ ;
+
+ if (p->ratio == 0) {
+ lux = 0;
+ } else {
+ u32 ch0lux, ch1lux;
+
+ ch0lux = ((ch0 * p->ch0) +
+ (gainadj[chip->als_settings.als_gain].ch0 >> 1))
+ / gainadj[chip->als_settings.als_gain].ch0;
+ ch1lux = ((ch1 * p->ch1) +
+ (gainadj[chip->als_settings.als_gain].ch1 >> 1))
+ / gainadj[chip->als_settings.als_gain].ch1;
+
+ /* note: lux is 31 bit max at this point */
+ if (ch1lux > ch0lux) {
+ dev_dbg(&chip->client->dev, "%s: No Data - Returning 0\n",
+ __func__);
+ ret = 0;
+ chip->als_cur_info.lux = 0;
+ goto done;
+ }
+
+ lux = ch0lux - ch1lux;
+ }
+
+ /* adjust for active time scale */
+ if (chip->als_time_scale == 0)
+ lux = 0;
+ else
+ lux = (lux + (chip->als_time_scale >> 1)) /
+ chip->als_time_scale;
+
+ /*
+ * Adjust for active gain scale.
+ * The tsl2583_default_lux tables above have a factor of 8192 built in,
+ * so we need to shift right.
+ * User-specified gain provides a multiplier.
+ * Apply user-specified gain before shifting right to retain precision.
+ * Use 64 bits to avoid overflow on multiplication.
+ * Then go back to 32 bits before division to avoid using div_u64().
+ */
+ lux64 = lux;
+ lux64 = lux64 * chip->als_settings.als_gain_trim;
+ lux64 >>= 13;
+ lux = lux64;
+ lux = (lux + 500) / 1000;
+
+ if (lux > TSL2583_LUX_CALC_OVER_FLOW) { /* check for overflow */
+return_max:
+ lux = TSL2583_LUX_CALC_OVER_FLOW;
+ }
+
+ /* Update the structure with the latest VALID lux. */
+ chip->als_cur_info.lux = lux;
+ ret = lux;
+
+done:
+ return ret;
+}
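As a worked example of the final scaling above (values chosen for illustration only): with a time-scaled lux of 81920 counts and the default als_gain_trim of 1000, lux64 = 81920 * 1000 = 81,920,000; shifting right by 13 divides by 8192 and gives 10,000; (10,000 + 500) / 1000 then rounds to a reported value of 10 lux.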
+
+/*
+ * Obtain single reading and calculate the als_gain_trim (later used
+ * to derive actual lux).
+ * Return updated gain_trim value.
+ */
+static int tsl2583_als_calibrate(struct iio_dev *indio_dev)
+{
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ unsigned int gain_trim_val;
+ int ret;
+ int lux_val;
+
+ ret = i2c_smbus_read_byte_data(chip->client,
+ TSL2583_CMD_REG | TSL2583_CNTRL);
+ if (ret < 0) {
+ dev_err(&chip->client->dev,
+ "%s: failed to read from the CNTRL register\n",
+ __func__);
+ return ret;
+ }
+
+ if ((ret & (TSL2583_CNTL_ADC_ENBL | TSL2583_CNTL_PWR_ON))
+ != (TSL2583_CNTL_ADC_ENBL | TSL2583_CNTL_PWR_ON)) {
+ dev_err(&chip->client->dev,
+ "%s: Device is not powered on and/or ADC is not enabled\n",
+ __func__);
+ return -EINVAL;
+ } else if ((ret & TSL2583_STA_ADC_VALID) != TSL2583_STA_ADC_VALID) {
+ dev_err(&chip->client->dev,
+ "%s: The two ADC channels have not completed an integration cycle\n",
+ __func__);
+ return -ENODATA;
+ }
+
+ lux_val = tsl2583_get_lux(indio_dev);
+ if (lux_val < 0) {
+ dev_err(&chip->client->dev, "%s: failed to get lux\n",
+ __func__);
+ return lux_val;
+ }
+
+ gain_trim_val = (unsigned int)(((chip->als_settings.als_cal_target)
+ * chip->als_settings.als_gain_trim) / lux_val);
+ if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
+ dev_err(&chip->client->dev,
+ "%s: trim_val of %d is not within the range [250, 4000]\n",
+ __func__, gain_trim_val);
+ return -ENODATA;
+ }
+
+ chip->als_settings.als_gain_trim = (int)gain_trim_val;
+
+ return 0;
+}
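For example (illustrative numbers, not from the patch): with the default als_cal_target of 130 and als_gain_trim of 1000, a current reading of 200 lux yields gain_trim_val = 130 * 1000 / 200 = 650, which is within [250, 4000] and so becomes the new als_gain_trim.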
+
+static int tsl2583_set_als_time(struct tsl2583_chip *chip)
+{
+ int als_count, als_time, ret;
+ u8 val;
+
+ /* determine als integration register */
+ als_count = (chip->als_settings.als_time * 100 + 135) / 270;
+ if (!als_count)
+ als_count = 1; /* ensure at least one cycle */
+
+ /* convert back to time (encompasses overrides) */
+ als_time = (als_count * 27 + 5) / 10;
+
+ val = 256 - als_count;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2583_CMD_REG | TSL2583_ALS_TIME,
+ val);
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "%s: failed to set the als time to %d\n",
+ __func__, val);
+ return ret;
+ }
+
+ /* set chip struct re scaling and saturation */
+ chip->als_saturation = als_count * 922; /* 90% of full scale */
+ chip->als_time_scale = (als_time + 25) / 50;
+
+ return ret;
+}
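For the default als_time of 100 ms, the arithmetic above gives als_count = (100 * 100 + 135) / 270 = 37, a back-computed als_time of (37 * 27 + 5) / 10 = 100, a register value of 256 - 37 = 219, als_saturation = 37 * 922 = 34114 and als_time_scale = (100 + 25) / 50 = 2 (an illustrative walk-through, not additional driver code).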
+
+static int tsl2583_set_als_gain(struct tsl2583_chip *chip)
+{
+ int ret;
+
+ /* Set the gain based on als_settings struct */
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2583_CMD_REG | TSL2583_GAIN,
+ chip->als_settings.als_gain);
+ if (ret < 0)
+ dev_err(&chip->client->dev,
+ "%s: failed to set the gain to %d\n", __func__,
+ chip->als_settings.als_gain);
+
+ return ret;
+}
+
+static int tsl2583_set_power_state(struct tsl2583_chip *chip, u8 state)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2583_CMD_REG | TSL2583_CNTRL, state);
+ if (ret < 0)
+ dev_err(&chip->client->dev,
+ "%s: failed to set the power state to %d\n", __func__,
+ state);
+
+ return ret;
+}
+
+/*
+ * Turn the device on.
+ * Configuration must be set before calling this function.
+ */
+static int tsl2583_chip_init_and_power_on(struct iio_dev *indio_dev)
+{
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ /* Power on the device; ADC off. */
+ ret = tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_ON);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2583_CMD_REG | TSL2583_INTERRUPT,
+ TSL2583_INTERRUPT_DISABLED);
+ if (ret < 0) {
+ dev_err(&chip->client->dev,
+ "%s: failed to disable interrupts\n", __func__);
+ return ret;
+ }
+
+ ret = tsl2583_set_als_time(chip);
+ if (ret < 0)
+ return ret;
+
+ ret = tsl2583_set_als_gain(chip);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(3000, 3500);
+
+ ret = tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_ON |
+ TSL2583_CNTL_ADC_ENBL);
+ if (ret < 0)
+ return ret;
+
+ chip->suspended = false;
+
+ return ret;
+}
+
+/* Sysfs Interface Functions */
+
+static ssize_t in_illuminance_input_target_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&chip->als_mutex);
+ ret = sprintf(buf, "%d\n", chip->als_settings.als_cal_target);
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static ssize_t in_illuminance_input_target_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int value;
+
+ if (kstrtoint(buf, 0, &value) || !value)
+ return -EINVAL;
+
+ mutex_lock(&chip->als_mutex);
+ chip->als_settings.als_cal_target = value;
+ mutex_unlock(&chip->als_mutex);
+
+ return len;
+}
+
+static ssize_t in_illuminance_calibrate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int value, ret;
+
+ if (kstrtoint(buf, 0, &value) || value != 1)
+ return -EINVAL;
+
+ mutex_lock(&chip->als_mutex);
+
+ if (chip->suspended) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ ret = tsl2583_als_calibrate(indio_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = len;
+done:
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static ssize_t in_illuminance_lux_table_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ unsigned int i;
+ int offset = 0;
+
+ for (i = 0; i < ARRAY_SIZE(chip->als_settings.als_device_lux); i++) {
+ offset += sprintf(buf + offset, "%u,%u,%u,",
+ chip->als_settings.als_device_lux[i].ratio,
+ chip->als_settings.als_device_lux[i].ch0,
+ chip->als_settings.als_device_lux[i].ch1);
+ if (chip->als_settings.als_device_lux[i].ratio == 0) {
+ /*
+ * We just printed the first "0" entry.
+ * Now get rid of the extra "," and break.
+ */
+ offset--;
+ break;
+ }
+ }
+
+ offset += sprintf(buf + offset, "\n");
+
+ return offset;
+}
+
+static ssize_t in_illuminance_lux_table_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ const unsigned int max_ints = TSL2583_MAX_LUX_TABLE_ENTRIES * 3;
+ int value[TSL2583_MAX_LUX_TABLE_ENTRIES * 3 + 1];
+ int ret = -EINVAL;
+ unsigned int n;
+
+ mutex_lock(&chip->als_mutex);
+
+ get_options(buf, ARRAY_SIZE(value), value);
+
+ /*
+ * We now have an array of ints starting at value[1], and
+ * enumerated by value[0].
+ * We expect each group of three ints is one table entry,
+ * and the last table entry is all 0.
+ */
+ n = value[0];
+ if ((n % 3) || n < 6 || n > max_ints) {
+ dev_err(dev,
+ "%s: The number of entries in the lux table must be a multiple of 3 and within the range [6, %d]\n",
+ __func__, max_ints);
+ goto done;
+ }
+ if ((value[n - 2] | value[n - 1] | value[n]) != 0) {
+ dev_err(dev, "%s: The last 3 entries in the lux table must be zeros.\n",
+ __func__);
+ goto done;
+ }
+
+ memcpy(chip->als_settings.als_device_lux, &value[1],
+ value[0] * sizeof(value[1]));
+
+ ret = len;
+
+done:
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
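To make the expected sysfs format concrete (an illustrative value, not from this patch): writing the string "9830,8520,15729,0,0,0" to in_illuminance_lux_table passes the checks above (six integers, a multiple of three, ending in a zero triplet) and installs a single-segment table matching the first default entry.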
+
+static IIO_CONST_ATTR(in_illuminance_calibscale_available, "1 8 16 111");
+static IIO_CONST_ATTR(in_illuminance_integration_time_available,
+ "0.000050 0.000100 0.000150 0.000200 0.000250 0.000300 0.000350 0.000400 0.000450 0.000500 0.000550 0.000600 0.000650");
+static IIO_DEVICE_ATTR_RW(in_illuminance_input_target, 0);
+static IIO_DEVICE_ATTR_WO(in_illuminance_calibrate, 0);
+static IIO_DEVICE_ATTR_RW(in_illuminance_lux_table, 0);
+
+static struct attribute *sysfs_attrs_ctrl[] = {
+ &iio_const_attr_in_illuminance_calibscale_available.dev_attr.attr,
+ &iio_const_attr_in_illuminance_integration_time_available.dev_attr.attr,
+ &iio_dev_attr_in_illuminance_input_target.dev_attr.attr,
+ &iio_dev_attr_in_illuminance_calibrate.dev_attr.attr,
+ &iio_dev_attr_in_illuminance_lux_table.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tsl2583_attribute_group = {
+ .attrs = sysfs_attrs_ctrl,
+};
+
+static const struct iio_chan_spec tsl2583_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ },
+ {
+ .type = IIO_LIGHT,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_BOTH,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ },
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ },
+};
+
+static int tsl2583_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int ret = -EINVAL;
+
+ mutex_lock(&chip->als_mutex);
+
+ if (chip->suspended) {
+ ret = -EBUSY;
+ goto read_done;
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_LIGHT) {
+ ret = tsl2583_get_lux(indio_dev);
+ if (ret < 0)
+ goto read_done;
+
+ /*
+ * From page 20 of the TSL2581, TSL2583 data
+ * sheet (TAOS134 − MARCH 2011):
+ *
+ * One of the photodiodes (channel 0) is
+ * sensitive to both visible and infrared light,
+ * while the second photodiode (channel 1) is
+ * sensitive primarily to infrared light.
+ */
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
+ *val = chip->als_cur_info.als_ch0;
+ else
+ *val = chip->als_cur_info.als_ch1;
+
+ ret = IIO_VAL_INT;
+ }
+ break;
+ case IIO_CHAN_INFO_PROCESSED:
+ if (chan->type == IIO_LIGHT) {
+ ret = tsl2583_get_lux(indio_dev);
+ if (ret < 0)
+ goto read_done;
+
+ *val = ret;
+ ret = IIO_VAL_INT;
+ }
+ break;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (chan->type == IIO_LIGHT) {
+ *val = chip->als_settings.als_gain_trim;
+ ret = IIO_VAL_INT;
+ }
+ break;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (chan->type == IIO_LIGHT) {
+ *val = gainadj[chip->als_settings.als_gain].mean;
+ ret = IIO_VAL_INT;
+ }
+ break;
+ case IIO_CHAN_INFO_INT_TIME:
+ if (chan->type == IIO_LIGHT) {
+ *val = 0;
+ *val2 = chip->als_settings.als_time;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ }
+ break;
+ default:
+ break;
+ }
+
+read_done:
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static int tsl2583_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int ret = -EINVAL;
+
+ mutex_lock(&chip->als_mutex);
+
+ if (chip->suspended) {
+ ret = -EBUSY;
+ goto write_done;
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (chan->type == IIO_LIGHT) {
+ chip->als_settings.als_gain_trim = val;
+ ret = 0;
+ }
+ break;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (chan->type == IIO_LIGHT) {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(gainadj); i++) {
+ if (gainadj[i].mean == val) {
+ chip->als_settings.als_gain = i;
+ ret = tsl2583_set_als_gain(chip);
+ break;
+ }
+ }
+ }
+ break;
+ case IIO_CHAN_INFO_INT_TIME:
+ if (chan->type == IIO_LIGHT && !val && val2 >= 50 &&
+ val2 <= 650 && !(val2 % 50)) {
+ chip->als_settings.als_time = val2;
+ ret = tsl2583_set_als_time(chip);
+ }
+ break;
+ default:
+ break;
+ }
+
+write_done:
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static const struct iio_info tsl2583_info = {
+ .attrs = &tsl2583_attribute_group,
+ .driver_module = THIS_MODULE,
+ .read_raw = tsl2583_read_raw,
+ .write_raw = tsl2583_write_raw,
+};
+
+static int tsl2583_probe(struct i2c_client *clientp,
+ const struct i2c_device_id *idp)
+{
+ int ret;
+ struct tsl2583_chip *chip;
+ struct iio_dev *indio_dev;
+
+ if (!i2c_check_functionality(clientp->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&clientp->dev, "%s: i2c smbus byte data functionality is unsupported\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ indio_dev = devm_iio_device_alloc(&clientp->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ chip = iio_priv(indio_dev);
+ chip->client = clientp;
+ i2c_set_clientdata(clientp, indio_dev);
+
+ mutex_init(&chip->als_mutex);
+ chip->suspended = true;
+
+ ret = i2c_smbus_read_byte_data(clientp,
+ TSL2583_CMD_REG | TSL2583_CHIPID);
+ if (ret < 0) {
+ dev_err(&clientp->dev,
+ "%s: failed to read the chip ID register\n", __func__);
+ return ret;
+ }
+
+ if ((ret & TSL2583_CHIP_ID_MASK) != TSL2583_CHIP_ID) {
+ dev_err(&clientp->dev, "%s: received an unknown chip ID %x\n",
+ __func__, ret);
+ return -EINVAL;
+ }
+
+ indio_dev->info = &tsl2583_info;
+ indio_dev->channels = tsl2583_channels;
+ indio_dev->num_channels = ARRAY_SIZE(tsl2583_channels);
+ indio_dev->dev.parent = &clientp->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = chip->client->name;
+
+ ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
+ if (ret) {
+ dev_err(&clientp->dev, "%s: iio registration failed\n",
+ __func__);
+ return ret;
+ }
+
+ /* Load up the V2 defaults (these are hard coded defaults for now) */
+ tsl2583_defaults(chip);
+
+ /* Make sure the chip is on */
+ ret = tsl2583_chip_init_and_power_on(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ dev_info(&clientp->dev, "Light sensor found.\n");
+
+ return 0;
+}
+
+static int __maybe_unused tsl2583_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&chip->als_mutex);
+
+ ret = tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_OFF);
+ chip->suspended = true;
+
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static int __maybe_unused tsl2583_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&chip->als_mutex);
+
+ ret = tsl2583_chip_init_and_power_on(indio_dev);
+
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(tsl2583_pm_ops, tsl2583_suspend, tsl2583_resume);
+
+static struct i2c_device_id tsl2583_idtable[] = {
+ { "tsl2580", 0 },
+ { "tsl2581", 1 },
+ { "tsl2583", 2 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tsl2583_idtable);
+
+static const struct of_device_id tsl2583_of_match[] = {
+ { .compatible = "amstaos,tsl2580", },
+ { .compatible = "amstaos,tsl2581", },
+ { .compatible = "amstaos,tsl2583", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tsl2583_of_match);
+
+/* Driver definition */
+static struct i2c_driver tsl2583_driver = {
+ .driver = {
+ .name = "tsl2583",
+ .pm = &tsl2583_pm_ops,
+ .of_match_table = tsl2583_of_match,
+ },
+ .id_table = tsl2583_idtable,
+ .probe = tsl2583_probe,
+};
+module_i2c_driver(tsl2583_driver);
+
+MODULE_AUTHOR("J. August Brenner <jbrenner@taosinc.com>");
+MODULE_AUTHOR("Brian Masney <masneyb@onstation.org>");
+MODULE_DESCRIPTION("TAOS tsl2583 ambient light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 217353145676..ce09d771c1fb 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -287,7 +287,7 @@ static int ak8974_await_drdy(struct ak8974 *ak8974)
return 0;
}
-static int ak8974_getresult(struct ak8974 *ak8974, s16 *result)
+static int ak8974_getresult(struct ak8974 *ak8974, __le16 *result)
{
unsigned int src;
int ret;
@@ -395,7 +395,7 @@ static int ak8974_selftest(struct ak8974 *ak8974)
static int ak8974_get_u16_val(struct ak8974 *ak8974, u8 reg, u16 *val)
{
int ret;
- u16 bulk;
+ __le16 bulk;
ret = regmap_bulk_read(ak8974->map, reg, &bulk, 2);
if (ret)
@@ -453,7 +453,7 @@ static int ak8974_read_raw(struct iio_dev *indio_dev,
long mask)
{
struct ak8974 *ak8974 = iio_priv(indio_dev);
- s16 hw_values[3];
+ __le16 hw_values[3];
int ret = -EINVAL;
pm_runtime_get_sync(&ak8974->i2c->dev);
@@ -494,7 +494,7 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev)
{
struct ak8974 *ak8974 = iio_priv(indio_dev);
int ret;
- s16 hw_values[8]; /* Three axes + 64bit padding */
+ __le16 hw_values[8]; /* Three axes + 64bit padding */
pm_runtime_get_sync(&ak8974->i2c->dev);
mutex_lock(&ak8974->lock);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index af8606cc7812..825369fb1c57 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -690,6 +690,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
struct ak8975_data *data = iio_priv(indio_dev);
const struct i2c_client *client = data->client;
const struct ak_def *def = data->def;
+ __le16 rval;
u16 buff;
int ret;
@@ -703,7 +704,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
ret = i2c_smbus_read_i2c_block_data_or_emulated(
client, def->data_regs[index],
- sizeof(buff), (u8*)&buff);
+ sizeof(rval), (u8*)&rval);
if (ret < 0)
goto exit;
@@ -713,7 +714,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
pm_runtime_put_autosuspend(&data->client->dev);
/* Swap bytes and convert to valid range. */
- buff = le16_to_cpu(buff);
+ buff = le16_to_cpu(rval);
*val = clamp_t(s16, buff, -def->range, def->range);
return IIO_VAL_INT;
@@ -813,6 +814,7 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
const struct ak_def *def = data->def;
int ret;
s16 buff[8]; /* 3 x 16 bits axis values + 1 aligned 64 bits timestamp */
+ __le16 fval[3];
mutex_lock(&data->lock);
@@ -826,17 +828,17 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
*/
ret = i2c_smbus_read_i2c_block_data_or_emulated(client,
def->data_regs[0],
- 3 * sizeof(buff[0]),
- (u8 *)buff);
+ 3 * sizeof(fval[0]),
+ (u8 *)fval);
if (ret < 0)
goto unlock;
mutex_unlock(&data->lock);
/* Clamp to valid range. */
- buff[0] = clamp_t(s16, le16_to_cpu(buff[0]), -def->range, def->range);
- buff[1] = clamp_t(s16, le16_to_cpu(buff[1]), -def->range, def->range);
- buff[2] = clamp_t(s16, le16_to_cpu(buff[2]), -def->range, def->range);
+ buff[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range);
+ buff[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range);
+ buff[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range);
iio_push_to_buffers_with_timestamp(indio_dev, buff,
iio_get_time_ns(indio_dev));
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index d8a0c8da8db0..0e791b02ed4a 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -42,9 +42,17 @@ enum magn_3d_channel {
MAGN_3D_CHANNEL_MAX,
};
+struct common_attributes {
+ int scale_pre_decml;
+ int scale_post_decml;
+ int scale_precision;
+ int value_offset;
+};
+
struct magn_3d_state {
struct hid_sensor_hub_callbacks callbacks;
- struct hid_sensor_common common_attributes;
+ struct hid_sensor_common magn_flux_attributes;
+ struct hid_sensor_common rot_attributes;
struct hid_sensor_hub_attribute_info magn[MAGN_3D_CHANNEL_MAX];
/* dynamically sized array to hold sensor values */
@@ -52,10 +60,8 @@ struct magn_3d_state {
/* array of pointers to sensor value */
u32 *magn_val_addr[MAGN_3D_CHANNEL_MAX];
- int scale_pre_decml;
- int scale_post_decml;
- int scale_precision;
- int value_offset;
+ struct common_attributes magn_flux_attr;
+ struct common_attributes rot_attr;
};
static const u32 magn_3d_addresses[MAGN_3D_CHANNEL_MAX] = {
@@ -162,41 +168,74 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
*val2 = 0;
switch (mask) {
case 0:
- hid_sensor_power_state(&magn_state->common_attributes, true);
+ hid_sensor_power_state(&magn_state->magn_flux_attributes, true);
report_id =
magn_state->magn[chan->address].report_id;
address = magn_3d_addresses[chan->address];
if (report_id >= 0)
*val = sensor_hub_input_attr_get_raw_value(
- magn_state->common_attributes.hsdev,
+ magn_state->magn_flux_attributes.hsdev,
HID_USAGE_SENSOR_COMPASS_3D, address,
report_id,
SENSOR_HUB_SYNC);
else {
*val = 0;
- hid_sensor_power_state(&magn_state->common_attributes,
- false);
+ hid_sensor_power_state(
+ &magn_state->magn_flux_attributes,
+ false);
return -EINVAL;
}
- hid_sensor_power_state(&magn_state->common_attributes, false);
+ hid_sensor_power_state(&magn_state->magn_flux_attributes,
+ false);
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SCALE:
- *val = magn_state->scale_pre_decml;
- *val2 = magn_state->scale_post_decml;
- ret_type = magn_state->scale_precision;
+ switch (chan->type) {
+ case IIO_MAGN:
+ *val = magn_state->magn_flux_attr.scale_pre_decml;
+ *val2 = magn_state->magn_flux_attr.scale_post_decml;
+ ret_type = magn_state->magn_flux_attr.scale_precision;
+ break;
+ case IIO_ROT:
+ *val = magn_state->rot_attr.scale_pre_decml;
+ *val2 = magn_state->rot_attr.scale_post_decml;
+ ret_type = magn_state->rot_attr.scale_precision;
+ break;
+ default:
+ ret_type = -EINVAL;
+ }
break;
case IIO_CHAN_INFO_OFFSET:
- *val = magn_state->value_offset;
- ret_type = IIO_VAL_INT;
+ switch (chan->type) {
+ case IIO_MAGN:
+ *val = magn_state->magn_flux_attr.value_offset;
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_ROT:
+ *val = magn_state->rot_attr.value_offset;
+ ret_type = IIO_VAL_INT;
+ break;
+ default:
+ ret_type = -EINVAL;
+ }
break;
case IIO_CHAN_INFO_SAMP_FREQ:
ret_type = hid_sensor_read_samp_freq_value(
- &magn_state->common_attributes, val, val2);
+ &magn_state->magn_flux_attributes, val, val2);
break;
case IIO_CHAN_INFO_HYSTERESIS:
- ret_type = hid_sensor_read_raw_hyst_value(
- &magn_state->common_attributes, val, val2);
+ switch (chan->type) {
+ case IIO_MAGN:
+ ret_type = hid_sensor_read_raw_hyst_value(
+ &magn_state->magn_flux_attributes, val, val2);
+ break;
+ case IIO_ROT:
+ ret_type = hid_sensor_read_raw_hyst_value(
+ &magn_state->rot_attributes, val, val2);
+ break;
+ default:
+ ret_type = -EINVAL;
+ }
break;
default:
ret_type = -EINVAL;
@@ -219,11 +258,21 @@ static int magn_3d_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
ret = hid_sensor_write_samp_freq_value(
- &magn_state->common_attributes, val, val2);
+ &magn_state->magn_flux_attributes, val, val2);
break;
case IIO_CHAN_INFO_HYSTERESIS:
- ret = hid_sensor_write_raw_hyst_value(
- &magn_state->common_attributes, val, val2);
+ switch (chan->type) {
+ case IIO_MAGN:
+ ret = hid_sensor_write_raw_hyst_value(
+ &magn_state->magn_flux_attributes, val, val2);
+ break;
+ case IIO_ROT:
+ ret = hid_sensor_write_raw_hyst_value(
+ &magn_state->rot_attributes, val, val2);
+ break;
+ default:
+ ret = -EINVAL;
+ }
break;
default:
ret = -EINVAL;
@@ -254,7 +303,7 @@ static int magn_3d_proc_event(struct hid_sensor_hub_device *hsdev,
struct magn_3d_state *magn_state = iio_priv(indio_dev);
dev_dbg(&indio_dev->dev, "magn_3d_proc_event\n");
- if (atomic_read(&magn_state->common_attributes.data_ready))
+ if (atomic_read(&magn_state->magn_flux_attributes.data_ready))
hid_sensor_push_data(indio_dev, magn_state->iio_vals);
return 0;
@@ -389,21 +438,48 @@ static int magn_3d_parse_report(struct platform_device *pdev,
dev_dbg(&pdev->dev, "magn_3d Setup %d IIO channels\n",
*chan_count);
- st->scale_precision = hid_sensor_format_scale(
+ st->magn_flux_attr.scale_precision = hid_sensor_format_scale(
HID_USAGE_SENSOR_COMPASS_3D,
&st->magn[CHANNEL_SCAN_INDEX_X],
- &st->scale_pre_decml, &st->scale_post_decml);
+ &st->magn_flux_attr.scale_pre_decml,
+ &st->magn_flux_attr.scale_post_decml);
+ st->rot_attr.scale_precision
+ = hid_sensor_format_scale(
+ HID_USAGE_SENSOR_ORIENT_COMP_MAGN_NORTH,
+ &st->magn[CHANNEL_SCAN_INDEX_NORTH_MAGN_TILT_COMP],
+ &st->rot_attr.scale_pre_decml,
+ &st->rot_attr.scale_post_decml);
/* Set Sensitivity field ids, when there is no individual modifier */
- if (st->common_attributes.sensitivity.index < 0) {
+ if (st->magn_flux_attributes.sensitivity.index < 0) {
sensor_hub_input_get_attribute_info(hsdev,
HID_FEATURE_REPORT, usage_id,
HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
HID_USAGE_SENSOR_DATA_ORIENTATION,
- &st->common_attributes.sensitivity);
+ &st->magn_flux_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->magn_flux_attributes.sensitivity.index,
+ st->magn_flux_attributes.sensitivity.report_id);
+ }
+ if (st->magn_flux_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_ORIENT_MAGN_FLUX,
+ &st->magn_flux_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->magn_flux_attributes.sensitivity.index,
+ st->magn_flux_attributes.sensitivity.report_id);
+ }
+ if (st->rot_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_ORIENT_COMP_MAGN_NORTH,
+ &st->rot_attributes.sensitivity);
dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
- st->common_attributes.sensitivity.index,
- st->common_attributes.sensitivity.report_id);
+ st->rot_attributes.sensitivity.index,
+ st->rot_attributes.sensitivity.report_id);
}
return 0;
@@ -428,16 +504,17 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, indio_dev);
magn_state = iio_priv(indio_dev);
- magn_state->common_attributes.hsdev = hsdev;
- magn_state->common_attributes.pdev = pdev;
+ magn_state->magn_flux_attributes.hsdev = hsdev;
+ magn_state->magn_flux_attributes.pdev = pdev;
ret = hid_sensor_parse_common_attributes(hsdev,
HID_USAGE_SENSOR_COMPASS_3D,
- &magn_state->common_attributes);
+ &magn_state->magn_flux_attributes);
if (ret) {
dev_err(&pdev->dev, "failed to setup common attributes\n");
return ret;
}
+ magn_state->rot_attributes = magn_state->magn_flux_attributes;
ret = magn_3d_parse_report(pdev, hsdev,
&channels, &chan_count,
@@ -460,9 +537,9 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
return ret;
}
- atomic_set(&magn_state->common_attributes.data_ready, 0);
+ atomic_set(&magn_state->magn_flux_attributes.data_ready, 0);
ret = hid_sensor_setup_trigger(indio_dev, name,
- &magn_state->common_attributes);
+ &magn_state->magn_flux_attributes);
if (ret < 0) {
dev_err(&pdev->dev, "trigger setup failed\n");
goto error_unreg_buffer_funcs;
@@ -489,7 +566,7 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&magn_state->common_attributes);
+ hid_sensor_remove_trigger(&magn_state->magn_flux_attributes);
error_unreg_buffer_funcs:
iio_triggered_buffer_cleanup(indio_dev);
return ret;
@@ -504,7 +581,7 @@ static int hid_magn_3d_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&magn_state->common_attributes);
+ hid_sensor_remove_trigger(&magn_state->magn_flux_attributes);
iio_triggered_buffer_cleanup(indio_dev);
return 0;
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 3e1f06b2224c..8e1b0861fbe4 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -46,139 +46,12 @@
#define ST_MAGN_FS_AVL_15000MG 15000
#define ST_MAGN_FS_AVL_16000MG 16000
-/* CUSTOM VALUES FOR SENSOR 0 */
-#define ST_MAGN_0_ODR_ADDR 0x00
-#define ST_MAGN_0_ODR_MASK 0x1c
-#define ST_MAGN_0_ODR_AVL_1HZ_VAL 0x00
-#define ST_MAGN_0_ODR_AVL_2HZ_VAL 0x01
-#define ST_MAGN_0_ODR_AVL_3HZ_VAL 0x02
-#define ST_MAGN_0_ODR_AVL_8HZ_VAL 0x03
-#define ST_MAGN_0_ODR_AVL_15HZ_VAL 0x04
-#define ST_MAGN_0_ODR_AVL_30HZ_VAL 0x05
-#define ST_MAGN_0_ODR_AVL_75HZ_VAL 0x06
-#define ST_MAGN_0_ODR_AVL_220HZ_VAL 0x07
-#define ST_MAGN_0_PW_ADDR 0x02
-#define ST_MAGN_0_PW_MASK 0x03
-#define ST_MAGN_0_PW_ON 0x00
-#define ST_MAGN_0_PW_OFF 0x03
-#define ST_MAGN_0_FS_ADDR 0x01
-#define ST_MAGN_0_FS_MASK 0xe0
-#define ST_MAGN_0_FS_AVL_1300_VAL 0x01
-#define ST_MAGN_0_FS_AVL_1900_VAL 0x02
-#define ST_MAGN_0_FS_AVL_2500_VAL 0x03
-#define ST_MAGN_0_FS_AVL_4000_VAL 0x04
-#define ST_MAGN_0_FS_AVL_4700_VAL 0x05
-#define ST_MAGN_0_FS_AVL_5600_VAL 0x06
-#define ST_MAGN_0_FS_AVL_8100_VAL 0x07
-#define ST_MAGN_0_FS_AVL_1300_GAIN_XY 1100
-#define ST_MAGN_0_FS_AVL_1900_GAIN_XY 855
-#define ST_MAGN_0_FS_AVL_2500_GAIN_XY 670
-#define ST_MAGN_0_FS_AVL_4000_GAIN_XY 450
-#define ST_MAGN_0_FS_AVL_4700_GAIN_XY 400
-#define ST_MAGN_0_FS_AVL_5600_GAIN_XY 330
-#define ST_MAGN_0_FS_AVL_8100_GAIN_XY 230
-#define ST_MAGN_0_FS_AVL_1300_GAIN_Z 980
-#define ST_MAGN_0_FS_AVL_1900_GAIN_Z 760
-#define ST_MAGN_0_FS_AVL_2500_GAIN_Z 600
-#define ST_MAGN_0_FS_AVL_4000_GAIN_Z 400
-#define ST_MAGN_0_FS_AVL_4700_GAIN_Z 355
-#define ST_MAGN_0_FS_AVL_5600_GAIN_Z 295
-#define ST_MAGN_0_FS_AVL_8100_GAIN_Z 205
-#define ST_MAGN_0_MULTIREAD_BIT false
-
-/* CUSTOM VALUES FOR SENSOR 1 */
-#define ST_MAGN_1_WAI_EXP 0x3c
-#define ST_MAGN_1_ODR_ADDR 0x00
-#define ST_MAGN_1_ODR_MASK 0x1c
-#define ST_MAGN_1_ODR_AVL_1HZ_VAL 0x00
-#define ST_MAGN_1_ODR_AVL_2HZ_VAL 0x01
-#define ST_MAGN_1_ODR_AVL_3HZ_VAL 0x02
-#define ST_MAGN_1_ODR_AVL_8HZ_VAL 0x03
-#define ST_MAGN_1_ODR_AVL_15HZ_VAL 0x04
-#define ST_MAGN_1_ODR_AVL_30HZ_VAL 0x05
-#define ST_MAGN_1_ODR_AVL_75HZ_VAL 0x06
-#define ST_MAGN_1_ODR_AVL_220HZ_VAL 0x07
-#define ST_MAGN_1_PW_ADDR 0x02
-#define ST_MAGN_1_PW_MASK 0x03
-#define ST_MAGN_1_PW_ON 0x00
-#define ST_MAGN_1_PW_OFF 0x03
-#define ST_MAGN_1_FS_ADDR 0x01
-#define ST_MAGN_1_FS_MASK 0xe0
-#define ST_MAGN_1_FS_AVL_1300_VAL 0x01
-#define ST_MAGN_1_FS_AVL_1900_VAL 0x02
-#define ST_MAGN_1_FS_AVL_2500_VAL 0x03
-#define ST_MAGN_1_FS_AVL_4000_VAL 0x04
-#define ST_MAGN_1_FS_AVL_4700_VAL 0x05
-#define ST_MAGN_1_FS_AVL_5600_VAL 0x06
-#define ST_MAGN_1_FS_AVL_8100_VAL 0x07
-#define ST_MAGN_1_FS_AVL_1300_GAIN_XY 909
-#define ST_MAGN_1_FS_AVL_1900_GAIN_XY 1169
-#define ST_MAGN_1_FS_AVL_2500_GAIN_XY 1492
-#define ST_MAGN_1_FS_AVL_4000_GAIN_XY 2222
-#define ST_MAGN_1_FS_AVL_4700_GAIN_XY 2500
-#define ST_MAGN_1_FS_AVL_5600_GAIN_XY 3030
-#define ST_MAGN_1_FS_AVL_8100_GAIN_XY 4347
-#define ST_MAGN_1_FS_AVL_1300_GAIN_Z 1020
-#define ST_MAGN_1_FS_AVL_1900_GAIN_Z 1315
-#define ST_MAGN_1_FS_AVL_2500_GAIN_Z 1666
-#define ST_MAGN_1_FS_AVL_4000_GAIN_Z 2500
-#define ST_MAGN_1_FS_AVL_4700_GAIN_Z 2816
-#define ST_MAGN_1_FS_AVL_5600_GAIN_Z 3389
-#define ST_MAGN_1_FS_AVL_8100_GAIN_Z 4878
-#define ST_MAGN_1_MULTIREAD_BIT false
-
-/* CUSTOM VALUES FOR SENSOR 2 */
-#define ST_MAGN_2_WAI_EXP 0x3d
-#define ST_MAGN_2_ODR_ADDR 0x20
-#define ST_MAGN_2_ODR_MASK 0x1c
-#define ST_MAGN_2_ODR_AVL_1HZ_VAL 0x00
-#define ST_MAGN_2_ODR_AVL_2HZ_VAL 0x01
-#define ST_MAGN_2_ODR_AVL_3HZ_VAL 0x02
-#define ST_MAGN_2_ODR_AVL_5HZ_VAL 0x03
-#define ST_MAGN_2_ODR_AVL_10HZ_VAL 0x04
-#define ST_MAGN_2_ODR_AVL_20HZ_VAL 0x05
-#define ST_MAGN_2_ODR_AVL_40HZ_VAL 0x06
-#define ST_MAGN_2_ODR_AVL_80HZ_VAL 0x07
-#define ST_MAGN_2_PW_ADDR 0x22
-#define ST_MAGN_2_PW_MASK 0x03
-#define ST_MAGN_2_PW_ON 0x00
-#define ST_MAGN_2_PW_OFF 0x03
-#define ST_MAGN_2_FS_ADDR 0x21
-#define ST_MAGN_2_FS_MASK 0x60
-#define ST_MAGN_2_FS_AVL_4000_VAL 0x00
-#define ST_MAGN_2_FS_AVL_8000_VAL 0x01
-#define ST_MAGN_2_FS_AVL_12000_VAL 0x02
-#define ST_MAGN_2_FS_AVL_16000_VAL 0x03
-#define ST_MAGN_2_FS_AVL_4000_GAIN 146
-#define ST_MAGN_2_FS_AVL_8000_GAIN 292
-#define ST_MAGN_2_FS_AVL_12000_GAIN 438
-#define ST_MAGN_2_FS_AVL_16000_GAIN 584
-#define ST_MAGN_2_MULTIREAD_BIT false
+/* Special L addresses for sensor 2 */
#define ST_MAGN_2_OUT_X_L_ADDR 0x28
#define ST_MAGN_2_OUT_Y_L_ADDR 0x2a
#define ST_MAGN_2_OUT_Z_L_ADDR 0x2c
-/* CUSTOM VALUES FOR SENSOR 3 */
-#define ST_MAGN_3_WAI_ADDR 0x4f
-#define ST_MAGN_3_WAI_EXP 0x40
-#define ST_MAGN_3_ODR_ADDR 0x60
-#define ST_MAGN_3_ODR_MASK 0x0c
-#define ST_MAGN_3_ODR_AVL_10HZ_VAL 0x00
-#define ST_MAGN_3_ODR_AVL_20HZ_VAL 0x01
-#define ST_MAGN_3_ODR_AVL_50HZ_VAL 0x02
-#define ST_MAGN_3_ODR_AVL_100HZ_VAL 0x03
-#define ST_MAGN_3_PW_ADDR 0x60
-#define ST_MAGN_3_PW_MASK 0x03
-#define ST_MAGN_3_PW_ON 0x00
-#define ST_MAGN_3_PW_OFF 0x03
-#define ST_MAGN_3_BDU_ADDR 0x62
-#define ST_MAGN_3_BDU_MASK 0x10
-#define ST_MAGN_3_DRDY_IRQ_ADDR 0x62
-#define ST_MAGN_3_DRDY_INT_MASK 0x01
-#define ST_MAGN_3_IHL_IRQ_ADDR 0x63
-#define ST_MAGN_3_IHL_IRQ_MASK 0x04
-#define ST_MAGN_3_FS_AVL_15000_GAIN 1500
-#define ST_MAGN_3_MULTIREAD_BIT false
+/* Special L addresses for sensor 3 */
#define ST_MAGN_3_OUT_X_L_ADDR 0x68
#define ST_MAGN_3_OUT_Y_L_ADDR 0x6a
#define ST_MAGN_3_OUT_Z_L_ADDR 0x6c
@@ -240,77 +113,78 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_magn_16bit_channels,
.odr = {
- .addr = ST_MAGN_0_ODR_ADDR,
- .mask = ST_MAGN_0_ODR_MASK,
+ .addr = 0x00,
+ .mask = 0x1c,
.odr_avl = {
- { 1, ST_MAGN_0_ODR_AVL_1HZ_VAL, },
- { 2, ST_MAGN_0_ODR_AVL_2HZ_VAL, },
- { 3, ST_MAGN_0_ODR_AVL_3HZ_VAL, },
- { 8, ST_MAGN_0_ODR_AVL_8HZ_VAL, },
- { 15, ST_MAGN_0_ODR_AVL_15HZ_VAL, },
- { 30, ST_MAGN_0_ODR_AVL_30HZ_VAL, },
- { 75, ST_MAGN_0_ODR_AVL_75HZ_VAL, },
+ { .hz = 1, .value = 0x00 },
+ { .hz = 2, .value = 0x01 },
+ { .hz = 3, .value = 0x02 },
+ { .hz = 8, .value = 0x03 },
+ { .hz = 15, .value = 0x04 },
+ { .hz = 30, .value = 0x05 },
+ { .hz = 75, .value = 0x06 },
+ /* 220 Hz (0x07) reportedly exists */
},
},
.pw = {
- .addr = ST_MAGN_0_PW_ADDR,
- .mask = ST_MAGN_0_PW_MASK,
- .value_on = ST_MAGN_0_PW_ON,
- .value_off = ST_MAGN_0_PW_OFF,
+ .addr = 0x02,
+ .mask = 0x03,
+ .value_on = 0x00,
+ .value_off = 0x03,
},
.fs = {
- .addr = ST_MAGN_0_FS_ADDR,
- .mask = ST_MAGN_0_FS_MASK,
+ .addr = 0x01,
+ .mask = 0xe0,
.fs_avl = {
[0] = {
.num = ST_MAGN_FS_AVL_1300MG,
- .value = ST_MAGN_0_FS_AVL_1300_VAL,
- .gain = ST_MAGN_0_FS_AVL_1300_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_1300_GAIN_Z,
+ .value = 0x01,
+ .gain = 1100,
+ .gain2 = 980,
},
[1] = {
.num = ST_MAGN_FS_AVL_1900MG,
- .value = ST_MAGN_0_FS_AVL_1900_VAL,
- .gain = ST_MAGN_0_FS_AVL_1900_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_1900_GAIN_Z,
+ .value = 0x02,
+ .gain = 855,
+ .gain2 = 760,
},
[2] = {
.num = ST_MAGN_FS_AVL_2500MG,
- .value = ST_MAGN_0_FS_AVL_2500_VAL,
- .gain = ST_MAGN_0_FS_AVL_2500_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_2500_GAIN_Z,
+ .value = 0x03,
+ .gain = 670,
+ .gain2 = 600,
},
[3] = {
.num = ST_MAGN_FS_AVL_4000MG,
- .value = ST_MAGN_0_FS_AVL_4000_VAL,
- .gain = ST_MAGN_0_FS_AVL_4000_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_4000_GAIN_Z,
+ .value = 0x04,
+ .gain = 450,
+ .gain2 = 400,
},
[4] = {
.num = ST_MAGN_FS_AVL_4700MG,
- .value = ST_MAGN_0_FS_AVL_4700_VAL,
- .gain = ST_MAGN_0_FS_AVL_4700_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_4700_GAIN_Z,
+ .value = 0x05,
+ .gain = 400,
+ .gain2 = 355,
},
[5] = {
.num = ST_MAGN_FS_AVL_5600MG,
- .value = ST_MAGN_0_FS_AVL_5600_VAL,
- .gain = ST_MAGN_0_FS_AVL_5600_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_5600_GAIN_Z,
+ .value = 0x06,
+ .gain = 330,
+ .gain2 = 295,
},
[6] = {
.num = ST_MAGN_FS_AVL_8100MG,
- .value = ST_MAGN_0_FS_AVL_8100_VAL,
- .gain = ST_MAGN_0_FS_AVL_8100_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_8100_GAIN_Z,
+ .value = 0x07,
+ .gain = 230,
+ .gain2 = 205,
},
},
},
- .multi_read_bit = ST_MAGN_0_MULTIREAD_BIT,
+ .multi_read_bit = false,
.bootime = 2,
},
{
- .wai = ST_MAGN_1_WAI_EXP,
+ .wai = 0x3c,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LSM303DLHC_MAGN_DEV_NAME,
@@ -318,175 +192,175 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_magn_16bit_channels,
.odr = {
- .addr = ST_MAGN_1_ODR_ADDR,
- .mask = ST_MAGN_1_ODR_MASK,
+ .addr = 0x00,
+ .mask = 0x1c,
.odr_avl = {
- { 1, ST_MAGN_1_ODR_AVL_1HZ_VAL, },
- { 2, ST_MAGN_1_ODR_AVL_2HZ_VAL, },
- { 3, ST_MAGN_1_ODR_AVL_3HZ_VAL, },
- { 8, ST_MAGN_1_ODR_AVL_8HZ_VAL, },
- { 15, ST_MAGN_1_ODR_AVL_15HZ_VAL, },
- { 30, ST_MAGN_1_ODR_AVL_30HZ_VAL, },
- { 75, ST_MAGN_1_ODR_AVL_75HZ_VAL, },
- { 220, ST_MAGN_1_ODR_AVL_220HZ_VAL, },
+ { .hz = 1, .value = 0x00 },
+ { .hz = 2, .value = 0x01 },
+ { .hz = 3, .value = 0x02 },
+ { .hz = 8, .value = 0x03 },
+ { .hz = 15, .value = 0x04 },
+ { .hz = 30, .value = 0x05 },
+ { .hz = 75, .value = 0x06 },
+ { .hz = 220, .value = 0x07 },
},
},
.pw = {
- .addr = ST_MAGN_1_PW_ADDR,
- .mask = ST_MAGN_1_PW_MASK,
- .value_on = ST_MAGN_1_PW_ON,
- .value_off = ST_MAGN_1_PW_OFF,
+ .addr = 0x02,
+ .mask = 0x03,
+ .value_on = 0x00,
+ .value_off = 0x03,
},
.fs = {
- .addr = ST_MAGN_1_FS_ADDR,
- .mask = ST_MAGN_1_FS_MASK,
+ .addr = 0x01,
+ .mask = 0xe0,
.fs_avl = {
[0] = {
.num = ST_MAGN_FS_AVL_1300MG,
- .value = ST_MAGN_1_FS_AVL_1300_VAL,
- .gain = ST_MAGN_1_FS_AVL_1300_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_1300_GAIN_Z,
+ .value = 0x01,
+ .gain = 909,
+ .gain2 = 1020,
},
[1] = {
.num = ST_MAGN_FS_AVL_1900MG,
- .value = ST_MAGN_1_FS_AVL_1900_VAL,
- .gain = ST_MAGN_1_FS_AVL_1900_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_1900_GAIN_Z,
+ .value = 0x02,
+ .gain = 1169,
+ .gain2 = 1315,
},
[2] = {
.num = ST_MAGN_FS_AVL_2500MG,
- .value = ST_MAGN_1_FS_AVL_2500_VAL,
- .gain = ST_MAGN_1_FS_AVL_2500_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_2500_GAIN_Z,
+ .value = 0x03,
+ .gain = 1492,
+ .gain2 = 1666,
},
[3] = {
.num = ST_MAGN_FS_AVL_4000MG,
- .value = ST_MAGN_1_FS_AVL_4000_VAL,
- .gain = ST_MAGN_1_FS_AVL_4000_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_4000_GAIN_Z,
+ .value = 0x04,
+ .gain = 2222,
+ .gain2 = 2500,
},
[4] = {
.num = ST_MAGN_FS_AVL_4700MG,
- .value = ST_MAGN_1_FS_AVL_4700_VAL,
- .gain = ST_MAGN_1_FS_AVL_4700_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_4700_GAIN_Z,
+ .value = 0x05,
+ .gain = 2500,
+ .gain2 = 2816,
},
[5] = {
.num = ST_MAGN_FS_AVL_5600MG,
- .value = ST_MAGN_1_FS_AVL_5600_VAL,
- .gain = ST_MAGN_1_FS_AVL_5600_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_5600_GAIN_Z,
+ .value = 0x06,
+ .gain = 3030,
+ .gain2 = 3389,
},
[6] = {
.num = ST_MAGN_FS_AVL_8100MG,
- .value = ST_MAGN_1_FS_AVL_8100_VAL,
- .gain = ST_MAGN_1_FS_AVL_8100_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_8100_GAIN_Z,
+ .value = 0x07,
+ .gain = 4347,
+ .gain2 = 4878,
},
},
},
- .multi_read_bit = ST_MAGN_1_MULTIREAD_BIT,
+ .multi_read_bit = false,
.bootime = 2,
},
{
- .wai = ST_MAGN_2_WAI_EXP,
+ .wai = 0x3d,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS3MDL_MAGN_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_magn_2_16bit_channels,
.odr = {
- .addr = ST_MAGN_2_ODR_ADDR,
- .mask = ST_MAGN_2_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x1c,
.odr_avl = {
- { 1, ST_MAGN_2_ODR_AVL_1HZ_VAL, },
- { 2, ST_MAGN_2_ODR_AVL_2HZ_VAL, },
- { 3, ST_MAGN_2_ODR_AVL_3HZ_VAL, },
- { 5, ST_MAGN_2_ODR_AVL_5HZ_VAL, },
- { 10, ST_MAGN_2_ODR_AVL_10HZ_VAL, },
- { 20, ST_MAGN_2_ODR_AVL_20HZ_VAL, },
- { 40, ST_MAGN_2_ODR_AVL_40HZ_VAL, },
- { 80, ST_MAGN_2_ODR_AVL_80HZ_VAL, },
+ { .hz = 1, .value = 0x00 },
+ { .hz = 2, .value = 0x01 },
+ { .hz = 3, .value = 0x02 },
+ { .hz = 5, .value = 0x03 },
+ { .hz = 10, .value = 0x04 },
+ { .hz = 20, .value = 0x05 },
+ { .hz = 40, .value = 0x06 },
+ { .hz = 80, .value = 0x07 },
},
},
.pw = {
- .addr = ST_MAGN_2_PW_ADDR,
- .mask = ST_MAGN_2_PW_MASK,
- .value_on = ST_MAGN_2_PW_ON,
- .value_off = ST_MAGN_2_PW_OFF,
+ .addr = 0x22,
+ .mask = 0x03,
+ .value_on = 0x00,
+ .value_off = 0x03,
},
.fs = {
- .addr = ST_MAGN_2_FS_ADDR,
- .mask = ST_MAGN_2_FS_MASK,
+ .addr = 0x21,
+ .mask = 0x60,
.fs_avl = {
[0] = {
.num = ST_MAGN_FS_AVL_4000MG,
- .value = ST_MAGN_2_FS_AVL_4000_VAL,
- .gain = ST_MAGN_2_FS_AVL_4000_GAIN,
+ .value = 0x00,
+ .gain = 146,
},
[1] = {
.num = ST_MAGN_FS_AVL_8000MG,
- .value = ST_MAGN_2_FS_AVL_8000_VAL,
- .gain = ST_MAGN_2_FS_AVL_8000_GAIN,
+ .value = 0x01,
+ .gain = 292,
},
[2] = {
.num = ST_MAGN_FS_AVL_12000MG,
- .value = ST_MAGN_2_FS_AVL_12000_VAL,
- .gain = ST_MAGN_2_FS_AVL_12000_GAIN,
+ .value = 0x02,
+ .gain = 438,
},
[3] = {
.num = ST_MAGN_FS_AVL_16000MG,
- .value = ST_MAGN_2_FS_AVL_16000_VAL,
- .gain = ST_MAGN_2_FS_AVL_16000_GAIN,
+ .value = 0x03,
+ .gain = 584,
},
},
},
- .multi_read_bit = ST_MAGN_2_MULTIREAD_BIT,
+ .multi_read_bit = false,
.bootime = 2,
},
{
- .wai = ST_MAGN_3_WAI_EXP,
- .wai_addr = ST_MAGN_3_WAI_ADDR,
+ .wai = 0x40,
+ .wai_addr = 0x4f,
.sensors_supported = {
[0] = LSM303AGR_MAGN_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_magn_3_16bit_channels,
.odr = {
- .addr = ST_MAGN_3_ODR_ADDR,
- .mask = ST_MAGN_3_ODR_MASK,
+ .addr = 0x60,
+ .mask = 0x0c,
.odr_avl = {
- { 10, ST_MAGN_3_ODR_AVL_10HZ_VAL, },
- { 20, ST_MAGN_3_ODR_AVL_20HZ_VAL, },
- { 50, ST_MAGN_3_ODR_AVL_50HZ_VAL, },
- { 100, ST_MAGN_3_ODR_AVL_100HZ_VAL, },
+ { .hz = 10, .value = 0x00 },
+ { .hz = 20, .value = 0x01 },
+ { .hz = 50, .value = 0x02 },
+ { .hz = 100, .value = 0x03 },
},
},
.pw = {
- .addr = ST_MAGN_3_PW_ADDR,
- .mask = ST_MAGN_3_PW_MASK,
- .value_on = ST_MAGN_3_PW_ON,
- .value_off = ST_MAGN_3_PW_OFF,
+ .addr = 0x60,
+ .mask = 0x03,
+ .value_on = 0x00,
+ .value_off = 0x03,
},
.fs = {
.fs_avl = {
[0] = {
.num = ST_MAGN_FS_AVL_15000MG,
- .gain = ST_MAGN_3_FS_AVL_15000_GAIN,
+ .gain = 1500,
},
},
},
.bdu = {
- .addr = ST_MAGN_3_BDU_ADDR,
- .mask = ST_MAGN_3_BDU_MASK,
+ .addr = 0x62,
+ .mask = 0x10,
},
.drdy_irq = {
- .addr = ST_MAGN_3_DRDY_IRQ_ADDR,
- .mask_int1 = ST_MAGN_3_DRDY_INT_MASK,
- .addr_ihl = ST_MAGN_3_IHL_IRQ_ADDR,
- .mask_ihl = ST_MAGN_3_IHL_IRQ_MASK,
+ .addr = 0x62,
+ .mask_int1 = 0x01,
+ .addr_ihl = 0x63,
+ .mask_ihl = 0x04,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_MAGN_3_MULTIREAD_BIT,
+ .multi_read_bit = false,
.bootime = 2,
},
};
diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c
index 13b6ae2fcf7b..0d1bcf89ae17 100644
--- a/drivers/iio/potentiometer/mcp4531.c
+++ b/drivers/iio/potentiometer/mcp4531.c
@@ -38,7 +38,7 @@
struct mcp4531_cfg {
int wipers;
- int max_pos;
+ int avail[3]; /* raw wiper position range: { min, step, max } */
int kohms;
};
@@ -78,38 +78,38 @@ enum mcp4531_type {
};
static const struct mcp4531_cfg mcp4531_cfg[] = {
- [MCP453x_502] = { .wipers = 1, .max_pos = 128, .kohms = 5, },
- [MCP453x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
- [MCP453x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, },
- [MCP453x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
- [MCP454x_502] = { .wipers = 1, .max_pos = 128, .kohms = 5, },
- [MCP454x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
- [MCP454x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, },
- [MCP454x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
- [MCP455x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, },
- [MCP455x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, },
- [MCP455x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
- [MCP455x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
- [MCP456x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, },
- [MCP456x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, },
- [MCP456x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
- [MCP456x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
- [MCP463x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, },
- [MCP463x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, },
- [MCP463x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, },
- [MCP463x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
- [MCP464x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, },
- [MCP464x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, },
- [MCP464x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, },
- [MCP464x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
- [MCP465x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, },
- [MCP465x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, },
- [MCP465x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, },
- [MCP465x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
- [MCP466x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, },
- [MCP466x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, },
- [MCP466x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, },
- [MCP466x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
+ [MCP453x_502] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 5, },
+ [MCP453x_103] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 10, },
+ [MCP453x_503] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 50, },
+ [MCP453x_104] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 100, },
+ [MCP454x_502] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 5, },
+ [MCP454x_103] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 10, },
+ [MCP454x_503] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 50, },
+ [MCP454x_104] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 100, },
+ [MCP455x_502] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 5, },
+ [MCP455x_103] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 10, },
+ [MCP455x_503] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 50, },
+ [MCP455x_104] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 100, },
+ [MCP456x_502] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 5, },
+ [MCP456x_103] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 10, },
+ [MCP456x_503] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 50, },
+ [MCP456x_104] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 100, },
+ [MCP463x_502] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 5, },
+ [MCP463x_103] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 10, },
+ [MCP463x_503] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 50, },
+ [MCP463x_104] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 100, },
+ [MCP464x_502] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 5, },
+ [MCP464x_103] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 10, },
+ [MCP464x_503] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 50, },
+ [MCP464x_104] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 100, },
+ [MCP465x_502] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 5, },
+ [MCP465x_103] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 10, },
+ [MCP465x_503] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 50, },
+ [MCP465x_104] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 100, },
+ [MCP466x_502] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 5, },
+ [MCP466x_103] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 10, },
+ [MCP466x_503] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 50, },
+ [MCP466x_104] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 100, },
};
#define MCP4531_WRITE (0 << 2)
@@ -124,13 +124,14 @@ struct mcp4531_data {
const struct mcp4531_cfg *cfg;
};
-#define MCP4531_CHANNEL(ch) { \
- .type = IIO_RESISTANCE, \
- .indexed = 1, \
- .output = 1, \
- .channel = (ch), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+#define MCP4531_CHANNEL(ch) { \
+ .type = IIO_RESISTANCE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (ch), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_RAW), \
}
static const struct iio_chan_spec mcp4531_channels[] = {
@@ -156,13 +157,31 @@ static int mcp4531_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 1000 * data->cfg->kohms;
- *val2 = data->cfg->max_pos;
+ *val2 = data->cfg->avail[2];
return IIO_VAL_FRACTIONAL;
}
return -EINVAL;
}
+static int mcp4531_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct mcp4531_data *data = iio_priv(indio_dev);
+
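+ /* expose the valid wiper positions as an IIO range: { min, step, max } */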
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ *length = ARRAY_SIZE(data->cfg->avail);
+ *vals = data->cfg->avail;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_RANGE;
+ }
+
+ return -EINVAL;
+}
+
static int mcp4531_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
@@ -172,7 +191,7 @@ static int mcp4531_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (val > data->cfg->max_pos || val < 0)
+ if (val > data->cfg->avail[2] || val < 0)
return -EINVAL;
break;
default:
@@ -186,6 +205,7 @@ static int mcp4531_write_raw(struct iio_dev *indio_dev,
static const struct iio_info mcp4531_info = {
.read_raw = mcp4531_read_raw,
+ .read_avail = mcp4531_read_avail,
.write_raw = mcp4531_write_raw,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/iio/potentiostat/Kconfig b/drivers/iio/potentiostat/Kconfig
new file mode 100644
index 000000000000..1e3baf2cc97d
--- /dev/null
+++ b/drivers/iio/potentiostat/Kconfig
@@ -0,0 +1,22 @@
+#
+# Potentiostat drivers
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Digital potentiostats"
+
+config LMP91000
+ tristate "Texas Instruments LMP91000 potentiostat driver"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_BUFFER_CB
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the Texas Instruments
+ LMP91000 digital potentiostat chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called lmp91000.
+
+endmenu
diff --git a/drivers/iio/potentiostat/Makefile b/drivers/iio/potentiostat/Makefile
new file mode 100644
index 000000000000..64d315ef4449
--- /dev/null
+++ b/drivers/iio/potentiostat/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for industrial I/O potentiostat drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_LMP91000) += lmp91000.o
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
new file mode 100644
index 000000000000..e22714365022
--- /dev/null
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -0,0 +1,446 @@
+/*
+ * lmp91000.c - Support for Texas Instruments digital potentiostats
+ *
+ * Copyright (C) 2016 Matt Ranostay <mranostay@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * TODO: bias voltage + polarity control, and multiple chip support
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define LMP91000_REG_LOCK 0x01
+#define LMP91000_REG_TIACN 0x10
+#define LMP91000_REG_TIACN_GAIN_SHIFT 2
+
+#define LMP91000_REG_REFCN 0x11
+#define LMP91000_REG_REFCN_EXT_REF 0x20
+#define LMP91000_REG_REFCN_50_ZERO 0x80
+
+#define LMP91000_REG_MODECN 0x12
+#define LMP91000_REG_MODECN_3LEAD 0x03
+#define LMP91000_REG_MODECN_TEMP 0x07
+
+#define LMP91000_DRV_NAME "lmp91000"
+
+static const int lmp91000_tia_gain[] = { 0, 2750, 3500, 7000, 14000, 35000,
+ 120000, 350000 };
+
+static const int lmp91000_rload[] = { 10, 33, 50, 100 };
+
+#define LMP91000_TEMP_BASE -40
+
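+/*
+ * Temperature lookup table: one entry per degree Celsius starting at
+ * LMP91000_TEMP_BASE; read_raw() scans for the first entry below the
+ * processed ADC value and reports LMP91000_TEMP_BASE + index.
+ */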
+static const u16 lmp91000_temp_lut[] = {
+ 1875, 1867, 1860, 1852, 1844, 1836, 1828, 1821, 1813, 1805,
+ 1797, 1789, 1782, 1774, 1766, 1758, 1750, 1742, 1734, 1727,
+ 1719, 1711, 1703, 1695, 1687, 1679, 1671, 1663, 1656, 1648,
+ 1640, 1632, 1624, 1616, 1608, 1600, 1592, 1584, 1576, 1568,
+ 1560, 1552, 1544, 1536, 1528, 1520, 1512, 1504, 1496, 1488,
+ 1480, 1472, 1464, 1456, 1448, 1440, 1432, 1424, 1415, 1407,
+ 1399, 1391, 1383, 1375, 1367, 1359, 1351, 1342, 1334, 1326,
+ 1318, 1310, 1302, 1293, 1285, 1277, 1269, 1261, 1253, 1244,
+ 1236, 1228, 1220, 1212, 1203, 1195, 1187, 1179, 1170, 1162,
+ 1154, 1146, 1137, 1129, 1121, 1112, 1104, 1096, 1087, 1079,
+ 1071, 1063, 1054, 1046, 1038, 1029, 1021, 1012, 1004, 996,
+ 987, 979, 971, 962, 954, 945, 937, 929, 920, 912,
+ 903, 895, 886, 878, 870, 861 };
+
+static const struct regmap_config lmp91000_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+struct lmp91000_data {
+ struct regmap *regmap;
+ struct device *dev;
+
+ struct iio_trigger *trig;
+ struct iio_cb_buffer *cb_buffer;
+ struct iio_channel *adc_chan;
+
+ struct completion completion;
+ u8 chan_select;
+
+ u32 buffer[4]; /* 64-bit data + 64-bit timestamp */
+};
+
+static const struct iio_chan_spec lmp91000_channels[] = {
+ { /* chemical channel mV */
+ .type = IIO_VOLTAGE,
+ .channel = 0,
+ .address = LMP91000_REG_MODECN_3LEAD,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+ { /* temperature channel mV */
+ .type = IIO_TEMP,
+ .channel = 1,
+ .address = LMP91000_REG_MODECN_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .scan_index = -1,
+ },
+};
+
+static int lmp91000_read(struct lmp91000_data *data, int channel, int *val)
+{
+ int state, ret;
+
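+ /*
+  * Switch MODECN to the requested channel, kick the trigger and wait
+  * for the ADC callback to fill data->buffer.
+  */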
+ ret = regmap_read(data->regmap, LMP91000_REG_MODECN, &state);
+ if (ret)
+ return -EINVAL;
+
+ ret = regmap_write(data->regmap, LMP91000_REG_MODECN, channel);
+ if (ret)
+ return -EINVAL;
+
+ /* delay till first temperature reading is complete */
+ if ((state != channel) && (channel == LMP91000_REG_MODECN_TEMP))
+ usleep_range(3000, 4000);
+
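+ /* slot 0 of the buffer is the 3-lead voltage sample, slot 1 the temperature */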
+ data->chan_select = channel != LMP91000_REG_MODECN_3LEAD;
+
+ iio_trigger_poll_chained(data->trig);
+
+ ret = wait_for_completion_timeout(&data->completion, HZ);
+ reinit_completion(&data->completion);
+
+ if (!ret)
+ return -ETIMEDOUT;
+
+ *val = data->buffer[data->chan_select];
+
+ return 0;
+}
+
+static irqreturn_t lmp91000_buffer_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct lmp91000_data *data = iio_priv(indio_dev);
+ int ret, val;
+
+ memset(data->buffer, 0, sizeof(data->buffer));
+
+ ret = lmp91000_read(data, LMP91000_REG_MODECN_3LEAD, &val);
+ if (!ret) {
+ data->buffer[0] = val;
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_get_time_ns(indio_dev));
+ }
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int lmp91000_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct lmp91000_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_PROCESSED: {
+ int ret = iio_channel_start_all_cb(data->cb_buffer);
+
+ if (ret)
+ return ret;
+
+ ret = lmp91000_read(data, chan->address, val);
+
+ iio_channel_stop_all_cb(data->cb_buffer);
+
+ if (ret)
+ return ret;
+
+ if (mask == IIO_CHAN_INFO_PROCESSED) {
+ int tmp, i;
+
+ ret = iio_convert_raw_to_processed(data->adc_chan,
+ *val, &tmp, 1);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(lmp91000_temp_lut); i++)
+ if (lmp91000_temp_lut[i] < tmp)
+ break;
+
+ *val = (LMP91000_TEMP_BASE + i) * 1000;
+ }
+ return IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ return iio_read_channel_offset(data->adc_chan, val, val2);
+ case IIO_CHAN_INFO_SCALE:
+ return iio_read_channel_scale(data->adc_chan, val, val2);
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info lmp91000_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = lmp91000_read_raw,
+};
+
+static int lmp91000_read_config(struct lmp91000_data *data)
+{
+ struct device *dev = data->dev;
+ struct device_node *np = dev->of_node;
+ unsigned int reg, val;
+ int i, ret;
+
+ ret = of_property_read_u32(np, "ti,tia-gain-ohm", &val);
+ if (ret) {
+ if (of_property_read_bool(np, "ti,external-tia-resistor"))
+ val = 0;
+ else {
+ dev_err(dev, "no ti,tia-gain-ohm defined\n");
+ return ret;
+ }
+ }
+
+ ret = -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(lmp91000_tia_gain); i++) {
+ if (lmp91000_tia_gain[i] == val) {
+ reg = i << LMP91000_REG_TIACN_GAIN_SHIFT;
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret) {
+ dev_err(dev, "invalid ti,tia-gain-ohm %d\n", val);
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "ti,rload-ohm", &val);
+ if (ret) {
+ val = 100;
+ dev_info(dev, "no ti,rload-ohm defined, default to %d\n", val);
+ }
+
+ ret = -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(lmp91000_rload); i++) {
+ if (lmp91000_rload[i] == val) {
+ reg |= i;
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret) {
+ dev_err(dev, "invalid ti,rload-ohm %d\n", val);
+ return ret;
+ }
+
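+ /* unlock TIACN/REFCN, program gain, load and reference, then re-lock */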
+ regmap_write(data->regmap, LMP91000_REG_LOCK, 0);
+ regmap_write(data->regmap, LMP91000_REG_TIACN, reg);
+ regmap_write(data->regmap, LMP91000_REG_REFCN, LMP91000_REG_REFCN_EXT_REF
+ | LMP91000_REG_REFCN_50_ZERO);
+ regmap_write(data->regmap, LMP91000_REG_LOCK, 1);
+
+ return 0;
+}
+
+static int lmp91000_buffer_cb(const void *val, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct lmp91000_data *data = iio_priv(indio_dev);
+
+ data->buffer[data->chan_select] = *((int *)val);
+ complete_all(&data->completion);
+
+ return 0;
+}
+
+static const struct iio_trigger_ops lmp91000_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
+static int lmp91000_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct lmp91000_data *data = iio_priv(indio_dev);
+
+ return iio_channel_start_all_cb(data->cb_buffer);
+}
+
+static int lmp91000_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct lmp91000_data *data = iio_priv(indio_dev);
+
+ iio_channel_stop_all_cb(data->cb_buffer);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops lmp91000_buffer_setup_ops = {
+ .preenable = lmp91000_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = lmp91000_buffer_predisable,
+};
+
+static int lmp91000_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct lmp91000_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ indio_dev->info = &lmp91000_info;
+ indio_dev->channels = lmp91000_channels;
+ indio_dev->num_channels = ARRAY_SIZE(lmp91000_channels);
+ indio_dev->name = LMP91000_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ i2c_set_clientdata(client, indio_dev);
+
+ data = iio_priv(indio_dev);
+ data->dev = dev;
+ data->regmap = devm_regmap_init_i2c(client, &lmp91000_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(dev, "regmap initialization failed.\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ data->trig = devm_iio_trigger_alloc(data->dev, "%s-mux%d",
+ indio_dev->name, indio_dev->id);
+ if (!data->trig) {
+ dev_err(dev, "cannot allocate iio trigger.\n");
+ return -ENOMEM;
+ }
+
+ data->trig->ops = &lmp91000_trigger_ops;
+ data->trig->dev.parent = dev;
+ init_completion(&data->completion);
+
+ ret = lmp91000_read_config(data);
+ if (ret)
+ return ret;
+
+ ret = iio_trigger_set_immutable(iio_channel_cb_get_iio_dev(data->cb_buffer),
+ data->trig);
+ if (ret) {
+ dev_err(dev, "cannot set immutable trigger.\n");
+ return ret;
+ }
+
+ ret = iio_trigger_register(data->trig);
+ if (ret) {
+ dev_err(dev, "cannot register iio trigger.\n");
+ return ret;
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ &lmp91000_buffer_handler,
+ &lmp91000_buffer_setup_ops);
+ if (ret)
+ goto error_unreg_trigger;
+
+ data->cb_buffer = iio_channel_get_all_cb(dev, &lmp91000_buffer_cb,
+ indio_dev);
+
+ if (IS_ERR(data->cb_buffer)) {
+ if (PTR_ERR(data->cb_buffer) == -ENODEV)
+ ret = -EPROBE_DEFER;
+ else
+ ret = PTR_ERR(data->cb_buffer);
+
+ goto error_unreg_buffer;
+ }
+
+ data->adc_chan = iio_channel_cb_get_channels(data->cb_buffer);
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_unreg_cb_buffer;
+
+ return 0;
+
+error_unreg_cb_buffer:
+ iio_channel_release_all_cb(data->cb_buffer);
+
+error_unreg_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+
+error_unreg_trigger:
+ iio_trigger_unregister(data->trig);
+
+ return ret;
+}
+
+static int lmp91000_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct lmp91000_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ iio_channel_stop_all_cb(data->cb_buffer);
+ iio_channel_release_all_cb(data->cb_buffer);
+
+ iio_triggered_buffer_cleanup(indio_dev);
+ iio_trigger_unregister(data->trig);
+
+ return 0;
+}
+
+static const struct of_device_id lmp91000_of_match[] = {
+ { .compatible = "ti,lmp91000", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lmp91000_of_match);
+
+static const struct i2c_device_id lmp91000_id[] = {
+ { "lmp91000", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, lmp91000_id);
+
+static struct i2c_driver lmp91000_driver = {
+ .driver = {
+ .name = LMP91000_DRV_NAME,
+ .of_match_table = of_match_ptr(lmp91000_of_match),
+ },
+ .probe = lmp91000_probe,
+ .remove = lmp91000_remove,
+ .id_table = lmp91000_id,
+};
+module_i2c_driver(lmp91000_driver);
+
+MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_DESCRIPTION("LMP91000 digital potentiostat");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 15cd416365c1..bd8d96b96771 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -5,6 +5,16 @@
menu "Pressure sensors"
+config ABP060MG
+ tristate "Honeywell ABP pressure sensor driver"
+ depends on I2C
+ help
+ Say yes here to build support for the Honeywell ABP pressure
+ sensors.
+
+ To compile this driver as a module, choose M here: the module
+ will be called abp060mg.
+
config BMP280
tristate "Bosch Sensortec BMP180/BMP280 pressure sensor I2C driver"
depends on (I2C || SPI_MASTER)
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index fff77185a5cc..de3dbc81dc5a 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -3,6 +3,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_ABP060MG) += abp060mg.o
obj-$(CONFIG_BMP280) += bmp280.o
bmp280-objs := bmp280-core.o bmp280-regmap.o
obj-$(CONFIG_BMP280_I2C) += bmp280-i2c.o
diff --git a/drivers/iio/pressure/abp060mg.c b/drivers/iio/pressure/abp060mg.c
new file mode 100644
index 000000000000..43bdd0b9155f
--- /dev/null
+++ b/drivers/iio/pressure/abp060mg.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2016 - Marcin Malagowski <mrc@bourne.st>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+
+#define ABP060MG_ERROR_MASK 0xC000
+#define ABP060MG_RESP_TIME_MS 40
+#define ABP060MG_MIN_COUNTS 1638 /* = 0x0666 (10% of u14) */
+#define ABP060MG_MAX_COUNTS 14745 /* = 0x3999 (90% of u14) */
+#define ABP060MG_NUM_COUNTS (ABP060MG_MAX_COUNTS - ABP060MG_MIN_COUNTS)
+
+enum abp_variant {
+ /* gage [kPa] */
+ ABP006KG, ABP010KG, ABP016KG, ABP025KG, ABP040KG, ABP060KG, ABP100KG,
+ ABP160KG, ABP250KG, ABP400KG, ABP600KG, ABP001GG,
+ /* differential [kPa] */
+ ABP006KD, ABP010KD, ABP016KD, ABP025KD, ABP040KD, ABP060KD, ABP100KD,
+ ABP160KD, ABP250KD, ABP400KD,
+ /* gage [psi] */
+ ABP001PG, ABP005PG, ABP015PG, ABP030PG, ABP060PG, ABP100PG, ABP150PG,
+ /* differential [psi] */
+ ABP001PD, ABP005PD, ABP015PD, ABP030PD, ABP060PD,
+};
+
+struct abp_config {
+ int min;
+ int max;
+};
+
+static struct abp_config abp_config[] = {
+ /* mbar & kPa variants */
+ [ABP006KG] = { .min = 0, .max = 6000 },
+ [ABP010KG] = { .min = 0, .max = 10000 },
+ [ABP016KG] = { .min = 0, .max = 16000 },
+ [ABP025KG] = { .min = 0, .max = 25000 },
+ [ABP040KG] = { .min = 0, .max = 40000 },
+ [ABP060KG] = { .min = 0, .max = 60000 },
+ [ABP100KG] = { .min = 0, .max = 100000 },
+ [ABP160KG] = { .min = 0, .max = 160000 },
+ [ABP250KG] = { .min = 0, .max = 250000 },
+ [ABP400KG] = { .min = 0, .max = 400000 },
+ [ABP600KG] = { .min = 0, .max = 600000 },
+ [ABP001GG] = { .min = 0, .max = 1000000 },
+ [ABP006KD] = { .min = -6000, .max = 6000 },
+ [ABP010KD] = { .min = -10000, .max = 10000 },
+ [ABP016KD] = { .min = -16000, .max = 16000 },
+ [ABP025KD] = { .min = -25000, .max = 25000 },
+ [ABP040KD] = { .min = -40000, .max = 40000 },
+ [ABP060KD] = { .min = -60000, .max = 60000 },
+ [ABP100KD] = { .min = -100000, .max = 100000 },
+ [ABP160KD] = { .min = -160000, .max = 160000 },
+ [ABP250KD] = { .min = -250000, .max = 250000 },
+ [ABP400KD] = { .min = -400000, .max = 400000 },
+ /* psi variants (1 psi ~ 6895 Pa) */
+ [ABP001PG] = { .min = 0, .max = 6895 },
+ [ABP005PG] = { .min = 0, .max = 34474 },
+ [ABP015PG] = { .min = 0, .max = 103421 },
+ [ABP030PG] = { .min = 0, .max = 206843 },
+ [ABP060PG] = { .min = 0, .max = 413686 },
+ [ABP100PG] = { .min = 0, .max = 689476 },
+ [ABP150PG] = { .min = 0, .max = 1034214 },
+ [ABP001PD] = { .min = -6895, .max = 6895 },
+ [ABP005PD] = { .min = -34474, .max = 34474 },
+ [ABP015PD] = { .min = -103421, .max = 103421 },
+ [ABP030PD] = { .min = -206843, .max = 206843 },
+ [ABP060PD] = { .min = -413686, .max = 413686 },
+};
+
+struct abp_state {
+ struct i2c_client *client;
+ struct mutex lock;
+
+ /*
+ * bus-dependent MEASURE_REQUEST length.
+ * If no SMBUS_QUICK support, need to send dummy byte
+ */
+ int mreq_len;
+
+ /* model-dependent values (calculated on probe) */
+ int scale;
+ int offset;
+};
+
+static const struct iio_chan_spec abp060mg_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static int abp060mg_get_measurement(struct abp_state *state, int *val)
+{
+ struct i2c_client *client = state->client;
+ __be16 buf[2];
+ u16 pressure;
+ int ret;
+
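+ /*
+  * Request a conversion, wait out the response time, then read back
+  * the 2-bit status and 14-bit pressure count.
+  */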
+ buf[0] = 0;
+ ret = i2c_master_send(client, (u8 *)&buf, state->mreq_len);
+ if (ret < 0)
+ return ret;
+
+ msleep_interruptible(ABP060MG_RESP_TIME_MS);
+
+ ret = i2c_master_recv(client, (u8 *)&buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ pressure = be16_to_cpu(buf[0]);
+ if (pressure & ABP060MG_ERROR_MASK)
+ return -EIO;
+
+ if (pressure < ABP060MG_MIN_COUNTS || pressure > ABP060MG_MAX_COUNTS)
+ return -EIO;
+
+ *val = pressure;
+
+ return IIO_VAL_INT;
+}
+
+static int abp060mg_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct abp_state *state = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&state->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = abp060mg_get_measurement(state, val);
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = state->offset;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ *val = state->scale;
+ *val2 = ABP060MG_NUM_COUNTS * 1000; /* to kPa */
+ ret = IIO_VAL_FRACTIONAL;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&state->lock);
+ return ret;
+}
+
+static const struct iio_info abp060mg_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = abp060mg_read_raw,
+};
+
+static void abp060mg_init_device(struct iio_dev *indio_dev, unsigned long id)
+{
+ struct abp_state *state = iio_priv(indio_dev);
+ struct abp_config *cfg = &abp_config[id];
+
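+ /*
+  * Userspace computes (raw + offset) * scale, i.e.
+  * (counts - ABP060MG_MIN_COUNTS) * (max - min) / (ABP060MG_NUM_COUNTS * 1000)
+  * in kPa; differential parts get an extra half-span offset so that
+  * mid-scale counts read as zero pressure.
+  */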
+ state->scale = cfg->max - cfg->min;
+ state->offset = -ABP060MG_MIN_COUNTS;
+
+ if (cfg->min < 0) /* differential */
+ state->offset -= ABP060MG_NUM_COUNTS >> 1;
+}
+
+static int abp060mg_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct abp_state *state;
+ unsigned long cfg_id = id->driver_data;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*state));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ state = iio_priv(indio_dev);
+ i2c_set_clientdata(client, state);
+ state->client = client;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_QUICK))
+ state->mreq_len = 1;
+
+ abp060mg_init_device(indio_dev, cfg_id);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = dev_name(&client->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &abp060mg_info;
+
+ indio_dev->channels = abp060mg_channels;
+ indio_dev->num_channels = ARRAY_SIZE(abp060mg_channels);
+
+ mutex_init(&state->lock);
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id abp060mg_id_table[] = {
+ /* mbar & kPa variants (abp060m [60 mbar] == abp006k [6 kPa]) */
+ /* gage: */
+ { "abp060mg", ABP006KG }, { "abp006kg", ABP006KG },
+ { "abp100mg", ABP010KG }, { "abp010kg", ABP010KG },
+ { "abp160mg", ABP016KG }, { "abp016kg", ABP016KG },
+ { "abp250mg", ABP025KG }, { "abp025kg", ABP025KG },
+ { "abp400mg", ABP040KG }, { "abp040kg", ABP040KG },
+ { "abp600mg", ABP060KG }, { "abp060kg", ABP060KG },
+ { "abp001bg", ABP100KG }, { "abp100kg", ABP100KG },
+ { "abp1_6bg", ABP160KG }, { "abp160kg", ABP160KG },
+ { "abp2_5bg", ABP250KG }, { "abp250kg", ABP250KG },
+ { "abp004bg", ABP400KG }, { "abp400kg", ABP400KG },
+ { "abp006bg", ABP600KG }, { "abp600kg", ABP600KG },
+ { "abp010bg", ABP001GG }, { "abp001gg", ABP001GG },
+ /* differential: */
+ { "abp060md", ABP006KD }, { "abp006kd", ABP006KD },
+ { "abp100md", ABP010KD }, { "abp010kd", ABP010KD },
+ { "abp160md", ABP016KD }, { "abp016kd", ABP016KD },
+ { "abp250md", ABP025KD }, { "abp025kd", ABP025KD },
+ { "abp400md", ABP040KD }, { "abp040kd", ABP040KD },
+ { "abp600md", ABP060KD }, { "abp060kd", ABP060KD },
+ { "abp001bd", ABP100KD }, { "abp100kd", ABP100KD },
+ { "abp1_6bd", ABP160KD }, { "abp160kd", ABP160KD },
+ { "abp2_5bd", ABP250KD }, { "abp250kd", ABP250KD },
+ { "abp004bd", ABP400KD }, { "abp400kd", ABP400KD },
+ /* psi variants */
+ /* gage: */
+ { "abp001pg", ABP001PG },
+ { "abp005pg", ABP005PG },
+ { "abp015pg", ABP015PG },
+ { "abp030pg", ABP030PG },
+ { "abp060pg", ABP060PG },
+ { "abp100pg", ABP100PG },
+ { "abp150pg", ABP150PG },
+ /* differential: */
+ { "abp001pd", ABP001PD },
+ { "abp005pd", ABP005PD },
+ { "abp015pd", ABP015PD },
+ { "abp030pd", ABP030PD },
+ { "abp060pd", ABP060PD },
+ { /* empty */ },
+};
+MODULE_DEVICE_TABLE(i2c, abp060mg_id_table);
+
+static struct i2c_driver abp060mg_driver = {
+ .driver = {
+ .name = "abp060mg",
+ },
+ .probe = abp060mg_probe,
+ .id_table = abp060mg_id_table,
+};
+module_i2c_driver(abp060mg_driver);
+
+MODULE_AUTHOR("Marcin Malagowski <mrc@bourne.st>");
+MODULE_DESCRIPTION("Honeywell ABP pressure sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index 6392d7b62841..cc3f84139157 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -82,8 +82,9 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch (chan->type) {
case IIO_PRESSURE: /* in 0.25 pascal / LSB */
@@ -91,32 +92,39 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
ret = mpl3115_request(data);
if (ret < 0) {
mutex_unlock(&data->lock);
- return ret;
+ break;
}
ret = i2c_smbus_read_i2c_block_data(data->client,
MPL3115_OUT_PRESS, 3, (u8 *) &tmp);
mutex_unlock(&data->lock);
if (ret < 0)
- return ret;
+ break;
*val = be32_to_cpu(tmp) >> 12;
- return IIO_VAL_INT;
+ ret = IIO_VAL_INT;
+ break;
case IIO_TEMP: /* in 0.0625 celsius / LSB */
mutex_lock(&data->lock);
ret = mpl3115_request(data);
if (ret < 0) {
mutex_unlock(&data->lock);
- return ret;
+ break;
}
ret = i2c_smbus_read_i2c_block_data(data->client,
MPL3115_OUT_TEMP, 2, (u8 *) &tmp);
mutex_unlock(&data->lock);
if (ret < 0)
- return ret;
+ break;
*val = sign_extend32(be32_to_cpu(tmp) >> 20, 11);
- return IIO_VAL_INT;
+ ret = IIO_VAL_INT;
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_PRESSURE:
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index a74ed1f0c880..6bd53e702667 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -392,17 +392,14 @@ static int ms5611_init(struct iio_dev *indio_dev)
/* Enable attached regulator if any. */
st->vdd = devm_regulator_get(indio_dev->dev.parent, "vdd");
- if (!IS_ERR(st->vdd)) {
- ret = regulator_enable(st->vdd);
- if (ret) {
- dev_err(indio_dev->dev.parent,
- "failed to enable Vdd supply: %d\n", ret);
- return ret;
- }
- } else {
- ret = PTR_ERR(st->vdd);
- if (ret != -ENODEV)
- return ret;
+ if (IS_ERR(st->vdd))
+ return PTR_ERR(st->vdd);
+
+ ret = regulator_enable(st->vdd);
+ if (ret) {
+ dev_err(indio_dev->dev.parent,
+ "failed to enable Vdd supply: %d\n", ret);
+ return ret;
}
ret = ms5611_reset(indio_dev);
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 55df9a75eb3a..e19e0787864c 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -112,115 +112,24 @@
#define ST_PRESS_1_OUT_XL_ADDR 0x28
#define ST_TEMP_1_OUT_L_ADDR 0x2b
-/*
- * CUSTOM VALUES FOR LPS331AP SENSOR
- * See LPS331AP datasheet:
- * http://www2.st.com/resource/en/datasheet/lps331ap.pdf
- */
-#define ST_PRESS_LPS331AP_WAI_EXP 0xbb
-#define ST_PRESS_LPS331AP_ODR_ADDR 0x20
-#define ST_PRESS_LPS331AP_ODR_MASK 0x70
-#define ST_PRESS_LPS331AP_ODR_AVL_1HZ_VAL 0x01
-#define ST_PRESS_LPS331AP_ODR_AVL_7HZ_VAL 0x05
-#define ST_PRESS_LPS331AP_ODR_AVL_13HZ_VAL 0x06
-#define ST_PRESS_LPS331AP_ODR_AVL_25HZ_VAL 0x07
-#define ST_PRESS_LPS331AP_PW_ADDR 0x20
-#define ST_PRESS_LPS331AP_PW_MASK 0x80
-#define ST_PRESS_LPS331AP_FS_ADDR 0x23
-#define ST_PRESS_LPS331AP_FS_MASK 0x30
-#define ST_PRESS_LPS331AP_BDU_ADDR 0x20
-#define ST_PRESS_LPS331AP_BDU_MASK 0x04
-#define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22
-#define ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK 0x04
-#define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK 0x20
-#define ST_PRESS_LPS331AP_IHL_IRQ_ADDR 0x22
-#define ST_PRESS_LPS331AP_IHL_IRQ_MASK 0x80
-#define ST_PRESS_LPS331AP_OD_IRQ_ADDR 0x22
-#define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40
-#define ST_PRESS_LPS331AP_MULTIREAD_BIT true
-
-/*
- * CUSTOM VALUES FOR THE OBSOLETE LPS001WP SENSOR
- */
-
/* LPS001WP pressure resolution */
#define ST_PRESS_LPS001WP_LSB_PER_MBAR 16UL
/* LPS001WP temperature resolution */
#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS 64UL
-
-#define ST_PRESS_LPS001WP_WAI_EXP 0xba
-#define ST_PRESS_LPS001WP_ODR_ADDR 0x20
-#define ST_PRESS_LPS001WP_ODR_MASK 0x30
-#define ST_PRESS_LPS001WP_ODR_AVL_1HZ_VAL 0x01
-#define ST_PRESS_LPS001WP_ODR_AVL_7HZ_VAL 0x02
-#define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03
-#define ST_PRESS_LPS001WP_PW_ADDR 0x20
-#define ST_PRESS_LPS001WP_PW_MASK 0x40
+/* LPS001WP pressure gain */
#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \
(100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR)
-#define ST_PRESS_LPS001WP_BDU_ADDR 0x20
-#define ST_PRESS_LPS001WP_BDU_MASK 0x04
-#define ST_PRESS_LPS001WP_MULTIREAD_BIT true
+/* LPS001WP pressure and temp L addresses */
#define ST_PRESS_LPS001WP_OUT_L_ADDR 0x28
#define ST_TEMP_LPS001WP_OUT_L_ADDR 0x2a
-/*
- * CUSTOM VALUES FOR LPS25H SENSOR
- * See LPS25H datasheet:
- * http://www2.st.com/resource/en/datasheet/lps25h.pdf
- */
-#define ST_PRESS_LPS25H_WAI_EXP 0xbd
-#define ST_PRESS_LPS25H_ODR_ADDR 0x20
-#define ST_PRESS_LPS25H_ODR_MASK 0x70
-#define ST_PRESS_LPS25H_ODR_AVL_1HZ_VAL 0x01
-#define ST_PRESS_LPS25H_ODR_AVL_7HZ_VAL 0x02
-#define ST_PRESS_LPS25H_ODR_AVL_13HZ_VAL 0x03
-#define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04
-#define ST_PRESS_LPS25H_PW_ADDR 0x20
-#define ST_PRESS_LPS25H_PW_MASK 0x80
-#define ST_PRESS_LPS25H_BDU_ADDR 0x20
-#define ST_PRESS_LPS25H_BDU_MASK 0x04
-#define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23
-#define ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK 0x01
-#define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK 0x10
-#define ST_PRESS_LPS25H_IHL_IRQ_ADDR 0x22
-#define ST_PRESS_LPS25H_IHL_IRQ_MASK 0x80
-#define ST_PRESS_LPS25H_OD_IRQ_ADDR 0x22
-#define ST_PRESS_LPS25H_OD_IRQ_MASK 0x40
-#define ST_PRESS_LPS25H_MULTIREAD_BIT true
+/* LPS25H pressure and temp L addresses */
#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
#define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b
-/*
- * CUSTOM VALUES FOR LPS22HB SENSOR
- * See LPS22HB datasheet:
- * http://www2.st.com/resource/en/datasheet/lps22hb.pdf
- */
-
/* LPS22HB temperature sensitivity */
#define ST_PRESS_LPS22HB_LSB_PER_CELSIUS 100UL
-#define ST_PRESS_LPS22HB_WAI_EXP 0xb1
-#define ST_PRESS_LPS22HB_ODR_ADDR 0x10
-#define ST_PRESS_LPS22HB_ODR_MASK 0x70
-#define ST_PRESS_LPS22HB_ODR_AVL_1HZ_VAL 0x01
-#define ST_PRESS_LPS22HB_ODR_AVL_10HZ_VAL 0x02
-#define ST_PRESS_LPS22HB_ODR_AVL_25HZ_VAL 0x03
-#define ST_PRESS_LPS22HB_ODR_AVL_50HZ_VAL 0x04
-#define ST_PRESS_LPS22HB_ODR_AVL_75HZ_VAL 0x05
-#define ST_PRESS_LPS22HB_PW_ADDR 0x10
-#define ST_PRESS_LPS22HB_PW_MASK 0x70
-#define ST_PRESS_LPS22HB_BDU_ADDR 0x10
-#define ST_PRESS_LPS22HB_BDU_MASK 0x02
-#define ST_PRESS_LPS22HB_DRDY_IRQ_ADDR 0x12
-#define ST_PRESS_LPS22HB_DRDY_IRQ_INT1_MASK 0x04
-#define ST_PRESS_LPS22HB_DRDY_IRQ_INT2_MASK 0x08
-#define ST_PRESS_LPS22HB_IHL_IRQ_ADDR 0x12
-#define ST_PRESS_LPS22HB_IHL_IRQ_MASK 0x80
-#define ST_PRESS_LPS22HB_OD_IRQ_ADDR 0x12
-#define ST_PRESS_LPS22HB_OD_IRQ_MASK 0x40
-#define ST_PRESS_LPS22HB_MULTIREAD_BIT true
-
static const struct iio_chan_spec st_press_1_channels[] = {
{
.type = IIO_PRESSURE,
@@ -321,7 +230,12 @@ static const struct iio_chan_spec st_press_lps22hb_channels[] = {
static const struct st_sensor_settings st_press_sensors_settings[] = {
{
- .wai = ST_PRESS_LPS331AP_WAI_EXP,
+ /*
+ * CUSTOM VALUES FOR LPS331AP SENSOR
+ * See LPS331AP datasheet:
+ * http://www2.st.com/resource/en/datasheet/lps331ap.pdf
+ */
+ .wai = 0xbb,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LPS331AP_PRESS_DEV_NAME,
@@ -329,24 +243,24 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.ch = (struct iio_chan_spec *)st_press_1_channels,
.num_ch = ARRAY_SIZE(st_press_1_channels),
.odr = {
- .addr = ST_PRESS_LPS331AP_ODR_ADDR,
- .mask = ST_PRESS_LPS331AP_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x70,
.odr_avl = {
- { 1, ST_PRESS_LPS331AP_ODR_AVL_1HZ_VAL, },
- { 7, ST_PRESS_LPS331AP_ODR_AVL_7HZ_VAL, },
- { 13, ST_PRESS_LPS331AP_ODR_AVL_13HZ_VAL, },
- { 25, ST_PRESS_LPS331AP_ODR_AVL_25HZ_VAL, },
+ { .hz = 1, .value = 0x01 },
+ { .hz = 7, .value = 0x05 },
+ { .hz = 13, .value = 0x06 },
+ { .hz = 25, .value = 0x07 },
},
},
.pw = {
- .addr = ST_PRESS_LPS331AP_PW_ADDR,
- .mask = ST_PRESS_LPS331AP_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x80,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
.fs = {
- .addr = ST_PRESS_LPS331AP_FS_ADDR,
- .mask = ST_PRESS_LPS331AP_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
/*
* Pressure and temperature sensitivity values
@@ -360,24 +274,27 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
},
},
.bdu = {
- .addr = ST_PRESS_LPS331AP_BDU_ADDR,
- .mask = ST_PRESS_LPS331AP_BDU_MASK,
+ .addr = 0x20,
+ .mask = 0x04,
},
.drdy_irq = {
- .addr = ST_PRESS_LPS331AP_DRDY_IRQ_ADDR,
- .mask_int1 = ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_PRESS_LPS331AP_IHL_IRQ_ADDR,
- .mask_ihl = ST_PRESS_LPS331AP_IHL_IRQ_MASK,
- .addr_od = ST_PRESS_LPS331AP_OD_IRQ_ADDR,
- .mask_od = ST_PRESS_LPS331AP_OD_IRQ_MASK,
+ .addr = 0x22,
+ .mask_int1 = 0x04,
+ .mask_int2 = 0x20,
+ .addr_ihl = 0x22,
+ .mask_ihl = 0x80,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_PRESS_LPS331AP_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_PRESS_LPS001WP_WAI_EXP,
+ /*
+ * CUSTOM VALUES FOR LPS001WP SENSOR
+ */
+ .wai = 0xba,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LPS001WP_PRESS_DEV_NAME,
@@ -385,17 +302,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.ch = (struct iio_chan_spec *)st_press_lps001wp_channels,
.num_ch = ARRAY_SIZE(st_press_lps001wp_channels),
.odr = {
- .addr = ST_PRESS_LPS001WP_ODR_ADDR,
- .mask = ST_PRESS_LPS001WP_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x30,
.odr_avl = {
- { 1, ST_PRESS_LPS001WP_ODR_AVL_1HZ_VAL, },
- { 7, ST_PRESS_LPS001WP_ODR_AVL_7HZ_VAL, },
- { 13, ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL, },
+ { .hz = 1, .value = 0x01 },
+ { .hz = 7, .value = 0x02 },
+ { .hz = 13, .value = 0x03 },
},
},
.pw = {
- .addr = ST_PRESS_LPS001WP_PW_ADDR,
- .mask = ST_PRESS_LPS001WP_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x40,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -413,17 +330,22 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
},
},
.bdu = {
- .addr = ST_PRESS_LPS001WP_BDU_ADDR,
- .mask = ST_PRESS_LPS001WP_BDU_MASK,
+ .addr = 0x20,
+ .mask = 0x04,
},
.drdy_irq = {
.addr = 0,
},
- .multi_read_bit = ST_PRESS_LPS001WP_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_PRESS_LPS25H_WAI_EXP,
+ /*
+ * CUSTOM VALUES FOR LPS25H SENSOR
+ * See LPS25H datasheet:
+ * http://www2.st.com/resource/en/datasheet/lps25h.pdf
+ */
+ .wai = 0xbd,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LPS25H_PRESS_DEV_NAME,
@@ -431,18 +353,18 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.ch = (struct iio_chan_spec *)st_press_1_channels,
.num_ch = ARRAY_SIZE(st_press_1_channels),
.odr = {
- .addr = ST_PRESS_LPS25H_ODR_ADDR,
- .mask = ST_PRESS_LPS25H_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x70,
.odr_avl = {
- { 1, ST_PRESS_LPS25H_ODR_AVL_1HZ_VAL, },
- { 7, ST_PRESS_LPS25H_ODR_AVL_7HZ_VAL, },
- { 13, ST_PRESS_LPS25H_ODR_AVL_13HZ_VAL, },
- { 25, ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL, },
+ { .hz = 1, .value = 0x01 },
+ { .hz = 7, .value = 0x02 },
+ { .hz = 13, .value = 0x03 },
+ { .hz = 25, .value = 0x04 },
},
},
.pw = {
- .addr = ST_PRESS_LPS25H_PW_ADDR,
- .mask = ST_PRESS_LPS25H_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x80,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -460,24 +382,29 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
},
},
.bdu = {
- .addr = ST_PRESS_LPS25H_BDU_ADDR,
- .mask = ST_PRESS_LPS25H_BDU_MASK,
+ .addr = 0x20,
+ .mask = 0x04,
},
.drdy_irq = {
- .addr = ST_PRESS_LPS25H_DRDY_IRQ_ADDR,
- .mask_int1 = ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_PRESS_LPS25H_IHL_IRQ_ADDR,
- .mask_ihl = ST_PRESS_LPS25H_IHL_IRQ_MASK,
- .addr_od = ST_PRESS_LPS25H_OD_IRQ_ADDR,
- .mask_od = ST_PRESS_LPS25H_OD_IRQ_MASK,
+ .addr = 0x23,
+ .mask_int1 = 0x01,
+ .mask_int2 = 0x10,
+ .addr_ihl = 0x22,
+ .mask_ihl = 0x80,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_PRESS_LPS22HB_WAI_EXP,
+ /*
+ * CUSTOM VALUES FOR LPS22HB SENSOR
+ * See LPS22HB datasheet:
+ * http://www2.st.com/resource/en/datasheet/lps22hb.pdf
+ */
+ .wai = 0xb1,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LPS22HB_PRESS_DEV_NAME,
@@ -485,19 +412,19 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.ch = (struct iio_chan_spec *)st_press_lps22hb_channels,
.num_ch = ARRAY_SIZE(st_press_lps22hb_channels),
.odr = {
- .addr = ST_PRESS_LPS22HB_ODR_ADDR,
- .mask = ST_PRESS_LPS22HB_ODR_MASK,
+ .addr = 0x10,
+ .mask = 0x70,
.odr_avl = {
- { 1, ST_PRESS_LPS22HB_ODR_AVL_1HZ_VAL, },
- { 10, ST_PRESS_LPS22HB_ODR_AVL_10HZ_VAL, },
- { 25, ST_PRESS_LPS22HB_ODR_AVL_25HZ_VAL, },
- { 50, ST_PRESS_LPS22HB_ODR_AVL_50HZ_VAL, },
- { 75, ST_PRESS_LPS22HB_ODR_AVL_75HZ_VAL, },
+ { .hz = 1, .value = 0x01 },
+ { .hz = 10, .value = 0x02 },
+ { .hz = 25, .value = 0x03 },
+ { .hz = 50, .value = 0x04 },
+ { .hz = 75, .value = 0x05 },
},
},
.pw = {
- .addr = ST_PRESS_LPS22HB_PW_ADDR,
- .mask = ST_PRESS_LPS22HB_PW_MASK,
+ .addr = 0x10,
+ .mask = 0x70,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
.fs = {
@@ -514,20 +441,20 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
},
},
.bdu = {
- .addr = ST_PRESS_LPS22HB_BDU_ADDR,
- .mask = ST_PRESS_LPS22HB_BDU_MASK,
+ .addr = 0x10,
+ .mask = 0x02,
},
.drdy_irq = {
- .addr = ST_PRESS_LPS22HB_DRDY_IRQ_ADDR,
- .mask_int1 = ST_PRESS_LPS22HB_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_PRESS_LPS22HB_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_PRESS_LPS22HB_IHL_IRQ_ADDR,
- .mask_ihl = ST_PRESS_LPS22HB_IHL_IRQ_MASK,
- .addr_od = ST_PRESS_LPS22HB_OD_IRQ_ADDR,
- .mask_od = ST_PRESS_LPS22HB_OD_IRQ_MASK,
+ .addr = 0x12,
+ .mask_int1 = 0x04,
+ .mask_int2 = 0x08,
+ .addr_ihl = 0x12,
+ .mask_ihl = 0x80,
+ .addr_od = 0x12,
+ .mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_PRESS_LPS22HB_MULTIREAD_BIT,
+ .multi_read_bit = true,
},
};
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 19d2eb46fda6..c720c3ac0b9b 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -147,12 +147,8 @@ struct zpa2326_private {
#define zpa2326_warn(_idev, _format, _arg...) \
dev_warn(_idev->dev.parent, _format, ##_arg)
-#ifdef DEBUG
#define zpa2326_dbg(_idev, _format, _arg...) \
dev_dbg(_idev->dev.parent, _format, ##_arg)
-#else
-#define zpa2326_dbg(_idev, _format, _arg...)
-#endif
bool zpa2326_isreg_writeable(struct device *dev, unsigned int reg)
{
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 3141c3c161bb..1fa9eefa0982 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -301,8 +301,6 @@ static int lidar_probe(struct i2c_client *client,
if (ret)
goto error_unreg_buffer;
pm_runtime_enable(&client->dev);
-
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_idle(&client->dev);
return 0;
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index 982936334537..07ec465f1095 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -37,6 +37,8 @@ static void arizona_haptics_work(struct work_struct *work)
struct arizona_haptics,
work);
struct arizona *arizona = haptics->arizona;
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(arizona->dapm);
int ret;
if (!haptics->arizona->dapm) {
@@ -66,7 +68,7 @@ static void arizona_haptics_work(struct work_struct *work)
return;
}
- ret = snd_soc_dapm_enable_pin(arizona->dapm, "HAPTICS");
+ ret = snd_soc_component_enable_pin(component, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to start HAPTICS: %d\n",
ret);
@@ -81,7 +83,7 @@ static void arizona_haptics_work(struct work_struct *work)
}
} else {
/* This disable sequence will be a noop if already enabled */
- ret = snd_soc_dapm_disable_pin(arizona->dapm, "HAPTICS");
+ ret = snd_soc_component_disable_pin(component, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to disable HAPTICS: %d\n",
ret);
@@ -140,11 +142,14 @@ static int arizona_haptics_play(struct input_dev *input, void *data,
static void arizona_haptics_close(struct input_dev *input)
{
struct arizona_haptics *haptics = input_get_drvdata(input);
+ struct snd_soc_component *component;
cancel_work_sync(&haptics->work);
- if (haptics->arizona->dapm)
- snd_soc_dapm_disable_pin(haptics->arizona->dapm, "HAPTICS");
+ if (haptics->arizona->dapm) {
+ component = snd_soc_dapm_to_component(haptics->arizona->dapm);
+ snd_soc_component_disable_pin(component, "HAPTICS");
+ }
}
static int arizona_haptics_probe(struct platform_device *pdev)
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 227fbd2dbb71..3900875dec10 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -108,7 +108,8 @@ static irqreturn_t input_handler(int rq, void *dev_id)
static int xenkbd_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
- int ret, i, abs;
+ int ret, i;
+ unsigned int abs;
struct xenkbd_info *info;
struct input_dev *kbd, *ptr;
@@ -127,8 +128,7 @@ static int xenkbd_probe(struct xenbus_device *dev,
if (!info->page)
goto error_nomem;
- if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0)
- abs = 0;
+ abs = xenbus_read_unsigned(dev->otherend, "feature-abs-pointer", 0);
if (abs) {
ret = xenbus_write(XBT_NIL, dev->nodename,
"request-abs-pointer", "1");
@@ -322,11 +322,8 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
case XenbusStateInitWait:
InitWait:
- ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-abs-pointer", "%d", &val);
- if (ret < 0)
- val = 0;
- if (val) {
+ if (xenbus_read_unsigned(info->xbdev->otherend,
+ "feature-abs-pointer", 0)) {
ret = xenbus_write(XBT_NIL, info->xbdev->nodename,
"request-abs-pointer", "1");
if (ret)
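
Both xen-kbdfront hunks above replace the xenbus_scanf()-plus-fallback idiom with the xenbus_read_unsigned() helper, which also removes the signed/unsigned mismatch on the feature flag. A minimal sketch of the two idioms as fragments from the probe path, declarations omitted (the helper returns the supplied default when the node is absent or unparsable):

	/* before: parse by hand and fall back on error */
	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0)
		abs = 0;

	/* after: the default value is folded into the call */
	abs = xenbus_read_unsigned(dev->otherend, "feature-abs-pointer", 0);
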
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 90d6be3c26cc..83cf11312fd9 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -682,7 +682,7 @@ static int wm97xx_probe(struct device *dev)
}
platform_set_drvdata(wm->battery_dev, wm);
wm->battery_dev->dev.parent = dev;
- wm->battery_dev->dev.platform_data = pdata;
+ wm->battery_dev->dev.platform_data = pdata->batt_pdata;
ret = platform_device_add(wm->battery_dev);
if (ret < 0)
goto batt_reg_err;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 19d642eae096..26e1d7fafb1e 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -120,11 +120,10 @@ static void gic_redist_wait_for_rwp(void)
}
#ifdef CONFIG_ARM64
-static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx);
static u64 __maybe_unused gic_read_iar(void)
{
- if (static_branch_unlikely(&is_cavium_thunderx))
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
return gic_read_iar_cavium_thunderx();
else
return gic_read_iar_common();
@@ -905,14 +904,6 @@ static const struct irq_domain_ops partition_domain_ops = {
.select = gic_irq_domain_select,
};
-static void gicv3_enable_quirks(void)
-{
-#ifdef CONFIG_ARM64
- if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
- static_branch_enable(&is_cavium_thunderx);
-#endif
-}
-
static int __init gic_init_bases(void __iomem *dist_base,
struct redist_region *rdist_regs,
u32 nr_redist_regions,
@@ -935,8 +926,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_data.nr_redist_regions = nr_redist_regions;
gic_data.redist_stride = redist_stride;
- gicv3_enable_quirks();
-
/*
* Find out how many interrupts are supported.
* The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index ba4beb25d872..298c8dba0321 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -855,7 +855,7 @@ struct DTag { /* Display tags */
{ 0x8c, "Reason" },
{ 0x8d, "Calling party name" },
{ 0x8e, "Called party name" },
- { 0x8f, "Orignal called name" },
+ { 0x8f, "Original called name" },
{ 0x90, "Redirecting name" },
{ 0x91, "Connected name" },
{ 0x92, "Originating restrictions" },
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 7622e3dc5d82..02240a0b39c9 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -22,7 +22,7 @@
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>
@@ -1129,10 +1129,4 @@ static struct miscdevice _nvm_misc = {
.nodename = "lightnvm/control",
.fops = &_ctl_fops,
};
-module_misc_device(_nvm_misc);
-
-MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
-
-MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
-MODULE_LICENSE("GPL v2");
-MODULE_VERSION("0.1");
+builtin_misc_device(_nvm_misc);
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index 4ca2739b4fad..ee7fb6ec96bd 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -149,7 +149,7 @@ static int chameleon_get_bar(char __iomem **base, phys_addr_t mapbase,
reg = readl(*base);
bar_count = BAR_CNT(reg);
- if (bar_count <= 0 && bar_count > CHAMELEON_BAR_MAX)
+ if (bar_count <= 0 || bar_count > CHAMELEON_BAR_MAX)
return -ENODEV;
c = kcalloc(bar_count, sizeof(struct chameleon_bar),
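
The one-character change above matters because the old guard could never trigger: no value satisfies bar_count <= 0 and bar_count > CHAMELEON_BAR_MAX at the same time, so invalid BAR counts fell through to the allocation. A minimal sketch of the corrected bounds check, using the same names as chameleon_get_bar():

	/* accept only counts in (0, CHAMELEON_BAR_MAX]; reject both invalid ends */
	if (bar_count <= 0 || bar_count > CHAMELEON_BAR_MAX)
		return -ENODEV;
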
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 02a5345a44a6..b7767da50c26 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -240,9 +240,17 @@ config DM_BUFIO
as a cache, holding recently-read blocks in memory and performing
delayed writes.
+config DM_DEBUG_BLOCK_MANAGER_LOCKING
+ bool "Block manager locking"
+ depends on DM_BUFIO
+ ---help---
+ Block manager locking can catch various metadata corruption issues.
+
+ If unsure, say N.
+
config DM_DEBUG_BLOCK_STACK_TRACING
bool "Keep stack trace of persistent data block lock holders"
- depends on STACKTRACE_SUPPORT && DM_BUFIO
+ depends on STACKTRACE_SUPPORT && DM_DEBUG_BLOCK_MANAGER_LOCKING
select STACKTRACE
---help---
Enable this for messages that may help debug problems with the
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 2d826927a3bf..9fb2ccac958a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -27,6 +27,7 @@
#include <linux/mount.h>
#include <linux/buffer_head.h>
#include <linux/seq_file.h>
+#include <trace/events/block.h>
#include "md.h"
#include "bitmap.h"
@@ -208,11 +209,13 @@ static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mdde
static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
{
- struct md_rdev *rdev = NULL;
+ struct md_rdev *rdev;
struct block_device *bdev;
struct mddev *mddev = bitmap->mddev;
struct bitmap_storage *store = &bitmap->storage;
+restart:
+ rdev = NULL;
while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
int size = PAGE_SIZE;
loff_t offset = mddev->bitmap_info.offset;
@@ -268,8 +271,8 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
page);
}
- if (wait)
- md_super_wait(mddev);
+ if (wait && md_super_wait(mddev) < 0)
+ goto restart;
return 0;
bad_alignment:
@@ -405,10 +408,10 @@ static int read_page(struct file *file, unsigned long index,
ret = -EIO;
out:
if (ret)
- printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %d\n",
- (int)PAGE_SIZE,
- (unsigned long long)index << PAGE_SHIFT,
- ret);
+ pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
+ (int)PAGE_SIZE,
+ (unsigned long long)index << PAGE_SHIFT,
+ ret);
return ret;
}
@@ -416,6 +419,28 @@ out:
* bitmap file superblock operations
*/
+/*
+ * bitmap_wait_writes() should be called before writing any bitmap
+ * blocks, to ensure previous writes, particularly from
+ * bitmap_daemon_work(), have completed.
+ */
+static void bitmap_wait_writes(struct bitmap *bitmap)
+{
+ if (bitmap->storage.file)
+ wait_event(bitmap->write_wait,
+ atomic_read(&bitmap->pending_writes)==0);
+ else
+ /* Note that we ignore the return value. The writes
+ * might have failed, but that would just mean that
+ * some bits which should be cleared haven't been,
+ * which is safe. The relevant bitmap blocks will
+ * probably get written again, but there is no great
+ * loss if they aren't.
+ */
+ md_super_wait(bitmap->mddev);
+}
+
+
/* update the event counter and sync the superblock to disk */
void bitmap_update_sb(struct bitmap *bitmap)
{
@@ -455,24 +480,24 @@ void bitmap_print_sb(struct bitmap *bitmap)
if (!bitmap || !bitmap->storage.sb_page)
return;
sb = kmap_atomic(bitmap->storage.sb_page);
- printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
- printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic));
- printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version));
- printk(KERN_DEBUG " uuid: %08x.%08x.%08x.%08x\n",
- *(__u32 *)(sb->uuid+0),
- *(__u32 *)(sb->uuid+4),
- *(__u32 *)(sb->uuid+8),
- *(__u32 *)(sb->uuid+12));
- printk(KERN_DEBUG " events: %llu\n",
- (unsigned long long) le64_to_cpu(sb->events));
- printk(KERN_DEBUG "events cleared: %llu\n",
- (unsigned long long) le64_to_cpu(sb->events_cleared));
- printk(KERN_DEBUG " state: %08x\n", le32_to_cpu(sb->state));
- printk(KERN_DEBUG " chunksize: %d B\n", le32_to_cpu(sb->chunksize));
- printk(KERN_DEBUG " daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
- printk(KERN_DEBUG " sync size: %llu KB\n",
- (unsigned long long)le64_to_cpu(sb->sync_size)/2);
- printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
+ pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
+ pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
+ pr_debug(" version: %d\n", le32_to_cpu(sb->version));
+ pr_debug(" uuid: %08x.%08x.%08x.%08x\n",
+ *(__u32 *)(sb->uuid+0),
+ *(__u32 *)(sb->uuid+4),
+ *(__u32 *)(sb->uuid+8),
+ *(__u32 *)(sb->uuid+12));
+ pr_debug(" events: %llu\n",
+ (unsigned long long) le64_to_cpu(sb->events));
+ pr_debug("events cleared: %llu\n",
+ (unsigned long long) le64_to_cpu(sb->events_cleared));
+ pr_debug(" state: %08x\n", le32_to_cpu(sb->state));
+ pr_debug(" chunksize: %d B\n", le32_to_cpu(sb->chunksize));
+ pr_debug(" daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
+ pr_debug(" sync size: %llu KB\n",
+ (unsigned long long)le64_to_cpu(sb->sync_size)/2);
+ pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
kunmap_atomic(sb);
}
@@ -506,14 +531,14 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
BUG_ON(!chunksize);
if (!is_power_of_2(chunksize)) {
kunmap_atomic(sb);
- printk(KERN_ERR "bitmap chunksize not a power of 2\n");
+ pr_warn("bitmap chunksize not a power of 2\n");
return -EINVAL;
}
sb->chunksize = cpu_to_le32(chunksize);
daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
- printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
+ pr_debug("Choosing daemon_sleep default (5 sec)\n");
daemon_sleep = 5 * HZ;
}
sb->daemon_sleep = cpu_to_le32(daemon_sleep);
@@ -584,7 +609,7 @@ re_read:
/* to 4k blocks */
bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
- pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
+ pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
bitmap->cluster_slot, offset);
}
@@ -634,7 +659,7 @@ re_read:
else if (write_behind > COUNTER_MAX)
reason = "write-behind limit out of range (0 - 16383)";
if (reason) {
- printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n",
+ pr_warn("%s: invalid bitmap file superblock: %s\n",
bmname(bitmap), reason);
goto out;
}
@@ -648,18 +673,15 @@ re_read:
* bitmap's UUID and event counter to the mddev's
*/
if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
- printk(KERN_INFO
- "%s: bitmap superblock UUID mismatch\n",
- bmname(bitmap));
+ pr_warn("%s: bitmap superblock UUID mismatch\n",
+ bmname(bitmap));
goto out;
}
events = le64_to_cpu(sb->events);
if (!nodes && (events < bitmap->mddev->events)) {
- printk(KERN_INFO
- "%s: bitmap file is out of date (%llu < %llu) "
- "-- forcing full recovery\n",
- bmname(bitmap), events,
- (unsigned long long) bitmap->mddev->events);
+ pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
+ bmname(bitmap), events,
+ (unsigned long long) bitmap->mddev->events);
set_bit(BITMAP_STALE, &bitmap->flags);
}
}
@@ -679,8 +701,8 @@ out:
if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
err = md_setup_cluster(bitmap->mddev, nodes);
if (err) {
- pr_err("%s: Could not setup cluster service (%d)\n",
- bmname(bitmap), err);
+ pr_warn("%s: Could not setup cluster service (%d)\n",
+ bmname(bitmap), err);
goto out_no_sb;
}
bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
@@ -847,15 +869,13 @@ static void bitmap_file_kick(struct bitmap *bitmap)
ptr = file_path(bitmap->storage.file,
path, PAGE_SIZE);
- printk(KERN_ALERT
- "%s: kicking failed bitmap file %s from array!\n",
- bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
+ pr_warn("%s: kicking failed bitmap file %s from array!\n",
+ bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
kfree(path);
} else
- printk(KERN_ALERT
- "%s: disabling internal bitmap due to errors\n",
- bmname(bitmap));
+ pr_warn("%s: disabling internal bitmap due to errors\n",
+ bmname(bitmap));
}
}
@@ -983,6 +1003,7 @@ void bitmap_unplug(struct bitmap *bitmap)
{
unsigned long i;
int dirty, need_write;
+ int writing = 0;
if (!bitmap || !bitmap->storage.filemap ||
test_bit(BITMAP_STALE, &bitmap->flags))
@@ -997,15 +1018,19 @@ void bitmap_unplug(struct bitmap *bitmap)
need_write = test_and_clear_page_attr(bitmap, i,
BITMAP_PAGE_NEEDWRITE);
if (dirty || need_write) {
+ if (!writing) {
+ bitmap_wait_writes(bitmap);
+ if (bitmap->mddev->queue)
+ blk_add_trace_msg(bitmap->mddev->queue,
+ "md bitmap_unplug");
+ }
clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
write_page(bitmap, bitmap->storage.filemap[i], 0);
+ writing = 1;
}
}
- if (bitmap->storage.file)
- wait_event(bitmap->write_wait,
- atomic_read(&bitmap->pending_writes)==0);
- else
- md_super_wait(bitmap->mddev);
+ if (writing)
+ bitmap_wait_writes(bitmap);
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
bitmap_file_kick(bitmap);
@@ -1056,14 +1081,13 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
if (outofdate)
- printk(KERN_INFO "%s: bitmap file is out of date, doing full "
- "recovery\n", bmname(bitmap));
+ pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap));
if (file && i_size_read(file->f_mapping->host) < store->bytes) {
- printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
- bmname(bitmap),
- (unsigned long) i_size_read(file->f_mapping->host),
- store->bytes);
+ pr_warn("%s: bitmap file too short %lu < %lu\n",
+ bmname(bitmap),
+ (unsigned long) i_size_read(file->f_mapping->host),
+ store->bytes);
goto err;
}
@@ -1137,16 +1161,15 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
offset = 0;
}
- printk(KERN_INFO "%s: bitmap initialized from disk: "
- "read %lu pages, set %lu of %lu bits\n",
- bmname(bitmap), store->file_pages,
- bit_cnt, chunks);
+ pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
+ bmname(bitmap), store->file_pages,
+ bit_cnt, chunks);
return 0;
err:
- printk(KERN_INFO "%s: bitmap initialisation failed: %d\n",
- bmname(bitmap), ret);
+ pr_warn("%s: bitmap initialisation failed: %d\n",
+ bmname(bitmap), ret);
return ret;
}
@@ -1225,6 +1248,10 @@ void bitmap_daemon_work(struct mddev *mddev)
}
bitmap->allclean = 1;
+ if (bitmap->mddev->queue)
+ blk_add_trace_msg(bitmap->mddev->queue,
+ "md bitmap_daemon_work");
+
/* Any file-page which is PENDING now needs to be written.
* So set NEEDWRITE now, then after we make any last-minute changes
* we will write it.
@@ -1289,6 +1316,7 @@ void bitmap_daemon_work(struct mddev *mddev)
}
spin_unlock_irq(&counts->lock);
+ bitmap_wait_writes(bitmap);
/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
* DIRTY pages need to be written by bitmap_unplug so it can wait
* for them.
@@ -1595,7 +1623,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
atomic_read(&bitmap->mddev->recovery_active) == 0);
bitmap->mddev->curr_resync_completed = sector;
- set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
s = 0;
while (s < sector && s < bitmap->mddev->resync_max_sectors) {
@@ -1825,8 +1853,8 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
if (err)
goto error;
- printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
- bitmap->counts.pages, bmname(bitmap));
+ pr_debug("created bitmap (%lu pages) for device %s\n",
+ bitmap->counts.pages, bmname(bitmap));
err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
if (err)
@@ -2029,8 +2057,10 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
!bitmap->mddev->bitmap_info.external,
mddev_is_clustered(bitmap->mddev)
? bitmap->cluster_slot : 0);
- if (ret)
+ if (ret) {
+ bitmap_file_unmap(&store);
goto err;
+ }
pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);
@@ -2089,7 +2119,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
BITMAP_BLOCK_SHIFT);
blocks = old_counts.chunks << old_counts.chunkshift;
- pr_err("Could not pre-allocate in-memory bitmap for cluster raid\n");
+ pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
break;
} else
bitmap->counts.bp[page].count += 1;
@@ -2266,7 +2296,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
/* Ensure new bitmap info is stored in
* metadata promptly.
*/
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
}
rv = 0;
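
Several of the bitmap.c hunks above rely on md_super_wait() being able to report failure (the md.c change further down makes it return -EAGAIN when a failfast superblock write needs to be redone): write_sb_page() now restarts its rdev loop in that case, and bitmap_unplug()/bitmap_daemon_work() call the new bitmap_wait_writes() before issuing fresh writes. A minimal sketch of the retry shape, assuming a caller that must not lose the write:

	restart:
		/* ... queue the bitmap/superblock pages with md_super_write() ... */
		if (wait && md_super_wait(mddev) < 0)
			/* a failfast write failed; redo it without MD_FAILFAST */
			goto restart;
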
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 262e75365cc0..84d2f0e4c754 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -820,12 +820,14 @@ enum new_flag {
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
struct dm_buffer *b;
+ bool tried_noio_alloc = false;
/*
* dm-bufio is resistant to allocation failures (it just keeps
* one buffer reserved in cases all the allocations fail).
* So set flags to not try too hard:
- * GFP_NOIO: don't recurse into the I/O layer
+ * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
+ * mutex and wait ourselves.
* __GFP_NORETRY: don't retry and rather return failure
* __GFP_NOMEMALLOC: don't use emergency reserves
* __GFP_NOWARN: don't print a warning in case of failure
@@ -835,7 +837,7 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
*/
while (1) {
if (dm_bufio_cache_size_latch != 1) {
- b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
if (b)
return b;
}
@@ -843,6 +845,15 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
if (nf == NF_PREFETCH)
return NULL;
+ if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
+ dm_bufio_unlock(c);
+ b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ dm_bufio_lock(c);
+ if (b)
+ return b;
+ tried_noio_alloc = true;
+ }
+
if (!list_empty(&c->reserved_buffers)) {
b = list_entry(c->reserved_buffers.next,
struct dm_buffer, lru_list);
@@ -1585,18 +1596,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
static unsigned long
dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
- struct dm_bufio_client *c;
- unsigned long count;
-
- c = container_of(shrink, struct dm_bufio_client, shrinker);
- if (sc->gfp_mask & __GFP_FS)
- dm_bufio_lock(c);
- else if (!dm_bufio_trylock(c))
- return 0;
+ struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
- count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
- dm_bufio_unlock(c);
- return count;
+ return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]);
}
/*
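
The rewritten comment in __alloc_buffer_wait_no_callback() describes a two-tier allocation: a GFP_NOWAIT attempt that cannot sleep while the client mutex is held, then a single GFP_NOIO attempt with the mutex dropped so other users of the client are not blocked during reclaim, and only then the reserved-buffer fallback. A condensed sketch of that control flow, using dm-bufio's own names with the surrounding conditions elided:

	/* 1) cheap attempt that may not sleep: the client mutex is still held */
	b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
	if (b)
		return b;

	/* 2) one sleeping attempt, but only after releasing the mutex */
	if (!tried_noio_alloc) {
		dm_bufio_unlock(c);
		b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
		dm_bufio_lock(c);
		if (b)
			return b;
		tried_noio_alloc = true;
	}

	/* 3) fall back to a reserved buffer, or wait for one to be freed */
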
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 695577812cf6..624fe4319b24 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -383,7 +383,6 @@ static int __format_metadata(struct dm_cache_metadata *cmd)
goto bad;
dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
-
r = dm_bitset_empty(&cmd->discard_info, &cmd->discard_root);
if (r < 0)
goto bad;
@@ -789,7 +788,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
{
if (cmd->data_block_size != data_block_size) {
- DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
+ DMERR("data_block_size (%llu) different from that in metadata (%llu)",
(unsigned long long) data_block_size,
(unsigned long long) cmd->data_block_size);
return false;
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index c33f4a6e1d7d..f19c6930a67c 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1361,7 +1361,7 @@ static void smq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
static unsigned random_level(dm_cblock_t cblock)
{
- return hash_32_generic(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
+ return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
}
static int smq_load_mapping(struct dm_cache_policy *p,
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 59b2c50562e4..e04c61e0839e 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -989,7 +989,8 @@ static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mod
enum cache_metadata_mode old_mode = get_cache_mode(cache);
if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
- DMERR("unable to read needs_check flag, setting failure mode");
+ DMERR("%s: unable to read needs_check flag, setting failure mode.",
+ cache_device_name(cache));
new_mode = CM_FAIL;
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 68a9eb4f3f36..7c6c57216bf2 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/key.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
@@ -23,12 +24,14 @@
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
+#include <linux/ctype.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
#include <crypto/skcipher.h>
+#include <keys/user-type.h>
#include <linux/device-mapper.h>
@@ -140,8 +143,9 @@ struct crypt_config {
char *cipher;
char *cipher_string;
+ char *key_string;
- struct crypt_iv_operations *iv_gen_ops;
+ const struct crypt_iv_operations *iv_gen_ops;
union {
struct iv_essiv_private essiv;
struct iv_benbi_private benbi;
@@ -758,15 +762,15 @@ static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
return r;
}
-static struct crypt_iv_operations crypt_iv_plain_ops = {
+static const struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen
};
-static struct crypt_iv_operations crypt_iv_plain64_ops = {
+static const struct crypt_iv_operations crypt_iv_plain64_ops = {
.generator = crypt_iv_plain64_gen
};
-static struct crypt_iv_operations crypt_iv_essiv_ops = {
+static const struct crypt_iv_operations crypt_iv_essiv_ops = {
.ctr = crypt_iv_essiv_ctr,
.dtr = crypt_iv_essiv_dtr,
.init = crypt_iv_essiv_init,
@@ -774,17 +778,17 @@ static struct crypt_iv_operations crypt_iv_essiv_ops = {
.generator = crypt_iv_essiv_gen
};
-static struct crypt_iv_operations crypt_iv_benbi_ops = {
+static const struct crypt_iv_operations crypt_iv_benbi_ops = {
.ctr = crypt_iv_benbi_ctr,
.dtr = crypt_iv_benbi_dtr,
.generator = crypt_iv_benbi_gen
};
-static struct crypt_iv_operations crypt_iv_null_ops = {
+static const struct crypt_iv_operations crypt_iv_null_ops = {
.generator = crypt_iv_null_gen
};
-static struct crypt_iv_operations crypt_iv_lmk_ops = {
+static const struct crypt_iv_operations crypt_iv_lmk_ops = {
.ctr = crypt_iv_lmk_ctr,
.dtr = crypt_iv_lmk_dtr,
.init = crypt_iv_lmk_init,
@@ -793,7 +797,7 @@ static struct crypt_iv_operations crypt_iv_lmk_ops = {
.post = crypt_iv_lmk_post
};
-static struct crypt_iv_operations crypt_iv_tcw_ops = {
+static const struct crypt_iv_operations crypt_iv_tcw_ops = {
.ctr = crypt_iv_tcw_ctr,
.dtr = crypt_iv_tcw_dtr,
.init = crypt_iv_tcw_init,
@@ -994,7 +998,6 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
unsigned i, len, remaining_size;
struct page *page;
- struct bio_vec *bvec;
retry:
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
@@ -1019,12 +1022,7 @@ retry:
len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
- bvec = &clone->bi_io_vec[clone->bi_vcnt++];
- bvec->bv_page = page;
- bvec->bv_len = len;
- bvec->bv_offset = 0;
-
- clone->bi_iter.bi_size += len;
+ bio_add_page(clone, page, len, 0);
remaining_size -= len;
}
@@ -1471,7 +1469,7 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
return 0;
}
-static int crypt_setkey_allcpus(struct crypt_config *cc)
+static int crypt_setkey(struct crypt_config *cc)
{
unsigned subkey_size;
int err = 0, i, r;
@@ -1490,25 +1488,157 @@ static int crypt_setkey_allcpus(struct crypt_config *cc)
return err;
}
+#ifdef CONFIG_KEYS
+
+static bool contains_whitespace(const char *str)
+{
+ while (*str)
+ if (isspace(*str++))
+ return true;
+ return false;
+}
+
+static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
+{
+ char *new_key_string, *key_desc;
+ int ret;
+ struct key *key;
+ const struct user_key_payload *ukp;
+
+ /*
+ * Reject key_string with whitespace. dm core currently lacks code for
+ * proper whitespace escaping in arguments on DM_TABLE_STATUS path.
+ */
+ if (contains_whitespace(key_string)) {
+ DMERR("whitespace chars not allowed in key string");
+ return -EINVAL;
+ }
+
+ /* look for next ':' separating key_type from key_description */
+ key_desc = strpbrk(key_string, ":");
+ if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
+ return -EINVAL;
+
+ if (strncmp(key_string, "logon:", key_desc - key_string + 1) &&
+ strncmp(key_string, "user:", key_desc - key_string + 1))
+ return -EINVAL;
+
+ new_key_string = kstrdup(key_string, GFP_KERNEL);
+ if (!new_key_string)
+ return -ENOMEM;
+
+ key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user,
+ key_desc + 1, NULL);
+ if (IS_ERR(key)) {
+ kzfree(new_key_string);
+ return PTR_ERR(key);
+ }
+
+ rcu_read_lock();
+
+ ukp = user_key_payload(key);
+ if (!ukp) {
+ rcu_read_unlock();
+ key_put(key);
+ kzfree(new_key_string);
+ return -EKEYREVOKED;
+ }
+
+ if (cc->key_size != ukp->datalen) {
+ rcu_read_unlock();
+ key_put(key);
+ kzfree(new_key_string);
+ return -EINVAL;
+ }
+
+ memcpy(cc->key, ukp->data, cc->key_size);
+
+ rcu_read_unlock();
+ key_put(key);
+
+ /* clear the flag since following operations may invalidate previously valid key */
+ clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+
+ ret = crypt_setkey(cc);
+
+ /* wipe the kernel key payload copy in each case */
+ memset(cc->key, 0, cc->key_size * sizeof(u8));
+
+ if (!ret) {
+ set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+ kzfree(cc->key_string);
+ cc->key_string = new_key_string;
+ } else
+ kzfree(new_key_string);
+
+ return ret;
+}
+
+static int get_key_size(char **key_string)
+{
+ char *colon, dummy;
+ int ret;
+
+ if (*key_string[0] != ':')
+ return strlen(*key_string) >> 1;
+
+ /* look for next ':' in key string */
+ colon = strpbrk(*key_string + 1, ":");
+ if (!colon)
+ return -EINVAL;
+
+ if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
+ return -EINVAL;
+
+ *key_string = colon;
+
+ /* remaining key string should be :<logon|user>:<key_desc> */
+
+ return ret;
+}
+
+#else
+
+static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
+{
+ return -EINVAL;
+}
+
+static int get_key_size(char **key_string)
+{
+ return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
+}
+
+#endif
+
static int crypt_set_key(struct crypt_config *cc, char *key)
{
int r = -EINVAL;
int key_string_len = strlen(key);
- /* The key size may not be changed. */
- if (cc->key_size != (key_string_len >> 1))
- goto out;
-
/* Hyphen (which gives a key_size of zero) means there is no key. */
if (!cc->key_size && strcmp(key, "-"))
goto out;
- if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
+ /* ':' means the key is in kernel keyring, short-circuit normal key processing */
+ if (key[0] == ':') {
+ r = crypt_set_keyring_key(cc, key + 1);
goto out;
+ }
- set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+ /* clear the flag since following operations may invalidate previously valid key */
+ clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
- r = crypt_setkey_allcpus(cc);
+ /* wipe references to any kernel keyring key */
+ kzfree(cc->key_string);
+ cc->key_string = NULL;
+
+ if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
+ goto out;
+
+ r = crypt_setkey(cc);
+ if (!r)
+ set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
out:
/* Hex key string not needed after here, so wipe it. */
@@ -1521,8 +1651,10 @@ static int crypt_wipe_key(struct crypt_config *cc)
{
clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
memset(&cc->key, 0, cc->key_size * sizeof(u8));
+ kzfree(cc->key_string);
+ cc->key_string = NULL;
- return crypt_setkey_allcpus(cc);
+ return crypt_setkey(cc);
}
static void crypt_dtr(struct dm_target *ti)
@@ -1558,6 +1690,7 @@ static void crypt_dtr(struct dm_target *ti)
kzfree(cc->cipher);
kzfree(cc->cipher_string);
+ kzfree(cc->key_string);
/* Must zero key material before freeing */
kzfree(cc);
@@ -1726,12 +1859,13 @@ bad_mem:
/*
* Construct an encryption mapping:
- * <cipher> <key> <iv_offset> <dev_path> <start>
+ * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
*/
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct crypt_config *cc;
- unsigned int key_size, opt_params;
+ int key_size;
+ unsigned int opt_params;
unsigned long long tmpll;
int ret;
size_t iv_size_padding;
@@ -1748,7 +1882,11 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -EINVAL;
}
- key_size = strlen(argv[1]) >> 1;
+ key_size = get_key_size(&argv[1]);
+ if (key_size < 0) {
+ ti->error = "Cannot parse key size";
+ return -EINVAL;
+ }
cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
if (!cc) {
@@ -1955,10 +2093,13 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
case STATUSTYPE_TABLE:
DMEMIT("%s ", cc->cipher_string);
- if (cc->key_size > 0)
- for (i = 0; i < cc->key_size; i++)
- DMEMIT("%02x", cc->key[i]);
- else
+ if (cc->key_size > 0) {
+ if (cc->key_string)
+ DMEMIT(":%u:%s", cc->key_size, cc->key_string);
+ else
+ for (i = 0; i < cc->key_size; i++)
+ DMEMIT("%02x", cc->key[i]);
+ } else
DMEMIT("-");
DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
@@ -2014,7 +2155,7 @@ static void crypt_resume(struct dm_target *ti)
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
struct crypt_config *cc = ti->private;
- int ret = -EINVAL;
+ int key_size, ret = -EINVAL;
if (argc < 2)
goto error;
@@ -2025,6 +2166,13 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
return -EINVAL;
}
if (argc == 3 && !strcasecmp(argv[1], "set")) {
+ /* The key size may not be changed. */
+ key_size = get_key_size(&argv[2]);
+ if (key_size < 0 || cc->key_size != key_size) {
+ memset(argv[2], '0', strlen(argv[2]));
+ return -EINVAL;
+ }
+
ret = crypt_set_key(cc, argv[2]);
if (ret)
return ret;
@@ -2068,7 +2216,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 14, 1},
+ .version = {1, 15, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
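
With the keyring support added above, the <key> argument accepted by crypt_ctr() is either the usual hex string or the ':<key_size>:<user|logon>:<key_description>' form parsed by get_key_size() and crypt_set_keyring_key(). Two hypothetical table lines for the same 32-byte key, one per form (sector counts, device path and key description are made-up values):

	0 417792 crypt aes-xts-plain64 <64 hex characters of key material> 0 /dev/sdb 0
	0 417792 crypt aes-xts-plain64 :32:logon:example-volume-key 0 /dev/sdb 0

The "key set" message path enforces the same rule as the constructor: the size encoded in the string (or implied by the hex length) must match cc->key_size.
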
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 6a2e8dd44a1b..13305a182611 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -36,7 +36,8 @@ struct flakey_c {
};
enum feature_flag_bits {
- DROP_WRITES
+ DROP_WRITES,
+ ERROR_WRITES
};
struct per_bio_data {
@@ -76,6 +77,25 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
ti->error = "Feature drop_writes duplicated";
return -EINVAL;
+ } else if (test_bit(ERROR_WRITES, &fc->flags)) {
+ ti->error = "Feature drop_writes conflicts with feature error_writes";
+ return -EINVAL;
+ }
+
+ continue;
+ }
+
+ /*
+ * error_writes
+ */
+ if (!strcasecmp(arg_name, "error_writes")) {
+ if (test_and_set_bit(ERROR_WRITES, &fc->flags)) {
+ ti->error = "Feature error_writes duplicated";
+ return -EINVAL;
+
+ } else if (test_bit(DROP_WRITES, &fc->flags)) {
+ ti->error = "Feature error_writes conflicts with feature drop_writes";
+ return -EINVAL;
}
continue;
@@ -135,6 +155,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
return -EINVAL;
+
+ } else if (test_bit(ERROR_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
+ ti->error = "error_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
+ return -EINVAL;
}
return 0;
@@ -200,11 +224,13 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (!(fc->up_interval + fc->down_interval)) {
ti->error = "Total (up + down) interval is zero";
+ r = -EINVAL;
goto bad;
}
if (fc->up_interval + fc->down_interval < fc->up_interval) {
ti->error = "Interval overflow";
+ r = -EINVAL;
goto bad;
}
@@ -289,22 +315,27 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
pb->bio_submitted = true;
/*
- * Error reads if neither corrupt_bio_byte or drop_writes are set.
+ * Error reads if none of corrupt_bio_byte, drop_writes or error_writes is set.
* Otherwise, flakey_end_io() will decide if the reads should be modified.
*/
if (bio_data_dir(bio) == READ) {
- if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags))
+ if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags) &&
+ !test_bit(ERROR_WRITES, &fc->flags))
return -EIO;
goto map_bio;
}
/*
- * Drop writes?
+ * Drop or error writes?
*/
if (test_bit(DROP_WRITES, &fc->flags)) {
bio_endio(bio);
return DM_MAPIO_SUBMITTED;
}
+ else if (test_bit(ERROR_WRITES, &fc->flags)) {
+ bio_io_error(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
/*
* Corrupt matching writes.
@@ -340,10 +371,11 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
*/
corrupt_bio_data(bio, fc);
- } else if (!test_bit(DROP_WRITES, &fc->flags)) {
+ } else if (!test_bit(DROP_WRITES, &fc->flags) &&
+ !test_bit(ERROR_WRITES, &fc->flags)) {
/*
* Error read during the down_interval if drop_writes
- * wasn't configured.
+ * and error_writes were not configured.
*/
return -EIO;
}
@@ -357,7 +389,7 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
{
unsigned sz = 0;
struct flakey_c *fc = ti->private;
- unsigned drop_writes;
+ unsigned drop_writes, error_writes;
switch (type) {
case STATUSTYPE_INFO:
@@ -370,10 +402,13 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
fc->down_interval);
drop_writes = test_bit(DROP_WRITES, &fc->flags);
- DMEMIT("%u ", drop_writes + (fc->corrupt_bio_byte > 0) * 5);
+ error_writes = test_bit(ERROR_WRITES, &fc->flags);
+ DMEMIT("%u ", drop_writes + error_writes + (fc->corrupt_bio_byte > 0) * 5);
if (drop_writes)
DMEMIT("drop_writes ");
+ else if (error_writes)
+ DMEMIT("error_writes ");
if (fc->corrupt_bio_byte)
DMEMIT("corrupt_bio_byte %u %c %u %u ",
@@ -410,7 +445,7 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = {
.name = "flakey",
- .version = {1, 3, 1},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,
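
error_writes is the counterpart to drop_writes: during the down interval, drop_writes completes writes successfully without issuing them, while error_writes fails them with -EIO via bio_io_error(); parse_features() above rejects tables that ask for both. A hypothetical table line exercising the new feature (device path and intervals are made-up values):

	0 409600 flakey /dev/sdc 0 59 1 1 error_writes
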
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 0bf1a12e35fe..03940bf36f6c 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -162,7 +162,10 @@ struct dpages {
struct page **p, unsigned long *len, unsigned *offset);
void (*next_page)(struct dpages *dp);
- unsigned context_u;
+ union {
+ unsigned context_u;
+ struct bvec_iter context_bi;
+ };
void *context_ptr;
void *vma_invalidate_address;
@@ -204,25 +207,36 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
static void bio_get_page(struct dpages *dp, struct page **p,
unsigned long *len, unsigned *offset)
{
- struct bio_vec *bvec = dp->context_ptr;
- *p = bvec->bv_page;
- *len = bvec->bv_len - dp->context_u;
- *offset = bvec->bv_offset + dp->context_u;
+ struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
+ dp->context_bi);
+
+ *p = bvec.bv_page;
+ *len = bvec.bv_len;
+ *offset = bvec.bv_offset;
+
+ /* avoid figuring it out again in bio_next_page() */
+ dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
}
static void bio_next_page(struct dpages *dp)
{
- struct bio_vec *bvec = dp->context_ptr;
- dp->context_ptr = bvec + 1;
- dp->context_u = 0;
+ unsigned int len = (unsigned int)dp->context_bi.bi_sector;
+
+ bvec_iter_advance((struct bio_vec *)dp->context_ptr,
+ &dp->context_bi, len);
}
static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
dp->get_page = bio_get_page;
dp->next_page = bio_next_page;
- dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
- dp->context_u = bio->bi_iter.bi_bvec_done;
+
+ /*
+ * We just use bvec iterator to retrieve pages, so it is ok to
+ * access the bvec table directly here
+ */
+ dp->context_ptr = bio->bi_io_vec;
+ dp->context_bi = bio->bi_iter;
}
/*
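
bio_get_page() and bio_next_page() above now carry a private struct bvec_iter instead of a raw bio_vec pointer plus a byte offset, which stays correct even when the bio's iterator does not start at the first vector. A minimal sketch of the underlying iteration pattern (the helper name is made up):

	#include <linux/bio.h>

	/* visit every segment of a bio without disturbing bio->bi_iter */
	static void visit_bio_segments(struct bio *bio)
	{
		struct bvec_iter iter = bio->bi_iter;	/* private copy */

		while (iter.bi_size) {
			struct bio_vec bv = bvec_iter_bvec(bio->bi_io_vec, iter);

			/* bv.bv_page / bv.bv_offset / bv.bv_len describe one segment */
			bvec_iter_advance(bio->bi_io_vec, &iter, bv.bv_len);
		}
	}
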
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 966eb4b61aed..c72a77048b73 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1697,7 +1697,7 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
{
struct dm_ioctl *dmi;
int secure_data;
- const size_t minimum_data_size = sizeof(*param_kernel) - sizeof(param_kernel->data);
+ const size_t minimum_data_size = offsetof(struct dm_ioctl, data);
if (copy_from_user(param_kernel, user, minimum_data_size))
return -EFAULT;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e477af8596e2..6400cffb986d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -372,16 +372,13 @@ static int __pg_init_all_paths(struct multipath *m)
return atomic_read(&m->pg_init_in_progress);
}
-static int pg_init_all_paths(struct multipath *m)
+static void pg_init_all_paths(struct multipath *m)
{
- int r;
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
- r = __pg_init_all_paths(m);
+ __pg_init_all_paths(m);
spin_unlock_irqrestore(&m->lock, flags);
-
- return r;
}
static void __switch_pg(struct multipath *m, struct priority_group *pg)
@@ -583,16 +580,17 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
* .request_fn stacked on blk-mq path(s) and
* blk-mq stacked on blk-mq path(s).
*/
- *__clone = blk_mq_alloc_request(bdev_get_queue(bdev),
- rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
- if (IS_ERR(*__clone)) {
- /* ENOMEM, requeue */
+ clone = blk_mq_alloc_request(bdev_get_queue(bdev),
+ rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
+ if (IS_ERR(clone)) {
+ /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
clear_request_fn_mpio(m, map_context);
return r;
}
- (*__clone)->bio = (*__clone)->biotail = NULL;
- (*__clone)->rq_disk = bdev->bd_disk;
- (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ clone->bio = clone->biotail = NULL;
+ clone->rq_disk = bdev->bd_disk;
+ clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ *__clone = clone;
}
if (pgpath->pg->ps.type->start_io)
@@ -852,18 +850,22 @@ retain:
attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
if (attached_handler_name) {
/*
+ * Clear any hw_handler_params associated with a
+ * handler that isn't already attached.
+ */
+ if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
+ kfree(m->hw_handler_params);
+ m->hw_handler_params = NULL;
+ }
+
+ /*
* Reset hw_handler_name to match the attached handler
- * and clear any hw_handler_params associated with the
- * ignored handler.
*
* NB. This modifies the table line to show the actual
* handler instead of the original table passed in.
*/
kfree(m->hw_handler_name);
m->hw_handler_name = attached_handler_name;
-
- kfree(m->hw_handler_params);
- m->hw_handler_params = NULL;
}
}
@@ -1002,6 +1004,8 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
}
m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
+ if (!m->hw_handler_name)
+ return -EINVAL;
if (hw_argc > 1) {
char *p;
@@ -1362,7 +1366,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
char dummy;
if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
- (pgnum > m->nr_priority_groups)) {
+ !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
DMWARN("invalid PG number supplied to switch_pg_num");
return -EINVAL;
}
@@ -1394,7 +1398,7 @@ static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
char dummy;
if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
- (pgnum > m->nr_priority_groups)) {
+ !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
DMWARN("invalid PG number supplied to bypass_pg");
return -EINVAL;
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 6d53810963f7..b8f978e551d7 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -160,7 +160,6 @@ struct raid_dev {
CTR_FLAG_DAEMON_SLEEP | \
CTR_FLAG_MIN_RECOVERY_RATE | \
CTR_FLAG_MAX_RECOVERY_RATE | \
- CTR_FLAG_MAX_WRITE_BEHIND | \
CTR_FLAG_STRIPE_CACHE | \
CTR_FLAG_REGION_SIZE | \
CTR_FLAG_DELTA_DISKS | \
@@ -171,7 +170,6 @@ struct raid_dev {
CTR_FLAG_DAEMON_SLEEP | \
CTR_FLAG_MIN_RECOVERY_RATE | \
CTR_FLAG_MAX_RECOVERY_RATE | \
- CTR_FLAG_MAX_WRITE_BEHIND | \
CTR_FLAG_STRIPE_CACHE | \
CTR_FLAG_REGION_SIZE | \
CTR_FLAG_DELTA_DISKS | \
@@ -2011,7 +2009,7 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);
/* Force writing of superblocks to disk */
- set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);
/* Any superblock is better than none, choose that if given */
return refdev ? 0 : 1;
@@ -2050,16 +2048,17 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
mddev->reshape_position = MaxSector;
+ mddev->raid_disks = le32_to_cpu(sb->num_devices);
+ mddev->level = le32_to_cpu(sb->level);
+ mddev->layout = le32_to_cpu(sb->layout);
+ mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
+
/*
* Reshaping is supported, e.g. reshape_position is valid
* in superblock and superblock content is authoritative.
*/
if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
/* Superblock is authoritative wrt given raid set layout! */
- mddev->raid_disks = le32_to_cpu(sb->num_devices);
- mddev->level = le32_to_cpu(sb->level);
- mddev->layout = le32_to_cpu(sb->layout);
- mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
mddev->new_level = le32_to_cpu(sb->new_level);
mddev->new_layout = le32_to_cpu(sb->new_layout);
mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);
@@ -2087,38 +2086,44 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
/*
* No takeover/reshaping, because we don't have the extended v1.9.0 metadata
*/
- if (le32_to_cpu(sb->level) != mddev->new_level) {
- DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");
- return -EINVAL;
- }
- if (le32_to_cpu(sb->layout) != mddev->new_layout) {
- DMERR("Reshaping raid sets not yet supported. (raid layout change)");
- DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
- DMERR(" Old layout: %s w/ %d copies",
- raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
- raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
- DMERR(" New layout: %s w/ %d copies",
- raid10_md_layout_to_format(mddev->layout),
- raid10_md_layout_to_copies(mddev->layout));
- return -EINVAL;
- }
- if (le32_to_cpu(sb->stripe_sectors) != mddev->new_chunk_sectors) {
- DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");
- return -EINVAL;
- }
+ struct raid_type *rt_cur = get_raid_type_by_ll(mddev->level, mddev->layout);
+ struct raid_type *rt_new = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
- /* We can only change the number of devices in raid1 with old (i.e. pre 1.0.7) metadata */
- if (!rt_is_raid1(rs->raid_type) &&
- (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
- DMERR("Reshaping raid sets not yet supported. (device count change from %u to %u)",
- sb->num_devices, mddev->raid_disks);
+ if (rs_takeover_requested(rs)) {
+ if (rt_cur && rt_new)
+ DMERR("Takeover raid sets from %s to %s not yet supported by metadata. (raid level change)",
+ rt_cur->name, rt_new->name);
+ else
+ DMERR("Takeover raid sets not yet supported by metadata. (raid level change)");
+ return -EINVAL;
+ } else if (rs_reshape_requested(rs)) {
+ DMERR("Reshaping raid sets not yet supported by metadata. (raid layout change keeping level)");
+ if (mddev->layout != mddev->new_layout) {
+ if (rt_cur && rt_new)
+ DMERR(" current layout %s vs new layout %s",
+ rt_cur->name, rt_new->name);
+ else
+ DMERR(" current layout 0x%X vs new layout 0x%X",
+ le32_to_cpu(sb->layout), mddev->new_layout);
+ }
+ if (mddev->chunk_sectors != mddev->new_chunk_sectors)
+ DMERR(" current stripe sectors %u vs new stripe sectors %u",
+ mddev->chunk_sectors, mddev->new_chunk_sectors);
+ if (rs->delta_disks)
+ DMERR(" current %u disks vs new %u disks",
+ mddev->raid_disks, mddev->raid_disks + rs->delta_disks);
+ if (rs_is_raid10(rs)) {
+ DMERR(" Old layout: %s w/ %u copies",
+ raid10_md_layout_to_format(mddev->layout),
+ raid10_md_layout_to_copies(mddev->layout));
+ DMERR(" New layout: %s w/ %u copies",
+ raid10_md_layout_to_format(mddev->new_layout),
+ raid10_md_layout_to_copies(mddev->new_layout));
+ }
return -EINVAL;
}
DMINFO("Discovered old metadata format; upgrading to extended metadata format");
-
- /* Table line is checked vs. authoritative superblock */
- rs_set_new(rs);
}
if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
@@ -2211,7 +2216,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
continue;
if (role != r->raid_disk) {
- if (__is_raid10_near(mddev->layout)) {
+ if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) {
if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
rs->raid_disks % rs->raid10_copies) {
rs->ti->error =
@@ -2994,6 +2999,9 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
}
+ /* Disable/enable discard support on raid set. */
+ configure_discard_support(rs);
+
mddev_unlock(&rs->md);
return 0;
@@ -3497,7 +3505,7 @@ static void rs_update_sbs(struct raid_set *rs)
struct mddev *mddev = &rs->md;
int ro = mddev->ro;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev->ro = 0;
md_update_sb(mddev, 1);
mddev->ro = ro;
@@ -3580,12 +3588,6 @@ static int raid_preresume(struct dm_target *ti)
if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
rs_update_sbs(rs);
- /*
- * Disable/enable discard support on raid set after any
- * conversion, because devices can have been added
- */
- configure_discard_support(rs);
-
/* Load the bitmap from disk unless raid0 */
r = __load_dirty_region_bitmap(rs);
if (r)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index b2a9e2d161e4..9d7275fb541a 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -23,11 +23,7 @@ static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
#define RESERVED_REQUEST_BASED_IOS 256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
-#ifdef CONFIG_DM_MQ_DEFAULT
-static bool use_blk_mq = true;
-#else
-static bool use_blk_mq = false;
-#endif
+static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);
bool dm_use_blk_mq_default(void)
{
@@ -210,6 +206,9 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
*/
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
+ struct request_queue *q = md->queue;
+ unsigned long flags;
+
atomic_dec(&md->pending[rw]);
/* nudge anyone waiting on suspend queue */
@@ -222,8 +221,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
* back into ->request_fn() could deadlock attempting to grab the
* queue lock again.
*/
- if (!md->queue->mq_ops && run_queue)
- blk_run_queue_async(md->queue);
+ if (!q->mq_ops && run_queue) {
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_run_queue_async(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
/*
* dm_put() must be at the end of this function. See the comment above
@@ -798,7 +800,7 @@ static void dm_old_request_fn(struct request_queue *q)
pos = blk_rq_pos(rq);
if ((dm_old_request_peeked_before_merge_deadline(md) &&
- md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+ md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
(ti->type->busy && ti->type->busy(ti))) {
blk_delay_queue(q, 10);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index c4b53b332607..0a427de23ed2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -871,7 +871,7 @@ static int dm_table_determine_type(struct dm_table *t)
{
unsigned i;
unsigned bio_based = 0, request_based = 0, hybrid = 0;
- bool verify_blk_mq = false;
+ unsigned sq_count = 0, mq_count = 0;
struct dm_target *tgt;
struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
@@ -924,12 +924,6 @@ static int dm_table_determine_type(struct dm_table *t)
BUG_ON(!request_based); /* No targets in this table */
- if (list_empty(devices) && __table_type_request_based(live_md_type)) {
- /* inherit live MD type */
- t->type = live_md_type;
- return 0;
- }
-
/*
* The only way to establish DM_TYPE_MQ_REQUEST_BASED is by
* having a compatible target use dm_table_set_type.
@@ -948,6 +942,19 @@ verify_rq_based:
return -EINVAL;
}
+ if (list_empty(devices)) {
+ int srcu_idx;
+ struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
+
+ /* inherit live table's type and all_blk_mq */
+ if (live_table) {
+ t->type = live_table->type;
+ t->all_blk_mq = live_table->all_blk_mq;
+ }
+ dm_put_live_table(t->md, srcu_idx);
+ return 0;
+ }
+
/* Non-request-stackable devices can't be used for request-based dm */
list_for_each_entry(dd, devices, list) {
struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
@@ -959,19 +966,19 @@ verify_rq_based:
}
if (q->mq_ops)
- verify_blk_mq = true;
+ mq_count++;
+ else
+ sq_count++;
}
+ if (sq_count && mq_count) {
+ DMERR("table load rejected: not all devices are blk-mq request-stackable");
+ return -EINVAL;
+ }
+ t->all_blk_mq = mq_count > 0;
- if (verify_blk_mq) {
- /* verify _all_ devices in the table are blk-mq devices */
- list_for_each_entry(dd, devices, list)
- if (!bdev_get_queue(dd->dm_dev->bdev)->mq_ops) {
- DMERR("table load rejected: not all devices"
- " are blk-mq request-stackable");
- return -EINVAL;
- }
-
- t->all_blk_mq = true;
+ if (t->type == DM_TYPE_MQ_REQUEST_BASED && !t->all_blk_mq) {
+ DMERR("table load rejected: all devices are not blk-mq request-stackable");
+ return -EINVAL;
}
return 0;
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 0aba34a7b3b3..7335d8a3fc47 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -868,7 +868,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = dm_get_device(ti, argv[2], FMODE_READ, &v->hash_dev);
if (r) {
- ti->error = "Data device lookup failed";
+ ti->error = "Hash device lookup failed";
goto bad;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ffa97b742a68..3086da5664f3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1886,9 +1886,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
- spin_lock_irq(q->queue_lock);
- queue_flag_set(QUEUE_FLAG_DYING, q);
- spin_unlock_irq(q->queue_lock);
+ blk_set_queue_dying(q);
if (dm_request_based(md) && md->kworker_task)
kthread_flush_worker(&md->kworker);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 86f5d435901d..5975c9915684 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -21,6 +21,7 @@
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <trace/events/block.h>
#include "md.h"
#include "linear.h"
@@ -101,8 +102,8 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
sector_t sectors;
if (j < 0 || j >= raid_disks || disk->rdev) {
- printk(KERN_ERR "md/linear:%s: disk numbering problem. Aborting!\n",
- mdname(mddev));
+ pr_warn("md/linear:%s: disk numbering problem. Aborting!\n",
+ mdname(mddev));
goto out;
}
@@ -123,8 +124,8 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
discard_supported = true;
}
if (cnt != raid_disks) {
- printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n",
- mdname(mddev));
+ pr_warn("md/linear:%s: not enough drives present. Aborting!\n",
+ mdname(mddev));
goto out;
}
@@ -227,22 +228,22 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
}
do {
- tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
+ sector_t bio_sector = bio->bi_iter.bi_sector;
+ tmp_dev = which_dev(mddev, bio_sector);
start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
end_sector = tmp_dev->end_sector;
data_offset = tmp_dev->rdev->data_offset;
bio->bi_bdev = tmp_dev->rdev->bdev;
- if (unlikely(bio->bi_iter.bi_sector >= end_sector ||
- bio->bi_iter.bi_sector < start_sector))
+ if (unlikely(bio_sector >= end_sector ||
+ bio_sector < start_sector))
goto out_of_bounds;
if (unlikely(bio_end_sector(bio) > end_sector)) {
/* This bio crosses a device boundary, so we have to
* split it.
*/
- split = bio_split(bio, end_sector -
- bio->bi_iter.bi_sector,
+ split = bio_split(bio, end_sector - bio_sector,
GFP_NOIO, fs_bio_set);
bio_chain(split, bio);
} else {
@@ -256,15 +257,18 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */
bio_endio(split);
- } else
+ } else {
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
+ split, disk_devt(mddev->gendisk),
+ bio_sector);
generic_make_request(split);
+ }
} while (split != bio);
return;
out_of_bounds:
- printk(KERN_ERR
- "md/linear:%s: make_request: Sector %llu out of bounds on "
- "dev %s: %llu sectors, offset %llu\n",
+ pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %s: %llu sectors, offset %llu\n",
mdname(mddev),
(unsigned long long)bio->bi_iter.bi_sector,
bdevname(tmp_dev->rdev->bdev, b),
@@ -275,7 +279,6 @@ out_of_bounds:
static void linear_status (struct seq_file *seq, struct mddev *mddev)
{
-
seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f975cd08923d..82821ee0d57f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -30,6 +30,18 @@
You should have received a copy of the GNU General Public License
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ Errors, Warnings, etc.
+ Please use:
+ pr_crit() for error conditions that risk data loss
+ pr_err() for error conditions that are unexpected, like an IO error
+ or internal inconsistency
+ pr_warn() for error conditions that could have been predicted, like
+ adding a device to an array when it has incompatible metadata
+ pr_info() for every interesting, very rare event, like an array starting
+ or stopping, or resync starting or stopping
+ pr_debug() for everything else.
+
*/
#include <linux/kthread.h>
@@ -52,6 +64,7 @@
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
+#include <trace/events/block.h>
#include "md.h"
#include "bitmap.h"
#include "md-cluster.h"
@@ -684,11 +697,8 @@ static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
static int alloc_disk_sb(struct md_rdev *rdev)
{
rdev->sb_page = alloc_page(GFP_KERNEL);
- if (!rdev->sb_page) {
- printk(KERN_ALERT "md: out of memory.\n");
+ if (!rdev->sb_page)
return -ENOMEM;
- }
-
return 0;
}
@@ -715,9 +725,15 @@ static void super_written(struct bio *bio)
struct mddev *mddev = rdev->mddev;
if (bio->bi_error) {
- printk("md: super_written gets error=%d\n", bio->bi_error);
+ pr_err("md: super_written gets error=%d\n", bio->bi_error);
md_error(mddev, rdev);
- }
+ if (!test_bit(Faulty, &rdev->flags)
+ && (bio->bi_opf & MD_FAILFAST)) {
+ set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
+ set_bit(LastDev, &rdev->flags);
+ }
+ } else
+ clear_bit(LastDev, &rdev->flags);
if (atomic_dec_and_test(&mddev->pending_writes))
wake_up(&mddev->sb_wait);
@@ -734,7 +750,13 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
* if zero is reached.
* If an error occurred, call md_error
*/
- struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
+ struct bio *bio;
+ int ff = 0;
+
+ if (test_bit(Faulty, &rdev->flags))
+ return;
+
+ bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
atomic_inc(&rdev->nr_pending);
@@ -743,16 +765,24 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA;
+
+ if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
+ test_bit(FailFast, &rdev->flags) &&
+ !test_bit(LastDev, &rdev->flags))
+ ff = MD_FAILFAST;
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | ff;
atomic_inc(&mddev->pending_writes);
submit_bio(bio);
}
-void md_super_wait(struct mddev *mddev)
+int md_super_wait(struct mddev *mddev)
{
/* wait for all superblock writes that were scheduled to complete */
wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
+ if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
+ return -EAGAIN;
+ return 0;
}
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
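
With the hunks above, a superblock write may carry MD_FAILFAST. If such a write fails on a device that is not Faulty, super_written() sets MD_SB_NEED_REWRITE and marks the device LastDev, and md_super_wait() then returns -EAGAIN; the caller re-issues the write, which skips failfast because of LastDev. A minimal sketch of the retry idiom (the same pattern appears in the rdev_size_change hunks below):

	do {
		md_super_write(rdev->mddev, rdev, rdev->sb_start,
			       rdev->sb_size, rdev->sb_page);
	} while (md_super_wait(rdev->mddev) < 0);	/* -EAGAIN: write again without failfast */
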
@@ -795,8 +825,8 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
return 0;
fail:
- printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
- bdevname(rdev->bdev,b));
+ pr_err("md: disabled device %s, could not read superblock.\n",
+ bdevname(rdev->bdev,b));
return -EINVAL;
}
@@ -818,7 +848,6 @@ static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
if (!tmp1 || !tmp2) {
ret = 0;
- printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
goto abort;
}
@@ -932,7 +961,7 @@ int md_check_no_bitmap(struct mddev *mddev)
{
if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
return 0;
- printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
+ pr_warn("%s: bitmaps are not supported for %s\n",
mdname(mddev), mddev->pers->name);
return 1;
}
@@ -956,7 +985,8 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
rdev->sb_start = calc_dev_sboffset(rdev);
ret = read_disk_sb(rdev, MD_SB_BYTES);
- if (ret) return ret;
+ if (ret)
+ return ret;
ret = -EINVAL;
@@ -964,17 +994,15 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
sb = page_address(rdev->sb_page);
if (sb->md_magic != MD_SB_MAGIC) {
- printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
- b);
+ pr_warn("md: invalid raid superblock magic on %s\n", b);
goto abort;
}
if (sb->major_version != 0 ||
sb->minor_version < 90 ||
sb->minor_version > 91) {
- printk(KERN_WARNING "Bad version number %d.%d on %s\n",
- sb->major_version, sb->minor_version,
- b);
+ pr_warn("Bad version number %d.%d on %s\n",
+ sb->major_version, sb->minor_version, b);
goto abort;
}
@@ -982,8 +1010,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
goto abort;
if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
- printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
- b);
+ pr_warn("md: invalid superblock checksum on %s\n", b);
goto abort;
}
@@ -1004,14 +1031,13 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
__u64 ev1, ev2;
mdp_super_t *refsb = page_address(refdev->sb_page);
if (!uuid_equal(refsb, sb)) {
- printk(KERN_WARNING "md: %s has different UUID to %s\n",
+ pr_warn("md: %s has different UUID to %s\n",
b, bdevname(refdev->bdev,b2));
goto abort;
}
if (!sb_equal(refsb, sb)) {
- printk(KERN_WARNING "md: %s has same UUID"
- " but different superblock to %s\n",
- b, bdevname(refdev->bdev, b2));
+ pr_warn("md: %s has same UUID but different superblock to %s\n",
+ b, bdevname(refdev->bdev, b2));
goto abort;
}
ev1 = md_event(sb);
@@ -1158,6 +1184,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
}
if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
+ if (desc->state & (1<<MD_DISK_FAILFAST))
+ set_bit(FailFast, &rdev->flags);
} else /* MULTIPATH are always insync */
set_bit(In_sync, &rdev->flags);
return 0;
@@ -1283,6 +1311,8 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
}
if (test_bit(WriteMostly, &rdev2->flags))
d->state |= (1<<MD_DISK_WRITEMOSTLY);
+ if (test_bit(FailFast, &rdev2->flags))
+ d->state |= (1<<MD_DISK_FAILFAST);
}
/* now set the "removed" and "faulty" bits on any missing devices */
for (i=0 ; i < mddev->raid_disks ; i++) {
@@ -1324,9 +1354,10 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
rdev->mddev->level >= 1)
num_sectors = (sector_t)(2ULL << 32) - 2;
- md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+ do {
+ md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
- md_super_wait(rdev->mddev);
+ } while (md_super_wait(rdev->mddev) < 0);
return num_sectors;
}
@@ -1413,13 +1444,13 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
return -EINVAL;
if (calc_sb_1_csum(sb) != sb->sb_csum) {
- printk("md: invalid superblock checksum on %s\n",
+ pr_warn("md: invalid superblock checksum on %s\n",
bdevname(rdev->bdev,b));
return -EINVAL;
}
if (le64_to_cpu(sb->data_size) < 10) {
- printk("md: data_size too small on %s\n",
- bdevname(rdev->bdev,b));
+ pr_warn("md: data_size too small on %s\n",
+ bdevname(rdev->bdev,b));
return -EINVAL;
}
if (sb->pad0 ||
@@ -1503,8 +1534,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
sb->level != refsb->level ||
sb->layout != refsb->layout ||
sb->chunksize != refsb->chunksize) {
- printk(KERN_WARNING "md: %s has strangely different"
- " superblock to %s\n",
+ pr_warn("md: %s has strangely different superblock to %s\n",
bdevname(rdev->bdev,b),
bdevname(refdev->bdev,b2));
return -EINVAL;
@@ -1646,8 +1676,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
case MD_DISK_ROLE_JOURNAL: /* journal device */
if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
/* journal device without journal feature */
- printk(KERN_WARNING
- "md: journal device provided without journal feature, ignoring the device\n");
+ pr_warn("md: journal device provided without journal feature, ignoring the device\n");
return -EINVAL;
}
set_bit(Journal, &rdev->flags);
@@ -1669,6 +1698,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
}
if (sb->devflags & WriteMostly1)
set_bit(WriteMostly, &rdev->flags);
+ if (sb->devflags & FailFast1)
+ set_bit(FailFast, &rdev->flags);
if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
set_bit(Replacement, &rdev->flags);
} else /* MULTIPATH are always insync */
@@ -1707,6 +1738,10 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
+ if (test_bit(FailFast, &rdev->flags))
+ sb->devflags |= FailFast1;
+ else
+ sb->devflags &= ~FailFast1;
if (test_bit(WriteMostly, &rdev->flags))
sb->devflags |= WriteMostly1;
@@ -1863,9 +1898,10 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
sb->data_size = cpu_to_le64(num_sectors);
sb->super_offset = rdev->sb_start;
sb->sb_csum = calc_sb_1_csum(sb);
- md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
- rdev->sb_page);
- md_super_wait(rdev->mddev);
+ do {
+ md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+ rdev->sb_page);
+ } while (md_super_wait(rdev->mddev) < 0);
return num_sectors;
}
@@ -2004,9 +2040,9 @@ int md_integrity_register(struct mddev *mddev)
blk_integrity_register(mddev->gendisk,
bdev_get_integrity(reference->bdev));
- printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
+ pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
- printk(KERN_ERR "md: failed to create integrity pool for %s\n",
+ pr_err("md: failed to create integrity pool for %s\n",
mdname(mddev));
return -EINVAL;
}
@@ -2034,8 +2070,8 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
return 0;
if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
- printk(KERN_NOTICE "%s: incompatible integrity profile for %s\n",
- mdname(mddev), bdevname(rdev->bdev, name));
+ pr_err("%s: incompatible integrity profile for %s\n",
+ mdname(mddev), bdevname(rdev->bdev, name));
return -ENXIO;
}
@@ -2089,15 +2125,15 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
rcu_read_unlock();
if (!test_bit(Journal, &rdev->flags) &&
mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
- printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
- mdname(mddev), mddev->max_disks);
+ pr_warn("md: %s: array is limited to %d devices\n",
+ mdname(mddev), mddev->max_disks);
return -EBUSY;
}
bdevname(rdev->bdev,b);
strreplace(b, '/', '!');
rdev->mddev = mddev;
- printk(KERN_INFO "md: bind<%s>\n", b);
+ pr_debug("md: bind<%s>\n", b);
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
goto fail;
@@ -2116,8 +2152,8 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
return 0;
fail:
- printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
- b, mdname(mddev));
+ pr_warn("md: failed to register dev-%s for %s\n",
+ b, mdname(mddev));
return err;
}
@@ -2134,7 +2170,7 @@ static void unbind_rdev_from_array(struct md_rdev *rdev)
bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
- printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
+ pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
@@ -2164,8 +2200,7 @@ static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
shared ? (struct md_rdev *)lock_rdev : rdev);
if (IS_ERR(bdev)) {
- printk(KERN_ERR "md: could not open %s.\n",
- __bdevname(dev, b));
+ pr_warn("md: could not open %s.\n", __bdevname(dev, b));
return PTR_ERR(bdev);
}
rdev->bdev = bdev;
@@ -2185,8 +2220,7 @@ static void export_rdev(struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
- printk(KERN_INFO "md: export_rdev(%s)\n",
- bdevname(rdev->bdev,b));
+ pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
md_rdev_clear(rdev);
#ifndef MODULE
if (test_bit(AutoDetected, &rdev->flags))
@@ -2288,24 +2322,24 @@ void md_update_sb(struct mddev *mddev, int force_change)
if (mddev->ro) {
if (force_change)
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
return;
}
repeat:
if (mddev_is_clustered(mddev)) {
- if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
+ if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
force_change = 1;
- if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
nospares = 1;
ret = md_cluster_ops->metadata_update_start(mddev);
/* Has someone else updated the sb */
if (!does_sb_need_changing(mddev)) {
if (ret == 0)
md_cluster_ops->metadata_update_cancel(mddev);
- bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
- BIT(MD_CHANGE_DEVS) |
- BIT(MD_CHANGE_CLEAN));
+ bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
+ BIT(MD_SB_CHANGE_DEVS) |
+ BIT(MD_SB_CHANGE_CLEAN));
return;
}
}
@@ -2321,10 +2355,10 @@ repeat:
}
if (!mddev->persistent) {
- clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
- clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+ clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
if (!mddev->external) {
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
rdev_for_each(rdev, mddev) {
if (rdev->badblocks.changed) {
rdev->badblocks.changed = 0;
@@ -2344,9 +2378,9 @@ repeat:
mddev->utime = ktime_get_real_seconds();
- if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
+ if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
force_change = 1;
- if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
/* just a clean<-> dirty transition, possibly leave spares alone,
* though if events isn't the right even/odd, we will have to do
* spares after all
@@ -2402,6 +2436,9 @@ repeat:
pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
mdname(mddev), mddev->in_sync);
+ if (mddev->queue)
+ blk_add_trace_msg(mddev->queue, "md md_update_sb");
+rewrite:
bitmap_update_sb(mddev->bitmap);
rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE];
@@ -2433,15 +2470,16 @@ repeat:
/* only need to write one superblock... */
break;
}
- md_super_wait(mddev);
- /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
+ if (md_super_wait(mddev) < 0)
+ goto rewrite;
+ /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
if (mddev_is_clustered(mddev) && ret == 0)
md_cluster_ops->metadata_update_finish(mddev);
if (mddev->in_sync != sync_req ||
- !bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_CLEAN)))
+ !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
/* have to write it out again */
goto repeat;
wake_up(&mddev->sb_wait);
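
The new "rewrite" label sits alongside the existing "repeat" label: "goto rewrite" only re-issues the superblock writes after md_super_wait() reported a failed failfast write, while "goto repeat" rebuilds the whole update because sb_flags changed in the meantime. A simplified control-flow sketch of md_update_sb() after this change (the in_sync check and clustering details are omitted):

	repeat:
		/* recompute superblock contents, bump the event count, ... */
	rewrite:
		/* issue md_super_write() for every rdev */
		if (md_super_wait(mddev) < 0)
			goto rewrite;	/* a MD_FAILFAST write failed; write it again */
		if (!bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
				      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
			goto repeat;	/* more changes arrived; do a full update */
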
@@ -2485,7 +2523,7 @@ static int add_bound_rdev(struct md_rdev *rdev)
}
sysfs_notify_dirent_safe(rdev->sysfs_state);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -2523,51 +2561,41 @@ struct rdev_sysfs_entry {
static ssize_t
state_show(struct md_rdev *rdev, char *page)
{
- char *sep = "";
+ char *sep = ",";
size_t len = 0;
unsigned long flags = ACCESS_ONCE(rdev->flags);
if (test_bit(Faulty, &flags) ||
- rdev->badblocks.unacked_exist) {
- len+= sprintf(page+len, "%sfaulty",sep);
- sep = ",";
- }
- if (test_bit(In_sync, &flags)) {
- len += sprintf(page+len, "%sin_sync",sep);
- sep = ",";
- }
- if (test_bit(Journal, &flags)) {
- len += sprintf(page+len, "%sjournal",sep);
- sep = ",";
- }
- if (test_bit(WriteMostly, &flags)) {
- len += sprintf(page+len, "%swrite_mostly",sep);
- sep = ",";
- }
+ (!test_bit(ExternalBbl, &flags) &&
+ rdev->badblocks.unacked_exist))
+ len += sprintf(page+len, "faulty%s", sep);
+ if (test_bit(In_sync, &flags))
+ len += sprintf(page+len, "in_sync%s", sep);
+ if (test_bit(Journal, &flags))
+ len += sprintf(page+len, "journal%s", sep);
+ if (test_bit(WriteMostly, &flags))
+ len += sprintf(page+len, "write_mostly%s", sep);
if (test_bit(Blocked, &flags) ||
(rdev->badblocks.unacked_exist
- && !test_bit(Faulty, &flags))) {
- len += sprintf(page+len, "%sblocked", sep);
- sep = ",";
- }
+ && !test_bit(Faulty, &flags)))
+ len += sprintf(page+len, "blocked%s", sep);
if (!test_bit(Faulty, &flags) &&
!test_bit(Journal, &flags) &&
- !test_bit(In_sync, &flags)) {
- len += sprintf(page+len, "%sspare", sep);
- sep = ",";
- }
- if (test_bit(WriteErrorSeen, &flags)) {
- len += sprintf(page+len, "%swrite_error", sep);
- sep = ",";
- }
- if (test_bit(WantReplacement, &flags)) {
- len += sprintf(page+len, "%swant_replacement", sep);
- sep = ",";
- }
- if (test_bit(Replacement, &flags)) {
- len += sprintf(page+len, "%sreplacement", sep);
- sep = ",";
- }
+ !test_bit(In_sync, &flags))
+ len += sprintf(page+len, "spare%s", sep);
+ if (test_bit(WriteErrorSeen, &flags))
+ len += sprintf(page+len, "write_error%s", sep);
+ if (test_bit(WantReplacement, &flags))
+ len += sprintf(page+len, "want_replacement%s", sep);
+ if (test_bit(Replacement, &flags))
+ len += sprintf(page+len, "replacement%s", sep);
+ if (test_bit(ExternalBbl, &flags))
+ len += sprintf(page+len, "external_bbl%s", sep);
+ if (test_bit(FailFast, &flags))
+ len += sprintf(page+len, "failfast%s", sep);
+
+ if (len)
+ len -= strlen(sep);
return len+sprintf(page+len, "\n");
}
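
The rewritten state_show() drops the per-flag "sep" bookkeeping: each present flag is printed with a trailing separator and one trailing separator is trimmed before the final newline. The same pattern in isolation, as a generic sketch rather than driver code (the two flag bits are placeholders):

	static size_t show_flags(char *page, unsigned long flags)
	{
		const char *sep = ",";
		size_t len = 0;

		if (flags & BIT(0))
			len += sprintf(page + len, "faulty%s", sep);
		if (flags & BIT(1))
			len += sprintf(page + len, "in_sync%s", sep);
		if (len)
			len -= strlen(sep);	/* drop the trailing separator */
		return len + sprintf(page + len, "\n");
	}
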
@@ -2587,6 +2615,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
* so that it gets rebuilt based on bitmap
* write_error - sets WriteErrorSeen
* -write_error - clears WriteErrorSeen
+ * {,-}failfast - set/clear FailFast
*/
int err = -EINVAL;
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
@@ -2610,8 +2639,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
if (err == 0) {
md_kick_rdev_from_array(rdev);
- if (mddev->pers)
- md_update_sb(mddev, 1);
+ if (mddev->pers) {
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ md_wakeup_thread(mddev->thread);
+ }
md_new_event(mddev);
}
}
@@ -2626,6 +2657,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = 0;
} else if (cmd_match(buf, "-blocked")) {
if (!test_bit(Faulty, &rdev->flags) &&
+ !test_bit(ExternalBbl, &rdev->flags) &&
rdev->badblocks.unacked_exist) {
/* metadata handler doesn't understand badblocks,
* so we need to fail the device
@@ -2642,6 +2674,12 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
set_bit(In_sync, &rdev->flags);
err = 0;
+ } else if (cmd_match(buf, "failfast")) {
+ set_bit(FailFast, &rdev->flags);
+ err = 0;
+ } else if (cmd_match(buf, "-failfast")) {
+ clear_bit(FailFast, &rdev->flags);
+ err = 0;
} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
!test_bit(Journal, &rdev->flags)) {
if (rdev->mddev->pers == NULL) {
@@ -2708,6 +2746,13 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
}
} else
err = -EBUSY;
+ } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
+ set_bit(ExternalBbl, &rdev->flags);
+ rdev->badblocks.shift = 0;
+ err = 0;
+ } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
+ clear_bit(ExternalBbl, &rdev->flags);
+ err = 0;
}
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
@@ -3211,10 +3256,8 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
sector_t size;
rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
- if (!rdev) {
- printk(KERN_ERR "md: could not alloc mem for new device!\n");
+ if (!rdev)
return ERR_PTR(-ENOMEM);
- }
err = md_rdev_init(rdev);
if (err)
@@ -3231,8 +3274,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
if (!size) {
- printk(KERN_WARNING
- "md: %s has zero or unknown size, marking faulty!\n",
+ pr_warn("md: %s has zero or unknown size, marking faulty!\n",
bdevname(rdev->bdev,b));
err = -EINVAL;
goto abort_free;
@@ -3242,16 +3284,13 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
err = super_types[super_format].
load_super(rdev, NULL, super_minor);
if (err == -EINVAL) {
- printk(KERN_WARNING
- "md: %s does not have a valid v%d.%d "
- "superblock, not importing!\n",
+ pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
bdevname(rdev->bdev,b),
- super_format, super_minor);
+ super_format, super_minor);
goto abort_free;
}
if (err < 0) {
- printk(KERN_WARNING
- "md: could not read %s's sb, not importing!\n",
+ pr_warn("md: could not read %s's sb, not importing!\n",
bdevname(rdev->bdev,b));
goto abort_free;
}
@@ -3287,9 +3326,7 @@ static void analyze_sbs(struct mddev *mddev)
case 0:
break;
default:
- printk( KERN_ERR \
- "md: fatal superblock inconsistency in %s"
- " -- removing from array\n",
+ pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
bdevname(rdev->bdev,b));
md_kick_rdev_from_array(rdev);
}
@@ -3302,18 +3339,16 @@ static void analyze_sbs(struct mddev *mddev)
if (mddev->max_disks &&
(rdev->desc_nr >= mddev->max_disks ||
i > mddev->max_disks)) {
- printk(KERN_WARNING
- "md: %s: %s: only %d devices permitted\n",
- mdname(mddev), bdevname(rdev->bdev, b),
- mddev->max_disks);
+ pr_warn("md: %s: %s: only %d devices permitted\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mddev->max_disks);
md_kick_rdev_from_array(rdev);
continue;
}
if (rdev != freshest) {
if (super_types[mddev->major_version].
validate_super(mddev, rdev)) {
- printk(KERN_WARNING "md: kicking non-fresh %s"
- " from array!\n",
+ pr_warn("md: kicking non-fresh %s from array!\n",
bdevname(rdev->bdev,b));
md_kick_rdev_from_array(rdev);
continue;
@@ -3384,7 +3419,7 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
unsigned long msec;
if (mddev_is_clustered(mddev)) {
- pr_info("md: Safemode is disabled for clustered mode\n");
+ pr_warn("md: Safemode is disabled for clustered mode\n");
return -EINVAL;
}
@@ -3472,8 +3507,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
rv = -EINVAL;
if (!mddev->pers->quiesce) {
- printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
- mdname(mddev), mddev->pers->name);
+ pr_warn("md: %s: %s does not support online personality change\n",
+ mdname(mddev), mddev->pers->name);
goto out_unlock;
}
@@ -3491,7 +3526,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
pers = find_pers(level, clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
- printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
+ pr_warn("md: personality %s not loaded\n", clevel);
rv = -EINVAL;
goto out_unlock;
}
@@ -3505,8 +3540,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
}
if (!pers->takeover) {
module_put(pers->owner);
- printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
- mdname(mddev), clevel);
+ pr_warn("md: %s: %s does not support personality takeover\n",
+ mdname(mddev), clevel);
rv = -EINVAL;
goto out_unlock;
}
@@ -3526,8 +3561,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
module_put(pers->owner);
- printk(KERN_WARNING "md: %s: %s would not accept array\n",
- mdname(mddev), clevel);
+ pr_warn("md: %s: %s would not accept array\n",
+ mdname(mddev), clevel);
rv = PTR_ERR(priv);
goto out_unlock;
}
@@ -3570,9 +3605,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
pers->sync_request != NULL) {
/* need to add the md_redundancy_group */
if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
- printk(KERN_WARNING
- "md: cannot register extra attributes for %s\n",
- mdname(mddev));
+ pr_warn("md: cannot register extra attributes for %s\n",
+ mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
}
if (oldpers->sync_request != NULL &&
@@ -3603,9 +3637,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
clear_bit(In_sync, &rdev->flags);
else {
if (sysfs_link_rdev(mddev, rdev))
- printk(KERN_WARNING "md: cannot register rd%d"
- " for %s after level change\n",
- rdev->raid_disk, mdname(mddev));
+ pr_warn("md: cannot register rd%d for %s after level change\n",
+ rdev->raid_disk, mdname(mddev));
}
}
@@ -3618,7 +3651,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
}
blk_set_stacking_limits(&mddev->queue->limits);
pers->run(mddev);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev_resume(mddev);
if (!mddev->thread)
md_update_sb(mddev, 1);
@@ -3813,7 +3846,7 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
if (!err) {
mddev->recovery_cp = n;
if (mddev->pers)
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
}
mddev_unlock(mddev);
return err ?: len;
@@ -3887,7 +3920,7 @@ array_state_show(struct mddev *mddev, char *page)
st = read_auto;
break;
case 0:
- if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
st = write_pending;
else if (mddev->in_sync)
st = clean;
@@ -3925,7 +3958,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
spin_lock(&mddev->lock);
if (st == active) {
restart_array(mddev);
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
+ md_wakeup_thread(mddev->thread);
wake_up(&mddev->sb_wait);
err = 0;
} else /* st == clean */ {
@@ -3935,7 +3969,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
}
err = 0;
} else
@@ -4001,7 +4035,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
}
err = 0;
} else
@@ -4015,7 +4049,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
err = restart_array(mddev);
if (err)
break;
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
wake_up(&mddev->sb_wait);
err = 0;
} else {
@@ -5071,13 +5105,13 @@ static int md_alloc(dev_t dev, char *name)
/* This isn't possible, but as kobject_init_and_add is marked
* __must_check, we must do something with the result
*/
- printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
- disk->disk_name);
+ pr_debug("md: cannot register %s/md - name in use\n",
+ disk->disk_name);
error = 0;
}
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_bitmap_group))
- printk(KERN_DEBUG "pointless warning\n");
+ pr_debug("pointless warning\n");
mutex_unlock(&mddev->open_mutex);
abort:
mutex_unlock(&disks_mutex);
@@ -5179,15 +5213,15 @@ int md_run(struct mddev *mddev)
if (mddev->dev_sectors &&
rdev->data_offset + mddev->dev_sectors
> rdev->sb_start) {
- printk("md: %s: data overlaps metadata\n",
- mdname(mddev));
+ pr_warn("md: %s: data overlaps metadata\n",
+ mdname(mddev));
return -EINVAL;
}
} else {
if (rdev->sb_start + rdev->sb_size/512
> rdev->data_offset) {
- printk("md: %s: metadata overlaps data\n",
- mdname(mddev));
+ pr_warn("md: %s: metadata overlaps data\n",
+ mdname(mddev));
return -EINVAL;
}
}
@@ -5202,11 +5236,11 @@ int md_run(struct mddev *mddev)
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
if (mddev->level != LEVEL_NONE)
- printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
- mddev->level);
+ pr_warn("md: personality for level %d is not loaded!\n",
+ mddev->level);
else
- printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
- mddev->clevel);
+ pr_warn("md: personality for level %s is not loaded!\n",
+ mddev->clevel);
return -EINVAL;
}
spin_unlock(&pers_lock);
@@ -5236,21 +5270,16 @@ int md_run(struct mddev *mddev)
if (rdev < rdev2 &&
rdev->bdev->bd_contains ==
rdev2->bdev->bd_contains) {
- printk(KERN_WARNING
- "%s: WARNING: %s appears to be"
- " on the same physical disk as"
- " %s.\n",
- mdname(mddev),
- bdevname(rdev->bdev,b),
- bdevname(rdev2->bdev,b2));
+ pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
+ mdname(mddev),
+ bdevname(rdev->bdev,b),
+ bdevname(rdev2->bdev,b2));
warned = 1;
}
}
if (warned)
- printk(KERN_WARNING
- "True protection against single-disk"
- " failure might be compromised.\n");
+ pr_warn("True protection against single-disk failure might be compromised.\n");
}
mddev->recovery = 0;
@@ -5264,14 +5293,14 @@ int md_run(struct mddev *mddev)
err = pers->run(mddev);
if (err)
- printk(KERN_ERR "md: pers->run() failed ...\n");
+ pr_warn("md: pers->run() failed ...\n");
else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
- WARN_ONCE(!mddev->external_size, "%s: default size too small,"
- " but 'external_size' not in effect?\n", __func__);
- printk(KERN_ERR
- "md: invalid array_size %llu > default size %llu\n",
- (unsigned long long)mddev->array_sectors / 2,
- (unsigned long long)pers->size(mddev, 0, 0) / 2);
+ WARN_ONCE(!mddev->external_size,
+ "%s: default size too small, but 'external_size' not in effect?\n",
+ __func__);
+ pr_warn("md: invalid array_size %llu > default size %llu\n",
+ (unsigned long long)mddev->array_sectors / 2,
+ (unsigned long long)pers->size(mddev, 0, 0) / 2);
err = -EINVAL;
}
if (err == 0 && pers->sync_request &&
@@ -5281,8 +5310,8 @@ int md_run(struct mddev *mddev)
bitmap = bitmap_create(mddev, -1);
if (IS_ERR(bitmap)) {
err = PTR_ERR(bitmap);
- printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
- mdname(mddev), err);
+ pr_warn("%s: failed to create bitmap (%d)\n",
+ mdname(mddev), err);
} else
mddev->bitmap = bitmap;
@@ -5318,9 +5347,8 @@ int md_run(struct mddev *mddev)
if (pers->sync_request) {
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_redundancy_group))
- printk(KERN_WARNING
- "md: cannot register extra attributes for %s\n",
- mdname(mddev));
+ pr_warn("md: cannot register extra attributes for %s\n",
+ mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
} else if (mddev->ro == 2) /* auto-readonly not meaningful */
mddev->ro = 0;
@@ -5350,7 +5378,7 @@ int md_run(struct mddev *mddev)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- if (mddev->flags & MD_UPDATE_SB_FLAGS)
+ if (mddev->sb_flags)
md_update_sb(mddev, 0);
md_new_event(mddev);
@@ -5421,8 +5449,7 @@ static int restart_array(struct mddev *mddev)
mddev->safemode = 0;
mddev->ro = 0;
set_disk_ro(disk, 0);
- printk(KERN_INFO "md: %s switched to read-write mode.\n",
- mdname(mddev));
+ pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
/* Kick recovery or resync if necessary */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -5446,6 +5473,7 @@ static void md_clean(struct mddev *mddev)
mddev->level = LEVEL_NONE;
mddev->clevel[0] = 0;
mddev->flags = 0;
+ mddev->sb_flags = 0;
mddev->ro = 0;
mddev->metadata_type[0] = 0;
mddev->chunk_sectors = 0;
@@ -5490,12 +5518,15 @@ static void __md_stop_writes(struct mddev *mddev)
del_timer_sync(&mddev->safemode_timer);
+ if (mddev->pers && mddev->pers->quiesce) {
+ mddev->pers->quiesce(mddev, 1);
+ mddev->pers->quiesce(mddev, 0);
+ }
bitmap_flush(mddev);
- md_super_wait(mddev);
if (mddev->ro == 0 &&
((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
- (mddev->flags & MD_UPDATE_SB_FLAGS))) {
+ mddev->sb_flags)) {
/* mark array as shutdown cleanly */
if (!mddev_is_clustered(mddev))
mddev->in_sync = 1;
@@ -5516,8 +5547,8 @@ static void mddev_detach(struct mddev *mddev)
struct bitmap *bitmap = mddev->bitmap;
/* wait for behind writes to complete */
if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
- printk(KERN_INFO "md:%s: behind writes in progress - waiting to stop.\n",
- mdname(mddev));
+ pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
+ mdname(mddev));
/* need to kick something here to make sure I/O goes? */
wait_event(bitmap->behind_wait,
atomic_read(&bitmap->behind_writes) == 0);
@@ -5578,20 +5609,20 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
* which will now never happen */
wake_up_process(mddev->sync_thread->tsk);
- if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
return -EBUSY;
mddev_unlock(mddev);
wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
&mddev->recovery));
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- printk("md: %s still in use.\n",mdname(mddev));
+ pr_warn("md: %s still in use.\n",mdname(mddev));
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -5653,7 +5684,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
mddev->sysfs_active ||
mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- printk("md: %s still in use.\n",mdname(mddev));
+ pr_warn("md: %s still in use.\n",mdname(mddev));
mutex_unlock(&mddev->open_mutex);
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -5690,7 +5721,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
* Free resources if final stop
*/
if (mode == 0) {
- printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
+ pr_info("md: %s stopped.\n", mdname(mddev));
bitmap_destroy(mddev);
if (mddev->bitmap_info.file) {
@@ -5722,17 +5753,17 @@ static void autorun_array(struct mddev *mddev)
if (list_empty(&mddev->disks))
return;
- printk(KERN_INFO "md: running: ");
+ pr_info("md: running: ");
rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE];
- printk("<%s>", bdevname(rdev->bdev,b));
+ pr_cont("<%s>", bdevname(rdev->bdev,b));
}
- printk("\n");
+ pr_cont("\n");
err = do_md_run(mddev);
if (err) {
- printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
+ pr_warn("md: do_md_run() returned %d\n", err);
do_md_stop(mddev, 0, NULL);
}
}
@@ -5755,7 +5786,7 @@ static void autorun_devices(int part)
struct mddev *mddev;
char b[BDEVNAME_SIZE];
- printk(KERN_INFO "md: autorun ...\n");
+ pr_info("md: autorun ...\n");
while (!list_empty(&pending_raid_disks)) {
int unit;
dev_t dev;
@@ -5763,13 +5794,12 @@ static void autorun_devices(int part)
rdev0 = list_entry(pending_raid_disks.next,
struct md_rdev, same_set);
- printk(KERN_INFO "md: considering %s ...\n",
- bdevname(rdev0->bdev,b));
+ pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
INIT_LIST_HEAD(&candidates);
rdev_for_each_list(rdev, tmp, &pending_raid_disks)
if (super_90_load(rdev, rdev0, 0) >= 0) {
- printk(KERN_INFO "md: adding %s ...\n",
- bdevname(rdev->bdev,b));
+ pr_debug("md: adding %s ...\n",
+ bdevname(rdev->bdev,b));
list_move(&rdev->same_set, &candidates);
}
/*
@@ -5786,8 +5816,8 @@ static void autorun_devices(int part)
unit = MINOR(dev);
}
if (rdev0->preferred_minor != unit) {
- printk(KERN_INFO "md: unit number in %s is bad: %d\n",
- bdevname(rdev0->bdev, b), rdev0->preferred_minor);
+ pr_warn("md: unit number in %s is bad: %d\n",
+ bdevname(rdev0->bdev, b), rdev0->preferred_minor);
break;
}
@@ -5796,21 +5826,17 @@ static void autorun_devices(int part)
if (!mddev || !mddev->gendisk) {
if (mddev)
mddev_put(mddev);
- printk(KERN_ERR
- "md: cannot allocate memory for md drive.\n");
break;
}
if (mddev_lock(mddev))
- printk(KERN_WARNING "md: %s locked, cannot run\n",
- mdname(mddev));
+ pr_warn("md: %s locked, cannot run\n", mdname(mddev));
else if (mddev->raid_disks || mddev->major_version
|| !list_empty(&mddev->disks)) {
- printk(KERN_WARNING
- "md: %s already running, cannot run %s\n",
+ pr_warn("md: %s already running, cannot run %s\n",
mdname(mddev), bdevname(rdev0->bdev,b));
mddev_unlock(mddev);
} else {
- printk(KERN_INFO "md: created %s\n", mdname(mddev));
+ pr_debug("md: created %s\n", mdname(mddev));
mddev->persistent = 1;
rdev_for_each_list(rdev, tmp, &candidates) {
list_del_init(&rdev->same_set);
@@ -5829,7 +5855,7 @@ static void autorun_devices(int part)
}
mddev_put(mddev);
}
- printk(KERN_INFO "md: ... autorun DONE.\n");
+ pr_info("md: ... autorun DONE.\n");
}
#endif /* !MODULE */
@@ -5964,6 +5990,8 @@ static int get_disk_info(struct mddev *mddev, void __user * arg)
info.state |= (1<<MD_DISK_JOURNAL);
if (test_bit(WriteMostly, &rdev->flags))
info.state |= (1<<MD_DISK_WRITEMOSTLY);
+ if (test_bit(FailFast, &rdev->flags))
+ info.state |= (1<<MD_DISK_FAILFAST);
} else {
info.major = info.minor = 0;
info.raid_disk = -1;
@@ -5985,8 +6013,8 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
if (mddev_is_clustered(mddev) &&
!(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
- pr_err("%s: Cannot add to clustered mddev.\n",
- mdname(mddev));
+ pr_warn("%s: Cannot add to clustered mddev.\n",
+ mdname(mddev));
return -EINVAL;
}
@@ -5998,8 +6026,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
/* expecting a device which has a superblock */
rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
if (IS_ERR(rdev)) {
- printk(KERN_WARNING
- "md: md_import_device returned %ld\n",
+ pr_warn("md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
@@ -6010,8 +6037,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0) {
- printk(KERN_WARNING
- "md: %s has different UUID to %s\n",
+ pr_warn("md: %s has different UUID to %s\n",
bdevname(rdev->bdev,b),
bdevname(rdev0->bdev,b2));
export_rdev(rdev);
@@ -6032,9 +6058,8 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
if (mddev->pers) {
int err;
if (!mddev->pers->hot_add_disk) {
- printk(KERN_WARNING
- "%s: personality does not support diskops!\n",
- mdname(mddev));
+ pr_warn("%s: personality does not support diskops!\n",
+ mdname(mddev));
return -EINVAL;
}
if (mddev->persistent)
@@ -6043,8 +6068,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
else
rdev = md_import_device(dev, -1, -1);
if (IS_ERR(rdev)) {
- printk(KERN_WARNING
- "md: md_import_device returned %ld\n",
+ pr_warn("md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
@@ -6075,6 +6099,10 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
set_bit(WriteMostly, &rdev->flags);
else
clear_bit(WriteMostly, &rdev->flags);
+ if (info->state & (1<<MD_DISK_FAILFAST))
+ set_bit(FailFast, &rdev->flags);
+ else
+ clear_bit(FailFast, &rdev->flags);
if (info->state & (1<<MD_DISK_JOURNAL)) {
struct md_rdev *rdev2;
@@ -6140,8 +6168,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
* for major_version==0 superblocks
*/
if (mddev->major_version != 0) {
- printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
- mdname(mddev));
+ pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
return -EINVAL;
}
@@ -6149,8 +6176,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
int err;
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
- printk(KERN_WARNING
- "md: error, md_import_device() returned %ld\n",
+ pr_warn("md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
@@ -6166,9 +6192,11 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
+ if (info->state & (1<<MD_DISK_FAILFAST))
+ set_bit(FailFast, &rdev->flags);
if (!mddev->persistent) {
- printk(KERN_INFO "md: nonpersistent superblock ...\n");
+ pr_debug("md: nonpersistent superblock ...\n");
rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
} else
rdev->sb_start = calc_dev_sboffset(rdev);
@@ -6207,13 +6235,17 @@ kick_rdev:
md_cluster_ops->remove_disk(mddev, rdev);
md_kick_rdev_from_array(rdev);
- md_update_sb(mddev, 1);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ if (mddev->thread)
+ md_wakeup_thread(mddev->thread);
+ else
+ md_update_sb(mddev, 1);
md_new_event(mddev);
return 0;
busy:
- printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
- bdevname(rdev->bdev,b), mdname(mddev));
+ pr_debug("md: cannot remove active disk %s from %s ...\n",
+ bdevname(rdev->bdev,b), mdname(mddev));
return -EBUSY;
}
@@ -6227,22 +6259,19 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
return -ENODEV;
if (mddev->major_version != 0) {
- printk(KERN_WARNING "%s: HOT_ADD may only be used with"
- " version-0 superblocks.\n",
+ pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
mdname(mddev));
return -EINVAL;
}
if (!mddev->pers->hot_add_disk) {
- printk(KERN_WARNING
- "%s: personality does not support diskops!\n",
+ pr_warn("%s: personality does not support diskops!\n",
mdname(mddev));
return -EINVAL;
}
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
- printk(KERN_WARNING
- "md: error, md_import_device() returned %ld\n",
+ pr_warn("md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return -EINVAL;
}
@@ -6255,8 +6284,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
rdev->sectors = rdev->sb_start;
if (test_bit(Faulty, &rdev->flags)) {
- printk(KERN_WARNING
- "md: can not hot-add faulty %s disk to %s!\n",
+ pr_warn("md: can not hot-add faulty %s disk to %s!\n",
bdevname(rdev->bdev,b), mdname(mddev));
err = -EINVAL;
goto abort_export;
@@ -6276,7 +6304,9 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
rdev->raid_disk = -1;
- md_update_sb(mddev, 1);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ if (!mddev->thread)
+ md_update_sb(mddev, 1);
/*
* Kick recovery, maybe this spare has to be added to the
* array immediately.
@@ -6312,23 +6342,23 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
f = fget(fd);
if (f == NULL) {
- printk(KERN_ERR "%s: error: failed to get bitmap file\n",
- mdname(mddev));
+ pr_warn("%s: error: failed to get bitmap file\n",
+ mdname(mddev));
return -EBADF;
}
inode = f->f_mapping->host;
if (!S_ISREG(inode->i_mode)) {
- printk(KERN_ERR "%s: error: bitmap file must be a regular file\n",
- mdname(mddev));
+ pr_warn("%s: error: bitmap file must be a regular file\n",
+ mdname(mddev));
err = -EBADF;
} else if (!(f->f_mode & FMODE_WRITE)) {
- printk(KERN_ERR "%s: error: bitmap file must open for write\n",
- mdname(mddev));
+ pr_warn("%s: error: bitmap file must open for write\n",
+ mdname(mddev));
err = -EBADF;
} else if (atomic_read(&inode->i_writecount) != 1) {
- printk(KERN_ERR "%s: error: bitmap file is already in use\n",
- mdname(mddev));
+ pr_warn("%s: error: bitmap file is already in use\n",
+ mdname(mddev));
err = -EBUSY;
}
if (err) {
@@ -6393,8 +6423,7 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
info->major_version >= ARRAY_SIZE(super_types) ||
super_types[info->major_version].name == NULL) {
/* maybe try to auto-load a module? */
- printk(KERN_INFO
- "md: superblock version %d not known\n",
+ pr_warn("md: superblock version %d not known\n",
info->major_version);
return -EINVAL;
}
@@ -6432,9 +6461,11 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->max_disks = MD_SB_DISKS;
- if (mddev->persistent)
+ if (mddev->persistent) {
mddev->flags = 0;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ mddev->sb_flags = 0;
+ }
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
@@ -6660,8 +6691,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
if (mddev->bitmap_info.nodes) {
/* hold PW on all the bitmap lock */
if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
- printk("md: can't change bitmap to none since the"
- " array is in use by more than one node\n");
+ pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
rv = -EPERM;
md_cluster_ops->unlock_all_bitmaps(mddev);
goto err;
@@ -6829,7 +6859,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
/* need to ensure recovery thread has run */
wait_event_interruptible_timeout(mddev->sb_wait,
!test_bit(MD_RECOVERY_NEEDED,
- &mddev->flags),
+ &mddev->recovery),
msecs_to_jiffies(5000));
if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
/* Need to flush page cache, and ensure no-one else opens
@@ -6847,9 +6877,8 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
}
err = mddev_lock(mddev);
if (err) {
- printk(KERN_INFO
- "md: ioctl lock interrupted, reason %d, cmd %d\n",
- err, cmd);
+ pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
+ err, cmd);
goto out;
}
@@ -6864,30 +6893,24 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
if (mddev->pers) {
err = update_array_info(mddev, &info);
if (err) {
- printk(KERN_WARNING "md: couldn't update"
- " array info. %d\n", err);
+ pr_warn("md: couldn't update array info. %d\n", err);
goto unlock;
}
goto unlock;
}
if (!list_empty(&mddev->disks)) {
- printk(KERN_WARNING
- "md: array %s already has disks!\n",
- mdname(mddev));
+ pr_warn("md: array %s already has disks!\n", mdname(mddev));
err = -EBUSY;
goto unlock;
}
if (mddev->raid_disks) {
- printk(KERN_WARNING
- "md: array %s already initialised!\n",
- mdname(mddev));
+ pr_warn("md: array %s already initialised!\n", mdname(mddev));
err = -EBUSY;
goto unlock;
}
err = set_array_info(mddev, &info);
if (err) {
- printk(KERN_WARNING "md: couldn't set"
- " array info. %d\n", err);
+ pr_warn("md: couldn't set array info. %d\n", err);
goto unlock;
}
goto unlock;
@@ -6987,11 +7010,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
/* If a device failed while we were read-only, we
* need to make sure the metadata is updated now.
*/
- if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
+ if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
mddev_unlock(mddev);
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
- !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+ !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
mddev_lock_nointr(mddev);
}
} else {
@@ -7092,7 +7115,8 @@ static int md_open(struct block_device *bdev, fmode_t mode)
if (test_bit(MD_CLOSING, &mddev->flags)) {
mutex_unlock(&mddev->open_mutex);
- return -ENODEV;
+ err = -ENODEV;
+ goto out;
}
err = 0;
@@ -7101,6 +7125,8 @@ static int md_open(struct block_device *bdev, fmode_t mode)
check_disk_change(bdev);
out:
+ if (err)
+ mddev_put(mddev);
return err;
}
@@ -7171,10 +7197,12 @@ static int md_thread(void *arg)
wait_event_interruptible_timeout
(thread->wqueue,
test_bit(THREAD_WAKEUP, &thread->flags)
- || kthread_should_stop(),
+ || kthread_should_stop() || kthread_should_park(),
thread->timeout);
clear_bit(THREAD_WAKEUP, &thread->flags);
+ if (kthread_should_park())
+ kthread_parkme();
if (!kthread_should_stop())
thread->run(thread);
}
@@ -7588,8 +7616,8 @@ static const struct file_operations md_seq_fops = {
int register_md_personality(struct md_personality *p)
{
- printk(KERN_INFO "md: %s personality registered for level %d\n",
- p->name, p->level);
+ pr_debug("md: %s personality registered for level %d\n",
+ p->name, p->level);
spin_lock(&pers_lock);
list_add_tail(&p->list, &pers_list);
spin_unlock(&pers_lock);
@@ -7599,7 +7627,7 @@ EXPORT_SYMBOL(register_md_personality);
int unregister_md_personality(struct md_personality *p)
{
- printk(KERN_INFO "md: %s personality unregistered\n", p->name);
+ pr_debug("md: %s personality unregistered\n", p->name);
spin_lock(&pers_lock);
list_del_init(&p->list);
spin_unlock(&pers_lock);
@@ -7639,7 +7667,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
spin_lock(&pers_lock);
/* ensure module won't be unloaded */
if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
- pr_err("can't find md-cluster module or get it's reference.\n");
+ pr_warn("can't find md-cluster module or get it's reference.\n");
spin_unlock(&pers_lock);
return -ENOENT;
}
@@ -7741,8 +7769,8 @@ void md_write_start(struct mddev *mddev, struct bio *bi)
spin_lock(&mddev->lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
- set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+ set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
did_change = 1;
}
@@ -7751,7 +7779,7 @@ void md_write_start(struct mddev *mddev, struct bio *bi)
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
}
EXPORT_SYMBOL(md_write_start);
@@ -7772,7 +7800,7 @@ EXPORT_SYMBOL(md_write_end);
* attempting a GFP_KERNEL allocation while holding the mddev lock.
* Must be called with mddev_lock held.
*
- * In the ->external case MD_CHANGE_PENDING can not be cleared until mddev->lock
+ * In the ->external case MD_SB_CHANGE_PENDING can not be cleared until mddev->lock
* is dropped, so return -EAGAIN after notifying userspace.
*/
int md_allow_write(struct mddev *mddev)
@@ -7787,8 +7815,8 @@ int md_allow_write(struct mddev *mddev)
spin_lock(&mddev->lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
- set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+ set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
if (mddev->safemode_delay &&
mddev->safemode == 0)
mddev->safemode = 1;
@@ -7798,7 +7826,7 @@ int md_allow_write(struct mddev *mddev)
} else
spin_unlock(&mddev->lock);
- if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
return -EAGAIN;
else
return 0;
@@ -7914,11 +7942,9 @@ void md_do_sync(struct md_thread *thread)
mddev2->curr_resync >= mddev->curr_resync) {
if (mddev2_minor != mddev2->md_minor) {
mddev2_minor = mddev2->md_minor;
- printk(KERN_INFO "md: delaying %s of %s"
- " until %s has finished (they"
- " share one or more physical units)\n",
- desc, mdname(mddev),
- mdname(mddev2));
+ pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
+ desc, mdname(mddev),
+ mdname(mddev2));
}
mddev_put(mddev2);
if (signal_pending(current))
@@ -7975,12 +8001,10 @@ void md_do_sync(struct md_thread *thread)
}
}
- printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
- printk(KERN_INFO "md: minimum _guaranteed_ speed:"
- " %d KB/sec/disk.\n", speed_min(mddev));
- printk(KERN_INFO "md: using maximum available idle IO bandwidth "
- "(but not more than %d KB/sec) for %s.\n",
- speed_max(mddev), desc);
+ pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
+ pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev));
+ pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
+ speed_max(mddev), desc);
is_mddev_idle(mddev, 1); /* this initializes IO event counters */
@@ -7997,16 +8021,15 @@ void md_do_sync(struct md_thread *thread)
* Tune reconstruction:
*/
window = 32*(PAGE_SIZE/512);
- printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
- window/2, (unsigned long long)max_sectors/2);
+ pr_debug("md: using %dk window, over a total of %lluk.\n",
+ window/2, (unsigned long long)max_sectors/2);
atomic_set(&mddev->recovery_active, 0);
last_check = 0;
if (j>2) {
- printk(KERN_INFO
- "md: resuming %s of %s from checkpoint.\n",
- desc, mdname(mddev));
+ pr_debug("md: resuming %s of %s from checkpoint.\n",
+ desc, mdname(mddev));
mddev->curr_resync = j;
} else
mddev->curr_resync = 3; /* no longer delayed */
@@ -8038,7 +8061,7 @@ void md_do_sync(struct md_thread *thread)
j > mddev->recovery_cp)
mddev->recovery_cp = j;
update_time = jiffies;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
@@ -8133,9 +8156,9 @@ void md_do_sync(struct md_thread *thread)
}
}
}
- printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
- test_bit(MD_RECOVERY_INTR, &mddev->recovery)
- ? "interrupted" : "done");
+ pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
+ test_bit(MD_RECOVERY_INTR, &mddev->recovery)
+ ? "interrupted" : "done");
/*
* this also signals 'finished resyncing' to md_stop
*/
@@ -8155,9 +8178,8 @@ void md_do_sync(struct md_thread *thread)
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
if (mddev->curr_resync >= mddev->recovery_cp) {
- printk(KERN_INFO
- "md: checkpointing %s of %s.\n",
- desc, mdname(mddev));
+ pr_debug("md: checkpointing %s of %s.\n",
+ desc, mdname(mddev));
if (test_bit(MD_RECOVERY_ERROR,
&mddev->recovery))
mddev->recovery_cp =
@@ -8187,8 +8209,8 @@ void md_do_sync(struct md_thread *thread)
/* set CHANGE_PENDING here since maybe another update is needed,
* so other nodes are informed. It should be harmless for normal
* raid */
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
spin_lock(&mddev->lock);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
@@ -8288,12 +8310,12 @@ static int remove_and_add_spares(struct mddev *mddev,
if (!test_bit(Journal, &rdev->flags))
spares++;
md_new_event(mddev);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}
}
no_add:
if (removed)
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
return spares;
}
@@ -8305,8 +8327,8 @@ static void md_start_sync(struct work_struct *ws)
mddev,
"resync");
if (!mddev->sync_thread) {
- printk(KERN_ERR "%s: could not start resync thread...\n",
- mdname(mddev));
+ pr_warn("%s: could not start resync thread...\n",
+ mdname(mddev));
/* leave the spares where they are, it shouldn't hurt */
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -8356,8 +8378,8 @@ void md_check_recovery(struct mddev *mddev)
if (signal_pending(current)) {
if (mddev->pers->sync_request && !mddev->external) {
- printk(KERN_INFO "md: %s in immediate safe mode\n",
- mdname(mddev));
+ pr_debug("md: %s in immediate safe mode\n",
+ mdname(mddev));
mddev->safemode = 2;
}
flush_signals(current);
@@ -8366,7 +8388,7 @@ void md_check_recovery(struct mddev *mddev)
if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return;
if ( ! (
- (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
+ (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
test_bit(MD_RELOAD_SB, &mddev->flags) ||
@@ -8404,7 +8426,7 @@ void md_check_recovery(struct mddev *mddev)
md_reap_sync_thread(mddev);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
goto unlock;
}
@@ -8432,7 +8454,7 @@ void md_check_recovery(struct mddev *mddev)
mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
did_change = 1;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
}
if (mddev->safemode == 1)
mddev->safemode = 0;
@@ -8441,7 +8463,7 @@ void md_check_recovery(struct mddev *mddev)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
- if (mddev->flags & MD_UPDATE_SB_FLAGS)
+ if (mddev->sb_flags)
md_update_sb(mddev, 0);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
@@ -8537,7 +8559,7 @@ void md_reap_sync_thread(struct mddev *mddev)
if (mddev->pers->spare_active(mddev)) {
sysfs_notify(&mddev->kobj, NULL,
"degraded");
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}
}
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
@@ -8552,7 +8574,7 @@ void md_reap_sync_thread(struct mddev *mddev)
rdev->saved_raid_disk = -1;
md_update_sb(mddev, 1);
- /* MD_CHANGE_PENDING should be cleared by md_update_sb, so we can
+ /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
* call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
* clustered raid */
if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
@@ -8614,9 +8636,12 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
if (rv == 0) {
/* Make sure they get written out promptly */
+ if (test_bit(ExternalBbl, &rdev->flags))
+ sysfs_notify(&rdev->kobj, NULL,
+ "unacknowledged_bad_blocks");
sysfs_notify_dirent_safe(rdev->sysfs_state);
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_CLEAN) | BIT(MD_CHANGE_PENDING));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
md_wakeup_thread(rdev->mddev->thread);
return 1;
} else
@@ -8627,12 +8652,15 @@ EXPORT_SYMBOL_GPL(rdev_set_badblocks);
int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new)
{
+ int rv;
if (is_new)
s += rdev->new_data_offset;
else
s += rdev->data_offset;
- return badblocks_clear(&rdev->badblocks,
- s, sectors);
+ rv = badblocks_clear(&rdev->badblocks, s, sectors);
+ if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
+ sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
+ return rv;
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
@@ -8749,7 +8777,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
rdev2->saved_raid_disk = role;
ret = remove_and_add_spares(mddev, rdev2);
pr_info("Activated spare: %s\n",
- bdevname(rdev2->bdev,b));
+ bdevname(rdev2->bdev,b));
/* wakeup mddev->thread here, so array could
* perform resync with the new activated disk */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -8785,15 +8813,18 @@ static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
* variable in case we err in the future
*/
rdev->sb_page = NULL;
- alloc_disk_sb(rdev);
- ClearPageUptodate(rdev->sb_page);
- rdev->sb_loaded = 0;
- err = super_types[mddev->major_version].load_super(rdev, NULL, mddev->minor_version);
-
+ err = alloc_disk_sb(rdev);
+ if (err == 0) {
+ ClearPageUptodate(rdev->sb_page);
+ rdev->sb_loaded = 0;
+ err = super_types[mddev->major_version].
+ load_super(rdev, NULL, mddev->minor_version);
+ }
if (err < 0) {
pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
__func__, __LINE__, rdev->desc_nr, err);
- put_page(rdev->sb_page);
+ if (rdev->sb_page)
+ put_page(rdev->sb_page);
rdev->sb_page = swapout;
rdev->sb_loaded = 1;
return err;
@@ -8871,9 +8902,6 @@ void md_autodetect_dev(dev_t dev)
mutex_lock(&detected_devices_mutex);
list_add_tail(&node_detected_dev->list, &all_detected_devices);
mutex_unlock(&detected_devices_mutex);
- } else {
- printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
- ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
}
}
@@ -8887,7 +8915,7 @@ static void autostart_arrays(int part)
i_scanned = 0;
i_passed = 0;
- printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
+ pr_info("md: Autodetecting RAID arrays.\n");
mutex_lock(&detected_devices_mutex);
while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
@@ -8912,8 +8940,7 @@ static void autostart_arrays(int part)
}
mutex_unlock(&detected_devices_mutex);
- printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
- i_scanned, i_passed);
+ pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
autorun_devices(part);
}
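
The md.c hunks above teach the bad-block paths to poke user space through sysfs whenever an external metadata handler (ExternalBbl) owns the bad-block log, and rdev_clear_badblocks() now captures the badblocks_clear() result so it can send that notification before returning it. A minimal sketch of the notification pairing; bb_notify() is a hypothetical helper, not something the patch adds:

	/* Hypothetical helper, for illustration only: poke the matching sysfs
	 * attribute when an external bad-block manager (ExternalBbl) is in charge.
	 */
	static void bb_notify(struct md_rdev *rdev, bool newly_set)
	{
		if (!test_bit(ExternalBbl, &rdev->flags))
			return;
		sysfs_notify(&rdev->kobj, NULL,
			     newly_set ? "unacknowledged_bad_blocks" : "bad_blocks");
	}
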
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 2b2041773e79..e38936d05df1 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -30,6 +30,16 @@
#define MaxSector (~(sector_t)0)
/*
+ * These flags should really be called "NO_RETRY" rather than
+ * "FAILFAST" because they don't make any promise about time lapse,
+ * only about the number of retries, which will be zero.
+ * REQ_FAILFAST_DRIVER is not included because
+ * Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.")
+ * seems to suggest that the errors it avoids retrying should usually
+ * be retried.
+ */
+#define MD_FAILFAST (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
+/*
* MD's 'extended' device
*/
struct md_rdev {
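
MD_FAILFAST is simply the two request fail-fast hints combined; the personalities below OR it into bi_opf when a failed I/O could be retried against another copy instead. A minimal sketch of that pattern, assuming the per-device FailFast flag introduced just below; the helper name and the have_other_copies parameter are illustrative, not from the patch:

	/* Illustrative only: mark a read as fail-fast when another copy exists. */
	static void submit_read(struct md_rdev *rdev, struct bio *bio,
				bool have_other_copies)
	{
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		if (test_bit(FailFast, &rdev->flags) && have_other_copies)
			bio->bi_opf |= MD_FAILFAST;	/* REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT */
		generic_make_request(bio);
	}
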
@@ -168,6 +178,19 @@ enum flag_bits {
* so it is safe to remove without
* another synchronize_rcu() call.
*/
+ ExternalBbl, /* External metadata provides bad
+ * block management for a disk
+ */
+ FailFast, /* Minimal retries should be attempted on
+ * this device, so use REQ_FAILFAST_DEV.
+ * Also don't try to repair failed reads.
+ * It is expected that no bad block log
+ * is present.
+ */
+ LastDev, /* Seems to be the last working dev as
+ * it didn't fail, so don't use FailFast
+ * any more for metadata
+ */
};
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
@@ -189,6 +212,31 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new);
struct md_cluster_info;
+enum mddev_flags {
+ MD_ARRAY_FIRST_USE, /* First use of array, needs initialization */
+ MD_CLOSING, /* If set, we are closing the array, do not open
+ * it then */
+ MD_JOURNAL_CLEAN, /* A raid with journal is already clean */
+ MD_HAS_JOURNAL, /* The raid array has journal feature set */
+ MD_RELOAD_SB, /* Reload the superblock because another node
+ * updated it.
+ */
+ MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only, which means node
+ * already took resync lock, need to
+ * release the lock */
+ MD_FAILFAST_SUPPORTED, /* Using MD_FAILFAST on metadata writes is
+ * supported as calls to md_error() will
+ * never cause the array to become failed.
+ */
+};
+
+enum mddev_sb_flags {
+ MD_SB_CHANGE_DEVS, /* Some device status has changed */
+ MD_SB_CHANGE_CLEAN, /* transition to or from 'clean' */
+ MD_SB_CHANGE_PENDING, /* switch from 'clean' to 'active' in progress */
+ MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */
+};
+
struct mddev {
void *private;
struct md_personality *pers;
@@ -196,21 +244,7 @@ struct mddev {
int md_minor;
struct list_head disks;
unsigned long flags;
-#define MD_CHANGE_DEVS 0 /* Some device status has changed */
-#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
-#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
-#define MD_UPDATE_SB_FLAGS (1 | 2 | 4) /* If these are set, md_update_sb needed */
-#define MD_ARRAY_FIRST_USE 3 /* First use of array, needs initialization */
-#define MD_CLOSING 4 /* If set, we are closing the array, do not open
- * it then */
-#define MD_JOURNAL_CLEAN 5 /* A raid with journal is already clean */
-#define MD_HAS_JOURNAL 6 /* The raid array has journal feature set */
-#define MD_RELOAD_SB 7 /* Reload the superblock because another node
- * updated it.
- */
-#define MD_CLUSTER_RESYNC_LOCKED 8 /* cluster raid only, which means node
- * already took resync lock, need to
- * release the lock */
+ unsigned long sb_flags;
int suspended;
atomic_t active_io;
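
With the superblock-dirty bits moved into their own word, "does the superblock need writing" becomes a plain non-zero test on sb_flags instead of masking mddev->flags with MD_UPDATE_SB_FLAGS, and the array-state bits keep mddev->flags to themselves. A condensed sketch of the two sides of that contract; the function names are illustrative:

	/* Illustrative only: producers set bits in sb_flags, the md thread
	 * writes the superblock whenever any of them is set.
	 */
	static void mark_sb_dirty(struct mddev *mddev)
	{
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		md_wakeup_thread(mddev->thread);
	}

	static void maybe_update_sb(struct mddev *mddev)
	{
		if (mddev->sb_flags)		/* replaces flags & MD_UPDATE_SB_FLAGS */
			md_update_sb(mddev, 0);
	}
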
@@ -304,31 +338,6 @@ struct mddev {
int parallel_resync;
int ok_start_degraded;
- /* recovery/resync flags
- * NEEDED: we might need to start a resync/recover
- * RUNNING: a thread is running, or about to be started
- * SYNC: actually doing a resync, not a recovery
- * RECOVER: doing recovery, or need to try it.
- * INTR: resync needs to be aborted for some reason
- * DONE: thread is done and is waiting to be reaped
- * REQUEST: user-space has requested a sync (used with SYNC)
- * CHECK: user-space request for check-only, no repair
- * RESHAPE: A reshape is happening
- * ERROR: sync-action interrupted because io-error
- *
- * If neither SYNC or RESHAPE are set, then it is a recovery.
- */
-#define MD_RECOVERY_RUNNING 0
-#define MD_RECOVERY_SYNC 1
-#define MD_RECOVERY_RECOVER 2
-#define MD_RECOVERY_INTR 3
-#define MD_RECOVERY_DONE 4
-#define MD_RECOVERY_NEEDED 5
-#define MD_RECOVERY_REQUESTED 6
-#define MD_RECOVERY_CHECK 7
-#define MD_RECOVERY_RESHAPE 8
-#define MD_RECOVERY_FROZEN 9
-#define MD_RECOVERY_ERROR 10
unsigned long recovery;
/* If a RAID personality determines that recovery (of a particular
@@ -442,6 +451,23 @@ struct mddev {
unsigned int good_device_nr; /* good device num within cluster raid */
};
+enum recovery_flags {
+ /*
+ * If neither SYNC nor RESHAPE is set, then it is a recovery.
+ */
+ MD_RECOVERY_RUNNING, /* a thread is running, or about to be started */
+ MD_RECOVERY_SYNC, /* actually doing a resync, not a recovery */
+ MD_RECOVERY_RECOVER, /* doing recovery, or need to try it. */
+ MD_RECOVERY_INTR, /* resync needs to be aborted for some reason */
+ MD_RECOVERY_DONE, /* thread is done and is waiting to be reaped */
+ MD_RECOVERY_NEEDED, /* we might need to start a resync/recover */
+ MD_RECOVERY_REQUESTED, /* user-space has requested a sync (used with SYNC) */
+ MD_RECOVERY_CHECK, /* user-space request for check-only, no repair */
+ MD_RECOVERY_RESHAPE, /* A reshape is happening */
+ MD_RECOVERY_FROZEN, /* User request to abort, and not restart, any action */
+ MD_RECOVERY_ERROR, /* sync-action interrupted because io-error */
+};
+
static inline int __must_check mddev_lock(struct mddev *mddev)
{
return mutex_lock_interruptible(&mddev->reconfig_mutex);
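
Because enumerators count up from zero, the new recovery_flags enum keeps exactly the bit numbers the removed MD_RECOVERY_* #defines used, so existing test_bit()/set_bit() callers are unaffected. If that invariant ever needed to be spelled out, a build-time check along these lines would do (BUILD_BUG_ON from linux/bug.h; this check is not part of the patch):

	static inline void recovery_flags_layout_check(void)
	{
		BUILD_BUG_ON(MD_RECOVERY_RUNNING != 0);
		BUILD_BUG_ON(MD_RECOVERY_RESHAPE != 8);
		BUILD_BUG_ON(MD_RECOVERY_ERROR   != 10);
	}
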
@@ -623,7 +649,7 @@ extern int mddev_congested(struct mddev *mddev, int bits);
extern void md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page);
-extern void md_super_wait(struct mddev *mddev);
+extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int op, int op_flags,
bool metadata_op);
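
md_super_wait() now returns an int; given the new MD_SB_NEED_REWRITE bit above, the likely contract is a negative return when a (fail-fast) superblock write was dropped and has to be repeated. A hedged sketch of how a caller could react; the helper name and the exact return-value semantics are assumptions, not shown in this hunk:

	/* Assumption: md_super_wait() < 0 means at least one superblock write
	 * must be retried before the on-disk metadata can be trusted.
	 */
	static void settle_metadata(struct mddev *mddev)
	{
		if (md_super_wait(mddev) < 0)
			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);	/* ask for another pass */
	}
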
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 4da06d813b8f..aa8c4e5c1ee2 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -52,7 +52,7 @@ static int multipath_map (struct mpconf *conf)
}
rcu_read_unlock();
- printk(KERN_ERR "multipath_map(): no more operational IO paths?\n");
+ pr_crit_ratelimited("multipath_map(): no more operational IO paths?\n");
return (-1);
}
@@ -97,9 +97,9 @@ static void multipath_end_request(struct bio *bio)
*/
char b[BDEVNAME_SIZE];
md_error (mp_bh->mddev, rdev);
- printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
- bdevname(rdev->bdev,b),
- (unsigned long long)bio->bi_iter.bi_sector);
+ pr_info("multipath: %s: rescheduling sector %llu\n",
+ bdevname(rdev->bdev,b),
+ (unsigned long long)bio->bi_iter.bi_sector);
multipath_reschedule_retry(mp_bh);
} else
multipath_end_bh_io(mp_bh, bio->bi_error);
@@ -194,8 +194,7 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
* first check if this is a queued request for a device
* which has just failed.
*/
- printk(KERN_ALERT
- "multipath: only one IO path left and IO error.\n");
+ pr_warn("multipath: only one IO path left and IO error.\n");
/* leave it active... it's all we have */
return;
}
@@ -209,11 +208,9 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
spin_unlock_irqrestore(&conf->device_lock, flags);
}
set_bit(Faulty, &rdev->flags);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
- printk(KERN_ALERT "multipath: IO failure on %s,"
- " disabling IO path.\n"
- "multipath: Operation continuing"
- " on %d IO paths.\n",
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ pr_err("multipath: IO failure on %s, disabling IO path.\n"
+ "multipath: Operation continuing on %d IO paths.\n",
bdevname(rdev->bdev, b),
conf->raid_disks - mddev->degraded);
}
@@ -223,21 +220,21 @@ static void print_multipath_conf (struct mpconf *conf)
int i;
struct multipath_info *tmp;
- printk("MULTIPATH conf printout:\n");
+ pr_debug("MULTIPATH conf printout:\n");
if (!conf) {
- printk("(conf==NULL)\n");
+ pr_debug("(conf==NULL)\n");
return;
}
- printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
- conf->raid_disks);
+ pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+ conf->raid_disks);
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
tmp = conf->multipaths + i;
if (tmp->rdev)
- printk(" disk%d, o:%d, dev:%s\n",
- i,!test_bit(Faulty, &tmp->rdev->flags),
- bdevname(tmp->rdev->bdev,b));
+ pr_debug(" disk%d, o:%d, dev:%s\n",
+ i,!test_bit(Faulty, &tmp->rdev->flags),
+ bdevname(tmp->rdev->bdev,b));
}
}
@@ -292,8 +289,7 @@ static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev == p->rdev) {
if (test_bit(In_sync, &rdev->flags) ||
atomic_read(&rdev->nr_pending)) {
- printk(KERN_ERR "hot-remove-disk, slot %d is identified"
- " but is still operational!\n", number);
+ pr_warn("hot-remove-disk, slot %d is identified but is still operational!\n", number);
err = -EBUSY;
goto abort;
}
@@ -346,16 +342,14 @@ static void multipathd(struct md_thread *thread)
bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
if ((mp_bh->path = multipath_map (conf))<0) {
- printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
- " error for block %llu\n",
- bdevname(bio->bi_bdev,b),
- (unsigned long long)bio->bi_iter.bi_sector);
+ pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
+ bdevname(bio->bi_bdev,b),
+ (unsigned long long)bio->bi_iter.bi_sector);
multipath_end_bh_io(mp_bh, -EIO);
} else {
- printk(KERN_ERR "multipath: %s: redirecting sector %llu"
- " to another IO path\n",
- bdevname(bio->bi_bdev,b),
- (unsigned long long)bio->bi_iter.bi_sector);
+ pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
+ bdevname(bio->bi_bdev,b),
+ (unsigned long long)bio->bi_iter.bi_sector);
*bio = *(mp_bh->master_bio);
bio->bi_iter.bi_sector +=
conf->multipaths[mp_bh->path].rdev->data_offset;
@@ -389,8 +383,8 @@ static int multipath_run (struct mddev *mddev)
return -EINVAL;
if (mddev->level != LEVEL_MULTIPATH) {
- printk("multipath: %s: raid level not set to multipath IO (%d)\n",
- mdname(mddev), mddev->level);
+ pr_warn("multipath: %s: raid level not set to multipath IO (%d)\n",
+ mdname(mddev), mddev->level);
goto out;
}
/*
@@ -401,21 +395,13 @@ static int multipath_run (struct mddev *mddev)
conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL);
mddev->private = conf;
- if (!conf) {
- printk(KERN_ERR
- "multipath: couldn't allocate memory for %s\n",
- mdname(mddev));
+ if (!conf)
goto out;
- }
conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
GFP_KERNEL);
- if (!conf->multipaths) {
- printk(KERN_ERR
- "multipath: couldn't allocate memory for %s\n",
- mdname(mddev));
+ if (!conf->multipaths)
goto out_free_conf;
- }
working_disks = 0;
rdev_for_each(rdev, mddev) {
@@ -439,7 +425,7 @@ static int multipath_run (struct mddev *mddev)
INIT_LIST_HEAD(&conf->retry_list);
if (!working_disks) {
- printk(KERN_ERR "multipath: no operational IO paths for %s\n",
+ pr_warn("multipath: no operational IO paths for %s\n",
mdname(mddev));
goto out_free_conf;
}
@@ -447,27 +433,17 @@ static int multipath_run (struct mddev *mddev)
conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
sizeof(struct multipath_bh));
- if (conf->pool == NULL) {
- printk(KERN_ERR
- "multipath: couldn't allocate memory for %s\n",
- mdname(mddev));
+ if (conf->pool == NULL)
goto out_free_conf;
- }
- {
- mddev->thread = md_register_thread(multipathd, mddev,
- "multipath");
- if (!mddev->thread) {
- printk(KERN_ERR "multipath: couldn't allocate thread"
- " for %s\n", mdname(mddev));
- goto out_free_conf;
- }
- }
+ mddev->thread = md_register_thread(multipathd, mddev,
+ "multipath");
+ if (!mddev->thread)
+ goto out_free_conf;
- printk(KERN_INFO
- "multipath: array %s active with %d out of %d IO paths\n",
+ pr_info("multipath: array %s active with %d out of %d IO paths\n",
mdname(mddev), conf->raid_disks - mddev->degraded,
- mddev->raid_disks);
+ mddev->raid_disks);
/*
* Ok, everything is just fine now
*/
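
The multipath.c changes are mostly a mechanical printk()-to-pr_*() conversion: chatty configuration dumps drop to pr_debug() (silent unless DEBUG or dynamic debug is enabled), genuine failures keep pr_warn()/pr_err(), and the hot "no paths left" message becomes rate limited. In shorthand, with the levels shown purely to illustrate the mapping:

	#include <linux/printk.h>
	#include <linux/ratelimit.h>

	static void log_examples(int wd, int rd, int remaining)
	{
		pr_debug(" --- wd:%d rd:%d\n", wd, rd);		/* was a printk(KERN_*) conf dump */
		pr_warn("multipath: only one IO path left and IO error.\n");
		if (!remaining)
			pr_crit_ratelimited("multipath_map(): no more operational IO paths?\n");
	}
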
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index e83047cbb2da..7938cd21fa4c 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -700,13 +700,11 @@ static int populate_ablock_with_values(struct dm_array_info *info, struct array_
{
int r;
unsigned i;
- uint32_t nr_entries;
struct dm_btree_value_type *vt = &info->value_type;
BUG_ON(le32_to_cpu(ab->nr_entries));
BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
- nr_entries = le32_to_cpu(ab->nr_entries);
for (i = 0; i < new_nr; i++) {
r = fn(base + i, element_at(info, ab, i), context);
if (r)
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 1e33dd51c21f..a6dde7cab458 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -18,6 +18,8 @@
/*----------------------------------------------------------------*/
+#ifdef CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING
+
/*
* This is a read/write semaphore with a couple of differences.
*
@@ -302,6 +304,18 @@ static void report_recursive_bug(dm_block_t b, int r)
(unsigned long long) b);
}
+#else /* !CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING */
+
+#define bl_init(x) do { } while (0)
+#define bl_down_read(x) 0
+#define bl_down_read_nonblock(x) 0
+#define bl_up_read(x) do { } while (0)
+#define bl_down_write(x) 0
+#define bl_up_write(x) do { } while (0)
+#define report_recursive_bug(x, y) do { } while (0)
+
+#endif /* CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING */
+
/*----------------------------------------------------------------*/
/*
@@ -330,8 +344,11 @@ EXPORT_SYMBOL_GPL(dm_block_data);
struct buffer_aux {
struct dm_block_validator *validator;
- struct block_lock lock;
int write_locked;
+
+#ifdef CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING
+ struct block_lock lock;
+#endif
};
static void dm_block_manager_alloc_callback(struct dm_buffer *buf)
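
The block-manager lock checker is now compiled out unless CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is set: the real bl_* routines only exist under the option, the stubs above cost nothing, and buffer_aux only carries the lock field when the checker is built. The general shape of that pattern, using hypothetical names rather than the dm ones:

	#ifdef CONFIG_EXAMPLE_DEBUG_LOCKING
	struct dbg_lock { int readers; };
	static inline void dbg_lock_init(struct dbg_lock *l)      { l->readers = 0; }
	static inline int  dbg_lock_down_read(struct dbg_lock *l) { l->readers++; return 0; }
	static inline void dbg_lock_up_read(struct dbg_lock *l)   { l->readers--; }
	#else
	#define dbg_lock_init(l)      do { } while (0)
	#define dbg_lock_down_read(l) 0
	#define dbg_lock_up_read(l)   do { } while (0)
	#endif

	struct aux_example {
		int write_locked;
	#ifdef CONFIG_EXAMPLE_DEBUG_LOCKING
		struct dbg_lock lock;	/* only present when the checker is built in */
	#endif
	};
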
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 306d2e4502c4..4c28608a0c94 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -464,7 +464,8 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
ll->nr_allocated--;
le32_add_cpu(&ie_disk.nr_free, 1);
ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit));
- }
+ } else
+ *ev = SM_NONE;
return ll->save_ie(ll, index, &ie_disk);
}
@@ -547,7 +548,6 @@ static int metadata_ll_init_index(struct ll_disk *ll)
if (r < 0)
return r;
- memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
ll->bitmap_root = dm_block_location(b);
dm_tm_unlock(ll->tm, b);
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 7e44005595c1..20557e2c60c6 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -775,17 +775,15 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
r = sm_ll_new_metadata(&smm->ll, tm);
+ if (!r) {
+ if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
+ nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
+ r = sm_ll_extend(&smm->ll, nr_blocks);
+ }
+ memcpy(&smm->sm, &ops, sizeof(smm->sm));
if (r)
return r;
- if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
- nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
- r = sm_ll_extend(&smm->ll, nr_blocks);
- if (r)
- return r;
-
- memcpy(&smm->sm, &ops, sizeof(smm->sm));
-
/*
* Now we need to update the newly created data structures with the
* allocated blocks that they were built from.
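
The reordering in dm_sm_metadata_create() installs the real ops table before the error return rather than after it; previously an early return left bootstrap_ops in place, presumably leaving the caller unable to tear the half-built space map down through the normal ops. The control-flow shape, reduced to made-up names:

	#include <linux/errno.h>

	struct obj_ops { void (*destroy)(void *obj); };

	static void noop_destroy(void *obj) { }
	static const struct obj_ops bootstrap_ops = { .destroy = noop_destroy };
	static const struct obj_ops real_ops      = { .destroy = noop_destroy };

	struct obj { const struct obj_ops *ops; };

	static int do_setup(struct obj *o, int want)
	{
		return want > 0 ? 0 : -EINVAL;
	}

	static int obj_create(struct obj *o, int want)
	{
		int r;

		o->ops = &bootstrap_ops;	/* limited ops used while building */
		r = do_setup(o, want);		/* may fail */
		o->ops = &real_ops;		/* installed even when r != 0 ... */
		return r;			/* ... so the caller can still destroy o */
	}
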
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 258986a2699d..a162fedeb51a 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -21,6 +21,7 @@
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"
@@ -51,20 +52,21 @@ static void dump_zones(struct mddev *mddev)
char b[BDEVNAME_SIZE];
struct r0conf *conf = mddev->private;
int raid_disks = conf->strip_zone[0].nb_dev;
- printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
- mdname(mddev),
- conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
+ pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
+ mdname(mddev),
+ conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
for (j = 0; j < conf->nr_strip_zones; j++) {
- printk(KERN_INFO "md: zone%d=[", j);
+ char line[200];
+ int len = 0;
+
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
- printk(KERN_CONT "%s%s", k?"/":"",
- bdevname(conf->devlist[j*raid_disks
- + k]->bdev, b));
- printk(KERN_CONT "]\n");
+ len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
+ bdevname(conf->devlist[j*raid_disks
+ + k]->bdev, b));
+ pr_debug("md: zone%d=[%s]\n", j, line);
zone_size = conf->strip_zone[j].zone_end - zone_start;
- printk(KERN_INFO " zone-offset=%10lluKB, "
- "device-offset=%10lluKB, size=%10lluKB\n",
+ pr_debug(" zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
(unsigned long long)zone_start>>1,
(unsigned long long)conf->strip_zone[j].dev_start>>1,
(unsigned long long)zone_size>>1);
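
dump_zones() used to build the zone line piecewise with KERN_CONT, which interleaves badly with concurrent messages; it now assembles the whole line in a local buffer and emits it with a single pr_debug(). The same pattern in isolation, with illustrative names:

	#include <linux/kernel.h>
	#include <linux/printk.h>

	static void emit_zone_line(int zone, char *names[], int nb_dev)
	{
		char line[200];
		int len = 0, k;

		for (k = 0; k < nb_dev && len < (int)sizeof(line); k++)
			len += snprintf(line + len, sizeof(line) - len,
					"%s%s", k ? "/" : "", names[k]);
		pr_debug("md: zone%d=[%s]\n", zone, line);
	}
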
@@ -142,9 +144,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
* chunk size is a multiple of that sector size
*/
if ((mddev->chunk_sectors << 9) % blksize) {
- printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
- mdname(mddev),
- mddev->chunk_sectors << 9, blksize);
+ pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
+ mdname(mddev),
+ mddev->chunk_sectors << 9, blksize);
err = -EINVAL;
goto abort;
}
@@ -186,19 +188,18 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
}
if (j < 0) {
- printk(KERN_ERR
- "md/raid0:%s: remove inactive devices before converting to RAID0\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
+ mdname(mddev));
goto abort;
}
if (j >= mddev->raid_disks) {
- printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
- "aborting!\n", mdname(mddev), j);
+ pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
+ mdname(mddev), j);
goto abort;
}
if (dev[j]) {
- printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
- "aborting!\n", mdname(mddev), j);
+ pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
+ mdname(mddev), j);
goto abort;
}
dev[j] = rdev1;
@@ -208,8 +209,8 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
cnt++;
}
if (cnt != mddev->raid_disks) {
- printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
- "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
+ pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
+ mdname(mddev), cnt, mddev->raid_disks);
goto abort;
}
zone->nb_dev = cnt;
@@ -357,8 +358,7 @@ static int raid0_run(struct mddev *mddev)
int ret;
if (mddev->chunk_sectors == 0) {
- printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
return -EINVAL;
}
if (md_check_no_bitmap(mddev))
@@ -399,9 +399,9 @@ static int raid0_run(struct mddev *mddev)
/* calculate array device size */
md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
- printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
- mdname(mddev),
- (unsigned long long)mddev->array_sectors);
+ pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
+ mdname(mddev),
+ (unsigned long long)mddev->array_sectors);
if (mddev->queue) {
/* calculate the max read-ahead size.
@@ -464,7 +464,8 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
}
do {
- sector_t sector = bio->bi_iter.bi_sector;
+ sector_t bio_sector = bio->bi_iter.bi_sector;
+ sector_t sector = bio_sector;
unsigned chunk_sects = mddev->chunk_sectors;
unsigned sectors = chunk_sects -
@@ -473,7 +474,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
: sector_div(sector, chunk_sects));
/* Restore due to sector_div */
- sector = bio->bi_iter.bi_sector;
+ sector = bio_sector;
if (sectors < bio_sectors(bio)) {
split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
@@ -492,8 +493,13 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */
bio_endio(split);
- } else
+ } else {
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
+ split, disk_devt(mddev->gendisk),
+ bio_sector);
generic_make_request(split);
+ }
} while (split != bio);
}
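
Splitting bios across members loses the connection to the original md device in blktrace; the new trace_block_bio_remap() call (hence the trace/events/block.h include added above) records "mdX sector S remapped to member device" just before submission. The call in isolation, wrapped in an illustrative helper:

	#include <trace/events/block.h>

	/* Illustrative wrapper: emit the remap event, then submit to the member. */
	static void submit_remapped(struct mddev *mddev, struct bio *split,
				    sector_t orig_sector)
	{
		if (mddev->gendisk)	/* e.g. arrays driven through dm-raid have no gendisk */
			trace_block_bio_remap(bdev_get_queue(split->bi_bdev), split,
					      disk_devt(mddev->gendisk), orig_sector);
		generic_make_request(split);
	}
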
@@ -509,17 +515,17 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
struct r0conf *priv_conf;
if (mddev->degraded != 1) {
- printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
- mdname(mddev),
- mddev->degraded);
+ pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
+ mdname(mddev),
+ mddev->degraded);
return ERR_PTR(-EINVAL);
}
rdev_for_each(rdev, mddev) {
/* check slot number for a disk */
if (rdev->raid_disk == mddev->raid_disks-1) {
- printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
rdev->sectors = mddev->dev_sectors;
@@ -533,8 +539,11 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
mddev->delta_disks = -1;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
+ clear_bit(MD_HAS_JOURNAL, &mddev->flags);
+ clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
create_strip_zones(mddev, &priv_conf);
+
return priv_conf;
}
@@ -549,19 +558,19 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
* - all mirrors must be already degraded
*/
if (mddev->layout != ((1 << 8) + 2)) {
- printk(KERN_ERR "md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n",
- mdname(mddev),
- mddev->layout);
+ pr_warn("md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n",
+ mdname(mddev),
+ mddev->layout);
return ERR_PTR(-EINVAL);
}
if (mddev->raid_disks & 1) {
- printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
if (mddev->degraded != (mddev->raid_disks>>1)) {
- printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
@@ -574,6 +583,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
mddev->degraded = 0;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
+ clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
create_strip_zones(mddev, &priv_conf);
return priv_conf;
@@ -588,7 +598,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
* - (N - 1) mirror drives must be already faulty
*/
if ((mddev->raid_disks - 1) != mddev->degraded) {
- printk(KERN_ERR "md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
+ pr_err("md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
mdname(mddev));
return ERR_PTR(-EINVAL);
}
@@ -616,6 +626,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
mddev->raid_disks = 1;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
+ clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
create_strip_zones(mddev, &priv_conf);
return priv_conf;
@@ -631,8 +642,8 @@ static void *raid0_takeover(struct mddev *mddev)
*/
if (mddev->bitmap) {
- printk(KERN_ERR "md/raid0: %s: cannot takeover array with bitmap\n",
- mdname(mddev));
+ pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
+ mdname(mddev));
return ERR_PTR(-EBUSY);
}
if (mddev->level == 4)
@@ -642,8 +653,8 @@ static void *raid0_takeover(struct mddev *mddev)
if (mddev->layout == ALGORITHM_PARITY_N)
return raid0_takeover_raid45(mddev);
- printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
- mdname(mddev), ALGORITHM_PARITY_N);
+ pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
+ mdname(mddev), ALGORITHM_PARITY_N);
}
if (mddev->level == 10)
@@ -652,7 +663,7 @@ static void *raid0_takeover(struct mddev *mddev)
if (mddev->level == 1)
return raid0_takeover_raid1(mddev);
- printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
+ pr_warn("Takeover from raid%i to raid0 not supported\n",
mddev->level);
return ERR_PTR(-EINVAL);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 29e2df5cd77b..a1f3fbed9100 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -37,6 +37,7 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
+#include <trace/events/block.h>
#include "md.h"
#include "raid1.h"
#include "bitmap.h"
@@ -70,6 +71,9 @@ static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
sector_t bi_sector);
static void lower_barrier(struct r1conf *conf);
+#define raid1_log(md, fmt, args...) \
+ do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
+
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
struct pool_info *pi = data;
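
raid1_log() drops a short annotation into the blktrace stream whenever raid1 has to wait (barriers, frozen array, blocked rdevs, full write queue) and is a no-op for arrays without a request queue. Its expansion is essentially the following; blk_add_trace_msg() is declared in linux/blktrace_api.h:

	/* Open-coded equivalent of raid1_log(mddev, "wait rdev %d blocked", disk) */
	static void log_wait_example(struct mddev *mddev, int disk)
	{
		if (mddev->queue)
			blk_add_trace_msg(mddev->queue, "raid1 wait rdev %d blocked", disk);
	}
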
@@ -325,6 +329,11 @@ static void raid1_end_read_request(struct bio *bio)
if (uptodate)
set_bit(R1BIO_Uptodate, &r1_bio->state);
+ else if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R1BIO_FailFast, &r1_bio->state))
+ /* This was a fail-fast read so we definitely
+ * want to retry */
+ ;
else {
/* If all other devices have failed, we want to return
* the error upwards rather than fail the last device.
@@ -347,13 +356,10 @@ static void raid1_end_read_request(struct bio *bio)
* oops, read error:
*/
char b[BDEVNAME_SIZE];
- printk_ratelimited(
- KERN_ERR "md/raid1:%s: %s: "
- "rescheduling sector %llu\n",
- mdname(conf->mddev),
- bdevname(rdev->bdev,
- b),
- (unsigned long long)r1_bio->sector);
+ pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
+ mdname(conf->mddev),
+ bdevname(rdev->bdev, b),
+ (unsigned long long)r1_bio->sector);
set_bit(R1BIO_ReadError, &r1_bio->state);
reschedule_retry(r1_bio);
/* don't drop the reference on read_disk yet */
@@ -416,7 +422,24 @@ static void raid1_end_write_request(struct bio *bio)
set_bit(MD_RECOVERY_NEEDED, &
conf->mddev->recovery);
- set_bit(R1BIO_WriteError, &r1_bio->state);
+ if (test_bit(FailFast, &rdev->flags) &&
+ (bio->bi_opf & MD_FAILFAST) &&
+ /* We never try FailFast to WriteMostly devices */
+ !test_bit(WriteMostly, &rdev->flags)) {
+ md_error(r1_bio->mddev, rdev);
+ if (!test_bit(Faulty, &rdev->flags))
+ /* This is the only remaining device,
+ * We need to retry the write without
+ * FailFast
+ */
+ set_bit(R1BIO_WriteError, &r1_bio->state);
+ else {
+ /* Finished with this branch */
+ r1_bio->bios[mirror] = NULL;
+ to_put = bio;
+ }
+ } else
+ set_bit(R1BIO_WriteError, &r1_bio->state);
} else {
/*
* Set R1BIO_Uptodate in our master bio, so that we
@@ -534,6 +557,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
best_good_sectors = 0;
has_nonrot_disk = 0;
choose_next_idle = 0;
+ clear_bit(R1BIO_FailFast, &r1_bio->state);
if ((conf->mddev->recovery_cp < this_sector + sectors) ||
(mddev_is_clustered(conf->mddev) &&
@@ -607,6 +631,10 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
} else
best_good_sectors = sectors;
+ if (best_disk >= 0)
+ /* At least two disks to choose from so failfast is OK */
+ set_bit(R1BIO_FailFast, &r1_bio->state);
+
nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
has_nonrot_disk |= nonrot;
pending = atomic_read(&rdev->nr_pending);
@@ -645,11 +673,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
}
break;
}
- /* If device is idle, use it */
- if (pending == 0) {
- best_disk = disk;
- break;
- }
if (choose_next_idle)
continue;
@@ -672,7 +695,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 * mixed rotational/non-rotational disks depending on workload.
*/
if (best_disk == -1) {
- if (has_nonrot_disk)
+ if (has_nonrot_disk || min_pending == 0)
best_disk = best_pending_disk;
else
best_disk = best_dist_disk;
@@ -745,9 +768,14 @@ static void flush_pending_writes(struct r1conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
- if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ bio->bi_bdev = rdev->bdev;
+ if (test_bit(Faulty, &rdev->flags)) {
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+ } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
else
@@ -832,7 +860,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
else if (conf->barrier && bio_data_dir(bio) == WRITE) {
if ((conf->mddev->curr_resync_completed
>= bio_end_sector(bio)) ||
- (conf->next_resync + NEXT_NORMALIO_DISTANCE
+ (conf->start_next_window + NEXT_NORMALIO_DISTANCE
<= bio->bi_iter.bi_sector))
wait = false;
else
@@ -858,6 +886,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
* that queue to allow conf->start_next_window
* to increase.
*/
+ raid1_log(conf->mddev, "wait barrier");
wait_event_lock_irq(conf->wait_barrier,
!conf->array_frozen &&
(!conf->barrier ||
@@ -937,6 +966,7 @@ static void freeze_array(struct r1conf *conf, int extra)
*/
spin_lock_irq(&conf->resync_lock);
conf->array_frozen = 1;
+ raid1_log(conf->mddev, "wait freeze");
wait_event_lock_irq_cmd(conf->wait_barrier,
conf->nr_pending == conf->nr_queued+extra,
conf->resync_lock,
@@ -1019,9 +1049,14 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
- if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ bio->bi_bdev = rdev->bdev;
+ if (test_bit(Faulty, &rdev->flags)) {
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+ } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
else
@@ -1136,6 +1171,7 @@ read_again:
* take care not to over-take any writes
* that are 'behind'
*/
+ raid1_log(mddev, "wait behind writes");
wait_event(bitmap->behind_wait,
atomic_read(&bitmap->behind_writes) == 0);
}
@@ -1153,8 +1189,16 @@ read_again:
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid1_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync);
+ if (test_bit(FailFast, &mirror->rdev->flags) &&
+ test_bit(R1BIO_FailFast, &r1_bio->state))
+ read_bio->bi_opf |= MD_FAILFAST;
read_bio->bi_private = r1_bio;
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+ read_bio, disk_devt(mddev->gendisk),
+ r1_bio->sector);
+
if (max_sectors < r1_bio->sectors) {
/* could not read all from this device, so we will
* need another r1_bio.
@@ -1195,6 +1239,7 @@ read_again:
*/
if (conf->pending_count >= max_queued_requests) {
md_wakeup_thread(mddev->thread);
+ raid1_log(mddev, "wait queued");
wait_event(conf->wait_barrier,
conf->pending_count < max_queued_requests);
}
@@ -1286,6 +1331,7 @@ read_again:
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
r1_bio->state = 0;
allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
+ raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
start_next_window = wait_barrier(conf, bio);
/*
@@ -1363,10 +1409,21 @@ read_again:
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
bio_set_op_attrs(mbio, op, do_flush_fua | do_sync);
+ if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
+ !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
+ conf->raid_disks - mddev->degraded > 1)
+ mbio->bi_opf |= MD_FAILFAST;
mbio->bi_private = r1_bio;
atomic_inc(&r1_bio->remaining);
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+ mbio, disk_devt(mddev->gendisk),
+ r1_bio->sector);
+ /* flush_pending_writes() needs access to the rdev so...*/
+ mbio->bi_bdev = (void*)conf->mirrors[i].rdev;
+
cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
if (cb)
plug = container_of(cb, struct raid1_plug_cb, cb);
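
Write bios queued on the pending list now smuggle the md_rdev pointer through bi_bdev until they are actually submitted; flush_pending_writes() and raid1_unplug() put the real block device back and can fail the bio immediately if the member went Faulty while it sat on the list. The hand-off reduced to its two halves (discard handling omitted, names illustrative):

	/* Queue side: stash the rdev where the submit side expects to find it. */
	static void queue_write(struct bio *mbio, struct md_rdev *rdev,
				struct bio_list *pending)
	{
		mbio->bi_bdev = (void *)rdev;
		bio_list_add(pending, mbio);
	}

	/* Submit side: restore the real device, or fail at once if it is gone. */
	static void submit_pending(struct bio *bio)
	{
		struct md_rdev *rdev = (void *)bio->bi_bdev;

		bio->bi_bdev = rdev->bdev;
		if (test_bit(Faulty, &rdev->flags)) {
			bio->bi_error = -EIO;
			bio_endio(bio);
		} else {
			generic_make_request(bio);
		}
	}
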
@@ -1436,6 +1493,7 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
* next level up know.
* else mark the drive as failed
*/
+ spin_lock_irqsave(&conf->device_lock, flags);
if (test_bit(In_sync, &rdev->flags)
&& (conf->raid_disks - mddev->degraded) == 1) {
/*
@@ -1445,10 +1503,10 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
* it is very likely to fail.
*/
conf->recovery_disabled = mddev->recovery_disabled;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
return;
}
set_bit(Blocked, &rdev->flags);
- spin_lock_irqsave(&conf->device_lock, flags);
if (test_and_clear_bit(In_sync, &rdev->flags)) {
mddev->degraded++;
set_bit(Faulty, &rdev->flags);
@@ -1459,36 +1517,35 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
* if recovery is running, make sure it aborts.
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
- printk(KERN_ALERT
- "md/raid1:%s: Disk failure on %s, disabling device.\n"
- "md/raid1:%s: Operation continuing on %d devices.\n",
- mdname(mddev), bdevname(rdev->bdev, b),
- mdname(mddev), conf->raid_disks - mddev->degraded);
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
+ pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
+ "md/raid1:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), conf->raid_disks - mddev->degraded);
}
static void print_conf(struct r1conf *conf)
{
int i;
- printk(KERN_DEBUG "RAID1 conf printout:\n");
+ pr_debug("RAID1 conf printout:\n");
if (!conf) {
- printk(KERN_DEBUG "(!conf)\n");
+ pr_debug("(!conf)\n");
return;
}
- printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
- conf->raid_disks);
+ pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+ conf->raid_disks);
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev)
- printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
- i, !test_bit(In_sync, &rdev->flags),
- !test_bit(Faulty, &rdev->flags),
- bdevname(rdev->bdev,b));
+ pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
+ i, !test_bit(In_sync, &rdev->flags),
+ !test_bit(Faulty, &rdev->flags),
+ bdevname(rdev->bdev,b));
}
rcu_read_unlock();
}
@@ -1788,12 +1845,24 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
sector_t sect = r1_bio->sector;
int sectors = r1_bio->sectors;
int idx = 0;
+ struct md_rdev *rdev;
+
+ rdev = conf->mirrors[r1_bio->read_disk].rdev;
+ if (test_bit(FailFast, &rdev->flags)) {
+ /* Don't try recovering from here - just fail it
+ * ... unless it is the last working device of course */
+ md_error(mddev, rdev);
+ if (test_bit(Faulty, &rdev->flags))
+ /* Don't try to read from here, but make sure
+ * put_buf does it's thing
+ */
+ bio->bi_end_io = end_sync_write;
+ }
while(sectors) {
int s = sectors;
int d = r1_bio->read_disk;
int success = 0;
- struct md_rdev *rdev;
int start;
if (s > (PAGE_SIZE>>9))
@@ -1825,11 +1894,10 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
* work just disable and interrupt the recovery.
* Don't fail devices as that won't really help.
*/
- printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
- " for block %llu\n",
- mdname(mddev),
- bdevname(bio->bi_bdev, b),
- (unsigned long long)r1_bio->sector);
+ pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
+ mdname(mddev),
+ bdevname(bio->bi_bdev, b),
+ (unsigned long long)r1_bio->sector);
for (d = 0; d < conf->raid_disks * 2; d++) {
rdev = conf->mirrors[d].rdev;
if (!rdev || test_bit(Faulty, &rdev->flags))
@@ -2013,6 +2081,9 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
continue;
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
+ if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
+ wbio->bi_opf |= MD_FAILFAST;
+
wbio->bi_end_io = end_sync_write;
atomic_inc(&r1_bio->remaining);
md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
@@ -2122,13 +2193,11 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
if (r1_sync_page_io(rdev, sect, s,
conf->tmppage, READ)) {
atomic_add(s, &rdev->corrected_errors);
- printk(KERN_INFO
- "md/raid1:%s: read error corrected "
- "(%d sectors at %llu on %s)\n",
- mdname(mddev), s,
- (unsigned long long)(sect +
- rdev->data_offset),
- bdevname(rdev->bdev, b));
+ pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
+ mdname(mddev), s,
+ (unsigned long long)(sect +
+ rdev->data_offset),
+ bdevname(rdev->bdev, b));
}
rdev_dec_pending(rdev, mddev);
} else
@@ -2287,6 +2356,8 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
struct bio *bio;
char b[BDEVNAME_SIZE];
struct md_rdev *rdev;
+ dev_t bio_dev;
+ sector_t bio_sector;
clear_bit(R1BIO_ReadError, &r1_bio->state);
/* we got a read error. Maybe the drive is bad. Maybe just
@@ -2300,10 +2371,14 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
bio = r1_bio->bios[r1_bio->read_disk];
bdevname(bio->bi_bdev, b);
+ bio_dev = bio->bi_bdev->bd_dev;
+ bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
bio_put(bio);
r1_bio->bios[r1_bio->read_disk] = NULL;
- if (mddev->ro == 0) {
+ rdev = conf->mirrors[r1_bio->read_disk].rdev;
+ if (mddev->ro == 0
+ && !test_bit(FailFast, &rdev->flags)) {
freeze_array(conf, 1);
fix_read_error(conf, r1_bio->read_disk,
r1_bio->sector, r1_bio->sectors);
@@ -2312,14 +2387,13 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
}
- rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
+ rdev_dec_pending(rdev, conf->mddev);
read_more:
disk = read_balance(conf, r1_bio, &max_sectors);
if (disk == -1) {
- printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
- " read error for block %llu\n",
- mdname(mddev), b, (unsigned long long)r1_bio->sector);
+ pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
+ mdname(mddev), b, (unsigned long long)r1_bio->sector);
raid_end_bio_io(r1_bio);
} else {
const unsigned long do_sync
@@ -2330,16 +2404,17 @@ read_more:
max_sectors);
r1_bio->bios[r1_bio->read_disk] = bio;
rdev = conf->mirrors[disk].rdev;
- printk_ratelimited(KERN_ERR
- "md/raid1:%s: redirecting sector %llu"
- " to other mirror: %s\n",
- mdname(mddev),
- (unsigned long long)r1_bio->sector,
- bdevname(rdev->bdev, b));
+ pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
+ mdname(mddev),
+ (unsigned long long)r1_bio->sector,
+ bdevname(rdev->bdev, b));
bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request;
bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
+ if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R1BIO_FailFast, &r1_bio->state))
+ bio->bi_opf |= MD_FAILFAST;
bio->bi_private = r1_bio;
if (max_sectors < r1_bio->sectors) {
/* Drat - have to split this up more */
@@ -2353,6 +2428,8 @@ read_more:
else
mbio->bi_phys_segments++;
spin_unlock_irq(&conf->device_lock);
+ trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+ bio, bio_dev, bio_sector);
generic_make_request(bio);
bio = NULL;
@@ -2367,8 +2444,11 @@ read_more:
sectors_handled;
goto read_more;
- } else
+ } else {
+ trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+ bio, bio_dev, bio_sector);
generic_make_request(bio);
+ }
}
}
@@ -2384,10 +2464,10 @@ static void raid1d(struct md_thread *thread)
md_check_recovery(mddev);
if (!list_empty_careful(&conf->bio_end_io_list) &&
- !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags);
- if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
while (!list_empty(&conf->bio_end_io_list)) {
list_move(conf->bio_end_io_list.prev, &tmp);
conf->nr_queued--;
@@ -2441,7 +2521,7 @@ static void raid1d(struct md_thread *thread)
generic_make_request(r1_bio->bios[r1_bio->read_disk]);
cond_resched();
- if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+ if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
md_check_recovery(mddev);
}
blk_finish_plug(&plug);
@@ -2623,6 +2703,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_private = r1_bio;
+ if (test_bit(FailFast, &rdev->flags))
+ bio->bi_opf |= MD_FAILFAST;
}
}
rcu_read_unlock();
@@ -2642,7 +2724,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
min_bad, 0
) && ok;
}
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
*skipped = 1;
put_buf(r1_bio);
@@ -2753,6 +2835,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (bio->bi_end_io == end_sync_read) {
read_targets--;
md_sync_acct(bio->bi_bdev, nr_sectors);
+ if (read_targets == 1)
+ bio->bi_opf &= ~MD_FAILFAST;
generic_make_request(bio);
}
}
@@ -2760,6 +2844,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
atomic_set(&r1_bio->remaining, 1);
bio = r1_bio->bios[r1_bio->read_disk];
md_sync_acct(bio->bi_bdev, nr_sectors);
+ if (read_targets == 1)
+ bio->bi_opf &= ~MD_FAILFAST;
generic_make_request(bio);
}
@@ -2875,12 +2961,8 @@ static struct r1conf *setup_conf(struct mddev *mddev)
err = -ENOMEM;
conf->thread = md_register_thread(raid1d, mddev, "raid1");
- if (!conf->thread) {
- printk(KERN_ERR
- "md/raid1:%s: couldn't allocate thread\n",
- mdname(mddev));
+ if (!conf->thread)
goto abort;
- }
return conf;
@@ -2905,13 +2987,13 @@ static int raid1_run(struct mddev *mddev)
bool discard_supported = false;
if (mddev->level != 1) {
- printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
- mdname(mddev), mddev->level);
+ pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
+ mdname(mddev), mddev->level);
return -EIO;
}
if (mddev->reshape_position != MaxSector) {
- printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
- mdname(mddev));
+ pr_warn("md/raid1:%s: reshape_position set but not supported\n",
+ mdname(mddev));
return -EIO;
}
/*
@@ -2950,11 +3032,9 @@ static int raid1_run(struct mddev *mddev)
mddev->recovery_cp = MaxSector;
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "md/raid1:%s: not clean"
- " -- starting background reconstruction\n",
- mdname(mddev));
- printk(KERN_INFO
- "md/raid1:%s: active with %d out of %d mirrors\n",
+ pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
+ mdname(mddev));
+ pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
mdname(mddev), mddev->raid_disks - mddev->degraded,
mddev->raid_disks);
@@ -2964,6 +3044,7 @@ static int raid1_run(struct mddev *mddev)
mddev->thread = conf->thread;
conf->thread = NULL;
mddev->private = conf;
+ set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
@@ -3107,9 +3188,8 @@ static int raid1_reshape(struct mddev *mddev)
rdev->raid_disk = d2;
sysfs_unlink_rdev(mddev, rdev);
if (sysfs_link_rdev(mddev, rdev))
- printk(KERN_WARNING
- "md/raid1:%s: cannot register rd%d\n",
- mdname(mddev), rdev->raid_disk);
+ pr_warn("md/raid1:%s: cannot register rd%d\n",
+ mdname(mddev), rdev->raid_disk);
}
if (rdev)
newmirrors[d2++].rdev = rdev;
@@ -3163,9 +3243,12 @@ static void *raid1_takeover(struct mddev *mddev)
mddev->new_layout = 0;
mddev->new_chunk_sectors = 0;
conf = setup_conf(mddev);
- if (!IS_ERR(conf))
+ if (!IS_ERR(conf)) {
/* Array must appear to be quiesced */
conf->array_frozen = 1;
+ clear_bit(MD_HAS_JOURNAL, &mddev->flags);
+ clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
+ }
return conf;
}
return ERR_PTR(-EINVAL);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 61c39b390cd8..c52ef424a24b 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -161,14 +161,15 @@ struct r1bio {
};
/* bits for r1bio.state */
-#define R1BIO_Uptodate 0
-#define R1BIO_IsSync 1
-#define R1BIO_Degraded 2
-#define R1BIO_BehindIO 3
+enum r1bio_state {
+ R1BIO_Uptodate,
+ R1BIO_IsSync,
+ R1BIO_Degraded,
+ R1BIO_BehindIO,
/* Set ReadError on bios that experience a readerror so that
* raid1d knows what to do with them.
*/
-#define R1BIO_ReadError 4
+ R1BIO_ReadError,
/* For write-behind requests, we call bi_end_io when
* the last non-write-behind device completes, providing
* any write was successful. Otherwise we call when
@@ -176,10 +177,12 @@ struct r1bio {
* with failure when last write completes (and all failed).
* Record that bi_end_io was called with this flag...
*/
-#define R1BIO_Returned 6
+ R1BIO_Returned,
/* If a write for this request means we can clear some
* known-bad-block records, we set this flag
*/
-#define R1BIO_MadeGood 7
-#define R1BIO_WriteError 8
+ R1BIO_MadeGood,
+ R1BIO_WriteError,
+ R1BIO_FailFast,
+};
#endif
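
Turning the R1BIO_* #defines into an enum renumbers the upper bits: the old list skipped 5, so R1BIO_Returned drops from bit 6 to bit 5, MadeGood and WriteError shift down by one, and the new FailFast takes bit 8. That is presumably harmless because r1bio.state only ever lives in memory, but the new layout can be pinned down explicitly if wanted (BUILD_BUG_ON from linux/bug.h; this check is not part of the patch):

	static inline void r1bio_state_layout_check(void)
	{
		BUILD_BUG_ON(R1BIO_ReadError != 4);
		BUILD_BUG_ON(R1BIO_Returned  != 5);	/* was bit 6 as a #define */
		BUILD_BUG_ON(R1BIO_FailFast  != 8);
	}
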
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 39fddda2fef2..ab5e86209322 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -25,6 +25,7 @@
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
+#include <trace/events/block.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
@@ -99,12 +100,16 @@ static int max_queued_requests = 1024;
static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int _enough(struct r10conf *conf, int previous, int ignore);
+static int enough(struct r10conf *conf, int ignore);
static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
int *skipped);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
static void end_reshape_write(struct bio *bio);
static void end_reshape(struct r10conf *conf);
+#define raid10_log(md, fmt, args...) \
+ do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
+
static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
struct r10conf *conf = data;
@@ -404,8 +409,7 @@ static void raid10_end_read_request(struct bio *bio)
* oops, read error - keep the refcount on the rdev
*/
char b[BDEVNAME_SIZE];
- printk_ratelimited(KERN_ERR
- "md/raid10:%s: %s: rescheduling sector %llu\n",
+ pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n",
mdname(conf->mddev),
bdevname(rdev->bdev, b),
(unsigned long long)r10_bio->sector);
@@ -447,6 +451,7 @@ static void raid10_end_write_request(struct bio *bio)
struct r10conf *conf = r10_bio->mddev->private;
int slot, repl;
struct md_rdev *rdev = NULL;
+ struct bio *to_put = NULL;
bool discard_error;
discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
@@ -474,8 +479,24 @@ static void raid10_end_write_request(struct bio *bio)
if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED,
&rdev->mddev->recovery);
- set_bit(R10BIO_WriteError, &r10_bio->state);
+
dec_rdev = 0;
+ if (test_bit(FailFast, &rdev->flags) &&
+ (bio->bi_opf & MD_FAILFAST)) {
+ md_error(rdev->mddev, rdev);
+ if (!test_bit(Faulty, &rdev->flags))
+ /* This is the only remaining device,
+ * We need to retry the write without
+ * FailFast
+ */
+ set_bit(R10BIO_WriteError, &r10_bio->state);
+ else {
+ r10_bio->devs[slot].bio = NULL;
+ to_put = bio;
+ dec_rdev = 1;
+ }
+ } else
+ set_bit(R10BIO_WriteError, &r10_bio->state);
}
} else {
/*
@@ -525,6 +546,8 @@ static void raid10_end_write_request(struct bio *bio)
one_write_done(r10_bio);
if (dec_rdev)
rdev_dec_pending(rdev, conf->mddev);
+ if (to_put)
+ bio_put(to_put);
}
/*
@@ -716,6 +739,7 @@ static struct md_rdev *read_balance(struct r10conf *conf,
best_dist = MaxSector;
best_good_sectors = 0;
do_balance = 1;
+ clear_bit(R10BIO_FailFast, &r10_bio->state);
/*
* Check if we can balance. We can balance on the whole
* device if no resync is going on (recovery is ok), or below
@@ -780,15 +804,18 @@ static struct md_rdev *read_balance(struct r10conf *conf,
if (!do_balance)
break;
+ if (best_slot >= 0)
+ /* At least 2 disks to choose from so failfast is OK */
+ set_bit(R10BIO_FailFast, &r10_bio->state);
/* This optimisation is debatable, and completely destroys
* sequential read speed for 'far copies' arrays. So only
* keep it for 'near' arrays, and review those later.
*/
if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
- break;
+ new_distance = 0;
/* for far > 1 always use the lowest address */
- if (geo->far_copies > 1)
+ else if (geo->far_copies > 1)
new_distance = r10_bio->devs[slot].addr;
else
new_distance = abs(r10_bio->devs[slot].addr -
@@ -859,9 +886,14 @@ static void flush_pending_writes(struct r10conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
- if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ bio->bi_bdev = rdev->bdev;
+ if (test_bit(Faulty, &rdev->flags)) {
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+ } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
else
@@ -937,6 +969,7 @@ static void wait_barrier(struct r10conf *conf)
* that queue to get the nr_pending
* count down.
*/
+ raid10_log(conf->mddev, "wait barrier");
wait_event_lock_irq(conf->wait_barrier,
!conf->barrier ||
(atomic_read(&conf->nr_pending) &&
@@ -1037,9 +1070,14 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
- if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ bio->bi_bdev = rdev->bdev;
+ if (test_bit(Faulty, &rdev->flags)) {
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+ } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
else
@@ -1083,6 +1121,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
/* IO spans the reshape position. Need to wait for
* reshape to pass
*/
+ raid10_log(conf->mddev, "wait reshape");
allow_barrier(conf);
wait_event(conf->wait_barrier,
conf->reshape_progress <= bio->bi_iter.bi_sector ||
@@ -1099,11 +1138,12 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
bio->bi_iter.bi_sector < conf->reshape_progress))) {
/* Need to update reshape_position in metadata */
mddev->reshape_position = conf->reshape_progress;
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
md_wakeup_thread(mddev->thread);
+ raid10_log(conf->mddev, "wait reshape metadata");
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
conf->reshape_safe = mddev->reshape_position;
}
@@ -1154,8 +1194,15 @@ read_again:
read_bio->bi_bdev = rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync);
+ if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R10BIO_FailFast, &r10_bio->state))
+ read_bio->bi_opf |= MD_FAILFAST;
read_bio->bi_private = r10_bio;
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+ read_bio, disk_devt(mddev->gendisk),
+ r10_bio->sector);
if (max_sectors < r10_bio->sectors) {
/* Could not read all from this device, so we will
* need another r10_bio.
@@ -1195,6 +1242,7 @@ read_again:
*/
if (conf->pending_count >= max_queued_requests) {
md_wakeup_thread(mddev->thread);
+ raid10_log(mddev, "wait queued");
wait_event(conf->wait_barrier,
conf->pending_count < max_queued_requests);
}
@@ -1322,6 +1370,7 @@ retry_write:
}
}
allow_barrier(conf);
+ raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
wait_barrier(conf);
goto retry_write;
@@ -1361,8 +1410,18 @@ retry_write:
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
bio_set_op_attrs(mbio, op, do_sync | do_fua);
+ if (test_bit(FailFast, &conf->mirrors[d].rdev->flags) &&
+ enough(conf, d))
+ mbio->bi_opf |= MD_FAILFAST;
mbio->bi_private = r10_bio;
+ if (conf->mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+ mbio, disk_devt(conf->mddev->gendisk),
+ r10_bio->sector);
+ /* flush_pending_writes() needs access to the rdev so...*/
+ mbio->bi_bdev = (void*)rdev;
+
atomic_inc(&r10_bio->remaining);
cb = blk_check_plugged(raid10_unplug, mddev,
@@ -1405,6 +1464,13 @@ retry_write:
bio_set_op_attrs(mbio, op, do_sync | do_fua);
mbio->bi_private = r10_bio;
+ if (conf->mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+ mbio, disk_devt(conf->mddev->gendisk),
+ r10_bio->sector);
+ /* flush_pending_writes() needs access to the rdev so...*/
+ mbio->bi_bdev = (void*)rdev;
+
atomic_inc(&r10_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
@@ -1586,14 +1652,13 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
spin_unlock_irqrestore(&conf->device_lock, flags);
- printk(KERN_ALERT
- "md/raid10:%s: Disk failure on %s, disabling device.\n"
- "md/raid10:%s: Operation continuing on %d devices.\n",
- mdname(mddev), bdevname(rdev->bdev, b),
- mdname(mddev), conf->geo.raid_disks - mddev->degraded);
+ pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
+ "md/raid10:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), conf->geo.raid_disks - mddev->degraded);
}
static void print_conf(struct r10conf *conf)
@@ -1601,13 +1666,13 @@ static void print_conf(struct r10conf *conf)
int i;
struct md_rdev *rdev;
- printk(KERN_DEBUG "RAID10 conf printout:\n");
+ pr_debug("RAID10 conf printout:\n");
if (!conf) {
- printk(KERN_DEBUG "(!conf)\n");
+ pr_debug("(!conf)\n");
return;
}
- printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
- conf->geo.raid_disks);
+ pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
+ conf->geo.raid_disks);
 /* This is only called with ->reconfig_mutex held, so
* rcu protection of rdev is not needed */
@@ -1615,10 +1680,10 @@ static void print_conf(struct r10conf *conf)
char b[BDEVNAME_SIZE];
rdev = conf->mirrors[i].rdev;
if (rdev)
- printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
- i, !test_bit(In_sync, &rdev->flags),
- !test_bit(Faulty, &rdev->flags),
- bdevname(rdev->bdev,b));
+ pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
+ i, !test_bit(In_sync, &rdev->flags),
+ !test_bit(Faulty, &rdev->flags),
+ bdevname(rdev->bdev,b));
}
}
@@ -1953,6 +2018,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
/* now find blocks with errors */
for (i=0 ; i < conf->copies ; i++) {
int j, d;
+ struct md_rdev *rdev;
tbio = r10_bio->devs[i].bio;
@@ -1960,6 +2026,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
continue;
if (i == first)
continue;
+ d = r10_bio->devs[i].devnum;
+ rdev = conf->mirrors[d].rdev;
if (!r10_bio->devs[i].bio->bi_error) {
/* We know that the bi_io_vec layout is the same for
* both 'first' and 'i', so we just compare them.
@@ -1982,6 +2050,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
/* Don't fix anything. */
continue;
+ } else if (test_bit(FailFast, &rdev->flags)) {
+ /* Just give up on this device */
+ md_error(rdev->mddev, rdev);
+ continue;
}
/* Ok, we need to write this bio, either to correct an
* inconsistency or to correct an unreadable block.
@@ -1999,11 +2071,12 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
bio_copy_data(tbio, fbio);
- d = r10_bio->devs[i].devnum;
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
atomic_inc(&r10_bio->remaining);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
+ if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
+ tbio->bi_opf |= MD_FAILFAST;
tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
generic_make_request(tbio);
@@ -2109,10 +2182,8 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
ok = rdev_set_badblocks(rdev2, addr, s, 0);
if (!ok) {
/* just abort the recovery */
- printk(KERN_NOTICE
- "md/raid10:%s: recovery aborted"
- " due to read error\n",
- mdname(mddev));
+ pr_notice("md/raid10:%s: recovery aborted due to read error\n",
+ mdname(mddev));
conf->mirrors[dw].recovery_disabled
= mddev->recovery_disabled;
@@ -2259,14 +2330,11 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
char b[BDEVNAME_SIZE];
bdevname(rdev->bdev, b);
- printk(KERN_NOTICE
- "md/raid10:%s: %s: Raid device exceeded "
- "read_error threshold [cur %d:max %d]\n",
- mdname(mddev), b,
- atomic_read(&rdev->read_errors), max_read_errors);
- printk(KERN_NOTICE
- "md/raid10:%s: %s: Failing raid device\n",
- mdname(mddev), b);
+ pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n",
+ mdname(mddev), b,
+ atomic_read(&rdev->read_errors), max_read_errors);
+ pr_notice("md/raid10:%s: %s: Failing raid device\n",
+ mdname(mddev), b);
md_error(mddev, rdev);
r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
return;
@@ -2356,20 +2424,16 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
s, conf->tmppage, WRITE)
== 0) {
/* Well, this device is dead */
- printk(KERN_NOTICE
- "md/raid10:%s: read correction "
- "write failed"
- " (%d sectors at %llu on %s)\n",
- mdname(mddev), s,
- (unsigned long long)(
- sect +
- choose_data_offset(r10_bio,
- rdev)),
- bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "md/raid10:%s: %s: failing "
- "drive\n",
- mdname(mddev),
- bdevname(rdev->bdev, b));
+ pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n",
+ mdname(mddev), s,
+ (unsigned long long)(
+ sect +
+ choose_data_offset(r10_bio,
+ rdev)),
+ bdevname(rdev->bdev, b));
+ pr_notice("md/raid10:%s: %s: failing drive\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b));
}
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
@@ -2397,24 +2461,18 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
READ)) {
case 0:
/* Well, this device is dead */
- printk(KERN_NOTICE
- "md/raid10:%s: unable to read back "
- "corrected sectors"
- " (%d sectors at %llu on %s)\n",
+ pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(
sect +
choose_data_offset(r10_bio, rdev)),
bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "md/raid10:%s: %s: failing "
- "drive\n",
+ pr_notice("md/raid10:%s: %s: failing drive\n",
mdname(mddev),
bdevname(rdev->bdev, b));
break;
case 1:
- printk(KERN_INFO
- "md/raid10:%s: read error corrected"
- " (%d sectors at %llu on %s)\n",
+ pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(
sect +
@@ -2503,6 +2561,8 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
char b[BDEVNAME_SIZE];
unsigned long do_sync;
int max_sectors;
+ dev_t bio_dev;
+ sector_t bio_last_sector;
/* we got a read error. Maybe the drive is bad. Maybe just
* the block and we can fix it.
@@ -2514,38 +2574,38 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
*/
bio = r10_bio->devs[slot].bio;
bdevname(bio->bi_bdev, b);
+ bio_dev = bio->bi_bdev->bd_dev;
+ bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors;
bio_put(bio);
r10_bio->devs[slot].bio = NULL;
- if (mddev->ro == 0) {
+ if (mddev->ro)
+ r10_bio->devs[slot].bio = IO_BLOCKED;
+ else if (!test_bit(FailFast, &rdev->flags)) {
freeze_array(conf, 1);
fix_read_error(conf, mddev, r10_bio);
unfreeze_array(conf);
} else
- r10_bio->devs[slot].bio = IO_BLOCKED;
+ md_error(mddev, rdev);
rdev_dec_pending(rdev, mddev);
read_more:
rdev = read_balance(conf, r10_bio, &max_sectors);
if (rdev == NULL) {
- printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
- " read error for block %llu\n",
- mdname(mddev), b,
- (unsigned long long)r10_bio->sector);
+ pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
+ mdname(mddev), b,
+ (unsigned long long)r10_bio->sector);
raid_end_bio_io(r10_bio);
return;
}
do_sync = (r10_bio->master_bio->bi_opf & REQ_SYNC);
slot = r10_bio->read_slot;
- printk_ratelimited(
- KERN_ERR
- "md/raid10:%s: %s: redirecting "
- "sector %llu to another mirror\n",
- mdname(mddev),
- bdevname(rdev->bdev, b),
- (unsigned long long)r10_bio->sector);
+ pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b),
+ (unsigned long long)r10_bio->sector);
bio = bio_clone_mddev(r10_bio->master_bio,
GFP_NOIO, mddev);
bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
@@ -2555,8 +2615,15 @@ read_more:
+ choose_data_offset(r10_bio, rdev);
bio->bi_bdev = rdev->bdev;
bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
+ if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R10BIO_FailFast, &r10_bio->state))
+ bio->bi_opf |= MD_FAILFAST;
bio->bi_private = r10_bio;
bio->bi_end_io = raid10_end_read_request;
+ trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+ bio, bio_dev,
+ bio_last_sector - r10_bio->sectors);
+
if (max_sectors < r10_bio->sectors) {
/* Drat - have to split this up more */
struct bio *mbio = r10_bio->master_bio;
@@ -2694,10 +2761,10 @@ static void raid10d(struct md_thread *thread)
md_check_recovery(mddev);
if (!list_empty_careful(&conf->bio_end_io_list) &&
- !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags);
- if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
while (!list_empty(&conf->bio_end_io_list)) {
list_move(conf->bio_end_io_list.prev, &tmp);
conf->nr_queued--;
@@ -2755,7 +2822,7 @@ static void raid10d(struct md_thread *thread)
}
cond_resched();
- if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+ if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
md_check_recovery(mddev);
}
blk_finish_plug(&plug);
@@ -3072,6 +3139,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ if (test_bit(FailFast, &rdev->flags))
+ bio->bi_opf |= MD_FAILFAST;
from_addr = r10_bio->devs[j].addr;
bio->bi_iter.bi_sector = from_addr +
rdev->data_offset;
@@ -3160,8 +3229,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (!any_working) {
if (!test_and_set_bit(MD_RECOVERY_INTR,
&mddev->recovery))
- printk(KERN_INFO "md/raid10:%s: insufficient "
- "working devices for recovery.\n",
+ pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
mdname(mddev));
mirror->recovery_disabled
= mddev->recovery_disabled;
@@ -3178,6 +3246,23 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
rdev_dec_pending(mrdev, mddev);
if (mreplace)
rdev_dec_pending(mreplace, mddev);
+ if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
+ /* Only want this if there is elsewhere to
+ * read from. 'j' is currently the first
+ * readable copy.
+ */
+ int targets = 1;
+ for (; j < conf->copies; j++) {
+ int d = r10_bio->devs[j].devnum;
+ if (conf->mirrors[d].rdev &&
+ test_bit(In_sync,
+ &conf->mirrors[d].rdev->flags))
+ targets++;
+ }
+ if (targets == 1)
+ r10_bio->devs[0].bio->bi_opf
+ &= ~MD_FAILFAST;
+ }
}
if (biolist == NULL) {
while (r10_bio) {
@@ -3256,6 +3341,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
+ bio->bi_opf |= MD_FAILFAST;
bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
count++;
@@ -3279,6 +3366,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
+ bio->bi_opf |= MD_FAILFAST;
bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
count++;
@@ -3489,15 +3578,14 @@ static struct r10conf *setup_conf(struct mddev *mddev)
copies = setup_geo(&geo, mddev, geo_new);
if (copies == -2) {
- printk(KERN_ERR "md/raid10:%s: chunk size must be "
- "at least PAGE_SIZE(%ld) and be a power of 2.\n",
- mdname(mddev), PAGE_SIZE);
+ pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
+ mdname(mddev), PAGE_SIZE);
goto out;
}
if (copies < 2 || copies > mddev->raid_disks) {
- printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
- mdname(mddev), mddev->new_layout);
+ pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
+ mdname(mddev), mddev->new_layout);
goto out;
}
@@ -3557,9 +3645,6 @@ static struct r10conf *setup_conf(struct mddev *mddev)
return conf;
out:
- if (err == -ENOMEM)
- printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
- mdname(mddev));
if (conf) {
mempool_destroy(conf->r10bio_pool);
kfree(conf->mirrors);
@@ -3656,7 +3741,7 @@ static int raid10_run(struct mddev *mddev)
}
/* need to check that every block has at least one working mirror */
if (!enough(conf, -1)) {
- printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
+ pr_err("md/raid10:%s: not enough operational mirrors.\n",
mdname(mddev));
goto out_free_conf;
}
@@ -3698,11 +3783,9 @@ static int raid10_run(struct mddev *mddev)
}
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "md/raid10:%s: not clean"
- " -- starting background reconstruction\n",
- mdname(mddev));
- printk(KERN_INFO
- "md/raid10:%s: active with %d out of %d devices\n",
+ pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
+ mdname(mddev));
+ pr_info("md/raid10:%s: active with %d out of %d devices\n",
mdname(mddev), conf->geo.raid_disks - mddev->degraded,
conf->geo.raid_disks);
/*
@@ -3712,6 +3795,7 @@ static int raid10_run(struct mddev *mddev)
size = raid10_size(mddev, 0, 0);
md_set_array_sectors(mddev, size);
mddev->resync_max_sectors = size;
+ set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
if (mddev->queue) {
int stripe = conf->geo.raid_disks *
@@ -3739,7 +3823,7 @@ static int raid10_run(struct mddev *mddev)
if (max(before_length, after_length) > min_offset_diff) {
/* This cannot work */
- printk("md/raid10: offset difference not enough to continue reshape\n");
+ pr_warn("md/raid10: offset difference not enough to continue reshape\n");
goto out_free_conf;
}
conf->offset_diff = min_offset_diff;
@@ -3846,8 +3930,8 @@ static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
struct r10conf *conf;
if (mddev->degraded > 0) {
- printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
- mdname(mddev));
+ pr_warn("md/raid10:%s: Error: degraded raid0!\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
sector_div(size, devs);
@@ -3887,9 +3971,8 @@ static void *raid10_takeover(struct mddev *mddev)
/* for raid0 takeover only one zone is supported */
raid0_conf = mddev->private;
if (raid0_conf->nr_strip_zones > 1) {
- printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
- " with more than one zone.\n",
- mdname(mddev));
+ pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
return raid10_takeover_raid0(mddev,
@@ -4078,8 +4161,8 @@ static int raid10_start_reshape(struct mddev *mddev)
sector_t size = raid10_size(mddev, 0, 0);
if (size < mddev->array_sectors) {
spin_unlock_irq(&conf->device_lock);
- printk(KERN_ERR "md/raid10:%s: array size must be reduce before number of disks\n",
- mdname(mddev));
+ pr_warn("md/raid10:%s: array size must be reduce before number of disks\n",
+ mdname(mddev));
return -EINVAL;
}
mddev->resync_max_sectors = size;
@@ -4126,7 +4209,7 @@ static int raid10_start_reshape(struct mddev *mddev)
spin_unlock_irq(&conf->device_lock);
mddev->raid_disks = conf->geo.raid_disks;
mddev->reshape_position = conf->reshape_progress;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -4321,9 +4404,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
else
mddev->curr_resync_completed = conf->reshape_progress;
conf->reshape_checkpoint = jiffies;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
- wait_event(mddev->sb_wait, mddev->flags == 0 ||
+ wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
test_bit(MD_RECOVERY_INTR, &mddev->recovery));
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
allow_barrier(conf);
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 18ec1f7a98bf..3162615e57bd 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -156,5 +156,7 @@ enum r10bio_state {
* flag is set
*/
R10BIO_Previous,
+/* failfast devices did receive failfast requests. */
+ R10BIO_FailFast,
};
#endif
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 8491edcfb5a6..d7bfb6fc8aef 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2015 Shaohua Li <shli@fb.com>
+ * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -18,8 +19,10 @@
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
+#include <linux/kthread.h>
#include "md.h"
#include "raid5.h"
+#include "bitmap.h"
/*
* metadata/data stored in disk with 4k size unit (a block) regardless
@@ -28,18 +31,70 @@
#define BLOCK_SECTORS (8)
/*
- * reclaim runs every 1/4 disk size or 10G reclaimable space. This can prevent
- * recovery scans a very long log
+ * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
+ *
+ * In write through mode, the reclaim runs every log->max_free_space.
+ * This prevents recovery from having to scan a very long log
*/
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
+/* wake up reclaim thread periodically */
+#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
+/* start flush with these full stripes */
+#define R5C_FULL_STRIPE_FLUSH_BATCH 256
+/* reclaim stripes in groups */
+#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)
+
/*
* We only need 2 bios per I/O unit to make progress, but ensure we
* have a few more available to not get too tight.
*/
#define R5L_POOL_SIZE 4
+/*
+ * r5c journal modes of the array: write-back or write-through.
+ * write-through mode has identical behavior as existing log only
+ * implementation.
+ */
+enum r5c_journal_mode {
+ R5C_JOURNAL_MODE_WRITE_THROUGH = 0,
+ R5C_JOURNAL_MODE_WRITE_BACK = 1,
+};
+
+static char *r5c_journal_mode_str[] = {"write-through",
+ "write-back"};
+/*
+ * raid5 cache state machine
+ *
+ * With the RAID cache, each stripe works in two phases:
+ * - caching phase
+ * - writing-out phase
+ *
+ * These two phases are controlled by bit STRIPE_R5C_CACHING:
+ * if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
+ * if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
+ *
+ * When there is no journal, or the journal is in write-through mode,
+ * the stripe is always in writing-out phase.
+ *
+ * For write-back journal, the stripe is sent to caching phase on write
+ * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
+ * the write-out phase by clearing STRIPE_R5C_CACHING.
+ *
+ * Stripes in caching phase do not write the raid disks. Instead, all
+ * writes are committed from the log device. Therefore, a stripe in
+ * caching phase handles writes as:
+ * - write to log device
+ * - return IO
+ *
+ * Stripes in writing-out phase handle writes as:
+ * - calculate parity
+ * - write pending data and parity to journal
+ * - write data and parity to raid disks
+ * - return IO for pending writes
+ */
+
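To make the two-phase state machine above concrete, here is a minimal standalone C sketch (userspace only; enum journal_mode, struct stripe_state and handle_stripe_write are hypothetical names, not kernel APIs) showing how a write is routed depending on the journal mode and the caching bit:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the two stripe phases described above. */
enum journal_mode { WRITE_THROUGH, WRITE_BACK };

struct stripe_state {
	enum journal_mode mode;
	bool caching;		/* models STRIPE_R5C_CACHING */
};

/* Route a write the way the comment block above describes. */
static void handle_stripe_write(const struct stripe_state *sh)
{
	if (sh->mode == WRITE_THROUGH || !sh->caching)
		/* writing-out phase */
		puts("compute parity, log data+parity, write raid disks, return IO");
	else
		/* caching phase: raid disks are not touched */
		puts("log data only, return IO");
}

int main(void)
{
	struct stripe_state wb = { .mode = WRITE_BACK, .caching = true };
	struct stripe_state wt = { .mode = WRITE_THROUGH, .caching = false };

	handle_stripe_write(&wb);
	handle_stripe_write(&wt);
	return 0;
}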
struct r5l_log {
struct md_rdev *rdev;
@@ -58,7 +113,6 @@ struct r5l_log {
u64 seq; /* log head sequence */
sector_t next_checkpoint;
- u64 next_cp_seq;
struct mutex io_mutex;
struct r5l_io_unit *current_io; /* current io_unit accepting new data */
@@ -96,6 +150,18 @@ struct r5l_log {
spinlock_t no_space_stripes_lock;
bool need_cache_flush;
+
+ /* for r5c_cache */
+ enum r5c_journal_mode r5c_journal_mode;
+
+ /* all stripes in r5cache, in the order of seq at sh->log_start */
+ struct list_head stripe_in_journal_list;
+
+ spinlock_t stripe_in_journal_lock;
+ atomic_t stripe_in_journal_count;
+
+ /* to submit async io_units, to fulfill ordering of flush */
+ struct work_struct deferred_io_work;
};
/*
@@ -122,6 +188,18 @@ struct r5l_io_unit {
int state;
bool need_split_bio;
+ struct bio *split_bio;
+
+ unsigned int has_flush:1; /* include flush request */
+ unsigned int has_fua:1; /* include fua request */
+ unsigned int has_null_flush:1; /* include empty flush request */
+ /*
+ * io isn't sent yet; a flush/fua request can only be submitted once it
+ * is the first IO in the running_ios list
+ */
+ unsigned int io_deferred:1;
+
+ struct bio_list flush_barriers; /* size == 0 flush bios */
};
/* r5l_io_unit state */
@@ -133,6 +211,12 @@ enum r5l_io_unit_state {
IO_UNIT_STRIPE_END = 3, /* stripes data finished writing to raid */
};
+bool r5c_is_writeback(struct r5l_log *log)
+{
+ return (log != NULL &&
+ log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
+}
+
static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
start += inc;
@@ -168,12 +252,235 @@ static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
io->state = state;
}
+static void
+r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev,
+ struct bio_list *return_bi)
+{
+ struct bio *wbi, *wbi2;
+
+ wbi = dev->written;
+ dev->written = NULL;
+ while (wbi && wbi->bi_iter.bi_sector <
+ dev->sector + STRIPE_SECTORS) {
+ wbi2 = r5_next_bio(wbi, dev->sector);
+ if (!raid5_dec_bi_active_stripes(wbi)) {
+ md_write_end(conf->mddev);
+ bio_list_add(return_bi, wbi);
+ }
+ wbi = wbi2;
+ }
+}
+
+void r5c_handle_cached_data_endio(struct r5conf *conf,
+ struct stripe_head *sh, int disks, struct bio_list *return_bi)
+{
+ int i;
+
+ for (i = sh->disks; i--; ) {
+ if (sh->dev[i].written) {
+ set_bit(R5_UPTODATE, &sh->dev[i].flags);
+ r5c_return_dev_pending_writes(conf, &sh->dev[i],
+ return_bi);
+ bitmap_endwrite(conf->mddev->bitmap, sh->sector,
+ STRIPE_SECTORS,
+ !test_bit(STRIPE_DEGRADED, &sh->state),
+ 0);
+ }
+ }
+}
+
+/* Check whether we should flush some stripes to free up stripe cache */
+void r5c_check_stripe_cache_usage(struct r5conf *conf)
+{
+ int total_cached;
+
+ if (!r5c_is_writeback(conf->log))
+ return;
+
+ total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
+ atomic_read(&conf->r5c_cached_full_stripes);
+
+ /*
+ * The following condition is true for either of the following:
+ * - stripe cache pressure high:
+ * total_cached > 3/4 min_nr_stripes ||
+ * empty_inactive_list_nr > 0
+ * - stripe cache pressure moderate:
+ * total_cached > 1/2 min_nr_stripes
+ */
+ if (total_cached > conf->min_nr_stripes * 1 / 2 ||
+ atomic_read(&conf->empty_inactive_list_nr) > 0)
+ r5l_wake_reclaim(conf->log, 0);
+}
+
+/*
+ * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
+ * stripes in the cache
+ */
+void r5c_check_cached_full_stripe(struct r5conf *conf)
+{
+ if (!r5c_is_writeback(conf->log))
+ return;
+
+ /*
+ * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
+ * or a full stripe (chunk size / 4k stripes).
+ */
+ if (atomic_read(&conf->r5c_cached_full_stripes) >=
+ min(R5C_FULL_STRIPE_FLUSH_BATCH,
+ conf->chunk_sectors >> STRIPE_SHIFT))
+ r5l_wake_reclaim(conf->log, 0);
+}
+
+/*
+ * Total log space (in sectors) needed to flush all data in cache
+ *
+ * Currently, writing-out phase automatically includes all pending writes
+ * to the same sector. So the reclaim of each stripe takes up to
+ * (conf->raid_disks + 1) pages of log space.
+ *
+ * To totally avoid deadlock due to log space, the code reserves
+ * (conf->raid_disks + 1) pages for each stripe in cache, which is not
+ * necessary in most cases.
+ *
+ * To improve this, we will need writing-out phase to be able to NOT include
+ * pending writes, which will reduce the requirement to
+ * (conf->max_degraded + 1) pages per stripe in cache.
+ */
+static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
+{
+ struct r5l_log *log = conf->log;
+
+ if (!r5c_is_writeback(log))
+ return 0;
+
+ return BLOCK_SECTORS * (conf->raid_disks + 1) *
+ atomic_read(&log->stripe_in_journal_count);
+}
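A quick worked example of the reservation described above, as a standalone sketch (the 8-sector block size mirrors BLOCK_SECTORS; the array geometry and cache occupancy are assumed values):

#include <stdio.h>

int main(void)
{
	const long block_sectors = 8;		/* 4k block, as above */
	const long raid_disks = 6;		/* assumed geometry */
	const long stripes_in_journal = 1000;	/* assumed cache occupancy */

	/* BLOCK_SECTORS * (raid_disks + 1) * stripe_in_journal_count */
	long required = block_sectors * (raid_disks + 1) * stripes_in_journal;

	printf("reserved log space: %ld sectors (~%ld MiB)\n",
	       required, required / 2048);
	return 0;
}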
+
+/*
+ * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
+ *
+ * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
+ * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
+ * device is less than 2x of reclaim_required_space.
+ */
+static inline void r5c_update_log_state(struct r5l_log *log)
+{
+ struct r5conf *conf = log->rdev->mddev->private;
+ sector_t free_space;
+ sector_t reclaim_space;
+ bool wake_reclaim = false;
+
+ if (!r5c_is_writeback(log))
+ return;
+
+ free_space = r5l_ring_distance(log, log->log_start,
+ log->last_checkpoint);
+ reclaim_space = r5c_log_required_to_flush_cache(conf);
+ if (free_space < 2 * reclaim_space)
+ set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
+ else {
+ if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
+ wake_reclaim = true;
+ clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
+ }
+ if (free_space < 3 * reclaim_space)
+ set_bit(R5C_LOG_TIGHT, &conf->cache_state);
+ else
+ clear_bit(R5C_LOG_TIGHT, &conf->cache_state);
+
+ if (wake_reclaim)
+ r5l_wake_reclaim(log, 0);
+}
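The 2x/3x thresholds above amount to a three-way classification of the log; a minimal sketch under assumed sector counts (log_state is a hypothetical helper, not a kernel function):

#include <stdio.h>

/* Illustrative model of the R5C_LOG_CRITICAL / R5C_LOG_TIGHT thresholds. */
static const char *log_state(long free_space, long reclaim_space)
{
	if (free_space < 2 * reclaim_space)
		return "CRITICAL (and TIGHT)";
	if (free_space < 3 * reclaim_space)
		return "TIGHT";
	return "normal";
}

int main(void)
{
	const long reclaim_space = 56000;	/* sectors, assumed */

	printf("%s\n", log_state(200000, reclaim_space));	/* normal */
	printf("%s\n", log_state(150000, reclaim_space));	/* TIGHT */
	printf("%s\n", log_state(100000, reclaim_space));	/* CRITICAL */
	return 0;
}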
+
+/*
+ * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
+ * This function should only be called in write-back mode.
+ */
+void r5c_make_stripe_write_out(struct stripe_head *sh)
+{
+ struct r5conf *conf = sh->raid_conf;
+ struct r5l_log *log = conf->log;
+
+ BUG_ON(!r5c_is_writeback(log));
+
+ WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+ clear_bit(STRIPE_R5C_CACHING, &sh->state);
+
+ if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ atomic_inc(&conf->preread_active_stripes);
+
+ if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
+ BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
+ atomic_dec(&conf->r5c_cached_partial_stripes);
+ }
+
+ if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
+ BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
+ atomic_dec(&conf->r5c_cached_full_stripes);
+ }
+}
+
+static void r5c_handle_data_cached(struct stripe_head *sh)
+{
+ int i;
+
+ for (i = sh->disks; i--; )
+ if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
+ set_bit(R5_InJournal, &sh->dev[i].flags);
+ clear_bit(R5_LOCKED, &sh->dev[i].flags);
+ }
+ clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
+}
+
+/*
+ * this journal write must contain full parity,
+ * it may also contain some data pages
+ */
+static void r5c_handle_parity_cached(struct stripe_head *sh)
+{
+ int i;
+
+ for (i = sh->disks; i--; )
+ if (test_bit(R5_InJournal, &sh->dev[i].flags))
+ set_bit(R5_Wantwrite, &sh->dev[i].flags);
+}
+
+/*
+ * Setting proper flags after writing (or flushing) data and/or parity to the
+ * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
+ */
+static void r5c_finish_cache_stripe(struct stripe_head *sh)
+{
+ struct r5l_log *log = sh->raid_conf->log;
+
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
+ BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
+ /*
+ * Set R5_InJournal for parity dev[pd_idx]. This means
+ * all data AND parity in the journal. For RAID 6, it is
+ * NOT necessary to set the flag for dev[qd_idx], as the
+ * two parities are written out together.
+ */
+ set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
+ } else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+ r5c_handle_data_cached(sh);
+ } else {
+ r5c_handle_parity_cached(sh);
+ set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
+ }
+}
+
static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
struct stripe_head *sh, *next;
list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
list_del_init(&sh->log_list);
+
+ r5c_finish_cache_stripe(sh);
+
set_bit(STRIPE_HANDLE, &sh->state);
raid5_release_stripe(sh);
}
@@ -209,9 +516,11 @@ static void r5l_move_to_end_ios(struct r5l_log *log)
}
}
+static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
static void r5l_log_endio(struct bio *bio)
{
struct r5l_io_unit *io = bio->bi_private;
+ struct r5l_io_unit *io_deferred;
struct r5l_log *log = io->log;
unsigned long flags;
@@ -227,18 +536,89 @@ static void r5l_log_endio(struct bio *bio)
r5l_move_to_end_ios(log);
else
r5l_log_run_stripes(log);
+ if (!list_empty(&log->running_ios)) {
+ /*
+ * FLUSH/FUA io_unit is deferred because of ordering, now we
+ * can dispatch it
+ */
+ io_deferred = list_first_entry(&log->running_ios,
+ struct r5l_io_unit, log_sibling);
+ if (io_deferred->io_deferred)
+ schedule_work(&log->deferred_io_work);
+ }
+
spin_unlock_irqrestore(&log->io_list_lock, flags);
if (log->need_cache_flush)
md_wakeup_thread(log->rdev->mddev->thread);
+
+ if (io->has_null_flush) {
+ struct bio *bi;
+
+ WARN_ON(bio_list_empty(&io->flush_barriers));
+ while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
+ bio_endio(bi);
+ atomic_dec(&io->pending_stripe);
+ }
+ if (atomic_read(&io->pending_stripe) == 0)
+ __r5l_stripe_write_finished(io);
+ }
+}
+
+static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&log->io_list_lock, flags);
+ __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
+ spin_unlock_irqrestore(&log->io_list_lock, flags);
+
+ if (io->has_flush)
+ io->current_bio->bi_opf |= REQ_PREFLUSH;
+ if (io->has_fua)
+ io->current_bio->bi_opf |= REQ_FUA;
+ submit_bio(io->current_bio);
+
+ if (!io->split_bio)
+ return;
+
+ if (io->has_flush)
+ io->split_bio->bi_opf |= REQ_PREFLUSH;
+ if (io->has_fua)
+ io->split_bio->bi_opf |= REQ_FUA;
+ submit_bio(io->split_bio);
+}
+
+/* deferred io_unit will be dispatched here */
+static void r5l_submit_io_async(struct work_struct *work)
+{
+ struct r5l_log *log = container_of(work, struct r5l_log,
+ deferred_io_work);
+ struct r5l_io_unit *io = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&log->io_list_lock, flags);
+ if (!list_empty(&log->running_ios)) {
+ io = list_first_entry(&log->running_ios, struct r5l_io_unit,
+ log_sibling);
+ if (!io->io_deferred)
+ io = NULL;
+ else
+ io->io_deferred = 0;
+ }
+ spin_unlock_irqrestore(&log->io_list_lock, flags);
+ if (io)
+ r5l_do_submit_io(log, io);
}
static void r5l_submit_current_io(struct r5l_log *log)
{
struct r5l_io_unit *io = log->current_io;
+ struct bio *bio;
struct r5l_meta_block *block;
unsigned long flags;
u32 crc;
+ bool do_submit = true;
if (!io)
return;
@@ -247,13 +627,20 @@ static void r5l_submit_current_io(struct r5l_log *log)
block->meta_size = cpu_to_le32(io->meta_offset);
crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
block->checksum = cpu_to_le32(crc);
+ bio = io->current_bio;
log->current_io = NULL;
spin_lock_irqsave(&log->io_list_lock, flags);
- __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
+ if (io->has_flush || io->has_fua) {
+ if (io != list_first_entry(&log->running_ios,
+ struct r5l_io_unit, log_sibling)) {
+ io->io_deferred = 1;
+ do_submit = false;
+ }
+ }
spin_unlock_irqrestore(&log->io_list_lock, flags);
-
- submit_bio(io->current_bio);
+ if (do_submit)
+ r5l_do_submit_io(log, io);
}
static struct bio *r5l_bio_alloc(struct r5l_log *log)
@@ -271,6 +658,7 @@ static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
+ r5c_update_log_state(log);
/*
* If we filled up the log device start from the beginning again,
* which will require a new bio.
@@ -297,6 +685,7 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
io->log = log;
INIT_LIST_HEAD(&io->log_sibling);
INIT_LIST_HEAD(&io->stripe_list);
+ bio_list_init(&io->flush_barriers);
io->state = IO_UNIT_RUNNING;
io->meta_page = mempool_alloc(log->meta_pool, GFP_NOIO);
@@ -367,12 +756,11 @@ static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
struct r5l_io_unit *io = log->current_io;
if (io->need_split_bio) {
- struct bio *prev = io->current_bio;
-
+ BUG_ON(io->split_bio);
+ io->split_bio = io->current_bio;
io->current_bio = r5l_bio_alloc(log);
- bio_chain(io->current_bio, prev);
-
- submit_bio(prev);
+ bio_chain(io->current_bio, io->split_bio);
+ io->need_split_bio = false;
}
if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
@@ -401,50 +789,85 @@ static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
io = log->current_io;
+ if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
+ io->has_flush = 1;
+
for (i = 0; i < sh->disks; i++) {
- if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
+ if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
+ test_bit(R5_InJournal, &sh->dev[i].flags))
continue;
if (i == sh->pd_idx || i == sh->qd_idx)
continue;
+ if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
+ log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
+ io->has_fua = 1;
+ /*
+ * we need to flush journal to make sure recovery can
+ * reach the data with fua flag
+ */
+ io->has_flush = 1;
+ }
r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
raid5_compute_blocknr(sh, i, 0),
sh->dev[i].log_checksum, 0, false);
r5l_append_payload_page(log, sh->dev[i].page);
}
- if (sh->qd_idx >= 0) {
+ if (parity_pages == 2) {
r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
sh->sector, sh->dev[sh->pd_idx].log_checksum,
sh->dev[sh->qd_idx].log_checksum, true);
r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
- } else {
+ } else if (parity_pages == 1) {
r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
sh->sector, sh->dev[sh->pd_idx].log_checksum,
0, false);
r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
- }
+ } else /* Just writing data, not parity, in caching phase */
+ BUG_ON(parity_pages != 0);
list_add_tail(&sh->log_list, &io->stripe_list);
atomic_inc(&io->pending_stripe);
sh->log_io = io;
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+ return 0;
+
+ if (sh->log_start == MaxSector) {
+ BUG_ON(!list_empty(&sh->r5c));
+ sh->log_start = io->log_start;
+ spin_lock_irq(&log->stripe_in_journal_lock);
+ list_add_tail(&sh->r5c,
+ &log->stripe_in_journal_list);
+ spin_unlock_irq(&log->stripe_in_journal_lock);
+ atomic_inc(&log->stripe_in_journal_count);
+ }
return 0;
}
-static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
+/* add stripe to no_space_stripes, and then wake up reclaim */
+static inline void r5l_add_no_space_stripe(struct r5l_log *log,
+ struct stripe_head *sh)
+{
+ spin_lock(&log->no_space_stripes_lock);
+ list_add_tail(&sh->log_list, &log->no_space_stripes);
+ spin_unlock(&log->no_space_stripes_lock);
+}
+
/*
* running in raid5d, where reclaim could wait for raid5d too (when it flushes
* data from log to raid disks), so we shouldn't wait for reclaim here
*/
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
+ struct r5conf *conf = sh->raid_conf;
int write_disks = 0;
int data_pages, parity_pages;
- int meta_size;
int reserve;
int i;
int ret = 0;
+ bool wake_reclaim = false;
if (!log)
return -EAGAIN;
@@ -456,11 +879,15 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
return -EAGAIN;
}
+ WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
+
for (i = 0; i < sh->disks; i++) {
void *addr;
- if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
+ if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
+ test_bit(R5_InJournal, &sh->dev[i].flags))
continue;
+
write_disks++;
/* checksum is already calculated in last run */
if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
@@ -473,15 +900,6 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
parity_pages = 1 + !!(sh->qd_idx >= 0);
data_pages = write_disks - parity_pages;
- meta_size =
- ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
- * data_pages) +
- sizeof(struct r5l_payload_data_parity) +
- sizeof(__le32) * parity_pages;
- /* Doesn't work with very big raid array */
- if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
- return -EINVAL;
-
set_bit(STRIPE_LOG_TRAPPED, &sh->state);
/*
* The stripe must enter state machine again to finish the write, so
@@ -493,22 +911,49 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
mutex_lock(&log->io_mutex);
/* meta + data */
reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
- if (!r5l_has_free_space(log, reserve)) {
- spin_lock(&log->no_space_stripes_lock);
- list_add_tail(&sh->log_list, &log->no_space_stripes);
- spin_unlock(&log->no_space_stripes_lock);
- r5l_wake_reclaim(log, reserve);
- } else {
- ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
- if (ret) {
- spin_lock_irq(&log->io_list_lock);
- list_add_tail(&sh->log_list, &log->no_mem_stripes);
- spin_unlock_irq(&log->io_list_lock);
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
+ if (!r5l_has_free_space(log, reserve)) {
+ r5l_add_no_space_stripe(log, sh);
+ wake_reclaim = true;
+ } else {
+ ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
+ if (ret) {
+ spin_lock_irq(&log->io_list_lock);
+ list_add_tail(&sh->log_list,
+ &log->no_mem_stripes);
+ spin_unlock_irq(&log->io_list_lock);
+ }
+ }
+ } else { /* R5C_JOURNAL_MODE_WRITE_BACK */
+ /*
+ * log space critical, do not process stripes that are
+ * not in cache yet (sh->log_start == MaxSector).
+ */
+ if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
+ sh->log_start == MaxSector) {
+ r5l_add_no_space_stripe(log, sh);
+ wake_reclaim = true;
+ reserve = 0;
+ } else if (!r5l_has_free_space(log, reserve)) {
+ if (sh->log_start == log->last_checkpoint)
+ BUG();
+ else
+ r5l_add_no_space_stripe(log, sh);
+ } else {
+ ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
+ if (ret) {
+ spin_lock_irq(&log->io_list_lock);
+ list_add_tail(&sh->log_list,
+ &log->no_mem_stripes);
+ spin_unlock_irq(&log->io_list_lock);
+ }
}
}
mutex_unlock(&log->io_mutex);
+ if (wake_reclaim)
+ r5l_wake_reclaim(log, reserve);
return 0;
}
@@ -525,17 +970,34 @@ int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
if (!log)
return -ENODEV;
- /*
- * we flush log disk cache first, then write stripe data to raid disks.
- * So if bio is finished, the log disk cache is flushed already. The
- * recovery guarantees we can recovery the bio from log disk, so we
- * don't need to flush again
- */
- if (bio->bi_iter.bi_size == 0) {
- bio_endio(bio);
- return 0;
+
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
+ /*
+ * in write through (journal only)
+ * we flush log disk cache first, then write stripe data to
+ * raid disks. So if bio is finished, the log disk cache is
+ * flushed already. The recovery guarantees we can recovery
+ * the bio from log disk, so we don't need to flush again
+ */
+ if (bio->bi_iter.bi_size == 0) {
+ bio_endio(bio);
+ return 0;
+ }
+ bio->bi_opf &= ~REQ_PREFLUSH;
+ } else {
+ /* write back (with cache) */
+ if (bio->bi_iter.bi_size == 0) {
+ mutex_lock(&log->io_mutex);
+ r5l_get_meta(log, 0);
+ bio_list_add(&log->current_io->flush_barriers, bio);
+ log->current_io->has_flush = 1;
+ log->current_io->has_null_flush = 1;
+ atomic_inc(&log->current_io->pending_stripe);
+ r5l_submit_current_io(log);
+ mutex_unlock(&log->io_mutex);
+ return 0;
+ }
}
- bio->bi_opf &= ~REQ_PREFLUSH;
return -EAGAIN;
}
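A rough standalone model of the empty-flush handling described in the comments above (illustrative only, under the assumption that the caller falls back to the normal write path on -EAGAIN; handle_flush and enum mode are hypothetical names):

#include <stdio.h>

enum mode { WRITE_THROUGH, WRITE_BACK };

/* Illustrative model of how a flush request is treated in each mode. */
static const char *handle_flush(enum mode m, unsigned int bio_size)
{
	if (bio_size == 0)
		return m == WRITE_THROUGH
			? "complete immediately: log cache is already flushed"
			: "queue as a flush barrier on the current io_unit";
	return m == WRITE_THROUGH
		? "strip REQ_PREFLUSH, return -EAGAIN for the normal path"
		: "return -EAGAIN for the normal path";
}

int main(void)
{
	printf("%s\n", handle_flush(WRITE_THROUGH, 0));
	printf("%s\n", handle_flush(WRITE_BACK, 0));
	printf("%s\n", handle_flush(WRITE_THROUGH, 4096));
	return 0;
}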
@@ -555,10 +1017,40 @@ static void r5l_run_no_space_stripes(struct r5l_log *log)
spin_unlock(&log->no_space_stripes_lock);
}
+/*
+ * calculate new last_checkpoint
+ * for write through mode, returns log->next_checkpoint
+ * for write back, returns log_start of first sh in stripe_in_journal_list
+ */
+static sector_t r5c_calculate_new_cp(struct r5conf *conf)
+{
+ struct stripe_head *sh;
+ struct r5l_log *log = conf->log;
+ sector_t new_cp;
+ unsigned long flags;
+
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+ return log->next_checkpoint;
+
+ spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
+ if (list_empty(&conf->log->stripe_in_journal_list)) {
+ /* all stripes flushed */
+ spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
+ return log->next_checkpoint;
+ }
+ sh = list_first_entry(&conf->log->stripe_in_journal_list,
+ struct stripe_head, r5c);
+ new_cp = sh->log_start;
+ spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
+ return new_cp;
+}
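The reclaimable space below is a ring distance from last_checkpoint to the new checkpoint computed above; a small sketch of that wrap-around arithmetic (ring_distance and the sizes are assumptions for illustration):

#include <stdio.h>

/* Distance from start to end on a circular log of device_size sectors. */
static long ring_distance(long device_size, long start, long end)
{
	long d = end - start;

	return d < 0 ? d + device_size : d;
}

int main(void)
{
	const long device_size = 1L << 20;	/* log size in sectors, assumed */

	/* checkpoint moved forward without wrapping */
	printf("%ld\n", ring_distance(device_size, 1000, 5000));
	/* checkpoint wrapped past the end of the ring */
	printf("%ld\n", ring_distance(device_size, 1040000, 2000));
	return 0;
}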
+
static sector_t r5l_reclaimable_space(struct r5l_log *log)
{
+ struct r5conf *conf = log->rdev->mddev->private;
+
return r5l_ring_distance(log, log->last_checkpoint,
- log->next_checkpoint);
+ r5c_calculate_new_cp(conf));
}
static void r5l_run_no_mem_stripe(struct r5l_log *log)
@@ -589,7 +1081,6 @@ static bool r5l_complete_finished_ios(struct r5l_log *log)
break;
log->next_checkpoint = io->log_start;
- log->next_cp_seq = io->seq;
list_del(&io->log_sibling);
mempool_free(io, log->io_pool);
@@ -604,6 +1095,7 @@ static bool r5l_complete_finished_ios(struct r5l_log *log)
static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
{
struct r5l_log *log = io->log;
+ struct r5conf *conf = log->rdev->mddev->private;
unsigned long flags;
spin_lock_irqsave(&log->io_list_lock, flags);
@@ -614,7 +1106,8 @@ static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
return;
}
- if (r5l_reclaimable_space(log) > log->max_free_space)
+ if (r5l_reclaimable_space(log) > log->max_free_space ||
+ test_bit(R5C_LOG_TIGHT, &conf->cache_state))
r5l_wake_reclaim(log, 0);
spin_unlock_irqrestore(&log->io_list_lock, flags);
@@ -713,8 +1206,8 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
* there is a deadlock. We workaround this issue with a trylock.
* FIXME: we could miss discard if we can't take reconfig mutex
*/
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
if (!mddev_trylock(mddev))
return;
md_update_sb(mddev, 1);
@@ -735,15 +1228,148 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
}
}
+/*
+ * r5c_flush_stripe moves stripe from cached list to handle_list. When called,
+ * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
+ *
+ * must hold conf->device_lock
+ */
+static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
+{
+ BUG_ON(list_empty(&sh->lru));
+ BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+ BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
+
+ /*
+ * The stripe is not ON_RELEASE_LIST, so it is safe to call
+ * raid5_release_stripe() while holding conf->device_lock
+ */
+ BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
+ assert_spin_locked(&conf->device_lock);
+
+ list_del_init(&sh->lru);
+ atomic_inc(&sh->count);
+
+ set_bit(STRIPE_HANDLE, &sh->state);
+ atomic_inc(&conf->active_stripes);
+ r5c_make_stripe_write_out(sh);
+
+ raid5_release_stripe(sh);
+}
+
+/*
+ * if num == 0, flush all full stripes
+ * if num > 0, flush all full stripes. If fewer than num full stripes are
+ *     flushed, flush some partial stripes until a total of num stripes are
+ *     flushed or there are no more cached stripes.
+ */
+void r5c_flush_cache(struct r5conf *conf, int num)
+{
+ int count;
+ struct stripe_head *sh, *next;
+
+ assert_spin_locked(&conf->device_lock);
+ if (!conf->log)
+ return;
+
+ count = 0;
+ list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
+ r5c_flush_stripe(conf, sh);
+ count++;
+ }
+
+ if (count >= num)
+ return;
+ list_for_each_entry_safe(sh, next,
+ &conf->r5c_partial_stripe_list, lru) {
+ r5c_flush_stripe(conf, sh);
+ if (++count >= num)
+ break;
+ }
+}
+
+static void r5c_do_reclaim(struct r5conf *conf)
+{
+ struct r5l_log *log = conf->log;
+ struct stripe_head *sh;
+ int count = 0;
+ unsigned long flags;
+ int total_cached;
+ int stripes_to_flush;
+
+ if (!r5c_is_writeback(log))
+ return;
+
+ total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
+ atomic_read(&conf->r5c_cached_full_stripes);
+
+ if (total_cached > conf->min_nr_stripes * 3 / 4 ||
+ atomic_read(&conf->empty_inactive_list_nr) > 0)
+ /*
+ * if stripe cache pressure is high, flush all full stripes and
+ * some partial stripes
+ */
+ stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
+ else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
+ atomic_read(&conf->r5c_cached_full_stripes) >
+ R5C_FULL_STRIPE_FLUSH_BATCH)
+ /*
+ * if stripe cache pressure is moderate, or if there are many full
+ * stripes, flush all full stripes
+ */
+ stripes_to_flush = 0;
+ else
+ /* no need to flush */
+ stripes_to_flush = -1;
+
+ if (stripes_to_flush >= 0) {
+ spin_lock_irqsave(&conf->device_lock, flags);
+ r5c_flush_cache(conf, stripes_to_flush);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ }
+
+ /* if log space is tight, flush stripes on stripe_in_journal_list */
+ if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
+ spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
+ spin_lock(&conf->device_lock);
+ list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
+ /*
+ * stripes on stripe_in_journal_list could be in any
+ * state of the stripe_cache state machine. In this
+ * case, we only want to flush stripe on
+ * r5c_cached_full/partial_stripes. The following
+ * condition makes sure the stripe is on one of the
+ * two lists.
+ */
+ if (!list_empty(&sh->lru) &&
+ !test_bit(STRIPE_HANDLE, &sh->state) &&
+ atomic_read(&sh->count) == 0) {
+ r5c_flush_stripe(conf, sh);
+ }
+ if (count++ >= R5C_RECLAIM_STRIPE_GROUP)
+ break;
+ }
+ spin_unlock(&conf->device_lock);
+ spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
+ }
+
+ if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
+ r5l_run_no_space_stripes(log);
+
+ md_wakeup_thread(conf->mddev->thread);
+}
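The flush decision in r5c_do_reclaim boils down to a three-way choice; here is a standalone sketch of that decision under assumed counter values (stripes_to_flush here is only a model, and the 16-stripe group size is an assumption mirroring R5C_RECLAIM_STRIPE_GROUP):

#include <stdio.h>

#define FULL_STRIPE_FLUSH_BATCH	256	/* mirrors R5C_FULL_STRIPE_FLUSH_BATCH */
#define RECLAIM_STRIPE_GROUP	16	/* assumed NR_STRIPE_HASH_LOCKS * 2 */

/* Model of the decision above: >= 0 means flush, -1 means nothing to do. */
static int stripes_to_flush(int partial, int full, int min_nr_stripes,
			    int empty_inactive)
{
	int total_cached = partial + full;

	if (total_cached > min_nr_stripes * 3 / 4 || empty_inactive > 0)
		return RECLAIM_STRIPE_GROUP;	/* high pressure */
	if (total_cached > min_nr_stripes / 2 ||
	    full > FULL_STRIPE_FLUSH_BATCH)
		return 0;			/* moderate: full stripes only */
	return -1;				/* no need to flush */
}

int main(void)
{
	printf("%d\n", stripes_to_flush(200, 100, 256, 0));	/* 16 */
	printf("%d\n", stripes_to_flush(100, 50, 256, 0));	/* 0 */
	printf("%d\n", stripes_to_flush(10, 5, 256, 0));	/* -1 */
	return 0;
}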
static void r5l_do_reclaim(struct r5l_log *log)
{
+ struct r5conf *conf = log->rdev->mddev->private;
sector_t reclaim_target = xchg(&log->reclaim_target, 0);
sector_t reclaimable;
sector_t next_checkpoint;
- u64 next_cp_seq;
+ bool write_super;
spin_lock_irq(&log->io_list_lock);
+ write_super = r5l_reclaimable_space(log) > log->max_free_space ||
+ reclaim_target != 0 || !list_empty(&log->no_space_stripes);
/*
* move proper io_unit to reclaim list. We should not change the order.
* reclaimable/unreclaimable io_unit can be mixed in the list, we
@@ -764,12 +1390,12 @@ static void r5l_do_reclaim(struct r5l_log *log)
log->io_list_lock);
}
- next_checkpoint = log->next_checkpoint;
- next_cp_seq = log->next_cp_seq;
+ next_checkpoint = r5c_calculate_new_cp(conf);
spin_unlock_irq(&log->io_list_lock);
BUG_ON(reclaimable < 0);
- if (reclaimable == 0)
+
+ if (reclaimable == 0 || !write_super)
return;
/*
@@ -781,7 +1407,7 @@ static void r5l_do_reclaim(struct r5l_log *log)
mutex_lock(&log->io_mutex);
log->last_checkpoint = next_checkpoint;
- log->last_cp_seq = next_cp_seq;
+ r5c_update_log_state(log);
mutex_unlock(&log->io_mutex);
r5l_run_no_space_stripes(log);
@@ -795,14 +1421,17 @@ static void r5l_reclaim_thread(struct md_thread *thread)
if (!log)
return;
+ r5c_do_reclaim(conf);
r5l_do_reclaim(log);
}
-static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
+void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
unsigned long target;
unsigned long new = (unsigned long)space; /* overflow in theory */
+ if (!log)
+ return;
do {
target = log->reclaim_target;
if (new < target)
@@ -816,22 +1445,14 @@ void r5l_quiesce(struct r5l_log *log, int state)
struct mddev *mddev;
if (!log || state == 2)
return;
- if (state == 0) {
- /*
- * This is a special case for hotadd. In suspend, the array has
- * no journal. In resume, journal is initialized as well as the
- * reclaim thread.
- */
- if (log->reclaim_thread)
- return;
- log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
- log->rdev->mddev, "reclaim");
- } else if (state == 1) {
+ if (state == 0)
+ kthread_unpark(log->reclaim_thread->tsk);
+ else if (state == 1) {
/* make sure r5l_write_super_and_discard_space exits */
mddev = log->rdev->mddev;
wake_up(&mddev->sb_wait);
- r5l_wake_reclaim(log, -1L);
- md_unregister_thread(&log->reclaim_thread);
+ kthread_park(log->reclaim_thread->tsk);
+ r5l_wake_reclaim(log, MaxSector);
r5l_do_reclaim(log);
}
}
@@ -857,10 +1478,13 @@ struct r5l_recovery_ctx {
sector_t meta_total_blocks; /* total size of current meta and data */
sector_t pos; /* recovery position */
u64 seq; /* recovery position seq */
+ int data_parity_stripes; /* number of data_parity stripes */
+ int data_only_stripes; /* number of data_only stripes */
+ struct list_head cached_list;
};
-static int r5l_read_meta_block(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx)
+static int r5l_recovery_read_meta_block(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
{
struct page *page = ctx->meta_page;
struct r5l_meta_block *mb;
@@ -892,170 +1516,618 @@ static int r5l_read_meta_block(struct r5l_log *log,
return 0;
}
-static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx,
- sector_t stripe_sect,
- int *offset, sector_t *log_offset)
+static void
+r5l_recovery_create_empty_meta_block(struct r5l_log *log,
+ struct page *page,
+ sector_t pos, u64 seq)
{
- struct r5conf *conf = log->rdev->mddev->private;
- struct stripe_head *sh;
- struct r5l_payload_data_parity *payload;
- int disk_index;
+ struct r5l_meta_block *mb;
- sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
- while (1) {
- payload = page_address(ctx->meta_page) + *offset;
+ mb = page_address(page);
+ clear_page(mb);
+ mb->magic = cpu_to_le32(R5LOG_MAGIC);
+ mb->version = R5LOG_VERSION;
+ mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
+ mb->seq = cpu_to_le64(seq);
+ mb->position = cpu_to_le64(pos);
+}
- if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
- raid5_compute_sector(conf,
- le64_to_cpu(payload->location), 0,
- &disk_index, sh);
+static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
+ u64 seq)
+{
+ struct page *page;
+ struct r5l_meta_block *mb;
- sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
- sh->dev[disk_index].page, REQ_OP_READ, 0,
- false);
- sh->dev[disk_index].log_checksum =
- le32_to_cpu(payload->checksum[0]);
- set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
- ctx->meta_total_blocks += BLOCK_SECTORS;
- } else {
- disk_index = sh->pd_idx;
- sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
- sh->dev[disk_index].page, REQ_OP_READ, 0,
- false);
- sh->dev[disk_index].log_checksum =
- le32_to_cpu(payload->checksum[0]);
- set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
-
- if (sh->qd_idx >= 0) {
- disk_index = sh->qd_idx;
- sync_page_io(log->rdev,
- r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
- PAGE_SIZE, sh->dev[disk_index].page,
- REQ_OP_READ, 0, false);
- sh->dev[disk_index].log_checksum =
- le32_to_cpu(payload->checksum[1]);
- set_bit(R5_Wantwrite,
- &sh->dev[disk_index].flags);
- }
- ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
- }
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+ r5l_recovery_create_empty_meta_block(log, page, pos, seq);
+ mb = page_address(page);
+ mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
+ mb, PAGE_SIZE));
+ if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
+ REQ_FUA, false)) {
+ __free_page(page);
+ return -EIO;
+ }
+ __free_page(page);
+ return 0;
+}
- *log_offset = r5l_ring_add(log, *log_offset,
- le32_to_cpu(payload->size));
- *offset += sizeof(struct r5l_payload_data_parity) +
- sizeof(__le32) *
- (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
- if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
- break;
+/*
+ * r5l_recovery_load_data and r5l_recovery_load_parity use flag R5_Wantwrite
+ * to mark valid (potentially not flushed) data in the journal.
+ *
+ * We already verified checksum in r5l_recovery_verify_data_checksum_for_mb,
+ * so there should not be any mismatch here.
+ */
+static void r5l_recovery_load_data(struct r5l_log *log,
+ struct stripe_head *sh,
+ struct r5l_recovery_ctx *ctx,
+ struct r5l_payload_data_parity *payload,
+ sector_t log_offset)
+{
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+ int dd_idx;
+
+ raid5_compute_sector(conf,
+ le64_to_cpu(payload->location), 0,
+ &dd_idx, sh);
+ sync_page_io(log->rdev, log_offset, PAGE_SIZE,
+ sh->dev[dd_idx].page, REQ_OP_READ, 0, false);
+ sh->dev[dd_idx].log_checksum =
+ le32_to_cpu(payload->checksum[0]);
+ ctx->meta_total_blocks += BLOCK_SECTORS;
+
+ set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags);
+ set_bit(STRIPE_R5C_CACHING, &sh->state);
+}
+
+static void r5l_recovery_load_parity(struct r5l_log *log,
+ struct stripe_head *sh,
+ struct r5l_recovery_ctx *ctx,
+ struct r5l_payload_data_parity *payload,
+ sector_t log_offset)
+{
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+
+ ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
+ sync_page_io(log->rdev, log_offset, PAGE_SIZE,
+ sh->dev[sh->pd_idx].page, REQ_OP_READ, 0, false);
+ sh->dev[sh->pd_idx].log_checksum =
+ le32_to_cpu(payload->checksum[0]);
+ set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);
+
+ if (sh->qd_idx >= 0) {
+ sync_page_io(log->rdev,
+ r5l_ring_add(log, log_offset, BLOCK_SECTORS),
+ PAGE_SIZE, sh->dev[sh->qd_idx].page,
+ REQ_OP_READ, 0, false);
+ sh->dev[sh->qd_idx].log_checksum =
+ le32_to_cpu(payload->checksum[1]);
+ set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
}
+ clear_bit(STRIPE_R5C_CACHING, &sh->state);
+}
- for (disk_index = 0; disk_index < sh->disks; disk_index++) {
- void *addr;
- u32 checksum;
+static void r5l_recovery_reset_stripe(struct stripe_head *sh)
+{
+ int i;
+ sh->state = 0;
+ sh->log_start = MaxSector;
+ for (i = sh->disks; i--; )
+ sh->dev[i].flags = 0;
+}
+
+static void
+r5l_recovery_replay_one_stripe(struct r5conf *conf,
+ struct stripe_head *sh,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct md_rdev *rdev, *rrdev;
+ int disk_index;
+ int data_count = 0;
+
+ for (disk_index = 0; disk_index < sh->disks; disk_index++) {
if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
continue;
- addr = kmap_atomic(sh->dev[disk_index].page);
- checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
- kunmap_atomic(addr);
- if (checksum != sh->dev[disk_index].log_checksum)
- goto error;
+ if (disk_index == sh->qd_idx || disk_index == sh->pd_idx)
+ continue;
+ data_count++;
}
- for (disk_index = 0; disk_index < sh->disks; disk_index++) {
- struct md_rdev *rdev, *rrdev;
+ /*
+ * stripes that only have parity must have been flushed
+ * before the crash that we are now recovering from, so
+ * there is nothing more to recover.
+ */
+ if (data_count == 0)
+ goto out;
- if (!test_and_clear_bit(R5_Wantwrite,
- &sh->dev[disk_index].flags))
+ for (disk_index = 0; disk_index < sh->disks; disk_index++) {
+ if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
continue;
/* in case device is broken */
+ rcu_read_lock();
rdev = rcu_dereference(conf->disks[disk_index].rdev);
- if (rdev)
- sync_page_io(rdev, stripe_sect, PAGE_SIZE,
+ if (rdev) {
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+ sync_page_io(rdev, sh->sector, PAGE_SIZE,
sh->dev[disk_index].page, REQ_OP_WRITE, 0,
false);
+ rdev_dec_pending(rdev, rdev->mddev);
+ rcu_read_lock();
+ }
rrdev = rcu_dereference(conf->disks[disk_index].replacement);
- if (rrdev)
- sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
+ if (rrdev) {
+ atomic_inc(&rrdev->nr_pending);
+ rcu_read_unlock();
+ sync_page_io(rrdev, sh->sector, PAGE_SIZE,
sh->dev[disk_index].page, REQ_OP_WRITE, 0,
false);
+ rdev_dec_pending(rrdev, rrdev->mddev);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
}
- raid5_release_stripe(sh);
+ ctx->data_parity_stripes++;
+out:
+ r5l_recovery_reset_stripe(sh);
+}
+
+static struct stripe_head *
+r5c_recovery_alloc_stripe(struct r5conf *conf,
+ sector_t stripe_sect,
+ sector_t log_start)
+{
+ struct stripe_head *sh;
+
+ sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
+ if (!sh)
+ return NULL; /* no more stripe available */
+
+ r5l_recovery_reset_stripe(sh);
+ sh->log_start = log_start;
+
+ return sh;
+}
+
+static struct stripe_head *
+r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)
+{
+ struct stripe_head *sh;
+
+ list_for_each_entry(sh, list, lru)
+ if (sh->sector == sect)
+ return sh;
+ return NULL;
+}
+
+static void
+r5c_recovery_drop_stripes(struct list_head *cached_stripe_list,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct stripe_head *sh, *next;
+
+ list_for_each_entry_safe(sh, next, cached_stripe_list, lru) {
+ r5l_recovery_reset_stripe(sh);
+ list_del_init(&sh->lru);
+ raid5_release_stripe(sh);
+ }
+}
+
+static void
+r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct stripe_head *sh, *next;
+
+ list_for_each_entry_safe(sh, next, cached_stripe_list, lru)
+ if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+ r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
+ list_del_init(&sh->lru);
+ raid5_release_stripe(sh);
+ }
+}
+
+/* if matches return 0; otherwise return -EINVAL */
+static int
+r5l_recovery_verify_data_checksum(struct r5l_log *log, struct page *page,
+ sector_t log_offset, __le32 log_checksum)
+{
+ void *addr;
+ u32 checksum;
+
+ sync_page_io(log->rdev, log_offset, PAGE_SIZE,
+ page, REQ_OP_READ, 0, false);
+ addr = kmap_atomic(page);
+ checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
+ kunmap_atomic(addr);
+ return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
+}
+
+/*
+ * before loading data to the stripe cache, we need to verify the checksum
+ * for all data; if there is a mismatch for any data page, we drop all data
+ * in the meta block
+ */
+static int
+r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+ struct r5l_meta_block *mb = page_address(ctx->meta_page);
+ sector_t mb_offset = sizeof(struct r5l_meta_block);
+ sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
+ struct page *page;
+ struct r5l_payload_data_parity *payload;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ while (mb_offset < le32_to_cpu(mb->meta_size)) {
+ payload = (void *)mb + mb_offset;
+
+ if (payload->header.type == R5LOG_PAYLOAD_DATA) {
+ if (r5l_recovery_verify_data_checksum(
+ log, page, log_offset,
+ payload->checksum[0]) < 0)
+ goto mismatch;
+ } else if (payload->header.type == R5LOG_PAYLOAD_PARITY) {
+ if (r5l_recovery_verify_data_checksum(
+ log, page, log_offset,
+ payload->checksum[0]) < 0)
+ goto mismatch;
+ if (conf->max_degraded == 2 && /* q for RAID 6 */
+ r5l_recovery_verify_data_checksum(
+ log, page,
+ r5l_ring_add(log, log_offset,
+ BLOCK_SECTORS),
+ payload->checksum[1]) < 0)
+ goto mismatch;
+ } else /* not R5LOG_PAYLOAD_DATA or R5LOG_PAYLOAD_PARITY */
+ goto mismatch;
+
+ log_offset = r5l_ring_add(log, log_offset,
+ le32_to_cpu(payload->size));
+
+ mb_offset += sizeof(struct r5l_payload_data_parity) +
+ sizeof(__le32) *
+ (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
+ }
+
+ put_page(page);
return 0;
-error:
- for (disk_index = 0; disk_index < sh->disks; disk_index++)
- sh->dev[disk_index].flags = 0;
- raid5_release_stripe(sh);
+mismatch:
+ put_page(page);
return -EINVAL;
}
-static int r5l_recovery_flush_one_meta(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx)
+/*
+ * Analyze all data/parity pages in one meta block
+ * Returns:
+ * 0 for success
+ * -EINVAL for unknown payload type
+ * -EAGAIN for checksum mismatch of data page
+ * -ENOMEM for running out of memory (alloc_page failed or no free stripes)
+ */
+static int
+r5c_recovery_analyze_meta_block(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx,
+ struct list_head *cached_stripe_list)
{
- struct r5conf *conf = log->rdev->mddev->private;
- struct r5l_payload_data_parity *payload;
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
struct r5l_meta_block *mb;
- int offset;
+ struct r5l_payload_data_parity *payload;
+ int mb_offset;
sector_t log_offset;
- sector_t stripe_sector;
+ sector_t stripe_sect;
+ struct stripe_head *sh;
+ int ret;
+
+ /*
+ * For a mismatch in data blocks, we drop all data in this mb, but
+ * we still read the next mb for other data with the FLUSH flag, as
+ * the io_unit could have finished out of order.
+ */
+ ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
+ if (ret == -EINVAL)
+ return -EAGAIN;
+ else if (ret)
+ return ret; /* -ENOMEM due to alloc_page() failure */
mb = page_address(ctx->meta_page);
- offset = sizeof(struct r5l_meta_block);
+ mb_offset = sizeof(struct r5l_meta_block);
log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
- while (offset < le32_to_cpu(mb->meta_size)) {
+ while (mb_offset < le32_to_cpu(mb->meta_size)) {
int dd;
- payload = (void *)mb + offset;
- stripe_sector = raid5_compute_sector(conf,
- le64_to_cpu(payload->location), 0, &dd, NULL);
- if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
- &offset, &log_offset))
+ payload = (void *)mb + mb_offset;
+ stripe_sect = (payload->header.type == R5LOG_PAYLOAD_DATA) ?
+ raid5_compute_sector(
+ conf, le64_to_cpu(payload->location), 0, &dd,
+ NULL)
+ : le64_to_cpu(payload->location);
+
+ sh = r5c_recovery_lookup_stripe(cached_stripe_list,
+ stripe_sect);
+
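+ /*
+ * No cached stripe_head for this sector yet. Try, in order:
+ * allocate a new stripe; replay the already cached data-parity
+ * stripes to free some; double the stripe cache size. Only if
+ * all of these fail do we give up with -ENOMEM.
+ */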
+ if (!sh) {
+ sh = r5c_recovery_alloc_stripe(conf, stripe_sect, ctx->pos);
+ /*
+ * Cannot get a stripe from raid5_get_active_stripe();
+ * try to replay some cached stripes to free a few.
+ */
+ if (!sh) {
+ r5c_recovery_replay_stripes(
+ cached_stripe_list, ctx);
+ sh = r5c_recovery_alloc_stripe(
+ conf, stripe_sect, ctx->pos);
+ }
+ if (!sh) {
+ pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
+ mdname(mddev),
+ conf->min_nr_stripes * 2);
+ raid5_set_cache_size(mddev,
+ conf->min_nr_stripes * 2);
+ sh = r5c_recovery_alloc_stripe(
+ conf, stripe_sect, ctx->pos);
+ }
+ if (!sh) {
+ pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
+ mdname(mddev));
+ return -ENOMEM;
+ }
+ list_add_tail(&sh->lru, cached_stripe_list);
+ }
+
+ if (payload->header.type == R5LOG_PAYLOAD_DATA) {
+ if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
+ test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
+ r5l_recovery_replay_one_stripe(conf, sh, ctx);
+ sh->log_start = ctx->pos;
+ list_move_tail(&sh->lru, cached_stripe_list);
+ }
+ r5l_recovery_load_data(log, sh, ctx, payload,
+ log_offset);
+ } else if (payload->header.type == R5LOG_PAYLOAD_PARITY)
+ r5l_recovery_load_parity(log, sh, ctx, payload,
+ log_offset);
+ else
return -EINVAL;
+
+ log_offset = r5l_ring_add(log, log_offset,
+ le32_to_cpu(payload->size));
+
+ mb_offset += sizeof(struct r5l_payload_data_parity) +
+ sizeof(__le32) *
+ (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
}
+
return 0;
}
-/* copy data/parity from log to raid disks */
-static void r5l_recovery_flush_log(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx)
+/*
+ * Load the stripe into cache. The stripe will be written out later by
+ * the stripe cache state machine.
+ */
+static void r5c_recovery_load_one_stripe(struct r5l_log *log,
+ struct stripe_head *sh)
{
+ struct r5dev *dev;
+ int i;
+
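+ /*
+ * Pages recovered from the journal are marked R5_InJournal and
+ * R5_UPTODATE (instead of R5_Wantwrite) so the write-back state
+ * machine can flush them to the RAID disks later.
+ */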
+ for (i = sh->disks; i--; ) {
+ dev = sh->dev + i;
+ if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
+ set_bit(R5_InJournal, &dev->flags);
+ set_bit(R5_UPTODATE, &dev->flags);
+ }
+ }
+ list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
+ atomic_inc(&log->stripe_in_journal_count);
+}
+
+/*
+ * Scan through the log for all to-be-flushed data
+ *
+ * For stripes with both data and parity, namely data-parity stripes
+ * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
+ *
+ * For stripes with only data, namely data-only stripes
+ * (STRIPE_R5C_CACHING == 1), we load them into the stripe cache state
+ * machine.
+ *
+ * For a stripe, if we see data after parity, we should discard all previous
+ * data and parity for this stripe, as that data has already been flushed to
+ * the array.
+ *
+ * At the end of the scan, we return the new journal_tail, which points to
+ * the first data-only stripe on the journal device, or to the next invalid
+ * meta block.
+ */
+static int r5c_recovery_flush_log(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct stripe_head *sh;
+ int ret = 0;
+
+ /* scan through the log */
while (1) {
- if (r5l_read_meta_block(log, ctx))
- return;
- if (r5l_recovery_flush_one_meta(log, ctx))
- return;
+ if (r5l_recovery_read_meta_block(log, ctx))
+ break;
+
+ ret = r5c_recovery_analyze_meta_block(log, ctx,
+ &ctx->cached_list);
+ /*
+ * -EAGAIN means a mismatch in a data block; in this case, we still
+ * try to scan the next meta block.
+ */
+ if (ret && ret != -EAGAIN)
+ break; /* ret == -EINVAL or -ENOMEM */
ctx->seq++;
ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
}
+
+ if (ret == -ENOMEM) {
+ r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
+ return ret;
+ }
+
+ /* replay data-parity stripes */
+ r5c_recovery_replay_stripes(&ctx->cached_list, ctx);
+
+ /* load data-only stripes to stripe cache */
+ list_for_each_entry(sh, &ctx->cached_list, lru) {
+ WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+ r5c_recovery_load_one_stripe(log, sh);
+ ctx->data_only_stripes++;
+ }
+
+ return 0;
}
-static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
- u64 seq)
+/*
+ * We did a recovery. Now ctx.pos points to an invalid meta block. The new
+ * log will start here, but we can't let the superblock point to the last
+ * valid meta block. The log might look like:
+ * | meta 1| meta 2| meta 3|
+ * meta 1 is valid, meta 2 is invalid and meta 3 could be valid. If the
+ * superblock points to meta 1 and we write a new valid meta 2n, then if a
+ * crash happens again, the new recovery will start from meta 1. Since
+ * meta 2n is now valid, recovery will think meta 3 is valid, which is wrong.
+ * The solution is to create a new meta in meta 2 with its seq == meta
+ * 1's seq + 10000 and let the superblock point to meta 2. The same recovery
+ * will then not treat meta 3 as a valid meta, because its seq doesn't match.
+ */
+
+/*
+ * Before recovery, the log looks like the following
+ *
+ * ---------------------------------------------
+ * | valid log | invalid log |
+ * ---------------------------------------------
+ * ^
+ * |- log->last_checkpoint
+ * |- log->last_cp_seq
+ *
+ * Now we scan through the log until we see an invalid entry
+ *
+ * ---------------------------------------------
+ * | valid log | invalid log |
+ * ---------------------------------------------
+ * ^ ^
+ * |- log->last_checkpoint |- ctx->pos
+ * |- log->last_cp_seq |- ctx->seq
+ *
+ * From this point, we need to increase the seq number by 10000 to avoid
+ * confusing the next recovery.
+ *
+ * ---------------------------------------------
+ * | valid log | invalid log |
+ * ---------------------------------------------
+ * ^ ^
+ * |- log->last_checkpoint |- ctx->pos+1
+ * |- log->last_cp_seq |- ctx->seq+10001
+ *
+ * However, it is not safe to start the state machine yet, because the data
+ * in the data-only stripes is not yet secured in the RAID array. To save
+ * this data, we rewrite it to the journal starting from seq+10001.
+ *
+ * -----------------------------------------------------------------
+ * | valid log | data only stripes | invalid log |
+ * -----------------------------------------------------------------
+ * ^ ^
+ * |- log->last_checkpoint |- ctx->pos+n
+ * |- log->last_cp_seq |- ctx->seq+10000+n
+ *
+ * If failure happens again during this process, the recovery can safely
+ * start again from log->last_checkpoint.
+ *
+ * Once the data-only stripes are rewritten to the journal, we move log_tail
+ *
+ * -----------------------------------------------------------------
+ * | old log | data only stripes | invalid log |
+ * -----------------------------------------------------------------
+ * ^ ^
+ * |- log->last_checkpoint |- ctx->pos+n
+ * |- log->last_cp_seq |- ctx->seq+10000+n
+ *
+ * Then we can safely start the state machine. If failure happens from this
+ * point on, the recovery will start from new log->last_checkpoint.
+ */
+static int
+r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
{
+ struct stripe_head *sh, *next;
+ struct mddev *mddev = log->rdev->mddev;
struct page *page;
- struct r5l_meta_block *mb;
- u32 crc;
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page)
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
+ mdname(mddev));
return -ENOMEM;
- mb = page_address(page);
- mb->magic = cpu_to_le32(R5LOG_MAGIC);
- mb->version = R5LOG_VERSION;
- mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
- mb->seq = cpu_to_le64(seq);
- mb->position = cpu_to_le64(pos);
- crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
- mb->checksum = cpu_to_le32(crc);
+ }
- if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
- REQ_FUA, false)) {
- __free_page(page);
- return -EIO;
+ list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
+ struct r5l_meta_block *mb;
+ int i;
+ int offset;
+ sector_t write_pos;
+
+ WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+ r5l_recovery_create_empty_meta_block(log, page,
+ ctx->pos, ctx->seq);
+ mb = page_address(page);
+ offset = le32_to_cpu(mb->meta_size);
+ write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
+
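+ /*
+ * Layout on the journal for each data-only stripe: one meta block
+ * at ctx->pos, followed by one BLOCK_SECTORS data page at write_pos
+ * for every device that is marked R5_InJournal.
+ */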
+ for (i = sh->disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ struct r5l_payload_data_parity *payload;
+ void *addr;
+
+ if (test_bit(R5_InJournal, &dev->flags)) {
+ payload = (void *)mb + offset;
+ payload->header.type = cpu_to_le16(
+ R5LOG_PAYLOAD_DATA);
+ payload->size = cpu_to_le32(BLOCK_SECTORS);
+ payload->location = cpu_to_le64(
+ raid5_compute_blocknr(sh, i, 0));
+ addr = kmap_atomic(dev->page);
+ payload->checksum[0] = cpu_to_le32(
+ crc32c_le(log->uuid_checksum, addr,
+ PAGE_SIZE));
+ kunmap_atomic(addr);
+ sync_page_io(log->rdev, write_pos, PAGE_SIZE,
+ dev->page, REQ_OP_WRITE, 0, false);
+ write_pos = r5l_ring_add(log, write_pos,
+ BLOCK_SECTORS);
+ offset += sizeof(__le32) +
+ sizeof(struct r5l_payload_data_parity);
+
+ }
+ }
+ mb->meta_size = cpu_to_le32(offset);
+ mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
+ mb, PAGE_SIZE));
+ sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
+ REQ_OP_WRITE, REQ_FUA, false);
+ sh->log_start = ctx->pos;
+ ctx->pos = write_pos;
+ ctx->seq += 1;
+
+ list_del_init(&sh->lru);
+ raid5_release_stripe(sh);
}
__free_page(page);
return 0;
@@ -1063,45 +2135,60 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
static int r5l_recovery_log(struct r5l_log *log)
{
+ struct mddev *mddev = log->rdev->mddev;
struct r5l_recovery_ctx ctx;
+ int ret;
+ sector_t pos;
+ struct stripe_head *sh;
ctx.pos = log->last_checkpoint;
ctx.seq = log->last_cp_seq;
ctx.meta_page = alloc_page(GFP_KERNEL);
+ ctx.data_only_stripes = 0;
+ ctx.data_parity_stripes = 0;
+ INIT_LIST_HEAD(&ctx.cached_list);
+
if (!ctx.meta_page)
return -ENOMEM;
- r5l_recovery_flush_log(log, &ctx);
+ ret = r5c_recovery_flush_log(log, &ctx);
__free_page(ctx.meta_page);
- /*
- * we did a recovery. Now ctx.pos points to an invalid meta block. New
- * log will start here. but we can't let superblock point to last valid
- * meta block. The log might looks like:
- * | meta 1| meta 2| meta 3|
- * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If
- * superblock points to meta 1, we write a new valid meta 2n. if crash
- * happens again, new recovery will start from meta 1. Since meta 2n is
- * valid now, recovery will think meta 3 is valid, which is wrong.
- * The solution is we create a new meta in meta2 with its seq == meta
- * 1's seq + 10 and let superblock points to meta2. The same recovery will
- * not think meta 3 is a valid meta, because its seq doesn't match
- */
- if (ctx.seq > log->last_cp_seq) {
- int ret;
-
- ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
- if (ret)
- return ret;
- log->seq = ctx.seq + 11;
- log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
- r5l_write_super(log, ctx.pos);
- log->last_checkpoint = ctx.pos;
+ if (ret)
+ return ret;
+
+ pos = ctx.pos;
+ ctx.seq += 10000;
+
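+ /*
+ * Jump the sequence number far ahead of anything already on the
+ * journal, so stale meta blocks beyond the recovery point can never
+ * be mistaken for valid ones (see the comment above
+ * r5c_recovery_rewrite_data_only_stripes()).
+ */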
+ if (ctx.data_only_stripes == 0) {
log->next_checkpoint = ctx.pos;
+ r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
+ ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
} else {
- log->log_start = ctx.pos;
- log->seq = ctx.seq;
+ sh = list_last_entry(&ctx.cached_list, struct stripe_head, lru);
+ log->next_checkpoint = sh->log_start;
}
+
+ if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
+ pr_debug("md/raid:%s: starting from clean shutdown\n",
+ mdname(mddev));
+ else {
+ pr_debug("md/raid:%s: recoverying %d data-only stripes and %d data-parity stripes\n",
+ mdname(mddev), ctx.data_only_stripes,
+ ctx.data_parity_stripes);
+
+ if (ctx.data_only_stripes > 0)
+ if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
+ pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
+ mdname(mddev));
+ return -EIO;
+ }
+ }
+
+ log->log_start = ctx.pos;
+ log->seq = ctx.seq;
+ log->last_checkpoint = pos;
+ r5l_write_super(log, pos);
return 0;
}
@@ -1110,7 +2197,293 @@ static void r5l_write_super(struct r5l_log *log, sector_t cp)
struct mddev *mddev = log->rdev->mddev;
log->rdev->journal_tail = cp;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+}
+
+static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
+{
+ struct r5conf *conf = mddev->private;
+ int ret;
+
+ if (!conf->log)
+ return 0;
+
+ switch (conf->log->r5c_journal_mode) {
+ case R5C_JOURNAL_MODE_WRITE_THROUGH:
+ ret = snprintf(
+ page, PAGE_SIZE, "[%s] %s\n",
+ r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
+ r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
+ break;
+ case R5C_JOURNAL_MODE_WRITE_BACK:
+ ret = snprintf(
+ page, PAGE_SIZE, "%s [%s]\n",
+ r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
+ r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
+ break;
+ default:
+ ret = 0;
+ }
+ return ret;
+}
+
+static ssize_t r5c_journal_mode_store(struct mddev *mddev,
+ const char *page, size_t length)
+{
+ struct r5conf *conf = mddev->private;
+ struct r5l_log *log = conf->log;
+ int val = -1, i;
+ int len = length;
+
+ if (!log)
+ return -ENODEV;
+
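+ /* sysfs writes usually end with a newline; strip it before matching */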
+ if (len && page[len - 1] == '\n')
+ len -= 1;
+ for (i = 0; i < ARRAY_SIZE(r5c_journal_mode_str); i++)
+ if (strlen(r5c_journal_mode_str[i]) == len &&
+ strncmp(page, r5c_journal_mode_str[i], len) == 0) {
+ val = i;
+ break;
+ }
+ if (val < R5C_JOURNAL_MODE_WRITE_THROUGH ||
+ val > R5C_JOURNAL_MODE_WRITE_BACK)
+ return -EINVAL;
+
+ mddev_suspend(mddev);
+ conf->log->r5c_journal_mode = val;
+ mddev_resume(mddev);
+
+ pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
+ mdname(mddev), val, r5c_journal_mode_str[val]);
+ return length;
+}
+
+struct md_sysfs_entry
+r5c_journal_mode = __ATTR(journal_mode, 0644,
+ r5c_journal_mode_show, r5c_journal_mode_store);
+
+/*
+ * Try to handle a write operation in the caching phase. This function should
+ * only be called in write-back mode.
+ *
+ * If all outstanding writes can be handled in the caching phase, returns 0.
+ * If the writes require the write-out phase, calls r5c_make_stripe_write_out()
+ * and returns -EAGAIN.
+ */
+int r5c_try_caching_write(struct r5conf *conf,
+ struct stripe_head *sh,
+ struct stripe_head_state *s,
+ int disks)
+{
+ struct r5l_log *log = conf->log;
+ int i;
+ struct r5dev *dev;
+ int to_cache = 0;
+
+ BUG_ON(!r5c_is_writeback(log));
+
+ if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+ /*
+ * There are two different scenarios here:
+ * 1. The stripe has some data cached, and it is sent to
+ * write-out phase for reclaim
+ * 2. The stripe is clean, and this is the first write
+ *
+ * For 1, return -EAGAIN, so we continue with
+ * handle_stripe_dirtying().
+ *
+ * For 2, set STRIPE_R5C_CACHING and continue with caching
+ * write.
+ */
+
+ /* case 1: anything in s->injournal or anything in s->written */
+ if (s->injournal > 0 || s->written > 0)
+ return -EAGAIN;
+ /* case 2 */
+ set_bit(STRIPE_R5C_CACHING, &sh->state);
+ }
+
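+ /*
+ * Two passes over the devices: first make sure every pending write
+ * is a full-page overwrite (or already in the journal), otherwise
+ * fall back to the write-out phase; then mark the devices with
+ * pending writes for caching.
+ */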
+ for (i = disks; i--; ) {
+ dev = &sh->dev[i];
+ /* a non-overwrite write must use the write-out phase */
+ if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
+ !test_bit(R5_InJournal, &dev->flags)) {
+ r5c_make_stripe_write_out(sh);
+ return -EAGAIN;
+ }
+ }
+
+ for (i = disks; i--; ) {
+ dev = &sh->dev[i];
+ if (dev->towrite) {
+ set_bit(R5_Wantwrite, &dev->flags);
+ set_bit(R5_Wantdrain, &dev->flags);
+ set_bit(R5_LOCKED, &dev->flags);
+ to_cache++;
+ }
+ }
+
+ if (to_cache) {
+ set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
+ /*
+ * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
+ * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
+ * r5c_handle_data_cached()
+ */
+ set_bit(STRIPE_LOG_TRAPPED, &sh->state);
+ }
+
+ return 0;
+}
+
+/*
+ * free extra pages (orig_page) we allocated for prexor
+ */
+void r5c_release_extra_page(struct stripe_head *sh)
+{
+ struct r5conf *conf = sh->raid_conf;
+ int i;
+ bool using_disk_info_extra_page;
+
+ using_disk_info_extra_page =
+ sh->dev[0].orig_page == conf->disks[0].extra_page;
+
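+ /*
+ * If dev[0].orig_page points at the shared per-disk extra_page, the
+ * pages were borrowed from conf->disks[] rather than allocated for
+ * this stripe, so they must not be freed; just drop the
+ * R5C_EXTRA_PAGE_IN_USE reservation and wake raid5d so a waiting
+ * stripe can use it.
+ */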
+ for (i = sh->disks; i--; )
+ if (sh->dev[i].page != sh->dev[i].orig_page) {
+ struct page *p = sh->dev[i].orig_page;
+
+ sh->dev[i].orig_page = sh->dev[i].page;
+ if (!using_disk_info_extra_page)
+ put_page(p);
+ }
+
+ if (using_disk_info_extra_page) {
+ clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
+ md_wakeup_thread(conf->mddev->thread);
+ }
+}
+
+void r5c_use_extra_page(struct stripe_head *sh)
+{
+ struct r5conf *conf = sh->raid_conf;
+ int i;
+ struct r5dev *dev;
+
+ for (i = sh->disks; i--; ) {
+ dev = &sh->dev[i];
+ if (dev->orig_page != dev->page)
+ put_page(dev->orig_page);
+ dev->orig_page = conf->disks[i].extra_page;
+ }
+}
+
+/*
+ * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
+ * stripe is committed to RAID disks.
+ */
+void r5c_finish_stripe_write_out(struct r5conf *conf,
+ struct stripe_head *sh,
+ struct stripe_head_state *s)
+{
+ int i;
+ int do_wakeup = 0;
+
+ if (!conf->log ||
+ !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
+ return;
+
+ WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
+ clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
+
+ if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+ return;
+
+ for (i = sh->disks; i--; ) {
+ clear_bit(R5_InJournal, &sh->dev[i].flags);
+ if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
+ do_wakeup = 1;
+ }
+
+ /*
+ * analyse_stripe() runs before r5c_finish_stripe_write_out();
+ * we just cleared R5_InJournal, so we also need to update s->injournal.
+ */
+ s->injournal = 0;
+
+ if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
+ if (atomic_dec_and_test(&conf->pending_full_writes))
+ md_wakeup_thread(conf->mddev->thread);
+
+ if (do_wakeup)
+ wake_up(&conf->wait_for_overlap);
+
+ if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+ return;
+
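+ /*
+ * The stripe is no longer backed only by the journal: drop it from
+ * stripe_in_journal_list so reclaim can advance the log tail past
+ * its old log_start.
+ */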
+ spin_lock_irq(&conf->log->stripe_in_journal_lock);
+ list_del_init(&sh->r5c);
+ spin_unlock_irq(&conf->log->stripe_in_journal_lock);
+ sh->log_start = MaxSector;
+ atomic_dec(&conf->log->stripe_in_journal_count);
+ r5c_update_log_state(conf->log);
+}
+
+int
+r5c_cache_data(struct r5l_log *log, struct stripe_head *sh,
+ struct stripe_head_state *s)
+{
+ struct r5conf *conf = sh->raid_conf;
+ int pages = 0;
+ int reserve;
+ int i;
+ int ret = 0;
+
+ BUG_ON(!log);
+
+ for (i = 0; i < sh->disks; i++) {
+ void *addr;
+
+ if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
+ continue;
+ addr = kmap_atomic(sh->dev[i].page);
+ sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
+ addr, PAGE_SIZE);
+ kunmap_atomic(addr);
+ pages++;
+ }
+ WARN_ON(pages == 0);
+
+ /*
+ * The stripe must enter the state machine again to call endio, so
+ * don't delay.
+ */
+ clear_bit(STRIPE_DELAYED, &sh->state);
+ atomic_inc(&sh->count);
+
+ mutex_lock(&log->io_mutex);
+ /* meta + data */
+ reserve = (1 + pages) << (PAGE_SHIFT - 9);
+
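+ /*
+ * If log space is critical and this stripe is not yet in the journal
+ * (log_start == MaxSector), or if there is simply not enough free
+ * space, queue it on the no-space list for the reclaim path;
+ * otherwise log it now, falling back to no_mem_stripes if
+ * r5l_log_stripe() fails (e.g. out of memory).
+ */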
+ if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
+ sh->log_start == MaxSector)
+ r5l_add_no_space_stripe(log, sh);
+ else if (!r5l_has_free_space(log, reserve)) {
+ if (sh->log_start == log->last_checkpoint)
+ BUG();
+ else
+ r5l_add_no_space_stripe(log, sh);
+ } else {
+ ret = r5l_log_stripe(log, sh, pages, 0);
+ if (ret) {
+ spin_lock_irq(&log->io_list_lock);
+ list_add_tail(&sh->log_list, &log->no_mem_stripes);
+ spin_unlock_irq(&log->io_list_lock);
+ }
+ }
+
+ mutex_unlock(&log->io_mutex);
+ return 0;
}
static int r5l_load_log(struct r5l_log *log)
@@ -1121,7 +2494,7 @@ static int r5l_load_log(struct r5l_log *log)
sector_t cp = log->rdev->journal_tail;
u32 stored_crc, expected_crc;
bool create_super = false;
- int ret;
+ int ret = 0;
/* Make sure it's valid */
if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
@@ -1171,11 +2544,18 @@ create:
if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
log->max_free_space = RECLAIM_MAX_FREE_SPACE;
log->last_checkpoint = cp;
- log->next_checkpoint = cp;
__free_page(page);
- return r5l_recovery_log(log);
+ if (create_super) {
+ log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
+ log->seq = log->last_cp_seq + 1;
+ log->next_checkpoint = cp;
+ } else
+ ret = r5l_recovery_log(log);
+
+ r5c_update_log_state(log);
+ return ret;
ioerr:
__free_page(page);
return ret;
@@ -1188,6 +2568,22 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
if (PAGE_SIZE != 4096)
return -EINVAL;
+
+ /*
+ * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
+ * raid_disks r5l_payload_data_parity.
+ *
+ * The write journal and cache do not work for very big arrays
+ * (raid_disks > 203).
+ */
+ if (sizeof(struct r5l_meta_block) +
+ ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
+ conf->raid_disks) > PAGE_SIZE) {
+ pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
+ mdname(conf->mddev), conf->raid_disks);
+ return -EINVAL;
+ }
+
log = kzalloc(sizeof(*log), GFP_KERNEL);
if (!log)
return -ENOMEM;
@@ -1227,6 +2623,8 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
log->rdev->mddev, "reclaim");
if (!log->reclaim_thread)
goto reclaim_thread;
+ log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
+
init_waitqueue_head(&log->iounit_wait);
INIT_LIST_HEAD(&log->no_mem_stripes);
@@ -1234,6 +2632,13 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
INIT_LIST_HEAD(&log->no_space_stripes);
spin_lock_init(&log->no_space_stripes_lock);
+ INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
+
+ log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+ INIT_LIST_HEAD(&log->stripe_in_journal_list);
+ spin_lock_init(&log->stripe_in_journal_lock);
+ atomic_set(&log->stripe_in_journal_count, 0);
+
if (r5l_load_log(log))
goto error;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5f9e28443c8a..06d7279bdd04 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -70,19 +70,6 @@ module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
"Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
static struct workqueue_struct *raid5_wq;
-/*
- * Stripe cache
- */
-
-#define NR_STRIPES 256
-#define STRIPE_SIZE PAGE_SIZE
-#define STRIPE_SHIFT (PAGE_SHIFT - 9)
-#define STRIPE_SECTORS (STRIPE_SIZE>>9)
-#define IO_THRESHOLD 1
-#define BYPASS_THRESHOLD 1
-#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
-#define HASH_MASK (NR_HASH - 1)
-#define MAX_STRIPE_BATCH 8
static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
@@ -126,64 +113,6 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
local_irq_enable();
}
-/* bio's attached to a stripe+device for I/O are linked together in bi_sector
- * order without overlap. There may be several bio's per stripe+device, and
- * a bio could span several devices.
- * When walking this list for a particular stripe+device, we must never proceed
- * beyond a bio that extends past this device, as the next bio might no longer
- * be valid.
- * This function is used to determine the 'next' bio in the list, given the sector
- * of the current stripe+device
- */
-static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
-{
- int sectors = bio_sectors(bio);
- if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
- return bio->bi_next;
- else
- return NULL;
-}
-
-/*
- * We maintain a biased count of active stripes in the bottom 16 bits of
- * bi_phys_segments, and a count of processed stripes in the upper 16 bits
- */
-static inline int raid5_bi_processed_stripes(struct bio *bio)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- return (atomic_read(segments) >> 16) & 0xffff;
-}
-
-static inline int raid5_dec_bi_active_stripes(struct bio *bio)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- return atomic_sub_return(1, segments) & 0xffff;
-}
-
-static inline void raid5_inc_bi_active_stripes(struct bio *bio)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- atomic_inc(segments);
-}
-
-static inline void raid5_set_bi_processed_stripes(struct bio *bio,
- unsigned int cnt)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- int old, new;
-
- do {
- old = atomic_read(segments);
- new = (old & 0xffff) | (cnt << 16);
- } while (atomic_cmpxchg(segments, old, new) != old);
-}
-
-static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- atomic_set(segments, cnt);
-}
-
/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
@@ -289,8 +218,27 @@ static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
struct list_head *temp_inactive_list)
{
+ int i;
+ int injournal = 0; /* number of data pages with R5_InJournal */
+
BUG_ON(!list_empty(&sh->lru));
BUG_ON(atomic_read(&conf->active_stripes)==0);
+
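+ /*
+ * Count how many data pages of this stripe currently live only in
+ * the journal; this decides below whether the stripe goes back to
+ * the inactive list or onto one of the r5c cached-stripe lists.
+ */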
+ if (r5c_is_writeback(conf->log))
+ for (i = sh->disks; i--; )
+ if (test_bit(R5_InJournal, &sh->dev[i].flags))
+ injournal++;
+ /*
+ * When quiescing in r5c write-back mode, set STRIPE_HANDLE for stripes
+ * with data in the journal, so they are not released to the cached lists.
+ */
+ if (conf->quiesce && r5c_is_writeback(conf->log) &&
+ !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0) {
+ if (test_bit(STRIPE_R5C_CACHING, &sh->state))
+ r5c_make_stripe_write_out(sh);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ }
+
if (test_bit(STRIPE_HANDLE, &sh->state)) {
if (test_bit(STRIPE_DELAYED, &sh->state) &&
!test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
@@ -316,8 +264,30 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
< IO_THRESHOLD)
md_wakeup_thread(conf->mddev->thread);
atomic_dec(&conf->active_stripes);
- if (!test_bit(STRIPE_EXPANDING, &sh->state))
- list_add_tail(&sh->lru, temp_inactive_list);
+ if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
+ if (!r5c_is_writeback(conf->log))
+ list_add_tail(&sh->lru, temp_inactive_list);
+ else {
+ WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags));
+ if (injournal == 0)
+ list_add_tail(&sh->lru, temp_inactive_list);
+ else if (injournal == conf->raid_disks - conf->max_degraded) {
+ /* full stripe */
+ if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state))
+ atomic_inc(&conf->r5c_cached_full_stripes);
+ if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
+ atomic_dec(&conf->r5c_cached_partial_stripes);
+ list_add_tail(&sh->lru, &conf->r5c_full_stripe_list);
+ r5c_check_cached_full_stripe(conf);
+ } else {
+ /* partial stripe */
+ if (!test_and_set_bit(STRIPE_R5C_PARTIAL_STRIPE,
+ &sh->state))
+ atomic_inc(&conf->r5c_cached_partial_stripes);
+ list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list);
+ }
+ }
+ }
}
}
@@ -541,7 +511,7 @@ retry:
if (dev->toread || dev->read || dev->towrite || dev->written ||
test_bit(R5_LOCKED, &dev->flags)) {
- printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
+ pr_err("sector=%llx i=%d %p %p %p %p %d\n",
(unsigned long long)sh->sector, i, dev->toread,
dev->read, dev->towrite, dev->written,
test_bit(R5_LOCKED, &dev->flags));
@@ -680,9 +650,12 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
}
if (noblock && sh == NULL)
break;
+
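+ /*
+ * Track stripe cache usage for the r5c write-back cache; if no
+ * stripe is free, wake the journal reclaim thread below so cached
+ * stripes get written out and released.
+ */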
+ r5c_check_stripe_cache_usage(conf);
if (!sh) {
set_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state);
+ r5l_wake_reclaim(conf->log, 0);
wait_event_lock_irq(
conf->wait_for_stripe,
!list_empty(conf->inactive_list + hash) &&
@@ -901,8 +874,19 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
might_sleep();
- if (r5l_write_stripe(conf->log, sh) == 0)
- return;
+ if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+ /* writing out phase */
+ if (s->waiting_extra_page)
+ return;
+ if (r5l_write_stripe(conf->log, sh) == 0)
+ return;
+ } else { /* caching phase */
+ if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) {
+ r5c_cache_data(conf->log, sh, s);
+ return;
+ }
+ }
+
for (i = disks; i--; ) {
int op, op_flags = 0;
int replace_only = 0;
@@ -977,7 +961,7 @@ again:
if (bad < 0) {
set_bit(BlockedBadBlocks, &rdev->flags);
if (!conf->mddev->external &&
- conf->mddev->flags) {
+ conf->mddev->sb_flags) {
/* It is very unlikely, but we might
* still need to write out the
* bad block log - better give it
@@ -1115,7 +1099,7 @@ again:
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page **page,
sector_t sector, struct dma_async_tx_descriptor *tx,
- struct stripe_head *sh)
+ struct stripe_head *sh, int no_skipcopy)
{
struct bio_vec bvl;
struct bvec_iter iter;
@@ -1155,7 +1139,8 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
if (frombio) {
if (sh->raid_conf->skip_copy &&
b_offset == 0 && page_offset == 0 &&
- clen == STRIPE_SIZE)
+ clen == STRIPE_SIZE &&
+ !no_skipcopy)
*page = bio_page;
else
tx = async_memcpy(*page, bio_page, page_offset,
@@ -1237,7 +1222,7 @@ static void ops_run_biofill(struct stripe_head *sh)
while (rbi && rbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
tx = async_copy_data(0, rbi, &dev->page,
- dev->sector, tx, sh);
+ dev->sector, tx, sh, 0);
rbi = r5_next_bio(rbi, dev->sector);
}
}
@@ -1364,10 +1349,15 @@ static int set_syndrome_sources(struct page **srcs,
if (i == sh->qd_idx || i == sh->pd_idx ||
(srctype == SYNDROME_SRC_ALL) ||
(srctype == SYNDROME_SRC_WANT_DRAIN &&
- test_bit(R5_Wantdrain, &dev->flags)) ||
+ (test_bit(R5_Wantdrain, &dev->flags) ||
+ test_bit(R5_InJournal, &dev->flags))) ||
(srctype == SYNDROME_SRC_WRITTEN &&
- dev->written))
- srcs[slot] = sh->dev[i].page;
+ dev->written)) {
+ if (test_bit(R5_InJournal, &dev->flags))
+ srcs[slot] = sh->dev[i].orig_page;
+ else
+ srcs[slot] = sh->dev[i].page;
+ }
i = raid6_next_disk(i, disks);
} while (i != d0_idx);
@@ -1546,6 +1536,13 @@ static void ops_complete_prexor(void *stripe_head_ref)
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
+
+ if (r5c_is_writeback(sh->raid_conf->log))
+ /*
+ * raid5-cache write back uses orig_page during prexor.
+ * After prexor, it is time to free orig_page
+ */
+ r5c_release_extra_page(sh);
}
static struct dma_async_tx_descriptor *
@@ -1567,7 +1564,9 @@ ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu,
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
/* Only process blocks that are known to be uptodate */
- if (test_bit(R5_Wantdrain, &dev->flags))
+ if (test_bit(R5_InJournal, &dev->flags))
+ xor_srcs[count++] = dev->orig_page;
+ else if (test_bit(R5_Wantdrain, &dev->flags))
xor_srcs[count++] = dev->page;
}
@@ -1601,6 +1600,7 @@ ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu,
static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
+ struct r5conf *conf = sh->raid_conf;
int disks = sh->disks;
int i;
struct stripe_head *head_sh = sh;
@@ -1618,6 +1618,11 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
again:
dev = &sh->dev[i];
+ /*
+ * clear R5_InJournal, so that when rewriting a page in the
+ * journal, it is not skipped by r5l_log_stripe()
+ */
+ clear_bit(R5_InJournal, &dev->flags);
spin_lock_irq(&sh->stripe_lock);
chosen = dev->towrite;
dev->towrite = NULL;
@@ -1637,8 +1642,10 @@ again:
set_bit(R5_Discard, &dev->flags);
else {
tx = async_copy_data(1, wbi, &dev->page,
- dev->sector, tx, sh);
- if (dev->page != dev->orig_page) {
+ dev->sector, tx, sh,
+ r5c_is_writeback(conf->log));
+ if (dev->page != dev->orig_page &&
+ !r5c_is_writeback(conf->log)) {
set_bit(R5_SkipCopy, &dev->flags);
clear_bit(R5_UPTODATE, &dev->flags);
clear_bit(R5_OVERWRITE, &dev->flags);
@@ -1746,7 +1753,8 @@ again:
xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if (head_sh->dev[i].written)
+ if (head_sh->dev[i].written ||
+ test_bit(R5_InJournal, &head_sh->dev[i].flags))
xor_srcs[count++] = dev->page;
}
} else {
@@ -2000,7 +2008,10 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
spin_lock_init(&sh->batch_lock);
INIT_LIST_HEAD(&sh->batch_list);
INIT_LIST_HEAD(&sh->lru);
+ INIT_LIST_HEAD(&sh->r5c);
+ INIT_LIST_HEAD(&sh->log_list);
atomic_set(&sh->count, 1);
+ sh->log_start = MaxSector;
for (i = 0; i < disks; i++) {
struct r5dev *dev = &sh->dev[i];
@@ -2240,10 +2251,24 @@ static int resize_stripes(struct r5conf *conf, int newsize)
*/
ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
if (ndisks) {
- for (i=0; i<conf->raid_disks; i++)
+ for (i = 0; i < conf->pool_size; i++)
ndisks[i] = conf->disks[i];
- kfree(conf->disks);
- conf->disks = ndisks;
+
+ for (i = conf->pool_size; i < newsize; i++) {
+ ndisks[i].extra_page = alloc_page(GFP_NOIO);
+ if (!ndisks[i].extra_page)
+ err = -ENOMEM;
+ }
+
+ if (err) {
+ for (i = conf->pool_size; i < newsize; i++)
+ if (ndisks[i].extra_page)
+ put_page(ndisks[i].extra_page);
+ kfree(ndisks);
+ } else {
+ kfree(conf->disks);
+ conf->disks = ndisks;
+ }
} else
err = -ENOMEM;
@@ -2342,10 +2367,8 @@ static void raid5_end_read_request(struct bio * bi)
* replacement device. We just fail those on
* any error
*/
- printk_ratelimited(
- KERN_INFO
- "md/raid:%s: read error corrected"
- " (%lu sectors at %llu on %s)\n",
+ pr_info_ratelimited(
+ "md/raid:%s: read error corrected (%lu sectors at %llu on %s)\n",
mdname(conf->mddev), STRIPE_SECTORS,
(unsigned long long)s,
bdevname(rdev->bdev, b));
@@ -2365,36 +2388,29 @@ static void raid5_end_read_request(struct bio * bi)
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
atomic_inc(&rdev->read_errors);
if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
- printk_ratelimited(
- KERN_WARNING
- "md/raid:%s: read error on replacement device "
- "(sector %llu on %s).\n",
+ pr_warn_ratelimited(
+ "md/raid:%s: read error on replacement device (sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)s,
bdn);
else if (conf->mddev->degraded >= conf->max_degraded) {
set_bad = 1;
- printk_ratelimited(
- KERN_WARNING
- "md/raid:%s: read error not correctable "
- "(sector %llu on %s).\n",
+ pr_warn_ratelimited(
+ "md/raid:%s: read error not correctable (sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)s,
bdn);
} else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
/* Oh, no!!! */
set_bad = 1;
- printk_ratelimited(
- KERN_WARNING
- "md/raid:%s: read error NOT corrected!! "
- "(sector %llu on %s).\n",
+ pr_warn_ratelimited(
+ "md/raid:%s: read error NOT corrected!! (sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)s,
bdn);
} else if (atomic_read(&rdev->read_errors)
> conf->max_nr_stripes)
- printk(KERN_WARNING
- "md/raid:%s: Too many read errors, failing device %s.\n",
+ pr_warn("md/raid:%s: Too many read errors, failing device %s.\n",
mdname(conf->mddev), bdn);
else
retry = 1;
@@ -2526,15 +2542,14 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
- printk(KERN_ALERT
- "md/raid:%s: Disk failure on %s, disabling device.\n"
- "md/raid:%s: Operation continuing on %d devices.\n",
- mdname(mddev),
- bdevname(rdev->bdev, b),
- mdname(mddev),
- conf->raid_disks - mddev->degraded);
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
+ pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
+ "md/raid:%s: Operation continuing on %d devices.\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b),
+ mdname(mddev),
+ conf->raid_disks - mddev->degraded);
}
/*
@@ -2856,8 +2871,8 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
previous, &dummy1, &sh2);
if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
|| sh2.qd_idx != sh->qd_idx) {
- printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
- mdname(conf->mddev));
+ pr_warn("md/raid:%s: compute_blocknr: map not correct\n",
+ mdname(conf->mddev));
return 0;
}
return r_sector;
@@ -2872,6 +2887,13 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
int level = conf->level;
if (rcw) {
+ /*
+ * In some cases, handle_stripe_dirtying initially decided to
+ * run rmw and allocated an extra page for prexor. However, rcw
+ * turned out to be cheaper later on. We need to free the extra
+ * page now, because we won't be able to do that in
+ * ops_complete_prexor().
+ */
+ r5c_release_extra_page(sh);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
@@ -2882,6 +2904,9 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
if (!expand)
clear_bit(R5_UPTODATE, &dev->flags);
s->locked++;
+ } else if (test_bit(R5_InJournal, &dev->flags)) {
+ set_bit(R5_LOCKED, &dev->flags);
+ s->locked++;
}
}
/* if we are not expanding this is a proper write request, and
@@ -2921,6 +2946,9 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
set_bit(R5_LOCKED, &dev->flags);
clear_bit(R5_UPTODATE, &dev->flags);
s->locked++;
+ } else if (test_bit(R5_InJournal, &dev->flags)) {
+ set_bit(R5_LOCKED, &dev->flags);
+ s->locked++;
}
}
if (!s->locked)
@@ -3564,10 +3592,10 @@ unhash:
break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
}
-static void handle_stripe_dirtying(struct r5conf *conf,
- struct stripe_head *sh,
- struct stripe_head_state *s,
- int disks)
+static int handle_stripe_dirtying(struct r5conf *conf,
+ struct stripe_head *sh,
+ struct stripe_head_state *s,
+ int disks)
{
int rmw = 0, rcw = 0, i;
sector_t recovery_cp = conf->mddev->recovery_cp;
@@ -3592,9 +3620,12 @@ static void handle_stripe_dirtying(struct r5conf *conf,
} else for (i = disks; i--; ) {
/* would I have to read this buffer for read_modify_write */
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
+ if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx ||
+ test_bit(R5_InJournal, &dev->flags)) &&
!test_bit(R5_LOCKED, &dev->flags) &&
- !(test_bit(R5_UPTODATE, &dev->flags) ||
+ !((test_bit(R5_UPTODATE, &dev->flags) &&
+ (!test_bit(R5_InJournal, &dev->flags) ||
+ dev->page != dev->orig_page)) ||
test_bit(R5_Wantcompute, &dev->flags))) {
if (test_bit(R5_Insync, &dev->flags))
rmw++;
@@ -3606,13 +3637,15 @@ static void handle_stripe_dirtying(struct r5conf *conf,
i != sh->pd_idx && i != sh->qd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
- test_bit(R5_Wantcompute, &dev->flags))) {
+ test_bit(R5_InJournal, &dev->flags) ||
+ test_bit(R5_Wantcompute, &dev->flags))) {
if (test_bit(R5_Insync, &dev->flags))
rcw++;
else
rcw += 2*disks;
}
}
+
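+ /*
+ * In write-back mode, blocks whose latest data lives only in the
+ * journal (R5_InJournal) still have to reach the RAID disks, so they
+ * are treated as dirty in the rmw/rcw accounting above.
+ */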
pr_debug("for sector %llu, rmw=%d rcw=%d\n",
(unsigned long long)sh->sector, rmw, rcw);
set_bit(STRIPE_HANDLE, &sh->state);
@@ -3624,10 +3657,44 @@ static void handle_stripe_dirtying(struct r5conf *conf,
(unsigned long long)sh->sector, rmw);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
+ if (test_bit(R5_InJournal, &dev->flags) &&
+ dev->page == dev->orig_page &&
+ !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) {
+ /* alloc page for prexor */
+ struct page *p = alloc_page(GFP_NOIO);
+
+ if (p) {
+ dev->orig_page = p;
+ continue;
+ }
+
+ /*
+ * alloc_page() failed, try to use
+ * disk_info->extra_page
+ */
+ if (!test_and_set_bit(R5C_EXTRA_PAGE_IN_USE,
+ &conf->cache_state)) {
+ r5c_use_extra_page(sh);
+ break;
+ }
+
+ /* extra_page in use, add to delayed_list */
+ set_bit(STRIPE_DELAYED, &sh->state);
+ s->waiting_extra_page = 1;
+ return -EAGAIN;
+ }
+ }
+
+ for (i = disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ if ((dev->towrite ||
+ i == sh->pd_idx || i == sh->qd_idx ||
+ test_bit(R5_InJournal, &dev->flags)) &&
!test_bit(R5_LOCKED, &dev->flags) &&
- !(test_bit(R5_UPTODATE, &dev->flags) ||
- test_bit(R5_Wantcompute, &dev->flags)) &&
+ !((test_bit(R5_UPTODATE, &dev->flags) &&
+ (!test_bit(R5_InJournal, &dev->flags) ||
+ dev->page != dev->orig_page)) ||
+ test_bit(R5_Wantcompute, &dev->flags)) &&
test_bit(R5_Insync, &dev->flags)) {
if (test_bit(STRIPE_PREREAD_ACTIVE,
&sh->state)) {
@@ -3653,6 +3720,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
i != sh->pd_idx && i != sh->qd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
+ test_bit(R5_InJournal, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {
rcw++;
if (test_bit(R5_Insync, &dev->flags) &&
@@ -3692,8 +3760,9 @@ static void handle_stripe_dirtying(struct r5conf *conf,
*/
if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
(s->locked == 0 && (rcw == 0 || rmw == 0) &&
- !test_bit(STRIPE_BIT_DELAY, &sh->state)))
+ !test_bit(STRIPE_BIT_DELAY, &sh->state)))
schedule_reconstruction(sh, s, rcw == 0, 0);
+ return 0;
}
static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
@@ -3777,7 +3846,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
case check_state_compute_run:
break;
default:
- printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
+ pr_err("%s: unknown check_state: %d sector: %llu\n",
__func__, sh->check_state,
(unsigned long long) sh->sector);
BUG();
@@ -3941,9 +4010,9 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
case check_state_compute_run:
break;
default:
- printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
- __func__, sh->check_state,
- (unsigned long long) sh->sector);
+ pr_warn("%s: unknown check_state: %d sector: %llu\n",
+ __func__, sh->check_state,
+ (unsigned long long) sh->sector);
BUG();
}
}
@@ -4183,6 +4252,11 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (rdev && !test_bit(Faulty, &rdev->flags))
do_recovery = 1;
}
+
+ if (test_bit(R5_InJournal, &dev->flags))
+ s->injournal++;
+ if (test_bit(R5_InJournal, &dev->flags) && dev->written)
+ s->just_cached++;
}
if (test_bit(STRIPE_SYNCING, &sh->state)) {
/* If there is a failed device being replaced,
@@ -4411,7 +4485,8 @@ static void handle_stripe(struct stripe_head *sh)
struct r5dev *dev = &sh->dev[i];
if (test_bit(R5_LOCKED, &dev->flags) &&
(i == sh->pd_idx || i == sh->qd_idx ||
- dev->written)) {
+ dev->written || test_bit(R5_InJournal,
+ &dev->flags))) {
pr_debug("Writing block %d\n", i);
set_bit(R5_Wantwrite, &dev->flags);
if (prexor)
@@ -4451,6 +4526,10 @@ static void handle_stripe(struct stripe_head *sh)
test_bit(R5_Discard, &qdev->flags))))))
handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
+ if (s.just_cached)
+ r5c_handle_cached_data_endio(conf, sh, disks, &s.return_bi);
+ r5l_stripe_write_finished(sh);
+
/* Now we might consider reading some blocks, either to check/generate
* parity, or to satisfy requests
* or to load a block that is being partially written.
@@ -4462,14 +4541,51 @@ static void handle_stripe(struct stripe_head *sh)
|| s.expanding)
handle_stripe_fill(sh, &s, disks);
- /* Now to consider new write requests and what else, if anything
- * should be read. We do not handle new writes when:
+ /*
+ * When the stripe finishes full journal write cycle (write to journal
+ * and raid disk), this is the clean up procedure so it is ready for
+ * next operation.
+ */
+ r5c_finish_stripe_write_out(conf, sh, &s);
+
+ /*
+ * Now to consider new write requests, cache write-back and what else,
+ * if anything, should be read. We do not handle new writes when:
* 1/ A 'write' operation (copy+xor) is already in flight.
* 2/ A 'check' operation is in flight, as it may clobber the parity
* block.
+ * 3/ A r5c cache log write is in flight.
*/
- if (s.to_write && !sh->reconstruct_state && !sh->check_state)
- handle_stripe_dirtying(conf, sh, &s, disks);
+
+ if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) {
+ if (!r5c_is_writeback(conf->log)) {
+ if (s.to_write)
+ handle_stripe_dirtying(conf, sh, &s, disks);
+ } else { /* write back cache */
+ int ret = 0;
+
+ /* First, try handle writes in caching phase */
+ if (s.to_write)
+ ret = r5c_try_caching_write(conf, sh, &s,
+ disks);
+ /*
+ * If caching phase failed: ret == -EAGAIN
+ * OR
+ * stripe under reclaim: !caching && injournal
+ *
+ * fall back to handle_stripe_dirtying()
+ */
+ if (ret == -EAGAIN ||
+ /* stripe under reclaim: !caching && injournal */
+ (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
+ s.injournal > 0)) {
+ ret = handle_stripe_dirtying(conf, sh, &s,
+ disks);
+ if (ret == -EAGAIN)
+ goto finish;
+ }
+ }
+ }
/* maybe we need to check and possibly fix the parity for this stripe
* Any reads will already have been scheduled, so we just see if enough
@@ -4640,9 +4756,7 @@ finish:
}
if (!bio_list_empty(&s.return_bi)) {
- if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags) &&
- (s.failed <= conf->max_degraded ||
- conf->mddev->external == 0)) {
+ if (test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) {
spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->return_bi, &s.return_bi);
spin_unlock_irq(&conf->device_lock);
@@ -4698,6 +4812,10 @@ static int raid5_congested(struct mddev *mddev, int bits)
if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
return 1;
+
+ /* Also checks whether there is pressure on r5cache log space */
+ if (test_bit(R5C_LOG_TIGHT, &conf->cache_state))
+ return 1;
if (conf->quiesce)
return 1;
if (atomic_read(&conf->empty_inactive_list_nr))
@@ -5167,6 +5285,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
int remaining;
DEFINE_WAIT(w);
bool do_prepare;
+ bool do_flush = false;
if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
int ret = r5l_handle_flush_request(conf->log, bi);
@@ -5178,6 +5297,11 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
return;
}
/* ret == -EAGAIN, fallback */
+ /*
+ * if r5l_handle_flush_request() didn't clear REQ_PREFLUSH,
+ * we need to flush the journal device
+ */
+ do_flush = bi->bi_opf & REQ_PREFLUSH;
}
md_write_start(mddev, bi);
@@ -5188,6 +5312,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
* data on failed drives.
*/
if (rw == READ && mddev->degraded == 0 &&
+ !r5c_is_writeback(conf->log) &&
mddev->reshape_position == MaxSector) {
bi = chunk_aligned_read(mddev, bi);
if (!bi)
@@ -5316,6 +5441,12 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
do_prepare = true;
goto retry;
}
+ if (do_flush) {
+ set_bit(STRIPE_R5C_PREFLUSH, &sh->state);
+ /* we only need flush for one stripe */
+ do_flush = false;
+ }
+
set_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
if ((!sh->batch_head || sh == sh->batch_head) &&
@@ -5481,9 +5612,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = sector_nr;
conf->reshape_checkpoint = jiffies;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
- wait_event(mddev->sb_wait, mddev->flags == 0 ||
+ wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
test_bit(MD_RECOVERY_INTR, &mddev->recovery));
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
return 0;
@@ -5579,10 +5710,10 @@ finish:
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = sector_nr;
conf->reshape_checkpoint = jiffies;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_DEVS, &mddev->flags)
+ !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)
|| test_bit(MD_RECOVERY_INTR, &mddev->recovery));
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
goto ret;
@@ -5857,10 +5988,10 @@ static void raid5d(struct md_thread *thread)
md_check_recovery(mddev);
if (!bio_list_empty(&conf->return_bi) &&
- !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
struct bio_list tmp = BIO_EMPTY_LIST;
spin_lock_irq(&conf->device_lock);
- if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
bio_list_merge(&tmp, &conf->return_bi);
bio_list_init(&conf->return_bi);
}
@@ -5907,7 +6038,7 @@ static void raid5d(struct md_thread *thread)
break;
handled += batch_size;
- if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
+ if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) {
spin_unlock_irq(&conf->device_lock);
md_check_recovery(mddev);
spin_lock_irq(&conf->device_lock);
@@ -6237,6 +6368,7 @@ static struct attribute *raid5_attrs[] = {
&raid5_group_thread_cnt.attr,
&raid5_skip_copy.attr,
&raid5_rmw_level.attr,
+ &r5c_journal_mode.attr,
NULL,
};
static struct attribute_group raid5_attrs_group = {
@@ -6363,6 +6495,8 @@ static void raid5_free_percpu(struct r5conf *conf)
static void free_conf(struct r5conf *conf)
{
+ int i;
+
if (conf->log)
r5l_exit_log(conf->log);
if (conf->shrinker.nr_deferred)
@@ -6371,6 +6505,9 @@ static void free_conf(struct r5conf *conf)
free_thread_groups(conf);
shrink_stripes(conf);
raid5_free_percpu(conf);
+ for (i = 0; i < conf->pool_size; i++)
+ if (conf->disks[i].extra_page)
+ put_page(conf->disks[i].extra_page);
kfree(conf->disks);
kfree(conf->stripe_hashtbl);
kfree(conf);
@@ -6382,8 +6519,8 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
if (alloc_scratch_buffer(conf, percpu)) {
- pr_err("%s: failed memory allocation for cpu%u\n",
- __func__, cpu);
+ pr_warn("%s: failed memory allocation for cpu%u\n",
+ __func__, cpu);
return -ENOMEM;
}
return 0;
@@ -6453,29 +6590,29 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if (mddev->new_level != 5
&& mddev->new_level != 4
&& mddev->new_level != 6) {
- printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
- mdname(mddev), mddev->new_level);
+ pr_warn("md/raid:%s: raid level not set to 4/5/6 (%d)\n",
+ mdname(mddev), mddev->new_level);
return ERR_PTR(-EIO);
}
if ((mddev->new_level == 5
&& !algorithm_valid_raid5(mddev->new_layout)) ||
(mddev->new_level == 6
&& !algorithm_valid_raid6(mddev->new_layout))) {
- printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
- mdname(mddev), mddev->new_layout);
+ pr_warn("md/raid:%s: layout %d not supported\n",
+ mdname(mddev), mddev->new_layout);
return ERR_PTR(-EIO);
}
if (mddev->new_level == 6 && mddev->raid_disks < 4) {
- printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
- mdname(mddev), mddev->raid_disks);
+ pr_warn("md/raid:%s: not enough configured devices (%d, minimum 4)\n",
+ mdname(mddev), mddev->raid_disks);
return ERR_PTR(-EINVAL);
}
if (!mddev->new_chunk_sectors ||
(mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
!is_power_of_2(mddev->new_chunk_sectors)) {
- printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
- mdname(mddev), mddev->new_chunk_sectors << 9);
+ pr_warn("md/raid:%s: invalid chunk size %d\n",
+ mdname(mddev), mddev->new_chunk_sectors << 9);
return ERR_PTR(-EINVAL);
}
@@ -6517,9 +6654,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
GFP_KERNEL);
+
if (!conf->disks)
goto abort;
+ for (i = 0; i < max_disks; i++) {
+ conf->disks[i].extra_page = alloc_page(GFP_KERNEL);
+ if (!conf->disks[i].extra_page)
+ goto abort;
+ }
+
conf->mddev = mddev;
if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
@@ -6540,6 +6684,11 @@ static struct r5conf *setup_conf(struct mddev *mddev)
for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
INIT_LIST_HEAD(conf->temp_inactive_list + i);
+ atomic_set(&conf->r5c_cached_full_stripes, 0);
+ INIT_LIST_HEAD(&conf->r5c_full_stripe_list);
+ atomic_set(&conf->r5c_cached_partial_stripes, 0);
+ INIT_LIST_HEAD(&conf->r5c_partial_stripe_list);
+
conf->level = mddev->new_level;
conf->chunk_sectors = mddev->new_chunk_sectors;
if (raid5_alloc_percpu(conf) != 0)
@@ -6566,9 +6715,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if (test_bit(In_sync, &rdev->flags)) {
char b[BDEVNAME_SIZE];
- printk(KERN_INFO "md/raid:%s: device %s operational as raid"
- " disk %d\n",
- mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
+ pr_info("md/raid:%s: device %s operational as raid disk %d\n",
+ mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
} else if (rdev->saved_raid_disk != raid_disk)
/* Cannot rely on bitmap to complete recovery */
conf->fullsync = 1;
@@ -6602,21 +6750,18 @@ static struct r5conf *setup_conf(struct mddev *mddev)
((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4);
conf->min_nr_stripes = max(NR_STRIPES, stripes);
if (conf->min_nr_stripes != NR_STRIPES)
- printk(KERN_INFO
- "md/raid:%s: force stripe size %d for reshape\n",
+ pr_info("md/raid:%s: force stripe size %d for reshape\n",
mdname(mddev), conf->min_nr_stripes);
}
memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
if (grow_stripes(conf, conf->min_nr_stripes)) {
- printk(KERN_ERR
- "md/raid:%s: couldn't allocate %dkB for buffers\n",
- mdname(mddev), memory);
+ pr_warn("md/raid:%s: couldn't allocate %dkB for buffers\n",
+ mdname(mddev), memory);
goto abort;
} else
- printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
- mdname(mddev), memory);
+ pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory);
/*
* Losing a stripe head costs more than the time to refill it,
* it reduces the queue depth and so can hurt throughput.
@@ -6628,18 +6773,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->shrinker.batch = 128;
conf->shrinker.flags = 0;
if (register_shrinker(&conf->shrinker)) {
- printk(KERN_ERR
- "md/raid:%s: couldn't register shrinker.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: couldn't register shrinker.\n",
+ mdname(mddev));
goto abort;
}
sprintf(pers_name, "raid%d", mddev->new_level);
conf->thread = md_register_thread(raid5d, mddev, pers_name);
if (!conf->thread) {
- printk(KERN_ERR
- "md/raid:%s: couldn't allocate thread.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: couldn't allocate thread.\n",
+ mdname(mddev));
goto abort;
}
@@ -6692,9 +6835,8 @@ static int raid5_run(struct mddev *mddev)
int first = 1;
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "md/raid:%s: not clean"
- " -- starting background reconstruction\n",
- mdname(mddev));
+ pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
+ mdname(mddev));
rdev_for_each(rdev, mddev) {
long long diff;
@@ -6737,15 +6879,14 @@ static int raid5_run(struct mddev *mddev)
int new_data_disks;
if (journal_dev) {
- printk(KERN_ERR "md/raid:%s: don't support reshape with journal - aborting.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: don't support reshape with journal - aborting.\n",
+ mdname(mddev));
return -EINVAL;
}
if (mddev->new_level != mddev->level) {
- printk(KERN_ERR "md/raid:%s: unsupported reshape "
- "required - aborting.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: unsupported reshape required - aborting.\n",
+ mdname(mddev));
return -EINVAL;
}
old_disks = mddev->raid_disks - mddev->delta_disks;
@@ -6760,8 +6901,8 @@ static int raid5_run(struct mddev *mddev)
chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors);
new_data_disks = mddev->raid_disks - max_degraded;
if (sector_div(here_new, chunk_sectors * new_data_disks)) {
- printk(KERN_ERR "md/raid:%s: reshape_position not "
- "on a stripe boundary\n", mdname(mddev));
+ pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n",
+ mdname(mddev));
return -EINVAL;
}
reshape_offset = here_new * chunk_sectors;
@@ -6782,10 +6923,8 @@ static int raid5_run(struct mddev *mddev)
abs(min_offset_diff) >= mddev->new_chunk_sectors)
/* not really in-place - so OK */;
else if (mddev->ro == 0) {
- printk(KERN_ERR "md/raid:%s: in-place reshape "
- "must be started in read-only mode "
- "- aborting\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: in-place reshape must be started in read-only mode - aborting\n",
+ mdname(mddev));
return -EINVAL;
}
} else if (mddev->reshape_backwards
@@ -6794,13 +6933,11 @@ static int raid5_run(struct mddev *mddev)
: (here_new * chunk_sectors >=
here_old * chunk_sectors + (-min_offset_diff))) {
/* Reading from the same stripe as writing to - bad */
- printk(KERN_ERR "md/raid:%s: reshape_position too early for "
- "auto-recovery - aborting.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: reshape_position too early for auto-recovery - aborting.\n",
+ mdname(mddev));
return -EINVAL;
}
- printk(KERN_INFO "md/raid:%s: reshape will continue\n",
- mdname(mddev));
+ pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev));
/* OK, we should be able to continue; */
} else {
BUG_ON(mddev->level != mddev->new_level);
@@ -6819,8 +6956,8 @@ static int raid5_run(struct mddev *mddev)
if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
if (!journal_dev) {
- pr_err("md/raid:%s: journal disk is missing, force array readonly\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: journal disk is missing, force array readonly\n",
+ mdname(mddev));
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
} else if (mddev->recovery_cp == MaxSector)
@@ -6847,8 +6984,7 @@ static int raid5_run(struct mddev *mddev)
if (conf->disks[i].replacement &&
conf->reshape_progress != MaxSector) {
/* replacements and reshape simply do not mix. */
- printk(KERN_ERR "md: cannot handle concurrent "
- "replacement and reshape.\n");
+ pr_warn("md: cannot handle concurrent replacement and reshape.\n");
goto abort;
}
if (test_bit(In_sync, &rdev->flags)) {
@@ -6890,8 +7026,7 @@ static int raid5_run(struct mddev *mddev)
mddev->degraded = calc_degraded(conf);
if (has_failed(conf)) {
- printk(KERN_ERR "md/raid:%s: not enough operational devices"
- " (%d/%d failed)\n",
+ pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n",
mdname(mddev), mddev->degraded, conf->raid_disks);
goto abort;
}
@@ -6903,29 +7038,19 @@ static int raid5_run(struct mddev *mddev)
if (mddev->degraded > dirty_parity_disks &&
mddev->recovery_cp != MaxSector) {
if (mddev->ok_start_degraded)
- printk(KERN_WARNING
- "md/raid:%s: starting dirty degraded array"
- " - data corruption possible.\n",
- mdname(mddev));
+ pr_crit("md/raid:%s: starting dirty degraded array - data corruption possible.\n",
+ mdname(mddev));
else {
- printk(KERN_ERR
- "md/raid:%s: cannot start dirty degraded array.\n",
- mdname(mddev));
+ pr_crit("md/raid:%s: cannot start dirty degraded array.\n",
+ mdname(mddev));
goto abort;
}
}
- if (mddev->degraded == 0)
- printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
- " devices, algorithm %d\n", mdname(mddev), conf->level,
- mddev->raid_disks-mddev->degraded, mddev->raid_disks,
- mddev->new_layout);
- else
- printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
- " out of %d devices, algorithm %d\n",
- mdname(mddev), conf->level,
- mddev->raid_disks - mddev->degraded,
- mddev->raid_disks, mddev->new_layout);
+ pr_info("md/raid:%s: raid level %d active with %d out of %d devices, algorithm %d\n",
+ mdname(mddev), conf->level,
+ mddev->raid_disks-mddev->degraded, mddev->raid_disks,
+ mddev->new_layout);
print_raid5_conf(conf);
@@ -6945,9 +7070,8 @@ static int raid5_run(struct mddev *mddev)
mddev->to_remove = NULL;
else if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
- printk(KERN_WARNING
- "raid5: failed to create sysfs attributes for %s\n",
- mdname(mddev));
+ pr_warn("raid5: failed to create sysfs attributes for %s\n",
+ mdname(mddev));
md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
if (mddev->queue) {
@@ -6979,6 +7103,15 @@ static int raid5_run(struct mddev *mddev)
stripe = (stripe | (stripe-1)) + 1;
mddev->queue->limits.discard_alignment = stripe;
mddev->queue->limits.discard_granularity = stripe;
+
+ /*
+ * We use a 16-bit counter of active stripes in bi_phys_segments
+ * (minus one for over-loaded initialization)
+ */
+ blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS);
+ blk_queue_max_discard_sectors(mddev->queue,
+ 0xfffe * STRIPE_SECTORS);
+
/*
* unaligned part of discard request will be ignored, so can't
* guarantee discard_zeroes_data
@@ -7035,9 +7168,10 @@ static int raid5_run(struct mddev *mddev)
if (journal_dev) {
char b[BDEVNAME_SIZE];
- printk(KERN_INFO"md/raid:%s: using device %s as journal\n",
- mdname(mddev), bdevname(journal_dev->bdev, b));
- r5l_init_log(conf, journal_dev);
+ pr_debug("md/raid:%s: using device %s as journal\n",
+ mdname(mddev), bdevname(journal_dev->bdev, b));
+ if (r5l_init_log(conf, journal_dev))
+ goto abort;
}
return 0;
@@ -7046,7 +7180,7 @@ abort:
print_raid5_conf(conf);
free_conf(conf);
mddev->private = NULL;
- printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
+ pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev));
return -EIO;
}
@@ -7080,12 +7214,12 @@ static void print_raid5_conf (struct r5conf *conf)
int i;
struct disk_info *tmp;
- printk(KERN_DEBUG "RAID conf printout:\n");
+ pr_debug("RAID conf printout:\n");
if (!conf) {
- printk("(conf==NULL)\n");
+ pr_debug("(conf==NULL)\n");
return;
}
- printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
+ pr_debug(" --- level:%d rd:%d wd:%d\n", conf->level,
conf->raid_disks,
conf->raid_disks - conf->mddev->degraded);
@@ -7093,7 +7227,7 @@ static void print_raid5_conf (struct r5conf *conf)
char b[BDEVNAME_SIZE];
tmp = conf->disks + i;
if (tmp->rdev)
- printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
+ pr_debug(" disk %d, o:%d, dev:%s\n",
i, !test_bit(Faulty, &tmp->rdev->flags),
bdevname(tmp->rdev->bdev, b));
}
@@ -7241,8 +7375,8 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
* write requests running. We should be safe
*/
r5l_init_log(conf, rdev);
- printk(KERN_INFO"md/raid:%s: using device %s as journal\n",
- mdname(mddev), bdevname(rdev->bdev, b));
+ pr_debug("md/raid:%s: using device %s as journal\n",
+ mdname(mddev), bdevname(rdev->bdev, b));
return 0;
}
if (mddev->recovery_disabled == conf->recovery_disabled)
@@ -7346,10 +7480,10 @@ static int check_stripe_cache(struct mddev *mddev)
> conf->min_nr_stripes ||
((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
> conf->min_nr_stripes) {
- printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
- mdname(mddev),
- ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
- / STRIPE_SIZE)*4);
+ pr_warn("md/raid:%s: reshape: not enough stripes. Needed %lu\n",
+ mdname(mddev),
+ ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
+ / STRIPE_SIZE)*4);
return 0;
}
return 1;
@@ -7430,8 +7564,8 @@ static int raid5_start_reshape(struct mddev *mddev)
*/
if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
< mddev->array_sectors) {
- printk(KERN_ERR "md/raid:%s: array size must be reduced "
- "before number of disks\n", mdname(mddev));
+ pr_warn("md/raid:%s: array size must be reduced before number of disks\n",
+ mdname(mddev));
return -EINVAL;
}
@@ -7501,7 +7635,7 @@ static int raid5_start_reshape(struct mddev *mddev)
}
mddev->raid_disks = conf->raid_disks;
mddev->reshape_position = conf->reshape_progress;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -7619,6 +7753,7 @@ static void raid5_quiesce(struct mddev *mddev, int state)
/* '2' tells resync/reshape to pause so that all
* active stripes can drain
*/
+ r5c_flush_cache(conf, INT_MAX);
conf->quiesce = 2;
wait_event_cmd(conf->wait_for_quiescent,
atomic_read(&conf->active_stripes) == 0 &&
@@ -7649,8 +7784,8 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level)
/* for raid0 takeover only one zone is supported */
if (raid0_conf->nr_strip_zones > 1) {
- printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: cannot takeover raid0 with more than one zone.\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
@@ -7671,6 +7806,7 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level)
static void *raid5_takeover_raid1(struct mddev *mddev)
{
int chunksect;
+ void *ret;
if (mddev->raid_disks != 2 ||
mddev->degraded > 1)
@@ -7692,7 +7828,10 @@ static void *raid5_takeover_raid1(struct mddev *mddev)
mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
mddev->new_chunk_sectors = chunksect;
- return setup_conf(mddev);
+ ret = setup_conf(mddev);
+ if (!IS_ERR_VALUE(ret))
+ clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+ return ret;
}
static void *raid5_takeover_raid6(struct mddev *mddev)
@@ -7762,7 +7901,7 @@ static int raid5_check_reshape(struct mddev *mddev)
conf->chunk_sectors = new_chunk ;
mddev->chunk_sectors = new_chunk;
}
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
}
return check_reshape(mddev);
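
Most of the raid5.c hunks above are a mechanical conversion from multi-line printk(KERN_*) calls to the single-line pr_info()/pr_warn()/pr_crit() helpers. A rough sketch of the pattern, with a made-up pr_fmt prefix and helper that are not part of the patch:

#define pr_fmt(fmt) "md/raid: " fmt

#include <linux/printk.h>

static void report_not_enough_devices(const char *name, int failed, int disks)
{
	/* expands to printk(KERN_CRIT "md/raid: %s: ..." ...) on one line */
	pr_crit("%s: not enough operational devices (%d/%d failed)\n",
		name, failed, disks);
}
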
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 57ec49f0839e..ed8e1362ab36 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -226,6 +226,8 @@ struct stripe_head {
struct r5l_io_unit *log_io;
struct list_head log_list;
+ sector_t log_start; /* first meta block on the journal */
+ struct list_head r5c; /* for r5c_cache->stripe_in_journal */
/**
* struct stripe_operations
* @target - STRIPE_OP_COMPUTE_BLK target
@@ -264,6 +266,7 @@ struct stripe_head_state {
int syncing, expanding, expanded, replacing;
int locked, uptodate, to_read, to_write, failed, written;
int to_fill, compute, req_compute, non_overwrite;
+ int injournal, just_cached;
int failed_num[2];
int p_failed, q_failed;
int dec_preread_active;
@@ -273,6 +276,7 @@ struct stripe_head_state {
struct md_rdev *blocked_rdev;
int handle_bad_blocks;
int log_failed;
+ int waiting_extra_page;
};
/* Flags for struct r5dev.flags */
@@ -313,6 +317,11 @@ enum r5dev_flags {
*/
R5_Discard, /* Discard the stripe */
R5_SkipCopy, /* Don't copy data from bio to stripe cache */
+ R5_InJournal, /* data being written is in the journal device.
+ * if R5_InJournal is set for parity pd_idx, all the
+ * data and parity being written are in the journal
+ * device
+ */
};
/*
@@ -345,7 +354,30 @@ enum {
STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add
* to batch yet.
*/
- STRIPE_LOG_TRAPPED, /* trapped into log */
+ STRIPE_LOG_TRAPPED, /* trapped into log (see raid5-cache.c)
+ * this bit is used in two scenarios:
+ *
+ * 1. write-out phase
+ * set in first entry of r5l_write_stripe
+ * clear in second entry of r5l_write_stripe
+ * used to bypass logic in handle_stripe
+ *
+ * 2. caching phase
+ * set in r5c_try_caching_write()
+ * clear when journal write is done
+ * used to initiate r5c_cache_data()
+ * also used to bypass logic in handle_stripe
+ */
+ STRIPE_R5C_CACHING, /* the stripe is in caching phase
+ * see more detail in raid5-cache.c
+ */
+ STRIPE_R5C_PARTIAL_STRIPE, /* in r5c cache (to-be/being handled or
+ * in conf->r5c_partial_stripe_list)
+ */
+ STRIPE_R5C_FULL_STRIPE, /* in r5c cache (to-be/being handled or
+ * in conf->r5c_full_stripe_list)
+ */
+ STRIPE_R5C_PREFLUSH, /* need to flush journal device */
};
#define STRIPE_EXPAND_SYNC_FLAGS \
@@ -408,8 +440,86 @@ enum {
struct disk_info {
struct md_rdev *rdev, *replacement;
+ struct page *extra_page; /* extra page to use in prexor */
};
+/*
+ * Stripe cache
+ */
+
+#define NR_STRIPES 256
+#define STRIPE_SIZE PAGE_SIZE
+#define STRIPE_SHIFT (PAGE_SHIFT - 9)
+#define STRIPE_SECTORS (STRIPE_SIZE>>9)
+#define IO_THRESHOLD 1
+#define BYPASS_THRESHOLD 1
+#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
+#define HASH_MASK (NR_HASH - 1)
+#define MAX_STRIPE_BATCH 8
+
+/* bio's attached to a stripe+device for I/O are linked together in bi_sector
+ * order without overlap. There may be several bio's per stripe+device, and
+ * a bio could span several devices.
+ * When walking this list for a particular stripe+device, we must never proceed
+ * beyond a bio that extends past this device, as the next bio might no longer
+ * be valid.
+ * This function is used to determine the 'next' bio in the list, given the
+ * sector of the current stripe+device
+ */
+static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
+{
+ int sectors = bio_sectors(bio);
+
+ if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
+ return bio->bi_next;
+ else
+ return NULL;
+}
+
+/*
+ * We maintain a biased count of active stripes in the bottom 16 bits of
+ * bi_phys_segments, and a count of processed stripes in the upper 16 bits
+ */
+static inline int raid5_bi_processed_stripes(struct bio *bio)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+ return (atomic_read(segments) >> 16) & 0xffff;
+}
+
+static inline int raid5_dec_bi_active_stripes(struct bio *bio)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+ return atomic_sub_return(1, segments) & 0xffff;
+}
+
+static inline void raid5_inc_bi_active_stripes(struct bio *bio)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+ atomic_inc(segments);
+}
+
+static inline void raid5_set_bi_processed_stripes(struct bio *bio,
+ unsigned int cnt)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+ int old, new;
+
+ do {
+ old = atomic_read(segments);
+ new = (old & 0xffff) | (cnt << 16);
+ } while (atomic_cmpxchg(segments, old, new) != old);
+}
+
+static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+ atomic_set(segments, cnt);
+}
+
/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
* This is because we sometimes take all the spinlocks
* and creating that much locking depth can cause
@@ -432,6 +542,30 @@ struct r5worker_group {
int stripes_cnt;
};
+enum r5_cache_state {
+ R5_INACTIVE_BLOCKED, /* release of inactive stripes blocked,
+ * waiting for 25% to be free
+ */
+ R5_ALLOC_MORE, /* It might help to allocate another
+ * stripe.
+ */
+ R5_DID_ALLOC, /* A stripe was allocated, don't allocate
+ * more until at least one has been
+ * released. This avoids flooding
+ * the cache.
+ */
+ R5C_LOG_TIGHT, /* log device space tight, need to
+ * prioritize stripes at last_checkpoint
+ */
+ R5C_LOG_CRITICAL, /* log device is running out of space,
+ * only process stripes that are already
+ * occupying the log
+ */
+ R5C_EXTRA_PAGE_IN_USE, /* a stripe is using disk_info.extra_page
+ * for prexor
+ */
+};
+
struct r5conf {
struct hlist_head *stripe_hashtbl;
/* only protect corresponding hash list and inactive_list */
@@ -519,23 +653,18 @@ struct r5conf {
*/
atomic_t active_stripes;
struct list_head inactive_list[NR_STRIPE_HASH_LOCKS];
+
+ atomic_t r5c_cached_full_stripes;
+ struct list_head r5c_full_stripe_list;
+ atomic_t r5c_cached_partial_stripes;
+ struct list_head r5c_partial_stripe_list;
+
atomic_t empty_inactive_list_nr;
struct llist_head released_stripes;
wait_queue_head_t wait_for_quiescent;
wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
unsigned long cache_state;
-#define R5_INACTIVE_BLOCKED 1 /* release of inactive stripes blocked,
- * waiting for 25% to be free
- */
-#define R5_ALLOC_MORE 2 /* It might help to allocate another
- * stripe.
- */
-#define R5_DID_ALLOC 4 /* A stripe was allocated, don't allocate
- * more until at least one has been
- * released. This avoids flooding
- * the cache.
- */
struct shrinker shrinker;
int pool_size; /* number of disks in stripeheads in pool */
spinlock_t device_lock;
@@ -633,4 +762,23 @@ extern void r5l_stripe_write_finished(struct stripe_head *sh);
extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
extern void r5l_quiesce(struct r5l_log *log, int state);
extern bool r5l_log_disk_error(struct r5conf *conf);
+extern bool r5c_is_writeback(struct r5l_log *log);
+extern int
+r5c_try_caching_write(struct r5conf *conf, struct stripe_head *sh,
+ struct stripe_head_state *s, int disks);
+extern void
+r5c_finish_stripe_write_out(struct r5conf *conf, struct stripe_head *sh,
+ struct stripe_head_state *s);
+extern void r5c_release_extra_page(struct stripe_head *sh);
+extern void r5c_use_extra_page(struct stripe_head *sh);
+extern void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
+extern void r5c_handle_cached_data_endio(struct r5conf *conf,
+ struct stripe_head *sh, int disks, struct bio_list *return_bi);
+extern int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh,
+ struct stripe_head_state *s);
+extern void r5c_make_stripe_write_out(struct stripe_head *sh);
+extern void r5c_flush_cache(struct r5conf *conf, int num);
+extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
+extern void r5c_check_cached_full_stripe(struct r5conf *conf);
+extern struct md_sysfs_entry r5c_journal_mode;
#endif
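
The raid5_bi_*() helpers moved into raid5.h above pack two counters into bio->bi_phys_segments: a biased count of active stripes in the low 16 bits and a count of processed stripes in the high 16 bits, which is also why the raid5.c hunk earlier caps max_hw_sectors at 0xfffe * STRIPE_SECTORS. A minimal user-space sketch of the same 16/16 split, without the atomic_t plumbing:

#include <assert.h>

int main(void)
{
	unsigned int v = 0;

	v = (v & ~0xffffu) | 3u;		/* 3 active stripes in the low half */
	v = (v & 0xffffu) | (7u << 16);		/* 7 processed stripes in the high half */

	assert((v & 0xffff) == 3);		/* what raid5_dec_bi_active_stripes() reads */
	assert(((v >> 16) & 0xffff) == 7);	/* what raid5_bi_processed_stripes() reads */
	return 0;
}
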
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 941ceff9b268..29011dfabb11 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -1455,7 +1455,7 @@ static const struct usb_device_id af9015_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CONCEPTRONIC_CTVDIGRCU,
&af9015_props, "Conceptronic USB2.0 DVB-T CTVDIGRCU V3.0", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_MC810,
- &af9015_props, "KWorld Digial MC-810", NULL) },
+ &af9015_props, "KWorld Digital MC-810", NULL) },
{ DVB_USB_DEVICE(USB_VID_KYE, USB_PID_GENIUS_TVGO_DVB_T03,
&af9015_props, "Genius TVGo DVB-T03", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_399U_2,
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
index dc76fd41e00f..ceb953be0770 100644
--- a/drivers/media/usb/usbtv/usbtv-core.c
+++ b/drivers/media/usb/usbtv/usbtv-core.c
@@ -71,6 +71,7 @@ static int usbtv_probe(struct usb_interface *intf,
int size;
struct device *dev = &intf->dev;
struct usbtv *usbtv;
+ struct usb_host_endpoint *ep;
/* Checks that the device is what we think it is. */
if (intf->num_altsetting != 2)
@@ -78,10 +79,12 @@ static int usbtv_probe(struct usb_interface *intf,
if (intf->altsetting[1].desc.bNumEndpoints != 4)
return -ENODEV;
+ ep = &intf->altsetting[1].endpoint[0];
+
/* Packet size is split into 11 bits of base size and count of
* extra multiplies of it.*/
- size = usb_endpoint_maxp(&intf->altsetting[1].endpoint[0].desc);
- size = (size & 0x07ff) * (((size & 0x1800) >> 11) + 1);
+ size = usb_endpoint_maxp(&ep->desc);
+ size = (size & 0x07ff) * usb_endpoint_maxp_mult(&ep->desc);
/* Device structure */
usbtv = kzalloc(sizeof(struct usbtv), GFP_KERNEL);
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index b5589d5f5da4..f3c1c852e401 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1467,6 +1467,7 @@ static unsigned int uvc_endpoint_max_bpi(struct usb_device *dev,
struct usb_host_endpoint *ep)
{
u16 psize;
+ u16 mult;
switch (dev->speed) {
case USB_SPEED_SUPER:
@@ -1474,7 +1475,8 @@ static unsigned int uvc_endpoint_max_bpi(struct usb_device *dev,
return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
case USB_SPEED_HIGH:
psize = usb_endpoint_maxp(&ep->desc);
- return (psize & 0x07ff) * (1 + ((psize >> 11) & 3));
+ mult = usb_endpoint_maxp_mult(&ep->desc);
+ return (psize & 0x07ff) * mult;
case USB_SPEED_WIRELESS:
psize = usb_endpoint_maxp(&ep->desc);
return psize;
@@ -1551,7 +1553,7 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream,
u16 psize;
u32 size;
- psize = usb_endpoint_maxp(&ep->desc) & 0x7ff;
+ psize = usb_endpoint_maxp(&ep->desc);
size = stream->ctrl.dwMaxPayloadTransferSize;
stream->bulk.max_payload_size = size;
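
Both the usbtv and uvcvideo hunks above replace the open-coded decode of wMaxPacketSize bits 12:11 with usb_endpoint_maxp_mult(), which returns the per-microframe transaction multiplier (1 to 3). A small sketch, assuming a valid high-speed endpoint is passed in:

#include <linux/usb.h>

static unsigned int ep_bytes_per_microframe(struct usb_host_endpoint *ep)
{
	unsigned int base = usb_endpoint_maxp(&ep->desc) & 0x07ff;	/* bits 10:0 */
	unsigned int mult = usb_endpoint_maxp_mult(&ep->desc);		/* 1..3 */

	return base * mult;
}
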
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 89c7ed16b4df..1e73064b0fb2 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -2585,10 +2585,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
(void) GetLanConfigPages(ioc);
a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
- "LanAddr = %02X:%02X:%02X"
- ":%02X:%02X:%02X\n",
- ioc->name, a[5], a[4],
- a[3], a[2], a[1], a[0]));
+ "LanAddr = %pMR\n", ioc->name, a));
}
break;
@@ -2868,21 +2865,21 @@ MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
printk(KERN_INFO "%s: ", ioc->name);
if (ioc->prod_name)
- printk("%s: ", ioc->prod_name);
- printk("Capabilities={");
+ pr_cont("%s: ", ioc->prod_name);
+ pr_cont("Capabilities={");
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
- printk("Initiator");
+ pr_cont("Initiator");
i++;
}
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
- printk("%sTarget", i ? "," : "");
+ pr_cont("%sTarget", i ? "," : "");
i++;
}
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
- printk("%sLAN", i ? "," : "");
+ pr_cont("%sLAN", i ? "," : "");
i++;
}
@@ -2891,12 +2888,12 @@ MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
* This would probably evoke more questions than it's worth
*/
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
- printk("%sLogBusAddr", i ? "," : "");
+ pr_cont("%sLogBusAddr", i ? "," : "");
i++;
}
#endif
- printk("}\n");
+ pr_cont("}\n");
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -6783,8 +6780,7 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
if (ioc->bus_type == FC) {
if (ioc->pfacts[p].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
- seq_printf(m, " LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
- a[5], a[4], a[3], a[2], a[1], a[0]);
+ seq_printf(m, " LanAddr = %pMR\n", a);
}
seq_printf(m, " WWN = %08X%08X:%08X%08X\n",
ioc->fc_port_page0[p].WWNN.High,
@@ -6861,8 +6857,7 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
- y += sprintf(buffer+len+y, ", LanAddr=%02X:%02X:%02X:%02X:%02X:%02X",
- a[5], a[4], a[3], a[2], a[1], a[0]);
+ y += sprintf(buffer+len+y, ", LanAddr=%pMR", a);
}
y += sprintf(buffer+len+y, ", IRQ=%d", ioc->pci_irq);
@@ -6896,8 +6891,7 @@ static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int
if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
- seq_printf(m, ", LanAddr=%02X:%02X:%02X:%02X:%02X:%02X",
- a[5], a[4], a[3], a[2], a[1], a[0]);
+ seq_printf(m, ", LanAddr=%pMR", a);
}
seq_printf(m, ", IRQ=%d", ioc->pci_irq);
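
The mptbase hunks rely on the %pM family of printk specifiers: %pM prints a 6-byte address in storage order, and %pMR prints it byte-reversed, which is what the old a[5]..a[0] formatting produced. A small sketch with an invented address:

#include <linux/kernel.h>
#include <linux/printk.h>

static void show_lan_addr(void)
{
	u8 a[6] = { 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 };

	pr_info("LanAddr = %pMR\n", a);		/* prints 11:22:33:44:55:66 */
}
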
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 6c9fc11efb87..08a807d6a44f 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1366,15 +1366,10 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt)
/* Default to untagged. Once a target structure has been allocated,
* use the Inquiry data to determine if device supports tagged.
*/
- if ((vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)
- && (SCpnt->device->tagged_supported)) {
+ if ((vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES) &&
+ SCpnt->device->tagged_supported)
scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
- if (SCpnt->request && SCpnt->request->ioprio) {
- if (((SCpnt->request->ioprio & 0x7) == 1) ||
- !(SCpnt->request->ioprio & 0x7))
- scsictl |= MPI_SCSIIO_CONTROL_HEADOFQ;
- }
- } else
+ else
scsictl = scsidir | MPI_SCSIIO_CONTROL_UNTAGGED;
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index 258757e216c4..b1700b5fa640 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -461,7 +461,7 @@ static int max77620_probe(struct i2c_client *client,
chip->rmap = devm_regmap_init_i2c(client, rmap_config);
if (IS_ERR(chip->rmap)) {
ret = PTR_ERR(chip->rmap);
- dev_err(chip->dev, "Failed to intialise regmap: %d\n", ret);
+ dev_err(chip->dev, "Failed to initialise regmap: %d\n", ret);
return ret;
}
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index c8f027b4ea4c..0f3fab47fe48 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -183,6 +183,7 @@ static int ti_tscadc_probe(struct platform_device *pdev)
tscadc->irq = err;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tscadc->tscadc_phys_base = res->start;
tscadc->tscadc_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(tscadc->tscadc_base))
return PTR_ERR(tscadc->tscadc_base);
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
index cb851c14ca4b..5813b5f25006 100644
--- a/drivers/misc/genwqe/card_base.h
+++ b/drivers/misc/genwqe/card_base.h
@@ -41,7 +41,6 @@
#include "genwqe_driver.h"
#define GENWQE_MSI_IRQS 4 /* Just one supported, no MSIx */
-#define GENWQE_FLAG_MSI_ENABLED (1 << 0)
#define GENWQE_MAX_VFS 15 /* maximum 15 VFs are possible */
#define GENWQE_MAX_FUNCS 16 /* 1 PF and 15 VFs */
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index fc2794b513fa..147b83011b58 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -740,13 +740,10 @@ int genwqe_read_softreset(struct genwqe_dev *cd)
int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
{
int rc;
- struct pci_dev *pci_dev = cd->pci_dev;
- rc = pci_enable_msi_range(pci_dev, 1, count);
+ rc = pci_alloc_irq_vectors(cd->pci_dev, 1, count, PCI_IRQ_MSI);
if (rc < 0)
return rc;
-
- cd->flags |= GENWQE_FLAG_MSI_ENABLED;
return 0;
}
@@ -756,12 +753,7 @@ int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
*/
void genwqe_reset_interrupt_capability(struct genwqe_dev *cd)
{
- struct pci_dev *pci_dev = cd->pci_dev;
-
- if (cd->flags & GENWQE_FLAG_MSI_ENABLED) {
- pci_disable_msi(pci_dev);
- cd->flags &= ~GENWQE_FLAG_MSI_ENABLED;
- }
+ pci_free_irq_vectors(cd->pci_dev);
}
/**
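
The genwqe hunks above switch from pci_enable_msi_range() plus a private enabled-flag to pci_alloc_irq_vectors()/pci_free_irq_vectors(), which track that state inside the PCI core. A minimal sketch of the pattern, with the device and vector count supplied by the caller:

#include <linux/pci.h>

static int my_enable_msi(struct pci_dev *pdev, int count)
{
	int nvec = pci_alloc_irq_vectors(pdev, 1, count, PCI_IRQ_MSI);

	return nvec < 0 ? nvec : 0;	/* nothing to undo on failure */
}

static void my_disable_msi(struct pci_dev *pdev)
{
	pci_free_irq_vectors(pdev);	/* safe whatever was allocated */
}
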
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index 6b3bf9ab051d..c5a456b0a564 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -170,7 +170,7 @@ static void ibmasm_remove_one(struct pci_dev *pdev)
ibmasm_unregister_uart(sp);
dbg("Sending OS down message\n");
if (ibmasm_send_os_state(sp, SYSTEM_STATE_OS_DOWN))
- err("failed to get repsonse to 'Send OS State' command\n");
+ err("failed to get response to 'Send OS State' command\n");
dbg("Disabling heartbeats\n");
ibmasm_heartbeat_exit(sp);
dbg("Disabling interrupts\n");
diff --git a/drivers/misc/lkdtm_bugs.c b/drivers/misc/lkdtm_bugs.c
index f336206d4b1f..91edd0b55e5c 100644
--- a/drivers/misc/lkdtm_bugs.c
+++ b/drivers/misc/lkdtm_bugs.c
@@ -85,7 +85,8 @@ noinline void lkdtm_CORRUPT_STACK(void)
/* Use default char array length that triggers stack protection. */
char data[8];
- memset((void *)data, 0, 64);
+ memset((void *)data, 'a', 64);
+ pr_info("Corrupted stack with '%16s'...\n", data);
}
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
diff --git a/drivers/misc/lkdtm_perms.c b/drivers/misc/lkdtm_perms.c
index 45f1c0f96612..c7635a79341f 100644
--- a/drivers/misc/lkdtm_perms.c
+++ b/drivers/misc/lkdtm_perms.c
@@ -60,15 +60,18 @@ static noinline void execute_location(void *dst, bool write)
static void execute_user_location(void *dst)
{
+ int copied;
+
/* Intentionally crossing kernel/user memory boundary. */
void (*func)(void) = dst;
pr_info("attempting ok execution at %p\n", do_nothing);
do_nothing();
- if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
+ copied = access_process_vm(current, (unsigned long)dst, do_nothing,
+ EXEC_SIZE, FOLL_WRITE);
+ if (copied < EXEC_SIZE)
return;
- flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
pr_info("attempting bad execution at %p\n", func);
func();
}
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 7ae89b4a21d5..466afb2611c6 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -144,7 +144,7 @@ int mei_amthif_run_next_cmd(struct mei_device *dev)
dev->iamthif_state = MEI_IAMTHIF_WRITING;
cl->fp = cb->fp;
- ret = mei_cl_write(cl, cb, false);
+ ret = mei_cl_write(cl, cb);
if (ret < 0)
return ret;
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 75b9d4ac8b1e..18e05ca7584f 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -38,6 +38,9 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
#define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \
0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB)
+#define MEI_UUID_MKHIF_FIX UUID_LE(0x55213584, 0x9a29, 0x4916, \
+ 0xba, 0xdf, 0xf, 0xb7, 0xed, 0x68, 0x2a, 0xeb)
+
#define MEI_UUID_ANY NULL_UUID_LE
/**
@@ -69,6 +72,97 @@ static void blacklist(struct mei_cl_device *cldev)
cldev->do_match = 0;
}
+#define OSTYPE_LINUX 2
+struct mei_os_ver {
+ __le16 build;
+ __le16 reserved1;
+ u8 os_type;
+ u8 major;
+ u8 minor;
+ u8 reserved2;
+} __packed;
+
+#define MKHI_FEATURE_PTT 0x10
+
+struct mkhi_rule_id {
+ __le16 rule_type;
+ u8 feature_id;
+ u8 reserved;
+} __packed;
+
+struct mkhi_fwcaps {
+ struct mkhi_rule_id id;
+ u8 len;
+ u8 data[0];
+} __packed;
+
+#define MKHI_FWCAPS_GROUP_ID 0x3
+#define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6
+struct mkhi_msg_hdr {
+ u8 group_id;
+ u8 command;
+ u8 reserved;
+ u8 result;
+} __packed;
+
+struct mkhi_msg {
+ struct mkhi_msg_hdr hdr;
+ u8 data[0];
+} __packed;
+
+static int mei_osver(struct mei_cl_device *cldev)
+{
+ int ret;
+ const size_t size = sizeof(struct mkhi_msg_hdr) +
+ sizeof(struct mkhi_fwcaps) +
+ sizeof(struct mei_os_ver);
+ size_t length = 8;
+ char buf[size];
+ struct mkhi_msg *req;
+ struct mkhi_fwcaps *fwcaps;
+ struct mei_os_ver *os_ver;
+ unsigned int mode = MEI_CL_IO_TX_BLOCKING | MEI_CL_IO_TX_INTERNAL;
+
+ memset(buf, 0, size);
+
+ req = (struct mkhi_msg *)buf;
+ req->hdr.group_id = MKHI_FWCAPS_GROUP_ID;
+ req->hdr.command = MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD;
+
+ fwcaps = (struct mkhi_fwcaps *)req->data;
+
+ fwcaps->id.rule_type = 0x0;
+ fwcaps->id.feature_id = MKHI_FEATURE_PTT;
+ fwcaps->len = sizeof(*os_ver);
+ os_ver = (struct mei_os_ver *)fwcaps->data;
+ os_ver->os_type = OSTYPE_LINUX;
+
+ ret = __mei_cl_send(cldev->cl, buf, size, mode);
+ if (ret < 0)
+ return ret;
+
+ ret = __mei_cl_recv(cldev->cl, buf, length, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void mei_mkhi_fix(struct mei_cl_device *cldev)
+{
+ int ret;
+
+ ret = mei_cldev_enable(cldev);
+ if (ret)
+ return;
+
+ ret = mei_osver(cldev);
+ if (ret)
+ dev_err(&cldev->dev, "OS version command failed %d\n", ret);
+
+ mei_cldev_disable(cldev);
+}
+
/**
* mei_wd - wd client on the bus, change protocol version
* as the API has changed.
@@ -162,7 +256,8 @@ static int mei_nfc_if_version(struct mei_cl *cl,
WARN_ON(mutex_is_locked(&bus->device_lock));
- ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd), 1);
+ ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd),
+ MEI_CL_IO_TX_BLOCKING);
if (ret < 0) {
dev_err(bus->dev, "Could not send IF version cmd\n");
return ret;
@@ -177,7 +272,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
return -ENOMEM;
ret = 0;
- bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
+ bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0);
if (bytes_recv < if_version_length) {
dev_err(bus->dev, "Could not read IF version\n");
ret = -EIO;
@@ -309,6 +404,7 @@ static struct mei_fixup {
MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist),
MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
MEI_FIXUP(MEI_UUID_WD, mei_wd),
+ MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix),
};
/**
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 8cac7ef9ad0d..0037153c80a6 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -36,12 +36,12 @@
* @cl: host client
* @buf: buffer to send
* @length: buffer length
- * @blocking: wait for write completion
+ * @mode: sending mode
*
* Return: written size bytes or < 0 on error
*/
ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
- bool blocking)
+ unsigned int mode)
{
struct mei_device *bus;
struct mei_cl_cb *cb;
@@ -80,9 +80,11 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
goto out;
}
+ cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
+ cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
memcpy(cb->buf.data, buf, length);
- rets = mei_cl_write(cl, cb, blocking);
+ rets = mei_cl_write(cl, cb);
out:
mutex_unlock(&bus->device_lock);
@@ -96,15 +98,18 @@ out:
* @cl: host client
* @buf: buffer to receive
* @length: buffer length
+ * @mode: io mode
*
* Return: read size in bytes of < 0 on error
*/
-ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
+ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
+ unsigned int mode)
{
struct mei_device *bus;
struct mei_cl_cb *cb;
size_t r_length;
ssize_t rets;
+ bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK);
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
@@ -125,6 +130,11 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
if (rets && rets != -EBUSY)
goto out;
+ if (nonblock) {
+ rets = -EAGAIN;
+ goto out;
+ }
+
/* wait on event only if there is no other waiter */
/* synchronized under device mutex */
if (!waitqueue_active(&cl->rx_wait)) {
@@ -185,14 +195,30 @@ ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
{
struct mei_cl *cl = cldev->cl;
- if (cl == NULL)
- return -ENODEV;
-
- return __mei_cl_send(cl, buf, length, 1);
+ return __mei_cl_send(cl, buf, length, MEI_CL_IO_TX_BLOCKING);
}
EXPORT_SYMBOL_GPL(mei_cldev_send);
/**
+ * mei_cldev_recv_nonblock - non block client receive (read)
+ *
+ * @cldev: me client device
+ * @buf: buffer to receive
+ * @length: buffer length
+ *
+ * Return: read size in bytes or < 0 on error
+ * -EAGAIN if function will block.
+ */
+ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
+ size_t length)
+{
+ struct mei_cl *cl = cldev->cl;
+
+ return __mei_cl_recv(cl, buf, length, MEI_CL_IO_RX_NONBLOCK);
+}
+EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
+
+/**
* mei_cldev_recv - client receive (read)
*
* @cldev: me client device
@@ -205,39 +231,45 @@ ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
{
struct mei_cl *cl = cldev->cl;
- if (cl == NULL)
- return -ENODEV;
-
- return __mei_cl_recv(cl, buf, length);
+ return __mei_cl_recv(cl, buf, length, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv);
/**
- * mei_cl_bus_event_work - dispatch rx event for a bus device
- * and schedule new work
+ * mei_cl_bus_rx_work - dispatch rx event for a bus device
*
* @work: work
*/
-static void mei_cl_bus_event_work(struct work_struct *work)
+static void mei_cl_bus_rx_work(struct work_struct *work)
{
struct mei_cl_device *cldev;
struct mei_device *bus;
- cldev = container_of(work, struct mei_cl_device, event_work);
+ cldev = container_of(work, struct mei_cl_device, rx_work);
bus = cldev->bus;
- if (cldev->event_cb)
- cldev->event_cb(cldev, cldev->events, cldev->event_context);
+ if (cldev->rx_cb)
+ cldev->rx_cb(cldev);
+
+ mutex_lock(&bus->device_lock);
+ mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
+ mutex_unlock(&bus->device_lock);
+}
+
+/**
+ * mei_cl_bus_notif_work - dispatch FW notif event for a bus device
+ *
+ * @work: work
+ */
+static void mei_cl_bus_notif_work(struct work_struct *work)
+{
+ struct mei_cl_device *cldev;
- cldev->events = 0;
+ cldev = container_of(work, struct mei_cl_device, notif_work);
- /* Prepare for the next read */
- if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
- mutex_lock(&bus->device_lock);
- mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
- mutex_unlock(&bus->device_lock);
- }
+ if (cldev->notif_cb)
+ cldev->notif_cb(cldev);
}
/**
@@ -252,18 +284,13 @@ bool mei_cl_bus_notify_event(struct mei_cl *cl)
{
struct mei_cl_device *cldev = cl->cldev;
- if (!cldev || !cldev->event_cb)
- return false;
-
- if (!(cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)))
+ if (!cldev || !cldev->notif_cb)
return false;
if (!cl->notify_ev)
return false;
- set_bit(MEI_CL_EVENT_NOTIF, &cldev->events);
-
- schedule_work(&cldev->event_work);
+ schedule_work(&cldev->notif_work);
cl->notify_ev = false;
@@ -271,7 +298,7 @@ bool mei_cl_bus_notify_event(struct mei_cl *cl)
}
/**
- * mei_cl_bus_rx_event - schedule rx event
+ * mei_cl_bus_rx_event - schedule rx event
*
* @cl: host client
*
@@ -282,66 +309,81 @@ bool mei_cl_bus_rx_event(struct mei_cl *cl)
{
struct mei_cl_device *cldev = cl->cldev;
- if (!cldev || !cldev->event_cb)
- return false;
-
- if (!(cldev->events_mask & BIT(MEI_CL_EVENT_RX)))
+ if (!cldev || !cldev->rx_cb)
return false;
- set_bit(MEI_CL_EVENT_RX, &cldev->events);
-
- schedule_work(&cldev->event_work);
+ schedule_work(&cldev->rx_work);
return true;
}
/**
- * mei_cldev_register_event_cb - register event callback
+ * mei_cldev_register_rx_cb - register Rx event callback
*
* @cldev: me client devices
- * @event_cb: callback function
- * @events_mask: requested events bitmask
- * @context: driver context data
+ * @rx_cb: callback function
*
* Return: 0 on success
* -EALREADY if an callback is already registered
* <0 on other errors
*/
-int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
- unsigned long events_mask,
- mei_cldev_event_cb_t event_cb, void *context)
+int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb)
{
struct mei_device *bus = cldev->bus;
int ret;
- if (cldev->event_cb)
+ if (!rx_cb)
+ return -EINVAL;
+ if (cldev->rx_cb)
return -EALREADY;
- cldev->events = 0;
- cldev->events_mask = events_mask;
- cldev->event_cb = event_cb;
- cldev->event_context = context;
- INIT_WORK(&cldev->event_work, mei_cl_bus_event_work);
+ cldev->rx_cb = rx_cb;
+ INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work);
- if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
- mutex_lock(&bus->device_lock);
- ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
- mutex_unlock(&bus->device_lock);
- if (ret && ret != -EBUSY)
- return ret;
- }
+ mutex_lock(&bus->device_lock);
+ ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
+ mutex_unlock(&bus->device_lock);
+ if (ret && ret != -EBUSY)
+ return ret;
- if (cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)) {
- mutex_lock(&bus->device_lock);
- ret = mei_cl_notify_request(cldev->cl, NULL, event_cb ? 1 : 0);
- mutex_unlock(&bus->device_lock);
- if (ret)
- return ret;
- }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb);
+
+/**
+ * mei_cldev_register_notif_cb - register FW notification event callback
+ *
+ * @cldev: me client devices
+ * @notif_cb: callback function
+ *
+ * Return: 0 on success
+ * -EALREADY if a callback is already registered
+ * <0 on other errors
+ */
+int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
+ mei_cldev_cb_t notif_cb)
+{
+ struct mei_device *bus = cldev->bus;
+ int ret;
+
+ if (!notif_cb)
+ return -EINVAL;
+
+ if (cldev->notif_cb)
+ return -EALREADY;
+
+ cldev->notif_cb = notif_cb;
+ INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work);
+
+ mutex_lock(&bus->device_lock);
+ ret = mei_cl_notify_request(cldev->cl, NULL, 1);
+ mutex_unlock(&bus->device_lock);
+ if (ret)
+ return ret;
return 0;
}
-EXPORT_SYMBOL_GPL(mei_cldev_register_event_cb);
+EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb);
/**
* mei_cldev_get_drvdata - driver data getter
@@ -403,7 +445,7 @@ EXPORT_SYMBOL_GPL(mei_cldev_ver);
*/
bool mei_cldev_enabled(struct mei_cl_device *cldev)
{
- return cldev->cl && mei_cl_is_connected(cldev->cl);
+ return mei_cl_is_connected(cldev->cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_enabled);
@@ -423,14 +465,13 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
cl = cldev->cl;
- if (!cl) {
+ if (cl->state == MEI_FILE_UNINITIALIZED) {
mutex_lock(&bus->device_lock);
- cl = mei_cl_alloc_linked(bus);
+ ret = mei_cl_link(cl);
mutex_unlock(&bus->device_lock);
- if (IS_ERR(cl))
- return PTR_ERR(cl);
+ if (ret)
+ return ret;
/* update pointers */
- cldev->cl = cl;
cl->cldev = cldev;
}
@@ -471,19 +512,17 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
struct mei_cl *cl;
int err;
- if (!cldev || !cldev->cl)
+ if (!cldev)
return -ENODEV;
cl = cldev->cl;
bus = cldev->bus;
- cldev->event_cb = NULL;
-
mutex_lock(&bus->device_lock);
if (!mei_cl_is_connected(cl)) {
- dev_err(bus->dev, "Already disconnected");
+ dev_dbg(bus->dev, "Already disconnected");
err = 0;
goto out;
}
@@ -497,9 +536,6 @@ out:
mei_cl_flush_queues(cl, NULL);
mei_cl_unlink(cl);
- kfree(cl);
- cldev->cl = NULL;
-
mutex_unlock(&bus->device_lock);
return err;
}
@@ -629,9 +665,13 @@ static int mei_cl_device_remove(struct device *dev)
if (!cldev || !dev->driver)
return 0;
- if (cldev->event_cb) {
- cldev->event_cb = NULL;
- cancel_work_sync(&cldev->event_work);
+ if (cldev->rx_cb) {
+ cancel_work_sync(&cldev->rx_work);
+ cldev->rx_cb = NULL;
+ }
+ if (cldev->notif_cb) {
+ cancel_work_sync(&cldev->notif_work);
+ cldev->notif_cb = NULL;
}
cldrv = to_mei_cl_driver(dev->driver);
@@ -754,6 +794,7 @@ static void mei_cl_bus_dev_release(struct device *dev)
mei_me_cl_put(cldev->me_cl);
mei_dev_bus_put(cldev->bus);
+ kfree(cldev->cl);
kfree(cldev);
}
@@ -786,17 +827,25 @@ static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
struct mei_me_client *me_cl)
{
struct mei_cl_device *cldev;
+ struct mei_cl *cl;
cldev = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL);
if (!cldev)
return NULL;
+ cl = mei_cl_allocate(bus);
+ if (!cl) {
+ kfree(cldev);
+ return NULL;
+ }
+
device_initialize(&cldev->dev);
cldev->dev.parent = bus->dev;
cldev->dev.bus = &mei_cl_bus_type;
cldev->dev.type = &mei_cl_device_type;
cldev->bus = mei_dev_bus_get(bus);
cldev->me_cl = mei_me_cl_get(me_cl);
+ cldev->cl = cl;
mei_cl_bus_set_name(cldev);
cldev->is_added = 0;
INIT_LIST_HEAD(&cldev->bus_list);
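
The bus.c rework above splits the single event callback into dedicated Rx and FW-notification callbacks. A sketch of how a bus client driver might use the new Rx path; the driver functions, buffer size and probe flow are invented, not taken from the patch:

#include <linux/mei_cl_bus.h>

static void my_rx(struct mei_cl_device *cldev)
{
	u8 buf[128];
	ssize_t len = mei_cldev_recv(cldev, buf, sizeof(buf));

	if (len > 0)
		dev_dbg(&cldev->dev, "received %zd bytes\n", len);
}

static int my_probe(struct mei_cl_device *cldev,
		    const struct mei_cl_device_id *id)
{
	int ret = mei_cldev_enable(cldev);

	if (ret)
		return ret;
	/* FW notification events would need a separate
	 * mei_cldev_register_notif_cb() call.
	 */
	return mei_cldev_register_rx_cb(cldev, my_rx);
}
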
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 6fe02350578d..391936c1aa04 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -425,7 +425,7 @@ static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
*
* @cl: host client
* @length: size of the buffer
- * @type: operation type
+ * @fop_type: operation type
* @fp: associated file pointer (might be NULL)
*
* Return: cb on success and NULL on failure
@@ -459,7 +459,7 @@ struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
*
* @cl: host client
* @length: size of the buffer
- * @type: operation type
+ * @fop_type: operation type
* @fp: associated file pointer (might be NULL)
*
* Return: cb on success and NULL on failure
@@ -571,7 +571,7 @@ void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
INIT_LIST_HEAD(&cl->rd_pending);
INIT_LIST_HEAD(&cl->link);
cl->writing_state = MEI_IDLE;
- cl->state = MEI_FILE_INITIALIZING;
+ cl->state = MEI_FILE_UNINITIALIZED;
cl->dev = dev;
}
@@ -672,7 +672,12 @@ int mei_cl_unlink(struct mei_cl *cl)
list_del_init(&cl->link);
- cl->state = MEI_FILE_INITIALIZING;
+ cl->state = MEI_FILE_UNINITIALIZED;
+ cl->writing_state = MEI_IDLE;
+
+ WARN_ON(!list_empty(&cl->rd_completed) ||
+ !list_empty(&cl->rd_pending) ||
+ !list_empty(&cl->link));
return 0;
}
@@ -686,7 +691,7 @@ void mei_host_client_init(struct mei_device *dev)
pm_runtime_mark_last_busy(dev->dev);
dev_dbg(dev->dev, "rpm: autosuspend\n");
- pm_runtime_autosuspend(dev->dev);
+ pm_request_autosuspend(dev->dev);
}
/**
@@ -756,7 +761,7 @@ void mei_cl_set_disconnected(struct mei_cl *cl)
struct mei_device *dev = cl->dev;
if (cl->state == MEI_FILE_DISCONNECTED ||
- cl->state == MEI_FILE_INITIALIZING)
+ cl->state <= MEI_FILE_INITIALIZING)
return;
cl->state = MEI_FILE_DISCONNECTED;
@@ -1598,18 +1603,17 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
*
* @cl: host client
* @cb: write callback with filled data
- * @blocking: block until completed
*
* Return: number of bytes sent on success, <0 on failure.
*/
-int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
+int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
{
struct mei_device *dev;
struct mei_msg_data *buf;
struct mei_msg_hdr mei_hdr;
int size;
int rets;
-
+ bool blocking;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
@@ -1621,6 +1625,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
buf = &cb->buf;
size = buf->size;
+ blocking = cb->blocking;
cl_dbg(dev, cl, "size=%d\n", size);
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index d2bfabecd882..f2545af9be7b 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -219,7 +219,7 @@ int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp);
int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr,
struct mei_cl_cb *cmpl_list);
-int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking);
+int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb);
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
struct mei_cl_cb *cmpl_list);
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 7ad15d678878..c8307e8b4c16 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -122,6 +122,8 @@
#define MEI_DEV_ID_SPT_H 0xA13A /* Sunrise Point H */
#define MEI_DEV_ID_SPT_H_2 0xA13B /* Sunrise Point H 2 */
+#define MEI_DEV_ID_LBG 0xA1BA /* Lewisburg (SPT) */
+
#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 56c2101e80ad..a05375a3338a 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -246,6 +246,36 @@ static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
return hw->pg_state;
}
+static inline u32 me_intr_src(u32 hcsr)
+{
+ return hcsr & H_CSR_IS_MASK;
+}
+
+/**
+ * me_intr_disable - disables mei device interrupts
+ * using supplied hcsr register value.
+ *
+ * @dev: the device structure
+ * @hcsr: supplied hcsr register value
+ */
+static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
+{
+ hcsr &= ~H_CSR_IE_MASK;
+ mei_hcsr_set(dev, hcsr);
+}
+
+/**
+ * me_intr_clear - clear and stop interrupts
+ *
+ * @dev: the device structure
+ * @hcsr: supplied hcsr register value
+ */
+static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
+{
+ if (me_intr_src(hcsr))
+ mei_hcsr_write(dev, hcsr);
+}
+
/**
* mei_me_intr_clear - clear and stop interrupts
*
@@ -255,8 +285,7 @@ static void mei_me_intr_clear(struct mei_device *dev)
{
u32 hcsr = mei_hcsr_read(dev);
- if (hcsr & H_CSR_IS_MASK)
- mei_hcsr_write(dev, hcsr);
+ me_intr_clear(dev, hcsr);
}
/**
* mei_me_intr_enable - enables mei device interrupts
@@ -280,8 +309,19 @@ static void mei_me_intr_disable(struct mei_device *dev)
{
u32 hcsr = mei_hcsr_read(dev);
- hcsr &= ~H_CSR_IE_MASK;
- mei_hcsr_set(dev, hcsr);
+ me_intr_disable(dev, hcsr);
+}
+
+/**
+ * mei_me_synchronize_irq - wait for pending IRQ handlers
+ *
+ * @dev: the device structure
+ */
+static void mei_me_synchronize_irq(struct mei_device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ synchronize_irq(pdev->irq);
}
/**
@@ -450,7 +490,7 @@ static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
/**
- * mei_me_write_message - writes a message to mei device.
+ * mei_me_hbuf_write - writes a message to host hw buffer.
*
* @dev: the device structure
* @header: mei HECI header of message
@@ -458,9 +498,9 @@ static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
*
* Return: -EIO if write has failed
*/
-static int mei_me_write_message(struct mei_device *dev,
- struct mei_msg_hdr *header,
- unsigned char *buf)
+static int mei_me_hbuf_write(struct mei_device *dev,
+ struct mei_msg_hdr *header,
+ const unsigned char *buf)
{
unsigned long rem;
unsigned long length = header->length;
@@ -956,13 +996,14 @@ static void mei_me_pg_legacy_intr(struct mei_device *dev)
* mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
*
* @dev: the device structure
+ * @intr_source: interrupt source
*/
-static void mei_me_d0i3_intr(struct mei_device *dev)
+static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
{
struct mei_me_hw *hw = to_me_hw(dev);
if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
- (hw->intr_source & H_D0I3C_IS)) {
+ (intr_source & H_D0I3C_IS)) {
dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
if (hw->pg_state == MEI_PG_ON) {
hw->pg_state = MEI_PG_OFF;
@@ -981,7 +1022,7 @@ static void mei_me_d0i3_intr(struct mei_device *dev)
wake_up(&dev->wait_pg);
}
- if (hw->pg_state == MEI_PG_ON && (hw->intr_source & H_IS)) {
+ if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) {
/*
* HW sent some data and we are in D0i3, so
* we got here because of HW initiated exit from D0i3.
@@ -996,13 +1037,14 @@ static void mei_me_d0i3_intr(struct mei_device *dev)
* mei_me_pg_intr - perform pg processing in interrupt thread handler
*
* @dev: the device structure
+ * @intr_source: interrupt source
*/
-static void mei_me_pg_intr(struct mei_device *dev)
+static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source)
{
struct mei_me_hw *hw = to_me_hw(dev);
if (hw->d0i3_supported)
- mei_me_d0i3_intr(dev);
+ mei_me_d0i3_intr(dev, intr_source);
else
mei_me_pg_legacy_intr(dev);
}
@@ -1121,19 +1163,16 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
struct mei_device *dev = (struct mei_device *)dev_id;
- struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr;
hcsr = mei_hcsr_read(dev);
- if (!(hcsr & H_CSR_IS_MASK))
+ if (!me_intr_src(hcsr))
return IRQ_NONE;
- hw->intr_source = hcsr & H_CSR_IS_MASK;
- dev_dbg(dev->dev, "interrupt source 0x%08X.\n", hw->intr_source);
-
- /* clear H_IS and H_D0I3C_IS bits in H_CSR to clear the interrupts */
- mei_hcsr_write(dev, hcsr);
+ dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));
+ /* disable interrupts on device */
+ me_intr_disable(dev, hcsr);
return IRQ_WAKE_THREAD;
}
@@ -1152,11 +1191,16 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
struct mei_device *dev = (struct mei_device *) dev_id;
struct mei_cl_cb complete_list;
s32 slots;
+ u32 hcsr;
int rets = 0;
dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
/* initialize our complete list */
mutex_lock(&dev->device_lock);
+
+ hcsr = mei_hcsr_read(dev);
+ me_intr_clear(dev, hcsr);
+
mei_io_list_init(&complete_list);
/* check if ME wants a reset */
@@ -1166,7 +1210,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
goto end;
}
- mei_me_pg_intr(dev);
+ mei_me_pg_intr(dev, me_intr_src(hcsr));
/* check if we need to start the dev */
if (!mei_host_is_ready(dev)) {
@@ -1216,6 +1260,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
end:
dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
+ mei_me_intr_enable(dev);
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
}
@@ -1238,12 +1283,13 @@ static const struct mei_hw_ops mei_me_hw_ops = {
.intr_clear = mei_me_intr_clear,
.intr_enable = mei_me_intr_enable,
.intr_disable = mei_me_intr_disable,
+ .synchronize_irq = mei_me_synchronize_irq,
.hbuf_free_slots = mei_me_hbuf_empty_slots,
.hbuf_is_ready = mei_me_hbuf_is_empty,
.hbuf_max_len = mei_me_hbuf_max_len,
- .write = mei_me_write_message,
+ .write = mei_me_hbuf_write,
.rdbuf_full_slots = mei_me_count_full_read_slots,
.read_hdr = mei_me_mecbrw_read,
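
The hw-me.c hunks above move interrupt masking into the quick handler, which only silences the device and returns IRQ_WAKE_THREAD, and re-enable interrupts at the end of the threaded handler, with the new synchronize_irq op used during reset. A generic sketch of that hard-irq/threaded-irq split; the register layout and bit values are invented:

#include <linux/interrupt.h>
#include <linux/io.h>

struct my_dev {
	void __iomem *regs;
};

static irqreturn_t my_quick_handler(int irq, void *dev_id)
{
	struct my_dev *d = dev_id;
	u32 csr = readl(d->regs);

	if (!(csr & 0x1))			/* no interrupt source pending */
		return IRQ_NONE;
	writel(csr & ~0x2, d->regs);		/* mask interrupts ... */
	return IRQ_WAKE_THREAD;			/* ... and defer the work */
}

static irqreturn_t my_thread_handler(int irq, void *dev_id)
{
	struct my_dev *d = dev_id;

	/* acknowledge sources and process queues here */
	writel(readl(d->regs) | 0x2, d->regs);	/* re-arm on the way out */
	return IRQ_HANDLED;
}
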
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 2ee14dc1b2ea..cf64847a35b9 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -51,14 +51,12 @@ struct mei_cfg {
*
* @cfg: per device generation config and ops
* @mem_addr: io memory address
- * @intr_source: interrupt source
* @pg_state: power gating state
* @d0i3_supported: di03 support
*/
struct mei_me_hw {
const struct mei_cfg *cfg;
void __iomem *mem_addr;
- u32 intr_source;
enum mei_pg_state pg_state;
bool d0i3_supported;
};
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 60415a2bfcbd..e9f8c0aeec13 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -19,7 +19,7 @@
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/kthread.h>
-#include <linux/irqreturn.h>
+#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/mei.h>
@@ -441,6 +441,18 @@ static void mei_txe_intr_enable(struct mei_device *dev)
}
/**
+ * mei_txe_synchronize_irq - wait for pending IRQ handlers
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_synchronize_irq(struct mei_device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ synchronize_irq(pdev->irq);
+}
+
+/**
* mei_txe_pending_interrupts - check if there are pending interrupts
* only Aliveness, Input ready, and output doorbell are of relevance
*
@@ -691,7 +703,8 @@ static void mei_txe_hw_config(struct mei_device *dev)
*/
static int mei_txe_write(struct mei_device *dev,
- struct mei_msg_hdr *header, unsigned char *buf)
+ struct mei_msg_hdr *header,
+ const unsigned char *buf)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
unsigned long rem;
@@ -1167,6 +1180,7 @@ static const struct mei_hw_ops mei_txe_hw_ops = {
.intr_clear = mei_txe_intr_clear,
.intr_enable = mei_txe_intr_enable,
.intr_disable = mei_txe_intr_disable,
+ .synchronize_irq = mei_txe_synchronize_irq,
.hbuf_free_slots = mei_txe_hbuf_empty_slots,
.hbuf_is_ready = mei_txe_is_input_ready,
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 9a9c2484d107..41e5760a6886 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -122,6 +122,10 @@ int mei_reset(struct mei_device *dev)
mei_dev_state_str(state), fw_sts_str);
}
+ mei_clear_interrupts(dev);
+
+ mei_synchronize_irq(dev);
+
/* we're already in reset, cancel the init timer
* if the reset was called due the hbm protocol error
* we need to call it before hw start
@@ -273,8 +277,6 @@ int mei_restart(struct mei_device *dev)
mutex_lock(&dev->device_lock);
- mei_clear_interrupts(dev);
-
dev->dev_state = MEI_DEV_POWER_UP;
dev->reset_count = 0;
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 5a4893ce9c24..b584749bcc4a 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -118,7 +118,6 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
if (!mei_cl_is_connected(cl)) {
cl_dbg(dev, cl, "not connected\n");
- list_move_tail(&cb->list, &complete_list->list);
cb->status = -ENODEV;
goto discard;
}
@@ -128,8 +127,6 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
if (buf_sz < cb->buf_idx) {
cl_err(dev, cl, "message is too big len %d idx %zu\n",
mei_hdr->length, cb->buf_idx);
-
- list_move_tail(&cb->list, &complete_list->list);
cb->status = -EMSGSIZE;
goto discard;
}
@@ -137,8 +134,6 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
if (cb->buf.size < buf_sz) {
cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
cb->buf.size, mei_hdr->length, cb->buf_idx);
-
- list_move_tail(&cb->list, &complete_list->list);
cb->status = -EMSGSIZE;
goto discard;
}
@@ -158,6 +153,8 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
return 0;
discard:
+ if (cb)
+ list_move_tail(&cb->list, &complete_list->list);
mei_irq_discard_msg(dev, mei_hdr);
return 0;
}
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index fa50635512e8..e1bf54481fd6 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -322,7 +322,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
goto out;
}
- rets = mei_cl_write(cl, cb, false);
+ rets = mei_cl_write(cl, cb);
out:
mutex_unlock(&dev->device_lock);
return rets;
@@ -653,7 +653,7 @@ static int mei_fasync(int fd, struct file *file, int band)
}
/**
- * fw_status_show - mei device attribute show method
+ * fw_status_show - mei device fw_status attribute show method
*
* @device: device pointer
* @attr: attribute pointer
@@ -684,8 +684,49 @@ static ssize_t fw_status_show(struct device *device,
}
static DEVICE_ATTR_RO(fw_status);
+/**
+ * hbm_ver_show - display HBM protocol version negotiated with FW
+ *
+ * @device: device pointer
+ * @attr: attribute pointer
+ * @buf: char out buffer
+ *
+ * Return: number of bytes printed into buf, or a negative error code
+ */
+static ssize_t hbm_ver_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mei_device *dev = dev_get_drvdata(device);
+ struct hbm_version ver;
+
+ mutex_lock(&dev->device_lock);
+ ver = dev->version;
+ mutex_unlock(&dev->device_lock);
+
+ return sprintf(buf, "%u.%u\n", ver.major_version, ver.minor_version);
+}
+static DEVICE_ATTR_RO(hbm_ver);
+
+/**
+ * hbm_ver_drv_show - display HBM protocol version advertised by driver
+ *
+ * @device: device pointer
+ * @attr: attribute pointer
+ * @buf: char out buffer
+ *
+ * Return: number of bytes printed into buf, or a negative error code
+ */
+static ssize_t hbm_ver_drv_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u.%u\n", HBM_MAJOR_VERSION, HBM_MINOR_VERSION);
+}
+static DEVICE_ATTR_RO(hbm_ver_drv);
+
static struct attribute *mei_attrs[] = {
&dev_attr_fw_status.attr,
+ &dev_attr_hbm_ver.attr,
+ &dev_attr_hbm_ver_drv.attr,
NULL
};
ATTRIBUTE_GROUPS(mei);
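The two attributes added above expose the negotiated and driver-advertised HBM versions in sysfs. A minimal userspace sketch, assuming the usual /sys/class/mei/mei0 class-device path, which this patch itself does not spell out:

#include <stdio.h>

int main(void)
{
	unsigned int major, minor;
	FILE *f = fopen("/sys/class/mei/mei0/hbm_ver", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u.%u", &major, &minor) == 2)
		printf("negotiated HBM version: %u.%u\n", major, minor);
	fclose(f);
	return 0;
}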
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 1169fd9e7d02..699693cd8c59 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -55,7 +55,8 @@ extern const uuid_le mei_amthif_guid;
/* File state */
enum file_state {
- MEI_FILE_INITIALIZING = 0,
+ MEI_FILE_UNINITIALIZED = 0,
+ MEI_FILE_INITIALIZING,
MEI_FILE_CONNECTING,
MEI_FILE_CONNECTED,
MEI_FILE_DISCONNECTING,
@@ -109,6 +110,21 @@ enum mei_cb_file_ops {
MEI_FOP_NOTIFY_STOP,
};
+/**
+ * enum mei_cl_io_mode - io mode between driver and fw
+ *
+ * @MEI_CL_IO_TX_BLOCKING: send is blocking
+ * @MEI_CL_IO_TX_INTERNAL: internal communication between driver and FW
+ *
+ * @MEI_CL_IO_RX_NONBLOCK: recv is non-blocking
+ */
+enum mei_cl_io_mode {
+ MEI_CL_IO_TX_BLOCKING = BIT(0),
+ MEI_CL_IO_TX_INTERNAL = BIT(1),
+
+ MEI_CL_IO_RX_NONBLOCK = BIT(2),
+};
+
/*
* Intel MEI message data struct
*/
@@ -169,6 +185,7 @@ struct mei_cl;
* @fp: pointer to file structure
* @status: io status of the cb
* @internal: communication between driver and FW flag
+ * @blocking: transmission blocking mode
* @completed: the transfer or reception has completed
*/
struct mei_cl_cb {
@@ -180,6 +197,7 @@ struct mei_cl_cb {
const struct file *fp;
int status;
u32 internal:1;
+ u32 blocking:1;
u32 completed:1;
};
@@ -253,6 +271,7 @@ struct mei_cl {
* @intr_clear : clear pending interrupts
* @intr_enable : enable interrupts
* @intr_disable : disable interrupts
+ * @synchronize_irq : synchronize irqs
*
* @hbuf_free_slots : query for write buffer empty slots
* @hbuf_is_ready : query if write buffer is empty
@@ -274,7 +293,6 @@ struct mei_hw_ops {
int (*hw_start)(struct mei_device *dev);
void (*hw_config)(struct mei_device *dev);
-
int (*fw_status)(struct mei_device *dev, struct mei_fw_status *fw_sts);
enum mei_pg_state (*pg_state)(struct mei_device *dev);
bool (*pg_in_transition)(struct mei_device *dev);
@@ -283,14 +301,14 @@ struct mei_hw_ops {
void (*intr_clear)(struct mei_device *dev);
void (*intr_enable)(struct mei_device *dev);
void (*intr_disable)(struct mei_device *dev);
+ void (*synchronize_irq)(struct mei_device *dev);
int (*hbuf_free_slots)(struct mei_device *dev);
bool (*hbuf_is_ready)(struct mei_device *dev);
size_t (*hbuf_max_len)(const struct mei_device *dev);
-
int (*write)(struct mei_device *dev,
struct mei_msg_hdr *hdr,
- unsigned char *buf);
+ const unsigned char *buf);
int (*rdbuf_full_slots)(struct mei_device *dev);
@@ -304,8 +322,9 @@ void mei_cl_bus_rescan(struct mei_device *bus);
void mei_cl_bus_rescan_work(struct work_struct *work);
void mei_cl_bus_dev_fixup(struct mei_cl_device *dev);
ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
- bool blocking);
-ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
+ unsigned int mode);
+ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
+ unsigned int mode);
bool mei_cl_bus_rx_event(struct mei_cl *cl);
bool mei_cl_bus_notify_event(struct mei_cl *cl);
void mei_cl_bus_remove_devices(struct mei_device *bus);
@@ -627,6 +646,11 @@ static inline void mei_disable_interrupts(struct mei_device *dev)
dev->ops->intr_disable(dev);
}
+static inline void mei_synchronize_irq(struct mei_device *dev)
+{
+ dev->ops->synchronize_irq(dev);
+}
+
static inline bool mei_host_is_ready(struct mei_device *dev)
{
return dev->ops->host_is_ready(dev);
@@ -652,7 +676,7 @@ static inline size_t mei_hbuf_max_len(const struct mei_device *dev)
}
static inline int mei_write_message(struct mei_device *dev,
- struct mei_msg_hdr *hdr, void *buf)
+ struct mei_msg_hdr *hdr, const void *buf)
{
return dev->ops->write(dev, hdr, buf);
}
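With the reworked prototypes above, bus-internal senders pass a mask of mei_cl_io_mode flags instead of a bool. A minimal sketch of a blocking transmit under that reading; the wrapper name and error handling are illustrative only:

static int example_blocking_send(struct mei_cl *cl, u8 *buf, size_t len)
{
	ssize_t ret;

	/* MEI_CL_IO_TX_BLOCKING corresponds to the old 'blocking = true' */
	ret = __mei_cl_send(cl, buf, len, MEI_CL_IO_TX_BLOCKING);

	return ret < 0 ? ret : 0;
}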
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index f3ffd883b232..f9c6ec4b98ab 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -87,6 +87,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index f2eeb38efa65..7e803fc454d1 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -23,8 +23,6 @@ if MMC
source "drivers/mmc/core/Kconfig"
-source "drivers/mmc/card/Kconfig"
-
source "drivers/mmc/host/Kconfig"
endif # MMC
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile
index 400756ec7c49..416b6d1c9ec6 100644
--- a/drivers/mmc/Makefile
+++ b/drivers/mmc/Makefile
@@ -5,5 +5,4 @@
subdir-ccflags-$(CONFIG_MMC_DEBUG) := -DDEBUG
obj-$(CONFIG_MMC) += core/
-obj-$(CONFIG_MMC) += card/
obj-$(subst m,y,$(CONFIG_MMC)) += host/
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
deleted file mode 100644
index 5562308699bc..000000000000
--- a/drivers/mmc/card/Kconfig
+++ /dev/null
@@ -1,70 +0,0 @@
-#
-# MMC/SD card drivers
-#
-
-comment "MMC/SD/SDIO Card Drivers"
-
-config MMC_BLOCK
- tristate "MMC block device driver"
- depends on BLOCK
- default y
- help
- Say Y here to enable the MMC block device driver support.
- This provides a block device driver, which you can use to
- mount the filesystem. Almost everyone wishing MMC support
- should say Y or M here.
-
-config MMC_BLOCK_MINORS
- int "Number of minors per block device"
- depends on MMC_BLOCK
- range 4 256
- default 8
- help
- Number of minors per block device. One is needed for every
- partition on the disk (plus one for the whole disk).
-
- Number of total MMC minors available is 256, so your number
- of supported block devices will be limited to 256 divided
- by this number.
-
- Default is 8 to be backwards compatible with previous
- hardwired device numbering.
-
- If unsure, say 8 here.
-
-config MMC_BLOCK_BOUNCE
- bool "Use bounce buffer for simple hosts"
- depends on MMC_BLOCK
- default y
- help
- SD/MMC is a high latency protocol where it is crucial to
- send large requests in order to get high performance. Many
- controllers, however, are restricted to continuous memory
- (i.e. they can't do scatter-gather), something the kernel
- rarely can provide.
-
- Say Y here to help these restricted hosts by bouncing
- requests back and forth from a large buffer. You will get
- a big performance gain at the cost of up to 64 KiB of
- physical memory.
-
- If unsure, say Y here.
-
-config SDIO_UART
- tristate "SDIO UART/GPS class support"
- depends on TTY
- help
- SDIO function driver for SDIO cards that implements the UART
- class, as well as the GPS class which appears like a UART.
-
-config MMC_TEST
- tristate "MMC host test driver"
- help
- Development driver that performs a series of reads and writes
- to a memory card in order to expose certain well known bugs
- in host controllers. The tests are executed by writing to the
- "test" file in debugfs under each card. Note that whatever is
- on your card will be overwritten by these tests.
-
- This driver is only of interest to those developing or
- testing a host driver. Most people should say N here.
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
deleted file mode 100644
index c73b406a06cd..000000000000
--- a/drivers/mmc/card/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-#
-# Makefile for MMC/SD card drivers
-#
-
-obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
-mmc_block-objs := block.o queue.o
-obj-$(CONFIG_MMC_TEST) += mmc_test.o
-
-obj-$(CONFIG_SDIO_UART) += sdio_uart.o
-
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 250f223aaa80..cdfa8520a4b1 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -22,3 +22,69 @@ config PWRSEQ_SIMPLE
This driver can also be built as a module. If so, the module
will be called pwrseq_simple.
+
+config MMC_BLOCK
+ tristate "MMC block device driver"
+ depends on BLOCK
+ default y
+ help
+ Say Y here to enable the MMC block device driver support.
+ This provides a block device driver, which you can use to
+ mount the filesystem. Almost everyone wishing MMC support
+ should say Y or M here.
+
+config MMC_BLOCK_MINORS
+ int "Number of minors per block device"
+ depends on MMC_BLOCK
+ range 4 256
+ default 8
+ help
+ Number of minors per block device. One is needed for every
+ partition on the disk (plus one for the whole disk).
+
+ Number of total MMC minors available is 256, so your number
+ of supported block devices will be limited to 256 divided
+ by this number.
+
+ Default is 8 to be backwards compatible with previous
+ hardwired device numbering.
+
+ If unsure, say 8 here.
+
+config MMC_BLOCK_BOUNCE
+ bool "Use bounce buffer for simple hosts"
+ depends on MMC_BLOCK
+ default y
+ help
+ SD/MMC is a high latency protocol where it is crucial to
+ send large requests in order to get high performance. Many
+ controllers, however, are restricted to continuous memory
+ (i.e. they can't do scatter-gather), something the kernel
+ rarely can provide.
+
+ Say Y here to help these restricted hosts by bouncing
+ requests back and forth from a large buffer. You will get
+ a big performance gain at the cost of up to 64 KiB of
+ physical memory.
+
+ If unsure, say Y here.
+
+config SDIO_UART
+ tristate "SDIO UART/GPS class support"
+ depends on TTY
+ help
+ SDIO function driver for SDIO cards that implements the UART
+ class, as well as the GPS class which appears like a UART.
+
+config MMC_TEST
+ tristate "MMC host test driver"
+ help
+ Development driver that performs a series of reads and writes
+ to a memory card in order to expose certain well known bugs
+ in host controllers. The tests are executed by writing to the
+ "test" file in debugfs under each card. Note that whatever is
+ on your card will be overwritten by these tests.
+
+ This driver is only of interest to those developing or
+ testing a host driver. Most people should say N here.
+
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index f007151dfdc6..b2a257dc644f 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -12,3 +12,7 @@ mmc_core-$(CONFIG_OF) += pwrseq.o
obj-$(CONFIG_PWRSEQ_SIMPLE) += pwrseq_simple.o
obj-$(CONFIG_PWRSEQ_EMMC) += pwrseq_emmc.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
+obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
+mmc_block-objs := block.o queue.o
+obj-$(CONFIG_MMC_TEST) += mmc_test.o
+obj-$(CONFIG_SDIO_UART) += sdio_uart.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/core/block.c
index bab3f07b1117..bab3f07b1117 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/core/block.c
diff --git a/drivers/mmc/card/block.h b/drivers/mmc/core/block.h
index cdabb2ee74be..cdabb2ee74be 100644
--- a/drivers/mmc/card/block.h
+++ b/drivers/mmc/core/block.h
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/core/mmc_test.c
index ec1d1c46eb90..3ab6e52d106c 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/mmc/card/mmc_test.c
- *
* Copyright 2007-2008 Pierre Ossman
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/core/queue.c
index 6ae6bfb8b221..a6496d8027bc 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/mmc/card/queue.c
- *
* Copyright (C) 2003 Russell King, All Rights Reserved.
* Copyright 2006-2007 Pierre Ossman
*
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/core/queue.h
index dac8c3d010dd..dac8c3d010dd 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/core/queue.h
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/core/sdio_uart.c
index 491c187744f5..d3c91f412b69 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/core/sdio_uart.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/card/sdio_uart.c - SDIO UART/GPS driver
+ * SDIO UART/GPS driver
*
* Based on drivers/serial/8250.c and drivers/serial/serial_core.c
* by Russell King.
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index b9fbd0107008..b21d8aa8d653 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1567,7 +1567,6 @@ static const struct net_device_ops slic_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = slic_get_stats,
.ndo_set_rx_mode = slic_set_rx_mode,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 3f77d0863543..6fad22adbbb9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -585,7 +585,7 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
mcast.mcast_list_len = mc_num;
rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_SET);
if (rc)
- BNX2X_ERR("Faled to set multicasts\n");
+ BNX2X_ERR("Failed to set multicasts\n");
} else {
/* clear existing mcasts */
rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index bb74e1c10ffe..c68dbf7092b1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1377,7 +1377,7 @@ static const char *attn_master_to_str(u8 master)
case 9: return "DBU";
case 10: return "DMAE";
default:
- return "Unkown";
+ return "Unknown";
}
}
@@ -1555,7 +1555,7 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
DORQ_REG_DB_DROP_DETAILS);
DP_INFO(p_hwfn->cdev,
- "DORQ db_drop: adress 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
+ "DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
DORQ_REG_DB_DROP_DETAILS_ADDRESS),
(u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK),
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index d0a58282f2a8..a39ef2e7a9a6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -369,7 +369,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
break;
default:
- DP_NOTICE(p_hwfn, "Unkown personality %d\n",
+ DP_NOTICE(p_hwfn, "Unknown personality %d\n",
p_hwfn->hw_info.personality);
p_ramrod->personality = PERSONALITY_ETH;
}
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 0457e315d336..b541a1c74488 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -2091,7 +2091,7 @@ int ath10k_pci_init_config(struct ath10k *ar)
ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
if (ret != 0) {
- ath10k_err(ar, "Faile to get early alloc val: %d\n", ret);
+ ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
return ret;
}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 4ac9ba04afed..c1b4bb03e997 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -338,7 +338,7 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
if (skb_headroom(skb) < rtap_len &&
pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
- wil_err(wil, "Unable to expand headrom to %d\n", rtap_len);
+ wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
return;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
index 1186755e55b8..e5505387260b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
@@ -134,7 +134,7 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Wating too long for FW read clear HMEBox(%d)!\n",
+ "Waiting too long for FW read clear HMEBox(%d)!\n",
boxnum);
break;
}
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 55a4488633e4..3124eaec9427 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -785,12 +785,9 @@ static void xen_mcast_ctrl_changed(struct xenbus_watch *watch,
struct xenvif *vif = container_of(watch, struct xenvif,
mcast_ctrl_watch);
struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
- int val;
- if (xenbus_scanf(XBT_NIL, dev->otherend,
- "request-multicast-control", "%d", &val) < 0)
- val = 0;
- vif->multicast_control = !!val;
+ vif->multicast_control = !!xenbus_read_unsigned(dev->otherend,
+ "request-multicast-control", 0);
}
static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
@@ -934,14 +931,11 @@ static void connect(struct backend_info *be)
/* Check whether the frontend requested multiple queues
* and read the number requested.
*/
- err = xenbus_scanf(XBT_NIL, dev->otherend,
- "multi-queue-num-queues",
- "%u", &requested_num_queues);
- if (err < 0) {
- requested_num_queues = 1; /* Fall back to single queue */
- } else if (requested_num_queues > xenvif_max_queues) {
+ requested_num_queues = xenbus_read_unsigned(dev->otherend,
+ "multi-queue-num-queues", 1);
+ if (requested_num_queues > xenvif_max_queues) {
/* buggy or malicious guest */
- xenbus_dev_fatal(dev, err,
+ xenbus_dev_fatal(dev, -EINVAL,
"guest requested %u queues, exceeding the maximum of %u.",
requested_num_queues, xenvif_max_queues);
return;
@@ -1134,7 +1128,7 @@ static int read_xenbus_vif_flags(struct backend_info *be)
struct xenvif *vif = be->vif;
struct xenbus_device *dev = be->dev;
unsigned int rx_copy;
- int err, val;
+ int err;
err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
&rx_copy);
@@ -1150,10 +1144,7 @@ static int read_xenbus_vif_flags(struct backend_info *be)
if (!rx_copy)
return -EOPNOTSUPP;
- if (xenbus_scanf(XBT_NIL, dev->otherend,
- "feature-rx-notify", "%d", &val) < 0)
- val = 0;
- if (!val) {
+ if (!xenbus_read_unsigned(dev->otherend, "feature-rx-notify", 0)) {
/* - Reduce drain timeout to poll more frequently for
* Rx requests.
* - Disable Rx stall detection.
@@ -1162,34 +1153,21 @@ static int read_xenbus_vif_flags(struct backend_info *be)
be->vif->stall_timeout = 0;
}
- if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
- "%d", &val) < 0)
- val = 0;
- vif->can_sg = !!val;
+ vif->can_sg = !!xenbus_read_unsigned(dev->otherend, "feature-sg", 0);
vif->gso_mask = 0;
- if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
- "%d", &val) < 0)
- val = 0;
- if (val)
+ if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv4", 0))
vif->gso_mask |= GSO_BIT(TCPV4);
- if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
- "%d", &val) < 0)
- val = 0;
- if (val)
+ if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv6", 0))
vif->gso_mask |= GSO_BIT(TCPV6);
- if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
- "%d", &val) < 0)
- val = 0;
- vif->ip_csum = !val;
+ vif->ip_csum = !xenbus_read_unsigned(dev->otherend,
+ "feature-no-csum-offload", 0);
- if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload",
- "%d", &val) < 0)
- val = 0;
- vif->ipv6_csum = !!val;
+ vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend,
+ "feature-ipv6-csum-offload", 0);
return 0;
}
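The conversions above rely on xenbus_read_unsigned(), which folds the repeated scanf-with-fallback pattern into a single call. Roughly equivalent open-coded logic, shown only to make the default-value semantics explicit:

static unsigned int example_read_unsigned(const char *dir, const char *node,
					  unsigned int default_val)
{
	unsigned int val;

	/* fall back to the default when the node is absent or unreadable */
	if (xenbus_scanf(XBT_NIL, dir, node, "%u", &val) <= 0)
		val = default_val;

	return val;
}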
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e085c8c31cfe..a479cd99911d 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1169,43 +1169,23 @@ static netdev_features_t xennet_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct netfront_info *np = netdev_priv(dev);
- int val;
- if (features & NETIF_F_SG) {
- if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
- "%d", &val) < 0)
- val = 0;
+ if (features & NETIF_F_SG &&
+ !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
+ features &= ~NETIF_F_SG;
- if (!val)
- features &= ~NETIF_F_SG;
- }
-
- if (features & NETIF_F_IPV6_CSUM) {
- if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
- "feature-ipv6-csum-offload", "%d", &val) < 0)
- val = 0;
-
- if (!val)
- features &= ~NETIF_F_IPV6_CSUM;
- }
-
- if (features & NETIF_F_TSO) {
- if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
- "feature-gso-tcpv4", "%d", &val) < 0)
- val = 0;
+ if (features & NETIF_F_IPV6_CSUM &&
+ !xenbus_read_unsigned(np->xbdev->otherend,
+ "feature-ipv6-csum-offload", 0))
+ features &= ~NETIF_F_IPV6_CSUM;
- if (!val)
- features &= ~NETIF_F_TSO;
- }
+ if (features & NETIF_F_TSO &&
+ !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
+ features &= ~NETIF_F_TSO;
- if (features & NETIF_F_TSO6) {
- if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
- "feature-gso-tcpv6", "%d", &val) < 0)
- val = 0;
-
- if (!val)
- features &= ~NETIF_F_TSO6;
- }
+ if (features & NETIF_F_TSO6 &&
+ !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
+ features &= ~NETIF_F_TSO6;
return features;
}
@@ -1823,18 +1803,13 @@ static int talk_to_netback(struct xenbus_device *dev,
info->netdev->irq = 0;
/* Check if backend supports multiple queues */
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "multi-queue-max-queues", "%u", &max_queues);
- if (err < 0)
- max_queues = 1;
+ max_queues = xenbus_read_unsigned(info->xbdev->otherend,
+ "multi-queue-max-queues", 1);
num_queues = min(max_queues, xennet_max_queues);
/* Check feature-split-event-channels */
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-split-event-channels", "%u",
- &feature_split_evtchn);
- if (err < 0)
- feature_split_evtchn = 0;
+ feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
+ "feature-split-event-channels", 0);
/* Read mac addr. */
err = xen_net_read_mac(dev, info->netdev->dev_addr);
@@ -1968,16 +1943,10 @@ static int xennet_connect(struct net_device *dev)
struct netfront_info *np = netdev_priv(dev);
unsigned int num_queues = 0;
int err;
- unsigned int feature_rx_copy;
unsigned int j = 0;
struct netfront_queue *queue = NULL;
- err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
- "feature-rx-copy", "%u", &feature_rx_copy);
- if (err != 1)
- feature_rx_copy = 0;
-
- if (!feature_rx_copy) {
+ if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
dev_info(&dev->dev,
"backend does not support copying receive path\n");
return -ENODEV;
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 6f9563a96488..8a04c5e02999 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -297,35 +297,34 @@ static int mei_nfc_recv(struct nfc_mei_phy *phy, u8 *buf, size_t length)
}
-static void nfc_mei_event_cb(struct mei_cl_device *cldev, u32 events,
- void *context)
+static void nfc_mei_rx_cb(struct mei_cl_device *cldev)
{
- struct nfc_mei_phy *phy = context;
+ struct nfc_mei_phy *phy = mei_cldev_get_drvdata(cldev);
+ struct sk_buff *skb;
+ int reply_size;
- if (phy->hard_fault != 0)
+ if (!phy)
return;
- if (events & BIT(MEI_CL_EVENT_RX)) {
- struct sk_buff *skb;
- int reply_size;
+ if (phy->hard_fault != 0)
+ return;
- skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL);
- if (!skb)
- return;
+ skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL);
+ if (!skb)
+ return;
- reply_size = mei_nfc_recv(phy, skb->data, MEI_NFC_MAX_READ);
- if (reply_size < MEI_NFC_HEADER_SIZE) {
- kfree_skb(skb);
- return;
- }
+ reply_size = mei_nfc_recv(phy, skb->data, MEI_NFC_MAX_READ);
+ if (reply_size < MEI_NFC_HEADER_SIZE) {
+ kfree_skb(skb);
+ return;
+ }
- skb_put(skb, reply_size);
- skb_pull(skb, MEI_NFC_HEADER_SIZE);
+ skb_put(skb, reply_size);
+ skb_pull(skb, MEI_NFC_HEADER_SIZE);
- MEI_DUMP_SKB_IN("mei frame read", skb);
+ MEI_DUMP_SKB_IN("mei frame read", skb);
- nfc_hci_recv_frame(phy->hdev, skb);
- }
+ nfc_hci_recv_frame(phy->hdev, skb);
}
static int nfc_mei_phy_enable(void *phy_id)
@@ -356,8 +355,7 @@ static int nfc_mei_phy_enable(void *phy_id)
goto err;
}
- r = mei_cldev_register_event_cb(phy->cldev, BIT(MEI_CL_EVENT_RX),
- nfc_mei_event_cb, phy);
+ r = mei_cldev_register_rx_cb(phy->cldev, nfc_mei_rx_cb);
if (r) {
pr_err("Event cb registration failed %d\n", r);
goto err;
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index 3092501f26c4..eb5eddf1794e 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -82,28 +82,7 @@ static struct mei_cl_driver microread_driver = {
.remove = microread_mei_remove,
};
-static int microread_mei_init(void)
-{
- int r;
-
- pr_debug(DRIVER_DESC ": %s\n", __func__);
-
- r = mei_cldev_driver_register(&microread_driver);
- if (r) {
- pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n");
- return r;
- }
-
- return 0;
-}
-
-static void microread_mei_exit(void)
-{
- mei_cldev_driver_unregister(&microread_driver);
-}
-
-module_init(microread_mei_init);
-module_exit(microread_mei_exit);
+module_mei_cl_driver(microread_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
index 46d0eb24eef9..ad57a8ec00d6 100644
--- a/drivers/nfc/pn544/mei.c
+++ b/drivers/nfc/pn544/mei.c
@@ -82,28 +82,7 @@ static struct mei_cl_driver pn544_driver = {
.remove = pn544_mei_remove,
};
-static int pn544_mei_init(void)
-{
- int r;
-
- pr_debug(DRIVER_DESC ": %s\n", __func__);
-
- r = mei_cldev_driver_register(&pn544_driver);
- if (r) {
- pr_err(PN544_DRIVER_NAME ": driver registration failed\n");
- return r;
- }
-
- return 0;
-}
-
-static void pn544_mei_exit(void)
-{
- mei_cldev_driver_unregister(&pn544_driver);
-}
-
-module_init(pn544_mei_init);
-module_exit(pn544_mei_exit);
+module_mei_cl_driver(pn544_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 35b3fee5a453..b40cfb076f02 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -263,21 +263,6 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
return BLK_MQ_RQ_QUEUE_OK;
}
-static inline void nvme_setup_write_zeroes(struct nvme_ns *ns,
- struct request *req, struct nvme_command *cmnd)
-{
- struct nvme_write_zeroes_cmd *write_zeroes = &cmnd->write_zeroes;
-
- memset(cmnd, 0, sizeof(*cmnd));
- write_zeroes->opcode = nvme_cmd_write_zeroes;
- write_zeroes->nsid = cpu_to_le32(ns->ns_id);
- write_zeroes->slba =
- cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
- write_zeroes->length =
- cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
- write_zeroes->control = 0;
-}
-
static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmnd)
{
@@ -330,8 +315,6 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
nvme_setup_flush(ns, cmd);
else if (req_op(req) == REQ_OP_DISCARD)
ret = nvme_setup_discard(ns, req, cmd);
- else if (req_op(req) == REQ_OP_WRITE_ZEROES)
- nvme_setup_write_zeroes(ns, req, cmd);
else
nvme_setup_rw(ns, req, cmd);
@@ -952,10 +935,6 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
nvme_config_discard(ns);
- if (ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES)
- blk_queue_max_write_zeroes_sectors(ns->queue,
- ((u32)(USHRT_MAX + 1) * bs) >> 9);
-
blk_mq_unfreeze_queue(disk->queue);
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3ba38f4d774d..d6e6bce93d0c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2086,9 +2086,6 @@ static const struct pci_error_handlers nvme_err_handler = {
.reset_notify = nvme_reset_notify,
};
-/* Move to pci_ids.h later */
-#define PCI_CLASS_STORAGE_EXPRESS 0x010802
-
static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x0953),
.driver_data = NVME_QUIRK_STRIPE_SIZE |
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index d0f60c36d576..6f5074153dcd 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -480,7 +480,7 @@ out_free_link:
return ret;
}
-static int nvmet_port_subsys_drop_link(struct config_item *parent,
+static void nvmet_port_subsys_drop_link(struct config_item *parent,
struct config_item *target)
{
struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
@@ -493,7 +493,7 @@ static int nvmet_port_subsys_drop_link(struct config_item *parent,
goto found;
}
up_write(&nvmet_config_sem);
- return -EINVAL;
+ return;
found:
list_del(&p->entry);
@@ -502,7 +502,6 @@ found:
nvmet_disable_port(port);
up_write(&nvmet_config_sem);
kfree(p);
- return 0;
}
static struct configfs_item_operations nvmet_port_subsys_item_ops = {
@@ -556,7 +555,7 @@ out_free_link:
return ret;
}
-static int nvmet_allowed_hosts_drop_link(struct config_item *parent,
+static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
struct config_item *target)
{
struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
@@ -569,14 +568,13 @@ static int nvmet_allowed_hosts_drop_link(struct config_item *parent,
goto found;
}
up_write(&nvmet_config_sem);
- return -EINVAL;
+ return;
found:
list_del(&p->entry);
nvmet_genctr++;
up_write(&nvmet_config_sem);
kfree(p);
- return 0;
}
static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index ba140eaee5c8..650f1b1797ad 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -35,6 +35,16 @@ config NVMEM_LPC18XX_EEPROM
To compile this driver as a module, choose M here: the module
will be called nvmem_lpc18xx_eeprom.
+config NVMEM_LPC18XX_OTP
+ tristate "NXP LPC18XX OTP Memory Support"
+ depends on ARCH_LPC18XX || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say Y here to include support for NXP LPC18xx OTP memory found on
+ all LPC18xx and LPC43xx devices.
+ To compile this driver as a module, choose M here: the module
+ will be called nvmem_lpc18xx_otp.
+
config NVMEM_MXS_OCOTP
tristate "Freescale MXS On-Chip OTP Memory Support"
depends on ARCH_MXS || COMPILE_TEST
@@ -80,6 +90,18 @@ config ROCKCHIP_EFUSE
This driver can also be built as a module. If so, the module
will be called nvmem_rockchip_efuse.
+config NVMEM_BCM_OCOTP
+ tristate "Broadcom On-Chip OTP Controller support"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ depends on HAS_IOMEM
+ default ARCH_BCM_IPROC
+ help
+ Say Y here to enable read/write access to the Broadcom OTP
+ controller.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-bcm-ocotp.
+
config NVMEM_SUNXI_SID
tristate "Allwinner SoCs SID support"
depends on ARCH_SUNXI
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 8f942a0cdaec..86e45995fdad 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -6,10 +6,14 @@ obj-$(CONFIG_NVMEM) += nvmem_core.o
nvmem_core-y := core.o
# Devices
+obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o
+nvmem-bcm-ocotp-y := bcm-ocotp.o
obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o
nvmem-imx-ocotp-y := imx-ocotp.o
obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o
nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o
+obj-$(CONFIG_NVMEM_LPC18XX_OTP) += nvmem_lpc18xx_otp.o
+nvmem_lpc18xx_otp-y := lpc18xx_otp.o
obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o
nvmem-mxs-ocotp-y := mxs-ocotp.o
obj-$(CONFIG_MTK_EFUSE) += nvmem_mtk-efuse.o
diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c
new file mode 100644
index 000000000000..646cadbf1f93
--- /dev/null
+++ b/drivers/nvmem/bcm-ocotp.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+/*
+ * # of tries for OTP Status. The time to execute a command varies. The slowest
+ * commands are writes which also vary based on the # of bits turned on. Writing
+ * 0xffffffff takes ~3800 us.
+ */
+#define OTPC_RETRIES 5000
+
+/* Sequence to enable OTP program */
+#define OTPC_PROG_EN_SEQ { 0xf, 0x4, 0x8, 0xd }
+
+/* OTPC Commands */
+#define OTPC_CMD_READ 0x0
+#define OTPC_CMD_OTP_PROG_ENABLE 0x2
+#define OTPC_CMD_OTP_PROG_DISABLE 0x3
+#define OTPC_CMD_PROGRAM 0xA
+
+/* OTPC Status Bits */
+#define OTPC_STAT_CMD_DONE BIT(1)
+#define OTPC_STAT_PROG_OK BIT(2)
+
+/* OTPC register definition */
+#define OTPC_MODE_REG_OFFSET 0x0
+#define OTPC_MODE_REG_OTPC_MODE 0
+#define OTPC_COMMAND_OFFSET 0x4
+#define OTPC_COMMAND_COMMAND_WIDTH 6
+#define OTPC_CMD_START_OFFSET 0x8
+#define OTPC_CMD_START_START 0
+#define OTPC_CPU_STATUS_OFFSET 0xc
+#define OTPC_CPUADDR_REG_OFFSET 0x28
+#define OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH 16
+#define OTPC_CPU_WRITE_REG_OFFSET 0x2c
+
+#define OTPC_CMD_MASK (BIT(OTPC_COMMAND_COMMAND_WIDTH) - 1)
+#define OTPC_ADDR_MASK (BIT(OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH) - 1)
+
+
+struct otpc_map {
+ /* in words. */
+ u32 otpc_row_size;
+ /* 128 bit row / 4 words support. */
+ u16 data_r_offset[4];
+ /* 128 bit row / 4 words support. */
+ u16 data_w_offset[4];
+};
+
+static struct otpc_map otp_map = {
+ .otpc_row_size = 1,
+ .data_r_offset = {0x10},
+ .data_w_offset = {0x2c},
+};
+
+static struct otpc_map otp_map_v2 = {
+ .otpc_row_size = 2,
+ .data_r_offset = {0x10, 0x5c},
+ .data_w_offset = {0x2c, 0x64},
+};
+
+struct otpc_priv {
+ struct device *dev;
+ void __iomem *base;
+ struct otpc_map *map;
+ struct nvmem_config *config;
+};
+
+static inline void set_command(void __iomem *base, u32 command)
+{
+ writel(command & OTPC_CMD_MASK, base + OTPC_COMMAND_OFFSET);
+}
+
+static inline void set_cpu_address(void __iomem *base, u32 addr)
+{
+ writel(addr & OTPC_ADDR_MASK, base + OTPC_CPUADDR_REG_OFFSET);
+}
+
+static inline void set_start_bit(void __iomem *base)
+{
+ writel(1 << OTPC_CMD_START_START, base + OTPC_CMD_START_OFFSET);
+}
+
+static inline void reset_start_bit(void __iomem *base)
+{
+ writel(0, base + OTPC_CMD_START_OFFSET);
+}
+
+static inline void write_cpu_data(void __iomem *base, u32 value)
+{
+ writel(value, base + OTPC_CPU_WRITE_REG_OFFSET);
+}
+
+static int poll_cpu_status(void __iomem *base, u32 value)
+{
+ u32 status;
+ u32 retries;
+
+ for (retries = 0; retries < OTPC_RETRIES; retries++) {
+ status = readl(base + OTPC_CPU_STATUS_OFFSET);
+ if (status & value)
+ break;
+ udelay(1);
+ }
+ if (retries == OTPC_RETRIES)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int enable_ocotp_program(void __iomem *base)
+{
+ static const u32 vals[] = OTPC_PROG_EN_SEQ;
+ int i;
+ int ret;
+
+ /* Write the magic sequence to enable programming */
+ set_command(base, OTPC_CMD_OTP_PROG_ENABLE);
+ for (i = 0; i < ARRAY_SIZE(vals); i++) {
+ write_cpu_data(base, vals[i]);
+ set_start_bit(base);
+ ret = poll_cpu_status(base, OTPC_STAT_CMD_DONE);
+ reset_start_bit(base);
+ if (ret)
+ return ret;
+ }
+
+ return poll_cpu_status(base, OTPC_STAT_PROG_OK);
+}
+
+static int disable_ocotp_program(void __iomem *base)
+{
+ int ret;
+
+ set_command(base, OTPC_CMD_OTP_PROG_DISABLE);
+ set_start_bit(base);
+ ret = poll_cpu_status(base, OTPC_STAT_PROG_OK);
+ reset_start_bit(base);
+
+ return ret;
+}
+
+static int bcm_otpc_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct otpc_priv *priv = context;
+ u32 *buf = val;
+ u32 bytes_read;
+ u32 address = offset / priv->config->word_size;
+ int i, ret;
+
+ for (bytes_read = 0; bytes_read < bytes;) {
+ set_command(priv->base, OTPC_CMD_READ);
+ set_cpu_address(priv->base, address++);
+ set_start_bit(priv->base);
+ ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE);
+ if (ret) {
+ dev_err(priv->dev, "otp read error: 0x%x", ret);
+ return -EIO;
+ }
+
+ for (i = 0; i < priv->map->otpc_row_size; i++) {
+ *buf++ = readl(priv->base +
+ priv->map->data_r_offset[i]);
+ bytes_read += sizeof(*buf);
+ }
+
+ reset_start_bit(priv->base);
+ }
+
+ return 0;
+}
+
+static int bcm_otpc_write(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct otpc_priv *priv = context;
+ u32 *buf = val;
+ u32 bytes_written;
+ u32 address = offset / priv->config->word_size;
+ int i, ret;
+
+ if (offset % priv->config->word_size)
+ return -EINVAL;
+
+ ret = enable_ocotp_program(priv->base);
+ if (ret)
+ return -EIO;
+
+ for (bytes_written = 0; bytes_written < bytes;) {
+ set_command(priv->base, OTPC_CMD_PROGRAM);
+ set_cpu_address(priv->base, address++);
+ for (i = 0; i < priv->map->otpc_row_size; i++) {
+ writel(*buf, priv->base + priv->map->data_w_offset[i]);
+ buf++;
+ bytes_written += sizeof(*buf);
+ }
+ set_start_bit(priv->base);
+ ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE);
+ reset_start_bit(priv->base);
+ if (ret) {
+ dev_err(priv->dev, "otp write error: 0x%x", ret);
+ return -EIO;
+ }
+ }
+
+ disable_ocotp_program(priv->base);
+
+ return 0;
+}
+
+static struct nvmem_config bcm_otpc_nvmem_config = {
+ .name = "bcm-ocotp",
+ .read_only = false,
+ .word_size = 4,
+ .stride = 4,
+ .owner = THIS_MODULE,
+ .reg_read = bcm_otpc_read,
+ .reg_write = bcm_otpc_write,
+};
+
+static const struct of_device_id bcm_otpc_dt_ids[] = {
+ { .compatible = "brcm,ocotp" },
+ { .compatible = "brcm,ocotp-v2" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids);
+
+static int bcm_otpc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node;
+ struct resource *res;
+ struct otpc_priv *priv;
+ struct nvmem_device *nvmem;
+ int err;
+ u32 num_words;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (of_device_is_compatible(dev->of_node, "brcm,ocotp"))
+ priv->map = &otp_map;
+ else if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2"))
+ priv->map = &otp_map_v2;
+ else {
+ dev_err(&pdev->dev,
+ "%s otpc config map not defined\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Get OTP base address register. */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base)) {
+ dev_err(dev, "unable to map I/O memory\n");
+ return PTR_ERR(priv->base);
+ }
+
+ /* Enable CPU access to OTPC. */
+ writel(readl(priv->base + OTPC_MODE_REG_OFFSET) |
+ BIT(OTPC_MODE_REG_OTPC_MODE),
+ priv->base + OTPC_MODE_REG_OFFSET);
+ reset_start_bit(priv->base);
+
+ /* Read size of memory in words. */
+ err = of_property_read_u32(dn, "brcm,ocotp-size", &num_words);
+ if (err) {
+ dev_err(dev, "size parameter not specified\n");
+ return -EINVAL;
+ } else if (num_words == 0) {
+ dev_err(dev, "size must be > 0\n");
+ return -EINVAL;
+ }
+
+ bcm_otpc_nvmem_config.size = 4 * num_words;
+ bcm_otpc_nvmem_config.dev = dev;
+ bcm_otpc_nvmem_config.priv = priv;
+
+ if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2")) {
+ bcm_otpc_nvmem_config.word_size = 8;
+ bcm_otpc_nvmem_config.stride = 8;
+ }
+
+ priv->config = &bcm_otpc_nvmem_config;
+
+ nvmem = nvmem_register(&bcm_otpc_nvmem_config);
+ if (IS_ERR(nvmem)) {
+ dev_err(dev, "error registering nvmem config\n");
+ return PTR_ERR(nvmem);
+ }
+
+ platform_set_drvdata(pdev, nvmem);
+
+ return 0;
+}
+
+static int bcm_otpc_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(nvmem);
+}
+
+static struct platform_driver bcm_otpc_driver = {
+ .probe = bcm_otpc_probe,
+ .remove = bcm_otpc_remove,
+ .driver = {
+ .name = "brcm-otpc",
+ .of_match_table = bcm_otpc_dt_ids,
+ },
+};
+module_platform_driver(bcm_otpc_driver);
+
+MODULE_DESCRIPTION("Broadcom OTPC driver");
+MODULE_LICENSE("GPL v2");
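Rows exposed by this provider can be read back through the nvmem consumer API. A minimal sketch, assuming the "bcm-ocotp" device name from bcm_otpc_nvmem_config and a caller that already holds a struct device:

#include <linux/nvmem-consumer.h>

static int example_read_first_row(struct device *dev, u32 *row)
{
	struct nvmem_device *nvmem;
	int ret;

	nvmem = nvmem_device_get(dev, "bcm-ocotp");
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	/* read one 32-bit word from offset 0 */
	ret = nvmem_device_read(nvmem, 0, sizeof(*row), row);
	nvmem_device_put(nvmem);

	return ret < 0 ? ret : 0;
}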
diff --git a/drivers/nvmem/lpc18xx_otp.c b/drivers/nvmem/lpc18xx_otp.c
new file mode 100644
index 000000000000..be8d07403ffc
--- /dev/null
+++ b/drivers/nvmem/lpc18xx_otp.c
@@ -0,0 +1,124 @@
+/*
+ * NXP LPC18xx/43xx OTP memory NVMEM driver
+ *
+ * Copyright (c) 2016 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on the imx ocotp driver,
+ * Copyright (c) 2015 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * TODO: add support for writing OTP register via API in boot ROM.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * LPC18xx OTP memory contains 4 banks with 4 32-bit words. Bank 0 starts
+ * at offset 0 from the base.
+ *
+ * Bank 0 contains the part ID for Flashless devices and is reserved for
+ * devices with Flash.
+ * Banks 1/2 are general purpose or AES key storage for secure devices.
+ * Bank 3 contains control data, USB ID and general purpose words.
+ */
+#define LPC18XX_OTP_NUM_BANKS 4
+#define LPC18XX_OTP_WORDS_PER_BANK 4
+#define LPC18XX_OTP_WORD_SIZE sizeof(u32)
+#define LPC18XX_OTP_SIZE (LPC18XX_OTP_NUM_BANKS * \
+ LPC18XX_OTP_WORDS_PER_BANK * \
+ LPC18XX_OTP_WORD_SIZE)
+
+struct lpc18xx_otp {
+ void __iomem *base;
+};
+
+static int lpc18xx_otp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct lpc18xx_otp *otp = context;
+ unsigned int count = bytes >> 2;
+ u32 index = offset >> 2;
+ u32 *buf = val;
+ int i;
+
+ if (count > (LPC18XX_OTP_SIZE - index))
+ count = LPC18XX_OTP_SIZE - index;
+
+ for (i = index; i < (index + count); i++)
+ *buf++ = readl(otp->base + i * LPC18XX_OTP_WORD_SIZE);
+
+ return 0;
+}
+
+static struct nvmem_config lpc18xx_otp_nvmem_config = {
+ .name = "lpc18xx-otp",
+ .read_only = true,
+ .word_size = LPC18XX_OTP_WORD_SIZE,
+ .stride = LPC18XX_OTP_WORD_SIZE,
+ .owner = THIS_MODULE,
+ .reg_read = lpc18xx_otp_read,
+};
+
+static int lpc18xx_otp_probe(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem;
+ struct lpc18xx_otp *otp;
+ struct resource *res;
+
+ otp = devm_kzalloc(&pdev->dev, sizeof(*otp), GFP_KERNEL);
+ if (!otp)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ otp->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(otp->base))
+ return PTR_ERR(otp->base);
+
+ lpc18xx_otp_nvmem_config.size = LPC18XX_OTP_SIZE;
+ lpc18xx_otp_nvmem_config.dev = &pdev->dev;
+ lpc18xx_otp_nvmem_config.priv = otp;
+
+ nvmem = nvmem_register(&lpc18xx_otp_nvmem_config);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ platform_set_drvdata(pdev, nvmem);
+
+ return 0;
+}
+
+static int lpc18xx_otp_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(nvmem);
+}
+
+static const struct of_device_id lpc18xx_otp_dt_ids[] = {
+ { .compatible = "nxp,lpc1850-otp" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_otp_dt_ids);
+
+static struct platform_driver lpc18xx_otp_driver = {
+ .probe = lpc18xx_otp_probe,
+ .remove = lpc18xx_otp_remove,
+ .driver = {
+ .name = "lpc18xx_otp",
+ .of_match_table = lpc18xx_otp_dt_ids,
+ },
+};
+module_platform_driver(lpc18xx_otp_driver);
+
+MODULE_AUTHOR("Joachim Eastwoood <manabian@gmail.com>");
+MODULE_DESCRIPTION("NXP LPC18xx OTP NVMEM driver");
+MODULE_LICENSE("GPL v2");
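Given the bank layout described in the header comment, the byte offset passed to the read callback can be derived from a bank and word index. A small helper sketch under that assumption:

static unsigned int lpc18xx_otp_word_offset(unsigned int bank,
					    unsigned int word)
{
	/* banks are contiguous: 4 words of 4 bytes per bank */
	return (bank * LPC18XX_OTP_WORDS_PER_BANK + word) *
	       LPC18XX_OTP_WORD_SIZE;
}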
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 318dbb51e7a2..0d4cda7050e0 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -58,6 +58,41 @@ struct of_overlay {
static int of_overlay_apply_one(struct of_overlay *ov,
struct device_node *target, const struct device_node *overlay);
+static BLOCKING_NOTIFIER_HEAD(of_overlay_chain);
+
+int of_overlay_notifier_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&of_overlay_chain, nb);
+}
+EXPORT_SYMBOL_GPL(of_overlay_notifier_register);
+
+int of_overlay_notifier_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&of_overlay_chain, nb);
+}
+EXPORT_SYMBOL_GPL(of_overlay_notifier_unregister);
+
+static int of_overlay_notify(struct of_overlay *ov,
+ enum of_overlay_notify_action action)
+{
+ struct of_overlay_notify_data nd;
+ int i, ret;
+
+ for (i = 0; i < ov->count; i++) {
+ struct of_overlay_info *ovinfo = &ov->ovinfo_tab[i];
+
+ nd.target = ovinfo->target;
+ nd.overlay = ovinfo->overlay;
+
+ ret = blocking_notifier_call_chain(&of_overlay_chain,
+ action, &nd);
+ if (ret)
+ return notifier_to_errno(ret);
+ }
+
+ return 0;
+}
+
static int of_overlay_apply_single_property(struct of_overlay *ov,
struct device_node *target, struct property *prop)
{
@@ -368,6 +403,13 @@ int of_overlay_create(struct device_node *tree)
goto err_free_idr;
}
+ err = of_overlay_notify(ov, OF_OVERLAY_PRE_APPLY);
+ if (err < 0) {
+ pr_err("%s: Pre-apply notifier failed (err=%d)\n",
+ __func__, err);
+ goto err_free_idr;
+ }
+
/* apply the overlay */
err = of_overlay_apply(ov);
if (err)
@@ -382,6 +424,8 @@ int of_overlay_create(struct device_node *tree)
/* add to the tail of the overlay list */
list_add_tail(&ov->node, &ov_list);
+ of_overlay_notify(ov, OF_OVERLAY_POST_APPLY);
+
mutex_unlock(&of_mutex);
return id;
@@ -498,9 +542,10 @@ int of_overlay_destroy(int id)
goto out;
}
-
+ of_overlay_notify(ov, OF_OVERLAY_PRE_REMOVE);
list_del(&ov->node);
__of_changeset_revert(&ov->cset);
+ of_overlay_notify(ov, OF_OVERLAY_POST_REMOVE);
of_free_overlay_info(ov);
idr_remove(&ov_idr, id);
of_changeset_destroy(&ov->cset);
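Consumers can hook the new chain with of_overlay_notifier_register(); each callback receives a struct of_overlay_notify_data describing one target/overlay pair. A minimal sketch of a pre-apply hook; the callback name and message are illustrative:

static int example_overlay_notify(struct notifier_block *nb,
				  unsigned long action, void *arg)
{
	struct of_overlay_notify_data *nd = arg;

	if (action == OF_OVERLAY_PRE_APPLY)
		pr_info("overlay pre-apply on %s\n", nd->target->full_name);

	return NOTIFY_OK;
}

static struct notifier_block example_overlay_nb = {
	.notifier_call = example_overlay_notify,
};

/* registration, e.g. from module init code: */
/* of_overlay_notifier_register(&example_overlay_nb); */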
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 50b8b7d54416..530d0e49f2ed 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -5,12 +5,13 @@
*
* Author(s):
* Jan Glauber <jang@linux.vnet.ibm.com>
+ *
+ * License: GPL
*/
#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
@@ -21,10 +22,6 @@
#define SLOT_NAME_SIZE 10
static LIST_HEAD(s390_hotplug_slot_list);
-MODULE_AUTHOR("Jan Glauber <jang@linux.vnet.ibm.com");
-MODULE_DESCRIPTION("Hot Plug PCI Controller for System z");
-MODULE_LICENSE("GPL");
-
static int zpci_fn_configured(enum zpci_state state)
{
return state == ZPCI_FN_STATE_CONFIGURED ||
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index d6ff5e82377d..8fc2e9532575 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -1038,10 +1038,8 @@ static int pcifront_detach_devices(struct pcifront_device *pdev)
err = -ENOMEM;
goto out;
}
- err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%d",
- &state);
- if (err != 1)
- state = XenbusStateUnknown;
+ state = xenbus_read_unsigned(pdev->xdev->otherend, str,
+ XenbusStateUnknown);
if (state != XenbusStateClosing)
continue;
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index fe00f9134d51..e8eb7f225a88 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -129,16 +129,6 @@ config PHY_MIPHY28LP
Enable this to support the miphy transceiver (for SATA/PCIE/USB3)
that is part of STMicroelectronics STiH407 SoC.
-config PHY_MIPHY365X
- tristate "STMicroelectronics MIPHY365X PHY driver for STiH41x series"
- depends on ARCH_STI
- depends on HAS_IOMEM
- depends on OF
- select GENERIC_PHY
- help
- Enable this to support the miphy transceiver (for SATA/PCIE)
- that is part of STMicroelectronics STiH41x SoC series.
-
config PHY_RCAR_GEN2
tristate "Renesas R-Car generation 2 USB PHY driver"
depends on ARCH_RENESAS
@@ -373,7 +363,9 @@ config PHY_ROCKCHIP_INNO_USB2
tristate "Rockchip INNO USB2PHY Driver"
depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
depends on COMMON_CLK
+ depends on USB_SUPPORT
select GENERIC_PHY
+ select USB_COMMON
help
Support for Rockchip USB2.0 PHY with Innosilicon IP block.
@@ -438,14 +430,6 @@ config PHY_STIH407_USB
Enable this support to enable the picoPHY device used by USB2
and USB3 controllers on STMicroelectronics STiH407 SoC families.
-config PHY_STIH41X_USB
- tristate "STMicroelectronics USB2 PHY driver for STiH41x series"
- depends on ARCH_STI
- select GENERIC_PHY
- help
- Enable this to support the USB transceiver that is part of
- STMicroelectronics STiH41x SoC series.
-
config PHY_QCOM_UFS
tristate "Qualcomm UFS PHY driver"
depends on OF && ARCH_QCOM
@@ -489,4 +473,17 @@ config PHY_NS2_PCIE
help
Enable this to support the Broadcom Northstar2 PCIe PHY.
If unsure, say N.
+
+config PHY_MESON8B_USB2
+ tristate "Meson8b and GXBB USB2 PHY driver"
+ default ARCH_MESON
+ depends on OF && (ARCH_MESON || COMPILE_TEST)
+ depends on USB_SUPPORT
+ select USB_COMMON
+ select GENERIC_PHY
+ help
+ Enable this to support the Meson USB2 PHYs found in Meson8b
+ and GXBB SoCs.
+ If unsure, say N.
+
endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index a534cf5be07d..65eb2f436a41 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_PHY_PXA_28NM_USB2) += phy-pxa-28nm-usb2.o
obj-$(CONFIG_PHY_PXA_28NM_HSIC) += phy-pxa-28nm-hsic.o
obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o
obj-$(CONFIG_PHY_MIPHY28LP) += phy-miphy28lp.o
-obj-$(CONFIG_PHY_MIPHY365X) += phy-miphy365x.o
obj-$(CONFIG_PHY_RCAR_GEN2) += phy-rcar-gen2.o
obj-$(CONFIG_PHY_RCAR_GEN3_USB2) += phy-rcar-gen3-usb2.o
obj-$(CONFIG_OMAP_CONTROL_PHY) += phy-omap-control.o
@@ -50,7 +49,6 @@ obj-$(CONFIG_PHY_ST_SPEAR1310_MIPHY) += phy-spear1310-miphy.o
obj-$(CONFIG_PHY_ST_SPEAR1340_MIPHY) += phy-spear1340-miphy.o
obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
obj-$(CONFIG_PHY_STIH407_USB) += phy-stih407-usb.o
-obj-$(CONFIG_PHY_STIH41X_USB) += phy-stih41x-usb.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-20nm.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-14nm.o
@@ -60,3 +58,4 @@ obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_PHY_NS2_PCIE) += phy-bcm-ns2-pcie.o
+obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o
diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c
index f84a33a1bdd9..2c7a57f2d595 100644
--- a/drivers/phy/phy-berlin-sata.c
+++ b/drivers/phy/phy-berlin-sata.c
@@ -85,7 +85,6 @@ static int phy_berlin_sata_power_on(struct phy *phy)
struct phy_berlin_desc *desc = phy_get_drvdata(phy);
struct phy_berlin_priv *priv = dev_get_drvdata(phy->dev.parent);
void __iomem *ctrl_reg = priv->base + 0x60 + (desc->index * 0x80);
- int ret = 0;
u32 regval;
clk_prepare_enable(priv->clk);
@@ -130,7 +129,7 @@ static int phy_berlin_sata_power_on(struct phy *phy)
clk_disable_unprepare(priv->clk);
- return ret;
+ return 0;
}
static int phy_berlin_sata_power_off(struct phy *phy)
diff --git a/drivers/phy/phy-brcm-sata.c b/drivers/phy/phy-brcm-sata.c
index 8ffc44afdb75..ccbc3d994998 100644
--- a/drivers/phy/phy-brcm-sata.c
+++ b/drivers/phy/phy-brcm-sata.c
@@ -140,7 +140,7 @@ static inline void __iomem *brcm_sata_pcb_base(struct brcm_sata_port *port)
default:
dev_err(priv->dev, "invalid phy version\n");
break;
- };
+ }
return priv->phy_base + (port->portnum * size);
}
@@ -157,7 +157,7 @@ static inline void __iomem *brcm_sata_ctrl_base(struct brcm_sata_port *port)
default:
dev_err(priv->dev, "invalid phy version\n");
break;
- };
+ }
return priv->ctrl_base + (port->portnum * size);
}
@@ -365,7 +365,7 @@ static int brcm_sata_phy_init(struct phy *phy)
break;
default:
rc = -ENODEV;
- };
+ }
return rc;
}
diff --git a/drivers/phy/phy-da8xx-usb.c b/drivers/phy/phy-da8xx-usb.c
index c85fb0b59729..1b82bff6330f 100644
--- a/drivers/phy/phy-da8xx-usb.c
+++ b/drivers/phy/phy-da8xx-usb.c
@@ -23,6 +23,8 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#define PHY_INIT_BITS (CFGCHIP2_SESENDEN | CFGCHIP2_VBDTCTEN)
+
struct da8xx_usb_phy {
struct phy_provider *phy_provider;
struct phy *usb11_phy;
@@ -208,6 +210,9 @@ static int da8xx_usb_phy_probe(struct platform_device *pdev)
dev_warn(dev, "Failed to create usb20 phy lookup\n");
}
+ regmap_write_bits(d_phy->regmap, CFGCHIP(2),
+ PHY_INIT_BITS, PHY_INIT_BITS);
+
return 0;
}
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index 8b851f718123..6bee04cc4d53 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -229,19 +229,6 @@ struct exynos_mipi_video_phy {
spinlock_t slock;
};
-static inline int __is_running(const struct exynos_mipi_phy_desc *data,
- struct exynos_mipi_video_phy *state)
-{
- u32 val;
- int ret;
-
- ret = regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val);
- if (ret)
- return 0;
-
- return val & data->resetn_val;
-}
-
static int __set_phy_state(const struct exynos_mipi_phy_desc *data,
struct exynos_mipi_video_phy *state, unsigned int on)
{
@@ -251,7 +238,7 @@ static int __set_phy_state(const struct exynos_mipi_phy_desc *data,
/* disable in PMU sysreg */
if (!on && data->coupled_phy_id >= 0 &&
- !__is_running(state->phys[data->coupled_phy_id].data, state)) {
+ state->phys[data->coupled_phy_id].phy->power_count == 0) {
regmap_read(state->regmaps[data->enable_map], data->enable_reg,
&val);
val &= ~data->enable_val;
diff --git a/drivers/phy/phy-exynos4210-usb2.c b/drivers/phy/phy-exynos4210-usb2.c
index f30bbb0fb3b2..1f50e1004828 100644
--- a/drivers/phy/phy-exynos4210-usb2.c
+++ b/drivers/phy/phy-exynos4210-usb2.c
@@ -141,7 +141,7 @@ static void exynos4210_isol(struct samsung_usb2_phy_instance *inst, bool on)
break;
default:
return;
- };
+ }
regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask);
}
@@ -179,7 +179,7 @@ static void exynos4210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
rstbits = EXYNOS_4210_URSTCON_PHY1_P1P2 |
EXYNOS_4210_URSTCON_HOST_LINK_P2;
break;
- };
+ }
if (on) {
clk = readl(drv->reg_phy + EXYNOS_4210_UPHYCLK);
diff --git a/drivers/phy/phy-exynos4x12-usb2.c b/drivers/phy/phy-exynos4x12-usb2.c
index 765da90a536f..7f27a91acf87 100644
--- a/drivers/phy/phy-exynos4x12-usb2.c
+++ b/drivers/phy/phy-exynos4x12-usb2.c
@@ -187,7 +187,7 @@ static void exynos4x12_isol(struct samsung_usb2_phy_instance *inst, bool on)
break;
default:
return;
- };
+ }
regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask);
}
@@ -237,7 +237,7 @@ static void exynos4x12_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
rstbits = EXYNOS_4x12_URSTCON_HSIC1 |
EXYNOS_4x12_URSTCON_HOST_LINK_P1;
break;
- };
+ }
if (on) {
pwr = readl(drv->reg_phy + EXYNOS_4x12_UPHYPWR);
diff --git a/drivers/phy/phy-exynos5250-usb2.c b/drivers/phy/phy-exynos5250-usb2.c
index 2ed1735a076a..aad806272305 100644
--- a/drivers/phy/phy-exynos5250-usb2.c
+++ b/drivers/phy/phy-exynos5250-usb2.c
@@ -192,7 +192,7 @@ static void exynos5250_isol(struct samsung_usb2_phy_instance *inst, bool on)
break;
default:
return;
- };
+ }
regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask);
}
diff --git a/drivers/phy/phy-meson8b-usb2.c b/drivers/phy/phy-meson8b-usb2.c
new file mode 100644
index 000000000000..33c9f4ba157d
--- /dev/null
+++ b/drivers/phy/phy-meson8b-usb2.c
@@ -0,0 +1,286 @@
+/*
+ * Meson8b and GXBB USB2 PHY driver
+ *
+ * Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/usb/of.h>
+
+#define REG_CONFIG 0x00
+ #define REG_CONFIG_CLK_EN BIT(0)
+ #define REG_CONFIG_CLK_SEL_MASK GENMASK(3, 1)
+ #define REG_CONFIG_CLK_DIV_MASK GENMASK(10, 4)
+ #define REG_CONFIG_CLK_32k_ALTSEL BIT(15)
+ #define REG_CONFIG_TEST_TRIG BIT(31)
+
+#define REG_CTRL 0x04
+ #define REG_CTRL_SOFT_PRST BIT(0)
+ #define REG_CTRL_SOFT_HRESET BIT(1)
+ #define REG_CTRL_SS_SCALEDOWN_MODE_MASK GENMASK(3, 2)
+ #define REG_CTRL_CLK_DET_RST BIT(4)
+ #define REG_CTRL_INTR_SEL BIT(5)
+ #define REG_CTRL_CLK_DETECTED BIT(8)
+ #define REG_CTRL_SOF_SENT_RCVD_TGL BIT(9)
+ #define REG_CTRL_SOF_TOGGLE_OUT BIT(10)
+ #define REG_CTRL_POWER_ON_RESET BIT(15)
+ #define REG_CTRL_SLEEPM BIT(16)
+ #define REG_CTRL_TX_BITSTUFF_ENN_H BIT(17)
+ #define REG_CTRL_TX_BITSTUFF_ENN BIT(18)
+ #define REG_CTRL_COMMON_ON BIT(19)
+ #define REG_CTRL_REF_CLK_SEL_MASK GENMASK(21, 20)
+ #define REG_CTRL_REF_CLK_SEL_SHIFT 20
+ #define REG_CTRL_FSEL_MASK GENMASK(24, 22)
+ #define REG_CTRL_FSEL_SHIFT 22
+ #define REG_CTRL_PORT_RESET BIT(25)
+ #define REG_CTRL_THREAD_ID_MASK GENMASK(31, 26)
+
+#define REG_ENDP_INTR 0x08
+
+/* bits [31:26], [24:21] and [15:3] seem to be read-only */
+#define REG_ADP_BC 0x0c
+ #define REG_ADP_BC_VBUS_VLD_EXT_SEL BIT(0)
+ #define REG_ADP_BC_VBUS_VLD_EXT BIT(1)
+ #define REG_ADP_BC_OTG_DISABLE BIT(2)
+ #define REG_ADP_BC_ID_PULLUP BIT(3)
+ #define REG_ADP_BC_DRV_VBUS BIT(4)
+ #define REG_ADP_BC_ADP_PRB_EN BIT(5)
+ #define REG_ADP_BC_ADP_DISCHARGE BIT(6)
+ #define REG_ADP_BC_ADP_CHARGE BIT(7)
+ #define REG_ADP_BC_SESS_END BIT(8)
+ #define REG_ADP_BC_DEVICE_SESS_VLD BIT(9)
+ #define REG_ADP_BC_B_VALID BIT(10)
+ #define REG_ADP_BC_A_VALID BIT(11)
+ #define REG_ADP_BC_ID_DIG BIT(12)
+ #define REG_ADP_BC_VBUS_VALID BIT(13)
+ #define REG_ADP_BC_ADP_PROBE BIT(14)
+ #define REG_ADP_BC_ADP_SENSE BIT(15)
+ #define REG_ADP_BC_ACA_ENABLE BIT(16)
+ #define REG_ADP_BC_DCD_ENABLE BIT(17)
+ #define REG_ADP_BC_VDAT_DET_EN_B BIT(18)
+ #define REG_ADP_BC_VDAT_SRC_EN_B BIT(19)
+ #define REG_ADP_BC_CHARGE_SEL BIT(20)
+ #define REG_ADP_BC_CHARGE_DETECT BIT(21)
+ #define REG_ADP_BC_ACA_PIN_RANGE_C BIT(22)
+ #define REG_ADP_BC_ACA_PIN_RANGE_B BIT(23)
+ #define REG_ADP_BC_ACA_PIN_RANGE_A BIT(24)
+ #define REG_ADP_BC_ACA_PIN_GND BIT(25)
+ #define REG_ADP_BC_ACA_PIN_FLOAT BIT(26)
+
+#define REG_DBG_UART 0x14
+
+#define REG_TEST 0x18
+ #define REG_TEST_DATA_IN_MASK GENMASK(3, 0)
+ #define REG_TEST_EN_MASK GENMASK(7, 4)
+ #define REG_TEST_ADDR_MASK GENMASK(11, 8)
+ #define REG_TEST_DATA_OUT_SEL BIT(12)
+ #define REG_TEST_CLK BIT(13)
+ #define REG_TEST_VA_TEST_EN_B_MASK GENMASK(15, 14)
+ #define REG_TEST_DATA_OUT_MASK GENMASK(19, 16)
+ #define REG_TEST_DISABLE_ID_PULLUP BIT(20)
+
+#define REG_TUNE 0x1c
+ #define REG_TUNE_TX_RES_TUNE_MASK GENMASK(1, 0)
+ #define REG_TUNE_TX_HSXV_TUNE_MASK GENMASK(3, 2)
+ #define REG_TUNE_TX_VREF_TUNE_MASK GENMASK(7, 4)
+ #define REG_TUNE_TX_RISE_TUNE_MASK GENMASK(9, 8)
+ #define REG_TUNE_TX_PREEMP_PULSE_TUNE BIT(10)
+ #define REG_TUNE_TX_PREEMP_AMP_TUNE_MASK GENMASK(12, 11)
+ #define REG_TUNE_TX_FSLS_TUNE_MASK GENMASK(16, 13)
+ #define REG_TUNE_SQRX_TUNE_MASK GENMASK(19, 17)
+ #define REG_TUNE_OTG_TUNE GENMASK(22, 20)
+ #define REG_TUNE_COMP_DIS_TUNE GENMASK(25, 23)
+ #define REG_TUNE_HOST_DM_PULLDOWN BIT(26)
+ #define REG_TUNE_HOST_DP_PULLDOWN BIT(27)
+
+#define RESET_COMPLETE_TIME 500
+#define ACA_ENABLE_COMPLETE_TIME 50
+
+struct phy_meson8b_usb2_priv {
+ void __iomem *regs;
+ enum usb_dr_mode dr_mode;
+ struct clk *clk_usb_general;
+ struct clk *clk_usb;
+ struct reset_control *reset;
+};
+
+static u32 phy_meson8b_usb2_read(struct phy_meson8b_usb2_priv *phy_priv,
+ u32 reg)
+{
+ return readl(phy_priv->regs + reg);
+}
+
+static void phy_meson8b_usb2_mask_bits(struct phy_meson8b_usb2_priv *phy_priv,
+ u32 reg, u32 mask, u32 value)
+{
+ u32 data;
+
+ data = phy_meson8b_usb2_read(phy_priv, reg);
+ data &= ~mask;
+ data |= (value & mask);
+
+ writel(data, phy_priv->regs + reg);
+}
+
+static int phy_meson8b_usb2_power_on(struct phy *phy)
+{
+ struct phy_meson8b_usb2_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ if (!IS_ERR_OR_NULL(priv->reset)) {
+ ret = reset_control_reset(priv->reset);
+ if (ret) {
+ dev_err(&phy->dev, "Failed to trigger USB reset\n");
+ return ret;
+ }
+ }
+
+ ret = clk_prepare_enable(priv->clk_usb_general);
+ if (ret) {
+ dev_err(&phy->dev, "Failed to enable USB general clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->clk_usb);
+ if (ret) {
+ dev_err(&phy->dev, "Failed to enable USB DDR clock\n");
+ clk_disable_unprepare(priv->clk_usb_general);
+ return ret;
+ }
+
+ phy_meson8b_usb2_mask_bits(priv, REG_CONFIG, REG_CONFIG_CLK_32k_ALTSEL,
+ REG_CONFIG_CLK_32k_ALTSEL);
+
+ phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_REF_CLK_SEL_MASK,
+ 0x2 << REG_CTRL_REF_CLK_SEL_SHIFT);
+
+ phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_FSEL_MASK,
+ 0x5 << REG_CTRL_FSEL_SHIFT);
+
+ /* reset the PHY */
+ phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_POWER_ON_RESET,
+ REG_CTRL_POWER_ON_RESET);
+ udelay(RESET_COMPLETE_TIME);
+ phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_POWER_ON_RESET, 0);
+ udelay(RESET_COMPLETE_TIME);
+
+ phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_SOF_TOGGLE_OUT,
+ REG_CTRL_SOF_TOGGLE_OUT);
+
+ if (priv->dr_mode == USB_DR_MODE_HOST) {
+ phy_meson8b_usb2_mask_bits(priv, REG_ADP_BC,
+ REG_ADP_BC_ACA_ENABLE,
+ REG_ADP_BC_ACA_ENABLE);
+
+ udelay(ACA_ENABLE_COMPLETE_TIME);
+
+ if (phy_meson8b_usb2_read(priv, REG_ADP_BC) &
+ REG_ADP_BC_ACA_PIN_FLOAT) {
+ dev_warn(&phy->dev, "USB ID detect failed!\n");
+ clk_disable_unprepare(priv->clk_usb);
+ clk_disable_unprepare(priv->clk_usb_general);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int phy_meson8b_usb2_power_off(struct phy *phy)
+{
+ struct phy_meson8b_usb2_priv *priv = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(priv->clk_usb);
+ clk_disable_unprepare(priv->clk_usb_general);
+
+ return 0;
+}
+
+static const struct phy_ops phy_meson8b_usb2_ops = {
+ .power_on = phy_meson8b_usb2_power_on,
+ .power_off = phy_meson8b_usb2_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int phy_meson8b_usb2_probe(struct platform_device *pdev)
+{
+ struct phy_meson8b_usb2_priv *priv;
+ struct resource *res;
+ struct phy *phy;
+ struct phy_provider *phy_provider;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
+ priv->clk_usb_general = devm_clk_get(&pdev->dev, "usb_general");
+ if (IS_ERR(priv->clk_usb_general))
+ return PTR_ERR(priv->clk_usb_general);
+
+ priv->clk_usb = devm_clk_get(&pdev->dev, "usb");
+ if (IS_ERR(priv->clk_usb))
+ return PTR_ERR(priv->clk_usb);
+
+ priv->reset = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
+ if (PTR_ERR(priv->reset) == -EPROBE_DEFER)
+ return PTR_ERR(priv->reset);
+
+ priv->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node, -1);
+ if (priv->dr_mode == USB_DR_MODE_UNKNOWN) {
+ dev_err(&pdev->dev,
+ "missing dual role configuration of the controller\n");
+ return -EINVAL;
+ }
+
+ phy = devm_phy_create(&pdev->dev, NULL, &phy_meson8b_usb2_ops);
+ if (IS_ERR(phy)) {
+ dev_err(&pdev->dev, "failed to create PHY\n");
+ return PTR_ERR(phy);
+ }
+
+ phy_set_drvdata(phy, priv);
+
+ phy_provider =
+ devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id phy_meson8b_usb2_of_match[] = {
+ { .compatible = "amlogic,meson8b-usb2-phy", },
+ { .compatible = "amlogic,meson-gxbb-usb2-phy", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, phy_meson8b_usb2_of_match);
+
+static struct platform_driver phy_meson8b_usb2_driver = {
+ .probe = phy_meson8b_usb2_probe,
+ .driver = {
+ .name = "phy-meson-usb2",
+ .of_match_table = phy_meson8b_usb2_of_match,
+ },
+};
+module_platform_driver(phy_meson8b_usb2_driver);
+
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_DESCRIPTION("Meson8b and GXBB USB2 PHY driver");
+MODULE_LICENSE("GPL");
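
For context, a hedged consumer-side sketch of how a controller glue driver would pick up the PHY registered above through the generic PHY framework; the function name and the "usb" con_id are assumptions for illustration, not part of this patch:

#include <linux/err.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

static int example_usb_glue_probe(struct platform_device *pdev)
{
	struct phy *phy;
	int ret;

	/* "usb" is an assumed con_id; the controller's DT phy-names decides */
	phy = devm_phy_get(&pdev->dev, "usb");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* this ends up in phy_meson8b_usb2_power_on() above */
	ret = phy_power_on(phy);
	if (ret)
		return ret;

	/* ... go on to register the USB controller ... */
	return 0;
}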
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c
deleted file mode 100644
index e661f3b36eaa..000000000000
--- a/drivers/phy/phy-miphy365x.c
+++ /dev/null
@@ -1,625 +0,0 @@
-/*
- * Copyright (C) 2014 STMicroelectronics – All Rights Reserved
- *
- * STMicroelectronics PHY driver MiPHY365 (for SoC STiH416).
- *
- * Authors: Alexandre Torgue <alexandre.torgue@st.com>
- * Lee Jones <lee.jones@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2, as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
-#include <linux/clk.h>
-#include <linux/phy/phy.h>
-#include <linux/delay.h>
-#include <linux/mfd/syscon.h>
-#include <linux/regmap.h>
-
-#include <dt-bindings/phy/phy.h>
-
-#define HFC_TIMEOUT 100
-
-#define SYSCFG_SELECT_SATA_MASK BIT(1)
-#define SYSCFG_SELECT_SATA_POS 1
-
-/* MiPHY365x register definitions */
-#define RESET_REG 0x00
-#define RST_PLL BIT(1)
-#define RST_PLL_CAL BIT(2)
-#define RST_RX BIT(4)
-#define RST_MACRO BIT(7)
-
-#define STATUS_REG 0x01
-#define IDLL_RDY BIT(0)
-#define PLL_RDY BIT(1)
-#define DES_BIT_LOCK BIT(2)
-#define DES_SYMBOL_LOCK BIT(3)
-
-#define CTRL_REG 0x02
-#define TERM_EN BIT(0)
-#define PCI_EN BIT(2)
-#define DES_BIT_LOCK_EN BIT(3)
-#define TX_POL BIT(5)
-
-#define INT_CTRL_REG 0x03
-
-#define BOUNDARY1_REG 0x10
-#define SPDSEL_SEL BIT(0)
-
-#define BOUNDARY3_REG 0x12
-#define TX_SPDSEL_GEN1_VAL 0
-#define TX_SPDSEL_GEN2_VAL 0x01
-#define TX_SPDSEL_GEN3_VAL 0x02
-#define RX_SPDSEL_GEN1_VAL 0
-#define RX_SPDSEL_GEN2_VAL (0x01 << 3)
-#define RX_SPDSEL_GEN3_VAL (0x02 << 3)
-
-#define PCIE_REG 0x16
-
-#define BUF_SEL_REG 0x20
-#define CONF_GEN_SEL_GEN3 0x02
-#define CONF_GEN_SEL_GEN2 0x01
-#define PD_VDDTFILTER BIT(4)
-
-#define TXBUF1_REG 0x21
-#define SWING_VAL 0x04
-#define SWING_VAL_GEN1 0x03
-#define PREEMPH_VAL (0x3 << 5)
-
-#define TXBUF2_REG 0x22
-#define TXSLEW_VAL 0x2
-#define TXSLEW_VAL_GEN1 0x4
-
-#define RXBUF_OFFSET_CTRL_REG 0x23
-
-#define RXBUF_REG 0x25
-#define SDTHRES_VAL 0x01
-#define EQ_ON3 (0x03 << 4)
-#define EQ_ON1 (0x01 << 4)
-
-#define COMP_CTRL1_REG 0x40
-#define START_COMSR BIT(0)
-#define START_COMZC BIT(1)
-#define COMSR_DONE BIT(2)
-#define COMZC_DONE BIT(3)
-#define COMP_AUTO_LOAD BIT(4)
-
-#define COMP_CTRL2_REG 0x41
-#define COMP_2MHZ_RAT_GEN1 0x1e
-#define COMP_2MHZ_RAT 0xf
-
-#define COMP_CTRL3_REG 0x42
-#define COMSR_COMP_REF 0x33
-
-#define COMP_IDLL_REG 0x47
-#define COMZC_IDLL 0x2a
-
-#define PLL_CTRL1_REG 0x50
-#define PLL_START_CAL BIT(0)
-#define BUF_EN BIT(2)
-#define SYNCHRO_TX BIT(3)
-#define SSC_EN BIT(6)
-#define CONFIG_PLL BIT(7)
-
-#define PLL_CTRL2_REG 0x51
-#define BYPASS_PLL_CAL BIT(1)
-
-#define PLL_RAT_REG 0x52
-
-#define PLL_SSC_STEP_MSB_REG 0x56
-#define PLL_SSC_STEP_MSB_VAL 0x03
-
-#define PLL_SSC_STEP_LSB_REG 0x57
-#define PLL_SSC_STEP_LSB_VAL 0x63
-
-#define PLL_SSC_PER_MSB_REG 0x58
-#define PLL_SSC_PER_MSB_VAL 0
-
-#define PLL_SSC_PER_LSB_REG 0x59
-#define PLL_SSC_PER_LSB_VAL 0xf1
-
-#define IDLL_TEST_REG 0x72
-#define START_CLK_HF BIT(6)
-
-#define DES_BITLOCK_REG 0x86
-#define BIT_LOCK_LEVEL 0x01
-#define BIT_LOCK_CNT_512 (0x03 << 5)
-
-struct miphy365x_phy {
- struct phy *phy;
- void __iomem *base;
- bool pcie_tx_pol_inv;
- bool sata_tx_pol_inv;
- u32 sata_gen;
- u32 ctrlreg;
- u8 type;
-};
-
-struct miphy365x_dev {
- struct device *dev;
- struct regmap *regmap;
- struct mutex miphy_mutex;
- struct miphy365x_phy **phys;
- int nphys;
-};
-
-/*
- * These values are represented in Device tree. They are considered to be ABI
- * and although they can be extended any existing values must not change.
- */
-enum miphy_sata_gen {
- SATA_GEN1 = 1,
- SATA_GEN2,
- SATA_GEN3
-};
-
-static u8 rx_tx_spd[] = {
- 0, /* GEN0 doesn't exist. */
- TX_SPDSEL_GEN1_VAL | RX_SPDSEL_GEN1_VAL,
- TX_SPDSEL_GEN2_VAL | RX_SPDSEL_GEN2_VAL,
- TX_SPDSEL_GEN3_VAL | RX_SPDSEL_GEN3_VAL
-};
-
-/*
- * This function selects the system configuration,
- * either two SATA, one SATA and one PCIe, or two PCIe lanes.
- */
-static int miphy365x_set_path(struct miphy365x_phy *miphy_phy,
- struct miphy365x_dev *miphy_dev)
-{
- bool sata = (miphy_phy->type == PHY_TYPE_SATA);
-
- return regmap_update_bits(miphy_dev->regmap,
- miphy_phy->ctrlreg,
- SYSCFG_SELECT_SATA_MASK,
- sata << SYSCFG_SELECT_SATA_POS);
-}
-
-static int miphy365x_init_pcie_port(struct miphy365x_phy *miphy_phy,
- struct miphy365x_dev *miphy_dev)
-{
- u8 val;
-
- if (miphy_phy->pcie_tx_pol_inv) {
- /* Invert Tx polarity and clear pci_txdetect_pol bit */
- val = TERM_EN | PCI_EN | DES_BIT_LOCK_EN | TX_POL;
- writeb_relaxed(val, miphy_phy->base + CTRL_REG);
- writeb_relaxed(0x00, miphy_phy->base + PCIE_REG);
- }
-
- return 0;
-}
-
-static inline int miphy365x_hfc_not_rdy(struct miphy365x_phy *miphy_phy,
- struct miphy365x_dev *miphy_dev)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(HFC_TIMEOUT);
- u8 mask = IDLL_RDY | PLL_RDY;
- u8 regval;
-
- do {
- regval = readb_relaxed(miphy_phy->base + STATUS_REG);
- if (!(regval & mask))
- return 0;
-
- usleep_range(2000, 2500);
- } while (time_before(jiffies, timeout));
-
- dev_err(miphy_dev->dev, "HFC ready timeout!\n");
- return -EBUSY;
-}
-
-static inline int miphy365x_rdy(struct miphy365x_phy *miphy_phy,
- struct miphy365x_dev *miphy_dev)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(HFC_TIMEOUT);
- u8 mask = IDLL_RDY | PLL_RDY;
- u8 regval;
-
- do {
- regval = readb_relaxed(miphy_phy->base + STATUS_REG);
- if ((regval & mask) == mask)
- return 0;
-
- usleep_range(2000, 2500);
- } while (time_before(jiffies, timeout));
-
- dev_err(miphy_dev->dev, "PHY not ready timeout!\n");
- return -EBUSY;
-}
-
-static inline void miphy365x_set_comp(struct miphy365x_phy *miphy_phy,
- struct miphy365x_dev *miphy_dev)
-{
- u8 val, mask;
-
- if (miphy_phy->sata_gen == SATA_GEN1)
- writeb_relaxed(COMP_2MHZ_RAT_GEN1,
- miphy_phy->base + COMP_CTRL2_REG);
- else
- writeb_relaxed(COMP_2MHZ_RAT,
- miphy_phy->base + COMP_CTRL2_REG);
-
- if (miphy_phy->sata_gen != SATA_GEN3) {
- writeb_relaxed(COMSR_COMP_REF,
- miphy_phy->base + COMP_CTRL3_REG);
- /*
- * Force VCO current to value defined by address 0x5A
- * and disable PCIe100Mref bit
- * Enable auto load compensation for pll_i_bias
- */
- writeb_relaxed(BYPASS_PLL_CAL, miphy_phy->base + PLL_CTRL2_REG);
- writeb_relaxed(COMZC_IDLL, miphy_phy->base + COMP_IDLL_REG);
- }
-
- /*
- * Force restart compensation and enable auto load
- * for Comzc_Tx, Comzc_Rx and Comsr on macro
- */
- val = START_COMSR | START_COMZC | COMP_AUTO_LOAD;
- writeb_relaxed(val, miphy_phy->base + COMP_CTRL1_REG);
-
- mask = COMSR_DONE | COMZC_DONE;
- while ((readb_relaxed(miphy_phy->base + COMP_CTRL1_REG) & mask) != mask)
- cpu_relax();
-}
-
-static inline void miphy365x_set_ssc(struct miphy365x_phy *miphy_phy,
- struct miphy365x_dev *miphy_dev)
-{
- u8 val;
-
- /*
- * SSC Settings. SSC will be enabled through Link
- * SSC Ampl. = 0.4%
- * SSC Freq = 31KHz
- */
- writeb_relaxed(PLL_SSC_STEP_MSB_VAL,
- miphy_phy->base + PLL_SSC_STEP_MSB_REG);
- writeb_relaxed(PLL_SSC_STEP_LSB_VAL,
- miphy_phy->base + PLL_SSC_STEP_LSB_REG);
- writeb_relaxed(PLL_SSC_PER_MSB_VAL,
- miphy_phy->base + PLL_SSC_PER_MSB_REG);
- writeb_relaxed(PLL_SSC_PER_LSB_VAL,
- miphy_phy->base + PLL_SSC_PER_LSB_REG);
-
- /* SSC Settings complete */
- if (miphy_phy->sata_gen == SATA_GEN1) {
- val = PLL_START_CAL | BUF_EN | SYNCHRO_TX | CONFIG_PLL;
- writeb_relaxed(val, miphy_phy->base + PLL_CTRL1_REG);
- } else {
- val = SSC_EN | PLL_START_CAL | BUF_EN | SYNCHRO_TX | CONFIG_PLL;
- writeb_relaxed(val, miphy_phy->base + PLL_CTRL1_REG);
- }
-}
-
-static int miphy365x_init_sata_port(struct miphy365x_phy *miphy_phy,
- struct miphy365x_dev *miphy_dev)
-{
- int ret;
- u8 val;
-
- /*
- * Force PHY macro reset, PLL calibration reset, PLL reset
- * and assert Deserializer Reset
- */
- val = RST_PLL | RST_PLL_CAL | RST_RX | RST_MACRO;
- writeb_relaxed(val, miphy_phy->base + RESET_REG);
-
- if (miphy_phy->sata_tx_pol_inv)
- writeb_relaxed(TX_POL, miphy_phy->base + CTRL_REG);
-
- /*
- * Force macro1 to use rx_lspd, tx_lspd
- * Force Rx_Clock on first I-DLL phase
- * Force Des in HP mode on macro, rx_lspd, tx_lspd for Gen2/3
- */
- writeb_relaxed(SPDSEL_SEL, miphy_phy->base + BOUNDARY1_REG);
- writeb_relaxed(START_CLK_HF, miphy_phy->base + IDLL_TEST_REG);
- val = rx_tx_spd[miphy_phy->sata_gen];
- writeb_relaxed(val, miphy_phy->base + BOUNDARY3_REG);
-
- /* Wait for HFC_READY = 0 */
- ret = miphy365x_hfc_not_rdy(miphy_phy, miphy_dev);
- if (ret)
- return ret;
-
- /* Compensation Recalibration */
- miphy365x_set_comp(miphy_phy, miphy_dev);
-
- switch (miphy_phy->sata_gen) {
- case SATA_GEN3:
- /*
- * TX Swing target 550-600mv peak to peak diff
- * Tx Slew target 90-110ps rising/falling time
- * Rx Eq ON3, Sigdet threshold SDTH1
- */
- val = PD_VDDTFILTER | CONF_GEN_SEL_GEN3;
- writeb_relaxed(val, miphy_phy->base + BUF_SEL_REG);
- val = SWING_VAL | PREEMPH_VAL;
- writeb_relaxed(val, miphy_phy->base + TXBUF1_REG);
- writeb_relaxed(TXSLEW_VAL, miphy_phy->base + TXBUF2_REG);
- writeb_relaxed(0x00, miphy_phy->base + RXBUF_OFFSET_CTRL_REG);
- val = SDTHRES_VAL | EQ_ON3;
- writeb_relaxed(val, miphy_phy->base + RXBUF_REG);
- break;
- case SATA_GEN2:
- /*
- * conf gen sel=0x1 to program Gen2 banked registers
- * VDDT filter ON
- * Tx Swing target 550-600mV peak-to-peak diff
- * Tx Slew target 90-110 ps rising/falling time
- * RX Equalization ON1, Sigdet threshold SDTH1
- */
- writeb_relaxed(CONF_GEN_SEL_GEN2,
- miphy_phy->base + BUF_SEL_REG);
- writeb_relaxed(SWING_VAL, miphy_phy->base + TXBUF1_REG);
- writeb_relaxed(TXSLEW_VAL, miphy_phy->base + TXBUF2_REG);
- val = SDTHRES_VAL | EQ_ON1;
- writeb_relaxed(val, miphy_phy->base + RXBUF_REG);
- break;
- case SATA_GEN1:
- /*
- * conf gen sel = 00b to program Gen1 banked registers
- * VDDT filter ON
- * Tx Swing target 500-550mV peak-to-peak diff
- * Tx Slew target120-140 ps rising/falling time
- */
- writeb_relaxed(PD_VDDTFILTER, miphy_phy->base + BUF_SEL_REG);
- writeb_relaxed(SWING_VAL_GEN1, miphy_phy->base + TXBUF1_REG);
- writeb_relaxed(TXSLEW_VAL_GEN1, miphy_phy->base + TXBUF2_REG);
- break;
- default:
- break;
- }
-
- /* Force Macro1 in partial mode & release pll cal reset */
- writeb_relaxed(RST_RX, miphy_phy->base + RESET_REG);
- usleep_range(100, 150);
-
- miphy365x_set_ssc(miphy_phy, miphy_dev);
-
- /* Wait for phy_ready */
- ret = miphy365x_rdy(miphy_phy, miphy_dev);
- if (ret)
- return ret;
-
- /*
- * Enable macro1 to use rx_lspd & tx_lspd
- * Release Rx_Clock on first I-DLL phase on macro1
- * Assert deserializer reset
- * des_bit_lock_en is set
- * bit lock detection strength
- * Deassert deserializer reset
- */
- writeb_relaxed(0x00, miphy_phy->base + BOUNDARY1_REG);
- writeb_relaxed(0x00, miphy_phy->base + IDLL_TEST_REG);
- writeb_relaxed(RST_RX, miphy_phy->base + RESET_REG);
- val = miphy_phy->sata_tx_pol_inv ?
- (TX_POL | DES_BIT_LOCK_EN) : DES_BIT_LOCK_EN;
- writeb_relaxed(val, miphy_phy->base + CTRL_REG);
-
- val = BIT_LOCK_CNT_512 | BIT_LOCK_LEVEL;
- writeb_relaxed(val, miphy_phy->base + DES_BITLOCK_REG);
- writeb_relaxed(0x00, miphy_phy->base + RESET_REG);
-
- return 0;
-}
-
-static int miphy365x_init(struct phy *phy)
-{
- struct miphy365x_phy *miphy_phy = phy_get_drvdata(phy);
- struct miphy365x_dev *miphy_dev = dev_get_drvdata(phy->dev.parent);
- int ret = 0;
-
- mutex_lock(&miphy_dev->miphy_mutex);
-
- ret = miphy365x_set_path(miphy_phy, miphy_dev);
- if (ret) {
- mutex_unlock(&miphy_dev->miphy_mutex);
- return ret;
- }
-
- /* Initialise Miphy for PCIe or SATA */
- if (miphy_phy->type == PHY_TYPE_PCIE)
- ret = miphy365x_init_pcie_port(miphy_phy, miphy_dev);
- else
- ret = miphy365x_init_sata_port(miphy_phy, miphy_dev);
-
- mutex_unlock(&miphy_dev->miphy_mutex);
-
- return ret;
-}
-
-static int miphy365x_get_addr(struct device *dev,
- struct miphy365x_phy *miphy_phy, int index)
-{
- struct device_node *phynode = miphy_phy->phy->dev.of_node;
- const char *name;
- int type = miphy_phy->type;
- int ret;
-
- ret = of_property_read_string_index(phynode, "reg-names", index, &name);
- if (ret) {
- dev_err(dev, "no reg-names property not found\n");
- return ret;
- }
-
- if (!((!strncmp(name, "sata", 4) && type == PHY_TYPE_SATA) ||
- (!strncmp(name, "pcie", 4) && type == PHY_TYPE_PCIE)))
- return 0;
-
- miphy_phy->base = of_iomap(phynode, index);
- if (!miphy_phy->base) {
- dev_err(dev, "Failed to map %s\n", phynode->full_name);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static struct phy *miphy365x_xlate(struct device *dev,
- struct of_phandle_args *args)
-{
- struct miphy365x_dev *miphy_dev = dev_get_drvdata(dev);
- struct miphy365x_phy *miphy_phy = NULL;
- struct device_node *phynode = args->np;
- int ret, index;
-
- if (args->args_count != 1) {
- dev_err(dev, "Invalid number of cells in 'phy' property\n");
- return ERR_PTR(-EINVAL);
- }
-
- for (index = 0; index < miphy_dev->nphys; index++)
- if (phynode == miphy_dev->phys[index]->phy->dev.of_node) {
- miphy_phy = miphy_dev->phys[index];
- break;
- }
-
- if (!miphy_phy) {
- dev_err(dev, "Failed to find appropriate phy\n");
- return ERR_PTR(-EINVAL);
- }
-
- miphy_phy->type = args->args[0];
-
- if (!(miphy_phy->type == PHY_TYPE_SATA ||
- miphy_phy->type == PHY_TYPE_PCIE)) {
- dev_err(dev, "Unsupported device type: %d\n", miphy_phy->type);
- return ERR_PTR(-EINVAL);
- }
-
- /* Each port handles SATA and PCIE - third entry is always sysconf. */
- for (index = 0; index < 3; index++) {
- ret = miphy365x_get_addr(dev, miphy_phy, index);
- if (ret < 0)
- return ERR_PTR(ret);
- }
-
- return miphy_phy->phy;
-}
-
-static const struct phy_ops miphy365x_ops = {
- .init = miphy365x_init,
- .owner = THIS_MODULE,
-};
-
-static int miphy365x_of_probe(struct device_node *phynode,
- struct miphy365x_phy *miphy_phy)
-{
- of_property_read_u32(phynode, "st,sata-gen", &miphy_phy->sata_gen);
- if (!miphy_phy->sata_gen)
- miphy_phy->sata_gen = SATA_GEN1;
-
- miphy_phy->pcie_tx_pol_inv =
- of_property_read_bool(phynode, "st,pcie-tx-pol-inv");
-
- miphy_phy->sata_tx_pol_inv =
- of_property_read_bool(phynode, "st,sata-tx-pol-inv");
-
- return 0;
-}
-
-static int miphy365x_probe(struct platform_device *pdev)
-{
- struct device_node *child, *np = pdev->dev.of_node;
- struct miphy365x_dev *miphy_dev;
- struct phy_provider *provider;
- struct phy *phy;
- int ret, port = 0;
-
- miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL);
- if (!miphy_dev)
- return -ENOMEM;
-
- miphy_dev->nphys = of_get_child_count(np);
- miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys,
- sizeof(*miphy_dev->phys), GFP_KERNEL);
- if (!miphy_dev->phys)
- return -ENOMEM;
-
- miphy_dev->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
- if (IS_ERR(miphy_dev->regmap)) {
- dev_err(miphy_dev->dev, "No syscfg phandle specified\n");
- return PTR_ERR(miphy_dev->regmap);
- }
-
- miphy_dev->dev = &pdev->dev;
-
- dev_set_drvdata(&pdev->dev, miphy_dev);
-
- mutex_init(&miphy_dev->miphy_mutex);
-
- for_each_child_of_node(np, child) {
- struct miphy365x_phy *miphy_phy;
-
- miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy),
- GFP_KERNEL);
- if (!miphy_phy) {
- ret = -ENOMEM;
- goto put_child;
- }
-
- miphy_dev->phys[port] = miphy_phy;
-
- phy = devm_phy_create(&pdev->dev, child, &miphy365x_ops);
- if (IS_ERR(phy)) {
- dev_err(&pdev->dev, "failed to create PHY\n");
- ret = PTR_ERR(phy);
- goto put_child;
- }
-
- miphy_dev->phys[port]->phy = phy;
-
- ret = miphy365x_of_probe(child, miphy_phy);
- if (ret)
- goto put_child;
-
- phy_set_drvdata(phy, miphy_dev->phys[port]);
-
- port++;
- /* sysconfig offsets are indexed from 1 */
- ret = of_property_read_u32_index(np, "st,syscfg", port,
- &miphy_phy->ctrlreg);
- if (ret) {
- dev_err(&pdev->dev, "No sysconfig offset found\n");
- goto put_child;
- }
- }
-
- provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate);
- return PTR_ERR_OR_ZERO(provider);
-put_child:
- of_node_put(child);
- return ret;
-}
-
-static const struct of_device_id miphy365x_of_match[] = {
- { .compatible = "st,miphy365x-phy", },
- { },
-};
-MODULE_DEVICE_TABLE(of, miphy365x_of_match);
-
-static struct platform_driver miphy365x_driver = {
- .probe = miphy365x_probe,
- .driver = {
- .name = "miphy365x-phy",
- .of_match_table = miphy365x_of_match,
- }
-};
-module_platform_driver(miphy365x_driver);
-
-MODULE_AUTHOR("Alexandre Torgue <alexandre.torgue@st.com>");
-MODULE_DESCRIPTION("STMicroelectronics miphy365x driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-qcom-ufs-i.h b/drivers/phy/phy-qcom-ufs-i.h
index 2bd5ce43a724..d505d98cf5f8 100644
--- a/drivers/phy/phy-qcom-ufs-i.h
+++ b/drivers/phy/phy-qcom-ufs-i.h
@@ -141,11 +141,8 @@ struct ufs_qcom_phy_specific_ops {
struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy);
int ufs_qcom_phy_power_on(struct phy *generic_phy);
int ufs_qcom_phy_power_off(struct phy *generic_phy);
-int ufs_qcom_phy_exit(struct phy *generic_phy);
-int ufs_qcom_phy_init_clks(struct phy *generic_phy,
- struct ufs_qcom_phy *phy_common);
-int ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
- struct ufs_qcom_phy *phy_common);
+int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common);
+int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common);
int ufs_qcom_phy_remove(struct phy *generic_phy,
struct ufs_qcom_phy *ufs_qcom_phy);
struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
index 6ee51490f786..c71c84734916 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-14nm.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
@@ -44,30 +44,12 @@ void ufs_qcom_phy_qmp_14nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
static int ufs_qcom_phy_qmp_14nm_init(struct phy *generic_phy)
{
- struct ufs_qcom_phy_qmp_14nm *phy = phy_get_drvdata(generic_phy);
- struct ufs_qcom_phy *phy_common = &phy->common_cfg;
- int err;
-
- err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
- if (err) {
- dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
- __func__, err);
- goto out;
- }
-
- err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
- if (err) {
- dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
- __func__, err);
- goto out;
- }
- phy_common->vdda_phy.max_uV = UFS_PHY_VDDA_PHY_UV;
- phy_common->vdda_phy.min_uV = UFS_PHY_VDDA_PHY_UV;
-
- ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common);
+ return 0;
+}
-out:
- return err;
+static int ufs_qcom_phy_qmp_14nm_exit(struct phy *generic_phy)
+{
+ return 0;
}
static
@@ -117,7 +99,7 @@ static int ufs_qcom_phy_qmp_14nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
static const struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = {
.init = ufs_qcom_phy_qmp_14nm_init,
- .exit = ufs_qcom_phy_exit,
+ .exit = ufs_qcom_phy_qmp_14nm_exit,
.power_on = ufs_qcom_phy_power_on,
.power_off = ufs_qcom_phy_power_off,
.owner = THIS_MODULE,
@@ -136,6 +118,7 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct phy *generic_phy;
struct ufs_qcom_phy_qmp_14nm *phy;
+ struct ufs_qcom_phy *phy_common;
int err = 0;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
@@ -143,8 +126,9 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
err = -ENOMEM;
goto out;
}
+ phy_common = &phy->common_cfg;
- generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+ generic_phy = ufs_qcom_phy_generic_probe(pdev, phy_common,
&ufs_qcom_phy_qmp_14nm_phy_ops, &phy_14nm_ops);
if (!generic_phy) {
@@ -154,39 +138,43 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
goto out;
}
- phy_set_drvdata(generic_phy, phy);
+ err = ufs_qcom_phy_init_clks(phy_common);
+ if (err) {
+ dev_err(phy_common->dev,
+ "%s: ufs_qcom_phy_init_clks() failed %d\n",
+ __func__, err);
+ goto out;
+ }
- strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
- sizeof(phy->common_cfg.name));
+ err = ufs_qcom_phy_init_vregulators(phy_common);
+ if (err) {
+ dev_err(phy_common->dev,
+ "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+ __func__, err);
+ goto out;
+ }
+ phy_common->vdda_phy.max_uV = UFS_PHY_VDDA_PHY_UV;
+ phy_common->vdda_phy.min_uV = UFS_PHY_VDDA_PHY_UV;
-out:
- return err;
-}
+ ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common);
-static int ufs_qcom_phy_qmp_14nm_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct phy *generic_phy = to_phy(dev);
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
- int err = 0;
+ phy_set_drvdata(generic_phy, phy);
- err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
- if (err)
- dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
- __func__, err);
+ strlcpy(phy_common->name, UFS_PHY_NAME, sizeof(phy_common->name));
+out:
return err;
}
static const struct of_device_id ufs_qcom_phy_qmp_14nm_of_match[] = {
{.compatible = "qcom,ufs-phy-qmp-14nm"},
+ {.compatible = "qcom,msm8996-ufs-phy-qmp-14nm"},
{},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_14nm_of_match);
static struct platform_driver ufs_qcom_phy_qmp_14nm_driver = {
.probe = ufs_qcom_phy_qmp_14nm_probe,
- .remove = ufs_qcom_phy_qmp_14nm_remove,
.driver = {
.of_match_table = ufs_qcom_phy_qmp_14nm_of_match,
.name = "ufs_qcom_phy_qmp_14nm",
diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
index 770087ab05e2..1a26a64e06d3 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-20nm.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
@@ -63,28 +63,12 @@ void ufs_qcom_phy_qmp_20nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
static int ufs_qcom_phy_qmp_20nm_init(struct phy *generic_phy)
{
- struct ufs_qcom_phy_qmp_20nm *phy = phy_get_drvdata(generic_phy);
- struct ufs_qcom_phy *phy_common = &phy->common_cfg;
- int err = 0;
-
- err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
- if (err) {
- dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
- __func__, err);
- goto out;
- }
-
- err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
- if (err) {
- dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
- __func__, err);
- goto out;
- }
-
- ufs_qcom_phy_qmp_20nm_advertise_quirks(phy_common);
+ return 0;
+}
-out:
- return err;
+static int ufs_qcom_phy_qmp_20nm_exit(struct phy *generic_phy)
+{
+ return 0;
}
static
@@ -173,7 +157,7 @@ static int ufs_qcom_phy_qmp_20nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
static const struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
.init = ufs_qcom_phy_qmp_20nm_init,
- .exit = ufs_qcom_phy_exit,
+ .exit = ufs_qcom_phy_qmp_20nm_exit,
.power_on = ufs_qcom_phy_power_on,
.power_off = ufs_qcom_phy_power_off,
.owner = THIS_MODULE,
@@ -192,6 +176,7 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct phy *generic_phy;
struct ufs_qcom_phy_qmp_20nm *phy;
+ struct ufs_qcom_phy *phy_common;
int err = 0;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
@@ -199,8 +184,9 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
err = -ENOMEM;
goto out;
}
+ phy_common = &phy->common_cfg;
- generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+ generic_phy = ufs_qcom_phy_generic_probe(pdev, phy_common,
&ufs_qcom_phy_qmp_20nm_phy_ops, &phy_20nm_ops);
if (!generic_phy) {
@@ -210,27 +196,27 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
goto out;
}
- phy_set_drvdata(generic_phy, phy);
+ err = ufs_qcom_phy_init_clks(phy_common);
+ if (err) {
+ dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
+ __func__, err);
+ goto out;
+ }
- strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
- sizeof(phy->common_cfg.name));
+ err = ufs_qcom_phy_init_vregulators(phy_common);
+ if (err) {
+ dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+ __func__, err);
+ goto out;
+ }
-out:
- return err;
-}
+ ufs_qcom_phy_qmp_20nm_advertise_quirks(phy_common);
-static int ufs_qcom_phy_qmp_20nm_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct phy *generic_phy = to_phy(dev);
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
- int err = 0;
+ phy_set_drvdata(generic_phy, phy);
- err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
- if (err)
- dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
- __func__, err);
+ strlcpy(phy_common->name, UFS_PHY_NAME, sizeof(phy_common->name));
+out:
return err;
}
@@ -242,7 +228,6 @@ MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_20nm_of_match);
static struct platform_driver ufs_qcom_phy_qmp_20nm_driver = {
.probe = ufs_qcom_phy_qmp_20nm_probe,
- .remove = ufs_qcom_phy_qmp_20nm_remove,
.driver = {
.of_match_table = ufs_qcom_phy_qmp_20nm_of_match,
.name = "ufs_qcom_phy_qmp_20nm",
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
index 18a5b495ad65..c69568b8543d 100644
--- a/drivers/phy/phy-qcom-ufs.c
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -22,13 +22,6 @@
#define VDDP_REF_CLK_MIN_UV 1200000
#define VDDP_REF_CLK_MAX_UV 1200000
-static int __ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
- const char *, bool);
-static int ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
- const char *);
-static int ufs_qcom_phy_base_init(struct platform_device *pdev,
- struct ufs_qcom_phy *phy_common);
-
int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
struct ufs_qcom_phy_calibration *tbl_A,
int tbl_size_A,
@@ -75,45 +68,6 @@ out:
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate);
-struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
- struct ufs_qcom_phy *common_cfg,
- const struct phy_ops *ufs_qcom_phy_gen_ops,
- struct ufs_qcom_phy_specific_ops *phy_spec_ops)
-{
- int err;
- struct device *dev = &pdev->dev;
- struct phy *generic_phy = NULL;
- struct phy_provider *phy_provider;
-
- err = ufs_qcom_phy_base_init(pdev, common_cfg);
- if (err) {
- dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
- goto out;
- }
-
- phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider)) {
- err = PTR_ERR(phy_provider);
- dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
- goto out;
- }
-
- generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
- if (IS_ERR(generic_phy)) {
- err = PTR_ERR(generic_phy);
- dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
- generic_phy = NULL;
- goto out;
- }
-
- common_cfg->phy_spec_ops = phy_spec_ops;
- common_cfg->dev = dev;
-
-out:
- return generic_phy;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
-
/*
* This assumes the embedded phy structure inside generic_phy is of type
* struct ufs_qcom_phy. In order to function properly it's crucial
@@ -154,13 +108,50 @@ int ufs_qcom_phy_base_init(struct platform_device *pdev,
return 0;
}
-static int __ufs_qcom_phy_clk_get(struct phy *phy,
+struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
+ struct ufs_qcom_phy *common_cfg,
+ const struct phy_ops *ufs_qcom_phy_gen_ops,
+ struct ufs_qcom_phy_specific_ops *phy_spec_ops)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy = NULL;
+ struct phy_provider *phy_provider;
+
+ err = ufs_qcom_phy_base_init(pdev, common_cfg);
+ if (err) {
+ dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
+ goto out;
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ err = PTR_ERR(phy_provider);
+ dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
+ goto out;
+ }
+
+ generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
+ if (IS_ERR(generic_phy)) {
+ err = PTR_ERR(generic_phy);
+ dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
+ generic_phy = NULL;
+ goto out;
+ }
+
+ common_cfg->phy_spec_ops = phy_spec_ops;
+ common_cfg->dev = dev;
+
+out:
+ return generic_phy;
+}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
+
+static int __ufs_qcom_phy_clk_get(struct device *dev,
const char *name, struct clk **clk_out, bool err_print)
{
struct clk *clk;
int err = 0;
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
clk = devm_clk_get(dev, name);
if (IS_ERR(clk)) {
@@ -174,42 +165,44 @@ static int __ufs_qcom_phy_clk_get(struct phy *phy,
return err;
}
-static
-int ufs_qcom_phy_clk_get(struct phy *phy,
+static int ufs_qcom_phy_clk_get(struct device *dev,
const char *name, struct clk **clk_out)
{
- return __ufs_qcom_phy_clk_get(phy, name, clk_out, true);
+ return __ufs_qcom_phy_clk_get(dev, name, clk_out, true);
}
-int
-ufs_qcom_phy_init_clks(struct phy *generic_phy,
- struct ufs_qcom_phy *phy_common)
+int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common)
{
int err;
- err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
+ if (of_device_is_compatible(phy_common->dev->of_node,
+ "qcom,msm8996-ufs-phy-qmp-14nm"))
+ goto skip_txrx_clk;
+
+ err = ufs_qcom_phy_clk_get(phy_common->dev, "tx_iface_clk",
&phy_common->tx_iface_clk);
if (err)
goto out;
- err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
+ err = ufs_qcom_phy_clk_get(phy_common->dev, "rx_iface_clk",
&phy_common->rx_iface_clk);
if (err)
goto out;
- err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
+ err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_src",
&phy_common->ref_clk_src);
if (err)
goto out;
+skip_txrx_clk:
/*
* "ref_clk_parent" is optional hence don't abort init if it's not
* found.
*/
- __ufs_qcom_phy_clk_get(generic_phy, "ref_clk_parent",
+ __ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_parent",
&phy_common->ref_clk_parent, false);
- err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk",
+ err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk",
&phy_common->ref_clk);
out:
@@ -217,41 +210,14 @@ out:
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_clks);
-int
-ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
- struct ufs_qcom_phy *phy_common)
-{
- int err;
-
- err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll,
- "vdda-pll");
- if (err)
- goto out;
-
- err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_phy,
- "vdda-phy");
-
- if (err)
- goto out;
-
- /* vddp-ref-clk-* properties are optional */
- __ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk,
- "vddp-ref-clk", true);
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);
-
-static int __ufs_qcom_phy_init_vreg(struct phy *phy,
+static int __ufs_qcom_phy_init_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg, const char *name, bool optional)
{
int err = 0;
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
char prop_name[MAX_PROP_NAME];
- vreg->name = kstrdup(name, GFP_KERNEL);
+ vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
if (!vreg->name) {
err = -ENOMEM;
goto out;
@@ -304,14 +270,36 @@ out:
return err;
}
-static int ufs_qcom_phy_init_vreg(struct phy *phy,
+static int ufs_qcom_phy_init_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg, const char *name)
{
- return __ufs_qcom_phy_init_vreg(phy, vreg, name, false);
+ return __ufs_qcom_phy_init_vreg(dev, vreg, name, false);
}
-static
-int ufs_qcom_phy_cfg_vreg(struct phy *phy,
+int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common)
+{
+ int err;
+
+ err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_pll,
+ "vdda-pll");
+ if (err)
+ goto out;
+
+ err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_phy,
+ "vdda-phy");
+
+ if (err)
+ goto out;
+
+ /* vddp-ref-clk-* properties are optional */
+ __ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vddp_ref_clk,
+ "vddp-ref-clk", true);
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);
+
+static int ufs_qcom_phy_cfg_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg, bool on)
{
int ret = 0;
@@ -319,10 +307,6 @@ int ufs_qcom_phy_cfg_vreg(struct phy *phy,
const char *name = vreg->name;
int min_uV;
int uA_load;
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
-
- BUG_ON(!vreg);
if (regulator_count_voltages(reg) > 0) {
min_uV = on ? vreg->min_uV : 0;
@@ -350,18 +334,15 @@ out:
return ret;
}
-static
-int ufs_qcom_phy_enable_vreg(struct phy *phy,
+static int ufs_qcom_phy_enable_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg)
{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
int ret = 0;
if (!vreg || vreg->enabled)
goto out;
- ret = ufs_qcom_phy_cfg_vreg(phy, vreg, true);
+ ret = ufs_qcom_phy_cfg_vreg(dev, vreg, true);
if (ret) {
dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
__func__, ret);
@@ -380,10 +361,9 @@ out:
return ret;
}
-int ufs_qcom_phy_enable_ref_clk(struct phy *generic_phy)
+static int ufs_qcom_phy_enable_ref_clk(struct ufs_qcom_phy *phy)
{
int ret = 0;
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
if (phy->is_ref_clk_enabled)
goto out;
@@ -430,14 +410,10 @@ out_disable_src:
out:
return ret;
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk);
-static
-int ufs_qcom_phy_disable_vreg(struct phy *phy,
+static int ufs_qcom_phy_disable_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg)
{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
int ret = 0;
if (!vreg || !vreg->enabled || vreg->is_always_on)
@@ -447,7 +423,7 @@ int ufs_qcom_phy_disable_vreg(struct phy *phy,
if (!ret) {
/* ignore errors on applying disable config */
- ufs_qcom_phy_cfg_vreg(phy, vreg, false);
+ ufs_qcom_phy_cfg_vreg(dev, vreg, false);
vreg->enabled = false;
} else {
dev_err(dev, "%s: %s disable failed, err=%d\n",
@@ -457,10 +433,8 @@ out:
return ret;
}
-void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
+static void ufs_qcom_phy_disable_ref_clk(struct ufs_qcom_phy *phy)
{
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
-
if (phy->is_ref_clk_enabled) {
clk_disable_unprepare(phy->ref_clk);
/*
@@ -473,7 +447,6 @@ void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
phy->is_ref_clk_enabled = false;
}
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk);
#define UFS_REF_CLK_EN (1 << 5)
@@ -526,9 +499,8 @@ void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);
/* Turn ON M-PHY RMMI interface clocks */
-int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
+static int ufs_qcom_phy_enable_iface_clk(struct ufs_qcom_phy *phy)
{
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
int ret = 0;
if (phy->is_iface_clk_enabled)
@@ -552,20 +524,16 @@ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
out:
return ret;
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk);
/* Turn OFF M-PHY RMMI interface clocks */
-void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
+void ufs_qcom_phy_disable_iface_clk(struct ufs_qcom_phy *phy)
{
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
-
if (phy->is_iface_clk_enabled) {
clk_disable_unprepare(phy->tx_iface_clk);
clk_disable_unprepare(phy->rx_iface_clk);
phy->is_iface_clk_enabled = false;
}
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk);
int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
{
@@ -634,29 +602,6 @@ int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);
-int ufs_qcom_phy_remove(struct phy *generic_phy,
- struct ufs_qcom_phy *ufs_qcom_phy)
-{
- phy_power_off(generic_phy);
-
- kfree(ufs_qcom_phy->vdda_pll.name);
- kfree(ufs_qcom_phy->vdda_phy.name);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_remove);
-
-int ufs_qcom_phy_exit(struct phy *generic_phy)
-{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
-
- if (ufs_qcom_phy->is_powered_on)
- phy_power_off(generic_phy);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_exit);
-
int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
{
struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
@@ -678,7 +623,10 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
struct device *dev = phy_common->dev;
int err;
- err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_phy);
+ if (phy_common->is_powered_on)
+ return 0;
+
+ err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_phy);
if (err) {
dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
__func__, err);
@@ -688,23 +636,30 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
phy_common->phy_spec_ops->power_control(phy_common, true);
/* vdda_pll also enables ref clock LDOs so enable it first */
- err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_pll);
+ err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_pll);
if (err) {
dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
__func__, err);
goto out_disable_phy;
}
- err = ufs_qcom_phy_enable_ref_clk(generic_phy);
+ err = ufs_qcom_phy_enable_iface_clk(phy_common);
if (err) {
- dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
+ dev_err(dev, "%s enable phy iface clock failed, err=%d\n",
__func__, err);
goto out_disable_pll;
}
+ err = ufs_qcom_phy_enable_ref_clk(phy_common);
+ if (err) {
+ dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
+ __func__, err);
+ goto out_disable_iface_clk;
+ }
+
/* enable device PHY ref_clk pad rail */
if (phy_common->vddp_ref_clk.reg) {
- err = ufs_qcom_phy_enable_vreg(generic_phy,
+ err = ufs_qcom_phy_enable_vreg(dev,
&phy_common->vddp_ref_clk);
if (err) {
dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
@@ -717,11 +672,13 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
goto out;
out_disable_ref_clk:
- ufs_qcom_phy_disable_ref_clk(generic_phy);
+ ufs_qcom_phy_disable_ref_clk(phy_common);
+out_disable_iface_clk:
+ ufs_qcom_phy_disable_iface_clk(phy_common);
out_disable_pll:
- ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
+ ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_pll);
out_disable_phy:
- ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
+ ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_phy);
out:
return err;
}
@@ -731,15 +688,19 @@ int ufs_qcom_phy_power_off(struct phy *generic_phy)
{
struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+ if (!phy_common->is_powered_on)
+ return 0;
+
phy_common->phy_spec_ops->power_control(phy_common, false);
if (phy_common->vddp_ref_clk.reg)
- ufs_qcom_phy_disable_vreg(generic_phy,
+ ufs_qcom_phy_disable_vreg(phy_common->dev,
&phy_common->vddp_ref_clk);
- ufs_qcom_phy_disable_ref_clk(generic_phy);
+ ufs_qcom_phy_disable_ref_clk(phy_common);
+ ufs_qcom_phy_disable_iface_clk(phy_common);
- ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
- ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
+ ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_pll);
+ ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_phy);
phy_common->is_powered_on = false;
return 0;
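
The kstrdup()/kfree() pair that ufs_qcom_phy_remove() used to manage is gone because the regulator names are now duplicated with devm_kstrdup(), which is released automatically when the device is unbound. A minimal sketch of that pattern, with an invented helper name:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int example_dup_vreg_name(struct device *dev, const char *name,
				 const char **out)
{
	/* devres-managed copy: freed automatically on driver detach */
	*out = devm_kstrdup(dev, name, GFP_KERNEL);
	if (!*out)
		return -ENOMEM;

	return 0;
}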
diff --git a/drivers/phy/phy-rcar-gen3-usb2.c b/drivers/phy/phy-rcar-gen3-usb2.c
index 3d97eadd247d..c63da1b955c1 100644
--- a/drivers/phy/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/phy-rcar-gen3-usb2.c
@@ -70,6 +70,7 @@
#define USB2_LINECTRL1_DP_RPD BIT(18)
#define USB2_LINECTRL1_DMRPD_EN BIT(17)
#define USB2_LINECTRL1_DM_RPD BIT(16)
+#define USB2_LINECTRL1_OPMODE_NODRV BIT(6)
/* ADPCTRL */
#define USB2_ADPCTRL_OTGSESSVLD BIT(20)
@@ -161,6 +162,43 @@ static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch)
schedule_work(&ch->work);
}
+static void rcar_gen3_init_for_b_host(struct rcar_gen3_chan *ch)
+{
+ void __iomem *usb2_base = ch->base;
+ u32 val;
+
+ val = readl(usb2_base + USB2_LINECTRL1);
+ writel(val | USB2_LINECTRL1_OPMODE_NODRV, usb2_base + USB2_LINECTRL1);
+
+ rcar_gen3_set_linectrl(ch, 1, 1);
+ rcar_gen3_set_host_mode(ch, 1);
+ rcar_gen3_enable_vbus_ctrl(ch, 0);
+
+ val = readl(usb2_base + USB2_LINECTRL1);
+ writel(val & ~USB2_LINECTRL1_OPMODE_NODRV, usb2_base + USB2_LINECTRL1);
+}
+
+static void rcar_gen3_init_for_a_peri(struct rcar_gen3_chan *ch)
+{
+ rcar_gen3_set_linectrl(ch, 0, 1);
+ rcar_gen3_set_host_mode(ch, 0);
+ rcar_gen3_enable_vbus_ctrl(ch, 1);
+}
+
+static void rcar_gen3_init_from_a_peri_to_a_host(struct rcar_gen3_chan *ch)
+{
+ void __iomem *usb2_base = ch->base;
+ u32 val;
+
+ val = readl(usb2_base + USB2_OBINTEN);
+ writel(val & ~USB2_OBINT_BITS, usb2_base + USB2_OBINTEN);
+
+ rcar_gen3_enable_vbus_ctrl(ch, 0);
+ rcar_gen3_init_for_host(ch);
+
+ writel(val | USB2_OBINT_BITS, usb2_base + USB2_OBINTEN);
+}
+
static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
{
return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG);
@@ -174,6 +212,65 @@ static void rcar_gen3_device_recognition(struct rcar_gen3_chan *ch)
rcar_gen3_init_for_peri(ch);
}
+static bool rcar_gen3_is_host(struct rcar_gen3_chan *ch)
+{
+ return !(readl(ch->base + USB2_COMMCTRL) & USB2_COMMCTRL_OTG_PERI);
+}
+
+static ssize_t role_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
+ bool is_b_device, is_host, new_mode_is_host;
+
+ if (!ch->has_otg || !ch->phy->init_count)
+ return -EIO;
+
+ /*
+ * is_b_device: true is B-Device. false is A-Device.
+ * If {new_mode_}is_host: true is Host mode. false is Peripheral mode.
+ */
+ is_b_device = rcar_gen3_check_id(ch);
+ is_host = rcar_gen3_is_host(ch);
+ if (!strncmp(buf, "host", strlen("host")))
+ new_mode_is_host = true;
+ else if (!strncmp(buf, "peripheral", strlen("peripheral")))
+ new_mode_is_host = false;
+ else
+ return -EINVAL;
+
+	/* If the current and new modes are the same, return an error */
+ if (is_host == new_mode_is_host)
+ return -EINVAL;
+
+ if (new_mode_is_host) { /* And is_host must be false */
+ if (!is_b_device) /* A-Peripheral */
+ rcar_gen3_init_from_a_peri_to_a_host(ch);
+ else /* B-Peripheral */
+ rcar_gen3_init_for_b_host(ch);
+ } else { /* And is_host must be true */
+ if (!is_b_device) /* A-Host */
+ rcar_gen3_init_for_a_peri(ch);
+ else /* B-Host */
+ rcar_gen3_init_for_peri(ch);
+ }
+
+ return count;
+}
+
+static ssize_t role_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
+
+ if (!ch->has_otg || !ch->phy->init_count)
+ return -EIO;
+
+ return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? "host" :
+ "peripheral");
+}
+static DEVICE_ATTR_RW(role);
+
static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
{
void __iomem *usb2_base = ch->base;
@@ -351,21 +448,40 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
channel->vbus = NULL;
}
+ platform_set_drvdata(pdev, channel);
phy_set_drvdata(channel->phy, channel);
provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(provider))
+ if (IS_ERR(provider)) {
dev_err(dev, "Failed to register PHY provider\n");
+ } else if (channel->has_otg) {
+ int ret;
+
+ ret = device_create_file(dev, &dev_attr_role);
+ if (ret < 0)
+ return ret;
+ }
return PTR_ERR_OR_ZERO(provider);
}
+static int rcar_gen3_phy_usb2_remove(struct platform_device *pdev)
+{
+ struct rcar_gen3_chan *channel = platform_get_drvdata(pdev);
+
+ if (channel->has_otg)
+ device_remove_file(&pdev->dev, &dev_attr_role);
+
+ return 0;
+};
+
static struct platform_driver rcar_gen3_phy_usb2_driver = {
.driver = {
.name = "phy_rcar_gen3_usb2",
.of_match_table = rcar_gen3_phy_usb2_match_table,
},
.probe = rcar_gen3_phy_usb2_probe,
+ .remove = rcar_gen3_phy_usb2_remove,
};
module_platform_driver(rcar_gen3_phy_usb2_driver);
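
The new "role" attribute is driven from userspace; a hypothetical example writing it in C, with a placeholder sysfs path (the real path depends on the board's device tree):

#include <stdio.h>

int main(void)
{
	/* placeholder path; look it up under /sys/devices on the target */
	const char *attr = "/sys/devices/platform/soc/ee080200.usb-phy/role";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* accepted values, per role_store() above: "host" or "peripheral" */
	fputs("peripheral", f);
	fclose(f);
	return 0;
}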
diff --git a/drivers/phy/phy-rockchip-emmc.c b/drivers/phy/phy-rockchip-emmc.c
index fd57345ffed2..f1b24f18e9b2 100644
--- a/drivers/phy/phy-rockchip-emmc.c
+++ b/drivers/phy/phy-rockchip-emmc.c
@@ -132,7 +132,7 @@ static int rockchip_emmc_phy_power(struct phy *phy, bool on_off)
default:
ideal_rate = 200000000;
break;
- };
+ }
diff = (rate > ideal_rate) ?
rate - ideal_rate : ideal_rate - rate;
diff --git a/drivers/phy/phy-rockchip-inno-usb2.c b/drivers/phy/phy-rockchip-inno-usb2.c
index ac203107b071..2f99ec95079c 100644
--- a/drivers/phy/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/phy-rockchip-inno-usb2.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/extcon.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio/consumer.h>
@@ -30,11 +31,15 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/power_supply.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
+#include <linux/usb/of.h>
+#include <linux/usb/otg.h>
#define BIT_WRITEABLE_SHIFT 16
-#define SCHEDULE_DELAY (60 * HZ)
+#define SCHEDULE_DELAY (60 * HZ)
+#define OTG_SCHEDULE_DELAY (2 * HZ)
enum rockchip_usb2phy_port_id {
USB2PHY_PORT_OTG,
@@ -49,6 +54,37 @@ enum rockchip_usb2phy_host_state {
PHY_STATE_FS_LS_ONLINE = 4,
};
+/**
+ * Different states involved in USB charger detection.
+ * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection
+ * process is not yet started.
+ * USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact.
+ * USB_CHG_STATE_DCD_DONE Data pin contact is detected.
+ * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects
+ * between SDP and DCP/CDP).
+ * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects
+ * between DCP and CDP).
+ * USB_CHG_STATE_DETECTED USB charger type is determined.
+ */
+enum usb_chg_state {
+ USB_CHG_STATE_UNDEFINED = 0,
+ USB_CHG_STATE_WAIT_FOR_DCD,
+ USB_CHG_STATE_DCD_DONE,
+ USB_CHG_STATE_PRIMARY_DONE,
+ USB_CHG_STATE_SECONDARY_DONE,
+ USB_CHG_STATE_DETECTED,
+};
+
+static const unsigned int rockchip_usb2phy_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_CHG_USB_SDP,
+ EXTCON_CHG_USB_CDP,
+ EXTCON_CHG_USB_DCP,
+ EXTCON_CHG_USB_SLOW,
+ EXTCON_NONE,
+};
+
struct usb2phy_reg {
unsigned int offset;
unsigned int bitend;
@@ -58,19 +94,55 @@ struct usb2phy_reg {
};
/**
+ * struct rockchip_chg_det_reg: usb charger detect registers
+ * @cp_det: charging port detected successfully.
+ * @dcp_det: dedicated charging port detected successfully.
+ * @dp_det: assert data pin connect successfully.
+ * @idm_sink_en: open dm sink current.
+ * @idp_sink_en: open dp sink current.
+ * @idp_src_en: open dm source current.
+ * @rdm_pdwn_en: open dm pull down resistor.
+ * @vdm_src_en: open dm voltage source.
+ * @vdp_src_en: open dp voltage source.
+ * @opmode: utmi operational mode.
+ */
+struct rockchip_chg_det_reg {
+ struct usb2phy_reg cp_det;
+ struct usb2phy_reg dcp_det;
+ struct usb2phy_reg dp_det;
+ struct usb2phy_reg idm_sink_en;
+ struct usb2phy_reg idp_sink_en;
+ struct usb2phy_reg idp_src_en;
+ struct usb2phy_reg rdm_pdwn_en;
+ struct usb2phy_reg vdm_src_en;
+ struct usb2phy_reg vdp_src_en;
+ struct usb2phy_reg opmode;
+};
+
+/**
* struct rockchip_usb2phy_port_cfg: usb-phy port configuration.
* @phy_sus: phy suspend register.
+ * @bvalid_det_en: vbus valid rise detection enable register.
+ * @bvalid_det_st: vbus valid rise detection status register.
+ * @bvalid_det_clr: vbus valid rise detection clear register.
* @ls_det_en: linestate detection enable register.
* @ls_det_st: linestate detection state register.
* @ls_det_clr: linestate detection clear register.
+ * @utmi_avalid: utmi vbus avalid status register.
+ * @utmi_bvalid: utmi vbus bvalid status register.
* @utmi_ls: utmi linestate state register.
* @utmi_hstdet: utmi host disconnect register.
*/
struct rockchip_usb2phy_port_cfg {
struct usb2phy_reg phy_sus;
+ struct usb2phy_reg bvalid_det_en;
+ struct usb2phy_reg bvalid_det_st;
+ struct usb2phy_reg bvalid_det_clr;
struct usb2phy_reg ls_det_en;
struct usb2phy_reg ls_det_st;
struct usb2phy_reg ls_det_clr;
+ struct usb2phy_reg utmi_avalid;
+ struct usb2phy_reg utmi_bvalid;
struct usb2phy_reg utmi_ls;
struct usb2phy_reg utmi_hstdet;
};
@@ -80,31 +152,51 @@ struct rockchip_usb2phy_port_cfg {
* @reg: the address offset of grf for usb-phy config.
* @num_ports: specify how many ports that the phy has.
* @clkout_ctl: keep on/turn off output clk of phy.
+ * @chg_det: charger detection registers.
*/
struct rockchip_usb2phy_cfg {
unsigned int reg;
unsigned int num_ports;
struct usb2phy_reg clkout_ctl;
const struct rockchip_usb2phy_port_cfg port_cfgs[USB2PHY_NUM_PORTS];
+ const struct rockchip_chg_det_reg chg_det;
};
/**
* struct rockchip_usb2phy_port: usb-phy port data.
* @port_id: flag for otg port or host port.
* @suspended: phy suspended flag.
+ * @utmi_avalid: utmi avalid status usage flag.
+ * true - use avalid to get vbus status
+ *		false - use bvalid to get vbus status
+ * @vbus_attached: otg device vbus status.
+ * @bvalid_irq: IRQ number assigned for vbus valid rise detection.
* @ls_irq: IRQ number assigned for linestate detection.
* @mutex: for register updating in sm_work.
- * @sm_work: OTG state machine work.
+ * @chg_work: charge detect work.
+ * @otg_sm_work: OTG state machine work.
+ * @sm_work: HOST state machine work.
* @phy_cfg: port register configuration, assigned by driver data.
+ * @event_nb: hold event notification callback.
+ * @state: OTG enumeration state before device reset.
+ * @mode: the dr_mode of the controller.
*/
struct rockchip_usb2phy_port {
struct phy *phy;
unsigned int port_id;
bool suspended;
+ bool utmi_avalid;
+ bool vbus_attached;
+ int bvalid_irq;
int ls_irq;
struct mutex mutex;
+ struct delayed_work chg_work;
+ struct delayed_work otg_sm_work;
struct delayed_work sm_work;
const struct rockchip_usb2phy_port_cfg *port_cfg;
+ struct notifier_block event_nb;
+ enum usb_otg_state state;
+ enum usb_dr_mode mode;
};
/**
@@ -113,6 +205,11 @@ struct rockchip_usb2phy_port {
* @clk: clock struct of phy input clk.
* @clk480m: clock struct of phy output clk.
* @clk_hw: clock struct of phy output clk management.
+ * @chg_state: states involved in USB charger detection.
+ * @chg_type: USB charger types.
+ * @dcd_retries: the retry count used to track the data contact
+ *		 detection process.
+ * @edev: extcon device for notification registration.
* @phy_cfg: phy register configuration, assigned by driver data.
* @ports: phy port instance.
*/
@@ -122,6 +219,10 @@ struct rockchip_usb2phy {
struct clk *clk;
struct clk *clk480m;
struct clk_hw clk480m_hw;
+ enum usb_chg_state chg_state;
+ enum power_supply_type chg_type;
+ u8 dcd_retries;
+ struct extcon_dev *edev;
const struct rockchip_usb2phy_cfg *phy_cfg;
struct rockchip_usb2phy_port ports[USB2PHY_NUM_PORTS];
};
@@ -153,7 +254,7 @@ static inline bool property_enabled(struct rockchip_usb2phy *rphy,
return tmp == reg->enable;
}
-static int rockchip_usb2phy_clk480m_enable(struct clk_hw *hw)
+static int rockchip_usb2phy_clk480m_prepare(struct clk_hw *hw)
{
struct rockchip_usb2phy *rphy =
container_of(hw, struct rockchip_usb2phy, clk480m_hw);
@@ -165,14 +266,14 @@ static int rockchip_usb2phy_clk480m_enable(struct clk_hw *hw)
if (ret)
return ret;
- /* waitting for the clk become stable */
- mdelay(1);
+ /* wait for the clk to become stable */
+ usleep_range(1200, 1300);
}
return 0;
}
-static void rockchip_usb2phy_clk480m_disable(struct clk_hw *hw)
+static void rockchip_usb2phy_clk480m_unprepare(struct clk_hw *hw)
{
struct rockchip_usb2phy *rphy =
container_of(hw, struct rockchip_usb2phy, clk480m_hw);
@@ -181,7 +282,7 @@ static void rockchip_usb2phy_clk480m_disable(struct clk_hw *hw)
property_enable(rphy, &rphy->phy_cfg->clkout_ctl, false);
}
-static int rockchip_usb2phy_clk480m_enabled(struct clk_hw *hw)
+static int rockchip_usb2phy_clk480m_prepared(struct clk_hw *hw)
{
struct rockchip_usb2phy *rphy =
container_of(hw, struct rockchip_usb2phy, clk480m_hw);
@@ -197,9 +298,9 @@ rockchip_usb2phy_clk480m_recalc_rate(struct clk_hw *hw,
}
static const struct clk_ops rockchip_usb2phy_clkout_ops = {
- .enable = rockchip_usb2phy_clk480m_enable,
- .disable = rockchip_usb2phy_clk480m_disable,
- .is_enabled = rockchip_usb2phy_clk480m_enabled,
+ .prepare = rockchip_usb2phy_clk480m_prepare,
+ .unprepare = rockchip_usb2phy_clk480m_unprepare,
+ .is_prepared = rockchip_usb2phy_clk480m_prepared,
.recalc_rate = rockchip_usb2phy_clk480m_recalc_rate,
};
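The switch from .enable/.disable to .prepare/.unprepare matters because the clk core invokes enable/disable in atomic context (under a spinlock), so the usleep_range() introduced above may only live in the prepare path, which runs in process context. A consumer-side sketch, not part of this patch; the clock name passed to devm_clk_get() is an assumption:

static int example_use_phy_clk(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, "clk_usbphy_480m"); /* name assumed */
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* may sleep: prepare, then enable */
	if (ret)
		return ret;

	/* ... use the clock ... */

	clk_disable_unprepare(clk);
	return 0;
}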
@@ -263,33 +364,84 @@ err_ret:
return ret;
}
-static int rockchip_usb2phy_init(struct phy *phy)
+static int rockchip_usb2phy_extcon_register(struct rockchip_usb2phy *rphy)
{
- struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy);
- struct rockchip_usb2phy *rphy = dev_get_drvdata(phy->dev.parent);
int ret;
+ struct device_node *node = rphy->dev->of_node;
+ struct extcon_dev *edev;
+
+ if (of_property_read_bool(node, "extcon")) {
+ edev = extcon_get_edev_by_phandle(rphy->dev, 0);
+ if (IS_ERR(edev)) {
+ if (PTR_ERR(edev) != -EPROBE_DEFER)
+ dev_err(rphy->dev, "Invalid or missing extcon\n");
+ return PTR_ERR(edev);
+ }
+ } else {
+ /* Initialize extcon device */
+ edev = devm_extcon_dev_allocate(rphy->dev,
+ rockchip_usb2phy_extcon_cable);
- if (rport->port_id == USB2PHY_PORT_HOST) {
- /* clear linestate and enable linestate detect irq */
- mutex_lock(&rport->mutex);
+ if (IS_ERR(edev))
+ return -ENOMEM;
- ret = property_enable(rphy, &rport->port_cfg->ls_det_clr, true);
+ ret = devm_extcon_dev_register(rphy->dev, edev);
if (ret) {
- mutex_unlock(&rport->mutex);
+ dev_err(rphy->dev, "failed to register extcon device\n");
return ret;
}
+ }
- ret = property_enable(rphy, &rport->port_cfg->ls_det_en, true);
- if (ret) {
- mutex_unlock(&rport->mutex);
- return ret;
+ rphy->edev = edev;
+
+ return 0;
+}
+
+static int rockchip_usb2phy_init(struct phy *phy)
+{
+ struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy);
+ struct rockchip_usb2phy *rphy = dev_get_drvdata(phy->dev.parent);
+ int ret = 0;
+
+ mutex_lock(&rport->mutex);
+
+ if (rport->port_id == USB2PHY_PORT_OTG) {
+ if (rport->mode != USB_DR_MODE_HOST) {
+ /* clear bvalid status and enable bvalid detect irq */
+ ret = property_enable(rphy,
+ &rport->port_cfg->bvalid_det_clr,
+ true);
+ if (ret)
+ goto out;
+
+ ret = property_enable(rphy,
+ &rport->port_cfg->bvalid_det_en,
+ true);
+ if (ret)
+ goto out;
+
+ schedule_delayed_work(&rport->otg_sm_work,
+ OTG_SCHEDULE_DELAY);
+ } else {
+ /* If OTG works in host-only mode, do nothing. */
+ dev_dbg(&rport->phy->dev, "mode %d\n", rport->mode);
}
+ } else if (rport->port_id == USB2PHY_PORT_HOST) {
+ /* clear linestate and enable linestate detect irq */
+ ret = property_enable(rphy, &rport->port_cfg->ls_det_clr, true);
+ if (ret)
+ goto out;
+
+ ret = property_enable(rphy, &rport->port_cfg->ls_det_en, true);
+ if (ret)
+ goto out;
- mutex_unlock(&rport->mutex);
schedule_delayed_work(&rport->sm_work, SCHEDULE_DELAY);
}
- return 0;
+out:
+ mutex_unlock(&rport->mutex);
+ return ret;
}
static int rockchip_usb2phy_power_on(struct phy *phy)
@@ -340,7 +492,11 @@ static int rockchip_usb2phy_exit(struct phy *phy)
{
struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy);
- if (rport->port_id == USB2PHY_PORT_HOST)
+ if (rport->port_id == USB2PHY_PORT_OTG &&
+ rport->mode != USB_DR_MODE_HOST) {
+ cancel_delayed_work_sync(&rport->otg_sm_work);
+ cancel_delayed_work_sync(&rport->chg_work);
+ } else if (rport->port_id == USB2PHY_PORT_HOST)
cancel_delayed_work_sync(&rport->sm_work);
return 0;
@@ -354,6 +510,249 @@ static const struct phy_ops rockchip_usb2phy_ops = {
.owner = THIS_MODULE,
};
+static void rockchip_usb2phy_otg_sm_work(struct work_struct *work)
+{
+ struct rockchip_usb2phy_port *rport =
+ container_of(work, struct rockchip_usb2phy_port,
+ otg_sm_work.work);
+ struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
+ static unsigned int cable;
+ unsigned long delay;
+ bool vbus_attach, sch_work, notify_charger;
+
+ if (rport->utmi_avalid)
+ vbus_attach =
+ property_enabled(rphy, &rport->port_cfg->utmi_avalid);
+ else
+ vbus_attach =
+ property_enabled(rphy, &rport->port_cfg->utmi_bvalid);
+
+ sch_work = false;
+ notify_charger = false;
+ delay = OTG_SCHEDULE_DELAY;
+ dev_dbg(&rport->phy->dev, "%s otg sm work\n",
+ usb_otg_state_string(rport->state));
+
+ switch (rport->state) {
+ case OTG_STATE_UNDEFINED:
+ rport->state = OTG_STATE_B_IDLE;
+ if (!vbus_attach)
+ rockchip_usb2phy_power_off(rport->phy);
+ /* fall through */
+ case OTG_STATE_B_IDLE:
+ if (extcon_get_cable_state_(rphy->edev, EXTCON_USB_HOST) > 0) {
+ dev_dbg(&rport->phy->dev, "usb otg host connect\n");
+ rport->state = OTG_STATE_A_HOST;
+ rockchip_usb2phy_power_on(rport->phy);
+ return;
+ } else if (vbus_attach) {
+ dev_dbg(&rport->phy->dev, "vbus_attach\n");
+ switch (rphy->chg_state) {
+ case USB_CHG_STATE_UNDEFINED:
+ schedule_delayed_work(&rport->chg_work, 0);
+ return;
+ case USB_CHG_STATE_DETECTED:
+ switch (rphy->chg_type) {
+ case POWER_SUPPLY_TYPE_USB:
+ dev_dbg(&rport->phy->dev,
+ "sdp cable is connecetd\n");
+ rockchip_usb2phy_power_on(rport->phy);
+ rport->state = OTG_STATE_B_PERIPHERAL;
+ notify_charger = true;
+ sch_work = true;
+ cable = EXTCON_CHG_USB_SDP;
+ break;
+ case POWER_SUPPLY_TYPE_USB_DCP:
+ dev_dbg(&rport->phy->dev,
+ "dcp cable is connecetd\n");
+ rockchip_usb2phy_power_off(rport->phy);
+ notify_charger = true;
+ sch_work = true;
+ cable = EXTCON_CHG_USB_DCP;
+ break;
+ case POWER_SUPPLY_TYPE_USB_CDP:
+ dev_dbg(&rport->phy->dev,
+ "cdp cable is connecetd\n");
+ rockchip_usb2phy_power_on(rport->phy);
+ rport->state = OTG_STATE_B_PERIPHERAL;
+ notify_charger = true;
+ sch_work = true;
+ cable = EXTCON_CHG_USB_CDP;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ } else {
+ notify_charger = true;
+ rphy->chg_state = USB_CHG_STATE_UNDEFINED;
+ rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN;
+ }
+
+ if (rport->vbus_attached != vbus_attach) {
+ rport->vbus_attached = vbus_attach;
+
+ if (notify_charger && rphy->edev)
+ extcon_set_cable_state_(rphy->edev,
+ cable, vbus_attach);
+ }
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ if (!vbus_attach) {
+ dev_dbg(&rport->phy->dev, "usb disconnect\n");
+ rphy->chg_state = USB_CHG_STATE_UNDEFINED;
+ rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN;
+ rport->state = OTG_STATE_B_IDLE;
+ delay = 0;
+ rockchip_usb2phy_power_off(rport->phy);
+ }
+ sch_work = true;
+ break;
+ case OTG_STATE_A_HOST:
+ if (extcon_get_cable_state_(rphy->edev, EXTCON_USB_HOST) == 0) {
+ dev_dbg(&rport->phy->dev, "usb otg host disconnect\n");
+ rport->state = OTG_STATE_B_IDLE;
+ rockchip_usb2phy_power_off(rport->phy);
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (sch_work)
+ schedule_delayed_work(&rport->otg_sm_work, delay);
+}
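The extcon cable states published here are meant for other drivers to consume. A consumer-side sketch, not part of this patch; it assumes the consumer's device node carries an extcon phandle pointing at this phy:

static int example_read_charger_type(struct device *dev)
{
	struct extcon_dev *edev = extcon_get_edev_by_phandle(dev, 0);

	if (IS_ERR(edev))
		return PTR_ERR(edev);

	if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_DCP) > 0)
		return POWER_SUPPLY_TYPE_USB_DCP;
	if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_CDP) > 0)
		return POWER_SUPPLY_TYPE_USB_CDP;
	if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_SDP) > 0)
		return POWER_SUPPLY_TYPE_USB;

	return POWER_SUPPLY_TYPE_UNKNOWN;
}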
+
+static const char *chg_to_string(enum power_supply_type chg_type)
+{
+ switch (chg_type) {
+ case POWER_SUPPLY_TYPE_USB:
+ return "USB_SDP_CHARGER";
+ case POWER_SUPPLY_TYPE_USB_DCP:
+ return "USB_DCP_CHARGER";
+ case POWER_SUPPLY_TYPE_USB_CDP:
+ return "USB_CDP_CHARGER";
+ default:
+ return "INVALID_CHARGER";
+ }
+}
+
+static void rockchip_chg_enable_dcd(struct rockchip_usb2phy *rphy,
+ bool en)
+{
+ property_enable(rphy, &rphy->phy_cfg->chg_det.rdm_pdwn_en, en);
+ property_enable(rphy, &rphy->phy_cfg->chg_det.idp_src_en, en);
+}
+
+static void rockchip_chg_enable_primary_det(struct rockchip_usb2phy *rphy,
+ bool en)
+{
+ property_enable(rphy, &rphy->phy_cfg->chg_det.vdp_src_en, en);
+ property_enable(rphy, &rphy->phy_cfg->chg_det.idm_sink_en, en);
+}
+
+static void rockchip_chg_enable_secondary_det(struct rockchip_usb2phy *rphy,
+ bool en)
+{
+ property_enable(rphy, &rphy->phy_cfg->chg_det.vdm_src_en, en);
+ property_enable(rphy, &rphy->phy_cfg->chg_det.idp_sink_en, en);
+}
+
+#define CHG_DCD_POLL_TIME (100 * HZ / 1000)
+#define CHG_DCD_MAX_RETRIES 6
+#define CHG_PRIMARY_DET_TIME (40 * HZ / 1000)
+#define CHG_SECONDARY_DET_TIME (40 * HZ / 1000)
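For reference, the jiffies arithmetic above is effectively what msecs_to_jiffies() computes (msecs_to_jiffies() rounds up where the open-coded form truncates, which makes no difference at the usual HZ values). An equivalent spelling, shown only as a sketch and not part of the patch:

/* Equivalent delays expressed with msecs_to_jiffies(); same values at common HZ. */
#define EXAMPLE_CHG_DCD_POLL_TIME	msecs_to_jiffies(100)
#define EXAMPLE_CHG_PRIMARY_DET_TIME	msecs_to_jiffies(40)
#define EXAMPLE_CHG_SECONDARY_DET_TIME	msecs_to_jiffies(40)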
+static void rockchip_chg_detect_work(struct work_struct *work)
+{
+ struct rockchip_usb2phy_port *rport =
+ container_of(work, struct rockchip_usb2phy_port, chg_work.work);
+ struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
+ bool is_dcd, tmout, vout;
+ unsigned long delay;
+
+ dev_dbg(&rport->phy->dev, "chg detection work state = %d\n",
+ rphy->chg_state);
+ switch (rphy->chg_state) {
+ case USB_CHG_STATE_UNDEFINED:
+ if (!rport->suspended)
+ rockchip_usb2phy_power_off(rport->phy);
+ /* put the controller in non-driving mode */
+ property_enable(rphy, &rphy->phy_cfg->chg_det.opmode, false);
+ /* Start DCD processing stage 1 */
+ rockchip_chg_enable_dcd(rphy, true);
+ rphy->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
+ rphy->dcd_retries = 0;
+ delay = CHG_DCD_POLL_TIME;
+ break;
+ case USB_CHG_STATE_WAIT_FOR_DCD:
+ /* get data contact detection status */
+ is_dcd = property_enabled(rphy, &rphy->phy_cfg->chg_det.dp_det);
+ tmout = ++rphy->dcd_retries == CHG_DCD_MAX_RETRIES;
+ /* stage 2 */
+ if (is_dcd || tmout) {
+ /* stage 4 */
+ /* Turn off DCD circuitry */
+ rockchip_chg_enable_dcd(rphy, false);
+ /* Voltage Source on DP, Probe on DM */
+ rockchip_chg_enable_primary_det(rphy, true);
+ delay = CHG_PRIMARY_DET_TIME;
+ rphy->chg_state = USB_CHG_STATE_DCD_DONE;
+ } else {
+ /* stage 3 */
+ delay = CHG_DCD_POLL_TIME;
+ }
+ break;
+ case USB_CHG_STATE_DCD_DONE:
+ vout = property_enabled(rphy, &rphy->phy_cfg->chg_det.cp_det);
+ rockchip_chg_enable_primary_det(rphy, false);
+ if (vout) {
+ /* Voltage Source on DM, Probe on DP */
+ rockchip_chg_enable_secondary_det(rphy, true);
+ delay = CHG_SECONDARY_DET_TIME;
+ rphy->chg_state = USB_CHG_STATE_PRIMARY_DONE;
+ } else {
+ if (rphy->dcd_retries == CHG_DCD_MAX_RETRIES) {
+ /* floating charger found */
+ rphy->chg_type = POWER_SUPPLY_TYPE_USB_DCP;
+ rphy->chg_state = USB_CHG_STATE_DETECTED;
+ delay = 0;
+ } else {
+ rphy->chg_type = POWER_SUPPLY_TYPE_USB;
+ rphy->chg_state = USB_CHG_STATE_DETECTED;
+ delay = 0;
+ }
+ }
+ break;
+ case USB_CHG_STATE_PRIMARY_DONE:
+ vout = property_enabled(rphy, &rphy->phy_cfg->chg_det.dcp_det);
+ /* Turn off voltage source */
+ rockchip_chg_enable_secondary_det(rphy, false);
+ if (vout)
+ rphy->chg_type = POWER_SUPPLY_TYPE_USB_DCP;
+ else
+ rphy->chg_type = POWER_SUPPLY_TYPE_USB_CDP;
+ /* fall through */
+ case USB_CHG_STATE_SECONDARY_DONE:
+ rphy->chg_state = USB_CHG_STATE_DETECTED;
+ delay = 0;
+ /* fall through */
+ case USB_CHG_STATE_DETECTED:
+ /* put the controller in normal mode */
+ property_enable(rphy, &rphy->phy_cfg->chg_det.opmode, true);
+ rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work);
+ dev_info(&rport->phy->dev, "charger = %s\n",
+ chg_to_string(rphy->chg_type));
+ return;
+ default:
+ return;
+ }
+
+ schedule_delayed_work(&rport->chg_work, delay);
+}
+
/*
 * This function manages the host-phy port state and suspends/resumes the
 * phy port to save power.
@@ -485,6 +884,26 @@ static irqreturn_t rockchip_usb2phy_linestate_irq(int irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t rockchip_usb2phy_bvalid_irq(int irq, void *data)
+{
+ struct rockchip_usb2phy_port *rport = data;
+ struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
+
+ if (!property_enabled(rphy, &rport->port_cfg->bvalid_det_st))
+ return IRQ_NONE;
+
+ mutex_lock(&rport->mutex);
+
+ /* clear bvalid detect irq pending status */
+ property_enable(rphy, &rport->port_cfg->bvalid_det_clr, true);
+
+ mutex_unlock(&rport->mutex);
+
+ rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work);
+
+ return IRQ_HANDLED;
+}
+
static int rockchip_usb2phy_host_port_init(struct rockchip_usb2phy *rphy,
struct rockchip_usb2phy_port *rport,
struct device_node *child_np)
@@ -509,13 +928,86 @@ static int rockchip_usb2phy_host_port_init(struct rockchip_usb2phy *rphy,
IRQF_ONESHOT,
"rockchip_usb2phy", rport);
if (ret) {
- dev_err(rphy->dev, "failed to request irq handle\n");
+ dev_err(rphy->dev, "failed to request linestate irq handle\n");
return ret;
}
return 0;
}
+static int rockchip_otg_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct rockchip_usb2phy_port *rport =
+ container_of(nb, struct rockchip_usb2phy_port, event_nb);
+
+ schedule_delayed_work(&rport->otg_sm_work, OTG_SCHEDULE_DELAY);
+
+ return NOTIFY_DONE;
+}
+
+static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy,
+ struct rockchip_usb2phy_port *rport,
+ struct device_node *child_np)
+{
+ int ret;
+
+ rport->port_id = USB2PHY_PORT_OTG;
+ rport->port_cfg = &rphy->phy_cfg->port_cfgs[USB2PHY_PORT_OTG];
+ rport->state = OTG_STATE_UNDEFINED;
+
+ /*
+	 * Set the suspended flag to true without actually putting the phy
+	 * into suspend mode; this way power_on(), called by the usb
+	 * controller driver during probe, will enable the usb phy and its
+	 * clock.
+	 */
+ rport->suspended = true;
+ rport->vbus_attached = false;
+
+ mutex_init(&rport->mutex);
+
+ rport->mode = of_usb_get_dr_mode_by_phy(child_np, -1);
+ if (rport->mode == USB_DR_MODE_HOST) {
+ ret = 0;
+ goto out;
+ }
+
+ INIT_DELAYED_WORK(&rport->chg_work, rockchip_chg_detect_work);
+ INIT_DELAYED_WORK(&rport->otg_sm_work, rockchip_usb2phy_otg_sm_work);
+
+ rport->utmi_avalid =
+ of_property_read_bool(child_np, "rockchip,utmi-avalid");
+
+ rport->bvalid_irq = of_irq_get_byname(child_np, "otg-bvalid");
+ if (rport->bvalid_irq < 0) {
+ dev_err(rphy->dev, "no vbus valid irq provided\n");
+ ret = rport->bvalid_irq;
+ goto out;
+ }
+
+ ret = devm_request_threaded_irq(rphy->dev, rport->bvalid_irq, NULL,
+ rockchip_usb2phy_bvalid_irq,
+ IRQF_ONESHOT,
+ "rockchip_usb2phy_bvalid", rport);
+ if (ret) {
+ dev_err(rphy->dev, "failed to request otg-bvalid irq handle\n");
+ goto out;
+ }
+
+ if (!IS_ERR(rphy->edev)) {
+ rport->event_nb.notifier_call = rockchip_otg_event;
+
+ ret = extcon_register_notifier(rphy->edev, EXTCON_USB_HOST,
+ &rport->event_nb);
+ if (ret)
+ dev_err(rphy->dev, "register USB HOST notifier failed\n");
+ }
+
+out:
+ return ret;
+}
+
static int rockchip_usb2phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -553,8 +1045,14 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
rphy->dev = dev;
phy_cfgs = match->data;
+ rphy->chg_state = USB_CHG_STATE_UNDEFINED;
+ rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN;
platform_set_drvdata(pdev, rphy);
+ ret = rockchip_usb2phy_extcon_register(rphy);
+ if (ret)
+ return ret;
+
/* find out a proper config which can be matched with dt. */
index = 0;
while (phy_cfgs[index].reg) {
@@ -591,13 +1089,9 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
struct rockchip_usb2phy_port *rport = &rphy->ports[index];
struct phy *phy;
- /*
- * This driver aim to support both otg-port and host-port,
- * but unfortunately, the otg part is not ready in current,
- * so this comments and below codes are interim, which should
- * be changed after otg-port is supplied soon.
- */
- if (of_node_cmp(child_np->name, "host-port"))
+ /* This driver aims to support both otg-port and host-port */
+ if (of_node_cmp(child_np->name, "host-port") &&
+ of_node_cmp(child_np->name, "otg-port"))
goto next_child;
phy = devm_phy_create(dev, child_np, &rockchip_usb2phy_ops);
@@ -610,9 +1104,18 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
rport->phy = phy;
phy_set_drvdata(rport->phy, rport);
- ret = rockchip_usb2phy_host_port_init(rphy, rport, child_np);
- if (ret)
- goto put_child;
+ /* initialize otg/host port separately */
+ if (!of_node_cmp(child_np->name, "host-port")) {
+ ret = rockchip_usb2phy_host_port_init(rphy, rport,
+ child_np);
+ if (ret)
+ goto put_child;
+ } else {
+ ret = rockchip_usb2phy_otg_port_init(rphy, rport,
+ child_np);
+ if (ret)
+ goto put_child;
+ }
next_child:
/* to prevent out of boundary */
@@ -654,10 +1157,18 @@ static const struct rockchip_usb2phy_cfg rk3366_phy_cfgs[] = {
static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
{
- .reg = 0xe450,
+ .reg = 0xe450,
.num_ports = 2,
.clkout_ctl = { 0xe450, 4, 4, 1, 0 },
.port_cfgs = {
+ [USB2PHY_PORT_OTG] = {
+ .phy_sus = { 0xe454, 1, 0, 2, 1 },
+ .bvalid_det_en = { 0xe3c0, 3, 3, 0, 1 },
+ .bvalid_det_st = { 0xe3e0, 3, 3, 0, 1 },
+ .bvalid_det_clr = { 0xe3d0, 3, 3, 0, 1 },
+ .utmi_avalid = { 0xe2ac, 7, 7, 0, 1 },
+ .utmi_bvalid = { 0xe2ac, 12, 12, 0, 1 },
+ },
[USB2PHY_PORT_HOST] = {
.phy_sus = { 0xe458, 1, 0, 0x2, 0x1 },
.ls_det_en = { 0xe3c0, 6, 6, 0, 1 },
@@ -667,12 +1178,32 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
.utmi_hstdet = { 0xe2ac, 23, 23, 0, 1 }
}
},
+ .chg_det = {
+ .opmode = { 0xe454, 3, 0, 5, 1 },
+ .cp_det = { 0xe2ac, 2, 2, 0, 1 },
+ .dcp_det = { 0xe2ac, 1, 1, 0, 1 },
+ .dp_det = { 0xe2ac, 0, 0, 0, 1 },
+ .idm_sink_en = { 0xe450, 8, 8, 0, 1 },
+ .idp_sink_en = { 0xe450, 7, 7, 0, 1 },
+ .idp_src_en = { 0xe450, 9, 9, 0, 1 },
+ .rdm_pdwn_en = { 0xe450, 10, 10, 0, 1 },
+ .vdm_src_en = { 0xe450, 12, 12, 0, 1 },
+ .vdp_src_en = { 0xe450, 11, 11, 0, 1 },
+ },
},
{
- .reg = 0xe460,
+ .reg = 0xe460,
.num_ports = 2,
.clkout_ctl = { 0xe460, 4, 4, 1, 0 },
.port_cfgs = {
+ [USB2PHY_PORT_OTG] = {
+ .phy_sus = { 0xe464, 1, 0, 2, 1 },
+ .bvalid_det_en = { 0xe3c0, 8, 8, 0, 1 },
+ .bvalid_det_st = { 0xe3e0, 8, 8, 0, 1 },
+ .bvalid_det_clr = { 0xe3d0, 8, 8, 0, 1 },
+ .utmi_avalid = { 0xe2ac, 10, 10, 0, 1 },
+ .utmi_bvalid = { 0xe2ac, 16, 16, 0, 1 },
+ },
[USB2PHY_PORT_HOST] = {
.phy_sus = { 0xe468, 1, 0, 0x2, 0x1 },
.ls_det_en = { 0xe3c0, 11, 11, 0, 1 },
diff --git a/drivers/phy/phy-s5pv210-usb2.c b/drivers/phy/phy-s5pv210-usb2.c
index 004d320767e4..f6f72339bbc3 100644
--- a/drivers/phy/phy-s5pv210-usb2.c
+++ b/drivers/phy/phy-s5pv210-usb2.c
@@ -103,7 +103,7 @@ static void s5pv210_isol(struct samsung_usb2_phy_instance *inst, bool on)
break;
default:
return;
- };
+ }
regmap_update_bits(drv->reg_pmu, S5PV210_USB_ISOL_OFFSET,
mask, on ? 0 : mask);
@@ -127,7 +127,7 @@ static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
rstbits = S5PV210_URSTCON_PHY1_ALL |
S5PV210_URSTCON_HOST_LINK_ALL;
break;
- };
+ }
if (on) {
writel(drv->ref_reg_val, drv->reg_phy + S5PV210_UPHYCLK);
diff --git a/drivers/phy/phy-stih41x-usb.c b/drivers/phy/phy-stih41x-usb.c
deleted file mode 100644
index 0ac74639ad02..000000000000
--- a/drivers/phy/phy-stih41x-usb.c
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Copyright (C) 2014 STMicroelectronics
- *
- * STMicroelectronics PHY driver for STiH41x USB.
- *
- * Author: Maxime Coquelin <maxime.coquelin@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2, as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/clk.h>
-#include <linux/phy/phy.h>
-#include <linux/regmap.h>
-#include <linux/mfd/syscon.h>
-
-#define SYSCFG332 0x80
-#define SYSCFG2520 0x820
-
-/**
- * struct stih41x_usb_cfg - SoC specific PHY register mapping
- * @syscfg: Offset in syscfg registers bank
- * @cfg_mask: Bits mask for PHY configuration
- * @cfg: Static configuration value for PHY
- * @oscok: Notify the PHY oscillator clock is ready
- * Setting this bit enable the PHY
- */
-struct stih41x_usb_cfg {
- u32 syscfg;
- u32 cfg_mask;
- u32 cfg;
- u32 oscok;
-};
-
-/**
- * struct stih41x_usb_phy - Private data for the PHY
- * @dev: device for this controller
- * @regmap: Syscfg registers bank in which PHY is configured
- * @cfg: SoC specific PHY register mapping
- * @clk: Oscillator used by the PHY
- */
-struct stih41x_usb_phy {
- struct device *dev;
- struct regmap *regmap;
- const struct stih41x_usb_cfg *cfg;
- struct clk *clk;
-};
-
-static struct stih41x_usb_cfg stih415_usb_phy_cfg = {
- .syscfg = SYSCFG332,
- .cfg_mask = 0x3f,
- .cfg = 0x38,
- .oscok = BIT(6),
-};
-
-static struct stih41x_usb_cfg stih416_usb_phy_cfg = {
- .syscfg = SYSCFG2520,
- .cfg_mask = 0x33f,
- .cfg = 0x238,
- .oscok = BIT(6),
-};
-
-static int stih41x_usb_phy_init(struct phy *phy)
-{
- struct stih41x_usb_phy *phy_dev = phy_get_drvdata(phy);
-
- return regmap_update_bits(phy_dev->regmap, phy_dev->cfg->syscfg,
- phy_dev->cfg->cfg_mask, phy_dev->cfg->cfg);
-}
-
-static int stih41x_usb_phy_power_on(struct phy *phy)
-{
- struct stih41x_usb_phy *phy_dev = phy_get_drvdata(phy);
- int ret;
-
- ret = clk_prepare_enable(phy_dev->clk);
- if (ret) {
- dev_err(phy_dev->dev, "Failed to enable osc_phy clock\n");
- return ret;
- }
-
- ret = regmap_update_bits(phy_dev->regmap, phy_dev->cfg->syscfg,
- phy_dev->cfg->oscok, phy_dev->cfg->oscok);
- if (ret)
- clk_disable_unprepare(phy_dev->clk);
-
- return ret;
-}
-
-static int stih41x_usb_phy_power_off(struct phy *phy)
-{
- struct stih41x_usb_phy *phy_dev = phy_get_drvdata(phy);
- int ret;
-
- ret = regmap_update_bits(phy_dev->regmap, phy_dev->cfg->syscfg,
- phy_dev->cfg->oscok, 0);
- if (ret) {
- dev_err(phy_dev->dev, "Failed to clear oscok bit\n");
- return ret;
- }
-
- clk_disable_unprepare(phy_dev->clk);
-
- return 0;
-}
-
-static const struct phy_ops stih41x_usb_phy_ops = {
- .init = stih41x_usb_phy_init,
- .power_on = stih41x_usb_phy_power_on,
- .power_off = stih41x_usb_phy_power_off,
- .owner = THIS_MODULE,
-};
-
-static const struct of_device_id stih41x_usb_phy_of_match[];
-
-static int stih41x_usb_phy_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *match;
- struct stih41x_usb_phy *phy_dev;
- struct device *dev = &pdev->dev;
- struct phy_provider *phy_provider;
- struct phy *phy;
-
- phy_dev = devm_kzalloc(dev, sizeof(*phy_dev), GFP_KERNEL);
- if (!phy_dev)
- return -ENOMEM;
-
- match = of_match_device(stih41x_usb_phy_of_match, &pdev->dev);
- if (!match)
- return -ENODEV;
-
- phy_dev->cfg = match->data;
-
- phy_dev->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
- if (IS_ERR(phy_dev->regmap)) {
- dev_err(dev, "No syscfg phandle specified\n");
- return PTR_ERR(phy_dev->regmap);
- }
-
- phy_dev->clk = devm_clk_get(dev, "osc_phy");
- if (IS_ERR(phy_dev->clk)) {
- dev_err(dev, "osc_phy clk not found\n");
- return PTR_ERR(phy_dev->clk);
- }
-
- phy = devm_phy_create(dev, NULL, &stih41x_usb_phy_ops);
-
- if (IS_ERR(phy)) {
- dev_err(dev, "failed to create phy\n");
- return PTR_ERR(phy);
- }
-
- phy_dev->dev = dev;
-
- phy_set_drvdata(phy, phy_dev);
-
- phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- return PTR_ERR_OR_ZERO(phy_provider);
-}
-
-static const struct of_device_id stih41x_usb_phy_of_match[] = {
- { .compatible = "st,stih415-usb-phy", .data = &stih415_usb_phy_cfg },
- { .compatible = "st,stih416-usb-phy", .data = &stih416_usb_phy_cfg },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, stih41x_usb_phy_of_match);
-
-static struct platform_driver stih41x_usb_phy_driver = {
- .probe = stih41x_usb_phy_probe,
- .driver = {
- .name = "stih41x-usb-phy",
- .of_match_table = stih41x_usb_phy_of_match,
- }
-};
-module_platform_driver(stih41x_usb_phy_driver);
-
-MODULE_AUTHOR("Maxime Coquelin <maxime.coquelin@st.com>");
-MODULE_DESCRIPTION("STMicroelectronics USB PHY driver for STiH41x series");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index fec34f5213c4..bf28a0fdd569 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -436,25 +436,31 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy, enum phy_mode mode)
{
struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
+ int new_mode;
if (phy->index != 0)
return -EINVAL;
switch (mode) {
case PHY_MODE_USB_HOST:
- data->dr_mode = USB_DR_MODE_HOST;
+ new_mode = USB_DR_MODE_HOST;
break;
case PHY_MODE_USB_DEVICE:
- data->dr_mode = USB_DR_MODE_PERIPHERAL;
+ new_mode = USB_DR_MODE_PERIPHERAL;
break;
case PHY_MODE_USB_OTG:
- data->dr_mode = USB_DR_MODE_OTG;
+ new_mode = USB_DR_MODE_OTG;
break;
default:
return -EINVAL;
}
- dev_info(&_phy->dev, "Changing dr_mode to %d\n", (int)data->dr_mode);
+ if (new_mode != data->dr_mode) {
+ dev_info(&_phy->dev, "Changing dr_mode to %d\n", new_mode);
+ data->dr_mode = new_mode;
+ }
+
+ data->id_det = -1; /* Force reprocessing of id */
data->force_session_end = true;
queue_delayed_work(system_wq, &data->detect, 0);
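Callers reach this path through the generic phy API; a minimal consumer-side sketch, not from this patch. With the change above, repeated calls with an unchanged mode no longer log or rewrite dr_mode, but they still force id re-detection and kick the detect work:

static int example_switch_to_host(struct phy *usb_phy)
{
	return phy_set_mode(usb_phy, PHY_MODE_USB_HOST);
}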
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index bf46844dc387..9c84d32c6f60 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -537,10 +537,7 @@ static int ti_pipe3_get_pll_base(struct ti_pipe3 *phy)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"pll_ctrl");
phy->pll_ctrl_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(phy->pll_ctrl_base))
- return PTR_ERR(phy->pll_ctrl_base);
-
- return 0;
+ return PTR_ERR_OR_ZERO(phy->pll_ctrl_base);
}
static int ti_pipe3_probe(struct platform_device *pdev)
@@ -592,10 +589,7 @@ static int ti_pipe3_probe(struct platform_device *pdev)
ti_pipe3_power_off(generic_phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
- return 0;
+ return PTR_ERR_OR_ZERO(phy_provider);
}
static int ti_pipe3_remove(struct platform_device *pdev)
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 547ca7b3f098..2990b3965460 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -317,6 +317,9 @@ static enum musb_vbus_id_status
linkstat = MUSB_VBUS_OFF;
}
+ kobject_uevent(&twl->dev->kobj, linkstat == MUSB_VBUS_VALID
+ ? KOBJ_ONLINE : KOBJ_OFFLINE);
+
dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n",
status, status, linkstat);
diff --git a/drivers/phy/tegra/xusb-tegra124.c b/drivers/phy/tegra/xusb-tegra124.c
index 119957249a51..c45cbedc6634 100644
--- a/drivers/phy/tegra/xusb-tegra124.c
+++ b/drivers/phy/tegra/xusb-tegra124.c
@@ -1483,7 +1483,6 @@ static int tegra124_usb3_port_enable(struct tegra_xusb_port *port)
struct tegra_xusb_padctl *padctl = port->padctl;
struct tegra_xusb_lane *lane = usb3->base.lane;
unsigned int index = port->index, offset;
- int ret = 0;
u32 value;
value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
@@ -1612,7 +1611,7 @@ static int tegra124_usb3_port_enable(struct tegra_xusb_port *port)
value &= ~XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
- return ret;
+ return 0;
}
static void tegra124_usb3_port_disable(struct tegra_xusb_port *port)
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 873424ab0e32..3cbcb2537657 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -561,10 +561,7 @@ static int tegra_xusb_usb2_port_parse_dt(struct tegra_xusb_usb2_port *usb2)
usb2->internal = of_property_read_bool(np, "nvidia,internal");
usb2->supply = devm_regulator_get(&port->dev, "vbus");
- if (IS_ERR(usb2->supply))
- return PTR_ERR(usb2->supply);
-
- return 0;
+ return PTR_ERR_OR_ZERO(usb2->supply);
}
static int tegra_xusb_add_usb2_port(struct tegra_xusb_padctl *padctl,
@@ -731,10 +728,7 @@ static int tegra_xusb_usb3_port_parse_dt(struct tegra_xusb_usb3_port *usb3)
usb3->internal = of_property_read_bool(np, "nvidia,internal");
usb3->supply = devm_regulator_get(&port->dev, "vbus");
- if (IS_ERR(usb3->supply))
- return PTR_ERR(usb3->supply);
-
- return 0;
+ return PTR_ERR_OR_ZERO(usb3->supply);
}
static int tegra_xusb_add_usb3_port(struct tegra_xusb_padctl *padctl,
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
index 8abd80dbcbed..47268ecedc4d 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/platform/chrome/cros_ec_dev.c
@@ -18,6 +18,7 @@
*/
#include <linux/fs.h>
+#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -87,6 +88,41 @@ exit:
return ret;
}
+static int cros_ec_check_features(struct cros_ec_dev *ec, int feature)
+{
+ struct cros_ec_command *msg;
+ int ret;
+
+ if (ec->features[0] == -1U && ec->features[1] == -1U) {
+ /* features bitmap not read yet */
+
+ msg = kmalloc(sizeof(*msg) + sizeof(ec->features), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = 0;
+ msg->command = EC_CMD_GET_FEATURES + ec->cmd_offset;
+ msg->insize = sizeof(ec->features);
+ msg->outsize = 0;
+
+ ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ if (ret < 0 || msg->result != EC_RES_SUCCESS) {
+ dev_warn(ec->dev, "cannot get EC features: %d/%d\n",
+ ret, msg->result);
+ memset(ec->features, 0, sizeof(ec->features));
+		} else {
+			memcpy(ec->features, msg->data, sizeof(ec->features));
+		}
+
+ dev_dbg(ec->dev, "EC features %08x %08x\n",
+ ec->features[0], ec->features[1]);
+
+ kfree(msg);
+ }
+
+ return ec->features[feature / 32] & EC_FEATURE_MASK_0(feature);
+}
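The return statement treats ec->features as a two-word bitmap; assuming EC_FEATURE_MASK_0(f) expands to BIT(f % 32), the check selects word f/32 and tests bit f%32. A minimal sketch of that lookup (hypothetical helper, not part of the patch):

static inline bool example_ec_has_feature(const u32 features[2], int feature)
{
	/* word feature/32, bit feature%32 of the cached bitmap */
	return features[feature / 32] & BIT(feature % 32);
}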
+
/* Device file ops */
static int ec_device_open(struct inode *inode, struct file *filp)
{
@@ -230,6 +266,123 @@ static void __remove(struct device *dev)
kfree(ec);
}
+static void cros_ec_sensors_register(struct cros_ec_dev *ec)
+{
+ /*
+	 * Issue a command to get the number of sensors reported.
+	 * Build an array of sensor mfd cells and register them all.
+ */
+ int ret, i, id, sensor_num;
+ struct mfd_cell *sensor_cells;
+ struct cros_ec_sensor_platform *sensor_platforms;
+ int sensor_type[MOTIONSENSE_TYPE_MAX];
+ struct ec_params_motion_sense *params;
+ struct ec_response_motion_sense *resp;
+ struct cros_ec_command *msg;
+
+ msg = kzalloc(sizeof(struct cros_ec_command) +
+ max(sizeof(*params), sizeof(*resp)), GFP_KERNEL);
+ if (msg == NULL)
+ return;
+
+ msg->version = 2;
+ msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+ msg->outsize = sizeof(*params);
+ msg->insize = sizeof(*resp);
+
+ params = (struct ec_params_motion_sense *)msg->data;
+ params->cmd = MOTIONSENSE_CMD_DUMP;
+
+ ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ if (ret < 0 || msg->result != EC_RES_SUCCESS) {
+ dev_warn(ec->dev, "cannot get EC sensor information: %d/%d\n",
+ ret, msg->result);
+ goto error;
+ }
+
+ resp = (struct ec_response_motion_sense *)msg->data;
+ sensor_num = resp->dump.sensor_count;
+	/* Allocate 2 extra cells in case the lid angle sensor or FIFO is needed */
+ sensor_cells = kzalloc(sizeof(struct mfd_cell) * (sensor_num + 2),
+ GFP_KERNEL);
+ if (sensor_cells == NULL)
+ goto error;
+
+ sensor_platforms = kzalloc(sizeof(struct cros_ec_sensor_platform) *
+ (sensor_num + 1), GFP_KERNEL);
+ if (sensor_platforms == NULL)
+ goto error_platforms;
+
+ memset(sensor_type, 0, sizeof(sensor_type));
+ id = 0;
+ for (i = 0; i < sensor_num; i++) {
+ params->cmd = MOTIONSENSE_CMD_INFO;
+ params->info.sensor_num = i;
+ ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ if (ret < 0 || msg->result != EC_RES_SUCCESS) {
+ dev_warn(ec->dev, "no info for EC sensor %d : %d/%d\n",
+ i, ret, msg->result);
+ continue;
+ }
+ switch (resp->info.type) {
+ case MOTIONSENSE_TYPE_ACCEL:
+ sensor_cells[id].name = "cros-ec-accel";
+ break;
+ case MOTIONSENSE_TYPE_GYRO:
+ sensor_cells[id].name = "cros-ec-gyro";
+ break;
+ case MOTIONSENSE_TYPE_MAG:
+ sensor_cells[id].name = "cros-ec-mag";
+ break;
+ case MOTIONSENSE_TYPE_PROX:
+ sensor_cells[id].name = "cros-ec-prox";
+ break;
+ case MOTIONSENSE_TYPE_LIGHT:
+ sensor_cells[id].name = "cros-ec-light";
+ break;
+ case MOTIONSENSE_TYPE_ACTIVITY:
+ sensor_cells[id].name = "cros-ec-activity";
+ break;
+ default:
+ dev_warn(ec->dev, "unknown type %d\n", resp->info.type);
+ continue;
+ }
+ sensor_platforms[id].sensor_num = i;
+ sensor_cells[id].id = sensor_type[resp->info.type];
+ sensor_cells[id].platform_data = &sensor_platforms[id];
+ sensor_cells[id].pdata_size =
+ sizeof(struct cros_ec_sensor_platform);
+
+ sensor_type[resp->info.type]++;
+ id++;
+ }
+ if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2) {
+ sensor_platforms[id].sensor_num = sensor_num;
+
+ sensor_cells[id].name = "cros-ec-angle";
+ sensor_cells[id].id = 0;
+ sensor_cells[id].platform_data = &sensor_platforms[id];
+ sensor_cells[id].pdata_size =
+ sizeof(struct cros_ec_sensor_platform);
+ id++;
+ }
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
+ sensor_cells[id].name = "cros-ec-ring";
+ id++;
+ }
+
+ ret = mfd_add_devices(ec->dev, 0, sensor_cells, id,
+ NULL, 0, NULL);
+ if (ret)
+ dev_err(ec->dev, "failed to add EC sensors\n");
+
+ kfree(sensor_platforms);
+error_platforms:
+ kfree(sensor_cells);
+error:
+ kfree(msg);
+}
+
static int ec_device_probe(struct platform_device *pdev)
{
int retval = -ENOMEM;
@@ -245,6 +398,8 @@ static int ec_device_probe(struct platform_device *pdev)
ec->ec_dev = dev_get_drvdata(dev->parent);
ec->dev = dev;
ec->cmd_offset = ec_platform->cmd_offset;
+ ec->features[0] = -1U; /* Not cached yet */
+ ec->features[1] = -1U; /* Not cached yet */
device_initialize(&ec->class_dev);
cdev_init(&ec->cdev, &fops);
@@ -282,6 +437,10 @@ static int ec_device_probe(struct platform_device *pdev)
goto dev_reg_failed;
}
+ /* check whether this EC is a sensor hub. */
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE))
+ cros_ec_sensors_register(ec);
+
return 0;
dev_reg_failed:
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 1aba2c74160e..2b21033f11f0 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -308,10 +308,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
* returns a small amount, then there's no need to pin that
* much memory to the process.
*/
- down_read(&current->mm->mmap_sem);
- ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE,
- &page, NULL);
- up_read(&current->mm->mmap_sem);
+ ret = get_user_pages_unlocked(address, 1, &page,
+ is_write ? 0 : FOLL_WRITE);
if (ret < 0)
break;
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index c74c3f67b8da..abeb77217a21 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -104,6 +104,16 @@ config POWER_RESET_MSM
help
Power off and restart support for Qualcomm boards.
+config POWER_RESET_PIIX4_POWEROFF
+ tristate "Intel PIIX4 power-off driver"
+ depends on PCI
+ depends on MIPS || COMPILE_TEST
+ help
+ This driver supports powering off a system using the Intel PIIX4
+ southbridge, for example the MIPS Malta development board. The
+ southbridge SOff state is entered in response to a request to
+ power off the system.
+
config POWER_RESET_LTC2952
bool "LTC2952 PowerPath power-off driver"
depends on OF_GPIO
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 1be307c7fc25..11dae3b56ff9 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
obj-$(CONFIG_POWER_RESET_IMX) += imx-snvs-poweroff.o
obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
+obj-$(CONFIG_POWER_RESET_PIIX4_POWEROFF) += piix4-poweroff.o
obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o
obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c
index e9e24df35f26..a85dd4d233af 100644
--- a/drivers/power/reset/at91-poweroff.c
+++ b/drivers/power/reset/at91-poweroff.c
@@ -169,6 +169,7 @@ static const struct of_device_id at91_poweroff_of_match[] = {
{ .compatible = "atmel,at91sam9x5-shdwc", },
{ /*sentinel*/ }
};
+MODULE_DEVICE_TABLE(of, at91_poweroff_of_match);
static struct platform_driver at91_poweroff_driver = {
.remove = __exit_p(at91_poweroff_remove),
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 1b5d450586d1..568580cf0655 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -175,6 +175,7 @@ static const struct of_device_id at91_reset_of_match[] = {
{ .compatible = "atmel,sama5d3-rstc", .data = sama5d3_restart },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, at91_reset_of_match);
static struct notifier_block at91_restart_nb = {
.priority = 192,
@@ -242,6 +243,7 @@ static const struct platform_device_id at91_reset_plat_match[] = {
{ "at91-sam9g45-reset", (unsigned long)at91sam9g45_restart },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(platform, at91_reset_plat_match);
static struct platform_driver at91_reset_driver = {
.remove = __exit_p(at91_reset_remove),
diff --git a/drivers/power/reset/piix4-poweroff.c b/drivers/power/reset/piix4-poweroff.c
new file mode 100644
index 000000000000..bacfc95783f0
--- /dev/null
+++ b/drivers/power/reset/piix4-poweroff.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+
+static struct pci_dev *pm_dev;
+static resource_size_t io_offset;
+
+enum piix4_pm_io_reg {
+ PIIX4_FUNC3IO_PMSTS = 0x00,
+#define PIIX4_FUNC3IO_PMSTS_PWRBTN_STS BIT(8)
+ PIIX4_FUNC3IO_PMCNTRL = 0x04,
+#define PIIX4_FUNC3IO_PMCNTRL_SUS_EN BIT(13)
+#define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF (0x0 << 10)
+};
+
+#define PIIX4_SUSPEND_MAGIC 0x00120002
+
+static const int piix4_pm_io_region = PCI_BRIDGE_RESOURCES;
+
+static void piix4_poweroff(void)
+{
+ int spec_devid;
+ u16 sts;
+
+ /* Ensure the power button status is clear */
+ while (1) {
+ sts = inw(io_offset + PIIX4_FUNC3IO_PMSTS);
+ if (!(sts & PIIX4_FUNC3IO_PMSTS_PWRBTN_STS))
+ break;
+ outw(sts, io_offset + PIIX4_FUNC3IO_PMSTS);
+ }
+
+ /* Enable entry to suspend */
+ outw(PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF | PIIX4_FUNC3IO_PMCNTRL_SUS_EN,
+ io_offset + PIIX4_FUNC3IO_PMCNTRL);
+
+ /* If the special cycle occurs too soon this doesn't work... */
+ mdelay(10);
+
+ /*
+ * The PIIX4 will enter the suspend state only after seeing a special
+ * cycle with the correct magic data on the PCI bus. Generate that
+ * cycle now.
+ */
+ spec_devid = PCI_DEVID(0, PCI_DEVFN(0x1f, 0x7));
+ pci_bus_write_config_dword(pm_dev->bus, spec_devid, 0,
+ PIIX4_SUSPEND_MAGIC);
+
+ /* Give the system some time to power down, then error */
+ mdelay(1000);
+ pr_emerg("Unable to poweroff system\n");
+}
+
+static int piix4_poweroff_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ int res;
+
+ if (pm_dev)
+ return -EINVAL;
+
+ /* Request access to the PIIX4 PM IO registers */
+ res = pci_request_region(dev, piix4_pm_io_region,
+ "PIIX4 PM IO registers");
+ if (res) {
+ dev_err(&dev->dev, "failed to request PM IO registers: %d\n",
+ res);
+ return res;
+ }
+
+ pm_dev = dev;
+ io_offset = pci_resource_start(dev, piix4_pm_io_region);
+ pm_power_off = piix4_poweroff;
+
+ return 0;
+}
+
+static void piix4_poweroff_remove(struct pci_dev *dev)
+{
+ if (pm_power_off == piix4_poweroff)
+ pm_power_off = NULL;
+
+ pci_release_region(dev, piix4_pm_io_region);
+ pm_dev = NULL;
+}
+
+static const struct pci_device_id piix4_poweroff_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) },
+ { 0 },
+};
+
+static struct pci_driver piix4_poweroff_driver = {
+ .name = "piix4-poweroff",
+ .id_table = piix4_poweroff_ids,
+ .probe = piix4_poweroff_probe,
+ .remove = piix4_poweroff_remove,
+};
+
+module_pci_driver(piix4_poweroff_driver);
+MODULE_AUTHOR("Paul Burton <paul.burton@imgtec.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/reset/syscon-reboot-mode.c b/drivers/power/reset/syscon-reboot-mode.c
index 1ecb51d67149..c8c371b285b1 100644
--- a/drivers/power/reset/syscon-reboot-mode.c
+++ b/drivers/power/reset/syscon-reboot-mode.c
@@ -74,6 +74,7 @@ static const struct of_device_id syscon_reboot_mode_of_match[] = {
{ .compatible = "syscon-reboot-mode" },
{}
};
+MODULE_DEVICE_TABLE(of, syscon_reboot_mode_of_match);
static struct platform_driver syscon_reboot_mode_driver = {
.probe = syscon_reboot_mode_probe,
diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
index b0b1eb3a78c2..7549c7f74a3c 100644
--- a/drivers/power/reset/zx-reboot.c
+++ b/drivers/power/reset/zx-reboot.c
@@ -72,6 +72,7 @@ static const struct of_device_id zx_reboot_of_match[] = {
{ .compatible = "zte,sysctrl" },
{}
};
+MODULE_DEVICE_TABLE(of, zx_reboot_of_match);
static struct platform_driver zx_reboot_driver = {
.probe = zx_reboot_probe,
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 2199f673118c..c569f82a0071 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -1900,7 +1900,7 @@ static void ab8500_fg_low_bat_work(struct work_struct *work)
* ab8500_fg_battok_calc - calculate the bit pattern corresponding
* to the target voltage.
* @di: pointer to the ab8500_fg structure
- * @target target voltage
+ * @target: target voltage
*
* Returns bit pattern closest to the target voltage
* valid return values are 0-14. (0-BATT_OK_MAX_NR_INCREMENTS)
@@ -2391,7 +2391,7 @@ static void ab8500_fg_external_power_changed(struct power_supply *psy)
}
/**
- * abab8500_fg_reinit_work() - work to reset the FG algorithm
+ * ab8500_fg_reinit_work() - work to reset the FG algorithm
* @work: pointer to the work_struct structure
*
* Used to reset the current battery capacity to be able to
@@ -2528,7 +2528,7 @@ static struct kobj_type ab8500_fg_ktype = {
};
/**
- * ab8500_chargalg_sysfs_exit() - de-init of sysfs entry
+ * ab8500_fg_sysfs_exit() - de-init of sysfs entry
* @di: pointer to the struct ab8500_chargalg
*
* This function removes the entry in sysfs.
@@ -2539,7 +2539,7 @@ static void ab8500_fg_sysfs_exit(struct ab8500_fg *di)
}
/**
- * ab8500_chargalg_sysfs_init() - init of sysfs entry
+ * ab8500_fg_sysfs_init() - init of sysfs entry
* @di: pointer to the struct ab8500_chargalg
*
* This function adds an entry in sysfs.
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 5bdde692f724..539eb41504bb 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -1120,6 +1120,7 @@ static const struct platform_device_id axp288_fg_id_table[] = {
{ .name = DEV_NAME },
{},
};
+MODULE_DEVICE_TABLE(platform, axp288_fg_id_table);
static int axp288_fuel_gauge_remove(struct platform_device *pdev)
{
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index f5746b9f4e83..e9584330aeed 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -1141,7 +1141,7 @@ static int bq24190_battery_set_property(struct power_supply *psy,
dev_dbg(bdi->dev, "prop: %d\n", psp);
- pm_runtime_put_sync(bdi->dev);
+ pm_runtime_get_sync(bdi->dev);
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 3b0dbc689d72..08c36b8e04bd 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -164,6 +164,25 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
[BQ27XXX_REG_DCAP] = 0x3c,
[BQ27XXX_REG_AP] = INVALID_REG_ADDR,
},
+ [BQ27510] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x06,
+ [BQ27XXX_REG_INT_TEMP] = 0x28,
+ [BQ27XXX_REG_VOLT] = 0x08,
+ [BQ27XXX_REG_AI] = 0x14,
+ [BQ27XXX_REG_FLAGS] = 0x0a,
+ [BQ27XXX_REG_TTE] = 0x16,
+ [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTES] = 0x1a,
+ [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_FCC] = 0x12,
+ [BQ27XXX_REG_CYCT] = 0x1e,
+ [BQ27XXX_REG_AE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SOC] = 0x20,
+ [BQ27XXX_REG_DCAP] = 0x2e,
+ [BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+ },
[BQ27530] = {
[BQ27XXX_REG_CTRL] = 0x00,
[BQ27XXX_REG_TEMP] = 0x06,
@@ -302,6 +321,24 @@ static enum power_supply_property bq27500_battery_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
+static enum power_supply_property bq27510_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
static enum power_supply_property bq27530_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -385,6 +422,7 @@ static struct {
BQ27XXX_PROP(BQ27000, bq27000_battery_props),
BQ27XXX_PROP(BQ27010, bq27010_battery_props),
BQ27XXX_PROP(BQ27500, bq27500_battery_props),
+ BQ27XXX_PROP(BQ27510, bq27510_battery_props),
BQ27XXX_PROP(BQ27530, bq27530_battery_props),
BQ27XXX_PROP(BQ27541, bq27541_battery_props),
BQ27XXX_PROP(BQ27545, bq27545_battery_props),
@@ -397,10 +435,11 @@ static LIST_HEAD(bq27xxx_battery_devices);
static int poll_interval_param_set(const char *val, const struct kernel_param *kp)
{
struct bq27xxx_device_info *di;
+ unsigned int prev_val = *(unsigned int *) kp->arg;
int ret;
ret = param_set_uint(val, kp);
- if (ret < 0)
+ if (ret < 0 || prev_val == *(unsigned int *) kp->arg)
return ret;
mutex_lock(&bq27xxx_list_lock);
@@ -635,7 +674,8 @@ static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di)
*/
static bool bq27xxx_battery_overtemp(struct bq27xxx_device_info *di, u16 flags)
{
- if (di->chip == BQ27500 || di->chip == BQ27541 || di->chip == BQ27545)
+ if (di->chip == BQ27500 || di->chip == BQ27510 ||
+ di->chip == BQ27541 || di->chip == BQ27545)
return flags & (BQ27XXX_FLAG_OTC | BQ27XXX_FLAG_OTD);
if (di->chip == BQ27530 || di->chip == BQ27421)
return flags & BQ27XXX_FLAG_OT;
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 85d4ea2a9c20..5c5c3a6f9923 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -149,8 +149,8 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
{ "bq27200", BQ27000 },
{ "bq27210", BQ27010 },
{ "bq27500", BQ27500 },
- { "bq27510", BQ27500 },
- { "bq27520", BQ27500 },
+ { "bq27510", BQ27510 },
+ { "bq27520", BQ27510 },
{ "bq27530", BQ27530 },
{ "bq27531", BQ27530 },
{ "bq27541", BQ27541 },
diff --git a/drivers/power/supply/ipaq_micro_battery.c b/drivers/power/supply/ipaq_micro_battery.c
index 4af7b770f293..2fa6edd6e8b1 100644
--- a/drivers/power/supply/ipaq_micro_battery.c
+++ b/drivers/power/supply/ipaq_micro_battery.c
@@ -313,4 +313,4 @@ module_platform_driver(micro_batt_device_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("driver for iPAQ Atmel micro battery");
-MODULE_ALIAS("platform:battery-ipaq-micro");
+MODULE_ALIAS("platform:ipaq-micro-battery");
diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
index 7321b727d484..509e2b341bd6 100644
--- a/drivers/power/supply/lp8788-charger.c
+++ b/drivers/power/supply/lp8788-charger.c
@@ -384,9 +384,6 @@ static int lp8788_update_charger_params(struct platform_device *pdev,
for (i = 0; i < pdata->num_chg_params; i++) {
param = pdata->chg_params + i;
- if (!param)
- continue;
-
if (lp8788_is_valid_charger_register(param->addr)) {
ret = lp8788_write_byte(lp, param->addr, param->val);
if (ret)
diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
index 8689c80202b5..e7c3649b31a0 100644
--- a/drivers/power/supply/max17040_battery.c
+++ b/drivers/power/supply/max17040_battery.c
@@ -21,18 +21,13 @@
#include <linux/max17040_battery.h>
#include <linux/slab.h>
-#define MAX17040_VCELL_MSB 0x02
-#define MAX17040_VCELL_LSB 0x03
-#define MAX17040_SOC_MSB 0x04
-#define MAX17040_SOC_LSB 0x05
-#define MAX17040_MODE_MSB 0x06
-#define MAX17040_MODE_LSB 0x07
-#define MAX17040_VER_MSB 0x08
-#define MAX17040_VER_LSB 0x09
-#define MAX17040_RCOMP_MSB 0x0C
-#define MAX17040_RCOMP_LSB 0x0D
-#define MAX17040_CMD_MSB 0xFE
-#define MAX17040_CMD_LSB 0xFF
+#define MAX17040_VCELL 0x02
+#define MAX17040_SOC 0x04
+#define MAX17040_MODE 0x06
+#define MAX17040_VER 0x08
+#define MAX17040_RCOMP 0x0C
+#define MAX17040_CMD 0xFE
+
#define MAX17040_DELAY 1000
#define MAX17040_BATTERY_FULL 95
@@ -78,11 +73,11 @@ static int max17040_get_property(struct power_supply *psy,
return 0;
}
-static int max17040_write_reg(struct i2c_client *client, int reg, u8 value)
+static int max17040_write_reg(struct i2c_client *client, int reg, u16 value)
{
int ret;
- ret = i2c_smbus_write_byte_data(client, reg, value);
+ ret = i2c_smbus_write_word_swapped(client, reg, value);
if (ret < 0)
dev_err(&client->dev, "%s: err %d\n", __func__, ret);
@@ -94,7 +89,7 @@ static int max17040_read_reg(struct i2c_client *client, int reg)
{
int ret;
- ret = i2c_smbus_read_byte_data(client, reg);
+ ret = i2c_smbus_read_word_swapped(client, reg);
if (ret < 0)
dev_err(&client->dev, "%s: err %d\n", __func__, ret);
@@ -104,43 +99,36 @@ static int max17040_read_reg(struct i2c_client *client, int reg)
static void max17040_reset(struct i2c_client *client)
{
- max17040_write_reg(client, MAX17040_CMD_MSB, 0x54);
- max17040_write_reg(client, MAX17040_CMD_LSB, 0x00);
+ max17040_write_reg(client, MAX17040_CMD, 0x0054);
}
static void max17040_get_vcell(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- u8 msb;
- u8 lsb;
+ u16 vcell;
- msb = max17040_read_reg(client, MAX17040_VCELL_MSB);
- lsb = max17040_read_reg(client, MAX17040_VCELL_LSB);
+ vcell = max17040_read_reg(client, MAX17040_VCELL);
- chip->vcell = (msb << 4) + (lsb >> 4);
+ chip->vcell = vcell;
}
static void max17040_get_soc(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- u8 msb;
- u8 lsb;
+ u16 soc;
- msb = max17040_read_reg(client, MAX17040_SOC_MSB);
- lsb = max17040_read_reg(client, MAX17040_SOC_LSB);
+ soc = max17040_read_reg(client, MAX17040_SOC);
- chip->soc = msb;
+ chip->soc = (soc >> 8);
}
static void max17040_get_version(struct i2c_client *client)
{
- u8 msb;
- u8 lsb;
+ u16 version;
- msb = max17040_read_reg(client, MAX17040_VER_MSB);
- lsb = max17040_read_reg(client, MAX17040_VER_LSB);
+ version = max17040_read_reg(client, MAX17040_VER);
- dev_info(&client->dev, "MAX17040 Fuel-Gauge Ver %d%d\n", msb, lsb);
+ dev_info(&client->dev, "MAX17040 Fuel-Gauge Ver 0x%x\n", version);
}
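i2c_smbus_read_word_swapped() returns the 16-bit register with the byte order the MAX17040 uses (MSB first), which is why a single word read now replaces each MSB/LSB register pair. A decode sketch consistent with the soc >> 8 above; it assumes the LSB carries the fractional percent and simply discards it:

static inline unsigned int example_decode_soc(u16 soc_reg)
{
	return soc_reg >> 8;	/* whole percent lives in the high byte */
}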
static void max17040_get_online(struct i2c_client *client)
diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c
index 0b2eab571528..290ddc12b040 100644
--- a/drivers/power/supply/max8997_charger.c
+++ b/drivers/power/supply/max8997_charger.c
@@ -184,6 +184,7 @@ static const struct platform_device_id max8997_battery_id[] = {
{ "max8997-battery", 0 },
{ }
};
+MODULE_DEVICE_TABLE(platform, max8997_battery_id);
static struct platform_driver max8997_battery_driver = {
.driver = {
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index a74d8ca383a1..1e0960b646e8 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -413,7 +413,7 @@ static int power_supply_match_device_node(struct device *dev, const void *data)
/**
* power_supply_get_by_phandle() - Search for a power supply and returns its ref
* @np: Pointer to device node holding phandle property
- * @phandle_name: Name of property holding a power supply name
+ * @property: Name of property holding a power supply name
*
* If power supply was found, it increases reference count for the
* internal power supply's device. The user should power_supply_put()
@@ -458,7 +458,7 @@ static void devm_power_supply_put(struct device *dev, void *res)
* devm_power_supply_get_by_phandle() - Resource managed version of
* power_supply_get_by_phandle()
* @dev: Pointer to device holding phandle property
- * @phandle_name: Name of property holding a power supply phandle
+ * @property: Name of property holding a power supply phandle
*
* Return: On success returns a reference to a power supply with
* matching name equals to value under @property, NULL or ERR_PTR otherwise.
diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c
index 5c5880664e09..a2740cf57ad3 100644
--- a/drivers/power/supply/wm8350_power.c
+++ b/drivers/power/supply/wm8350_power.c
@@ -182,7 +182,7 @@ static ssize_t charger_state_show(struct device *dev,
return sprintf(buf, "%s\n", charge);
}
-static DEVICE_ATTR(charger_state, 0444, charger_state_show, NULL);
+static DEVICE_ATTR_RO(charger_state);
static irqreturn_t wm8350_charger_handler(int irq, void *data)
{
diff --git a/drivers/power/supply/wm97xx_battery.c b/drivers/power/supply/wm97xx_battery.c
index 6285626d142a..e3edb31ac880 100644
--- a/drivers/power/supply/wm97xx_battery.c
+++ b/drivers/power/supply/wm97xx_battery.c
@@ -30,8 +30,7 @@ static enum power_supply_property *prop;
static unsigned long wm97xx_read_bat(struct power_supply *bat_ps)
{
- struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev.parent),
pdata->batt_aux) * pdata->batt_mult /
@@ -40,8 +39,7 @@ static unsigned long wm97xx_read_bat(struct power_supply *bat_ps)
static unsigned long wm97xx_read_temp(struct power_supply *bat_ps)
{
- struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev.parent),
pdata->temp_aux) * pdata->temp_mult /
@@ -52,8 +50,7 @@ static int wm97xx_bat_get_property(struct power_supply *bat_ps,
enum power_supply_property psp,
union power_supply_propval *val)
{
- struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
@@ -103,8 +100,7 @@ static void wm97xx_bat_external_power_changed(struct power_supply *bat_ps)
static void wm97xx_bat_update(struct power_supply *bat_ps)
{
int old_status = bat_status;
- struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
mutex_lock(&work_lock);
@@ -166,15 +162,15 @@ static int wm97xx_bat_probe(struct platform_device *dev)
int ret = 0;
int props = 1; /* POWER_SUPPLY_PROP_PRESENT */
int i = 0;
- struct wm97xx_pdata *wmdata = dev->dev.platform_data;
- struct wm97xx_batt_pdata *pdata;
+ struct wm97xx_batt_pdata *pdata = dev->dev.platform_data;
+ struct power_supply_config cfg = {};
- if (!wmdata) {
+ if (!pdata) {
dev_err(&dev->dev, "No platform data supplied\n");
return -EINVAL;
}
- pdata = wmdata->batt_pdata;
+ cfg.drv_data = pdata;
if (dev->id != -1)
return -EINVAL;
@@ -243,7 +239,7 @@ static int wm97xx_bat_probe(struct platform_device *dev)
bat_psy_desc.properties = prop;
bat_psy_desc.num_properties = props;
- bat_psy = power_supply_register(&dev->dev, &bat_psy_desc, NULL);
+ bat_psy = power_supply_register(&dev->dev, &bat_psy_desc, &cfg);
if (!IS_ERR(bat_psy)) {
schedule_work(&bat_work);
} else {
@@ -266,8 +262,7 @@ err:
static int wm97xx_bat_remove(struct platform_device *dev)
{
- struct wm97xx_pdata *wmdata = dev->dev.platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = dev->dev.platform_data;
if (pdata && gpio_is_valid(pdata->charge_gpio)) {
free_irq(gpio_to_irq(pdata->charge_gpio), dev);
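
The wm97xx conversion above stops reaching through the parent device's platform_data and instead hands the battery platform data to the power-supply core via struct power_supply_config, retrieving it later with power_supply_get_drvdata(). A condensed sketch of that pattern; everything except the power-supply API names is invented:

	#include <linux/platform_device.h>
	#include <linux/power_supply.h>

	struct demo_batt_pdata {
		int design_uah;		/* hypothetical field */
	};

	static enum power_supply_property demo_props[] = {
		POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
	};

	static int demo_get_property(struct power_supply *psy,
				     enum power_supply_property psp,
				     union power_supply_propval *val)
	{
		/* the pointer handed over in cfg.drv_data at registration time */
		struct demo_batt_pdata *pdata = power_supply_get_drvdata(psy);

		switch (psp) {
		case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
			val->intval = pdata->design_uah;
			return 0;
		default:
			return -EINVAL;
		}
	}

	static const struct power_supply_desc demo_psy_desc = {
		.name = "demo-battery",
		.type = POWER_SUPPLY_TYPE_BATTERY,
		.properties = demo_props,
		.num_properties = ARRAY_SIZE(demo_props),
		.get_property = demo_get_property,
	};

	static int demo_probe(struct platform_device *pdev)
	{
		struct demo_batt_pdata *pdata = dev_get_platdata(&pdev->dev);
		struct power_supply_config cfg = {};
		struct power_supply *psy;

		if (!pdata)
			return -EINVAL;

		cfg.drv_data = pdata;
		psy = power_supply_register(&pdev->dev, &demo_psy_desc, &cfg);
		return PTR_ERR_OR_ZERO(psy);
	}
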
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 3314bf299a51..fb44d5215e30 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -120,7 +120,7 @@ static const struct regulator_linear_range rk808_ldo3_voltage_ranges[] = {
static int rk808_buck1_2_get_voltage_sel_regmap(struct regulator_dev *rdev)
{
struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
- int id = rdev->desc->id - RK808_ID_DCDC1;
+ int id = rdev_get_id(rdev);
struct gpio_desc *gpio = pdata->dvs_gpio[id];
unsigned int val;
int ret;
@@ -193,7 +193,7 @@ static int rk808_buck1_2_set_voltage_sel(struct regulator_dev *rdev,
unsigned sel)
{
struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
- int id = rdev->desc->id - RK808_ID_DCDC1;
+ int id = rdev_get_id(rdev);
struct gpio_desc *gpio = pdata->dvs_gpio[id];
unsigned int reg = rdev->desc->vsel_reg;
unsigned old_sel;
@@ -232,7 +232,7 @@ static int rk808_buck1_2_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int new_selector)
{
struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
- int id = rdev->desc->id - RK808_ID_DCDC1;
+ int id = rdev_get_id(rdev);
struct gpio_desc *gpio = pdata->dvs_gpio[id];
/* if there is no dvs1/2 pin, we don't need wait extra time here. */
@@ -245,8 +245,7 @@ static int rk808_buck1_2_set_voltage_time_sel(struct regulator_dev *rdev,
static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
{
unsigned int ramp_value = RK808_RAMP_RATE_10MV_PER_US;
- unsigned int reg = rk808_buck_config_regs[rdev->desc->id -
- RK808_ID_DCDC1];
+ unsigned int reg = rk808_buck_config_regs[rdev_get_id(rdev)];
switch (ramp_delay) {
case 1 ... 2000:
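
rdev_get_id() is the regulator-core accessor that simply returns rdev->desc->id, so the conversion above assumes RK808_ID_DCDC1 is 0 (otherwise the subtraction it replaces would have changed the array index). A small hedged sketch of using the id to index per-regulator driver data; the structure and callback names are illustrative:

	#include <linux/gpio/consumer.h>
	#include <linux/regulator/driver.h>

	struct demo_regulator_data {
		struct gpio_desc *dvs_gpio[2];	/* per-buck DVS pins, hypothetical */
	};

	static int demo_get_voltage_sel(struct regulator_dev *rdev)
	{
		struct demo_regulator_data *pdata = rdev_get_drvdata(rdev);
		int id = rdev_get_id(rdev);	/* returns rdev->desc->id */
		struct gpio_desc *gpio = pdata->dvs_gpio[id];

		/* without a DVS pin (or with it low) the normal register applies */
		if (!gpio || gpiod_get_value(gpio) == 0)
			return regulator_get_voltage_sel_regmap(rdev);

		/* ...otherwise the alternate DVS selector register would be read... */
		return -EINVAL;
	}
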
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 1de089019268..0e3fdfdbd098 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -69,6 +69,7 @@ static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
+static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
@@ -125,6 +126,7 @@ struct dasd_device *dasd_alloc_device(void)
INIT_WORK(&device->kick_work, do_kick_device);
INIT_WORK(&device->restore_device, do_restore_device);
INIT_WORK(&device->reload_device, do_reload_device);
+ INIT_WORK(&device->requeue_requests, do_requeue_requests);
device->state = DASD_STATE_NEW;
device->target = DASD_STATE_NEW;
mutex_init(&device->state_mutex);
@@ -1448,9 +1450,9 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
cqr->starttime = jiffies;
cqr->retries--;
if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
- cqr->lpm &= device->path_data.opm;
+ cqr->lpm &= dasd_path_get_opm(device);
if (!cqr->lpm)
- cqr->lpm = device->path_data.opm;
+ cqr->lpm = dasd_path_get_opm(device);
}
if (cqr->cpmode == 1) {
rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
@@ -1483,8 +1485,8 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
DBF_DEV_EVENT(DBF_WARNING, device,
"start_IO: selected paths gone (%x)",
cqr->lpm);
- } else if (cqr->lpm != device->path_data.opm) {
- cqr->lpm = device->path_data.opm;
+ } else if (cqr->lpm != dasd_path_get_opm(device)) {
+ cqr->lpm = dasd_path_get_opm(device);
DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
"start_IO: selected paths gone,"
" retry on all paths");
@@ -1493,11 +1495,10 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
"start_IO: all paths in opm gone,"
" do path verification");
dasd_generic_last_path_gone(device);
- device->path_data.opm = 0;
- device->path_data.ppm = 0;
- device->path_data.npm = 0;
- device->path_data.tbvpm =
- ccw_device_get_path_mask(device->cdev);
+ dasd_path_no_path(device);
+ dasd_path_set_tbvpm(device,
+ ccw_device_get_path_mask(
+ device->cdev));
}
break;
case -ENODEV:
@@ -1623,6 +1624,13 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
+static int dasd_check_hpf_error(struct irb *irb)
+{
+ return (scsw_tm_is_valid_schxs(&irb->scsw) &&
+ (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
+ irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
+}
+
/*
* Interrupt handler for "normal" ssch-io based dasd devices.
*/
@@ -1642,7 +1650,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
switch (PTR_ERR(irb)) {
case -EIO:
if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
- device = (struct dasd_device *) cqr->startdev;
+ device = cqr->startdev;
cqr->status = DASD_CQR_CLEARED;
dasd_device_clear_timer(device);
wake_up(&dasd_flush_wq);
@@ -1749,19 +1757,26 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct dasd_ccw_req, devlist);
}
} else { /* error */
+ /* check for HPF error
+ * call discipline function to requeue all requests
+ * and disable HPF accordingly
+ */
+ if (cqr->cpmode && dasd_check_hpf_error(irb) &&
+ device->discipline->handle_hpf_error)
+ device->discipline->handle_hpf_error(device, irb);
/*
* If we don't want complex ERP for this request, then just
* reset this and retry it in the fastpath
*/
if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
cqr->retries > 0) {
- if (cqr->lpm == device->path_data.opm)
+ if (cqr->lpm == dasd_path_get_opm(device))
DBF_DEV_EVENT(DBF_DEBUG, device,
"default ERP in fastpath "
"(%i retries left)",
cqr->retries);
if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
- cqr->lpm = device->path_data.opm;
+ cqr->lpm = dasd_path_get_opm(device);
cqr->status = DASD_CQR_QUEUED;
next = cqr;
} else
@@ -2002,17 +2017,18 @@ static void __dasd_device_check_path_events(struct dasd_device *device)
{
int rc;
- if (device->path_data.tbvpm) {
- if (device->stopped & ~(DASD_STOPPED_DC_WAIT |
- DASD_UNRESUMED_PM))
- return;
- rc = device->discipline->verify_path(
- device, device->path_data.tbvpm);
- if (rc)
- dasd_device_set_timer(device, 50);
- else
- device->path_data.tbvpm = 0;
- }
+ if (!dasd_path_get_tbvpm(device))
+ return;
+
+ if (device->stopped &
+ ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
+ return;
+ rc = device->discipline->verify_path(device,
+ dasd_path_get_tbvpm(device));
+ if (rc)
+ dasd_device_set_timer(device, 50);
+ else
+ dasd_path_clear_all_verify(device);
};
/*
@@ -2924,10 +2940,10 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
if (!block)
return -EINVAL;
- spin_lock_irqsave(&block->queue_lock, flags);
+ spin_lock_irqsave(&block->request_queue_lock, flags);
req = (struct request *) cqr->callback_data;
blk_requeue_request(block->request_queue, req);
- spin_unlock_irqrestore(&block->queue_lock, flags);
+ spin_unlock_irqrestore(&block->request_queue_lock, flags);
return 0;
}
@@ -3121,6 +3137,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
*/
static void dasd_setup_queue(struct dasd_block *block)
{
+ struct request_queue *q = block->request_queue;
int max;
if (block->base->features & DASD_FEATURE_USERAW) {
@@ -3135,17 +3152,16 @@ static void dasd_setup_queue(struct dasd_block *block)
} else {
max = block->base->discipline->max_blocks << block->s2b_shift;
}
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
- block->request_queue->limits.max_dev_sectors = max;
- blk_queue_logical_block_size(block->request_queue,
- block->bp_block);
- blk_queue_max_hw_sectors(block->request_queue, max);
- blk_queue_max_segments(block->request_queue, -1L);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ q->limits.max_dev_sectors = max;
+ blk_queue_logical_block_size(q, block->bp_block);
+ blk_queue_max_hw_sectors(q, max);
+ blk_queue_max_segments(q, USHRT_MAX);
/* with page sized segments we can translate each segment into
* one idaw/tidaw
*/
- blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
- blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
+ blk_queue_max_segment_size(q, PAGE_SIZE);
+ blk_queue_segment_boundary(q, PAGE_SIZE - 1);
}
/*
@@ -3517,11 +3533,15 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
struct dasd_device *device;
struct dasd_block *block;
int max_count, open_count, rc;
+ unsigned long flags;
rc = 0;
- device = dasd_device_from_cdev(cdev);
- if (IS_ERR(device))
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ device = dasd_device_from_cdev_locked(cdev);
+ if (IS_ERR(device)) {
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
return PTR_ERR(device);
+ }
/*
* We must make sure that this device is currently not in use.
@@ -3540,8 +3560,7 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
pr_warn("%s: The DASD cannot be set offline while it is in use\n",
dev_name(&cdev->dev));
clear_bit(DASD_FLAG_OFFLINE, &device->flags);
- dasd_put_device(device);
- return -EBUSY;
+ goto out_busy;
}
}
@@ -3551,19 +3570,19 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* could only be called by normal offline so safe_offline flag
* needs to be removed to run normal offline and kill all I/O
*/
- if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags))
/* Already doing normal offline processing */
- dasd_put_device(device);
- return -EBUSY;
- } else
+ goto out_busy;
+ else
clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
-
- } else
- if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ } else {
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
/* Already doing offline processing */
- dasd_put_device(device);
- return -EBUSY;
- }
+ goto out_busy;
+ }
+
+ set_bit(DASD_FLAG_OFFLINE, &device->flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
/*
* if safe_offline called set safe_offline_running flag and
@@ -3591,7 +3610,6 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
goto interrupted;
}
- set_bit(DASD_FLAG_OFFLINE, &device->flags);
dasd_set_target_state(device, DASD_STATE_NEW);
/* dasd_delete_device destroys the device reference. */
block = device->block;
@@ -3610,7 +3628,14 @@ interrupted:
clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
clear_bit(DASD_FLAG_OFFLINE, &device->flags);
dasd_put_device(device);
+
return rc;
+
+out_busy:
+ dasd_put_device(device);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+ return -EBUSY;
}
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
@@ -3675,14 +3700,12 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
case CIO_GONE:
case CIO_BOXED:
case CIO_NO_PATH:
- device->path_data.opm = 0;
- device->path_data.ppm = 0;
- device->path_data.npm = 0;
+ dasd_path_no_path(device);
ret = dasd_generic_last_path_gone(device);
break;
case CIO_OPER:
ret = 1;
- if (device->path_data.opm)
+ if (dasd_path_get_opm(device))
ret = dasd_generic_path_operational(device);
break;
}
@@ -3693,48 +3716,32 @@ EXPORT_SYMBOL_GPL(dasd_generic_notify);
void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
{
- int chp;
- __u8 oldopm, eventlpm;
struct dasd_device *device;
+ int chp, oldopm, hpfpm, ifccpm;
device = dasd_device_from_cdev_locked(cdev);
if (IS_ERR(device))
return;
+
+ oldopm = dasd_path_get_opm(device);
for (chp = 0; chp < 8; chp++) {
- eventlpm = 0x80 >> chp;
if (path_event[chp] & PE_PATH_GONE) {
- oldopm = device->path_data.opm;
- device->path_data.opm &= ~eventlpm;
- device->path_data.ppm &= ~eventlpm;
- device->path_data.npm &= ~eventlpm;
- if (oldopm && !device->path_data.opm) {
- dev_warn(&device->cdev->dev,
- "No verified channel paths remain "
- "for the device\n");
- DBF_DEV_EVENT(DBF_WARNING, device,
- "%s", "last verified path gone");
- dasd_eer_write(device, NULL, DASD_EER_NOPATH);
- dasd_device_set_stop_bits(device,
- DASD_STOPPED_DC_WAIT);
- }
+ dasd_path_notoper(device, chp);
}
if (path_event[chp] & PE_PATH_AVAILABLE) {
- device->path_data.opm &= ~eventlpm;
- device->path_data.ppm &= ~eventlpm;
- device->path_data.npm &= ~eventlpm;
- device->path_data.tbvpm |= eventlpm;
+ dasd_path_available(device, chp);
dasd_schedule_device_bh(device);
}
if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
- if (!(device->path_data.opm & eventlpm) &&
- !(device->path_data.tbvpm & eventlpm)) {
+ if (!dasd_path_is_operational(device, chp) &&
+ !dasd_path_need_verify(device, chp)) {
/*
* we can not establish a pathgroup on an
* unavailable path, so trigger a path
* verification first
*/
- device->path_data.tbvpm |= eventlpm;
- dasd_schedule_device_bh(device);
+ dasd_path_available(device, chp);
+ dasd_schedule_device_bh(device);
}
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Pathgroup re-established\n");
@@ -3742,45 +3749,65 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
device->discipline->kick_validate(device);
}
}
+ hpfpm = dasd_path_get_hpfpm(device);
+ ifccpm = dasd_path_get_ifccpm(device);
+ if (!dasd_path_get_opm(device) && hpfpm) {
+ /*
+ * device has no operational paths but at least one path is
+ * disabled due to HPF errors
+ * disable HPF at all and use the path(s) again
+ */
+ if (device->discipline->disable_hpf)
+ device->discipline->disable_hpf(device);
+ dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
+ dasd_path_set_tbvpm(device, hpfpm);
+ dasd_schedule_device_bh(device);
+ dasd_schedule_requeue(device);
+ } else if (!dasd_path_get_opm(device) && ifccpm) {
+ /*
+ * device has no operational paths but at least one path is
+ * disabled due to IFCC errors
+ * trigger path verification on paths with IFCC errors
+ */
+ dasd_path_set_tbvpm(device, ifccpm);
+ dasd_schedule_device_bh(device);
+ }
+ if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
+ dev_warn(&device->cdev->dev,
+ "No verified channel paths remain for the device\n");
+ DBF_DEV_EVENT(DBF_WARNING, device,
+ "%s", "last verified path gone");
+ dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+ dasd_device_set_stop_bits(device,
+ DASD_STOPPED_DC_WAIT);
+ }
dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);
int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
- if (!device->path_data.opm && lpm) {
- device->path_data.opm = lpm;
+ if (!dasd_path_get_opm(device) && lpm) {
+ dasd_path_set_opm(device, lpm);
dasd_generic_path_operational(device);
} else
- device->path_data.opm |= lpm;
+ dasd_path_add_opm(device, lpm);
return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
-
-int dasd_generic_pm_freeze(struct ccw_device *cdev)
+/*
+ * clear active requests and requeue them to the block layer if possible
+ */
+static int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
- struct dasd_device *device = dasd_device_from_cdev(cdev);
- struct list_head freeze_queue;
+ struct list_head requeue_queue;
struct dasd_ccw_req *cqr, *n;
struct dasd_ccw_req *refers;
int rc;
- if (IS_ERR(device))
- return PTR_ERR(device);
-
- /* mark device as suspended */
- set_bit(DASD_FLAG_SUSPENDED, &device->flags);
-
- if (device->discipline->freeze)
- rc = device->discipline->freeze(device);
-
- /* disallow new I/O */
- dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
-
- /* clear active requests and requeue them to block layer if possible */
- INIT_LIST_HEAD(&freeze_queue);
- spin_lock_irq(get_ccwdev_lock(cdev));
+ INIT_LIST_HEAD(&requeue_queue);
+ spin_lock_irq(get_ccwdev_lock(device->cdev));
rc = 0;
list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
/* Check status and move request to flush_queue */
@@ -3791,25 +3818,22 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
dev_err(&device->cdev->dev,
"Unable to terminate request %p "
"on suspend\n", cqr);
- spin_unlock_irq(get_ccwdev_lock(cdev));
+ spin_unlock_irq(get_ccwdev_lock(device->cdev));
dasd_put_device(device);
return rc;
}
}
- list_move_tail(&cqr->devlist, &freeze_queue);
+ list_move_tail(&cqr->devlist, &requeue_queue);
}
- spin_unlock_irq(get_ccwdev_lock(cdev));
+ spin_unlock_irq(get_ccwdev_lock(device->cdev));
- list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
+ list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
wait_event(dasd_flush_wq,
(cqr->status != DASD_CQR_CLEAR_PENDING));
- if (cqr->status == DASD_CQR_CLEARED)
- cqr->status = DASD_CQR_QUEUED;
- /* requeue requests to blocklayer will only work for
- block device requests */
- if (_dasd_requeue_request(cqr))
- continue;
+ /* mark sleepon requests as ended */
+ if (cqr->callback_data == DASD_SLEEPON_START_TAG)
+ cqr->callback_data = DASD_SLEEPON_END_TAG;
/* remove requests from device and block queue */
list_del_init(&cqr->devlist);
@@ -3821,6 +3845,14 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
dasd_free_erp_request(cqr, cqr->memdev);
cqr = refers;
}
+
+ /*
+ * requeueing requests to the block layer only works
+ * for block device requests
+ */
+ if (_dasd_requeue_request(cqr))
+ continue;
+
if (cqr->block)
list_del_init(&cqr->blocklist);
cqr->block->base->discipline->free_cp(
@@ -3831,15 +3863,56 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
* if requests remain then they are internal request
* and go back to the device queue
*/
- if (!list_empty(&freeze_queue)) {
+ if (!list_empty(&requeue_queue)) {
/* move freeze_queue to start of the ccw_queue */
- spin_lock_irq(get_ccwdev_lock(cdev));
- list_splice_tail(&freeze_queue, &device->ccw_queue);
- spin_unlock_irq(get_ccwdev_lock(cdev));
+ spin_lock_irq(get_ccwdev_lock(device->cdev));
+ list_splice_tail(&requeue_queue, &device->ccw_queue);
+ spin_unlock_irq(get_ccwdev_lock(device->cdev));
}
- dasd_put_device(device);
+ /* wake up generic waitqueue for possibly ended sleepon requests */
+ wake_up(&generic_waitq);
return rc;
}
+
+static void do_requeue_requests(struct work_struct *work)
+{
+ struct dasd_device *device = container_of(work, struct dasd_device,
+ requeue_requests);
+ dasd_generic_requeue_all_requests(device);
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
+ if (device->block)
+ dasd_schedule_block_bh(device->block);
+ dasd_put_device(device);
+}
+
+void dasd_schedule_requeue(struct dasd_device *device)
+{
+ dasd_get_device(device);
+ /* queue call to do_requeue_requests to the kernel event daemon. */
+ if (!schedule_work(&device->requeue_requests))
+ dasd_put_device(device);
+}
+EXPORT_SYMBOL(dasd_schedule_requeue);
+
+int dasd_generic_pm_freeze(struct ccw_device *cdev)
+{
+ struct dasd_device *device = dasd_device_from_cdev(cdev);
+ int rc;
+
+ if (IS_ERR(device))
+ return PTR_ERR(device);
+
+ /* mark device as suspended */
+ set_bit(DASD_FLAG_SUSPENDED, &device->flags);
+
+ if (device->discipline->freeze)
+ rc = device->discipline->freeze(device);
+
+ /* disallow new I/O */
+ dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
+
+ return dasd_generic_requeue_all_requests(device);
+}
EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
int dasd_generic_restore_device(struct ccw_device *cdev)
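
The new dasd_schedule_requeue()/do_requeue_requests() pair above follows the usual dasd work-scheduling idiom: take a device reference before schedule_work() and drop it immediately if the work was already queued (schedule_work() returns false in that case), while the worker drops the reference when it has finished. A generic hedged sketch of the idiom, with invented names and a trivial refcount:

	#include <linux/atomic.h>
	#include <linux/workqueue.h>

	struct demo_device {
		atomic_t refcount;
		struct work_struct requeue_work;
	};

	static void demo_get(struct demo_device *dev) { atomic_inc(&dev->refcount); }
	static void demo_put(struct demo_device *dev) { atomic_dec(&dev->refcount); }

	static void demo_requeue_worker(struct work_struct *work)
	{
		struct demo_device *dev = container_of(work, struct demo_device,
						       requeue_work);

		/* ... requeue pending requests here ... */

		demo_put(dev);	/* drop the reference taken at schedule time */
	}

	static void demo_schedule_requeue(struct demo_device *dev)
	{
		demo_get(dev);
		/* schedule_work() returns false if the work was already queued */
		if (!schedule_work(&dev->requeue_work))
			demo_put(dev);
	}
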
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 8305ab688d57..95f7645e3c37 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -152,7 +152,7 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
opm = ccw_device_get_path_mask(device->cdev);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
if (erp->lpm == 0)
- erp->lpm = device->path_data.opm &
+ erp->lpm = dasd_path_get_opm(device) &
~(erp->irb.esw.esw0.sublog.lpum);
else
erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
@@ -273,7 +273,7 @@ static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
!test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
erp->status = DASD_CQR_FILLED;
erp->retries = 10;
- erp->lpm = erp->startdev->path_data.opm;
+ erp->lpm = dasd_path_get_opm(erp->startdev);
erp->function = dasd_3990_erp_action_1_sec;
}
return erp;
@@ -1926,7 +1926,7 @@ dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
!test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
/* reset the lpm and the status to be able to
* try further actions. */
- erp->lpm = erp->startdev->path_data.opm;
+ erp->lpm = dasd_path_get_opm(erp->startdev);
erp->status = DASD_CQR_NEED_ERP;
}
}
@@ -2208,6 +2208,51 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_inspect_32 */
+static void dasd_3990_erp_disable_path(struct dasd_device *device, __u8 lpum)
+{
+ int pos = pathmask_to_pos(lpum);
+
+ /* no remaining path, cannot disable */
+ if (!(dasd_path_get_opm(device) & ~lpum))
+ return;
+
+ dev_err(&device->cdev->dev,
+ "Path %x.%02x (pathmask %02x) is disabled - IFCC threshold exceeded\n",
+ device->path[pos].cssid, device->path[pos].chpid, lpum);
+ dasd_path_remove_opm(device, lpum);
+ dasd_path_add_ifccpm(device, lpum);
+ device->path[pos].errorclk = 0;
+ atomic_set(&device->path[pos].error_count, 0);
+}
+
+static void dasd_3990_erp_account_error(struct dasd_ccw_req *erp)
+{
+ struct dasd_device *device = erp->startdev;
+ __u8 lpum = erp->refers->irb.esw.esw1.lpum;
+ int pos = pathmask_to_pos(lpum);
+ unsigned long long clk;
+
+ if (!device->path_thrhld)
+ return;
+
+ clk = get_tod_clock();
+ /*
+ * check if the last error occurred longer ago than the timeout;
+ * if so, reset the error state
+ */
+ if ((tod_to_ns(clk - device->path[pos].errorclk) / NSEC_PER_SEC)
+ >= device->path_interval) {
+ atomic_set(&device->path[pos].error_count, 0);
+ device->path[pos].errorclk = 0;
+ }
+ atomic_inc(&device->path[pos].error_count);
+ device->path[pos].errorclk = clk;
+ /* threshold exceeded, disable path if possible */
+ if (atomic_read(&device->path[pos].error_count) >=
+ device->path_thrhld)
+ dasd_3990_erp_disable_path(device, lpum);
+}
+
/*
*****************************************************************************
* main ERP control functions (24 and 32 byte sense)
@@ -2237,6 +2282,7 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
| SCHN_STAT_CHN_CTRL_CHK)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"channel or interface control check");
+ dasd_3990_erp_account_error(erp);
erp = dasd_3990_erp_action_4(erp, NULL);
}
return erp;
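
dasd_3990_erp_account_error() above keeps a per-path error counter: the counter is reset when the previous IFCC/CCC error lies further back than path_interval seconds, incremented for the current error, and the path is taken out of the operational mask once path_thrhld is reached. A self-contained sketch of the same accounting using generic ktime helpers; all names here are illustrative, not the driver's:

	#include <linux/atomic.h>
	#include <linux/ktime.h>

	struct demo_path {
		atomic_t error_count;
		ktime_t last_error;
	};

	/* returns true if the path should be disabled */
	static bool demo_account_error(struct demo_path *p,
				       unsigned int threshold,
				       unsigned int interval_s)
	{
		ktime_t now = ktime_get();

		/* last error is older than the interval: start counting afresh */
		if (ktime_to_ns(ktime_sub(now, p->last_error)) >=
		    (s64)interval_s * NSEC_PER_SEC)
			atomic_set(&p->error_count, 0);

		atomic_inc(&p->error_count);
		p->last_error = now;

		return atomic_read(&p->error_count) >= threshold;
	}
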
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 15a1a70cace9..84ca314c87e3 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -725,27 +725,15 @@ static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr,
static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dasd_devmap *devmap;
- int val;
- char *endp;
-
- devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
- if (IS_ERR(devmap))
- return PTR_ERR(devmap);
+ unsigned int val;
+ int rc;
- val = simple_strtoul(buf, &endp, 0);
- if (((endp + 1) < (buf + count)) || (val > 1))
+ if (kstrtouint(buf, 0, &val) || val > 1)
return -EINVAL;
- spin_lock(&dasd_devmap_lock);
- if (val)
- devmap->features |= DASD_FEATURE_FAILFAST;
- else
- devmap->features &= ~DASD_FEATURE_FAILFAST;
- if (devmap->device)
- devmap->device->features = devmap->features;
- spin_unlock(&dasd_devmap_lock);
- return count;
+ rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_FAILFAST, val);
+
+ return rc ? : count;
}
static DEVICE_ATTR(failfast, 0644, dasd_ff_show, dasd_ff_store);
@@ -771,32 +759,41 @@ static ssize_t
dasd_ro_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dasd_devmap *devmap;
+ struct ccw_device *cdev = to_ccwdev(dev);
struct dasd_device *device;
- int val;
- char *endp;
-
- devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
- if (IS_ERR(devmap))
- return PTR_ERR(devmap);
+ unsigned long flags;
+ unsigned int val;
+ int rc;
- val = simple_strtoul(buf, &endp, 0);
- if (((endp + 1) < (buf + count)) || (val > 1))
+ if (kstrtouint(buf, 0, &val) || val > 1)
return -EINVAL;
- spin_lock(&dasd_devmap_lock);
- if (val)
- devmap->features |= DASD_FEATURE_READONLY;
- else
- devmap->features &= ~DASD_FEATURE_READONLY;
- device = devmap->device;
- if (device) {
- device->features = devmap->features;
- val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+ rc = dasd_set_feature(cdev, DASD_FEATURE_READONLY, val);
+ if (rc)
+ return rc;
+
+ device = dasd_device_from_cdev(cdev);
+ if (IS_ERR(device))
+ return PTR_ERR(device);
+
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+
+ if (!device->block || !device->block->gdp ||
+ test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ goto out;
}
- spin_unlock(&dasd_devmap_lock);
- if (device && device->block && device->block->gdp)
- set_disk_ro(device->block->gdp, val);
+ /* Increase open_count to avoid losing the block device */
+ atomic_inc(&device->block->open_count);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+ set_disk_ro(device->block->gdp, val);
+ atomic_dec(&device->block->open_count);
+
+out:
+ dasd_put_device(device);
+
return count;
}
@@ -823,27 +820,15 @@ static ssize_t
dasd_erplog_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dasd_devmap *devmap;
- int val;
- char *endp;
-
- devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
- if (IS_ERR(devmap))
- return PTR_ERR(devmap);
+ unsigned int val;
+ int rc;
- val = simple_strtoul(buf, &endp, 0);
- if (((endp + 1) < (buf + count)) || (val > 1))
+ if (kstrtouint(buf, 0, &val) || val > 1)
return -EINVAL;
- spin_lock(&dasd_devmap_lock);
- if (val)
- devmap->features |= DASD_FEATURE_ERPLOG;
- else
- devmap->features &= ~DASD_FEATURE_ERPLOG;
- if (devmap->device)
- devmap->device->features = devmap->features;
- spin_unlock(&dasd_devmap_lock);
- return count;
+ rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_ERPLOG, val);
+
+ return rc ? : count;
}
static DEVICE_ATTR(erplog, 0644, dasd_erplog_show, dasd_erplog_store);
@@ -871,16 +856,14 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_devmap *devmap;
+ unsigned int val;
ssize_t rc;
- int val;
- char *endp;
devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
- val = simple_strtoul(buf, &endp, 0);
- if (((endp + 1) < (buf + count)) || (val > 1))
+ if (kstrtouint(buf, 0, &val) || val > 1)
return -EINVAL;
spin_lock(&dasd_devmap_lock);
@@ -994,10 +977,12 @@ dasd_access_show(struct device *dev, struct device_attribute *attr,
if (IS_ERR(device))
return PTR_ERR(device);
- if (device->discipline->host_access_count)
- count = device->discipline->host_access_count(device);
- else
+ if (!device->discipline)
+ count = -ENODEV;
+ else if (!device->discipline->host_access_count)
count = -EOPNOTSUPP;
+ else
+ count = device->discipline->host_access_count(device);
dasd_put_device(device);
if (count < 0)
@@ -1197,27 +1182,25 @@ static ssize_t
dasd_eer_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dasd_devmap *devmap;
- int val, rc;
- char *endp;
+ struct dasd_device *device;
+ unsigned int val;
+ int rc = 0;
- devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
- if (IS_ERR(devmap))
- return PTR_ERR(devmap);
- if (!devmap->device)
- return -ENODEV;
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return PTR_ERR(device);
- val = simple_strtoul(buf, &endp, 0);
- if (((endp + 1) < (buf + count)) || (val > 1))
+ if (kstrtouint(buf, 0, &val) || val > 1)
return -EINVAL;
- if (val) {
- rc = dasd_eer_enable(devmap->device);
- if (rc)
- return rc;
- } else
- dasd_eer_disable(devmap->device);
- return count;
+ if (val)
+ rc = dasd_eer_enable(device);
+ else
+ dasd_eer_disable(device);
+
+ dasd_put_device(device);
+
+ return rc ? : count;
}
static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store);
@@ -1360,6 +1343,50 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(timeout, 0644,
dasd_timeout_show, dasd_timeout_store);
+
+static ssize_t
+dasd_path_reset_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_device *device;
+ unsigned int val;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+ if ((kstrtouint(buf, 16, &val) != 0) || val > 0xff)
+ val = 0;
+
+ if (device->discipline && device->discipline->reset_path)
+ device->discipline->reset_path(device, (__u8) val);
+
+ dasd_put_device(device);
+ return count;
+}
+
+static DEVICE_ATTR(path_reset, 0200, NULL, dasd_path_reset_store);
+
+static ssize_t dasd_hpf_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dasd_device *device;
+ int hpf;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+ if (!device->discipline || !device->discipline->hpf_enabled) {
+ dasd_put_device(device);
+ return snprintf(buf, PAGE_SIZE, "%d\n", dasd_nofcx);
+ }
+ hpf = device->discipline->hpf_enabled(device);
+ dasd_put_device(device);
+ return snprintf(buf, PAGE_SIZE, "%d\n", hpf);
+}
+
+static DEVICE_ATTR(hpf, 0444, dasd_hpf_show, NULL);
+
static ssize_t dasd_reservation_policy_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1385,27 +1412,17 @@ static ssize_t dasd_reservation_policy_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dasd_devmap *devmap;
+ struct ccw_device *cdev = to_ccwdev(dev);
int rc;
- devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
- if (IS_ERR(devmap))
- return PTR_ERR(devmap);
- rc = 0;
- spin_lock(&dasd_devmap_lock);
if (sysfs_streq("ignore", buf))
- devmap->features &= ~DASD_FEATURE_FAILONSLCK;
+ rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 0);
else if (sysfs_streq("fail", buf))
- devmap->features |= DASD_FEATURE_FAILONSLCK;
+ rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 1);
else
rc = -EINVAL;
- if (devmap->device)
- devmap->device->features = devmap->features;
- spin_unlock(&dasd_devmap_lock);
- if (rc)
- return rc;
- else
- return count;
+
+ return rc ? : count;
}
static DEVICE_ATTR(reservation_policy, 0644,
@@ -1461,25 +1478,120 @@ static ssize_t dasd_pm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dasd_device *device;
- u8 opm, nppm, cablepm, cuirpm, hpfpm;
+ u8 opm, nppm, cablepm, cuirpm, hpfpm, ifccpm;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return sprintf(buf, "0\n");
- opm = device->path_data.opm;
- nppm = device->path_data.npm;
- cablepm = device->path_data.cablepm;
- cuirpm = device->path_data.cuirpm;
- hpfpm = device->path_data.hpfpm;
+ opm = dasd_path_get_opm(device);
+ nppm = dasd_path_get_nppm(device);
+ cablepm = dasd_path_get_cablepm(device);
+ cuirpm = dasd_path_get_cuirpm(device);
+ hpfpm = dasd_path_get_hpfpm(device);
+ ifccpm = dasd_path_get_ifccpm(device);
dasd_put_device(device);
- return sprintf(buf, "%02x %02x %02x %02x %02x\n", opm, nppm,
- cablepm, cuirpm, hpfpm);
+ return sprintf(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm,
+ cablepm, cuirpm, hpfpm, ifccpm);
}
static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL);
+/*
+ * threshold value for IFCC/CCC errors
+ */
+static ssize_t
+dasd_path_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dasd_device *device;
+ int len;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+ len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_thrhld);
+ dasd_put_device(device);
+ return len;
+}
+
+static ssize_t
+dasd_path_threshold_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_device *device;
+ unsigned long flags;
+ unsigned long val;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+ if ((kstrtoul(buf, 10, &val) != 0) ||
+ (val > DASD_THRHLD_MAX) || val == 0) {
+ dasd_put_device(device);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags);
+ if (val)
+ device->path_thrhld = val;
+ spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags);
+ dasd_put_device(device);
+ return count;
+}
+
+static DEVICE_ATTR(path_threshold, 0644, dasd_path_threshold_show,
+ dasd_path_threshold_store);
+/*
+ * interval for IFCC/CCC checks, i.e. the time that must pass
+ * without an IFCC/CCC error before the error counter
+ * gets reset
+ */
+static ssize_t
+dasd_path_interval_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dasd_device *device;
+ int len;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+ len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_interval);
+ dasd_put_device(device);
+ return len;
+}
+
+static ssize_t
+dasd_path_interval_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_device *device;
+ unsigned long flags;
+ unsigned long val;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+ if ((kstrtoul(buf, 10, &val) != 0) ||
+ (val > DASD_INTERVAL_MAX) || val == 0) {
+ dasd_put_device(device);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags);
+ if (val)
+ device->path_interval = val;
+ spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags);
+ dasd_put_device(device);
+ return count;
+}
+
+static DEVICE_ATTR(path_interval, 0644, dasd_path_interval_show,
+ dasd_path_interval_store);
+
+
static struct attribute * dasd_attrs[] = {
&dev_attr_readonly.attr,
&dev_attr_discipline.attr,
@@ -1500,6 +1612,10 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_safe_offline.attr,
&dev_attr_host_access_count.attr,
&dev_attr_path_masks.attr,
+ &dev_attr_path_threshold.attr,
+ &dev_attr_path_interval.attr,
+ &dev_attr_path_reset.attr,
+ &dev_attr_hpf.attr,
NULL,
};
@@ -1531,7 +1647,7 @@ dasd_set_feature(struct ccw_device *cdev, int feature, int flag)
{
struct dasd_devmap *devmap;
- devmap = dasd_find_busid(dev_name(&cdev->dev));
+ devmap = dasd_devmap_from_cdev(cdev);
if (IS_ERR(devmap))
return PTR_ERR(devmap);
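
Most of the dasd_devmap.c changes above replace open-coded simple_strtoul() parsing in sysfs store callbacks with kstrtouint() plus a single dasd_set_feature() call, returning "rc ? : count" so a successful store reports the number of bytes consumed. A minimal hedged sketch of that store shape; the attribute, feature flag and helper below are invented stand-ins:

	#include <linux/device.h>
	#include <linux/kernel.h>

	#define DEMO_FEATURE_X	0x01

	/* hypothetical helper standing in for dasd_set_feature() */
	static int demo_set_feature(struct device *dev, int feature, int flag)
	{
		return 0;
	}

	static ssize_t demo_feature_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
	{
		unsigned int val;
		int rc;

		/* accept only 0 or 1, in any base kstrtouint() understands */
		if (kstrtouint(buf, 0, &val) || val > 1)
			return -EINVAL;

		rc = demo_set_feature(dev, DEMO_FEATURE_X, val);

		/* on success return the full count, otherwise the error code */
		return rc ? : count;
	}
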
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index a7a88476e215..67bf50c9946f 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1042,8 +1042,11 @@ static void dasd_eckd_clear_conf_data(struct dasd_device *device)
private->conf_data = NULL;
private->conf_len = 0;
for (i = 0; i < 8; i++) {
- kfree(private->path_conf_data[i]);
- private->path_conf_data[i] = NULL;
+ kfree(device->path[i].conf_data);
+ device->path[i].conf_data = NULL;
+ device->path[i].cssid = 0;
+ device->path[i].ssid = 0;
+ device->path[i].chpid = 0;
}
}
@@ -1055,13 +1058,14 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
int rc, path_err, pos;
__u8 lpm, opm;
struct dasd_eckd_private *private, path_private;
- struct dasd_path *path_data;
struct dasd_uid *uid;
char print_path_uid[60], print_device_uid[60];
+ struct channel_path_desc *chp_desc;
+ struct subchannel_id sch_id;
private = device->private;
- path_data = &device->path_data;
opm = ccw_device_get_path_mask(device->cdev);
+ ccw_device_get_schid(device->cdev, &sch_id);
conf_data_saved = 0;
path_err = 0;
/* get configuration data per operational path */
@@ -1081,7 +1085,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
"No configuration data "
"retrieved");
/* no further analysis possible */
- path_data->opm |= lpm;
+ dasd_path_add_opm(device, opm);
continue; /* no error */
}
/* save first valid configuration data */
@@ -1098,8 +1102,13 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
}
pos = pathmask_to_pos(lpm);
/* store per path conf_data */
- private->path_conf_data[pos] =
- (struct dasd_conf_data *) conf_data;
+ device->path[pos].conf_data = conf_data;
+ device->path[pos].cssid = sch_id.cssid;
+ device->path[pos].ssid = sch_id.ssid;
+ chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
+ if (chp_desc)
+ device->path[pos].chpid = chp_desc->chpid;
+ kfree(chp_desc);
/*
* build device UID that other path data
* can be compared to it
@@ -1154,42 +1163,66 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
"device %s instead of %s\n", lpm,
print_path_uid, print_device_uid);
path_err = -EINVAL;
- path_data->cablepm |= lpm;
+ dasd_path_add_cablepm(device, lpm);
continue;
}
pos = pathmask_to_pos(lpm);
/* store per path conf_data */
- private->path_conf_data[pos] =
- (struct dasd_conf_data *) conf_data;
+ device->path[pos].conf_data = conf_data;
+ device->path[pos].cssid = sch_id.cssid;
+ device->path[pos].ssid = sch_id.ssid;
+ chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
+ if (chp_desc)
+ device->path[pos].chpid = chp_desc->chpid;
+ kfree(chp_desc);
path_private.conf_data = NULL;
path_private.conf_len = 0;
}
switch (dasd_eckd_path_access(conf_data, conf_len)) {
case 0x02:
- path_data->npm |= lpm;
+ dasd_path_add_nppm(device, lpm);
break;
case 0x03:
- path_data->ppm |= lpm;
+ dasd_path_add_ppm(device, lpm);
break;
}
- if (!path_data->opm) {
- path_data->opm = lpm;
+ if (!dasd_path_get_opm(device)) {
+ dasd_path_set_opm(device, lpm);
dasd_generic_path_operational(device);
} else {
- path_data->opm |= lpm;
+ dasd_path_add_opm(device, lpm);
}
- /*
- * if the path is used
- * it should not be in one of the negative lists
- */
- path_data->cablepm &= ~lpm;
- path_data->hpfpm &= ~lpm;
- path_data->cuirpm &= ~lpm;
}
return path_err;
}
+static u32 get_fcx_max_data(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ int fcx_in_css, fcx_in_gneq, fcx_in_features;
+ int tpm, mdc;
+
+ if (dasd_nofcx)
+ return 0;
+ /* is transport mode supported? */
+ fcx_in_css = css_general_characteristics.fcx;
+ fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
+ fcx_in_features = private->features.feature[40] & 0x80;
+ tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
+
+ if (!tpm)
+ return 0;
+
+ mdc = ccw_device_get_mdc(device->cdev, 0);
+ if (mdc < 0) {
+ dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
+ return 0;
+ } else {
+ return (u32)mdc * FCX_MAX_DATA_FACTOR;
+ }
+}
+
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
struct dasd_eckd_private *private = device->private;
@@ -1222,8 +1255,7 @@ static int rebuild_device_uid(struct dasd_device *device,
struct path_verification_work_data *data)
{
struct dasd_eckd_private *private = device->private;
- struct dasd_path *path_data = &device->path_data;
- __u8 lpm, opm = path_data->opm;
+ __u8 lpm, opm = dasd_path_get_opm(device);
int rc = -ENODEV;
for (lpm = 0x80; lpm; lpm >>= 1) {
@@ -1356,7 +1388,7 @@ static void do_path_verification_work(struct work_struct *work)
* in other case the device UID may have changed and
* the first working path UID will be used as device UID
*/
- if (device->path_data.opm &&
+ if (dasd_path_get_opm(device) &&
dasd_eckd_compare_path_uid(device, &path_private)) {
/*
* the comparison was not successful
@@ -1406,23 +1438,17 @@ static void do_path_verification_work(struct work_struct *work)
* situation in dasd_start_IO.
*/
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- if (!device->path_data.opm && opm) {
- device->path_data.opm = opm;
- device->path_data.cablepm &= ~opm;
- device->path_data.cuirpm &= ~opm;
- device->path_data.hpfpm &= ~opm;
+ if (!dasd_path_get_opm(device) && opm) {
+ dasd_path_set_opm(device, opm);
dasd_generic_path_operational(device);
} else {
- device->path_data.opm |= opm;
- device->path_data.cablepm &= ~opm;
- device->path_data.cuirpm &= ~opm;
- device->path_data.hpfpm &= ~opm;
+ dasd_path_add_opm(device, opm);
}
- device->path_data.npm |= npm;
- device->path_data.ppm |= ppm;
- device->path_data.tbvpm |= epm;
- device->path_data.cablepm |= cablepm;
- device->path_data.hpfpm |= hpfpm;
+ dasd_path_add_nppm(device, npm);
+ dasd_path_add_ppm(device, ppm);
+ dasd_path_add_tbvpm(device, epm);
+ dasd_path_add_cablepm(device, cablepm);
+ dasd_path_add_nohpfpm(device, hpfpm);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
@@ -1456,6 +1482,19 @@ static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
return 0;
}
+static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
+{
+ struct dasd_eckd_private *private = device->private;
+ unsigned long flags;
+
+ if (!private->fcx_max_data)
+ private->fcx_max_data = get_fcx_max_data(device);
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
+ dasd_schedule_device_bh(device);
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+}
+
static int dasd_eckd_read_features(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
@@ -1652,32 +1691,6 @@ static void dasd_eckd_kick_validate_server(struct dasd_device *device)
dasd_put_device(device);
}
-static u32 get_fcx_max_data(struct dasd_device *device)
-{
- struct dasd_eckd_private *private = device->private;
- int fcx_in_css, fcx_in_gneq, fcx_in_features;
- int tpm, mdc;
-
- if (dasd_nofcx)
- return 0;
- /* is transport mode supported? */
- fcx_in_css = css_general_characteristics.fcx;
- fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
- fcx_in_features = private->features.feature[40] & 0x80;
- tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
-
- if (!tpm)
- return 0;
-
- mdc = ccw_device_get_mdc(device->cdev, 0);
- if (mdc < 0) {
- dev_warn(&device->cdev->dev, "Detecting the maximum supported"
- " data size for zHPF requests failed\n");
- return 0;
- } else
- return (u32)mdc * FCX_MAX_DATA_FACTOR;
-}
-
/*
* Check device characteristics.
* If the device is accessible using ECKD discipline, the device is enabled.
@@ -1729,10 +1742,11 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
if (rc)
goto out_err1;
- /* set default timeout */
+ /* set some default values */
device->default_expires = DASD_EXPIRES;
- /* set default retry count */
device->default_retries = DASD_RETRIES;
+ device->path_thrhld = DASD_ECKD_PATH_THRHLD;
+ device->path_interval = DASD_ECKD_PATH_INTERVAL;
if (private->gneq) {
value = 1;
@@ -1839,13 +1853,16 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
private->gneq = NULL;
private->conf_len = 0;
for (i = 0; i < 8; i++) {
- kfree(private->path_conf_data[i]);
- if ((__u8 *)private->path_conf_data[i] ==
+ kfree(device->path[i].conf_data);
+ if ((__u8 *)device->path[i].conf_data ==
private->conf_data) {
private->conf_data = NULL;
private->conf_len = 0;
}
- private->path_conf_data[i] = NULL;
+ device->path[i].conf_data = NULL;
+ device->path[i].cssid = 0;
+ device->path[i].ssid = 0;
+ device->path[i].chpid = 0;
}
kfree(private->conf_data);
private->conf_data = NULL;
@@ -2966,7 +2983,7 @@ static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
if (cqr->block && (cqr->startdev != cqr->block->base)) {
dasd_eckd_reset_ccw_to_base_io(cqr);
cqr->startdev = cqr->block->base;
- cqr->lpm = cqr->block->base->path_data.opm;
+ cqr->lpm = dasd_path_get_opm(cqr->block->base);
}
};
@@ -3251,7 +3268,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
cqr->memdev = startdev;
cqr->block = block;
cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
- cqr->lpm = startdev->path_data.ppm;
+ cqr->lpm = dasd_path_get_ppm(startdev);
cqr->retries = startdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -3426,7 +3443,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
cqr->memdev = startdev;
cqr->block = block;
cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
- cqr->lpm = startdev->path_data.ppm;
+ cqr->lpm = dasd_path_get_ppm(startdev);
cqr->retries = startdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -3735,7 +3752,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
cqr->memdev = startdev;
cqr->block = block;
cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
- cqr->lpm = startdev->path_data.ppm;
+ cqr->lpm = dasd_path_get_ppm(startdev);
cqr->retries = startdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -3962,7 +3979,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
cqr->memdev = startdev;
cqr->block = block;
cqr->expires = startdev->default_expires * HZ;
- cqr->lpm = startdev->path_data.ppm;
+ cqr->lpm = dasd_path_get_ppm(startdev);
cqr->retries = startdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -4783,7 +4800,8 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
- irb->scsw.tm.fcxs, irb->scsw.tm.schxs,
+ irb->scsw.tm.fcxs,
+ (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
req ? req->intrc : 0);
len += sprintf(page + len, PRINTK_HEADER
" device %s: Failing TCW: %p\n",
@@ -5306,11 +5324,10 @@ static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
*/
static int
dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
- __u32 message_id,
- struct channel_path_desc *desc,
- struct subchannel_id sch_id)
+ __u32 message_id, __u8 lpum)
{
struct dasd_psf_cuir_response *psf_cuir;
+ int pos = pathmask_to_pos(lpum);
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
@@ -5328,11 +5345,10 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
psf_cuir->cc = response;
- if (desc)
- psf_cuir->chpid = desc->chpid;
+ psf_cuir->chpid = device->path[pos].chpid;
psf_cuir->message_id = message_id;
- psf_cuir->cssid = sch_id.cssid;
- psf_cuir->ssid = sch_id.ssid;
+ psf_cuir->cssid = device->path[pos].cssid;
+ psf_cuir->ssid = device->path[pos].ssid;
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->cda = (__u32)(addr_t)psf_cuir;
@@ -5363,20 +5379,19 @@ static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
__u8 lpum,
struct dasd_cuir_message *cuir)
{
- struct dasd_eckd_private *private = device->private;
struct dasd_conf_data *conf_data;
int path, pos;
if (cuir->record_selector == 0)
goto out;
for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
- conf_data = private->path_conf_data[pos];
+ conf_data = device->path[pos].conf_data;
if (conf_data->gneq.record_selector ==
cuir->record_selector)
return conf_data;
}
out:
- return private->path_conf_data[pathmask_to_pos(lpum)];
+ return device->path[pathmask_to_pos(lpum)].conf_data;
}
/*
@@ -5391,7 +5406,6 @@ out:
static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
struct dasd_cuir_message *cuir)
{
- struct dasd_eckd_private *private = device->private;
struct dasd_conf_data *ref_conf_data;
unsigned long bitmask = 0, mask = 0;
struct dasd_conf_data *conf_data;
@@ -5417,11 +5431,10 @@ static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
mask |= cuir->neq_map[1] << 8;
mask |= cuir->neq_map[0] << 16;
- for (path = 0x80; path; path >>= 1) {
+ for (path = 0; path < 8; path++) {
/* initialise data per path */
bitmask = mask;
- pos = pathmask_to_pos(path);
- conf_data = private->path_conf_data[pos];
+ conf_data = device->path[path].conf_data;
pos = 8 - ffs(cuir->ned_map);
ned = (char *) &conf_data->neds[pos];
/* compare reference ned and per path ned */
@@ -5442,33 +5455,29 @@ static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
continue;
/* device and path match the reference values
add path to CUIR scope */
- tbcpm |= path;
+ tbcpm |= 0x80 >> path;
}
return tbcpm;
}
static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
- unsigned long paths,
- struct subchannel_id sch_id, int action)
+ unsigned long paths, int action)
{
- struct channel_path_desc *desc;
int pos;
while (paths) {
/* get position of bit in mask */
- pos = ffs(paths) - 1;
+ pos = 8 - ffs(paths);
/* get channel path descriptor from this position */
- desc = ccw_device_get_chp_desc(device->cdev, 7 - pos);
if (action == CUIR_QUIESCE)
- pr_warn("Service on the storage server caused path "
- "%x.%02x to go offline", sch_id.cssid,
- desc ? desc->chpid : 0);
+ pr_warn("Service on the storage server caused path %x.%02x to go offline",
+ device->path[pos].cssid,
+ device->path[pos].chpid);
else if (action == CUIR_RESUME)
- pr_info("Path %x.%02x is back online after service "
- "on the storage server", sch_id.cssid,
- desc ? desc->chpid : 0);
- kfree(desc);
- clear_bit(pos, &paths);
+ pr_info("Path %x.%02x is back online after service on the storage server",
+ device->path[pos].cssid,
+ device->path[pos].chpid);
+ clear_bit(7 - pos, &paths);
}
}
@@ -5479,16 +5488,16 @@ static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
/* nothing to do if path is not in use */
- if (!(device->path_data.opm & tbcpm))
+ if (!(dasd_path_get_opm(device) & tbcpm))
return 0;
- if (!(device->path_data.opm & ~tbcpm)) {
+ if (!(dasd_path_get_opm(device) & ~tbcpm)) {
/* no path would be left if the CUIR action is taken
return error */
return -EINVAL;
}
/* remove device from operational path mask */
- device->path_data.opm &= ~tbcpm;
- device->path_data.cuirpm |= tbcpm;
+ dasd_path_remove_opm(device, tbcpm);
+ dasd_path_add_cuirpm(device, tbcpm);
return tbcpm;
}
@@ -5501,7 +5510,6 @@ static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
* notify the already set offline devices again
*/
static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
- struct subchannel_id sch_id,
struct dasd_cuir_message *cuir)
{
struct dasd_eckd_private *private = device->private;
@@ -5556,14 +5564,13 @@ static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
}
}
/* notify user about all paths affected by CUIR action */
- dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_QUIESCE);
+ dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
return 0;
out_err:
return tbcpm;
}
static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
- struct subchannel_id sch_id,
struct dasd_cuir_message *cuir)
{
struct dasd_eckd_private *private = device->private;
@@ -5581,8 +5588,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
alias_list) {
tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
paths |= tbcpm;
- if (!(dev->path_data.opm & tbcpm)) {
- dev->path_data.tbvpm |= tbcpm;
+ if (!(dasd_path_get_opm(dev) & tbcpm)) {
+ dasd_path_add_tbvpm(dev, tbcpm);
dasd_schedule_device_bh(dev);
}
}
@@ -5591,8 +5598,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
alias_list) {
tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
paths |= tbcpm;
- if (!(dev->path_data.opm & tbcpm)) {
- dev->path_data.tbvpm |= tbcpm;
+ if (!(dasd_path_get_opm(dev) & tbcpm)) {
+ dasd_path_add_tbvpm(dev, tbcpm);
dasd_schedule_device_bh(dev);
}
}
@@ -5605,8 +5612,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
alias_list) {
tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
paths |= tbcpm;
- if (!(dev->path_data.opm & tbcpm)) {
- dev->path_data.tbvpm |= tbcpm;
+ if (!(dasd_path_get_opm(dev) & tbcpm)) {
+ dasd_path_add_tbvpm(dev, tbcpm);
dasd_schedule_device_bh(dev);
}
}
@@ -5615,14 +5622,14 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
alias_list) {
tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
paths |= tbcpm;
- if (!(dev->path_data.opm & tbcpm)) {
- dev->path_data.tbvpm |= tbcpm;
+ if (!(dasd_path_get_opm(dev) & tbcpm)) {
+ dasd_path_add_tbvpm(dev, tbcpm);
dasd_schedule_device_bh(dev);
}
}
}
/* notify user about all paths affected by CUIR action */
- dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_RESUME);
+ dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
return 0;
}
@@ -5630,38 +5637,31 @@ static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
__u8 lpum)
{
struct dasd_cuir_message *cuir = messages;
- struct channel_path_desc *desc;
- struct subchannel_id sch_id;
- int pos, response;
+ int response;
DBF_DEV_EVENT(DBF_WARNING, device,
"CUIR request: %016llx %016llx %016llx %08x",
((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
((u32 *)cuir)[3]);
- ccw_device_get_schid(device->cdev, &sch_id);
- pos = pathmask_to_pos(lpum);
- desc = ccw_device_get_chp_desc(device->cdev, pos);
if (cuir->code == CUIR_QUIESCE) {
/* quiesce */
- if (dasd_eckd_cuir_quiesce(device, lpum, sch_id, cuir))
+ if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
response = PSF_CUIR_LAST_PATH;
else
response = PSF_CUIR_COMPLETED;
} else if (cuir->code == CUIR_RESUME) {
/* resume */
- dasd_eckd_cuir_resume(device, lpum, sch_id, cuir);
+ dasd_eckd_cuir_resume(device, lpum, cuir);
response = PSF_CUIR_COMPLETED;
} else
response = PSF_CUIR_NOT_SUPPORTED;
dasd_eckd_psf_cuir_response(device, response,
- cuir->message_id, desc, sch_id);
+ cuir->message_id, lpum);
DBF_DEV_EVENT(DBF_WARNING, device,
"CUIR response: %d on message ID %08x", response,
cuir->message_id);
- /* free descriptor copy */
- kfree(desc);
+ /* to make sure there is no attention left, schedule work again */
device->discipline->check_attention(device, lpum);
}
@@ -5708,6 +5708,63 @@ static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
return 0;
}
+static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
+{
+ if (~lpum & dasd_path_get_opm(device)) {
+ dasd_path_add_nohpfpm(device, lpum);
+ dasd_path_remove_opm(device, lpum);
+ dev_err(&device->cdev->dev,
+ "Channel path %02X lost HPF functionality and is disabled\n",
+ lpum);
+ return 1;
+ }
+ return 0;
+}
+
+static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ dev_err(&device->cdev->dev,
+ "High Performance FICON disabled\n");
+ private->fcx_max_data = 0;
+}
+
+static int dasd_eckd_hpf_enabled(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ return private->fcx_max_data ? 1 : 0;
+}
+
+static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
+ struct irb *irb)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ if (!private->fcx_max_data) {
+ /* sanity check for no HPF, the error makes no sense */
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Trying to disable HPF for a non HPF device");
+ return;
+ }
+ if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
+ dasd_eckd_disable_hpf_device(device);
+ } else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
+ if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
+ return;
+ dasd_eckd_disable_hpf_device(device);
+ dasd_path_set_tbvpm(device,
+ dasd_path_get_hpfpm(device));
+ }
+ /*
+ * prevent any new I/O from being started on the device and schedule a
+ * requeue of existing requests
+ */
+ dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
+ dasd_schedule_requeue(device);
+}
+
static struct ccw_driver dasd_eckd_driver = {
.driver = {
.name = "dasd-eckd",
@@ -5776,6 +5833,10 @@ static struct dasd_discipline dasd_eckd_discipline = {
.check_attention = dasd_eckd_check_attention,
.host_access_count = dasd_eckd_host_access_count,
.hosts_print = dasd_hosts_print,
+ .handle_hpf_error = dasd_eckd_handle_hpf_error,
+ .disable_hpf = dasd_eckd_disable_hpf_device,
+ .hpf_enabled = dasd_eckd_hpf_enabled,
+ .reset_path = dasd_eckd_reset_path,
};
static int __init
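A note on the path check introduced in dasd_eckd_disable_hpf_path() above: lpum is a single-bit logical path mask, so ~lpum & dasd_path_get_opm(device) is non-zero exactly when at least one other path is still operational and the failing path can be taken out of service without losing the device. A minimal stand-alone sketch of that bit test (illustrative C, not part of the patch; names and values are made up):

    #include <stdio.h>

    /* Same convention as the dasd_path_* helpers: one bit per channel path,
     * bit 0x80 is channel path 0. */
    static int can_disable_single_path(unsigned char opm, unsigned char lpum)
    {
            /* non-zero iff some path other than lpum is still operational */
            return (~lpum & opm) != 0;
    }

    int main(void)
    {
            /* paths 0 and 2 operational (0xa0), HPF error reported on path 0 (0x80) */
            printf("%d\n", can_disable_single_path(0xa0, 0x80)); /* 1: path 2 remains */
            /* only path 0 operational, error on path 0 */
            printf("%d\n", can_disable_single_path(0x80, 0x80)); /* 0: last path left */
            return 0;
    }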
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 59803626ea36..e2a710c250a5 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -94,6 +94,8 @@
#define FCX_MAX_DATA_FACTOR 65536
#define DASD_ECKD_RCD_DATA_SIZE 256
+#define DASD_ECKD_PATH_THRHLD 256
+#define DASD_ECKD_PATH_INTERVAL 300
/*****************************************************************************
* SECTION: Type Definitions
@@ -535,8 +537,7 @@ struct dasd_eckd_private {
struct dasd_eckd_characteristics rdc_data;
u8 *conf_data;
int conf_len;
- /* per path configuration data */
- struct dasd_conf_data *path_conf_data[8];
+
/* pointers to specific parts in the conf_data */
struct dasd_ned *ned;
struct dasd_sneq *sneq;
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 21ef63cf0960..6c5d671304b4 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -454,20 +454,30 @@ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
*/
int dasd_eer_enable(struct dasd_device *device)
{
- struct dasd_ccw_req *cqr;
+ struct dasd_ccw_req *cqr = NULL;
unsigned long flags;
struct ccw1 *ccw;
+ int rc = 0;
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (device->eer_cqr)
- return 0;
+ goto out;
+ else if (!device->discipline ||
+ strcmp(device->discipline->name, "ECKD"))
+ rc = -EMEDIUMTYPE;
+ else if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
+ rc = -EBUSY;
- if (!device->discipline || strcmp(device->discipline->name, "ECKD"))
- return -EPERM; /* FIXME: -EMEDIUMTYPE ? */
+ if (rc)
+ goto out;
cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
SNSS_DATA_SIZE, device);
- if (IS_ERR(cqr))
- return -ENOMEM;
+ if (IS_ERR(cqr)) {
+ rc = -ENOMEM;
+ cqr = NULL;
+ goto out;
+ }
cqr->startdev = device;
cqr->retries = 255;
@@ -485,15 +495,18 @@ int dasd_eer_enable(struct dasd_device *device)
cqr->status = DASD_CQR_FILLED;
cqr->callback = dasd_eer_snss_cb;
- spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (!device->eer_cqr) {
device->eer_cqr = cqr;
cqr = NULL;
}
+
+out:
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
if (cqr)
dasd_kfree_request(cqr, device);
- return 0;
+
+ return rc;
}
/*
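The dasd_eer_enable() rework above replaces the scattered early returns with one locked section and a single exit label: the checks and the hand-over of the request to device->eer_cqr all happen under the ccw device lock, rc carries the error code, and a request that was not handed over is freed after the lock has been dropped. A minimal sketch of that pattern with hypothetical names (the demo_* types and fields are illustrative only):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct demo_req { int payload; };

    struct demo_dev {
            spinlock_t lock;
            struct demo_req *feature_req;
            int offline;
    };

    static int demo_enable_feature(struct demo_dev *d)
    {
            struct demo_req *r = NULL;
            unsigned long flags;
            int rc = 0;

            spin_lock_irqsave(&d->lock, flags);
            if (d->feature_req)                     /* already enabled, nothing to do */
                    goto out;
            if (d->offline) {
                    rc = -EBUSY;
                    goto out;
            }
            r = kzalloc(sizeof(*r), GFP_ATOMIC);    /* inside a spinlock: no sleeping allocation */
            if (!r) {
                    rc = -ENOMEM;
                    goto out;
            }
            if (!d->feature_req) {                  /* hand the request over to the device */
                    d->feature_req = r;
                    r = NULL;
            }
    out:
            spin_unlock_irqrestore(&d->lock, flags);
            kfree(r);                               /* free the request if it was not handed over */
            return rc;
    }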
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index d138d0116734..113c1c1fa1af 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -96,7 +96,7 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
"default ERP called (%i retries left)",
cqr->retries);
if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
- cqr->lpm = device->path_data.opm;
+ cqr->lpm = dasd_path_get_opm(device);
cqr->status = DASD_CQR_FILLED;
} else {
pr_err("%s: default ERP has run out of retries and failed\n",
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index d7b5b550364b..462cab5d4302 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -168,7 +168,7 @@ dasd_fba_check_characteristics(struct dasd_device *device)
device->default_expires = DASD_EXPIRES;
device->default_retries = FBA_DEFAULT_RETRIES;
- device->path_data.opm = LPM_ANYPATH;
+ dasd_path_set_opm(device, LPM_ANYPATH);
readonly = dasd_device_is_ro(device);
if (readonly)
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 87ff6cef872f..24be210c10e5 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -55,6 +55,7 @@
#include <asm/debug.h>
#include <asm/dasd.h>
#include <asm/idals.h>
+#include <linux/bitops.h>
/* DASD discipline magic */
#define DASD_ECKD_MAGIC 0xC5C3D2C4
@@ -377,6 +378,10 @@ struct dasd_discipline {
int (*check_attention)(struct dasd_device *, __u8);
int (*host_access_count)(struct dasd_device *);
int (*hosts_print)(struct dasd_device *, struct seq_file *);
+ void (*handle_hpf_error)(struct dasd_device *, struct irb *);
+ void (*disable_hpf)(struct dasd_device *);
+ int (*hpf_enabled)(struct dasd_device *);
+ void (*reset_path)(struct dasd_device *, __u8);
};
extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -397,17 +402,31 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer;
#define DASD_EER_STATECHANGE 3
#define DASD_EER_PPRCSUSPEND 4
+/* DASD path handling */
+
+#define DASD_PATH_OPERATIONAL 1
+#define DASD_PATH_TBV 2
+#define DASD_PATH_PP 3
+#define DASD_PATH_NPP 4
+#define DASD_PATH_MISCABLED 5
+#define DASD_PATH_NOHPF 6
+#define DASD_PATH_CUIR 7
+#define DASD_PATH_IFCC 8
+
+#define DASD_THRHLD_MAX 4294967295U
+#define DASD_INTERVAL_MAX 4294967295U
+
struct dasd_path {
- __u8 opm;
- __u8 tbvpm;
- __u8 ppm;
- __u8 npm;
- /* paths that are not used because of a special condition */
- __u8 cablepm; /* miss-cabled */
- __u8 hpfpm; /* the HPF requirements of the other paths are not met */
- __u8 cuirpm; /* CUIR varied offline */
+ unsigned long flags;
+ u8 cssid;
+ u8 ssid;
+ u8 chpid;
+ struct dasd_conf_data *conf_data;
+ atomic_t error_count;
+ unsigned long long errorclk;
};
+
struct dasd_profile_info {
/* legacy part of profile data, as in dasd_profile_info_t */
unsigned int dasd_io_reqs; /* number of requests processed */
@@ -458,7 +477,8 @@ struct dasd_device {
struct dasd_discipline *discipline;
struct dasd_discipline *base_discipline;
void *private;
- struct dasd_path path_data;
+ struct dasd_path path[8];
+ __u8 opm;
/* Device state and target state. */
int state, target;
@@ -483,6 +503,7 @@ struct dasd_device {
struct work_struct reload_device;
struct work_struct kick_validate;
struct work_struct suc_work;
+ struct work_struct requeue_requests;
struct timer_list timer;
debug_info_t *debug_area;
@@ -498,6 +519,9 @@ struct dasd_device {
unsigned long blk_timeout;
+ unsigned long path_thrhld;
+ unsigned long path_interval;
+
struct dentry *debugfs_dentry;
struct dentry *hosts_dentry;
struct dasd_profile profile;
@@ -707,6 +731,7 @@ void dasd_set_target_state(struct dasd_device *, int);
void dasd_kick_device(struct dasd_device *);
void dasd_restore_device(struct dasd_device *);
void dasd_reload_device(struct dasd_device *);
+void dasd_schedule_requeue(struct dasd_device *);
void dasd_add_request_head(struct dasd_ccw_req *);
void dasd_add_request_tail(struct dasd_ccw_req *);
@@ -835,4 +860,410 @@ static inline int dasd_eer_enabled(struct dasd_device *device)
#define dasd_eer_enabled(d) (0)
#endif /* CONFIG_DASD_ERR */
+
+/* DASD path handling functions */
+
+/*
+ * helper functions to modify the bit masks for a given channel path of a device
+ */
+static inline int dasd_path_is_operational(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
+}
+
+static inline int dasd_path_need_verify(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_TBV, &device->path[chp].flags);
+}
+
+static inline void dasd_path_verify(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_TBV, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_verify(struct dasd_device *device, int chp)
+{
+ __clear_bit(DASD_PATH_TBV, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_all_verify(struct dasd_device *device)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ dasd_path_clear_verify(device, chp);
+}
+
+static inline void dasd_path_operational(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
+ device->opm |= (0x80 >> chp);
+}
+
+static inline void dasd_path_nonpreferred(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_NPP, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_nonpreferred(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_NPP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_nonpreferred(struct dasd_device *device,
+ int chp)
+{
+ __clear_bit(DASD_PATH_NPP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_preferred(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_PP, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_preferred(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_PP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_preferred(struct dasd_device *device,
+ int chp)
+{
+ __clear_bit(DASD_PATH_PP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_oper(struct dasd_device *device, int chp)
+{
+ __clear_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
+ device->opm &= ~(0x80 >> chp);
+}
+
+static inline void dasd_path_clear_cable(struct dasd_device *device, int chp)
+{
+ __clear_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
+}
+
+static inline void dasd_path_cuir(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_CUIR, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_cuir(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_CUIR, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_cuir(struct dasd_device *device, int chp)
+{
+ __clear_bit(DASD_PATH_CUIR, &device->path[chp].flags);
+}
+
+static inline void dasd_path_ifcc(struct dasd_device *device, int chp)
+{
+ set_bit(DASD_PATH_IFCC, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_ifcc(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_IFCC, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_ifcc(struct dasd_device *device, int chp)
+{
+ clear_bit(DASD_PATH_IFCC, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_nohpf(struct dasd_device *device, int chp)
+{
+ __clear_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
+}
+
+static inline void dasd_path_miscabled(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_miscabled(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
+}
+
+static inline void dasd_path_nohpf(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_nohpf(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
+}
+
+/*
+ * get functions for path masks
+ * will return a path mask for the given device
+ */
+
+static inline __u8 dasd_path_get_opm(struct dasd_device *device)
+{
+ return device->opm;
+}
+
+static inline __u8 dasd_path_get_tbvpm(struct dasd_device *device)
+{
+ int chp;
+ __u8 tbvpm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_need_verify(device, chp))
+ tbvpm |= 0x80 >> chp;
+ return tbvpm;
+}
+
+static inline __u8 dasd_path_get_nppm(struct dasd_device *device)
+{
+ int chp;
+ __u8 npm = 0x00;
+
+ for (chp = 0; chp < 8; chp++) {
+ if (dasd_path_is_nonpreferred(device, chp))
+ npm |= 0x80 >> chp;
+ }
+ return npm;
+}
+
+static inline __u8 dasd_path_get_ppm(struct dasd_device *device)
+{
+ int chp;
+ __u8 ppm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_is_preferred(device, chp))
+ ppm |= 0x80 >> chp;
+ return ppm;
+}
+
+static inline __u8 dasd_path_get_cablepm(struct dasd_device *device)
+{
+ int chp;
+ __u8 cablepm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_is_miscabled(device, chp))
+ cablepm |= 0x80 >> chp;
+ return cablepm;
+}
+
+static inline __u8 dasd_path_get_cuirpm(struct dasd_device *device)
+{
+ int chp;
+ __u8 cuirpm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_is_cuir(device, chp))
+ cuirpm |= 0x80 >> chp;
+ return cuirpm;
+}
+
+static inline __u8 dasd_path_get_ifccpm(struct dasd_device *device)
+{
+ int chp;
+ __u8 ifccpm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_is_ifcc(device, chp))
+ ifccpm |= 0x80 >> chp;
+ return ifccpm;
+}
+
+static inline __u8 dasd_path_get_hpfpm(struct dasd_device *device)
+{
+ int chp;
+ __u8 hpfpm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_is_nohpf(device, chp))
+ hpfpm |= 0x80 >> chp;
+ return hpfpm;
+}
+
+/*
+ * add functions for path masks
+ * the existing path mask will be extended by the given path mask
+ */
+static inline void dasd_path_add_tbvpm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_verify(device, chp);
+}
+
+static inline __u8 dasd_path_get_notoperpm(struct dasd_device *device)
+{
+ int chp;
+ __u8 nopm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_is_nohpf(device, chp) ||
+ dasd_path_is_ifcc(device, chp) ||
+ dasd_path_is_cuir(device, chp) ||
+ dasd_path_is_miscabled(device, chp))
+ nopm |= 0x80 >> chp;
+ return nopm;
+}
+
+static inline void dasd_path_add_opm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp)) {
+ dasd_path_operational(device, chp);
+ /*
+ * if the path is used
+ * it should not be in one of the negative lists
+ */
+ dasd_path_clear_nohpf(device, chp);
+ dasd_path_clear_cuir(device, chp);
+ dasd_path_clear_cable(device, chp);
+ dasd_path_clear_ifcc(device, chp);
+ }
+}
+
+static inline void dasd_path_add_cablepm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_miscabled(device, chp);
+}
+
+static inline void dasd_path_add_cuirpm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_cuir(device, chp);
+}
+
+static inline void dasd_path_add_ifccpm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_ifcc(device, chp);
+}
+
+static inline void dasd_path_add_nppm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_nonpreferred(device, chp);
+}
+
+static inline void dasd_path_add_nohpfpm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_nohpf(device, chp);
+}
+
+static inline void dasd_path_add_ppm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_preferred(device, chp);
+}
+
+/*
+ * set functions for path masks
+ * the existing path mask will be replaced by the given path mask
+ */
+static inline void dasd_path_set_tbvpm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_verify(device, chp);
+ else
+ dasd_path_clear_verify(device, chp);
+}
+
+static inline void dasd_path_set_opm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++) {
+ dasd_path_clear_oper(device, chp);
+ if (pm & (0x80 >> chp)) {
+ dasd_path_operational(device, chp);
+ /*
+ * if the path is used
+ * it should not be in one of the negative lists
+ */
+ dasd_path_clear_nohpf(device, chp);
+ dasd_path_clear_cuir(device, chp);
+ dasd_path_clear_cable(device, chp);
+ dasd_path_clear_ifcc(device, chp);
+ }
+ }
+}
+
+/*
+ * remove functions for path masks
+ * the bits set in the given path mask will be cleared from the existing path mask
+ */
+static inline void dasd_path_remove_opm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++) {
+ if (pm & (0x80 >> chp))
+ dasd_path_clear_oper(device, chp);
+ }
+}
+
+/*
+ * add the newly available path to the to-be-verified path mask and remove it
+ * from normal operation until it has been verified
+ */
+static inline void dasd_path_available(struct dasd_device *device, int chp)
+{
+ dasd_path_clear_oper(device, chp);
+ dasd_path_verify(device, chp);
+}
+
+static inline void dasd_path_notoper(struct dasd_device *device, int chp)
+{
+ dasd_path_clear_oper(device, chp);
+ dasd_path_clear_preferred(device, chp);
+ dasd_path_clear_nonpreferred(device, chp);
+}
+
+/*
+ * remove all paths from normal operation
+ */
+static inline void dasd_path_no_path(struct dasd_device *device)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ dasd_path_notoper(device, chp);
+
+ dasd_path_clear_all_verify(device);
+}
+
+/* end - path handling */
+
#endif /* DASD_H */
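The struct dasd_path rework above replaces the per-device __u8 masks with one flag word per channel path; the familiar *pm views are rebuilt on demand using the 0x80 >> chp convention. A small stand-alone illustration of that mask convention (plain C, not kernel code):

    #include <stdio.h>

    #define NPATH 8

    /* Mirrors the convention used by the dasd_path_* helpers above:
     * channel path 0 corresponds to mask bit 0x80, path 7 to 0x01. */
    static unsigned char chp_to_mask(int chp)
    {
            return 0x80 >> chp;
    }

    int main(void)
    {
            unsigned char tbvpm = 0;
            int chp;

            /* mark paths 1 and 6 as "to be verified" */
            tbvpm |= chp_to_mask(1);
            tbvpm |= chp_to_mask(6);

            for (chp = 0; chp < NPATH; chp++)
                    if (tbvpm & chp_to_mask(chp))
                            printf("path %d needs verification\n", chp);

            printf("tbvpm = 0x%02x\n", tbvpm);      /* prints 0x42 */
            return 0;
    }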
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 931d10e86837..1b8d825623bd 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -9,7 +9,6 @@
* Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
*/
-#include <linux/module.h>
#include <linux/types.h>
#include <linux/kdev_t.h>
#include <linux/tty.h>
@@ -1215,13 +1214,4 @@ static int __init tty3215_init(void)
tty3215_driver = driver;
return 0;
}
-
-static void __exit tty3215_exit(void)
-{
- tty_unregister_driver(tty3215_driver);
- put_tty_driver(tty3215_driver);
- ccw_driver_unregister(&raw3215_ccw_driver);
-}
-
-module_init(tty3215_init);
-module_exit(tty3215_exit);
+device_initcall(tty3215_init);
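con3215 (like several of the other s390 char drivers below) can only be built into the kernel, so module_init()/module_exit() and the exit path are replaced with a plain device_initcall(). A minimal sketch of the resulting built-in-only init pattern (the demo_* name is hypothetical):

    #include <linux/init.h>
    #include <linux/printk.h>

    /* Hypothetical built-in-only driver: no exit routine, no module macros. */
    static int __init demo_builtin_init(void)
    {
            pr_info("demo driver initialized\n");
            return 0;
    }
    device_initcall(demo_builtin_init);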
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 7a10c56334bb..e1fc7eb043d6 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -59,6 +59,7 @@
typedef unsigned int sclp_cmdw_t;
+#define SCLP_CMDW_READ_CPU_INFO 0x00010001
#define SCLP_CMDW_READ_EVENT_DATA 0x00770005
#define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005
#define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005
@@ -102,6 +103,28 @@ struct init_sccb {
sccb_mask_t sclp_send_mask;
} __attribute__((packed));
+struct read_cpu_info_sccb {
+ struct sccb_header header;
+ u16 nr_configured;
+ u16 offset_configured;
+ u16 nr_standby;
+ u16 offset_standby;
+ u8 reserved[4096 - 16];
+} __attribute__((packed, aligned(PAGE_SIZE)));
+
+static inline void sclp_fill_core_info(struct sclp_core_info *info,
+ struct read_cpu_info_sccb *sccb)
+{
+ char *page = (char *) sccb;
+
+ memset(info, 0, sizeof(*info));
+ info->configured = sccb->nr_configured;
+ info->standby = sccb->nr_standby;
+ info->combined = sccb->nr_configured + sccb->nr_standby;
+ memcpy(&info->core, page + sccb->offset_configured,
+ info->combined * sizeof(struct sclp_core_entry));
+}
+
#define SCLP_HAS_CHP_INFO (sclp.facilities & 0x8000000000000000ULL)
#define SCLP_HAS_CHP_RECONFIG (sclp.facilities & 0x2000000000000000ULL)
#define SCLP_HAS_CPU_INFO (sclp.facilities & 0x0800000000000000ULL)
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index e3fc7539116b..b9c5522b8a68 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -80,33 +80,10 @@ out:
* CPU configuration related functions.
*/
-#define SCLP_CMDW_READ_CPU_INFO 0x00010001
#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
-struct read_cpu_info_sccb {
- struct sccb_header header;
- u16 nr_configured;
- u16 offset_configured;
- u16 nr_standby;
- u16 offset_standby;
- u8 reserved[4096 - 16];
-} __attribute__((packed, aligned(PAGE_SIZE)));
-
-static void sclp_fill_core_info(struct sclp_core_info *info,
- struct read_cpu_info_sccb *sccb)
-{
- char *page = (char *) sccb;
-
- memset(info, 0, sizeof(*info));
- info->configured = sccb->nr_configured;
- info->standby = sccb->nr_standby;
- info->combined = sccb->nr_configured + sccb->nr_standby;
- memcpy(&info->core, page + sccb->offset_configured,
- info->combined * sizeof(struct sclp_core_entry));
-}
-
-int sclp_get_core_info(struct sclp_core_info *info)
+int _sclp_get_core_info(struct sclp_core_info *info)
{
int rc;
struct read_cpu_info_sccb *sccb;
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
index 554eaa1e347d..78a7e4f94721 100644
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -10,7 +10,7 @@
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <asm/compat.h>
@@ -126,4 +126,4 @@ static struct miscdevice sclp_ctl_device = {
.name = "sclp",
.fops = &sclp_ctl_fops,
};
-module_misc_device(sclp_ctl_device);
+builtin_misc_device(sclp_ctl_device);
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index c71df0c7dedc..f8e46c22e641 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -221,6 +221,36 @@ static int __init sclp_set_event_mask(struct init_sccb *sccb,
return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
}
+static struct sclp_core_info sclp_core_info_early __initdata;
+static int sclp_core_info_early_valid __initdata;
+
+static void __init sclp_init_core_info_early(struct read_cpu_info_sccb *sccb)
+{
+ int rc;
+
+ if (!SCLP_HAS_CPU_INFO)
+ return;
+ memset(sccb, 0, sizeof(*sccb));
+ sccb->header.length = sizeof(*sccb);
+ do {
+ rc = sclp_cmd_sync_early(SCLP_CMDW_READ_CPU_INFO, sccb);
+ } while (rc == -EBUSY);
+ if (rc)
+ return;
+ if (sccb->header.response_code != 0x0010)
+ return;
+ sclp_fill_core_info(&sclp_core_info_early, sccb);
+ sclp_core_info_early_valid = 1;
+}
+
+int __init _sclp_get_core_info_early(struct sclp_core_info *info)
+{
+ if (!sclp_core_info_early_valid)
+ return -EIO;
+ *info = sclp_core_info_early;
+ return 0;
+}
+
static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
{
sccb_init_eq_size(sccb);
@@ -293,6 +323,7 @@ void __init sclp_early_detect(void)
void *sccb = &sccb_early;
sclp_facilities_detect(sccb);
+ sclp_init_core_info_early(sccb);
sclp_hsa_size_detect(sccb);
/* Turn off SCLP event notifications. Also save remote masks in the
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 475e470d9768..e4958511168a 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -6,7 +6,6 @@
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
-#include <linux/module.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
@@ -80,5 +79,4 @@ static int __init sclp_quiesce_init(void)
{
return sclp_register(&sclp_quiesce_event);
}
-
-module_init(sclp_quiesce_init);
+device_initcall(sclp_quiesce_init);
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 3c6e174e19b6..9259017a1295 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -7,7 +7,6 @@
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
-#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -573,4 +572,4 @@ sclp_tty_init(void)
sclp_tty_driver = driver;
return 0;
}
-module_init(sclp_tty_init);
+device_initcall(sclp_tty_init);
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index e883063c7258..3167e8581994 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -870,7 +870,7 @@ static int __init vmlogrdr_init(void)
goto cleanup;
for (i=0; i < MAXMINOR; ++i ) {
- sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
+ sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sys_ser[i].buffer) {
rc = -ENOMEM;
break;
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 16992e2a40ad..f771e5e9e26b 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -7,6 +7,7 @@
*
* Copyright IBM Corp. 2003, 2008
* Author(s): Michael Holzheu
+ * License: GPL
*/
#define KMSG_COMPONENT "zdump"
@@ -16,7 +17,6 @@
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
-#include <linux/module.h>
#include <linux/memblock.h>
#include <asm/asm-offsets.h>
@@ -320,7 +320,7 @@ static int __init zcore_init(void)
goto fail;
}
- pr_alert("DETECTED 'S390X (64 bit) OS'\n");
+ pr_alert("The dump process started for a 64-bit operating system\n");
rc = init_cpu_info();
if (rc)
goto fail;
@@ -364,22 +364,4 @@ fail:
diag308(DIAG308_REL_HSA, NULL);
return rc;
}
-
-static void __exit zcore_exit(void)
-{
- debug_unregister(zcore_dbf);
- sclp_sdias_exit();
- free_page((unsigned long) ipl_block);
- debugfs_remove(zcore_hsa_file);
- debugfs_remove(zcore_reipl_file);
- debugfs_remove(zcore_memmap_file);
- debugfs_remove(zcore_dir);
- diag308(DIAG308_REL_HSA, NULL);
-}
-
-MODULE_AUTHOR("Copyright IBM Corp. 2003,2008");
-MODULE_DESCRIPTION("zcore module for zfcpdump support");
-MODULE_LICENSE("GPL");
-
subsys_initcall(zcore_init);
-module_exit(zcore_exit);
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 268aa23afa01..6b6386e9a500 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -30,7 +30,7 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/timex.h> /* get_tod_clock() */
@@ -1389,13 +1389,7 @@ static int __init init_cmf(void)
"%s (mode %s)\n", format_string, detect_string);
return 0;
}
-module_init(init_cmf);
-
-
-MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("channel measurement facility base driver\n"
- "Copyright IBM Corp. 2003\n");
+device_initcall(init_cmf);
EXPORT_SYMBOL_GPL(enable_cmf);
EXPORT_SYMBOL_GPL(disable_cmf);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 3d2b20ee613f..bc099b61394d 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -5,12 +5,14 @@
*
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
+ *
+ * License: GPL
*/
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
@@ -1285,5 +1287,3 @@ void css_driver_unregister(struct css_driver *cdrv)
driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 6a58bc8f46e2..79823ee9c100 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -5,12 +5,14 @@
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * License: GPL
*/
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
@@ -145,7 +147,6 @@ static struct css_device_id io_subchannel_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
{ /* end of list */ },
};
-MODULE_DEVICE_TABLE(css, io_subchannel_ids);
static int io_subchannel_prepare(struct subchannel *sch)
{
@@ -2150,7 +2151,6 @@ int ccw_device_siosl(struct ccw_device *cdev)
}
EXPORT_SYMBOL_GPL(ccw_device_siosl);
-MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 065b1be98e2c..ec497af99dd8 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -13,7 +13,6 @@
*/
enum dev_state {
DEV_STATE_NOT_OPER,
- DEV_STATE_SENSE_PGID,
DEV_STATE_SENSE_ID,
DEV_STATE_OFFLINE,
DEV_STATE_VERIFY,
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 8327d47e08b6..9afb5ce13007 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -1058,12 +1058,6 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
- [DEV_STATE_SENSE_PGID] = {
- [DEV_EVENT_NOTOPER] = ccw_device_request_event,
- [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
- [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
- [DEV_EVENT_VERIFY] = ccw_device_nop,
- },
[DEV_STATE_SENSE_ID] = {
[DEV_EVENT_NOTOPER] = ccw_device_request_event,
[DEV_EVENT_INTERRUPT] = ccw_device_request_event,
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 877d9f601e63..cf8c4ac6323a 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -3,8 +3,10 @@
*
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
+ *
+ * License: GPL
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
@@ -676,7 +678,6 @@ void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
}
EXPORT_SYMBOL_GPL(ccw_device_get_schid);
-MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options_mask);
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear_options);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ed92fb09fc8e..f407b4f9d0ba 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1273,7 +1273,7 @@ static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
return retval;
}
-static int ap_dev_suspend(struct device *dev, pm_message_t state)
+static int ap_dev_suspend(struct device *dev)
{
struct ap_device *ap_dev = to_ap_dev(dev);
@@ -1287,11 +1287,6 @@ static int ap_dev_suspend(struct device *dev, pm_message_t state)
return 0;
}
-static int ap_dev_resume(struct device *dev)
-{
- return 0;
-}
-
static void ap_bus_suspend(void)
{
ap_suspend_flag = 1;
@@ -1356,12 +1351,13 @@ static struct notifier_block ap_power_notifier = {
.notifier_call = ap_power_event,
};
+static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, NULL);
+
static struct bus_type ap_bus_type = {
.name = "ap",
.match = &ap_bus_match,
.uevent = &ap_uevent,
- .suspend = ap_dev_suspend,
- .resume = ap_dev_resume,
+ .pm = &ap_bus_pm_ops,
};
void ap_device_init_reply(struct ap_device *ap_dev,
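The ap_bus change above drops the legacy struct bus_type .suspend/.resume callbacks in favour of a dev_pm_ops table; SIMPLE_DEV_PM_OPS() builds that table from a suspend and a resume callback, and passing NULL for resume means there is nothing to do on wake-up, which is why ap_dev_resume() could be deleted. A small sketch of the same conversion for a hypothetical bus (demo_* names are made up):

    #include <linux/device.h>
    #include <linux/init.h>
    #include <linux/pm.h>

    /* Hypothetical bus that only needs suspend handling. */
    static int demo_dev_suspend(struct device *dev)
    {
            /* quiesce the device here */
            return 0;
    }

    /* NULL resume callback: nothing to do when waking up */
    static SIMPLE_DEV_PM_OPS(demo_bus_pm_ops, demo_dev_suspend, NULL);

    static struct bus_type demo_bus_type = {
            .name = "demo",
            .pm   = &demo_bus_pm_ops,
    };

    static int __init demo_bus_init(void)
    {
            return bus_register(&demo_bus_type);
    }
    device_initcall(demo_bus_init);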
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index c8fed9fa1cca..968a0ab4b398 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -84,8 +84,8 @@ extern void zfcp_fc_link_test_work(struct work_struct *);
extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *);
extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
-extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
-extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
+extern int zfcp_fc_exec_bsg_job(struct bsg_job *);
+extern int zfcp_fc_timeout_bsg_job(struct bsg_job *);
extern void zfcp_fc_sym_name_update(struct work_struct *);
extern unsigned int zfcp_fc_port_scan_backoff(void);
extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 237688af179b..7331eea67435 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/random.h>
+#include <linux/bsg-lib.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include "zfcp_ext.h"
@@ -885,26 +886,30 @@ out_free:
static void zfcp_fc_ct_els_job_handler(void *data)
{
- struct fc_bsg_job *job = data;
+ struct bsg_job *job = data;
struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
struct fc_bsg_reply *jr = job->reply;
jr->reply_payload_rcv_len = job->reply_payload.payload_len;
jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
jr->result = zfcp_ct_els->status ? -EIO : 0;
- job->job_done(job);
+ bsg_job_done(job, jr->result, jr->reply_payload_rcv_len);
}
-static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
+static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct bsg_job *job)
{
u32 preamble_word1;
u8 gs_type;
struct zfcp_adapter *adapter;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_rport *rport = fc_bsg_to_rport(job);
+ struct Scsi_Host *shost;
- preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
+ preamble_word1 = bsg_request->rqst_data.r_ct.preamble_word1;
gs_type = (preamble_word1 & 0xff000000) >> 24;
- adapter = (struct zfcp_adapter *) job->shost->hostdata[0];
+ shost = rport ? rport_to_shost(rport) : fc_bsg_to_shost(job);
+ adapter = (struct zfcp_adapter *) shost->hostdata[0];
switch (gs_type) {
case FC_FST_ALIAS:
@@ -924,7 +929,7 @@ static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
static void zfcp_fc_ct_job_handler(void *data)
{
- struct fc_bsg_job *job = data;
+ struct bsg_job *job = data;
struct zfcp_fc_wka_port *wka_port;
wka_port = zfcp_fc_job_wka_port(job);
@@ -933,11 +938,12 @@ static void zfcp_fc_ct_job_handler(void *data)
zfcp_fc_ct_els_job_handler(data);
}
-static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
+static int zfcp_fc_exec_els_job(struct bsg_job *job,
struct zfcp_adapter *adapter)
{
struct zfcp_fsf_ct_els *els = job->dd_data;
- struct fc_rport *rport = job->rport;
+ struct fc_rport *rport = fc_bsg_to_rport(job);
+ struct fc_bsg_request *bsg_request = job->request;
struct zfcp_port *port;
u32 d_id;
@@ -949,13 +955,13 @@ static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
d_id = port->d_id;
put_device(&port->dev);
} else
- d_id = ntoh24(job->request->rqst_data.h_els.port_id);
+ d_id = ntoh24(bsg_request->rqst_data.h_els.port_id);
els->handler = zfcp_fc_ct_els_job_handler;
return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ);
}
-static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
+static int zfcp_fc_exec_ct_job(struct bsg_job *job,
struct zfcp_adapter *adapter)
{
int ret;
@@ -978,13 +984,15 @@ static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
return ret;
}
-int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
+int zfcp_fc_exec_bsg_job(struct bsg_job *job)
{
struct Scsi_Host *shost;
struct zfcp_adapter *adapter;
struct zfcp_fsf_ct_els *ct_els = job->dd_data;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_rport *rport = fc_bsg_to_rport(job);
- shost = job->rport ? rport_to_shost(job->rport) : job->shost;
+ shost = rport ? rport_to_shost(rport) : fc_bsg_to_shost(job);
adapter = (struct zfcp_adapter *)shost->hostdata[0];
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
@@ -994,7 +1002,7 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
ct_els->resp = job->reply_payload.sg_list;
ct_els->handler_data = job;
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
case FC_BSG_HST_ELS_NOLOGIN:
return zfcp_fc_exec_els_job(job, adapter);
@@ -1006,7 +1014,7 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
}
}
-int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job)
+int zfcp_fc_timeout_bsg_job(struct bsg_job *job)
{
/* hardware tracks timeout, reset bsg timeout to not interfere */
return -EAGAIN;
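The zfcp_fc changes above follow the FC transport switch from struct fc_bsg_job to the generic struct bsg_job: the request and reply are reached via job->request and job->reply, the Scsi_Host via fc_bsg_to_rport()/rport_to_shost() or fc_bsg_to_shost(), and completion is signalled with bsg_job_done() instead of job->job_done(). A condensed sketch of those conventions with hypothetical demo_* helpers:

    #include <linux/bsg-lib.h>
    #include <linux/errno.h>
    #include <scsi/scsi_host.h>
    #include <scsi/scsi_transport_fc.h>

    /* Hypothetical completion helper following the bsg-lib conventions used above. */
    static void demo_bsg_complete(struct bsg_job *job, int status)
    {
            struct fc_bsg_reply *reply = job->reply;

            reply->reply_payload_rcv_len = job->reply_payload.payload_len;
            reply->result = status ? -EIO : 0;
            bsg_job_done(job, reply->result, reply->reply_payload_rcv_len);
    }

    /* Resolve the Scsi_Host for both rport-directed and host-directed jobs. */
    static struct Scsi_Host *demo_bsg_to_shost(struct bsg_job *job)
    {
            struct fc_rport *rport = fc_bsg_to_rport(job);

            return rport ? rport_to_shost(rport) : fc_bsg_to_shost(job);
    }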
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 8688ad4c825f..639ed4e6afd1 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -24,7 +24,7 @@
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/kvm_para.h>
#include <linux/notifier.h>
@@ -235,16 +235,6 @@ static struct airq_info *new_airq_info(void)
return info;
}
-static void destroy_airq_info(struct airq_info *info)
-{
- if (!info)
- return;
-
- unregister_adapter_interrupt(&info->airq);
- airq_iv_release(info->aiv);
- kfree(info);
-}
-
static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
u64 *first, void **airq_info)
{
@@ -1294,7 +1284,6 @@ static struct ccw_device_id virtio_ids[] = {
{ CCW_DEVICE(0x3832, 0) },
{},
};
-MODULE_DEVICE_TABLE(ccw, virtio_ids);
static struct ccw_driver virtio_ccw_driver = {
.driver = {
@@ -1406,14 +1395,4 @@ static int __init virtio_ccw_init(void)
no_auto_parse();
return ccw_driver_register(&virtio_ccw_driver);
}
-module_init(virtio_ccw_init);
-
-static void __exit virtio_ccw_exit(void)
-{
- int i;
-
- ccw_driver_unregister(&virtio_ccw_driver);
- for (i = 0; i < MAX_AIRQ_AREAS; i++)
- destroy_airq_info(airq_areas[i]);
-}
-module_exit(virtio_ccw_exit);
+device_initcall(virtio_ccw_init);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 3e2bdb90813c..dfa93347c752 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -263,6 +263,7 @@ config SCSI_SPI_ATTRS
config SCSI_FC_ATTRS
tristate "FiberChannel Transport Attributes"
depends on SCSI && NET
+ select BLK_DEV_BSGLIB
select SCSI_NETLINK
help
If you wish to export transport-specific information about
@@ -743,40 +744,18 @@ config SCSI_ISCI
control unit found in the Intel(R) C600 series chipset.
config SCSI_GENERIC_NCR5380
- tristate "Generic NCR5380/53c400 SCSI PIO support"
- depends on ISA && SCSI
+ tristate "Generic NCR5380/53c400 SCSI ISA card support"
+ depends on ISA && SCSI && HAS_IOPORT_MAP
select SCSI_SPI_ATTRS
---help---
- This is a driver for the old NCR 53c80 series of SCSI controllers
- on boards using PIO. Most boards such as the Trantor T130 fit this
- category, along with a large number of ISA 8bit controllers shipped
- for free with SCSI scanners. If you have a PAS16, T128 or DMX3191
- you should select the specific driver for that card rather than
- generic 5380 support.
-
- It is explained in section 3.8 of the SCSI-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. If it doesn't work out
- of the box, you may have to change some settings in
- <file:drivers/scsi/g_NCR5380.h>.
+ This is a driver for old ISA card SCSI controllers based on an
+ NCR 5380, 53C80, 53C400, 53C400A, or DTC 436 device.
+ Most boards such as the Trantor T130 fit this category, as do
+ various 8-bit and 16-bit ISA cards bundled with SCSI scanners.
To compile this driver as a module, choose M here: the
module will be called g_NCR5380.
-config SCSI_GENERIC_NCR5380_MMIO
- tristate "Generic NCR5380/53c400 SCSI MMIO support"
- depends on ISA && SCSI
- select SCSI_SPI_ATTRS
- ---help---
- This is a driver for the old NCR 53c80 series of SCSI controllers
- on boards using memory mapped I/O.
- It is explained in section 3.8 of the SCSI-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. If it doesn't work out
- of the box, you may have to change some settings in
- <file:drivers/scsi/g_NCR5380.h>.
-
- To compile this driver as a module, choose M here: the
- module will be called g_NCR5380_mmio.
-
config SCSI_IPS
tristate "IBM ServeRAID support"
depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1520596f54a6..a2d03957cbe2 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -74,7 +74,6 @@ obj-$(CONFIG_SCSI_ISCI) += isci/
obj-$(CONFIG_SCSI_IPS) += ips.o
obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
-obj-$(CONFIG_SCSI_GENERIC_NCR5380_MMIO) += g_NCR5380_mmio.o
obj-$(CONFIG_SCSI_NCR53C406A) += NCR53c406a.o
obj-$(CONFIG_SCSI_NCR_D700) += 53c700.o NCR_D700.o
obj-$(CONFIG_SCSI_NCR_Q720) += NCR_Q720_mod.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 790babc5ef66..d849ffa378b1 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -121,9 +121,10 @@
*
* Either real DMA *or* pseudo DMA may be implemented
*
- * NCR5380_dma_write_setup(instance, src, count) - initialize
- * NCR5380_dma_read_setup(instance, dst, count) - initialize
- * NCR5380_dma_residual(instance); - residual count
+ * NCR5380_dma_xfer_len - determine size of DMA/PDMA transfer
+ * NCR5380_dma_send_setup - execute DMA/PDMA from memory to 5380
+ * NCR5380_dma_recv_setup - execute DMA/PDMA from 5380 to memory
+ * NCR5380_dma_residual - residual byte count
*
* The generic driver is initialized by calling NCR5380_init(instance),
* after setting the appropriate host specific fields and ID. If the
@@ -178,7 +179,7 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
/**
* NCR5380_poll_politely2 - wait for two chip register values
- * @instance: controller to poll
+ * @hostdata: host private data
* @reg1: 5380 register to poll
* @bit1: Bitmask to check
* @val1: Expected value
@@ -195,18 +196,14 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
* Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT.
*/
-static int NCR5380_poll_politely2(struct Scsi_Host *instance,
- int reg1, int bit1, int val1,
- int reg2, int bit2, int val2, int wait)
+static int NCR5380_poll_politely2(struct NCR5380_hostdata *hostdata,
+ unsigned int reg1, u8 bit1, u8 val1,
+ unsigned int reg2, u8 bit2, u8 val2,
+ unsigned long wait)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ unsigned long n = hostdata->poll_loops;
unsigned long deadline = jiffies + wait;
- unsigned long n;
- /* Busy-wait for up to 10 ms */
- n = min(10000U, jiffies_to_usecs(wait));
- n *= hostdata->accesses_per_ms;
- n /= 2000;
do {
if ((NCR5380_read(reg1) & bit1) == val1)
return 0;
@@ -288,6 +285,7 @@ mrs[] = {
static void NCR5380_print(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char status, data, basr, mr, icr, i;
data = NCR5380_read(CURRENT_SCSI_DATA_REG);
@@ -337,6 +335,7 @@ static struct {
static void NCR5380_print_phase(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char status;
int i;
@@ -441,14 +440,14 @@ static void prepare_info(struct Scsi_Host *instance)
struct NCR5380_hostdata *hostdata = shost_priv(instance);
snprintf(hostdata->info, sizeof(hostdata->info),
- "%s, io_port 0x%lx, n_io_port %d, "
- "base 0x%lx, irq %d, "
+ "%s, irq %d, "
+ "io_port 0x%lx, base 0x%lx, "
"can_queue %d, cmd_per_lun %d, "
"sg_tablesize %d, this_id %d, "
"flags { %s%s%s}, "
"options { %s} ",
- instance->hostt->name, instance->io_port, instance->n_io_port,
- instance->base, instance->irq,
+ instance->hostt->name, instance->irq,
+ hostdata->io_port, hostdata->base,
instance->can_queue, instance->cmd_per_lun,
instance->sg_tablesize, instance->this_id,
hostdata->flags & FLAG_DMA_FIXUP ? "DMA_FIXUP " : "",
@@ -482,6 +481,7 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
struct NCR5380_hostdata *hostdata = shost_priv(instance);
int i;
unsigned long deadline;
+ unsigned long accesses_per_ms;
instance->max_lun = 7;
@@ -530,7 +530,8 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
++i;
cpu_relax();
} while (time_is_after_jiffies(deadline));
- hostdata->accesses_per_ms = i / 256;
+ accesses_per_ms = i / 256;
+ hostdata->poll_loops = NCR5380_REG_POLL_TIME * accesses_per_ms / 2;
return 0;
}
@@ -560,7 +561,7 @@ static int NCR5380_maybe_reset_bus(struct Scsi_Host *instance)
case 3:
case 5:
shost_printk(KERN_ERR, instance, "SCSI bus busy, waiting up to five seconds\n");
- NCR5380_poll_politely(instance,
+ NCR5380_poll_politely(hostdata,
STATUS_REG, SR_BSY, 0, 5 * HZ);
break;
case 2:
@@ -871,7 +872,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
NCR5380_read(RESET_PARITY_INTERRUPT_REG);
- transferred = hostdata->dma_len - NCR5380_dma_residual(instance);
+ transferred = hostdata->dma_len - NCR5380_dma_residual(hostdata);
hostdata->dma_len = 0;
data = (unsigned char **)&hostdata->connected->SCp.ptr;
@@ -994,7 +995,7 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
}
handled = 1;
} else {
- shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n");
+ dsprintk(NDEBUG_INTR, instance, "interrupt without IRQ bit\n");
#ifdef SUN3_SCSI_VME
dregs->csr |= CSR_DMA_ENABLE;
#endif
@@ -1075,7 +1076,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
*/
spin_unlock_irq(&hostdata->lock);
- err = NCR5380_poll_politely2(instance, MODE_REG, MR_ARBITRATE, 0,
+ err = NCR5380_poll_politely2(hostdata, MODE_REG, MR_ARBITRATE, 0,
INITIATOR_COMMAND_REG, ICR_ARBITRATION_PROGRESS,
ICR_ARBITRATION_PROGRESS, HZ);
spin_lock_irq(&hostdata->lock);
@@ -1201,7 +1202,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
* selection.
*/
- err = NCR5380_poll_politely(instance, STATUS_REG, SR_BSY, SR_BSY,
+ err = NCR5380_poll_politely(hostdata, STATUS_REG, SR_BSY, SR_BSY,
msecs_to_jiffies(250));
if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
@@ -1247,7 +1248,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
/* Wait for start of REQ/ACK handshake */
- err = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ);
+ err = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ);
spin_lock_irq(&hostdata->lock);
if (err < 0) {
shost_printk(KERN_ERR, instance, "select: REQ timeout\n");
@@ -1318,6 +1319,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
unsigned char *phase, int *count,
unsigned char **data)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char p = *phase, tmp;
int c = *count;
unsigned char *d = *data;
@@ -1336,7 +1338,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
* valid
*/
- if (NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0)
+ if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0)
break;
dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n");
@@ -1381,7 +1383,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
}
- if (NCR5380_poll_politely(instance,
+ if (NCR5380_poll_politely(hostdata,
STATUS_REG, SR_REQ, 0, 5 * HZ) < 0)
break;
@@ -1440,6 +1442,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
static void do_reset(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata __maybe_unused *hostdata = shost_priv(instance);
unsigned long flags;
local_irq_save(flags);
@@ -1462,6 +1465,7 @@ static void do_reset(struct Scsi_Host *instance)
static int do_abort(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char *msgptr, phase, tmp;
int len;
int rc;
@@ -1479,7 +1483,7 @@ static int do_abort(struct Scsi_Host *instance)
* the target sees, so we just handshake.
*/
- rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
+ rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
if (rc < 0)
goto timeout;
@@ -1490,7 +1494,7 @@ static int do_abort(struct Scsi_Host *instance)
if (tmp != PHASE_MSGOUT) {
NCR5380_write(INITIATOR_COMMAND_REG,
ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
- rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 3 * HZ);
+ rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, 3 * HZ);
if (rc < 0)
goto timeout;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
@@ -1575,9 +1579,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
* starting the NCR. This is also the cleaner way for the TT.
*/
if (p & SR_IO)
- result = NCR5380_dma_recv_setup(instance, d, c);
+ result = NCR5380_dma_recv_setup(hostdata, d, c);
else
- result = NCR5380_dma_send_setup(instance, d, c);
+ result = NCR5380_dma_send_setup(hostdata, d, c);
}
/*
@@ -1609,9 +1613,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
* NCR access, else the DMA setup gets trashed!
*/
if (p & SR_IO)
- result = NCR5380_dma_recv_setup(instance, d, c);
+ result = NCR5380_dma_recv_setup(hostdata, d, c);
else
- result = NCR5380_dma_send_setup(instance, d, c);
+ result = NCR5380_dma_send_setup(hostdata, d, c);
}
/* On failure, NCR5380_dma_xxxx_setup() returns a negative int. */
@@ -1678,12 +1682,12 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
* byte.
*/
- if (NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+ if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ, BASR_DRQ, HZ) < 0) {
result = -1;
shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n");
}
- if (NCR5380_poll_politely(instance, STATUS_REG,
+ if (NCR5380_poll_politely(hostdata, STATUS_REG,
SR_REQ, 0, HZ) < 0) {
result = -1;
shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n");
@@ -1694,7 +1698,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
* Wait for the last byte to be sent. If REQ is being asserted for
* the byte we're interested in, we'll ACK it and it will go false.
*/
- if (NCR5380_poll_politely2(instance,
+ if (NCR5380_poll_politely2(hostdata,
BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ,
BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, HZ) < 0) {
result = -1;
@@ -1751,22 +1755,26 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
}
#ifdef CONFIG_SUN3
- if (phase == PHASE_CMDOUT) {
- void *d;
- unsigned long count;
+ if (phase == PHASE_CMDOUT &&
+ sun3_dma_setup_done != cmd) {
+ int count;
if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
- count = cmd->SCp.buffer->length;
- d = sg_virt(cmd->SCp.buffer);
- } else {
- count = cmd->SCp.this_residual;
- d = cmd->SCp.ptr;
+ ++cmd->SCp.buffer;
+ --cmd->SCp.buffers_residual;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
}
- if (sun3_dma_setup_done != cmd &&
- sun3scsi_dma_xfer_len(count, cmd) > 0) {
- sun3scsi_dma_setup(instance, d, count,
- rq_data_dir(cmd->request));
+ count = sun3scsi_dma_xfer_len(hostdata, cmd);
+
+ if (count > 0) {
+ if (rq_data_dir(cmd->request))
+ sun3scsi_dma_send_setup(hostdata,
+ cmd->SCp.ptr, count);
+ else
+ sun3scsi_dma_recv_setup(hostdata,
+ cmd->SCp.ptr, count);
sun3_dma_setup_done = cmd;
}
#ifdef SUN3_SCSI_VME
@@ -1827,7 +1835,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
transfersize = 0;
if (!cmd->device->borken)
- transfersize = NCR5380_dma_xfer_len(instance, cmd, phase);
+ transfersize = NCR5380_dma_xfer_len(hostdata, cmd);
if (transfersize > 0) {
len = transfersize;
@@ -2073,7 +2081,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
} /* switch(phase) */
} else {
spin_unlock_irq(&hostdata->lock);
- NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ);
+ NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ);
spin_lock_irq(&hostdata->lock);
}
}
@@ -2119,7 +2127,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
*/
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
- if (NCR5380_poll_politely(instance,
+ if (NCR5380_poll_politely(hostdata,
STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) {
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
return;
@@ -2130,7 +2138,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
* Wait for target to go into MSGIN.
*/
- if (NCR5380_poll_politely(instance,
+ if (NCR5380_poll_politely(hostdata,
STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) {
do_abort(instance);
return;
@@ -2204,22 +2212,25 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
}
#ifdef CONFIG_SUN3
- {
- void *d;
- unsigned long count;
+ if (sun3_dma_setup_done != tmp) {
+ int count;
if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
- count = tmp->SCp.buffer->length;
- d = sg_virt(tmp->SCp.buffer);
- } else {
- count = tmp->SCp.this_residual;
- d = tmp->SCp.ptr;
+ ++tmp->SCp.buffer;
+ --tmp->SCp.buffers_residual;
+ tmp->SCp.this_residual = tmp->SCp.buffer->length;
+ tmp->SCp.ptr = sg_virt(tmp->SCp.buffer);
}
- if (sun3_dma_setup_done != tmp &&
- sun3scsi_dma_xfer_len(count, tmp) > 0) {
- sun3scsi_dma_setup(instance, d, count,
- rq_data_dir(tmp->request));
+ count = sun3scsi_dma_xfer_len(hostdata, tmp);
+
+ if (count > 0) {
+ if (rq_data_dir(tmp->request))
+ sun3scsi_dma_send_setup(hostdata,
+ tmp->SCp.ptr, count);
+ else
+ sun3scsi_dma_recv_setup(hostdata,
+ tmp->SCp.ptr, count);
sun3_dma_setup_done = tmp;
}
}
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 965d92339455..3c6ce5434449 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -219,27 +219,32 @@
#define FLAG_TOSHIBA_DELAY 128 /* Allow for borken CD-ROMs */
struct NCR5380_hostdata {
- NCR5380_implementation_fields; /* implementation specific */
- struct Scsi_Host *host; /* Host backpointer */
- unsigned char id_mask, id_higher_mask; /* 1 << id, all bits greater */
- unsigned char busy[8]; /* index = target, bit = lun */
- int dma_len; /* requested length of DMA */
- unsigned char last_message; /* last message OUT */
- struct scsi_cmnd *connected; /* currently connected cmnd */
- struct scsi_cmnd *selecting; /* cmnd to be connected */
- struct list_head unissued; /* waiting to be issued */
- struct list_head autosense; /* priority issue queue */
- struct list_head disconnected; /* waiting for reconnect */
- spinlock_t lock; /* protects this struct */
- int flags;
- struct scsi_eh_save ses;
- struct scsi_cmnd *sensing;
+ NCR5380_implementation_fields; /* Board-specific data */
+ u8 __iomem *io; /* Remapped 5380 address */
+ u8 __iomem *pdma_io; /* Remapped PDMA address */
+ unsigned long poll_loops; /* Register polling limit */
+ spinlock_t lock; /* Protects this struct */
+ struct scsi_cmnd *connected; /* Currently connected cmnd */
+ struct list_head disconnected; /* Waiting for reconnect */
+ struct Scsi_Host *host; /* SCSI host backpointer */
+ struct workqueue_struct *work_q; /* SCSI host work queue */
+ struct work_struct main_task; /* Work item for main loop */
+ int flags; /* Board-specific quirks */
+ int dma_len; /* Requested length of DMA */
+ int read_overruns; /* Transfer size reduction for DMA erratum */
+ unsigned long io_port; /* Device IO port */
+ unsigned long base; /* Device base address */
+ struct list_head unissued; /* Waiting to be issued */
+ struct scsi_cmnd *selecting; /* Cmnd to be connected */
+ struct list_head autosense; /* Priority cmnd queue */
+ struct scsi_cmnd *sensing; /* Cmnd needing autosense */
+ struct scsi_eh_save ses; /* Cmnd state saved for EH */
+ unsigned char busy[8]; /* Index = target, bit = lun */
+ unsigned char id_mask; /* 1 << Host ID */
+ unsigned char id_higher_mask; /* All bits above id_mask */
+ unsigned char last_message; /* Last Message Out */
+ unsigned long region_size; /* Size of address/port range */
char info[256];
- int read_overruns; /* number of bytes to cut from a
- * transfer to handle chip overruns */
- struct work_struct main_task;
- struct workqueue_struct *work_q;
- unsigned long accesses_per_ms; /* chip register accesses per ms */
};
#ifdef __KERNEL__
@@ -252,6 +257,9 @@ struct NCR5380_cmd {
#define NCR5380_PIO_CHUNK_SIZE 256
+/* Time limit (ms) to poll registers when IRQs are disabled, e.g. during PDMA */
+#define NCR5380_REG_POLL_TIME 15
+
static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
{
return ((struct scsi_cmnd *)ncmd_ptr) - 1;
@@ -294,14 +302,45 @@ static void NCR5380_reselect(struct Scsi_Host *instance);
static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
-static int NCR5380_poll_politely2(struct Scsi_Host *, int, int, int, int, int, int, int);
+static int NCR5380_poll_politely2(struct NCR5380_hostdata *,
+ unsigned int, u8, u8,
+ unsigned int, u8, u8, unsigned long);
-static inline int NCR5380_poll_politely(struct Scsi_Host *instance,
- int reg, int bit, int val, int wait)
+static inline int NCR5380_poll_politely(struct NCR5380_hostdata *hostdata,
+ unsigned int reg, u8 bit, u8 val,
+ unsigned long wait)
{
- return NCR5380_poll_politely2(instance, reg, bit, val,
+ if ((NCR5380_read(reg) & bit) == val)
+ return 0;
+
+ return NCR5380_poll_politely2(hostdata, reg, bit, val,
reg, bit, val, wait);
}
+static int NCR5380_dma_xfer_len(struct NCR5380_hostdata *,
+ struct scsi_cmnd *);
+static int NCR5380_dma_send_setup(struct NCR5380_hostdata *,
+ unsigned char *, int);
+static int NCR5380_dma_recv_setup(struct NCR5380_hostdata *,
+ unsigned char *, int);
+static int NCR5380_dma_residual(struct NCR5380_hostdata *);
+
+static inline int NCR5380_dma_xfer_none(struct NCR5380_hostdata *hostdata,
+ struct scsi_cmnd *cmd)
+{
+ return 0;
+}
+
+static inline int NCR5380_dma_setup_none(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return 0;
+}
+
+static inline int NCR5380_dma_residual_none(struct NCR5380_hostdata *hostdata)
+{
+ return 0;
+}
+
#endif /* __KERNEL__ */
#endif /* NCR5380_H */
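With the NCR5380 changes above, the register-polling helpers take the hostdata pointer directly and the busy-wait budget is computed once in NCR5380_init() as poll_loops = NCR5380_REG_POLL_TIME * accesses_per_ms / 2, instead of being re-derived on every call. A tiny stand-alone illustration of that arithmetic (the accesses_per_ms value is made up for the example):

    #include <stdio.h>

    #define NCR5380_REG_POLL_TIME 15        /* ms, as defined in NCR5380.h above */

    int main(void)
    {
            /* Illustrative value only: accesses_per_ms is measured at init time. */
            unsigned long accesses_per_ms = 2000;
            /* Same arithmetic as NCR5380_init() above: a budget of register
             * reads that roughly corresponds to NCR5380_REG_POLL_TIME ms. */
            unsigned long poll_loops = NCR5380_REG_POLL_TIME * accesses_per_ms / 2;

            printf("poll_loops = %lu\n", poll_loops);       /* prints 15000 */
            return 0;
    }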
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 969c312de1be..f059c14efa0c 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1246,7 +1246,6 @@ struct aac_dev
u32 max_msix; /* max. MSI-X vectors */
u32 vector_cap; /* MSI-X vector capab.*/
int msi_enabled; /* MSI/MSI-X enabled */
- struct msix_entry msixentry[AAC_MAX_MSIX];
struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */
u8 adapter_shutdown;
u32 handle_pci_error;
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 341ea327ae79..4f56b1003cc7 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -378,16 +378,12 @@ void aac_define_int_mode(struct aac_dev *dev)
if (msi_count > AAC_MAX_MSIX)
msi_count = AAC_MAX_MSIX;
- for (i = 0; i < msi_count; i++)
- dev->msixentry[i].entry = i;
-
if (msi_count > 1 &&
pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) {
min_msix = 2;
- i = pci_enable_msix_range(dev->pdev,
- dev->msixentry,
- min_msix,
- msi_count);
+ i = pci_alloc_irq_vectors(dev->pdev,
+ min_msix, msi_count,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
if (i > 0) {
dev->msi_enabled = 1;
msi_count = i;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 0aeecec1f5ea..9e7551fe4b19 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2043,30 +2043,22 @@ int aac_acquire_irq(struct aac_dev *dev)
int i;
int j;
int ret = 0;
- int cpu;
- cpu = cpumask_first(cpu_online_mask);
if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
for (i = 0; i < dev->max_msix; i++) {
dev->aac_msix[i].vector_no = i;
dev->aac_msix[i].dev = dev;
- if (request_irq(dev->msixentry[i].vector,
+ if (request_irq(pci_irq_vector(dev->pdev, i),
dev->a_ops.adapter_intr,
0, "aacraid", &(dev->aac_msix[i]))) {
printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
dev->name, dev->id, i);
for (j = 0 ; j < i ; j++)
- free_irq(dev->msixentry[j].vector,
+ free_irq(pci_irq_vector(dev->pdev, j),
&(dev->aac_msix[j]));
pci_disable_msix(dev->pdev);
ret = -1;
}
- if (irq_set_affinity_hint(dev->msixentry[i].vector,
- get_cpu_mask(cpu))) {
- printk(KERN_ERR "%s%d: Failed to set IRQ affinity for cpu %d\n",
- dev->name, dev->id, cpu);
- }
- cpu = cpumask_next(cpu, cpu_online_mask);
}
} else {
dev->aac_msix[0].vector_no = 0;
@@ -2096,16 +2088,9 @@ void aac_free_irq(struct aac_dev *dev)
dev->pdev->device == PMC_DEVICE_S8 ||
dev->pdev->device == PMC_DEVICE_S9) {
if (dev->max_msix > 1) {
- for (i = 0; i < dev->max_msix; i++) {
- if (irq_set_affinity_hint(
- dev->msixentry[i].vector, NULL)) {
- printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
- dev->name, dev->id, cpu);
- }
- cpu = cpumask_next(cpu, cpu_online_mask);
- free_irq(dev->msixentry[i].vector,
- &(dev->aac_msix[i]));
- }
+ for (i = 0; i < dev->max_msix; i++)
+ free_irq(pci_irq_vector(dev->pdev, i),
+ &(dev->aac_msix[i]));
} else {
free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 79871f3519ff..e4f3e22fcbd9 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1071,7 +1071,6 @@ static struct scsi_host_template aac_driver_template = {
static void __aac_shutdown(struct aac_dev * aac)
{
int i;
- int cpu;
aac_send_shutdown(aac);
@@ -1087,24 +1086,13 @@ static void __aac_shutdown(struct aac_dev * aac)
kthread_stop(aac->thread);
}
aac_adapter_disable_int(aac);
- cpu = cpumask_first(cpu_online_mask);
if (aac->pdev->device == PMC_DEVICE_S6 ||
aac->pdev->device == PMC_DEVICE_S7 ||
aac->pdev->device == PMC_DEVICE_S8 ||
aac->pdev->device == PMC_DEVICE_S9) {
if (aac->max_msix > 1) {
for (i = 0; i < aac->max_msix; i++) {
- if (irq_set_affinity_hint(
- aac->msixentry[i].vector,
- NULL)) {
- printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
- aac->name,
- aac->id,
- cpu);
- }
- cpu = cpumask_next(cpu,
- cpu_online_mask);
- free_irq(aac->msixentry[i].vector,
+ free_irq(pci_irq_vector(aac->pdev, i),
&(aac->aac_msix[i]));
}
} else {
@@ -1350,7 +1338,7 @@ static void aac_release_resources(struct aac_dev *aac)
aac->pdev->device == PMC_DEVICE_S9) {
if (aac->max_msix > 1) {
for (i = 0; i < aac->max_msix; i++)
- free_irq(aac->msixentry[i].vector,
+ free_irq(pci_irq_vector(aac->pdev, i),
&(aac->aac_msix[i]));
} else {
free_irq(aac->pdev->irq, &(aac->aac_msix[0]));
@@ -1396,13 +1384,13 @@ static int aac_acquire_resources(struct aac_dev *dev)
dev->aac_msix[i].vector_no = i;
dev->aac_msix[i].dev = dev;
- if (request_irq(dev->msixentry[i].vector,
+ if (request_irq(pci_irq_vector(dev->pdev, i),
dev->a_ops.adapter_intr,
0, "aacraid", &(dev->aac_msix[i]))) {
printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
name, instance, i);
for (j = 0 ; j < i ; j++)
- free_irq(dev->msixentry[j].vector,
+ free_irq(pci_irq_vector(dev->pdev, j),
&(dev->aac_msix[j]));
pci_disable_msix(dev->pdev);
goto error_iounmap;
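
All of the aacraid hunks above follow the same conversion: drop the driver-private msix_entry array, let pci_alloc_irq_vectors() allocate the MSI-X vectors (PCI_IRQ_AFFINITY spreads them across CPUs, which is why the manual irq_set_affinity_hint() calls go away), and look vectors up with pci_irq_vector() when requesting or freeing IRQs. A condensed sketch of the idiom, with placeholder names (my_dev, my_irq_handler and MY_MAX_VECS are not from the patch):

#define MY_MAX_VECS 8			/* placeholder vector count */

struct my_ctx { int vector_no; };
struct my_dev { struct my_ctx ctx[MY_MAX_VECS]; };

static int my_setup_irqs(struct my_dev *dev, struct pci_dev *pdev)
{
	int i, nvec, ret;

	/* Replaces the msix_entry[] array + pci_enable_msix_range() dance */
	nvec = pci_alloc_irq_vectors(pdev, 1, MY_MAX_VECS,
				     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		ret = request_irq(pci_irq_vector(pdev, i), my_irq_handler,
				  0, "mydrv", &dev->ctx[i]);
		if (ret)
			goto err_free;
	}
	return nvec;

err_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), &dev->ctx[i]);
	pci_free_irq_vectors(pdev);
	return ret;
}
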
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index febbd83e2ecd..81dd0927246b 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -11030,6 +11030,9 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
ASC_DBG(2, "AdvInitGetConfig()\n");
ret = AdvInitGetConfig(pdev, shost) ? -ENODEV : 0;
+#else
+ share_irq = 0;
+ ret = -ENODEV;
#endif /* CONFIG_PCI */
}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.c b/drivers/scsi/aic7xxx/aicasm/aicasm.c
index 2e3117aa382f..21ac265280bf 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.c
@@ -254,7 +254,7 @@ main(int argc, char *argv[])
argv += optind;
if (argc != 1) {
- fprintf(stderr, "%s: No input file specifiled\n", appname);
+ fprintf(stderr, "%s: No input file specified\n", appname);
usage();
/* NOTREACHED */
}
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 7c713f797535..f2671a8fa7e3 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -228,8 +228,11 @@ static int asd_init_scbs(struct asd_ha_struct *asd_ha)
bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8;
bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
- if (!asd_ha->seq.tc_index_bitmap)
+ if (!asd_ha->seq.tc_index_bitmap) {
+ kfree(asd_ha->seq.tc_index_array);
+ asd_ha->seq.tc_index_array = NULL;
return -ENOMEM;
+ }
spin_lock_init(&seq->tc_index_lock);
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index cf99f8cf4cdd..a254b32eba39 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -629,7 +629,6 @@ struct AdapterControlBlock
struct pci_dev * pdev;
struct Scsi_Host * host;
unsigned long vir2phy_offset;
- struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS];
/* Offset is used in making arc cdb physical to virtual calculations */
uint32_t outbound_int_enable;
uint32_t cdb_phyaddr_hi32;
@@ -671,8 +670,6 @@ struct AdapterControlBlock
/* iop init */
#define ACB_F_ABORT 0x0200
#define ACB_F_FIRMWARE_TRAP 0x0400
- #define ACB_F_MSI_ENABLED 0x1000
- #define ACB_F_MSIX_ENABLED 0x2000
struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
/* used for memory free */
struct list_head ccb_free_list;
@@ -725,7 +722,7 @@ struct AdapterControlBlock
atomic_t rq_map_token;
atomic_t ante_token_value;
uint32_t maxOutstanding;
- int msix_vector_count;
+ int vector_count;
};/* HW_DEVICE_EXTENSION */
/*
*******************************************************************************
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index f0cfb0451757..9e45749d55ed 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -720,51 +720,39 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
static int
arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
{
- int i, j, r;
- struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS];
-
- for (i = 0; i < ARCMST_NUM_MSIX_VECTORS; i++)
- entries[i].entry = i;
- r = pci_enable_msix_range(pdev, entries, 1, ARCMST_NUM_MSIX_VECTORS);
- if (r < 0)
- goto msi_int;
- acb->msix_vector_count = r;
- for (i = 0; i < r; i++) {
- if (request_irq(entries[i].vector,
- arcmsr_do_interrupt, 0, "arcmsr", acb)) {
+ unsigned long flags;
+ int nvec, i;
+
+ nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
+ PCI_IRQ_MSIX);
+ if (nvec > 0) {
+ pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
+ flags = 0;
+ } else {
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1,
+ PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ if (nvec < 1)
+ return FAILED;
+
+ flags = IRQF_SHARED;
+ }
+
+ acb->vector_count = nvec;
+ for (i = 0; i < nvec; i++) {
+ if (request_irq(pci_irq_vector(pdev, i), arcmsr_do_interrupt,
+ flags, "arcmsr", acb)) {
pr_warn("arcmsr%d: request_irq =%d failed!\n",
- acb->host->host_no, entries[i].vector);
- for (j = 0 ; j < i ; j++)
- free_irq(entries[j].vector, acb);
- pci_disable_msix(pdev);
- goto msi_int;
+ acb->host->host_no, pci_irq_vector(pdev, i));
+ goto out_free_irq;
}
- acb->entries[i] = entries[i];
- }
- acb->acb_flags |= ACB_F_MSIX_ENABLED;
- pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
- return SUCCESS;
-msi_int:
- if (pci_enable_msi_exact(pdev, 1) < 0)
- goto legacy_int;
- if (request_irq(pdev->irq, arcmsr_do_interrupt,
- IRQF_SHARED, "arcmsr", acb)) {
- pr_warn("arcmsr%d: request_irq =%d failed!\n",
- acb->host->host_no, pdev->irq);
- pci_disable_msi(pdev);
- goto legacy_int;
- }
- acb->acb_flags |= ACB_F_MSI_ENABLED;
- pr_info("arcmsr%d: msi enabled\n", acb->host->host_no);
- return SUCCESS;
-legacy_int:
- if (request_irq(pdev->irq, arcmsr_do_interrupt,
- IRQF_SHARED, "arcmsr", acb)) {
- pr_warn("arcmsr%d: request_irq = %d failed!\n",
- acb->host->host_no, pdev->irq);
- return FAILED;
}
+
return SUCCESS;
+out_free_irq:
+ while (--i >= 0)
+ free_irq(pci_irq_vector(pdev, i), acb);
+ pci_free_irq_vectors(pdev);
+ return FAILED;
}
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -886,15 +874,9 @@ static void arcmsr_free_irq(struct pci_dev *pdev,
{
int i;
- if (acb->acb_flags & ACB_F_MSI_ENABLED) {
- free_irq(pdev->irq, acb);
- pci_disable_msi(pdev);
- } else if (acb->acb_flags & ACB_F_MSIX_ENABLED) {
- for (i = 0; i < acb->msix_vector_count; i++)
- free_irq(acb->entries[i].vector, acb);
- pci_disable_msix(pdev);
- } else
- free_irq(pdev->irq, acb);
+ for (i = 0; i < acb->vector_count; i++)
+ free_irq(pci_irq_vector(pdev, i), acb);
+ pci_free_irq_vectors(pdev);
}
static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
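
The arcmsr conversion above adds a fallback step on top of the same idiom: try MSI-X first, and only if that fails allocate a single MSI or legacy vector, which is the only case that needs IRQF_SHARED. A sketch of the allocation half in isolation (placeholder names again):

static int my_alloc_vectors(struct pci_dev *pdev, unsigned long *irq_flags)
{
	int nvec;

	*irq_flags = 0;
	nvec = pci_alloc_irq_vectors(pdev, 1, MY_MAX_VECS, PCI_IRQ_MSIX);
	if (nvec < 0) {
		/* Fall back to a single MSI or legacy INTx vector */
		nvec = pci_alloc_irq_vectors(pdev, 1, 1,
					     PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (nvec < 1)
			return -ENODEV;
		*irq_flags = IRQF_SHARED; /* INTx may be shared with other devices */
	}
	return nvec;	/* number of vectors actually granted */
}
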
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index 8e9cfe8f22f5..a87b99c7fb9a 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -14,49 +14,48 @@
#include <scsi/scsi_host.h>
#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
-#define NCR5380_read(reg) cumanascsi_read(instance, reg)
-#define NCR5380_write(reg, value) cumanascsi_write(instance, reg, value)
+#define NCR5380_read(reg) cumanascsi_read(hostdata, reg)
+#define NCR5380_write(reg, value) cumanascsi_write(hostdata, reg, value)
-#define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize)
+#define NCR5380_dma_xfer_len cumanascsi_dma_xfer_len
#define NCR5380_dma_recv_setup cumanascsi_pread
#define NCR5380_dma_send_setup cumanascsi_pwrite
-#define NCR5380_dma_residual(instance) (0)
+#define NCR5380_dma_residual NCR5380_dma_residual_none
#define NCR5380_intr cumanascsi_intr
#define NCR5380_queue_command cumanascsi_queue_command
#define NCR5380_info cumanascsi_info
#define NCR5380_implementation_fields \
- unsigned ctrl; \
- void __iomem *base; \
- void __iomem *dma
+ unsigned ctrl
-#include "../NCR5380.h"
+struct NCR5380_hostdata;
+static u8 cumanascsi_read(struct NCR5380_hostdata *, unsigned int);
+static void cumanascsi_write(struct NCR5380_hostdata *, unsigned int, u8);
-void cumanascsi_setup(char *str, int *ints)
-{
-}
+#include "../NCR5380.h"
#define CTRL 0x16fc
#define STAT 0x2004
#define L(v) (((v)<<16)|((v) & 0x0000ffff))
#define H(v) (((v)>>16)|((v) & 0xffff0000))
-static inline int cumanascsi_pwrite(struct Scsi_Host *host,
+static inline int cumanascsi_pwrite(struct NCR5380_hostdata *hostdata,
unsigned char *addr, int len)
{
unsigned long *laddr;
- void __iomem *dma = priv(host)->dma + 0x2000;
+ u8 __iomem *base = hostdata->io;
+ u8 __iomem *dma = hostdata->pdma_io + 0x2000;
if(!len) return 0;
- writeb(0x02, priv(host)->base + CTRL);
+ writeb(0x02, base + CTRL);
laddr = (unsigned long *)addr;
while(len >= 32)
{
unsigned int status;
unsigned long v;
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(!(status & 0x40))
@@ -75,12 +74,12 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host,
}
addr = (unsigned char *)laddr;
- writeb(0x12, priv(host)->base + CTRL);
+ writeb(0x12, base + CTRL);
while(len > 0)
{
unsigned int status;
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
@@ -90,7 +89,7 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host,
break;
}
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
@@ -101,27 +100,28 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host,
}
}
end:
- writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
+ writeb(hostdata->ctrl | 0x40, base + CTRL);
if (len)
return -1;
return 0;
}
-static inline int cumanascsi_pread(struct Scsi_Host *host,
+static inline int cumanascsi_pread(struct NCR5380_hostdata *hostdata,
unsigned char *addr, int len)
{
unsigned long *laddr;
- void __iomem *dma = priv(host)->dma + 0x2000;
+ u8 __iomem *base = hostdata->io;
+ u8 __iomem *dma = hostdata->pdma_io + 0x2000;
if(!len) return 0;
- writeb(0x00, priv(host)->base + CTRL);
+ writeb(0x00, base + CTRL);
laddr = (unsigned long *)addr;
while(len >= 32)
{
unsigned int status;
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(!(status & 0x40))
@@ -140,12 +140,12 @@ static inline int cumanascsi_pread(struct Scsi_Host *host,
}
addr = (unsigned char *)laddr;
- writeb(0x10, priv(host)->base + CTRL);
+ writeb(0x10, base + CTRL);
while(len > 0)
{
unsigned int status;
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
@@ -155,7 +155,7 @@ static inline int cumanascsi_pread(struct Scsi_Host *host,
break;
}
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
@@ -166,37 +166,45 @@ static inline int cumanascsi_pread(struct Scsi_Host *host,
}
}
end:
- writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
+ writeb(hostdata->ctrl | 0x40, base + CTRL);
if (len)
return -1;
return 0;
}
-static unsigned char cumanascsi_read(struct Scsi_Host *host, unsigned int reg)
+static int cumanascsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
+ struct scsi_cmnd *cmd)
+{
+ return cmd->transfersize;
+}
+
+static u8 cumanascsi_read(struct NCR5380_hostdata *hostdata,
+ unsigned int reg)
{
- void __iomem *base = priv(host)->base;
- unsigned char val;
+ u8 __iomem *base = hostdata->io;
+ u8 val;
writeb(0, base + CTRL);
val = readb(base + 0x2100 + (reg << 2));
- priv(host)->ctrl = 0x40;
+ hostdata->ctrl = 0x40;
writeb(0x40, base + CTRL);
return val;
}
-static void cumanascsi_write(struct Scsi_Host *host, unsigned int reg, unsigned int value)
+static void cumanascsi_write(struct NCR5380_hostdata *hostdata,
+ unsigned int reg, u8 value)
{
- void __iomem *base = priv(host)->base;
+ u8 __iomem *base = hostdata->io;
writeb(0, base + CTRL);
writeb(value, base + 0x2100 + (reg << 2));
- priv(host)->ctrl = 0x40;
+ hostdata->ctrl = 0x40;
writeb(0x40, base + CTRL);
}
@@ -235,11 +243,11 @@ static int cumanascsi1_probe(struct expansion_card *ec,
goto out_release;
}
- priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW),
- ecard_resource_len(ec, ECARD_RES_IOCSLOW));
- priv(host)->dma = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
- ecard_resource_len(ec, ECARD_RES_MEMC));
- if (!priv(host)->base || !priv(host)->dma) {
+ priv(host)->io = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW),
+ ecard_resource_len(ec, ECARD_RES_IOCSLOW));
+ priv(host)->pdma_io = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
+ ecard_resource_len(ec, ECARD_RES_MEMC));
+ if (!priv(host)->io || !priv(host)->pdma_io) {
ret = -ENOMEM;
goto out_unmap;
}
@@ -253,7 +261,7 @@ static int cumanascsi1_probe(struct expansion_card *ec,
NCR5380_maybe_reset_bus(host);
priv(host)->ctrl = 0;
- writeb(0, priv(host)->base + CTRL);
+ writeb(0, priv(host)->io + CTRL);
ret = request_irq(host->irq, cumanascsi_intr, 0,
"CumanaSCSI-1", host);
@@ -275,8 +283,8 @@ static int cumanascsi1_probe(struct expansion_card *ec,
out_exit:
NCR5380_exit(host);
out_unmap:
- iounmap(priv(host)->base);
- iounmap(priv(host)->dma);
+ iounmap(priv(host)->io);
+ iounmap(priv(host)->pdma_io);
scsi_host_put(host);
out_release:
ecard_release_resources(ec);
@@ -287,15 +295,17 @@ static int cumanascsi1_probe(struct expansion_card *ec,
static void cumanascsi1_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
+ void __iomem *base = priv(host)->io;
+ void __iomem *dma = priv(host)->pdma_io;
ecard_set_drvdata(ec, NULL);
scsi_remove_host(host);
free_irq(host->irq, host);
NCR5380_exit(host);
- iounmap(priv(host)->base);
- iounmap(priv(host)->dma);
scsi_host_put(host);
+ iounmap(base);
+ iounmap(dma);
ecard_release_resources(ec);
}
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index a396024a3cae..6be6666534d4 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -16,21 +16,18 @@
#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
-#define NCR5380_read(reg) \
- readb(priv(instance)->base + ((reg) << 2))
-#define NCR5380_write(reg, value) \
- writeb(value, priv(instance)->base + ((reg) << 2))
+#define NCR5380_read(reg) readb(hostdata->io + ((reg) << 2))
+#define NCR5380_write(reg, value) writeb(value, hostdata->io + ((reg) << 2))
-#define NCR5380_dma_xfer_len(instance, cmd, phase) (0)
+#define NCR5380_dma_xfer_len NCR5380_dma_xfer_none
#define NCR5380_dma_recv_setup oakscsi_pread
#define NCR5380_dma_send_setup oakscsi_pwrite
-#define NCR5380_dma_residual(instance) (0)
+#define NCR5380_dma_residual NCR5380_dma_residual_none
#define NCR5380_queue_command oakscsi_queue_command
#define NCR5380_info oakscsi_info
-#define NCR5380_implementation_fields \
- void __iomem *base
+#define NCR5380_implementation_fields /* none */
#include "../NCR5380.h"
@@ -40,10 +37,10 @@
#define STAT ((128 + 16) << 2)
#define DATA ((128 + 8) << 2)
-static inline int oakscsi_pwrite(struct Scsi_Host *instance,
+static inline int oakscsi_pwrite(struct NCR5380_hostdata *hostdata,
unsigned char *addr, int len)
{
- void __iomem *base = priv(instance)->base;
+ u8 __iomem *base = hostdata->io;
printk("writing %p len %d\n",addr, len);
@@ -55,10 +52,11 @@ printk("writing %p len %d\n",addr, len);
return 0;
}
-static inline int oakscsi_pread(struct Scsi_Host *instance,
+static inline int oakscsi_pread(struct NCR5380_hostdata *hostdata,
unsigned char *addr, int len)
{
- void __iomem *base = priv(instance)->base;
+ u8 __iomem *base = hostdata->io;
+
printk("reading %p len %d\n", addr, len);
while(len > 0)
{
@@ -133,15 +131,14 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
goto release;
}
- priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
- ecard_resource_len(ec, ECARD_RES_MEMC));
- if (!priv(host)->base) {
+ priv(host)->io = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
+ ecard_resource_len(ec, ECARD_RES_MEMC));
+ if (!priv(host)->io) {
ret = -ENOMEM;
goto unreg;
}
host->irq = NO_IRQ;
- host->n_io_port = 255;
ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP);
if (ret)
@@ -159,7 +156,7 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
out_exit:
NCR5380_exit(host);
out_unmap:
- iounmap(priv(host)->base);
+ iounmap(priv(host)->io);
unreg:
scsi_host_put(host);
release:
@@ -171,13 +168,14 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
static void oakscsi_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
+ void __iomem *base = priv(host)->io;
ecard_set_drvdata(ec, NULL);
scsi_remove_host(host);
NCR5380_exit(host);
- iounmap(priv(host)->base);
scsi_host_put(host);
+ iounmap(base);
ecard_release_resources(ec);
}
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index a59ad94ea52b..105b35393ce9 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -57,6 +57,9 @@
#define NCR5380_implementation_fields /* none */
+static u8 (*atari_scsi_reg_read)(unsigned int);
+static void (*atari_scsi_reg_write)(unsigned int, u8);
+
#define NCR5380_read(reg) atari_scsi_reg_read(reg)
#define NCR5380_write(reg, value) atari_scsi_reg_write(reg, value)
@@ -64,14 +67,10 @@
#define NCR5380_abort atari_scsi_abort
#define NCR5380_info atari_scsi_info
-#define NCR5380_dma_recv_setup(instance, data, count) \
- atari_scsi_dma_setup(instance, data, count, 0)
-#define NCR5380_dma_send_setup(instance, data, count) \
- atari_scsi_dma_setup(instance, data, count, 1)
-#define NCR5380_dma_residual(instance) \
- atari_scsi_dma_residual(instance)
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
- atari_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO))
+#define NCR5380_dma_xfer_len atari_scsi_dma_xfer_len
+#define NCR5380_dma_recv_setup atari_scsi_dma_recv_setup
+#define NCR5380_dma_send_setup atari_scsi_dma_send_setup
+#define NCR5380_dma_residual atari_scsi_dma_residual
#define NCR5380_acquire_dma_irq(instance) falcon_get_lock(instance)
#define NCR5380_release_dma_irq(instance) falcon_release_lock()
@@ -126,9 +125,6 @@ static inline unsigned long SCSI_DMA_GETADR(void)
static void atari_scsi_fetch_restbytes(void);
-static unsigned char (*atari_scsi_reg_read)(unsigned char reg);
-static void (*atari_scsi_reg_write)(unsigned char reg, unsigned char value);
-
static unsigned long atari_dma_residual, atari_dma_startaddr;
static short atari_dma_active;
/* pointer to the dribble buffer */
@@ -457,15 +453,14 @@ static int __init atari_scsi_setup(char *str)
__setup("atascsi=", atari_scsi_setup);
#endif /* !MODULE */
-
-static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
+static unsigned long atari_scsi_dma_setup(struct NCR5380_hostdata *hostdata,
void *data, unsigned long count,
int dir)
{
unsigned long addr = virt_to_phys(data);
- dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, "
- "dir = %d\n", instance->host_no, data, addr, count, dir);
+ dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, dir = %d\n",
+ hostdata->host->host_no, data, addr, count, dir);
if (!IS_A_TT() && !STRAM_ADDR(addr)) {
/* If we have a non-DMAable address on a Falcon, use the dribble
@@ -522,8 +517,19 @@ static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
return count;
}
+static inline int atari_scsi_dma_recv_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return atari_scsi_dma_setup(hostdata, data, count, 0);
+}
+
+static inline int atari_scsi_dma_send_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return atari_scsi_dma_setup(hostdata, data, count, 1);
+}
-static long atari_scsi_dma_residual(struct Scsi_Host *instance)
+static int atari_scsi_dma_residual(struct NCR5380_hostdata *hostdata)
{
return atari_dma_residual;
}
@@ -564,10 +570,11 @@ static int falcon_classify_cmd(struct scsi_cmnd *cmd)
* the overrun problem, so this question is academic :-)
*/
-static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
- struct scsi_cmnd *cmd, int write_flag)
+static int atari_scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
+ struct scsi_cmnd *cmd)
{
- unsigned long possible_len, limit;
+ int wanted_len = cmd->SCp.this_residual;
+ int possible_len, limit;
if (wanted_len < DMA_MIN_SIZE)
return 0;
@@ -604,7 +611,7 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
* use the dribble buffer and thus can do only STRAM_BUFFER_SIZE bytes.
*/
- if (write_flag) {
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
/* Write operation can always use the DMA, but the transfer size must
* be rounded up to the next multiple of 512 (atari_dma_setup() does
* this).
@@ -644,8 +651,8 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
possible_len = limit;
if (possible_len != wanted_len)
- dprintk(NDEBUG_DMA, "Sorry, must cut DMA transfer size to %ld bytes "
- "instead of %ld\n", possible_len, wanted_len);
+ dprintk(NDEBUG_DMA, "DMA transfer now %d bytes instead of %d\n",
+ possible_len, wanted_len);
return possible_len;
}
@@ -658,26 +665,38 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
* NCR5380_write call these functions via function pointers.
*/
-static unsigned char atari_scsi_tt_reg_read(unsigned char reg)
+static u8 atari_scsi_tt_reg_read(unsigned int reg)
{
return tt_scsi_regp[reg * 2];
}
-static void atari_scsi_tt_reg_write(unsigned char reg, unsigned char value)
+static void atari_scsi_tt_reg_write(unsigned int reg, u8 value)
{
tt_scsi_regp[reg * 2] = value;
}
-static unsigned char atari_scsi_falcon_reg_read(unsigned char reg)
+static u8 atari_scsi_falcon_reg_read(unsigned int reg)
{
- dma_wd.dma_mode_status= (u_short)(0x88 + reg);
- return (u_char)dma_wd.fdc_acces_seccount;
+ unsigned long flags;
+ u8 result;
+
+ reg += 0x88;
+ local_irq_save(flags);
+ dma_wd.dma_mode_status = (u_short)reg;
+ result = (u8)dma_wd.fdc_acces_seccount;
+ local_irq_restore(flags);
+ return result;
}
-static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value)
+static void atari_scsi_falcon_reg_write(unsigned int reg, u8 value)
{
- dma_wd.dma_mode_status = (u_short)(0x88 + reg);
+ unsigned long flags;
+
+ reg += 0x88;
+ local_irq_save(flags);
+ dma_wd.dma_mode_status = (u_short)reg;
dma_wd.fdc_acces_seccount = (u_short)value;
+ local_irq_restore(flags);
}
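
The new local_irq_save()/restore() pairs in the Falcon accessors matter because dma_mode_status and fdc_acces_seccount form an index/data register pair: an interrupt handler touching the same pair between the two steps would redirect the access. The general shape of such an accessor, written generically (the __iomem pointers and readb/writeb are illustrative; the ST DMA registers themselves are plain struct fields):

static u8 indexed_reg_read(void __iomem *index_reg, void __iomem *data_reg,
			   unsigned int reg)
{
	unsigned long flags;
	u8 val;

	local_irq_save(flags);	/* keep the index write and data read atomic */
	writeb(reg, index_reg);
	val = readb(data_reg);
	local_irq_restore(flags);

	return val;
}
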
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index d9239c2d49b1..b5112d6d7e73 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3049,8 +3049,10 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
eq_vaddress = pci_alloc_consistent(phba->pcidev,
num_eq_pages * PAGE_SIZE,
&paddr);
- if (!eq_vaddress)
+ if (!eq_vaddress) {
+ ret = -ENOMEM;
goto create_eq_error;
+ }
mem->va = eq_vaddress;
ret = be_fill_queue(eq, phba->params.num_eq_entries,
@@ -3113,8 +3115,10 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
cq_vaddress = pci_alloc_consistent(phba->pcidev,
num_cq_pages * PAGE_SIZE,
&paddr);
- if (!cq_vaddress)
+ if (!cq_vaddress) {
+ ret = -ENOMEM;
goto create_cq_error;
+ }
ret = be_fill_queue(cq, phba->params.num_cq_entries,
sizeof(struct sol_cqe), cq_vaddress);
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 713745da44c6..0f9fab770339 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -111,20 +111,24 @@ struct bfa_meminfo_s {
struct bfa_mem_kva_s kva_info;
};
-/* BFA memory segment setup macros */
-#define bfa_mem_dma_setup(_meminfo, _dm_ptr, _seg_sz) do { \
- ((bfa_mem_dma_t *)(_dm_ptr))->mem_len = (_seg_sz); \
- if (_seg_sz) \
- list_add_tail(&((bfa_mem_dma_t *)_dm_ptr)->qe, \
- &(_meminfo)->dma_info.qe); \
-} while (0)
+/* BFA memory segment setup helpers */
+static inline void bfa_mem_dma_setup(struct bfa_meminfo_s *meminfo,
+ struct bfa_mem_dma_s *dm_ptr,
+ size_t seg_sz)
+{
+ dm_ptr->mem_len = seg_sz;
+ if (seg_sz)
+ list_add_tail(&dm_ptr->qe, &meminfo->dma_info.qe);
+}
-#define bfa_mem_kva_setup(_meminfo, _kva_ptr, _seg_sz) do { \
- ((bfa_mem_kva_t *)(_kva_ptr))->mem_len = (_seg_sz); \
- if (_seg_sz) \
- list_add_tail(&((bfa_mem_kva_t *)_kva_ptr)->qe, \
- &(_meminfo)->kva_info.qe); \
-} while (0)
+static inline void bfa_mem_kva_setup(struct bfa_meminfo_s *meminfo,
+ struct bfa_mem_kva_s *kva_ptr,
+ size_t seg_sz)
+{
+ kva_ptr->mem_len = seg_sz;
+ if (seg_sz)
+ list_add_tail(&kva_ptr->qe, &meminfo->kva_info.qe);
+}
/* BFA dma memory segments iterator */
#define bfa_mem_dma_sptr(_mod, _i) (&(_mod)->dma_seg[(_i)])
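
Turning bfa_mem_dma_setup()/bfa_mem_kva_setup() into static inlines keeps the call sites unchanged while letting the compiler type-check the meminfo and segment pointers instead of relying on the old casts. A hypothetical call site (segment sizes invented):

static void my_mod_meminfo(struct bfa_meminfo_s *minfo,
			   struct bfa_mem_dma_s *dma_seg,
			   struct bfa_mem_kva_s *kva_seg)
{
	bfa_mem_dma_setup(minfo, dma_seg, 4096);	/* one 4K DMA segment */
	bfa_mem_kva_setup(minfo, kva_seg, 1024);	/* 1K of kernel VA */
}
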
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index d1ad0208dfe7..a9a00169ad91 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3130,11 +3130,12 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
}
static int
-bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
+bfad_im_bsg_vendor_request(struct bsg_job *job)
{
- uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
- struct bfad_im_port_s *im_port =
- (struct bfad_im_port_s *) job->shost->hostdata[0];
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
+ struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
struct bfad_s *bfad = im_port->bfad;
struct request_queue *request_q = job->req->q;
void *payload_kbuf;
@@ -3175,18 +3176,19 @@ bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
/* Fill the BSG job reply data */
job->reply_len = job->reply_payload.payload_len;
- job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
- job->reply->result = rc;
+ bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len;
+ bsg_reply->result = rc;
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
error:
/* free the command buffer */
kfree(payload_kbuf);
out:
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->reply_len = sizeof(uint32_t);
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
return rc;
}
@@ -3312,7 +3314,7 @@ bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
}
int
-bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
+bfad_fcxp_bsg_send(struct bsg_job *job, struct bfad_fcxp *drv_fcxp,
bfa_bsg_fcpt_t *bsg_fcpt)
{
struct bfa_fcxp_s *hal_fcxp;
@@ -3352,28 +3354,29 @@ bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
}
int
-bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
+bfad_im_bsg_els_ct_request(struct bsg_job *job)
{
struct bfa_bsg_data *bsg_data;
- struct bfad_im_port_s *im_port =
- (struct bfad_im_port_s *) job->shost->hostdata[0];
+ struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
struct bfad_s *bfad = im_port->bfad;
bfa_bsg_fcpt_t *bsg_fcpt;
struct bfad_fcxp *drv_fcxp;
struct bfa_fcs_lport_s *fcs_port;
struct bfa_fcs_rport_s *fcs_rport;
- uint32_t command_type = job->request->msgcode;
+ struct fc_bsg_request *bsg_request = job->request;

+ struct fc_bsg_reply *bsg_reply = job->reply;
+ uint32_t command_type = bsg_request->msgcode;
unsigned long flags;
struct bfad_buf_info *rsp_buf_info;
void *req_kbuf = NULL, *rsp_kbuf = NULL;
int rc = -EINVAL;
job->reply_len = sizeof(uint32_t); /* Atleast uint32_t reply_len */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* Get the payload passed in from userspace */
- bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
- sizeof(struct fc_bsg_request));
+ bsg_data = (struct bfa_bsg_data *) (((char *)bsg_request) +
+ sizeof(struct fc_bsg_request));
if (bsg_data == NULL)
goto out;
@@ -3517,13 +3520,13 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
/* fill the job->reply data */
if (drv_fcxp->req_status == BFA_STATUS_OK) {
job->reply_len = drv_fcxp->rsp_len;
- job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
- job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ bsg_reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
+ bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
} else {
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sizeof(struct fc_bsg_ctels_reply);
job->reply_len = sizeof(uint32_t);
- job->reply->reply_data.ctels_reply.status =
+ bsg_reply->reply_data.ctels_reply.status =
FC_CTELS_STATUS_REJECT;
}
@@ -3549,20 +3552,23 @@ out_free_mem:
kfree(bsg_fcpt);
kfree(drv_fcxp);
out:
- job->reply->result = rc;
+ bsg_reply->result = rc;
if (rc == BFA_STATUS_OK)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
int
-bfad_im_bsg_request(struct fc_bsg_job *job)
+bfad_im_bsg_request(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t rc = BFA_STATUS_OK;
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_HST_VENDOR:
/* Process BSG HST Vendor requests */
rc = bfad_im_bsg_vendor_request(job);
@@ -3575,8 +3581,8 @@ bfad_im_bsg_request(struct fc_bsg_job *job)
rc = bfad_im_bsg_els_ct_request(job);
break;
default:
- job->reply->result = rc = -EINVAL;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = rc = -EINVAL;
+ bsg_reply->reply_payload_rcv_len = 0;
break;
}
@@ -3584,7 +3590,7 @@ bfad_im_bsg_request(struct fc_bsg_job *job)
}
int
-bfad_im_bsg_timeout(struct fc_bsg_job *job)
+bfad_im_bsg_timeout(struct bsg_job *job)
{
/* Don't complete the BSG job request - return -EAGAIN
* to reset bsg job timeout : for ELS/CT pass thru we
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 836fdc221edd..c81ec2a77ef5 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -166,8 +166,8 @@ extern struct device_attribute *bfad_im_vport_attrs[];
irqreturn_t bfad_intx(int irq, void *dev_id);
-int bfad_im_bsg_request(struct fc_bsg_job *job);
-int bfad_im_bsg_timeout(struct fc_bsg_job *job);
+int bfad_im_bsg_request(struct bsg_job *job);
+int bfad_im_bsg_timeout(struct bsg_job *job);
/*
* Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the
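
The fc_bsg_job to bsg_job conversion above boils down to three moves: fetch the request/reply pointers up front, use fc_bsg_to_shost()/shost_priv() instead of poking job->shost->hostdata, and complete via bsg_job_done(). A skeletal handler showing that shape (my_port and my_handle_vendor_cmd are hypothetical):

static int my_bsg_vendor_request(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct my_port *port = shost_priv(fc_bsg_to_shost(job));
	uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
	int rc;

	rc = my_handle_vendor_cmd(port, vendor_cmd);	/* placeholder */

	job->reply_len = job->reply_payload.payload_len;
	bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len;
	bsg_reply->result = rc;
	if (!rc)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}
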
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f9ddb6156f14..0990130821fa 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -970,7 +970,6 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
sizeof(struct libfc_function_template));
fc_elsct_init(lport);
fc_exch_init(lport);
- fc_rport_init(lport);
fc_disc_init(lport);
fc_disc_config(lport, lport);
return 0;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 08ec318afb99..739bfb62aff6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -80,7 +80,6 @@ static void bnx2fc_offload_session(struct fcoe_port *port,
struct bnx2fc_rport *tgt,
struct fc_rport_priv *rdata)
{
- struct fc_lport *lport = rdata->local_port;
struct fc_rport *rport = rdata->rport;
struct bnx2fc_interface *interface = port->priv;
struct bnx2fc_hba *hba = interface->hba;
@@ -160,7 +159,7 @@ ofld_err:
tgt_init_err:
if (tgt->fcoe_conn_id != -1)
bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
}
void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 4655a9f9dcea..9e6f647ff1c1 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1411,7 +1411,7 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
if (csk->atid < 0) {
pr_err("%s, NO atid available.\n", ndev->name);
- return -EINVAL;
+ goto rel_resource_without_clip;
}
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 6e6815545a71..0e9de5d62da2 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -19,6 +19,7 @@
#include <linux/rwsem.h>
#include <linux/types.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
extern const struct file_operations cxlflash_cxl_fops;
@@ -62,11 +63,6 @@ static inline void check_sizes(void)
/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */
#define CMD_BUFSIZE SIZE_4K
-/* flags in IOA status area for host use */
-#define B_DONE 0x01
-#define B_ERROR 0x02 /* set with B_DONE */
-#define B_TIMEOUT 0x04 /* set with B_DONE & B_ERROR */
-
enum cxlflash_lr_state {
LINK_RESET_INVALID,
LINK_RESET_REQUIRED,
@@ -132,12 +128,9 @@ struct cxlflash_cfg {
struct afu_cmd {
struct sisl_ioarcb rcb; /* IOARCB (cache line aligned) */
struct sisl_ioasa sa; /* IOASA must follow IOARCB */
- spinlock_t slock;
- struct completion cevent;
- char *buf; /* per command buffer */
struct afu *parent;
- int slot;
- atomic_t free;
+ struct scsi_cmnd *scp;
+ struct completion cevent;
u8 cmd_tmf:1;
@@ -147,19 +140,31 @@ struct afu_cmd {
*/
} __aligned(cache_line_size());
+static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc)
+{
+ return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
+}
+
+static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
+{
+ struct afu_cmd *afuc = sc_to_afuc(sc);
+
+ memset(afuc, 0, sizeof(*afuc));
+ return afuc;
+}
+
struct afu {
/* Stuff requiring alignment go first. */
u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */
- /*
- * Command & data for AFU commands.
- */
- struct afu_cmd cmd[CXLFLASH_NUM_CMDS];
/* Beware of alignment till here. Preferably introduce new
* fields after this point
*/
+ int (*send_cmd)(struct afu *, struct afu_cmd *);
+ void (*context_reset)(struct afu_cmd *);
+
/* AFU HW */
struct cxl_ioctl_start_work work;
struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */
@@ -173,10 +178,10 @@ struct afu {
u64 *hrrq_end;
u64 *hrrq_curr;
bool toggle;
- bool read_room;
- atomic64_t room;
+ atomic_t cmds_active; /* Number of currently active AFU commands */
+ s64 room;
+ spinlock_t rrin_slock; /* Serializes rrin queuing and cmd_room updates */
u64 hb;
- u32 cmd_couts; /* Number of command checkouts */
u32 internal_lun; /* User-desired LUN mode for this AFU */
char version[16];
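
sc_to_afuc()/sc_to_afucz() depend on the SCSI midlayer allocating per-command driver data next to each scsi_cmnd: the host template's .cmd_size (set in main.c below) reserves sizeof(struct afu_cmd) plus alignment slack, and PTR_ALIGN() then recovers a properly aligned command from scsi_cmd_priv(). A compressed illustration of that contract with a hypothetical driver (names and the 64-byte alignment are placeholders):

struct my_cmd {
	u64 hw_descriptor;
} __aligned(64);		/* pretend the hardware wants 64-byte alignment */

static struct my_cmd *sc_to_mycmd(struct scsi_cmnd *sc)
{
	/* scsi_cmd_priv() is only naturally aligned, so over-allocate
	 * via .cmd_size and align by hand, as cxlflash does above. */
	return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct my_cmd));
}

static struct scsi_host_template my_template = {
	.name		= "mydrv",
	.cmd_size	= sizeof(struct my_cmd) + __alignof__(struct my_cmd) - 1,
	/* ...remaining mandatory fields omitted... */
};
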
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
index a0923cade6f3..6c318db90c85 100644
--- a/drivers/scsi/cxlflash/lunmgt.c
+++ b/drivers/scsi/cxlflash/lunmgt.c
@@ -254,8 +254,14 @@ int cxlflash_manage_lun(struct scsi_device *sdev,
if (lli->parent->mode != MODE_NONE)
rc = -EBUSY;
else {
+ /*
+ * Clean up local LUN for this port and reset table
+ * tracking when no more references exist.
+ */
sdev->hostdata = NULL;
lli->port_sel &= ~CHAN2PORT(chan);
+ if (lli->port_sel == 0U)
+ lli->in_table = false;
}
}
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index b301655f91cd..b17ebf6d0a7e 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -35,67 +35,6 @@ MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
/**
- * cmd_checkout() - checks out an AFU command
- * @afu: AFU to checkout from.
- *
- * Commands are checked out in a round-robin fashion. Note that since
- * the command pool is larger than the hardware queue, the majority of
- * times we will only loop once or twice before getting a command. The
- * buffer and CDB within the command are initialized (zeroed) prior to
- * returning.
- *
- * Return: The checked out command or NULL when command pool is empty.
- */
-static struct afu_cmd *cmd_checkout(struct afu *afu)
-{
- int k, dec = CXLFLASH_NUM_CMDS;
- struct afu_cmd *cmd;
-
- while (dec--) {
- k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));
-
- cmd = &afu->cmd[k];
-
- if (!atomic_dec_if_positive(&cmd->free)) {
- pr_devel("%s: returning found index=%d cmd=%p\n",
- __func__, cmd->slot, cmd);
- memset(cmd->buf, 0, CMD_BUFSIZE);
- memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
- return cmd;
- }
- }
-
- return NULL;
-}
-
-/**
- * cmd_checkin() - checks in an AFU command
- * @cmd: AFU command to checkin.
- *
- * Safe to pass commands that have already been checked in. Several
- * internal tracking fields are reset as part of the checkin. Note
- * that these are intentionally reset prior to toggling the free bit
- * to avoid clobbering values in the event that the command is checked
- * out right away.
- */
-static void cmd_checkin(struct afu_cmd *cmd)
-{
- cmd->rcb.scp = NULL;
- cmd->rcb.timeout = 0;
- cmd->sa.ioasc = 0;
- cmd->cmd_tmf = false;
- cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */
-
- if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
- pr_err("%s: Freeing cmd (%d) that is not in use!\n",
- __func__, cmd->slot);
- return;
- }
-
- pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
-}
-
-/**
* process_cmd_err() - command error handler
* @cmd: AFU command that experienced the error.
* @scp: SCSI command associated with the AFU command in error.
@@ -212,7 +151,7 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
*
* Prepares and submits command that has either completed or timed out to
* the SCSI stack. Checks AFU command back into command pool for non-internal
- * (rcb.scp populated) commands.
+ * (cmd->scp populated) commands.
*/
static void cmd_complete(struct afu_cmd *cmd)
{
@@ -222,19 +161,14 @@ static void cmd_complete(struct afu_cmd *cmd)
struct cxlflash_cfg *cfg = afu->parent;
bool cmd_is_tmf;
- spin_lock_irqsave(&cmd->slock, lock_flags);
- cmd->sa.host_use_b[0] |= B_DONE;
- spin_unlock_irqrestore(&cmd->slock, lock_flags);
-
- if (cmd->rcb.scp) {
- scp = cmd->rcb.scp;
+ if (cmd->scp) {
+ scp = cmd->scp;
if (unlikely(cmd->sa.ioasc))
process_cmd_err(cmd, scp);
else
scp->result = (DID_OK << 16);
cmd_is_tmf = cmd->cmd_tmf;
- cmd_checkin(cmd); /* Don't use cmd after here */
pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
"ioasc=%d\n", __func__, scp, scp->result,
@@ -254,49 +188,19 @@ static void cmd_complete(struct afu_cmd *cmd)
}
/**
- * context_reset() - timeout handler for AFU commands
+ * context_reset_ioarrin() - reset command owner context via IOARRIN register
* @cmd: AFU command that timed out.
- *
- * Sends a reset to the AFU.
*/
-static void context_reset(struct afu_cmd *cmd)
+static void context_reset_ioarrin(struct afu_cmd *cmd)
{
int nretry = 0;
u64 rrin = 0x1;
- u64 room = 0;
struct afu *afu = cmd->parent;
- ulong lock_flags;
+ struct cxlflash_cfg *cfg = afu->parent;
+ struct device *dev = &cfg->dev->dev;
pr_debug("%s: cmd=%p\n", __func__, cmd);
- spin_lock_irqsave(&cmd->slock, lock_flags);
-
- /* Already completed? */
- if (cmd->sa.host_use_b[0] & B_DONE) {
- spin_unlock_irqrestore(&cmd->slock, lock_flags);
- return;
- }
-
- cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
- spin_unlock_irqrestore(&cmd->slock, lock_flags);
-
- /*
- * We really want to send this reset at all costs, so spread
- * out wait time on successive retries for available room.
- */
- do {
- room = readq_be(&afu->host_map->cmd_room);
- atomic64_set(&afu->room, room);
- if (room)
- goto write_rrin;
- udelay(1 << nretry);
- } while (nretry++ < MC_ROOM_RETRY_CNT);
-
- pr_err("%s: no cmd_room to send reset\n", __func__);
- return;
-
-write_rrin:
- nretry = 0;
writeq_be(rrin, &afu->host_map->ioarrin);
do {
rrin = readq_be(&afu->host_map->ioarrin);
@@ -305,93 +209,81 @@ write_rrin:
/* Double delay each time */
udelay(1 << nretry);
} while (nretry++ < MC_ROOM_RETRY_CNT);
+
+ dev_dbg(dev, "%s: returning rrin=0x%016llX nretry=%d\n",
+ __func__, rrin, nretry);
}
/**
- * send_cmd() - sends an AFU command
+ * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
* @afu: AFU associated with the host.
* @cmd: AFU command to send.
*
* Return:
* 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
*/
-static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
+static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
- int nretry = 0;
int rc = 0;
- u64 room;
- long newval;
+ s64 room;
+ ulong lock_flags;
/*
- * This routine is used by critical users such an AFU sync and to
- * send a task management function (TMF). Thus we want to retry a
- * bit before returning an error. To avoid the performance penalty
- * of MMIO, we spread the update of 'room' over multiple commands.
+ * To avoid the performance penalty of MMIO, spread the update of
+ * 'room' over multiple commands.
*/
-retry:
- newval = atomic64_dec_if_positive(&afu->room);
- if (!newval) {
- do {
- room = readq_be(&afu->host_map->cmd_room);
- atomic64_set(&afu->room, room);
- if (room)
- goto write_ioarrin;
- udelay(1 << nretry);
- } while (nretry++ < MC_ROOM_RETRY_CNT);
-
- dev_err(dev, "%s: no cmd_room to send 0x%X\n",
- __func__, cmd->rcb.cdb[0]);
-
- goto no_room;
- } else if (unlikely(newval < 0)) {
- /* This should be rare. i.e. Only if two threads race and
- * decrement before the MMIO read is done. In this case
- * just benefit from the other thread having updated
- * afu->room.
- */
- if (nretry++ < MC_ROOM_RETRY_CNT) {
- udelay(1 << nretry);
- goto retry;
+ spin_lock_irqsave(&afu->rrin_slock, lock_flags);
+ if (--afu->room < 0) {
+ room = readq_be(&afu->host_map->cmd_room);
+ if (room <= 0) {
+ dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
+ "0x%02X, room=0x%016llX\n",
+ __func__, cmd->rcb.cdb[0], room);
+ afu->room = 0;
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
}
-
- goto no_room;
+ afu->room = room - 1;
}
-write_ioarrin:
writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
+ spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
return rc;
-
-no_room:
- afu->read_room = true;
- kref_get(&cfg->afu->mapcount);
- schedule_work(&cfg->work_q);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
}
/**
* wait_resp() - polls for a response or timeout to a sent AFU command
* @afu: AFU associated with the host.
* @cmd: AFU command that was sent.
+ *
+ * Return:
+ * 0 on success, -1 on timeout/error
*/
-static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
+static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
+ int rc = 0;
ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
- if (!timeout)
- context_reset(cmd);
+ if (!timeout) {
+ afu->context_reset(cmd);
+ rc = -1;
+ }
- if (unlikely(cmd->sa.ioasc != 0))
+ if (unlikely(cmd->sa.ioasc != 0)) {
pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
"scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
cmd->sa.rc.fc_rc);
+ rc = -1;
+ }
+
+ return rc;
}
/**
@@ -405,24 +297,15 @@ static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
*/
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
- struct afu_cmd *cmd;
-
u32 port_sel = scp->device->channel + 1;
- short lflag = 0;
struct Scsi_Host *host = scp->device->host;
struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+ struct afu_cmd *cmd = sc_to_afucz(scp);
struct device *dev = &cfg->dev->dev;
ulong lock_flags;
int rc = 0;
ulong to;
- cmd = cmd_checkout(afu);
- if (unlikely(!cmd)) {
- dev_err(dev, "%s: could not get a free command\n", __func__);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
-
/* When Task Management Function is active do not send another */
spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
if (cfg->tmf_active)
@@ -430,28 +313,23 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
!cfg->tmf_active,
cfg->tmf_slock);
cfg->tmf_active = true;
- cmd->cmd_tmf = true;
spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
+ cmd->scp = scp;
+ cmd->parent = afu;
+ cmd->cmd_tmf = true;
+
cmd->rcb.ctx_id = afu->ctx_hndl;
+ cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
cmd->rcb.port_sel = port_sel;
cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
-
- lflag = SISL_REQ_FLAGS_TMF_CMD;
-
cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
- SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
-
- /* Stash the scp in the reserved field, for reuse during interrupt */
- cmd->rcb.scp = scp;
-
- /* Copy the CDB from the cmd passed in */
+ SISL_REQ_FLAGS_SUP_UNDERRUN |
+ SISL_REQ_FLAGS_TMF_CMD);
memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
- /* Send the command */
- rc = send_cmd(afu, cmd);
+ rc = afu->send_cmd(afu, cmd);
if (unlikely(rc)) {
- cmd_checkin(cmd);
spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
cfg->tmf_active = false;
spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
@@ -507,12 +385,12 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
- struct afu_cmd *cmd;
+ struct afu_cmd *cmd = sc_to_afucz(scp);
+ struct scatterlist *sg = scsi_sglist(scp);
u32 port_sel = scp->device->channel + 1;
- int nseg, i, ncount;
- struct scatterlist *sg;
+ u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
ulong lock_flags;
- short lflag = 0;
+ int nseg = 0;
int rc = 0;
int kref_got = 0;
@@ -552,55 +430,38 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
break;
}
- cmd = cmd_checkout(afu);
- if (unlikely(!cmd)) {
- dev_err(dev, "%s: could not get a free command\n", __func__);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
-
kref_get(&cfg->afu->mapcount);
kref_got = 1;
+ if (likely(sg)) {
+ nseg = scsi_dma_map(scp);
+ if (unlikely(nseg < 0)) {
+ dev_err(dev, "%s: Fail DMA map!\n", __func__);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ cmd->rcb.data_len = sg_dma_len(sg);
+ cmd->rcb.data_ea = sg_dma_address(sg);
+ }
+
+ cmd->scp = scp;
+ cmd->parent = afu;
+
cmd->rcb.ctx_id = afu->ctx_hndl;
+ cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
cmd->rcb.port_sel = port_sel;
cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
if (scp->sc_data_direction == DMA_TO_DEVICE)
- lflag = SISL_REQ_FLAGS_HOST_WRITE;
- else
- lflag = SISL_REQ_FLAGS_HOST_READ;
-
- cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
- SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
-
- /* Stash the scp in the reserved field, for reuse during interrupt */
- cmd->rcb.scp = scp;
-
- nseg = scsi_dma_map(scp);
- if (unlikely(nseg < 0)) {
- dev_err(dev, "%s: Fail DMA map! nseg=%d\n",
- __func__, nseg);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
+ req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
- ncount = scsi_sg_count(scp);
- scsi_for_each_sg(scp, sg, ncount, i) {
- cmd->rcb.data_len = sg_dma_len(sg);
- cmd->rcb.data_ea = sg_dma_address(sg);
- }
-
- /* Copy the CDB from the scsi_cmnd passed in */
+ cmd->rcb.req_flags = req_flags;
memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
- /* Send the command */
- rc = send_cmd(afu, cmd);
- if (unlikely(rc)) {
- cmd_checkin(cmd);
+ rc = afu->send_cmd(afu, cmd);
+ if (unlikely(rc))
scsi_dma_unmap(scp);
- }
-
out:
if (kref_got)
kref_put(&afu->mapcount, afu_unmap);
@@ -628,17 +489,9 @@ static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
*/
static void free_mem(struct cxlflash_cfg *cfg)
{
- int i;
- char *buf = NULL;
struct afu *afu = cfg->afu;
if (cfg->afu) {
- for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
- buf = afu->cmd[i].buf;
- if (!((u64)buf & (PAGE_SIZE - 1)))
- free_page((ulong)buf);
- }
-
free_pages((ulong)afu, get_order(sizeof(struct afu)));
cfg->afu = NULL;
}
@@ -650,30 +503,16 @@ static void free_mem(struct cxlflash_cfg *cfg)
*
* Safe to call with AFU in a partially allocated/initialized state.
*
- * Cleans up all state associated with the command queue, and unmaps
+ * Waits for any active internal AFU commands to time out and then unmaps
* the MMIO space.
- *
- * - complete() will take care of commands we initiated (they'll be checked
- * in as part of the cleanup that occurs after the completion)
- *
- * - cmd_checkin() will take care of entries that we did not initiate and that
- * have not (and will not) complete because they are sitting on a [now stale]
- * hardware queue
*/
static void stop_afu(struct cxlflash_cfg *cfg)
{
- int i;
struct afu *afu = cfg->afu;
- struct afu_cmd *cmd;
if (likely(afu)) {
- for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
- cmd = &afu->cmd[i];
- complete(&cmd->cevent);
- if (!atomic_read(&cmd->free))
- cmd_checkin(cmd);
- }
-
+ while (atomic_read(&afu->cmds_active))
+ ssleep(1);
if (likely(afu->afu_map)) {
cxl_psa_unmap((void __iomem *)afu->afu_map);
afu->afu_map = NULL;
@@ -886,8 +725,6 @@ static void cxlflash_remove(struct pci_dev *pdev)
static int alloc_mem(struct cxlflash_cfg *cfg)
{
int rc = 0;
- int i;
- char *buf = NULL;
struct device *dev = &cfg->dev->dev;
/* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
@@ -901,25 +738,6 @@ static int alloc_mem(struct cxlflash_cfg *cfg)
}
cfg->afu->parent = cfg;
cfg->afu->afu_map = NULL;
-
- for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
- if (!((u64)buf & (PAGE_SIZE - 1))) {
- buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
- if (unlikely(!buf)) {
- dev_err(dev,
- "%s: Allocate command buffers fail!\n",
- __func__);
- rc = -ENOMEM;
- free_mem(cfg);
- goto out;
- }
- }
-
- cfg->afu->cmd[i].buf = buf;
- atomic_set(&cfg->afu->cmd[i].free, 1);
- cfg->afu->cmd[i].slot = i;
- }
-
out:
return rc;
}
@@ -1549,13 +1367,6 @@ static void init_pcr(struct cxlflash_cfg *cfg)
/* Program the Endian Control for the master context */
writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
-
- /* Initialize cmd fields that never change */
- for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
- afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
- afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
- afu->cmd[i].rcb.rrq = 0x0;
- }
}
/**
@@ -1644,19 +1455,8 @@ out:
static int start_afu(struct cxlflash_cfg *cfg)
{
struct afu *afu = cfg->afu;
- struct afu_cmd *cmd;
-
- int i = 0;
int rc = 0;
- for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
- cmd = &afu->cmd[i];
-
- init_completion(&cmd->cevent);
- spin_lock_init(&cmd->slock);
- cmd->parent = afu;
- }
-
init_pcr(cfg);
/* After an AFU reset, RRQ entries are stale, clear them */
@@ -1829,6 +1629,9 @@ static int init_afu(struct cxlflash_cfg *cfg)
goto err2;
}
+ afu->send_cmd = send_cmd_ioarrin;
+ afu->context_reset = context_reset_ioarrin;
+
pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
afu->version, afu->interface_version);
@@ -1840,7 +1643,8 @@ static int init_afu(struct cxlflash_cfg *cfg)
}
afu_err_intr_init(cfg->afu);
- atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
+ spin_lock_init(&afu->rrin_slock);
+ afu->room = readq_be(&afu->host_map->cmd_room);
/* Restore the LUN mappings */
cxlflash_restore_luntable(cfg);
@@ -1884,8 +1688,8 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
struct afu_cmd *cmd = NULL;
+ char *buf = NULL;
int rc = 0;
- int retry_cnt = 0;
static DEFINE_MUTEX(sync_active);
if (cfg->state != STATE_NORMAL) {
@@ -1894,27 +1698,23 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
}
mutex_lock(&sync_active);
-retry:
- cmd = cmd_checkout(afu);
- if (unlikely(!cmd)) {
- retry_cnt++;
- udelay(1000 * retry_cnt);
- if (retry_cnt < MC_RETRY_CNT)
- goto retry;
- dev_err(dev, "%s: could not get a free command\n", __func__);
+ atomic_inc(&afu->cmds_active);
+ buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
+ if (unlikely(!buf)) {
+ dev_err(dev, "%s: no memory for command\n", __func__);
rc = -1;
goto out;
}
- pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
+ cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
+ init_completion(&cmd->cevent);
+ cmd->parent = afu;
- memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
+ pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
- cmd->rcb.port_sel = 0x0; /* NA */
- cmd->rcb.lun_id = 0x0; /* NA */
- cmd->rcb.data_len = 0x0;
- cmd->rcb.data_ea = 0x0;
+ cmd->rcb.ctx_id = afu->ctx_hndl;
+ cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */
@@ -1924,20 +1724,17 @@ retry:
*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);
- rc = send_cmd(afu, cmd);
+ rc = afu->send_cmd(afu, cmd);
if (unlikely(rc))
goto out;
- wait_resp(afu, cmd);
-
- /* Set on timeout */
- if (unlikely((cmd->sa.ioasc != 0) ||
- (cmd->sa.host_use_b[0] & B_ERROR)))
+ rc = wait_resp(afu, cmd);
+ if (unlikely(rc))
rc = -1;
out:
+ atomic_dec(&afu->cmds_active);
mutex_unlock(&sync_active);
- if (cmd)
- cmd_checkin(cmd);
+ kfree(buf);
pr_debug("%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -2376,8 +2173,9 @@ static struct scsi_host_template driver_template = {
.change_queue_depth = cxlflash_change_queue_depth,
.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
.can_queue = CXLFLASH_MAX_CMDS,
+ .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
.this_id = -1,
- .sg_tablesize = SG_NONE, /* No scatter gather support */
+ .sg_tablesize = 1, /* Single SG element (no scatter-gather) */
.max_sectors = CXLFLASH_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = cxlflash_host_attrs,
@@ -2412,7 +2210,6 @@ MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
* Handles the following events:
* - Link reset which cannot be performed in interrupt context due to
* blocking up to a few seconds
- * - Read AFU command room
* - Rescan the host
*/
static void cxlflash_worker_thread(struct work_struct *work)
@@ -2449,11 +2246,6 @@ static void cxlflash_worker_thread(struct work_struct *work)
cfg->lr_state = LINK_RESET_COMPLETE;
}
- if (afu->read_room) {
- atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
- afu->read_room = false;
- }
-
spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index 347fc1671975..1a2d09c148b3 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -72,7 +72,7 @@ struct sisl_ioarcb {
u16 timeout; /* in units specified by req_flags */
u32 rsvd1;
u8 cdb[16]; /* must be in big endian */
- struct scsi_cmnd *scp;
+ u64 reserved; /* Reserved area */
} __packed;
struct sisl_rc {
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index db03c49e2350..d704752b6332 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -95,7 +95,7 @@ struct alua_port_group {
struct alua_dh_data {
struct list_head node;
- struct alua_port_group *pg;
+ struct alua_port_group __rcu *pg;
int group_id;
spinlock_t pg_lock;
struct scsi_device *sdev;
@@ -371,7 +371,7 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
/* Check for existing port group references */
spin_lock(&h->pg_lock);
- old_pg = h->pg;
+ old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
if (old_pg != pg) {
/* port group has changed. Update to new port group */
if (h->pg) {
@@ -390,7 +390,9 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
list_add_rcu(&h->node, &pg->dh_list);
spin_unlock_irqrestore(&pg->lock, flags);
- alua_rtpg_queue(h->pg, sdev, NULL, true);
+ alua_rtpg_queue(rcu_dereference_protected(h->pg,
+ lockdep_is_held(&h->pg_lock)),
+ sdev, NULL, true);
spin_unlock(&h->pg_lock);
if (old_pg)
@@ -942,7 +944,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
static int alua_set_params(struct scsi_device *sdev, const char *params)
{
struct alua_dh_data *h = sdev->handler_data;
- struct alua_port_group __rcu *pg = NULL;
+ struct alua_port_group *pg = NULL;
unsigned int optimize = 0, argc;
const char *p = params;
int result = SCSI_DH_OK;
@@ -989,7 +991,7 @@ static int alua_activate(struct scsi_device *sdev,
struct alua_dh_data *h = sdev->handler_data;
int err = SCSI_DH_OK;
struct alua_queue_data *qdata;
- struct alua_port_group __rcu *pg;
+ struct alua_port_group *pg;
qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
if (!qdata) {
@@ -1053,7 +1055,7 @@ static void alua_check(struct scsi_device *sdev, bool force)
static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
{
struct alua_dh_data *h = sdev->handler_data;
- struct alua_port_group __rcu *pg;
+ struct alua_port_group *pg;
unsigned char state = SCSI_ACCESS_STATE_OPTIMAL;
int ret = BLKPREP_OK;
@@ -1123,7 +1125,7 @@ static void alua_bus_detach(struct scsi_device *sdev)
struct alua_port_group *pg;
spin_lock(&h->pg_lock);
- pg = h->pg;
+ pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
rcu_assign_pointer(h->pg, NULL);
h->sdev = NULL;
spin_unlock(&h->pg_lock);
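As a sketch of the __rcu handling introduced above (hypothetical holder/pg types, real rcupdate and spinlock APIs): writers that already hold the update-side lock use rcu_dereference_protected() with a lockdep expression, while plain readers would use rcu_dereference() under rcu_read_lock().

struct holder {
        spinlock_t lock;
        struct pg __rcu *pg;            /* written under ->lock, read under RCU */
};

static struct pg *holder_swap_pg(struct holder *h, struct pg *new)
{
        struct pg *old;

        spin_lock(&h->lock);
        /* update side: assert the lock instead of taking rcu_read_lock() */
        old = rcu_dereference_protected(h->pg, lockdep_is_held(&h->lock));
        rcu_assign_pointer(h->pg, new);
        spin_unlock(&h->lock);
        return old;                     /* freed by the caller after a grace period */
}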
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 9b5a457d4bca..6af3394d051d 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -34,13 +34,13 @@
* Definitions for the generic 5380 driver.
*/
-#define NCR5380_read(reg) inb(instance->io_port + reg)
-#define NCR5380_write(reg, value) outb(value, instance->io_port + reg)
+#define NCR5380_read(reg) inb(hostdata->base + (reg))
+#define NCR5380_write(reg, value) outb(value, hostdata->base + (reg))
-#define NCR5380_dma_xfer_len(instance, cmd, phase) (0)
-#define NCR5380_dma_recv_setup(instance, dst, len) (0)
-#define NCR5380_dma_send_setup(instance, src, len) (0)
-#define NCR5380_dma_residual(instance) (0)
+#define NCR5380_dma_xfer_len NCR5380_dma_xfer_none
+#define NCR5380_dma_recv_setup NCR5380_dma_setup_none
+#define NCR5380_dma_send_setup NCR5380_dma_setup_none
+#define NCR5380_dma_residual NCR5380_dma_residual_none
#define NCR5380_implementation_fields /* none */
@@ -71,6 +71,7 @@ static int dmx3191d_probe_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct Scsi_Host *shost;
+ struct NCR5380_hostdata *hostdata;
unsigned long io;
int error = -ENODEV;
@@ -88,7 +89,9 @@ static int dmx3191d_probe_one(struct pci_dev *pdev,
sizeof(struct NCR5380_hostdata));
if (!shost)
goto out_release_region;
- shost->io_port = io;
+
+ hostdata = shost_priv(shost);
+ hostdata->base = io;
/* This card does not seem to raise an interrupt on pdev->irq.
* Steam-powered SCSI controllers run without an IRQ anyway.
@@ -125,7 +128,8 @@ out_host_put:
static void dmx3191d_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
- unsigned long io = shost->io_port;
+ struct NCR5380_hostdata *hostdata = shost_priv(shost);
+ unsigned long io = hostdata->base;
scsi_remove_host(shost);
@@ -149,18 +153,7 @@ static struct pci_driver dmx3191d_pci_driver = {
.remove = dmx3191d_remove_one,
};
-static int __init dmx3191d_init(void)
-{
- return pci_register_driver(&dmx3191d_pci_driver);
-}
-
-static void __exit dmx3191d_exit(void)
-{
- pci_unregister_driver(&dmx3191d_pci_driver);
-}
-
-module_init(dmx3191d_init);
-module_exit(dmx3191d_exit);
+module_pci_driver(dmx3191d_pci_driver);
MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");
MODULE_DESCRIPTION("Domex DMX3191D SCSI driver");
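The module_pci_driver() helper used above generates boilerplate equivalent to the removed init/exit pair, roughly as follows (hypothetical driver name for illustration):

static struct pci_driver example_pci_driver;

static int __init example_init(void)
{
        return pci_register_driver(&example_pci_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
        pci_unregister_driver(&example_pci_driver);
}
module_exit(example_exit);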
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 21c8d210c456..27c0dce22e72 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -651,7 +651,6 @@ static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
}
spin_unlock_irqrestore(pHba->host->host_lock, flags);
if (i >= nr) {
- kfree (reply);
printk(KERN_WARNING"%s: Too many outstanding "
"ioctl commands\n", pHba->name);
return (u32)-1;
@@ -1754,8 +1753,10 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
sg_offset = (msg[0]>>4)&0xf;
msg[2] = 0x40000000; // IOCTL context
msg[3] = adpt_ioctl_to_context(pHba, reply);
- if (msg[3] == (u32)-1)
+ if (msg[3] == (u32)-1) {
+ kfree(reply);
return -EBUSY;
+ }
memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
if(sg_offset) {
@@ -3350,7 +3351,7 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
if (opblk_va == NULL) {
dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
resblk_va, resblk_pa);
- printk(KERN_CRIT "%s: query operatio failed; Out of memory.\n",
+ printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
pHba->name);
return -ENOMEM;
}
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 9bd41a35a78a..59150cad0353 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -63,6 +63,14 @@ unsigned int fcoe_debug_logging;
module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+unsigned int fcoe_e_d_tov = 2 * 1000;
+module_param_named(e_d_tov, fcoe_e_d_tov, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(e_d_tov, "E_D_TOV in ms, default 2000");
+
+unsigned int fcoe_r_a_tov = 2 * 2 * 1000;
+module_param_named(r_a_tov, fcoe_r_a_tov, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(r_a_tov, "R_A_TOV in ms, default 4000");
+
static DEFINE_MUTEX(fcoe_config_mutex);
static struct workqueue_struct *fcoe_wq;
@@ -582,7 +590,8 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
* Use default VLAN for FIP VLAN discovery protocol
*/
frame = (struct fip_frame *)skb->data;
- if (frame->fip.fip_op == ntohs(FIP_OP_VLAN) &&
+ if (ntohs(frame->eth.h_proto) == ETH_P_FIP &&
+ ntohs(frame->fip.fip_op) == FIP_OP_VLAN &&
fcoe->realdev != fcoe->netdev)
skb->dev = fcoe->realdev;
else
@@ -633,8 +642,8 @@ static int fcoe_lport_config(struct fc_lport *lport)
lport->qfull = 0;
lport->max_retry_count = 3;
lport->max_rport_retry_count = 3;
- lport->e_d_tov = 2 * 1000; /* FC-FS default */
- lport->r_a_tov = 2 * 2 * 1000;
+ lport->e_d_tov = fcoe_e_d_tov;
+ lport->r_a_tov = fcoe_r_a_tov;
lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
lport->does_npiv = 1;
@@ -2160,11 +2169,13 @@ static bool fcoe_match(struct net_device *netdev)
*/
static void fcoe_dcb_create(struct fcoe_interface *fcoe)
{
+ int ctlr_prio = TC_PRIO_BESTEFFORT;
+ int fcoe_prio = TC_PRIO_INTERACTIVE;
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
#ifdef CONFIG_DCB
int dcbx;
u8 fup, up;
struct net_device *netdev = fcoe->realdev;
- struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
struct dcb_app app = {
.priority = 0,
.protocol = ETH_P_FCOE
@@ -2186,10 +2197,12 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
fup = dcb_getapp(netdev, &app);
}
- fcoe->priority = ffs(up) ? ffs(up) - 1 : 0;
- ctlr->priority = ffs(fup) ? ffs(fup) - 1 : fcoe->priority;
+ fcoe_prio = ffs(up) ? ffs(up) - 1 : 0;
+ ctlr_prio = ffs(fup) ? ffs(fup) - 1 : fcoe_prio;
}
#endif
+ fcoe->priority = fcoe_prio;
+ ctlr->priority = ctlr_prio;
}
enum fcoe_create_link_state {
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index dcf36537a767..cea57e27e713 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -801,6 +801,8 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
return -EINPROGRESS;
drop:
kfree_skb(skb);
+ LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n",
+ op, ntoh24(fh->fh_d_id));
return -EINVAL;
}
EXPORT_SYMBOL(fcoe_ctlr_els_send);
@@ -1316,7 +1318,7 @@ drop:
* The overall length has already been checked.
*/
static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
- struct fip_header *fh)
+ struct sk_buff *skb)
{
struct fip_desc *desc;
struct fip_mac_desc *mp;
@@ -1331,14 +1333,18 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
int num_vlink_desc;
int reset_phys_port = 0;
struct fip_vn_desc **vlink_desc_arr = NULL;
+ struct fip_header *fh = (struct fip_header *)skb->data;
+ struct ethhdr *eh = eth_hdr(skb);
LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
- if (!fcf || !lport->port_id) {
+ if (!fcf) {
/*
* We have yet to select the best FCF, but we got a CVL in the
* meantime. Reset the ctlr and let it rediscover the FCF
*/
+ LIBFCOE_FIP_DBG(fip, "Resetting fcoe_ctlr as FCF has not been "
+ "selected yet\n");
mutex_lock(&fip->ctlr_mutex);
fcoe_ctlr_reset(fip);
mutex_unlock(&fip->ctlr_mutex);
@@ -1346,6 +1352,31 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
}
/*
+ * If we've selected an FCF, check that the CVL is from it to avoid
+ * processing CVLs from an unexpected source. If it is from an
+ * unexpected source, drop it on the floor.
+ */
+ if (!ether_addr_equal(eh->h_source, fcf->fcf_mac)) {
+ LIBFCOE_FIP_DBG(fip, "Dropping CVL due to source address "
+ "mismatch with FCF src=%pM\n", eh->h_source);
+ return;
+ }
+
+ /*
+ * If we haven't logged into the fabric but receive a CVL we should
+ * reset everything and go back to solicitation.
+ */
+ if (!lport->port_id) {
+ LIBFCOE_FIP_DBG(fip, "lport not logged in, resoliciting\n");
+ mutex_lock(&fip->ctlr_mutex);
+ fcoe_ctlr_reset(fip);
+ mutex_unlock(&fip->ctlr_mutex);
+ fc_lport_reset(fip->lp);
+ fcoe_ctlr_solicit(fip, NULL);
+ return;
+ }
+
+ /*
* mask of required descriptors. Validating each one clears its bit.
*/
desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME);
@@ -1576,7 +1607,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
if (op == FIP_OP_DISC && sub == FIP_SC_ADV)
fcoe_ctlr_recv_adv(fip, skb);
else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK)
- fcoe_ctlr_recv_clr_vlink(fip, fiph);
+ fcoe_ctlr_recv_clr_vlink(fip, skb);
kfree_skb(skb);
return 0;
drop:
@@ -2122,7 +2153,7 @@ static void fcoe_ctlr_vn_rport_callback(struct fc_lport *lport,
LIBFCOE_FIP_DBG(fip,
"rport FLOGI limited port_id %6.6x\n",
rdata->ids.port_id);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
}
break;
default:
@@ -2145,9 +2176,15 @@ static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport)
{
struct fc_rport_priv *rdata;
+ rcu_read_lock();
+ list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
+ if (kref_get_unless_zero(&rdata->kref)) {
+ fc_rport_logoff(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+ }
+ rcu_read_unlock();
mutex_lock(&lport->disc.disc_mutex);
- list_for_each_entry_rcu(rdata, &lport->disc.rports, peers)
- lport->tt.rport_logoff(rdata);
lport->disc.disc_callback = NULL;
mutex_unlock(&lport->disc.disc_mutex);
}
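The same walk-and-pin idiom recurs in the hunks below; condensed into one hypothetical helper (libfc types and fc_rport_destroy() as used in this series):

static void logoff_all_rports(struct fc_lport *lport)
{
        struct fc_rport_priv *rdata;

        rcu_read_lock();
        list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
                /* skip entries that are already being torn down */
                if (!kref_get_unless_zero(&rdata->kref))
                        continue;
                fc_rport_logoff(rdata);
                kref_put(&rdata->kref, fc_rport_destroy);
        }
        rcu_read_unlock();
}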
@@ -2178,7 +2215,7 @@ static void fcoe_ctlr_disc_stop(struct fc_lport *lport)
static void fcoe_ctlr_disc_stop_final(struct fc_lport *lport)
{
fcoe_ctlr_disc_stop(lport);
- lport->tt.rport_flush_queue();
+ fc_rport_flush_queue();
synchronize_rcu();
}
@@ -2393,6 +2430,8 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
switch (fip->state) {
case FIP_ST_VNMP_CLAIM:
case FIP_ST_VNMP_UP:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_req: send reply, state %x\n",
+ fip->state);
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP,
frport->enode_mac, 0);
break;
@@ -2407,15 +2446,21 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
*/
if (fip->lp->wwpn > rdata->ids.port_name &&
!(frport->flags & FIP_FL_REC_OR_P2P)) {
+ LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
+ "port_id collision\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP,
frport->enode_mac, 0);
break;
}
/* fall through */
case FIP_ST_VNMP_START:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
+ "restart VN2VN negotiation\n");
fcoe_ctlr_vn_restart(fip);
break;
default:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_req: ignore state %x\n",
+ fip->state);
break;
}
}
@@ -2437,9 +2482,12 @@ static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
case FIP_ST_VNMP_PROBE1:
case FIP_ST_VNMP_PROBE2:
case FIP_ST_VNMP_CLAIM:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_reply: restart state %x\n",
+ fip->state);
fcoe_ctlr_vn_restart(fip);
break;
case FIP_ST_VNMP_UP:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_reply: send claim notify\n");
fcoe_ctlr_vn_send_claim(fip);
break;
default:
@@ -2467,26 +2515,33 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
return;
mutex_lock(&lport->disc.disc_mutex);
- rdata = lport->tt.rport_create(lport, port_id);
+ rdata = fc_rport_create(lport, port_id);
if (!rdata) {
mutex_unlock(&lport->disc.disc_mutex);
return;
}
+ mutex_lock(&rdata->rp_mutex);
+ mutex_unlock(&lport->disc.disc_mutex);
rdata->ops = &fcoe_ctlr_vn_rport_ops;
rdata->disc_id = lport->disc.disc_id;
ids = &rdata->ids;
if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) ||
- (ids->node_name != -1 && ids->node_name != new->ids.node_name))
- lport->tt.rport_logoff(rdata);
+ (ids->node_name != -1 && ids->node_name != new->ids.node_name)) {
+ mutex_unlock(&rdata->rp_mutex);
+ LIBFCOE_FIP_DBG(fip, "vn_add rport logoff %6.6x\n", port_id);
+ fc_rport_logoff(rdata);
+ mutex_lock(&rdata->rp_mutex);
+ }
ids->port_name = new->ids.port_name;
ids->node_name = new->ids.node_name;
- mutex_unlock(&lport->disc.disc_mutex);
+ mutex_unlock(&rdata->rp_mutex);
frport = fcoe_ctlr_rport(rdata);
- LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s\n",
- port_id, frport->fcoe_len ? "old" : "new");
+ LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s state %d\n",
+ port_id, frport->fcoe_len ? "old" : "new",
+ rdata->rp_state);
*frport = *fcoe_ctlr_rport(new);
frport->time = 0;
}
@@ -2506,12 +2561,12 @@ static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac)
struct fcoe_rport *frport;
int ret = -1;
- rdata = lport->tt.rport_lookup(lport, port_id);
+ rdata = fc_rport_lookup(lport, port_id);
if (rdata) {
frport = fcoe_ctlr_rport(rdata);
memcpy(mac, frport->enode_mac, ETH_ALEN);
ret = 0;
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
return ret;
}
@@ -2529,6 +2584,7 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
struct fcoe_rport *frport = fcoe_ctlr_rport(new);
if (frport->flags & FIP_FL_REC_OR_P2P) {
+ LIBFCOE_FIP_DBG(fip, "send probe req for P2P/REC\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
return;
}
@@ -2536,25 +2592,37 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
case FIP_ST_VNMP_START:
case FIP_ST_VNMP_PROBE1:
case FIP_ST_VNMP_PROBE2:
- if (new->ids.port_id == fip->port_id)
+ if (new->ids.port_id == fip->port_id) {
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+ "restart, state %d\n",
+ fip->state);
fcoe_ctlr_vn_restart(fip);
+ }
break;
case FIP_ST_VNMP_CLAIM:
case FIP_ST_VNMP_UP:
if (new->ids.port_id == fip->port_id) {
if (new->ids.port_name > fip->lp->wwpn) {
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+ "restart, port_id collision\n");
fcoe_ctlr_vn_restart(fip);
break;
}
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+ "send claim notify\n");
fcoe_ctlr_vn_send_claim(fip);
break;
}
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: send reply to %x\n",
+ new->ids.port_id);
fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac,
min((u32)frport->fcoe_len,
fcoe_ctlr_fcoe_size(fip)));
fcoe_ctlr_vn_add(fip, new);
break;
default:
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+ "ignoring claim from %x\n", new->ids.port_id);
break;
}
}
@@ -2591,19 +2659,26 @@ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
frport = fcoe_ctlr_rport(new);
if (frport->flags & FIP_FL_REC_OR_P2P) {
+ LIBFCOE_FIP_DBG(fip, "p2p beacon while in vn2vn mode\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
return;
}
- rdata = lport->tt.rport_lookup(lport, new->ids.port_id);
+ rdata = fc_rport_lookup(lport, new->ids.port_id);
if (rdata) {
if (rdata->ids.node_name == new->ids.node_name &&
rdata->ids.port_name == new->ids.port_name) {
frport = fcoe_ctlr_rport(rdata);
- if (!frport->time && fip->state == FIP_ST_VNMP_UP)
- lport->tt.rport_login(rdata);
+ LIBFCOE_FIP_DBG(fip, "beacon from rport %x\n",
+ rdata->ids.port_id);
+ if (!frport->time && fip->state == FIP_ST_VNMP_UP) {
+ LIBFCOE_FIP_DBG(fip, "beacon expired "
+ "for rport %x\n",
+ rdata->ids.port_id);
+ fc_rport_login(rdata);
+ }
frport->time = jiffies;
}
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
}
if (fip->state != FIP_ST_VNMP_UP)
@@ -2638,11 +2713,15 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
unsigned long deadline;
next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10);
- mutex_lock(&lport->disc.disc_mutex);
+ rcu_read_lock();
list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
+ if (!kref_get_unless_zero(&rdata->kref))
+ continue;
frport = fcoe_ctlr_rport(rdata);
- if (!frport->time)
+ if (!frport->time) {
+ kref_put(&rdata->kref, fc_rport_destroy);
continue;
+ }
deadline = frport->time +
msecs_to_jiffies(FIP_VN_BEACON_INT * 25 / 10);
if (time_after_eq(jiffies, deadline)) {
@@ -2650,11 +2729,12 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
LIBFCOE_FIP_DBG(fip,
"port %16.16llx fc_id %6.6x beacon expired\n",
rdata->ids.port_name, rdata->ids.port_id);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
} else if (time_before(deadline, next_time))
next_time = deadline;
+ kref_put(&rdata->kref, fc_rport_destroy);
}
- mutex_unlock(&lport->disc.disc_mutex);
+ rcu_read_unlock();
return next_time;
}
@@ -2674,11 +2754,21 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
struct fc_rport_priv rdata;
struct fcoe_rport frport;
} buf;
- int rc;
+ int rc, vlan_id = 0;
fiph = (struct fip_header *)skb->data;
sub = fiph->fip_subcode;
+ if (fip->lp->vlan)
+ vlan_id = skb_vlan_tag_get_id(skb);
+
+ if (vlan_id && vlan_id != fip->lp->vlan) {
+ LIBFCOE_FIP_DBG(fip, "vn_recv drop frame sub %x vlan %d\n",
+ sub, vlan_id);
+ rc = -EAGAIN;
+ goto drop;
+ }
+
rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
if (rc) {
LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
@@ -2941,7 +3031,7 @@ static void fcoe_ctlr_disc_recv(struct fc_lport *lport, struct fc_frame *fp)
rjt_data.reason = ELS_RJT_UNSUP;
rjt_data.explan = ELS_EXPL_NONE;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(fp);
}
@@ -2991,12 +3081,17 @@ static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip)
mutex_lock(&disc->disc_mutex);
callback = disc->pending ? disc->disc_callback : NULL;
disc->pending = 0;
+ mutex_unlock(&disc->disc_mutex);
+ rcu_read_lock();
list_for_each_entry_rcu(rdata, &disc->rports, peers) {
+ if (!kref_get_unless_zero(&rdata->kref))
+ continue;
frport = fcoe_ctlr_rport(rdata);
if (frport->time)
- lport->tt.rport_login(rdata);
+ fc_rport_login(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
- mutex_unlock(&disc->disc_mutex);
+ rcu_read_unlock();
if (callback)
callback(lport, DISC_EV_SUCCESS);
}
@@ -3015,11 +3110,13 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
switch (fip->state) {
case FIP_ST_VNMP_START:
fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE1);
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send 1st probe request\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
next_time = jiffies + msecs_to_jiffies(FIP_VN_PROBE_WAIT);
break;
case FIP_ST_VNMP_PROBE1:
fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE2);
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send 2nd probe request\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
break;
@@ -3030,6 +3127,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
hton24(mac + 3, new_port_id);
fcoe_ctlr_map_dest(fip);
fip->update_mac(fip->lp, mac);
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send claim notify\n");
fcoe_ctlr_vn_send_claim(fip);
next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
break;
@@ -3041,6 +3139,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
next_time = fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT);
if (time_after_eq(jiffies, next_time)) {
fcoe_ctlr_set_state(fip, FIP_ST_VNMP_UP);
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send vn2vn beacon\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON,
fcoe_all_vn2vn, 0);
next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
@@ -3051,6 +3150,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
case FIP_ST_VNMP_UP:
next_time = fcoe_ctlr_vn_age(fip);
if (time_after_eq(jiffies, fip->port_ka_time)) {
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send vn2vn beacon\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON,
fcoe_all_vn2vn, 0);
fip->port_ka_time = jiffies +
@@ -3135,7 +3235,6 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
fc_exch_init(lport);
fc_elsct_init(lport);
fc_lport_init(lport);
- fc_rport_init(lport);
fc_disc_init(lport);
fcoe_ctlr_mode_set(lport, fip, fip->mode);
return 0;
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 0675fd128734..9cf3d56296ab 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -335,16 +335,24 @@ static ssize_t store_ctlr_enabled(struct device *dev,
const char *buf, size_t count)
{
struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ bool enabled;
int rc;
+ if (*buf == '1')
+ enabled = true;
+ else if (*buf == '0')
+ enabled = false;
+ else
+ return -EINVAL;
+
switch (ctlr->enabled) {
case FCOE_CTLR_ENABLED:
- if (*buf == '1')
+ if (enabled)
return count;
ctlr->enabled = FCOE_CTLR_DISABLED;
break;
case FCOE_CTLR_DISABLED:
- if (*buf == '0')
+ if (!enabled)
return count;
ctlr->enabled = FCOE_CTLR_ENABLED;
break;
@@ -424,6 +432,75 @@ static FCOE_DEVICE_ATTR(ctlr, fip_vlan_responder, S_IRUGO | S_IWUSR,
store_ctlr_fip_resp);
static ssize_t
+fcoe_ctlr_var_store(u32 *var, const char *buf, size_t count)
+{
+ int err;
+ unsigned long v;
+
+ err = kstrtoul(buf, 10, &v);
+ if (err || v > UINT_MAX)
+ return -EINVAL;
+
+ *var = v;
+
+ return count;
+}
+
+static ssize_t store_ctlr_r_a_tov(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+ if (ctlr_dev->enabled == FCOE_CTLR_ENABLED)
+ return -EBUSY;
+ if (ctlr_dev->enabled == FCOE_CTLR_DISABLED)
+ return fcoe_ctlr_var_store(&ctlr->lp->r_a_tov, buf, count);
+ return -ENOTSUPP;
+}
+
+static ssize_t show_ctlr_r_a_tov(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+ return sprintf(buf, "%d\n", ctlr->lp->r_a_tov);
+}
+
+static FCOE_DEVICE_ATTR(ctlr, r_a_tov, S_IRUGO | S_IWUSR,
+ show_ctlr_r_a_tov, store_ctlr_r_a_tov);
+
+static ssize_t store_ctlr_e_d_tov(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+ if (ctlr_dev->enabled == FCOE_CTLR_ENABLED)
+ return -EBUSY;
+ if (ctlr_dev->enabled == FCOE_CTLR_DISABLED)
+ return fcoe_ctlr_var_store(&ctlr->lp->e_d_tov, buf, count);
+ return -ENOTSUPP;
+}
+
+static ssize_t show_ctlr_e_d_tov(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+ return sprintf(buf, "%d\n", ctlr->lp->e_d_tov);
+}
+
+static FCOE_DEVICE_ATTR(ctlr, e_d_tov, S_IRUGO | S_IWUSR,
+ show_ctlr_e_d_tov, store_ctlr_e_d_tov);
+
+static ssize_t
store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -507,6 +584,8 @@ static struct attribute_group fcoe_ctlr_lesb_attr_group = {
static struct attribute *fcoe_ctlr_attrs[] = {
&device_attr_fcoe_ctlr_fip_vlan_responder.attr,
&device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
+ &device_attr_fcoe_ctlr_r_a_tov.attr,
+ &device_attr_fcoe_ctlr_e_d_tov.attr,
&device_attr_fcoe_ctlr_enabled.attr,
&device_attr_fcoe_ctlr_mode.attr,
NULL,
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index d9fd2f841585..2544a37ece0a 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -441,30 +441,38 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
unsigned long ptr;
spinlock_t *io_lock = NULL;
int io_lock_acquired = 0;
+ struct fc_rport_libfc_priv *rp;
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
return SCSI_MLQUEUE_HOST_BUSY;
rport = starget_to_rport(scsi_target(sc->device));
+ if (!rport) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "returning DID_NO_CONNECT for IO as rport is NULL\n");
+ sc->result = DID_NO_CONNECT << 16;
+ done(sc);
+ return 0;
+ }
+
ret = fc_remote_port_chkready(rport);
if (ret) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "rport is not ready\n");
atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
sc->result = ret;
done(sc);
return 0;
}
- if (rport) {
- struct fc_rport_libfc_priv *rp = rport->dd_data;
-
- if (!rp || rp->rp_state != RPORT_ST_READY) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ rp = rport->dd_data;
+ if (!rp || rp->rp_state != RPORT_ST_READY) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"returning DID_NO_CONNECT for IO as rport is removed\n");
- atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
- sc->result = DID_NO_CONNECT<<16;
- done(sc);
- return 0;
- }
+ atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+ sc->result = DID_NO_CONNECT<<16;
+ done(sc);
+ return 0;
}
if (lp->state != LPORT_ST_READY || !(lp->link_up))
@@ -2543,7 +2551,7 @@ int fnic_reset(struct Scsi_Host *shost)
* Reset local port, this will clean up libFC exchanges,
* reset remote port sessions, and if link is up, begin flogi
*/
- ret = lp->tt.lport_reset(lp);
+ ret = fc_lport_reset(lp);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from fnic reset %s\n",
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 4e15c4bf0795..5a5fa01576b7 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -613,7 +613,7 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
fc_trace_entries.rd_idx = 0;
}
- fc_buf->time_stamp = CURRENT_TIME;
+ ktime_get_real_ts64(&fc_buf->time_stamp);
fc_buf->host_no = host_no;
fc_buf->frame_type = frame_type;
@@ -740,7 +740,7 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
len = *orig_len;
- time_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
+ time64_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t";
len += snprintf(fnic_dbgfs_prt->buffer + len,
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
index a8aa0578fcb0..e375d0c2eaaf 100644
--- a/drivers/scsi/fnic/fnic_trace.h
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -72,7 +72,7 @@ struct fnic_trace_data {
typedef struct fnic_trace_data fnic_trace_data_t;
struct fc_trace_hdr {
- struct timespec time_stamp;
+ struct timespec64 time_stamp;
u32 host_no;
u8 frame_type;
u8 frame_len;
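A minimal sketch of the y2038-safe stamping used above: ktime_get_real_ts64() fills a 64-bit timespec64 and time64_to_tm() later splits it into calendar fields for printing (hypothetical helper):

static void stamp_and_split(struct timespec64 *ts, struct tm *tm)
{
        ktime_get_real_ts64(ts);                /* wall-clock time, 64-bit seconds */
        time64_to_tm(ts->tv_sec, 0, tm);        /* offset 0 means UTC */
}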
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 9795d6f3e197..ba69d6112fa1 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -499,10 +499,7 @@ void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
if (err)
- printk(KERN_ERR
- "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
- err);
+ pr_err("Can't add addr [%pM], %d\n", addr, err);
}
void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
@@ -517,10 +514,7 @@ void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
if (err)
- printk(KERN_ERR
- "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
- err);
+ pr_err("Can't del addr [%pM], %d\n", addr, err);
}
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index cbf010324c18..de5147a8c959 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -64,9 +64,9 @@ static int card[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
module_param_array(card, int, NULL, 0);
MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC3181E, 4=HP C2502)");
+MODULE_ALIAS("g_NCR5380_mmio");
MODULE_LICENSE("GPL");
-#ifndef SCSI_G_NCR5380_MEM
/*
* Configure I/O address of 53C400A or DTC436 by writing magic numbers
* to ports 0x779 and 0x379.
@@ -88,40 +88,35 @@ static void magic_configure(int idx, u8 irq, u8 magic[])
cfg = 0x80 | idx | (irq << 4);
outb(cfg, 0x379);
}
-#endif
+
+static unsigned int ncr_53c400a_ports[] = {
+ 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
+};
+static unsigned int dtc_3181e_ports[] = {
+ 0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0
+};
+static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */
+ 0x59, 0xb9, 0xc5, 0xae, 0xa6
+};
+static u8 hp_c2502_magic[] = { /* HP C2502 */
+ 0x0f, 0x22, 0xf0, 0x20, 0x80
+};
static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
struct device *pdev, int base, int irq, int board)
{
- unsigned int *ports;
+ bool is_pmio = base <= 0xffff;
+ int ret;
+ int flags = 0;
+ unsigned int *ports = NULL;
u8 *magic = NULL;
-#ifndef SCSI_G_NCR5380_MEM
int i;
int port_idx = -1;
unsigned long region_size;
-#endif
- static unsigned int ncr_53c400a_ports[] = {
- 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
- };
- static unsigned int dtc_3181e_ports[] = {
- 0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0
- };
- static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */
- 0x59, 0xb9, 0xc5, 0xae, 0xa6
- };
- static u8 hp_c2502_magic[] = { /* HP C2502 */
- 0x0f, 0x22, 0xf0, 0x20, 0x80
- };
- int flags, ret;
struct Scsi_Host *instance;
struct NCR5380_hostdata *hostdata;
-#ifdef SCSI_G_NCR5380_MEM
- void __iomem *iomem;
- resource_size_t iomem_size;
-#endif
+ u8 __iomem *iomem;
- ports = NULL;
- flags = 0;
switch (board) {
case BOARD_NCR5380:
flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP;
@@ -140,8 +135,7 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
break;
}
-#ifndef SCSI_G_NCR5380_MEM
- if (ports && magic) {
+ if (is_pmio && ports && magic) {
/* wakeup sequence for the NCR53C400A and DTC3181E */
/* Disable the adapter and look for a free io port */
@@ -170,84 +164,89 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
if (ports[i]) {
/* At this point we have our region reserved */
magic_configure(i, 0, magic); /* no IRQ yet */
- outb(0xc0, ports[i] + 9);
- if (inb(ports[i] + 9) != 0x80) {
+ base = ports[i];
+ outb(0xc0, base + 9);
+ if (inb(base + 9) != 0x80) {
ret = -ENODEV;
goto out_release;
}
- base = ports[i];
port_idx = i;
} else
return -EINVAL;
- }
- else
- {
+ } else if (is_pmio) {
/* NCR5380 - no configuration, just grab */
region_size = 8;
if (!base || !request_region(base, region_size, "ncr5380"))
return -EBUSY;
+ } else { /* MMIO */
+ region_size = NCR53C400_region_size;
+ if (!request_mem_region(base, region_size, "ncr5380"))
+ return -EBUSY;
}
-#else
- iomem_size = NCR53C400_region_size;
- if (!request_mem_region(base, iomem_size, "ncr5380"))
- return -EBUSY;
- iomem = ioremap(base, iomem_size);
+
+ if (is_pmio)
+ iomem = ioport_map(base, region_size);
+ else
+ iomem = ioremap(base, region_size);
+
if (!iomem) {
- release_mem_region(base, iomem_size);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_release;
}
-#endif
+
instance = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata));
if (instance == NULL) {
ret = -ENOMEM;
- goto out_release;
+ goto out_unmap;
}
hostdata = shost_priv(instance);
-#ifndef SCSI_G_NCR5380_MEM
- instance->io_port = base;
- instance->n_io_port = region_size;
- hostdata->io_width = 1; /* 8-bit PDMA by default */
-
- /*
- * On NCR53C400 boards, NCR5380 registers are mapped 8 past
- * the base address.
- */
- switch (board) {
- case BOARD_NCR53C400:
- instance->io_port += 8;
- hostdata->c400_ctl_status = 0;
- hostdata->c400_blk_cnt = 1;
- hostdata->c400_host_buf = 4;
- break;
- case BOARD_DTC3181E:
- hostdata->io_width = 2; /* 16-bit PDMA */
- /* fall through */
- case BOARD_NCR53C400A:
- case BOARD_HP_C2502:
- hostdata->c400_ctl_status = 9;
- hostdata->c400_blk_cnt = 10;
- hostdata->c400_host_buf = 8;
- break;
- }
-#else
- instance->base = base;
- hostdata->iomem = iomem;
- hostdata->iomem_size = iomem_size;
- switch (board) {
- case BOARD_NCR53C400:
- hostdata->c400_ctl_status = 0x100;
- hostdata->c400_blk_cnt = 0x101;
- hostdata->c400_host_buf = 0x104;
- break;
- case BOARD_DTC3181E:
- case BOARD_NCR53C400A:
- case BOARD_HP_C2502:
- pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
- ret = -EINVAL;
- goto out_unregister;
+ hostdata->io = iomem;
+ hostdata->region_size = region_size;
+
+ if (is_pmio) {
+ hostdata->io_port = base;
+ hostdata->io_width = 1; /* 8-bit PDMA by default */
+ hostdata->offset = 0;
+
+ /*
+ * On NCR53C400 boards, NCR5380 registers are mapped 8 past
+ * the base address.
+ */
+ switch (board) {
+ case BOARD_NCR53C400:
+ hostdata->io_port += 8;
+ hostdata->c400_ctl_status = 0;
+ hostdata->c400_blk_cnt = 1;
+ hostdata->c400_host_buf = 4;
+ break;
+ case BOARD_DTC3181E:
+ hostdata->io_width = 2; /* 16-bit PDMA */
+ /* fall through */
+ case BOARD_NCR53C400A:
+ case BOARD_HP_C2502:
+ hostdata->c400_ctl_status = 9;
+ hostdata->c400_blk_cnt = 10;
+ hostdata->c400_host_buf = 8;
+ break;
+ }
+ } else {
+ hostdata->base = base;
+ hostdata->offset = NCR53C400_mem_base;
+ switch (board) {
+ case BOARD_NCR53C400:
+ hostdata->c400_ctl_status = 0x100;
+ hostdata->c400_blk_cnt = 0x101;
+ hostdata->c400_host_buf = 0x104;
+ break;
+ case BOARD_DTC3181E:
+ case BOARD_NCR53C400A:
+ case BOARD_HP_C2502:
+ pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
+ ret = -EINVAL;
+ goto out_unregister;
+ }
}
-#endif
ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP);
if (ret)
@@ -273,11 +272,9 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
instance->irq = NO_IRQ;
if (instance->irq != NO_IRQ) {
-#ifndef SCSI_G_NCR5380_MEM
/* set IRQ for HP C2502 */
if (board == BOARD_HP_C2502)
magic_configure(port_idx, instance->irq, magic);
-#endif
if (request_irq(instance->irq, generic_NCR5380_intr,
0, "NCR5380", instance)) {
printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
@@ -303,38 +300,39 @@ out_free_irq:
NCR5380_exit(instance);
out_unregister:
scsi_host_put(instance);
-out_release:
-#ifndef SCSI_G_NCR5380_MEM
- release_region(base, region_size);
-#else
+out_unmap:
iounmap(iomem);
- release_mem_region(base, iomem_size);
-#endif
+out_release:
+ if (is_pmio)
+ release_region(base, region_size);
+ else
+ release_mem_region(base, region_size);
return ret;
}
static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ void __iomem *iomem = hostdata->io;
+ unsigned long io_port = hostdata->io_port;
+ unsigned long base = hostdata->base;
+ unsigned long region_size = hostdata->region_size;
+
scsi_remove_host(instance);
if (instance->irq != NO_IRQ)
free_irq(instance->irq, instance);
NCR5380_exit(instance);
-#ifndef SCSI_G_NCR5380_MEM
- release_region(instance->io_port, instance->n_io_port);
-#else
- {
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
- iounmap(hostdata->iomem);
- release_mem_region(instance->base, hostdata->iomem_size);
- }
-#endif
scsi_host_put(instance);
+ iounmap(iomem);
+ if (io_port)
+ release_region(io_port, region_size);
+ else
+ release_mem_region(base, region_size);
}
/**
* generic_NCR5380_pread - pseudo DMA read
- * @instance: adapter to read from
+ * @hostdata: scsi host private data
* @dst: buffer to read into
* @len: buffer length
*
@@ -342,10 +340,9 @@ static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
* controller
*/
-static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
+static inline int generic_NCR5380_pread(struct NCR5380_hostdata *hostdata,
unsigned char *dst, int len)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
int blocks = len / 128;
int start = 0;
@@ -361,18 +358,16 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
; /* FIXME - no timeout */
-#ifndef SCSI_G_NCR5380_MEM
- if (hostdata->io_width == 2)
- insw(instance->io_port + hostdata->c400_host_buf,
+ if (hostdata->io_port && hostdata->io_width == 2)
+ insw(hostdata->io_port + hostdata->c400_host_buf,
dst + start, 64);
- else
- insb(instance->io_port + hostdata->c400_host_buf,
+ else if (hostdata->io_port)
+ insb(hostdata->io_port + hostdata->c400_host_buf,
dst + start, 128);
-#else
- /* implies SCSI_G_NCR5380_MEM */
- memcpy_fromio(dst + start,
- hostdata->iomem + NCR53C400_host_buffer, 128);
-#endif
+ else
+ memcpy_fromio(dst + start,
+ hostdata->io + NCR53C400_host_buffer, 128);
+
start += 128;
blocks--;
}
@@ -381,18 +376,16 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
; /* FIXME - no timeout */
-#ifndef SCSI_G_NCR5380_MEM
- if (hostdata->io_width == 2)
- insw(instance->io_port + hostdata->c400_host_buf,
+ if (hostdata->io_port && hostdata->io_width == 2)
+ insw(hostdata->io_port + hostdata->c400_host_buf,
dst + start, 64);
- else
- insb(instance->io_port + hostdata->c400_host_buf,
+ else if (hostdata->io_port)
+ insb(hostdata->io_port + hostdata->c400_host_buf,
dst + start, 128);
-#else
- /* implies SCSI_G_NCR5380_MEM */
- memcpy_fromio(dst + start,
- hostdata->iomem + NCR53C400_host_buffer, 128);
-#endif
+ else
+ memcpy_fromio(dst + start,
+ hostdata->io + NCR53C400_host_buffer, 128);
+
start += 128;
blocks--;
}
@@ -412,7 +405,7 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
/**
* generic_NCR5380_pwrite - pseudo DMA write
- * @instance: adapter to read from
+ * @hostdata: scsi host private data
* @dst: buffer to read into
* @len: buffer length
*
@@ -420,10 +413,9 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
* controller
*/
-static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
+static inline int generic_NCR5380_pwrite(struct NCR5380_hostdata *hostdata,
unsigned char *src, int len)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
int blocks = len / 128;
int start = 0;
@@ -439,18 +431,17 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
break;
while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
; // FIXME - timeout
-#ifndef SCSI_G_NCR5380_MEM
- if (hostdata->io_width == 2)
- outsw(instance->io_port + hostdata->c400_host_buf,
+
+ if (hostdata->io_port && hostdata->io_width == 2)
+ outsw(hostdata->io_port + hostdata->c400_host_buf,
src + start, 64);
- else
- outsb(instance->io_port + hostdata->c400_host_buf,
+ else if (hostdata->io_port)
+ outsb(hostdata->io_port + hostdata->c400_host_buf,
src + start, 128);
-#else
- /* implies SCSI_G_NCR5380_MEM */
- memcpy_toio(hostdata->iomem + NCR53C400_host_buffer,
- src + start, 128);
-#endif
+ else
+ memcpy_toio(hostdata->io + NCR53C400_host_buffer,
+ src + start, 128);
+
start += 128;
blocks--;
}
@@ -458,18 +449,16 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
; // FIXME - no timeout
-#ifndef SCSI_G_NCR5380_MEM
- if (hostdata->io_width == 2)
- outsw(instance->io_port + hostdata->c400_host_buf,
+ if (hostdata->io_port && hostdata->io_width == 2)
+ outsw(hostdata->io_port + hostdata->c400_host_buf,
src + start, 64);
- else
- outsb(instance->io_port + hostdata->c400_host_buf,
+ else if (hostdata->io_port)
+ outsb(hostdata->io_port + hostdata->c400_host_buf,
src + start, 128);
-#else
- /* implies SCSI_G_NCR5380_MEM */
- memcpy_toio(hostdata->iomem + NCR53C400_host_buffer,
- src + start, 128);
-#endif
+ else
+ memcpy_toio(hostdata->io + NCR53C400_host_buffer,
+ src + start, 128);
+
start += 128;
blocks--;
}
@@ -489,10 +478,9 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
return 0;
}
-static int generic_NCR5380_dma_xfer_len(struct Scsi_Host *instance,
+static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
int transfersize = cmd->transfersize;
if (hostdata->flags & FLAG_NO_PSEUDO_DMA)
@@ -566,7 +554,7 @@ static struct isa_driver generic_NCR5380_isa_driver = {
},
};
-#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+#ifdef CONFIG_PNP
static struct pnp_device_id generic_NCR5380_pnp_ids[] = {
{ .id = "DTC436e", .driver_data = BOARD_DTC3181E },
{ .id = "" }
@@ -600,7 +588,7 @@ static struct pnp_driver generic_NCR5380_pnp_driver = {
.probe = generic_NCR5380_pnp_probe,
.remove = generic_NCR5380_pnp_remove,
};
-#endif /* !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) */
+#endif /* defined(CONFIG_PNP) */
static int pnp_registered, isa_registered;
@@ -624,7 +612,7 @@ static int __init generic_NCR5380_init(void)
card[0] = BOARD_HP_C2502;
}
-#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+#ifdef CONFIG_PNP
if (!pnp_register_driver(&generic_NCR5380_pnp_driver))
pnp_registered = 1;
#endif
@@ -637,7 +625,7 @@ static int __init generic_NCR5380_init(void)
static void __exit generic_NCR5380_exit(void)
{
-#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+#ifdef CONFIG_PNP
if (pnp_registered)
pnp_unregister_driver(&generic_NCR5380_pnp_driver);
#endif
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index b175b9234458..3ce5b65ccb00 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -14,49 +14,28 @@
#ifndef GENERIC_NCR5380_H
#define GENERIC_NCR5380_H
-#ifndef SCSI_G_NCR5380_MEM
#define DRV_MODULE_NAME "g_NCR5380"
#define NCR5380_read(reg) \
- inb(instance->io_port + (reg))
+ ioread8(hostdata->io + hostdata->offset + (reg))
#define NCR5380_write(reg, value) \
- outb(value, instance->io_port + (reg))
+ iowrite8(value, hostdata->io + hostdata->offset + (reg))
#define NCR5380_implementation_fields \
+ int offset; \
int c400_ctl_status; \
int c400_blk_cnt; \
int c400_host_buf; \
int io_width;
-#else
-/* therefore SCSI_G_NCR5380_MEM */
-#define DRV_MODULE_NAME "g_NCR5380_mmio"
-
#define NCR53C400_mem_base 0x3880
#define NCR53C400_host_buffer 0x3900
#define NCR53C400_region_size 0x3a00
-#define NCR5380_read(reg) \
- readb(((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \
- NCR53C400_mem_base + (reg))
-#define NCR5380_write(reg, value) \
- writeb(value, ((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \
- NCR53C400_mem_base + (reg))
-
-#define NCR5380_implementation_fields \
- void __iomem *iomem; \
- resource_size_t iomem_size; \
- int c400_ctl_status; \
- int c400_blk_cnt; \
- int c400_host_buf;
-
-#endif
-
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
- generic_NCR5380_dma_xfer_len(instance, cmd)
+#define NCR5380_dma_xfer_len generic_NCR5380_dma_xfer_len
#define NCR5380_dma_recv_setup generic_NCR5380_pread
#define NCR5380_dma_send_setup generic_NCR5380_pwrite
-#define NCR5380_dma_residual(instance) (0)
+#define NCR5380_dma_residual NCR5380_dma_residual_none
#define NCR5380_intr generic_NCR5380_intr
#define NCR5380_queue_command generic_NCR5380_queue_command
@@ -73,4 +52,3 @@
#define BOARD_HP_C2502 4
#endif /* GENERIC_NCR5380_H */
-
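The unified ioread8()/iowrite8() accessors above work because both board flavours hand the core a single __iomem cookie; a sketch of how the probe path could obtain it, assuming base and region_size were already reserved:

static void __iomem *map_ncr5380_regs(unsigned long base,
                                      unsigned long region_size, bool is_pmio)
{
        /* port I/O is wrapped by ioport_map(), MMIO goes through ioremap() */
        return is_pmio ? ioport_map(base, region_size)
                       : ioremap(base, region_size);
}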
diff --git a/drivers/scsi/g_NCR5380_mmio.c b/drivers/scsi/g_NCR5380_mmio.c
deleted file mode 100644
index 8cdde71ba0c8..000000000000
--- a/drivers/scsi/g_NCR5380_mmio.c
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * There is probably a nicer way to do this but this one makes
- * pretty obvious what is happening. We rebuild the same file with
- * different options for mmio versus pio.
- */
-
-#define SCSI_G_NCR5380_MEM
-
-#include "g_NCR5380.c"
-
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 72c98522bd26..c0cd505a9ef7 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -13,6 +13,7 @@
#define _HISI_SAS_H_
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -110,7 +111,7 @@ struct hisi_sas_device {
struct domain_device *sas_device;
u64 attached_phy;
u64 device_id;
- u64 running_req;
+ atomic64_t running_req;
u8 dev_status;
};
@@ -149,7 +150,8 @@ struct hisi_sas_hw {
struct domain_device *device);
struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
- int (*get_free_slot)(struct hisi_hba *hisi_hba, int *q, int *s);
+ int (*get_free_slot)(struct hisi_hba *hisi_hba, u32 dev_id,
+ int *q, int *s);
void (*start_delivery)(struct hisi_hba *hisi_hba);
int (*prep_ssp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot, int is_tmf,
@@ -166,6 +168,9 @@ struct hisi_sas_hw {
void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no);
+ void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
+ struct sas_phy_linkrates *linkrates);
+ enum sas_linkrate (*phy_get_max_linkrate)(void);
void (*free_device)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *dev);
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
@@ -183,6 +188,7 @@ struct hisi_hba {
u32 ctrl_reset_reg;
u32 ctrl_reset_sts_reg;
u32 ctrl_clock_ena_reg;
+ u32 refclk_frequency_mhz;
u8 sas_addr[SAS_ADDR_SIZE];
int n_phy;
@@ -205,7 +211,6 @@ struct hisi_hba {
struct hisi_sas_port port[HISI_SAS_MAX_PHYS];
int queue_count;
- int queue;
struct hisi_sas_slot *slot_prep;
struct dma_pool *sge_page_pool;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 2f872f784e10..d50e9cfefd24 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -162,8 +162,8 @@ out:
hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
if (task->task_done)
task->task_done(task);
- if (sas_dev && sas_dev->running_req)
- sas_dev->running_req--;
+ if (sas_dev)
+ atomic64_dec(&sas_dev->running_req);
}
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
@@ -232,8 +232,8 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
if (rc)
goto err_out;
- rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
- &dlvry_queue_slot);
+ rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
+ &dlvry_queue, &dlvry_queue_slot);
if (rc)
goto err_out_tag;
@@ -303,7 +303,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
hisi_hba->slot_prep = slot;
- sas_dev->running_req++;
+ atomic64_inc(&sas_dev->running_req);
++(*pass);
return 0;
@@ -369,9 +369,14 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
struct sas_phy *sphy = sas_phy->phy;
sphy->negotiated_linkrate = sas_phy->linkrate;
- sphy->minimum_linkrate = phy->minimum_linkrate;
sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
- sphy->maximum_linkrate = phy->maximum_linkrate;
+ sphy->maximum_linkrate_hw =
+ hisi_hba->hw->phy_get_max_linkrate();
+ if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
+ sphy->minimum_linkrate = phy->minimum_linkrate;
+
+ if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
+ sphy->maximum_linkrate = phy->maximum_linkrate;
}
if (phy->phy_type & PORT_TYPE_SAS) {
@@ -537,7 +542,7 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
struct hisi_sas_phy *phy = sas_phy->lldd_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- struct hisi_sas_port *port = &hisi_hba->port[sas_phy->id];
+ struct hisi_sas_port *port = &hisi_hba->port[phy->port_id];
unsigned long flags;
if (!sas_port)
@@ -645,6 +650,9 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
break;
case PHY_FUNC_SET_LINK_RATE:
+ hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
+ break;
+
case PHY_FUNC_RELEASE_SPINUP_HOLD:
default:
return -EOPNOTSUPP;
@@ -764,7 +772,8 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
task = NULL;
}
ex_err:
- WARN_ON(retry == TASK_RETRY);
+ if (retry == TASK_RETRY)
+ dev_warn(dev, "abort tmf: executing internal task failed!\n");
sas_free_task(task);
return res;
}
@@ -960,6 +969,9 @@ static int hisi_sas_query_task(struct sas_task *task)
case TMF_RESP_FUNC_FAILED:
case TMF_RESP_FUNC_COMPLETE:
break;
+ default:
+ rc = TMF_RESP_FUNC_FAILED;
+ break;
}
}
return rc;
@@ -987,8 +999,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
if (rc)
goto err_out;
- rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
- &dlvry_queue_slot);
+ rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
+ &dlvry_queue, &dlvry_queue_slot);
if (rc)
goto err_out_tag;
@@ -1023,7 +1035,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
hisi_hba->slot_prep = slot;
- sas_dev->running_req++;
+ atomic64_inc(&sas_dev->running_req);
+
/* send abort command to our chip */
hisi_hba->hw->start_delivery(hisi_hba);
@@ -1396,10 +1409,13 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
struct hisi_hba *hisi_hba;
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
+ struct clk *refclk;
shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba));
- if (!shost)
- goto err_out;
+ if (!shost) {
+ dev_err(dev, "scsi host alloc failed\n");
+ return NULL;
+ }
hisi_hba = shost_priv(shost);
hisi_hba->hw = hw;
@@ -1432,6 +1448,12 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
goto err_out;
}
+ refclk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(refclk))
+ dev_info(dev, "no ref clk property\n");
+ else
+ hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
+
if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy))
goto err_out;
@@ -1457,6 +1479,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
return shost;
err_out:
+ kfree(shost);
dev_err(dev, "shost alloc failed\n");
return NULL;
}
@@ -1483,10 +1506,8 @@ int hisi_sas_probe(struct platform_device *pdev,
int rc, phy_nr, port_nr, i;
shost = hisi_sas_shost_alloc(pdev, hw);
- if (!shost) {
- rc = -ENOMEM;
- goto err_out_ha;
- }
+ if (!shost)
+ return -ENOMEM;
sha = SHOST_TO_SAS_HA(shost);
hisi_hba = shost_priv(shost);
@@ -1496,12 +1517,13 @@ int hisi_sas_probe(struct platform_device *pdev,
arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
- if (!arr_phy || !arr_port)
- return -ENOMEM;
+ if (!arr_phy || !arr_port) {
+ rc = -ENOMEM;
+ goto err_out_ha;
+ }
sha->sas_phy = arr_phy;
sha->sas_port = arr_port;
- sha->core.shost = shost;
sha->lldd_ha = hisi_hba;
shost->transportt = hisi_sas_stt;
@@ -1546,6 +1568,7 @@ int hisi_sas_probe(struct platform_device *pdev,
err_out_register_ha:
scsi_remove_host(shost);
err_out_ha:
+ hisi_sas_free(hisi_hba);
kfree(shost);
return rc;
}
@@ -1555,12 +1578,14 @@ int hisi_sas_remove(struct platform_device *pdev)
{
struct sas_ha_struct *sha = platform_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct Scsi_Host *shost = sha->core.shost;
scsi_remove_host(sha->core.shost);
sas_unregister_ha(sha);
sas_remove_host(sha->core.shost);
hisi_sas_free(hisi_hba);
+ kfree(shost);
return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index c0ac49d8bc8d..8a1be0ba8a22 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -843,6 +843,49 @@ static void sl_notify_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
}
+static enum sas_linkrate phy_get_max_linkrate_v1_hw(void)
+{
+ return SAS_LINK_RATE_6_0_GBPS;
+}
+
+static void phy_set_linkrate_v1_hw(struct hisi_hba *hisi_hba, int phy_no,
+ struct sas_phy_linkrates *r)
+{
+ u32 prog_phy_link_rate =
+ hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ int i;
+ enum sas_linkrate min, max;
+ u32 rate_mask = 0;
+
+ if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+ max = sas_phy->phy->maximum_linkrate;
+ min = r->minimum_linkrate;
+ } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+ max = r->maximum_linkrate;
+ min = sas_phy->phy->minimum_linkrate;
+ } else
+ return;
+
+ sas_phy->phy->maximum_linkrate = max;
+ sas_phy->phy->minimum_linkrate = min;
+
+ min -= SAS_LINK_RATE_1_5_GBPS;
+ max -= SAS_LINK_RATE_1_5_GBPS;
+
+ for (i = 0; i <= max; i++)
+ rate_mask |= 1 << (i * 2);
+
+ prog_phy_link_rate &= ~0xff;
+ prog_phy_link_rate |= rate_mask;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
+ prog_phy_link_rate);
+
+ phy_hard_reset_v1_hw(hisi_hba, phy_no);
+}
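A worked sketch of the rate-mask arithmetic above: link rates are numbered consecutively from 1.5 Gbps and each supported rate occupies every other bit of the low byte of PROG_PHY_LINK_RATE, so a 6.0 Gbps maximum (two steps above 1.5 Gbps) gives 0x15, i.e. bits 0, 2 and 4 (hypothetical helper):

static u32 linkrate_mask(enum sas_linkrate max)
{
        u32 mask = 0;
        int i;

        for (i = 0; i <= max - SAS_LINK_RATE_1_5_GBPS; i++)
                mask |= 1 << (i * 2);   /* one bit pair per supported rate */
        return mask;
}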
+
static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
{
int i, bitmap = 0;
@@ -862,29 +905,23 @@ static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
* The call path to this function, and everything up to writing the write
* queue pointer, should be safe from interruption.
*/
-static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, int *q, int *s)
+static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, u32 dev_id,
+ int *q, int *s)
{
struct device *dev = &hisi_hba->pdev->dev;
struct hisi_sas_dq *dq;
u32 r, w;
- int queue = hisi_hba->queue;
-
- while (1) {
- dq = &hisi_hba->dq[queue];
- w = dq->wr_point;
- r = hisi_sas_read32_relaxed(hisi_hba,
- DLVRY_Q_0_RD_PTR + (queue * 0x14));
- if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
- queue = (queue + 1) % hisi_hba->queue_count;
- if (queue == hisi_hba->queue) {
- dev_warn(dev, "could not find free slot\n");
- return -EAGAIN;
- }
- continue;
- }
- break;
+ int queue = dev_id % hisi_hba->queue_count;
+
+ dq = &hisi_hba->dq[queue];
+ w = dq->wr_point;
+ r = hisi_sas_read32_relaxed(hisi_hba,
+ DLVRY_Q_0_RD_PTR + (queue * 0x14));
+ if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
+ dev_warn(dev, "could not find free slot\n");
+ return -EAGAIN;
}
- hisi_hba->queue = (queue + 1) % hisi_hba->queue_count;
+
*q = queue;
*s = w;
return 0;
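Condensed, the new slot selection above amounts to the following (hypothetical helper; ring_depth corresponds to HISI_SAS_QUEUE_SLOTS and rd_ptr/wr_ptr are the chosen queue's pointers):

static int pick_delivery_slot(u32 dev_id, int queue_count, u32 rd_ptr,
                              u32 wr_ptr, u32 ring_depth, int *q, int *s)
{
        int queue = dev_id % queue_count;       /* stable per-device queue */

        if (rd_ptr == (wr_ptr + 1) % ring_depth)
                return -EAGAIN;                 /* ring is full, caller backs off */
        *q = queue;
        *s = wr_ptr;
        return 0;
}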
@@ -1372,8 +1409,8 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
}
out:
- if (sas_dev && sas_dev->running_req)
- sas_dev->running_req--;
+ if (sas_dev)
+ atomic64_dec(&sas_dev->running_req);
hisi_sas_slot_task_free(hisi_hba, task, slot);
sts = ts->stat;
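
Here the per-device running_req bookkeeping changes from a guarded plain decrement to an unconditional atomic64_dec(), so completion paths running on different CPUs cannot lose updates. A user-space sketch of the same idea using C11 atomics (names are illustrative, not driver symbols):

/*
 * User-space model of the running_req change: a plain "if (count) count--"
 * can race between completion paths, while an atomic decrement cannot lose
 * updates. This is a sketch with illustrative names, not driver code.
 */
#include <stdatomic.h>
#include <stdio.h>

struct model_device {
	atomic_llong running_req;	/* stands in for atomic64_t */
};

static void start_io(struct model_device *dev)
{
	atomic_fetch_add(&dev->running_req, 1);
}

static void complete_io(struct model_device *dev)
{
	/* mirrors atomic64_dec(&sas_dev->running_req) in the driver */
	atomic_fetch_sub(&dev->running_req, 1);
}

int main(void)
{
	struct model_device dev = { .running_req = 2 };

	complete_io(&dev);
	start_io(&dev);
	printf("running_req = %lld\n", (long long)atomic_load(&dev.running_req));
	return 0;
}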
@@ -1824,6 +1861,8 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
.phy_enable = enable_phy_v1_hw,
.phy_disable = disable_phy_v1_hw,
.phy_hard_reset = phy_hard_reset_v1_hw,
+ .phy_set_linkrate = phy_set_linkrate_v1_hw,
+ .phy_get_max_linkrate = phy_get_max_linkrate_v1_hw,
.get_wideport_bitmap = get_wideport_bitmap_v1_hw,
.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr),
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 9825a3f49f53..b934aec1eebb 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -55,10 +55,44 @@
#define HGC_DFX_CFG2 0xc0
#define HGC_IOMB_PROC1_STATUS 0x104
#define CFG_1US_TIMER_TRSH 0xcc
+#define HGC_LM_DFX_STATUS2 0x128
+#define HGC_LM_DFX_STATUS2_IOSTLIST_OFF 0
+#define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \
+ HGC_LM_DFX_STATUS2_IOSTLIST_OFF)
+#define HGC_LM_DFX_STATUS2_ITCTLIST_OFF 12
+#define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \
+ HGC_LM_DFX_STATUS2_ITCTLIST_OFF)
+#define HGC_CQE_ECC_ADDR 0x13c
+#define HGC_CQE_ECC_1B_ADDR_OFF 0
+#define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << HGC_CQE_ECC_1B_ADDR_OFF)
+#define HGC_CQE_ECC_MB_ADDR_OFF 8
+#define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF)
+#define HGC_IOST_ECC_ADDR 0x140
+#define HGC_IOST_ECC_1B_ADDR_OFF 0
+#define HGC_IOST_ECC_1B_ADDR_MSK (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF)
+#define HGC_IOST_ECC_MB_ADDR_OFF 16
+#define HGC_IOST_ECC_MB_ADDR_MSK (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF)
+#define HGC_DQE_ECC_ADDR 0x144
+#define HGC_DQE_ECC_1B_ADDR_OFF 0
+#define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF)
+#define HGC_DQE_ECC_MB_ADDR_OFF 16
+#define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
#define HGC_INVLD_DQE_INFO 0x148
#define HGC_INVLD_DQE_INFO_FB_CH0_OFF 9
#define HGC_INVLD_DQE_INFO_FB_CH0_MSK (0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF)
#define HGC_INVLD_DQE_INFO_FB_CH3_OFF 18
+#define HGC_ITCT_ECC_ADDR 0x150
+#define HGC_ITCT_ECC_1B_ADDR_OFF 0
+#define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \
+ HGC_ITCT_ECC_1B_ADDR_OFF)
+#define HGC_ITCT_ECC_MB_ADDR_OFF 16
+#define HGC_ITCT_ECC_MB_ADDR_MSK (0x3ff << \
+ HGC_ITCT_ECC_MB_ADDR_OFF)
+#define HGC_AXI_FIFO_ERR_INFO 0x154
+#define AXI_ERR_INFO_OFF 0
+#define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF)
+#define FIFO_ERR_INFO_OFF 8
+#define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF)
#define INT_COAL_EN 0x19c
#define OQ_INT_COAL_TIME 0x1a0
#define OQ_INT_COAL_CNT 0x1a4
@@ -73,13 +107,41 @@
#define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
#define ENT_INT_SRC2 0x1bc
#define ENT_INT_SRC3 0x1c0
+#define ENT_INT_SRC3_WP_DEPTH_OFF 8
+#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9
+#define ENT_INT_SRC3_RP_DEPTH_OFF 10
+#define ENT_INT_SRC3_AXI_OFF 11
+#define ENT_INT_SRC3_FIFO_OFF 12
+#define ENT_INT_SRC3_LM_OFF 14
#define ENT_INT_SRC3_ITC_INT_OFF 15
#define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
+#define ENT_INT_SRC3_ABT_OFF 16
#define ENT_INT_SRC_MSK1 0x1c4
#define ENT_INT_SRC_MSK2 0x1c8
#define ENT_INT_SRC_MSK3 0x1cc
#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31
#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
+#define SAS_ECC_INTR 0x1e8
+#define SAS_ECC_INTR_DQE_ECC_1B_OFF 0
+#define SAS_ECC_INTR_DQE_ECC_MB_OFF 1
+#define SAS_ECC_INTR_IOST_ECC_1B_OFF 2
+#define SAS_ECC_INTR_IOST_ECC_MB_OFF 3
+#define SAS_ECC_INTR_ITCT_ECC_MB_OFF 4
+#define SAS_ECC_INTR_ITCT_ECC_1B_OFF 5
+#define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF 6
+#define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF 7
+#define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF 8
+#define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF 9
+#define SAS_ECC_INTR_CQE_ECC_1B_OFF 10
+#define SAS_ECC_INTR_CQE_ECC_MB_OFF 11
+#define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF 12
+#define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF 13
+#define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF 14
+#define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF 15
+#define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF 16
+#define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF 17
+#define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF 18
+#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 19
#define SAS_ECC_INTR_MSK 0x1ec
#define HGC_ERR_STAT_EN 0x238
#define DLVRY_Q_0_BASE_ADDR_LO 0x260
@@ -94,7 +156,20 @@
#define COMPL_Q_0_DEPTH 0x4e8
#define COMPL_Q_0_WR_PTR 0x4ec
#define COMPL_Q_0_RD_PTR 0x4f0
-
+#define HGC_RXM_DFX_STATUS14 0xae8
+#define HGC_RXM_DFX_STATUS14_MEM0_OFF 0
+#define HGC_RXM_DFX_STATUS14_MEM0_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS14_MEM0_OFF)
+#define HGC_RXM_DFX_STATUS14_MEM1_OFF 9
+#define HGC_RXM_DFX_STATUS14_MEM1_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS14_MEM1_OFF)
+#define HGC_RXM_DFX_STATUS14_MEM2_OFF 18
+#define HGC_RXM_DFX_STATUS14_MEM2_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS14_MEM2_OFF)
+#define HGC_RXM_DFX_STATUS15 0xaec
+#define HGC_RXM_DFX_STATUS15_MEM3_OFF 0
+#define HGC_RXM_DFX_STATUS15_MEM3_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS15_MEM3_OFF)
/* phy registers need init */
#define PORT_BASE (0x2000)
@@ -119,6 +194,9 @@
#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
#define SL_CONTROL_CTA_OFF 17
#define SL_CONTROL_CTA_MSK (0x1 << SL_CONTROL_CTA_OFF)
+#define RX_PRIMS_STATUS (PORT_BASE + 0x98)
+#define RX_BCAST_CHG_OFF 1
+#define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF)
#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
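
All of the new register definitions above follow the driver's existing _OFF/_MSK convention: a field is decoded as (value & MSK) >> OFF. A standalone sketch of that decode, copying one macro pair from the hunk and treating everything else as illustrative:

/*
 * Standalone sketch of the _OFF/_MSK decode used throughout these new
 * definitions. The two macros below are copied from the hunk above; the
 * register value and helper are illustrative.
 */
#include <stdio.h>

#define HGC_DQE_ECC_MB_ADDR_OFF	16
#define HGC_DQE_ECC_MB_ADDR_MSK	(0xfff << HGC_DQE_ECC_MB_ADDR_OFF)

static unsigned int dqe_ecc_mb_addr(unsigned int reg_val)
{
	/* isolate bits [27:16] and shift them down to get the RAM address */
	return (reg_val & HGC_DQE_ECC_MB_ADDR_MSK) >> HGC_DQE_ECC_MB_ADDR_OFF;
}

int main(void)
{
	unsigned int reg = 0x0123A5C3;	/* arbitrary register snapshot */

	printf("multi-bit ECC address: 0x%03x\n", dqe_ecc_mb_addr(reg));
	return 0;
}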
@@ -267,6 +345,8 @@
#define ITCT_HDR_RTOLT_OFF 48
#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)
+#define HISI_SAS_FATAL_INT_NR 2
+
struct hisi_sas_complete_v2_hdr {
__le32 dw0;
__le32 dw1;
@@ -659,8 +739,6 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
qw0 &= ~(1 << ITCT_HDR_VALID_OFF);
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
ENT_INT_SRC3_ITC_INT_MSK);
- hisi_hba->devices[dev_id].dev_type = SAS_PHY_UNUSED;
- hisi_hba->devices[dev_id].dev_status = HISI_SAS_DEV_NORMAL;
/* clear the itct */
hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
@@ -808,7 +886,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe);
- hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfffff3c0);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
for (i = 0; i < hisi_hba->queue_count; i++)
hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
@@ -824,7 +902,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
@@ -836,7 +914,9 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
- hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694);
+ if (hisi_hba->refclk_frequency_mhz == 66)
+ hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694);
+ /* else, do nothing -> leave it how you found it */
}
for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -980,6 +1060,49 @@ static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
}
+static enum sas_linkrate phy_get_max_linkrate_v2_hw(void)
+{
+ return SAS_LINK_RATE_12_0_GBPS;
+}
+
+static void phy_set_linkrate_v2_hw(struct hisi_hba *hisi_hba, int phy_no,
+ struct sas_phy_linkrates *r)
+{
+ u32 prog_phy_link_rate =
+ hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ int i;
+ enum sas_linkrate min, max;
+ u32 rate_mask = 0;
+
+ if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+ max = sas_phy->phy->maximum_linkrate;
+ min = r->minimum_linkrate;
+ } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+ max = r->maximum_linkrate;
+ min = sas_phy->phy->minimum_linkrate;
+ } else
+ return;
+
+ sas_phy->phy->maximum_linkrate = max;
+ sas_phy->phy->minimum_linkrate = min;
+
+ min -= SAS_LINK_RATE_1_5_GBPS;
+ max -= SAS_LINK_RATE_1_5_GBPS;
+
+ for (i = 0; i <= max; i++)
+ rate_mask |= 1 << (i * 2);
+
+ prog_phy_link_rate &= ~0xff;
+ prog_phy_link_rate |= rate_mask;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
+ prog_phy_link_rate);
+
+ phy_hard_reset_v2_hw(hisi_hba, phy_no);
+}
+
static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
{
int i, bitmap = 0;
@@ -1010,29 +1133,24 @@ static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
* The call path to this function and up to writing the write
* queue pointer should be safe from interruption.
*/
-static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, int *q, int *s)
+static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, u32 dev_id,
+ int *q, int *s)
{
struct device *dev = &hisi_hba->pdev->dev;
struct hisi_sas_dq *dq;
u32 r, w;
- int queue = hisi_hba->queue;
-
- while (1) {
- dq = &hisi_hba->dq[queue];
- w = dq->wr_point;
- r = hisi_sas_read32_relaxed(hisi_hba,
- DLVRY_Q_0_RD_PTR + (queue * 0x14));
- if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
- queue = (queue + 1) % hisi_hba->queue_count;
- if (queue == hisi_hba->queue) {
- dev_warn(dev, "could not find free slot\n");
- return -EAGAIN;
- }
- continue;
- }
- break;
+ int queue = dev_id % hisi_hba->queue_count;
+
+ dq = &hisi_hba->dq[queue];
+ w = dq->wr_point;
+ r = hisi_sas_read32_relaxed(hisi_hba,
+ DLVRY_Q_0_RD_PTR + (queue * 0x14));
+ if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
+ dev_warn(dev, "full queue=%d r=%d w=%d\n\n",
+ queue, r, w);
+ return -EAGAIN;
}
- hisi_hba->queue = (queue + 1) % hisi_hba->queue_count;
+
*q = queue;
*s = w;
return 0;
@@ -1653,8 +1771,8 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
}
out:
- if (sas_dev && sas_dev->running_req)
- sas_dev->running_req--;
+ if (sas_dev)
+ atomic64_dec(&sas_dev->running_req);
hisi_sas_slot_task_free(hisi_hba, task, slot);
sts = ts->stat;
@@ -1675,6 +1793,7 @@ static u8 get_ata_protocol(u8 cmd, int direction)
case ATA_CMD_NCQ_NON_DATA:
return SATA_PROTOCOL_FPDMA;
+ case ATA_CMD_DOWNLOAD_MICRO:
case ATA_CMD_ID_ATA:
case ATA_CMD_PMP_READ:
case ATA_CMD_READ_LOG_EXT:
@@ -1686,18 +1805,27 @@ static u8 get_ata_protocol(u8 cmd, int direction)
case ATA_CMD_PIO_WRITE_EXT:
return SATA_PROTOCOL_PIO;
+ case ATA_CMD_DSM:
+ case ATA_CMD_DOWNLOAD_MICRO_DMA:
+ case ATA_CMD_PMP_READ_DMA:
+ case ATA_CMD_PMP_WRITE_DMA:
case ATA_CMD_READ:
case ATA_CMD_READ_EXT:
case ATA_CMD_READ_LOG_DMA_EXT:
+ case ATA_CMD_READ_STREAM_DMA_EXT:
+ case ATA_CMD_TRUSTED_RCV_DMA:
+ case ATA_CMD_TRUSTED_SND_DMA:
case ATA_CMD_WRITE:
case ATA_CMD_WRITE_EXT:
+ case ATA_CMD_WRITE_FUA_EXT:
case ATA_CMD_WRITE_QUEUED:
case ATA_CMD_WRITE_LOG_DMA_EXT:
+ case ATA_CMD_WRITE_STREAM_DMA_EXT:
return SATA_PROTOCOL_DMA;
- case ATA_CMD_DOWNLOAD_MICRO:
- case ATA_CMD_DEV_RESET:
case ATA_CMD_CHK_POWER:
+ case ATA_CMD_DEV_RESET:
+ case ATA_CMD_EDD:
case ATA_CMD_FLUSH:
case ATA_CMD_FLUSH_EXT:
case ATA_CMD_VERIFY:
@@ -1970,9 +2098,12 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ u32 bcast_status;
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
+ if (bcast_status & RX_BCAST_CHG_MSK)
+ sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
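
phy_bcast_v2_hw() now samples RX_PRIMS_STATUS and forwards the event to libsas only when the broadcast-change bit is set, so broadcasts that do not signal a topology change no longer trigger rediscovery. A standalone sketch of that filter (the two macros mirror the hunk; the notify function is a stand-in):

/*
 * Standalone sketch of the new broadcast filtering: the handler forwards an
 * event only when the "change" bit in the primitive status is set.
 * notify_port_event() here is just a stand-in for the libsas callback.
 */
#include <stdio.h>

#define RX_BCAST_CHG_OFF	1
#define RX_BCAST_CHG_MSK	(0x1 << RX_BCAST_CHG_OFF)

static void notify_port_event(int phy_no)
{
	printf("phy%d: broadcast (change) forwarded to the SAS layer\n", phy_no);
}

static void handle_bcast(int phy_no, unsigned int rx_prims_status)
{
	/* only a BROADCAST (CHANGE) primitive should trigger rediscovery */
	if (rx_prims_status & RX_BCAST_CHG_MSK)
		notify_port_event(phy_no);
	else
		printf("phy%d: broadcast ignored, no change bit\n", phy_no);
}

int main(void)
{
	handle_bcast(0, 0x2);	/* change bit set */
	handle_bcast(1, 0x0);	/* no change bit  */
	return 0;
}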
@@ -2005,8 +2136,9 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
if (irq_value1) {
if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
CHL_INT1_DMAC_TX_ECC_ERR_MSK))
- panic("%s: DMAC RX/TX ecc bad error! (0x%x)",
- dev_name(dev), irq_value1);
+ panic("%s: DMAC RX/TX ecc bad error!\
+ (0x%x)",
+ dev_name(dev), irq_value1);
hisi_sas_phy_write32(hisi_hba, phy_no,
CHL_INT1, irq_value1);
@@ -2037,6 +2169,318 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
return IRQ_HANDLED;
}
+static void
+one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value)
+{
+ struct device *dev = &hisi_hba->pdev->dev;
+ u32 reg_val;
+
+ if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
+ dev_warn(dev, "hgc_dqe_acc1b_intr found: \
+ Ram address is 0x%08X\n",
+ (reg_val & HGC_DQE_ECC_1B_ADDR_MSK) >>
+ HGC_DQE_ECC_1B_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
+ dev_warn(dev, "hgc_iost_acc1b_intr found: \
+ Ram address is 0x%08X\n",
+ (reg_val & HGC_IOST_ECC_1B_ADDR_MSK) >>
+ HGC_IOST_ECC_1B_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
+ dev_warn(dev, "hgc_itct_acc1b_intr found: \
+ Ram address is 0x%08X\n",
+ (reg_val & HGC_ITCT_ECC_1B_ADDR_MSK) >>
+ HGC_ITCT_ECC_1B_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+ dev_warn(dev, "hgc_iostl_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
+ HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+ dev_warn(dev, "hgc_itctl_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
+ HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
+ dev_warn(dev, "hgc_cqe_acc1b_intr found: \
+ Ram address is 0x%08X\n",
+ (reg_val & HGC_CQE_ECC_1B_ADDR_MSK) >>
+ HGC_CQE_ECC_1B_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ dev_warn(dev, "rxm_mem0_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM0_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ dev_warn(dev, "rxm_mem1_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM1_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ dev_warn(dev, "rxm_mem2_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM2_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
+ dev_warn(dev, "rxm_mem3_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
+ HGC_RXM_DFX_STATUS15_MEM3_OFF);
+ }
+
+}
+
+static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
+ u32 irq_value)
+{
+ u32 reg_val;
+ struct device *dev = &hisi_hba->pdev->dev;
+
+ if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
+ panic("%s: hgc_dqe_accbad_intr (0x%x) found: \
+ Ram address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_DQE_ECC_MB_ADDR_MSK) >>
+ HGC_DQE_ECC_MB_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
+ panic("%s: hgc_iost_accbad_intr (0x%x) found: \
+ Ram address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_IOST_ECC_MB_ADDR_MSK) >>
+ HGC_IOST_ECC_MB_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
+ panic("%s: hgc_itct_accbad_intr (0x%x) found: \
+ Ram address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_ITCT_ECC_MB_ADDR_MSK) >>
+ HGC_ITCT_ECC_MB_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+ panic("%s: hgc_iostl_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
+ HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+ panic("%s: hgc_itctl_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
+ HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
+ panic("%s: hgc_cqe_accbad_intr (0x%x) found: \
+ Ram address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_CQE_ECC_MB_ADDR_MSK) >>
+ HGC_CQE_ECC_MB_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ panic("%s: rxm_mem0_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM0_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ panic("%s: rxm_mem1_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM1_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ panic("%s: rxm_mem2_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM2_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
+ panic("%s: rxm_mem3_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
+ HGC_RXM_DFX_STATUS15_MEM3_OFF);
+ }
+
+}
+
+static irqreturn_t fatal_ecc_int_v2_hw(int irq_no, void *p)
+{
+ struct hisi_hba *hisi_hba = p;
+ u32 irq_value, irq_msk;
+
+ irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff);
+
+ irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);
+ if (irq_value) {
+ one_bit_ecc_error_process_v2_hw(hisi_hba, irq_value);
+ multi_bit_ecc_error_process_v2_hw(hisi_hba, irq_value);
+ }
+
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk);
+
+ return IRQ_HANDLED;
+}
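
fatal_ecc_int_v2_hw() above follows a common ISR shape: save and raise the interrupt mask, read the source register, hand recoverable one-bit errors to the warning path and unrecoverable multi-bit errors to the panic path, write the value back to clear the sources, then restore the mask. A standalone model of that sequence with the registers reduced to an array (all names illustrative):

/*
 * Standalone model of the mask/handle/clear/restore sequence in
 * fatal_ecc_int_v2_hw(). Registers are modelled as a small array; names and
 * behaviour of the handlers are illustrative only.
 */
#include <stdio.h>

enum { REG_ECC_INTR, REG_ECC_INTR_MSK, REG_MAX };

static unsigned int regs[REG_MAX];

static unsigned int reg_read(int r)		{ return regs[r]; }
static void reg_write(int r, unsigned int v)	{ regs[r] = v; }

static void handle_one_bit(unsigned int src)	{ printf("warn: 1-bit ECC 0x%x\n", src); }
static void handle_multi_bit(unsigned int src)	{ printf("fatal: multi-bit ECC 0x%x\n", src); }

static void ecc_irq_model(void)
{
	unsigned int msk = reg_read(REG_ECC_INTR_MSK);
	unsigned int src;

	reg_write(REG_ECC_INTR_MSK, msk | 0xffffffff);	/* mask everything   */

	src = reg_read(REG_ECC_INTR);
	if (src) {
		handle_one_bit(src);			/* recoverable bits   */
		handle_multi_bit(src);			/* unrecoverable bits */
	}

	reg_write(REG_ECC_INTR, src);			/* write back to clear */
	reg_write(REG_ECC_INTR_MSK, msk);		/* restore old mask    */
}

int main(void)
{
	regs[REG_ECC_INTR] = 0x5;	/* pretend two ECC sources fired */
	ecc_irq_model();
	return 0;
}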
+
+#define AXI_ERR_NR 8
+static const char axi_err_info[AXI_ERR_NR][32] = {
+ "IOST_AXI_W_ERR",
+ "IOST_AXI_R_ERR",
+ "ITCT_AXI_W_ERR",
+ "ITCT_AXI_R_ERR",
+ "SATA_AXI_W_ERR",
+ "SATA_AXI_R_ERR",
+ "DQE_AXI_R_ERR",
+ "CQE_AXI_W_ERR"
+};
+
+#define FIFO_ERR_NR 5
+static const char fifo_err_info[FIFO_ERR_NR][32] = {
+ "CQE_WINFO_FIFO",
+ "CQE_MSG_FIFIO",
+ "GETDQE_FIFO",
+ "CMDP_FIFO",
+ "AWTCTRL_FIFO"
+};
+
+static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
+{
+ struct hisi_hba *hisi_hba = p;
+ u32 irq_value, irq_msk, err_value;
+ struct device *dev = &hisi_hba->pdev->dev;
+
+ irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe);
+
+ irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ if (irq_value) {
+ if (irq_value & BIT(ENT_INT_SRC3_WP_DEPTH_OFF)) {
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_WP_DEPTH_OFF);
+ panic("%s: write pointer and depth error (0x%x) \
+ found!\n",
+ dev_name(dev), irq_value);
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF)) {
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 <<
+ ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF);
+ panic("%s: iptt no match slot error (0x%x) found!\n",
+ dev_name(dev), irq_value);
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_RP_DEPTH_OFF))
+ panic("%s: read pointer and depth error (0x%x) \
+ found!\n",
+ dev_name(dev), irq_value);
+
+ if (irq_value & BIT(ENT_INT_SRC3_AXI_OFF)) {
+ int i;
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_AXI_OFF);
+ err_value = hisi_sas_read32(hisi_hba,
+ HGC_AXI_FIFO_ERR_INFO);
+
+ for (i = 0; i < AXI_ERR_NR; i++) {
+ if (err_value & BIT(i))
+ panic("%s: %s (0x%x) found!\n",
+ dev_name(dev),
+ axi_err_info[i], irq_value);
+ }
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_FIFO_OFF)) {
+ int i;
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_FIFO_OFF);
+ err_value = hisi_sas_read32(hisi_hba,
+ HGC_AXI_FIFO_ERR_INFO);
+
+ for (i = 0; i < FIFO_ERR_NR; i++) {
+ if (err_value & BIT(AXI_ERR_NR + i))
+ panic("%s: %s (0x%x) found!\n",
+ dev_name(dev),
+ fifo_err_info[i], irq_value);
+ }
+
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_LM_OFF)) {
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_LM_OFF);
+ panic("%s: LM add/fetch list error (0x%x) found!\n",
+ dev_name(dev), irq_value);
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_ABT_OFF)) {
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_ABT_OFF);
+ panic("%s: SAS_HGC_ABT fetch LM list error (0x%x) found!\n",
+ dev_name(dev), irq_value);
+ }
+ }
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);
+
+ return IRQ_HANDLED;
+}
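
fatal_axi_int_v2_hw() decodes HGC_AXI_FIFO_ERR_INFO with the two string tables above: bit i of the low byte names an AXI error and bit 8 + i names a FIFO error. A standalone sketch of that decode, reusing the table contents verbatim (including the FIFIO spelling) and treating the rest as illustrative:

/*
 * Standalone sketch of the HGC_AXI_FIFO_ERR_INFO decode: bit i in the low
 * byte selects axi_err_info[i], and bit (8 + i) selects fifo_err_info[i].
 * The table strings are copied from the hunk; the rest is illustrative.
 */
#include <stdio.h>

#define AXI_ERR_NR 8
static const char *const axi_err_info[AXI_ERR_NR] = {
	"IOST_AXI_W_ERR", "IOST_AXI_R_ERR", "ITCT_AXI_W_ERR", "ITCT_AXI_R_ERR",
	"SATA_AXI_W_ERR", "SATA_AXI_R_ERR", "DQE_AXI_R_ERR",  "CQE_AXI_W_ERR",
};

#define FIFO_ERR_NR 5
static const char *const fifo_err_info[FIFO_ERR_NR] = {
	"CQE_WINFO_FIFO", "CQE_MSG_FIFIO", "GETDQE_FIFO", "CMDP_FIFO",
	"AWTCTRL_FIFO",
};

static void decode_err_info(unsigned int err_value)
{
	int i;

	for (i = 0; i < AXI_ERR_NR; i++)
		if (err_value & (1u << i))
			printf("AXI error:  %s\n", axi_err_info[i]);

	for (i = 0; i < FIFO_ERR_NR; i++)
		if (err_value & (1u << (AXI_ERR_NR + i)))
			printf("FIFO error: %s\n", fifo_err_info[i]);
}

int main(void)
{
	decode_err_info(0x103);	/* bits 0, 1 and 8 set */
	return 0;
}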
+
static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
{
struct hisi_sas_cq *cq = p;
@@ -2136,6 +2580,16 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
goto end;
}
+ /* check ERR bit of Status Register */
+ if (fis->status & ATA_ERR) {
+ dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no,
+ fis->status);
+ disable_phy_v2_hw(hisi_hba, phy_no);
+ enable_phy_v2_hw(hisi_hba, phy_no);
+ res = IRQ_NONE;
+ goto end;
+ }
+
if (unlikely(phy_no == 8)) {
u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
@@ -2190,6 +2644,11 @@ static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = {
int_chnl_int_v2_hw,
};
+static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
+ fatal_ecc_int_v2_hw,
+ fatal_axi_int_v2_hw
+};
+
/**
* There is a limitation in the hip06 chipset that we need
* to map in all mbigen interrupts, even if they are not used.
@@ -2245,6 +2704,26 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
}
}
+ for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++) {
+ int idx = i;
+
+ irq = irq_map[idx + 81];
+ if (!irq) {
+ dev_err(dev, "irq init: fail map fatal interrupt %d\n",
+ idx);
+ return -ENOENT;
+ }
+
+ rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0,
+ DRV_NAME " fatal", hisi_hba);
+ if (rc) {
+ dev_err(dev,
+ "irq init: could not request fatal interrupt %d, rc=%d\n",
+ irq, rc);
+ return -ENOENT;
+ }
+ }
+
for (i = 0; i < hisi_hba->queue_count; i++) {
int idx = i + 96; /* First cq interrupt is irq96 */
@@ -2303,12 +2782,26 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.phy_enable = enable_phy_v2_hw,
.phy_disable = disable_phy_v2_hw,
.phy_hard_reset = phy_hard_reset_v2_hw,
+ .phy_set_linkrate = phy_set_linkrate_v2_hw,
+ .phy_get_max_linkrate = phy_get_max_linkrate_v2_hw,
.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
};
static int hisi_sas_v2_probe(struct platform_device *pdev)
{
+ /*
+ * Check if we should defer the probe before we probe the
+ * upper layer, as it's hard to defer later on.
+ */
+ int ret = platform_get_irq(pdev, 0);
+
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "cannot obtain irq\n");
+ return ret;
+ }
+
return hisi_sas_probe(pdev, &hisi_sas_v2_hw);
}
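
The added check calls platform_get_irq() before anything heavyweight, so an interrupt that is not yet available surfaces as -EPROBE_DEFER while deferring is still cheap, rather than deep inside host registration where it is hard to unwind. A standalone model of that ordering, with a stand-in error code and hypothetical names:

/*
 * Standalone model of the early-defer check added to hisi_sas_v2_probe():
 * probe the cheapest external dependency first and propagate "try again
 * later" before any expensive registration. Names are illustrative;
 * MODEL_EPROBE_DEFER stands in for -EPROBE_DEFER.
 */
#include <stdio.h>

#define MODEL_EPROBE_DEFER	(-517)

static int model_get_irq(int available)
{
	return available ? 42 : MODEL_EPROBE_DEFER;
}

static int model_heavy_probe(void)
{
	printf("registering host, queues, interrupts...\n");
	return 0;
}

static int model_probe(int irq_controller_ready)
{
	int ret = model_get_irq(irq_controller_ready);

	if (ret < 0) {
		if (ret != MODEL_EPROBE_DEFER)
			printf("cannot obtain irq (%d)\n", ret);
		return ret;	/* defer silently, fail loudly otherwise */
	}
	return model_heavy_probe();
}

int main(void)
{
	printf("first attempt:  %d\n", model_probe(0));
	printf("second attempt: %d\n", model_probe(1));
	return 0;
}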
@@ -2319,6 +2812,7 @@ static int hisi_sas_v2_remove(struct platform_device *pdev)
static const struct of_device_id sas_v2_of_match[] = {
{ .compatible = "hisilicon,hip06-sas-v2",},
+ { .compatible = "hisilicon,hip07-sas-v2",},
{},
};
MODULE_DEVICE_TABLE(of, sas_v2_of_match);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index a1d6ab76a514..691a09316952 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -276,6 +276,9 @@ static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
+static int wait_for_device_to_become_ready(struct ctlr_info *h,
+ unsigned char lunaddr[],
+ int reply_queue);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
@@ -700,9 +703,7 @@ static ssize_t lunid_show(struct device *dev,
}
memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
spin_unlock_irqrestore(&h->lock, flags);
- return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- lunid[0], lunid[1], lunid[2], lunid[3],
- lunid[4], lunid[5], lunid[6], lunid[7]);
+ return snprintf(buf, 20, "0x%8phN\n", lunid);
}
static ssize_t unique_id_show(struct device *dev,
@@ -864,6 +865,16 @@ static ssize_t path_info_show(struct device *dev,
return output_len;
}
+static ssize_t host_show_ctlr_num(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ h = shost_to_hba(shost);
+ return snprintf(buf, 20, "%d\n", h->ctlr);
+}
+
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
@@ -887,6 +898,8 @@ static DEVICE_ATTR(resettable, S_IRUGO,
host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
host_show_lockup_detected, NULL);
+static DEVICE_ATTR(ctlr_num, S_IRUGO,
+ host_show_ctlr_num, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
&dev_attr_raid_level,
@@ -907,6 +920,7 @@ static struct device_attribute *hpsa_shost_attrs[] = {
&dev_attr_hp_ssd_smart_path_status,
&dev_attr_raid_offload_debug,
&dev_attr_lockup_detected,
+ &dev_attr_ctlr_num,
NULL,
};
@@ -1001,7 +1015,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
{
if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
- if (unlikely(!h->msix_vector))
+ if (unlikely(!h->msix_vectors))
return;
if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
c->Header.ReplyQueue =
@@ -2541,7 +2555,7 @@ static void complete_scsi_command(struct CommandList *cp)
if ((unlikely(hpsa_is_pending_event(cp)))) {
if (cp->reset_pending)
- return hpsa_cmd_resolve_and_free(h, cp);
+ return hpsa_cmd_free_and_done(h, cp, cmd);
if (cp->abort_pending)
return hpsa_cmd_abort_and_free(h, cp, cmd);
}
@@ -2824,14 +2838,8 @@ static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
const u8 *cdb = c->Request.CDB;
const u8 *lun = c->Header.LUN.LunAddrBytes;
- dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
- " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- txt, lun[0], lun[1], lun[2], lun[3],
- lun[4], lun[5], lun[6], lun[7],
- cdb[0], cdb[1], cdb[2], cdb[3],
- cdb[4], cdb[5], cdb[6], cdb[7],
- cdb[8], cdb[9], cdb[10], cdb[11],
- cdb[12], cdb[13], cdb[14], cdb[15]);
+ dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
+ txt, lun, cdb);
}
static void hpsa_scsi_interpret_error(struct ctlr_info *h,
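
The hpsa hunks above replace long %02x chains with the kernel's %phN printk extension, which dumps a small buffer as contiguous hex (%8phN for LUN ids, %16phN for CDBs). Standard printf has no such conversion, so a user-space analogue needs a small helper; a sketch with illustrative names:

/*
 * User-space analogue of the kernel's %8phN / %16phN conversions used above:
 * a helper formats a small byte buffer as contiguous hex. All names here are
 * illustrative.
 */
#include <stdio.h>
#include <stddef.h>

static void format_hex(char *dst, size_t dst_len,
		       const unsigned char *buf, size_t buf_len)
{
	size_t i, pos = 0;

	if (dst_len)
		dst[0] = '\0';
	for (i = 0; i < buf_len && pos + 2 < dst_len; i++)
		pos += snprintf(dst + pos, dst_len - pos, "%02x", buf[i]);
}

int main(void)
{
	unsigned char lunid[8] = { 0x00, 0x01, 0x02, 0x03, 0x40, 0x50, 0x60, 0x70 };
	char hex[2 * sizeof(lunid) + 1];

	format_hex(hex, sizeof(hex), lunid, sizeof(lunid));
	printf("LUN:0x%s\n", hex);	/* matches the old hand-rolled %02x chain */
	return 0;
}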
@@ -3080,6 +3088,8 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
if (unlikely(rc))
atomic_set(&dev->reset_cmds_out, 0);
+ else
+ wait_for_device_to_become_ready(h, scsi3addr, 0);
mutex_unlock(&h->reset_mutex);
return rc;
@@ -3623,8 +3633,32 @@ out:
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
struct ReportExtendedLUNdata *buf, int bufsize)
{
- return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
- HPSA_REPORT_PHYS_EXTENDED);
+ int rc;
+ struct ReportLUNdata *lbuf;
+
+ rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
+ HPSA_REPORT_PHYS_EXTENDED);
+ if (!rc || !hpsa_allow_any)
+ return rc;
+
+ /* REPORT PHYS EXTENDED is not supported */
+ lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
+ if (!lbuf)
+ return -ENOMEM;
+
+ rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
+ if (!rc) {
+ int i;
+ u32 nphys;
+
+ /* Copy ReportLUNdata header */
+ memcpy(buf, lbuf, 8);
+ nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
+ for (i = 0; i < nphys; i++)
+ memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
+ }
+ kfree(lbuf);
+ return rc;
}
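
When REPORT PHYS EXTENDED is rejected and hpsa_allow_any is set, the new fallback issues the plain report, derives the entry count from the big-endian list length (8 bytes per entry), copies the shared 8-byte header and then each LUN id into the extended buffer. A standalone sketch of that repack over simplified stand-in structures (names and sizes illustrative):

/*
 * Standalone sketch of the fallback repack: take a plain LUN list, derive the
 * entry count from its big-endian byte length (8 bytes per entry), and copy
 * the 8-byte LUN ids into an "extended" layout. The structures are simplified
 * stand-ins for the hpsa report buffers.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define MAX_LUNS 4

struct plain_report {			/* models struct ReportLUNdata */
	uint8_t lun_list_length[4];	/* big-endian byte count       */
	uint8_t reserved[4];
	uint8_t lun[MAX_LUNS][8];
};

struct ext_entry  { uint8_t lunid[8]; uint8_t wwid[16]; };
struct ext_report {			/* models struct ReportExtendedLUNdata */
	uint8_t header[8];
	struct ext_entry lun[MAX_LUNS];
};

static uint32_t be32_to_host(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static void repack(const struct plain_report *in, struct ext_report *out)
{
	uint32_t i, nphys = be32_to_host(in->lun_list_length) / 8;

	memcpy(out->header, in, 8);	/* copy the shared list header */
	for (i = 0; i < nphys && i < MAX_LUNS; i++)
		memcpy(out->lun[i].lunid, in->lun[i], 8);
}

int main(void)
{
	struct plain_report in = { .lun_list_length = { 0, 0, 0, 16 } };
	struct ext_report out = { { 0 } };

	memset(in.lun[0], 0xaa, 8);
	memset(in.lun[1], 0xbb, 8);
	repack(&in, &out);
	printf("copied %u entries; first byte of LUN1 id: 0x%02x\n",
	       (unsigned)(be32_to_host(in.lun_list_length) / 8),
	       out.lun[1].lunid[0]);
	return 0;
}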
static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
@@ -5488,7 +5522,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
dev = cmd->device->hostdata;
if (!dev) {
- cmd->result = NOT_READY << 16; /* host byte */
+ cmd->result = DID_NO_CONNECT << 16;
cmd->scsi_done(cmd);
return 0;
}
@@ -5569,6 +5603,14 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
if (unlikely(lockup_detected(h)))
return hpsa_scan_complete(h);
+ /*
+ * Do the scan after a reset completion
+ */
+ if (h->reset_in_progress) {
+ h->drv_req_rescan = 1;
+ return;
+ }
+
hpsa_update_scsi_devices(h);
hpsa_scan_complete(h);
@@ -5624,7 +5666,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
sh->sg_tablesize = h->maxsgentries;
sh->transportt = hpsa_sas_transport_template;
sh->hostdata[0] = (unsigned long) h;
- sh->irq = h->intr[h->intr_mode];
+ sh->irq = pci_irq_vector(h->pdev, 0);
sh->unique_id = sh->irq;
h->scsi_host = sh;
@@ -5999,11 +6041,9 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
if (h->raid_offload_debug > 0)
dev_info(&h->pdev->dev,
- "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ "scsi %d:%d:%d:%d %s scsi3addr 0x%8phN\n",
h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
- "Reset as abort",
- scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
- scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
+ "Reset as abort", scsi3addr);
if (!dev->offload_enabled) {
dev_warn(&h->pdev->dev,
@@ -6020,32 +6060,28 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
/* send the reset */
if (h->raid_offload_debug > 0)
dev_info(&h->pdev->dev,
- "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- psa[0], psa[1], psa[2], psa[3],
- psa[4], psa[5], psa[6], psa[7]);
+ "Reset as abort: Resetting physical device at scsi3addr 0x%8phN\n",
+ psa);
rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue);
if (rc != 0) {
dev_warn(&h->pdev->dev,
- "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- psa[0], psa[1], psa[2], psa[3],
- psa[4], psa[5], psa[6], psa[7]);
+ "Reset as abort: Failed on physical device at scsi3addr 0x%8phN\n",
+ psa);
return rc; /* failed to reset */
}
/* wait for device to recover */
if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
dev_warn(&h->pdev->dev,
- "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- psa[0], psa[1], psa[2], psa[3],
- psa[4], psa[5], psa[6], psa[7]);
+ "Reset as abort: Failed: Device never recovered from reset: 0x%8phN\n",
+ psa);
return -1; /* failed to recover */
}
/* device recovered */
dev_info(&h->pdev->dev,
- "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- psa[0], psa[1], psa[2], psa[3],
- psa[4], psa[5], psa[6], psa[7]);
+ "Reset as abort: Device recovered from reset: scsi3addr 0x%8phN\n",
+ psa);
return rc; /* success */
}
@@ -6663,8 +6699,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
return -EINVAL;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- ioc = (BIG_IOCTL_Command_struct *)
- kmalloc(sizeof(*ioc), GFP_KERNEL);
+ ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
if (!ioc) {
status = -ENOMEM;
goto cleanup1;
@@ -7658,67 +7693,41 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
{
- if (h->msix_vector) {
- if (h->pdev->msix_enabled)
- pci_disable_msix(h->pdev);
- h->msix_vector = 0;
- } else if (h->msi_vector) {
- if (h->pdev->msi_enabled)
- pci_disable_msi(h->pdev);
- h->msi_vector = 0;
- }
+ pci_free_irq_vectors(h->pdev);
+ h->msix_vectors = 0;
}
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
* controllers that are capable. If not, we use legacy INTx mode.
*/
-static void hpsa_interrupt_mode(struct ctlr_info *h)
+static int hpsa_interrupt_mode(struct ctlr_info *h)
{
-#ifdef CONFIG_PCI_MSI
- int err, i;
- struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
-
- for (i = 0; i < MAX_REPLY_QUEUES; i++) {
- hpsa_msix_entries[i].vector = 0;
- hpsa_msix_entries[i].entry = i;
- }
+ unsigned int flags = PCI_IRQ_LEGACY;
+ int ret;
/* Some boards advertise MSI but don't really support it */
- if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
- (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
- goto default_int_mode;
- if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
- dev_info(&h->pdev->dev, "MSI-X capable controller\n");
- h->msix_vector = MAX_REPLY_QUEUES;
- if (h->msix_vector > num_online_cpus())
- h->msix_vector = num_online_cpus();
- err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
- 1, h->msix_vector);
- if (err < 0) {
- dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
- h->msix_vector = 0;
- goto single_msi_mode;
- } else if (err < h->msix_vector) {
- dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
- "available\n", err);
+ switch (h->board_id) {
+ case 0x40700E11:
+ case 0x40800E11:
+ case 0x40820E11:
+ case 0x40830E11:
+ break;
+ default:
+ ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ if (ret > 0) {
+ h->msix_vectors = ret;
+ return 0;
}
- h->msix_vector = err;
- for (i = 0; i < h->msix_vector; i++)
- h->intr[i] = hpsa_msix_entries[i].vector;
- return;
- }
-single_msi_mode:
- if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
- dev_info(&h->pdev->dev, "MSI capable controller\n");
- if (!pci_enable_msi(h->pdev))
- h->msi_vector = 1;
- else
- dev_warn(&h->pdev->dev, "MSI init failed\n");
+
+ flags |= PCI_IRQ_MSI;
+ break;
}
-default_int_mode:
-#endif /* CONFIG_PCI_MSI */
- /* if we get here we're going to use the default interrupt mode */
- h->intr[h->intr_mode] = h->pdev->irq;
+
+ ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
+ if (ret < 0)
+ return ret;
+ return 0;
}
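
The rewritten hpsa_interrupt_mode() leans on pci_alloc_irq_vectors() and reduces to a fallback order: boards on the quirk list go straight to legacy INTx, everyone else asks for up to MAX_REPLY_QUEUES MSI-X vectors and otherwise settles for a single MSI or INTx vector. A standalone model of that decision with the PCI call abstracted to a boolean (all names illustrative):

/*
 * Standalone model of the interrupt-mode fallback in the rewritten
 * hpsa_interrupt_mode(): quirky boards skip MSI entirely, others try MSI-X
 * first and fall back to a single MSI/INTx vector. The actual
 * pci_alloc_irq_vectors() call is abstracted to the 'msix_ok' flag.
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_REPLY_QUEUES 64

enum irq_kind { IRQ_LEGACY, IRQ_MSI, IRQ_MSIX };

static enum irq_kind pick_interrupt_mode(unsigned int board_id, bool msix_ok,
					 int *msix_vectors)
{
	*msix_vectors = 0;
	switch (board_id) {
	case 0x40700E11: case 0x40800E11: case 0x40820E11: case 0x40830E11:
		/* boards that advertise MSI but do not really support it */
		return IRQ_LEGACY;
	default:
		if (msix_ok) {
			*msix_vectors = MAX_REPLY_QUEUES; /* multiple reply queues */
			return IRQ_MSIX;
		}
		return IRQ_MSI;	/* single-vector fallback */
	}
}

int main(void)
{
	static const char *const names[] = { "legacy INTx", "MSI", "MSI-X" };
	int vecs;
	enum irq_kind k;

	k = pick_interrupt_mode(0x12345678, true, &vecs);
	printf("normal board: %s, %d reply queue(s)\n", names[k], vecs ? vecs : 1);
	k = pick_interrupt_mode(0x40700E11, true, &vecs);
	printf("quirky board: %s, %d reply queue(s)\n", names[k], vecs ? vecs : 1);
	return 0;
}

Because pci_alloc_irq_vectors() hands back a vector count and pci_irq_vector() maps an index to an irq number, the driver no longer needs its own intr[] array or the separate msi_vector/msix_vector flags, as the later hunks show.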
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
@@ -8074,7 +8083,9 @@ static int hpsa_pci_init(struct ctlr_info *h)
pci_set_master(h->pdev);
- hpsa_interrupt_mode(h);
+ err = hpsa_interrupt_mode(h);
+ if (err)
+ goto clean1;
err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
if (err)
goto clean2; /* intmode+region, pci */
@@ -8110,6 +8121,7 @@ clean3: /* vaddr, intmode+region, pci */
h->vaddr = NULL;
clean2: /* intmode+region, pci */
hpsa_disable_interrupt_mode(h);
+clean1:
/*
* call pci_disable_device before pci_release_regions per
* Documentation/PCI/pci.txt
@@ -8243,34 +8255,20 @@ clean_up:
return -ENOMEM;
}
-static void hpsa_irq_affinity_hints(struct ctlr_info *h)
-{
- int i, cpu;
-
- cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < h->msix_vector; i++) {
- irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
-}
-
/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
static void hpsa_free_irqs(struct ctlr_info *h)
{
int i;
- if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
+ if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
/* Single reply queue, only one irq to free */
- i = h->intr_mode;
- irq_set_affinity_hint(h->intr[i], NULL);
- free_irq(h->intr[i], &h->q[i]);
- h->q[i] = 0;
+ free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]);
+ h->q[h->intr_mode] = 0;
return;
}
- for (i = 0; i < h->msix_vector; i++) {
- irq_set_affinity_hint(h->intr[i], NULL);
- free_irq(h->intr[i], &h->q[i]);
+ for (i = 0; i < h->msix_vectors; i++) {
+ free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
h->q[i] = 0;
}
for (; i < MAX_REPLY_QUEUES; i++)
@@ -8291,11 +8289,11 @@ static int hpsa_request_irqs(struct ctlr_info *h,
for (i = 0; i < MAX_REPLY_QUEUES; i++)
h->q[i] = (u8) i;
- if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
+ if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
/* If performant mode and MSI-X, use multiple reply queues */
- for (i = 0; i < h->msix_vector; i++) {
+ for (i = 0; i < h->msix_vectors; i++) {
sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
- rc = request_irq(h->intr[i], msixhandler,
+ rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
0, h->intrname[i],
&h->q[i]);
if (rc) {
@@ -8303,9 +8301,9 @@ static int hpsa_request_irqs(struct ctlr_info *h,
dev_err(&h->pdev->dev,
"failed to get irq %d for %s\n",
- h->intr[i], h->devname);
+ pci_irq_vector(h->pdev, i), h->devname);
for (j = 0; j < i; j++) {
- free_irq(h->intr[j], &h->q[j]);
+ free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
h->q[j] = 0;
}
for (; j < MAX_REPLY_QUEUES; j++)
@@ -8313,33 +8311,27 @@ static int hpsa_request_irqs(struct ctlr_info *h,
return rc;
}
}
- hpsa_irq_affinity_hints(h);
} else {
/* Use single reply pool */
- if (h->msix_vector > 0 || h->msi_vector) {
- if (h->msix_vector)
- sprintf(h->intrname[h->intr_mode],
- "%s-msix", h->devname);
- else
- sprintf(h->intrname[h->intr_mode],
- "%s-msi", h->devname);
- rc = request_irq(h->intr[h->intr_mode],
+ if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
+ sprintf(h->intrname[0], "%s-msi%s", h->devname,
+ h->msix_vectors ? "x" : "");
+ rc = request_irq(pci_irq_vector(h->pdev, 0),
msixhandler, 0,
- h->intrname[h->intr_mode],
+ h->intrname[0],
&h->q[h->intr_mode]);
} else {
sprintf(h->intrname[h->intr_mode],
"%s-intx", h->devname);
- rc = request_irq(h->intr[h->intr_mode],
+ rc = request_irq(pci_irq_vector(h->pdev, 0),
intxhandler, IRQF_SHARED,
- h->intrname[h->intr_mode],
+ h->intrname[0],
&h->q[h->intr_mode]);
}
- irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
}
if (rc) {
dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
- h->intr[h->intr_mode], h->devname);
+ pci_irq_vector(h->pdev, 0), h->devname);
hpsa_free_irqs(h);
return -ENODEV;
}
@@ -8640,6 +8632,14 @@ static void hpsa_rescan_ctlr_worker(struct work_struct *work)
if (h->remove_in_progress)
return;
+ /*
+ * Do the scan after the reset
+ */
+ if (h->reset_in_progress) {
+ h->drv_req_rescan = 1;
+ return;
+ }
+
if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
scsi_host_get(h->scsi_host);
hpsa_ack_ctlr_events(h);
@@ -9525,7 +9525,7 @@ static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
return rc;
}
- h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
+ h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
hpsa_get_max_perf_mode_cmds(h);
/* Performant mode ring buffer and supporting data structures */
h->reply_queue_size = h->max_commands * sizeof(u64);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 9ea162de80dc..64e98295b707 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -176,9 +176,7 @@ struct ctlr_info {
# define DOORBELL_INT 1
# define SIMPLE_MODE_INT 2
# define MEMQ_MODE_INT 3
- unsigned int intr[MAX_REPLY_QUEUES];
- unsigned int msix_vector;
- unsigned int msi_vector;
+ unsigned int msix_vectors;
int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
struct access_method access;
@@ -466,7 +464,7 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
unsigned long register_value = FIFO_EMPTY;
/* msi auto clears the interrupt pending bit. */
- if (unlikely(!(h->msi_vector || h->msix_vector))) {
+ if (unlikely(!(h->pdev->msi_enabled || h->msix_vectors))) {
/* flush the controller write of the reply queue by reading
* outbound doorbell status register.
*/
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 7e487c78279c..78b72c28a55d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -32,6 +32,7 @@
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/stringify.h>
+#include <linux/bsg-lib.h>
#include <asm/firmware.h>
#include <asm/irq.h>
#include <asm/vio.h>
@@ -1701,14 +1702,14 @@ static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
/**
* ibmvfc_bsg_timeout - Handle a BSG timeout
- * @job: struct fc_bsg_job that timed out
+ * @job: struct bsg_job that timed out
*
* Returns:
* 0 on success / other on failure
**/
-static int ibmvfc_bsg_timeout(struct fc_bsg_job *job)
+static int ibmvfc_bsg_timeout(struct bsg_job *job)
{
- struct ibmvfc_host *vhost = shost_priv(job->shost);
+ struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
unsigned long port_id = (unsigned long)job->dd_data;
struct ibmvfc_event *evt;
struct ibmvfc_tmf *tmf;
@@ -1814,41 +1815,43 @@ unlock_out:
/**
* ibmvfc_bsg_request - Handle a BSG request
- * @job: struct fc_bsg_job to be executed
+ * @job: struct bsg_job to be executed
*
* Returns:
* 0 on success / other on failure
**/
-static int ibmvfc_bsg_request(struct fc_bsg_job *job)
+static int ibmvfc_bsg_request(struct bsg_job *job)
{
- struct ibmvfc_host *vhost = shost_priv(job->shost);
- struct fc_rport *rport = job->rport;
+ struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
+ struct fc_rport *rport = fc_bsg_to_rport(job);
struct ibmvfc_passthru_mad *mad;
struct ibmvfc_event *evt;
union ibmvfc_iu rsp_iu;
unsigned long flags, port_id = -1;
- unsigned int code = job->request->msgcode;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ unsigned int code = bsg_request->msgcode;
int rc = 0, req_seg, rsp_seg, issue_login = 0;
u32 fc_flags, rsp_len;
ENTER;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (rport)
port_id = rport->port_id;
switch (code) {
case FC_BSG_HST_ELS_NOLOGIN:
- port_id = (job->request->rqst_data.h_els.port_id[0] << 16) |
- (job->request->rqst_data.h_els.port_id[1] << 8) |
- job->request->rqst_data.h_els.port_id[2];
+ port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
+ (bsg_request->rqst_data.h_els.port_id[1] << 8) |
+ bsg_request->rqst_data.h_els.port_id[2];
case FC_BSG_RPT_ELS:
fc_flags = IBMVFC_FC_ELS;
break;
case FC_BSG_HST_CT:
issue_login = 1;
- port_id = (job->request->rqst_data.h_ct.port_id[0] << 16) |
- (job->request->rqst_data.h_ct.port_id[1] << 8) |
- job->request->rqst_data.h_ct.port_id[2];
+ port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
+ (bsg_request->rqst_data.h_ct.port_id[1] << 8) |
+ bsg_request->rqst_data.h_ct.port_id[2];
case FC_BSG_RPT_CT:
fc_flags = IBMVFC_FC_CT_IU;
break;
@@ -1937,13 +1940,14 @@ static int ibmvfc_bsg_request(struct fc_bsg_job *job)
if (rsp_iu.passthru.common.status)
rc = -EIO;
else
- job->reply->reply_payload_rcv_len = rsp_len;
+ bsg_reply->reply_payload_rcv_len = rsp_len;
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_free_event(evt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
rc = 0;
out:
dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 642b739ad0da..c9fa3565c671 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -22,7 +22,7 @@
*
****************************************************************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
@@ -81,7 +81,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
}
} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
if (se_cmd->data_direction == DMA_TO_DEVICE) {
- /* residual data from an overflow write */
+ /* residual data from an overflow write */
rsp->flags = SRP_RSP_FLAG_DOOVER;
rsp->data_out_res_cnt = cpu_to_be32(residual_count);
} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
@@ -101,7 +101,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
* and the function returns TRUE.
*
* EXECUTION ENVIRONMENT:
- * Interrupt or Process environment
+ * Interrupt or Process environment
*/
static bool connection_broken(struct scsi_info *vscsi)
{
@@ -324,7 +324,7 @@ static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
}
/**
- * ibmvscsis_send_init_message() - send initialize message to the client
+ * ibmvscsis_send_init_message() - send initialize message to the client
* @vscsi: Pointer to our adapter structure
* @format: Which Init Message format to send
*
@@ -382,13 +382,13 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
vscsi->cmd_q.base_addr);
if (crq) {
*format = (uint)(crq->format);
- rc = ERROR;
+ rc = ERROR;
crq->valid = INVALIDATE_CMD_RESP_EL;
dma_rmb();
}
} else {
*format = (uint)(crq->format);
- rc = ERROR;
+ rc = ERROR;
crq->valid = INVALIDATE_CMD_RESP_EL;
dma_rmb();
}
@@ -397,166 +397,6 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
}
/**
- * ibmvscsis_establish_new_q() - Establish new CRQ queue
- * @vscsi: Pointer to our adapter structure
- * @new_state: New state being established after resetting the queue
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_establish_new_q(struct scsi_info *vscsi, uint new_state)
-{
- long rc = ADAPT_SUCCESS;
- uint format;
-
- vscsi->flags &= PRESERVE_FLAG_FIELDS;
- vscsi->rsp_q_timer.timer_pops = 0;
- vscsi->debit = 0;
- vscsi->credit = 0;
-
- rc = vio_enable_interrupts(vscsi->dma_dev);
- if (rc) {
- pr_warn("reset_queue: failed to enable interrupts, rc %ld\n",
- rc);
- return rc;
- }
-
- rc = ibmvscsis_check_init_msg(vscsi, &format);
- if (rc) {
- dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n",
- rc);
- return rc;
- }
-
- if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) {
- rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
- switch (rc) {
- case H_SUCCESS:
- case H_DROPPED:
- case H_CLOSED:
- rc = ADAPT_SUCCESS;
- break;
-
- case H_PARAMETER:
- case H_HARDWARE:
- break;
-
- default:
- vscsi->state = UNDEFINED;
- rc = H_HARDWARE;
- break;
- }
- }
-
- return rc;
-}
-
-/**
- * ibmvscsis_reset_queue() - Reset CRQ Queue
- * @vscsi: Pointer to our adapter structure
- * @new_state: New state to establish after resetting the queue
- *
- * This function calls h_free_q and then calls h_reg_q and does all
- * of the bookkeeping to get us back to where we can communicate.
- *
- * Actually, we don't always call h_free_crq. A problem was discovered
- * where one partition would close and reopen his queue, which would
- * cause his partner to get a transport event, which would cause him to
- * close and reopen his queue, which would cause the original partition
- * to get a transport event, etc., etc. To prevent this, we don't
- * actually close our queue if the client initiated the reset, (i.e.
- * either we got a transport event or we have detected that the client's
- * queue is gone)
- *
- * EXECUTION ENVIRONMENT:
- * Process environment, called with interrupt lock held
- */
-static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state)
-{
- int bytes;
- long rc = ADAPT_SUCCESS;
-
- pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
-
- /* don't reset, the client did it for us */
- if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
- vscsi->flags &= PRESERVE_FLAG_FIELDS;
- vscsi->rsp_q_timer.timer_pops = 0;
- vscsi->debit = 0;
- vscsi->credit = 0;
- vscsi->state = new_state;
- vio_enable_interrupts(vscsi->dma_dev);
- } else {
- rc = ibmvscsis_free_command_q(vscsi);
- if (rc == ADAPT_SUCCESS) {
- vscsi->state = new_state;
-
- bytes = vscsi->cmd_q.size * PAGE_SIZE;
- rc = h_reg_crq(vscsi->dds.unit_id,
- vscsi->cmd_q.crq_token, bytes);
- if (rc == H_CLOSED || rc == H_SUCCESS) {
- rc = ibmvscsis_establish_new_q(vscsi,
- new_state);
- }
-
- if (rc != ADAPT_SUCCESS) {
- pr_debug("reset_queue: reg_crq rc %ld\n", rc);
-
- vscsi->state = ERR_DISCONNECTED;
- vscsi->flags |= RESPONSE_Q_DOWN;
- ibmvscsis_free_command_q(vscsi);
- }
- } else {
- vscsi->state = ERR_DISCONNECTED;
- vscsi->flags |= RESPONSE_Q_DOWN;
- }
- }
-}
-
-/**
- * ibmvscsis_free_cmd_resources() - Free command resources
- * @vscsi: Pointer to our adapter structure
- * @cmd: Command which is not longer in use
- *
- * Must be called with interrupt lock held.
- */
-static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
- struct ibmvscsis_cmd *cmd)
-{
- struct iu_entry *iue = cmd->iue;
-
- switch (cmd->type) {
- case TASK_MANAGEMENT:
- case SCSI_CDB:
- /*
- * When the queue goes down this value is cleared, so it
- * cannot be cleared in this general purpose function.
- */
- if (vscsi->debit)
- vscsi->debit -= 1;
- break;
- case ADAPTER_MAD:
- vscsi->flags &= ~PROCESSING_MAD;
- break;
- case UNSET_TYPE:
- break;
- default:
- dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
- cmd->type);
- break;
- }
-
- cmd->iue = NULL;
- list_add_tail(&cmd->list, &vscsi->free_cmd);
- srp_iu_put(iue);
-
- if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
- list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
- vscsi->flags &= ~WAIT_FOR_IDLE;
- complete(&vscsi->wait_idle);
- }
-}
-
-/**
* ibmvscsis_disconnect() - Helper function to disconnect
* @work: Pointer to work_struct, gives access to our adapter structure
*
@@ -575,7 +415,6 @@ static void ibmvscsis_disconnect(struct work_struct *work)
proc_work);
u16 new_state;
bool wait_idle = false;
- long rc = ADAPT_SUCCESS;
spin_lock_bh(&vscsi->intr_lock);
new_state = vscsi->new_state;
@@ -589,7 +428,7 @@ static void ibmvscsis_disconnect(struct work_struct *work)
* should transition to the new state
*/
switch (vscsi->state) {
- /* Should never be called while in this state. */
+ /* Should never be called while in this state. */
case NO_QUEUE:
/*
* Can never transition from this state;
@@ -628,30 +467,24 @@ static void ibmvscsis_disconnect(struct work_struct *work)
vscsi->state = new_state;
break;
- /*
- * If this is a transition into an error state.
- * a client is attempting to establish a connection
- * and has violated the RPA protocol.
- * There can be nothing pending on the adapter although
- * there can be requests in the command queue.
- */
case WAIT_ENABLED:
- case PART_UP_WAIT_ENAB:
switch (new_state) {
- case ERR_DISCONNECT:
- vscsi->flags |= RESPONSE_Q_DOWN;
+ case UNCONFIGURING:
vscsi->state = new_state;
+ vscsi->flags |= RESPONSE_Q_DOWN;
vscsi->flags &= ~(SCHEDULE_DISCONNECT |
DISCONNECT_SCHEDULED);
- ibmvscsis_free_command_q(vscsi);
- break;
- case ERR_DISCONNECT_RECONNECT:
- ibmvscsis_reset_queue(vscsi, WAIT_ENABLED);
+ dma_rmb();
+ if (vscsi->flags & CFG_SLEEPING) {
+ vscsi->flags &= ~CFG_SLEEPING;
+ complete(&vscsi->unconfig);
+ }
break;
/* should never happen */
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
case WAIT_IDLE:
- rc = ERROR;
dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
vscsi->state);
break;
@@ -660,6 +493,13 @@ static void ibmvscsis_disconnect(struct work_struct *work)
case WAIT_IDLE:
switch (new_state) {
+ case UNCONFIGURING:
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ vscsi->state = new_state;
+ vscsi->flags &= ~(SCHEDULE_DISCONNECT |
+ DISCONNECT_SCHEDULED);
+ ibmvscsis_free_command_q(vscsi);
+ break;
case ERR_DISCONNECT:
case ERR_DISCONNECT_RECONNECT:
vscsi->state = new_state;
@@ -788,7 +628,6 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
break;
case WAIT_ENABLED:
- case PART_UP_WAIT_ENAB:
case WAIT_IDLE:
case WAIT_CONNECTION:
case CONNECTED:
@@ -806,6 +645,310 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
}
/**
+ * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
+{
+ long rc = ADAPT_SUCCESS;
+
+ switch (vscsi->state) {
+ case NO_QUEUE:
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ case ERR_DISCONNECTED:
+ case UNCONFIGURING:
+ case UNDEFINED:
+ rc = ERROR;
+ break;
+
+ case WAIT_CONNECTION:
+ vscsi->state = CONNECTED;
+ break;
+
+ case WAIT_IDLE:
+ case SRP_PROCESSING:
+ case CONNECTED:
+ case WAIT_ENABLED:
+ default:
+ rc = ERROR;
+ dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
+ vscsi->state);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_handle_init_msg() - Respond to an Init Message
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
+{
+ long rc = ADAPT_SUCCESS;
+
+ switch (vscsi->state) {
+ case WAIT_CONNECTION:
+ rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
+ switch (rc) {
+ case H_SUCCESS:
+ vscsi->state = CONNECTED;
+ break;
+
+ case H_PARAMETER:
+ dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+ break;
+
+ case H_DROPPED:
+ dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+ rc);
+ rc = ERROR;
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT, 0);
+ break;
+
+ case H_CLOSED:
+ pr_warn("init_msg: failed to send, rc %ld\n", rc);
+ rc = 0;
+ break;
+ }
+ break;
+
+ case UNDEFINED:
+ rc = ERROR;
+ break;
+
+ case UNCONFIGURING:
+ break;
+
+ case WAIT_ENABLED:
+ case CONNECTED:
+ case SRP_PROCESSING:
+ case WAIT_IDLE:
+ case NO_QUEUE:
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ case ERR_DISCONNECTED:
+ default:
+ rc = ERROR;
+ dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
+ vscsi->state);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_init_msg() - Respond to an init message
+ * @vscsi: Pointer to our adapter structure
+ * @crq: Pointer to CRQ element containing the Init Message
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, interrupt lock held
+ */
+static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
+{
+ long rc = ADAPT_SUCCESS;
+
+ pr_debug("init_msg: state 0x%hx\n", vscsi->state);
+
+ rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
+ (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
+ 0);
+ if (rc == H_SUCCESS) {
+ vscsi->client_data.partition_number =
+ be64_to_cpu(*(u64 *)vscsi->map_buf);
+ pr_debug("init_msg, part num %d\n",
+ vscsi->client_data.partition_number);
+ } else {
+ pr_debug("init_msg h_vioctl rc %ld\n", rc);
+ rc = ADAPT_SUCCESS;
+ }
+
+ if (crq->format == INIT_MSG) {
+ rc = ibmvscsis_handle_init_msg(vscsi);
+ } else if (crq->format == INIT_COMPLETE_MSG) {
+ rc = ibmvscsis_handle_init_compl_msg(vscsi);
+ } else {
+ rc = ERROR;
+ dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
+ (uint)crq->format);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_establish_new_q() - Establish new CRQ queue
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
+{
+ long rc = ADAPT_SUCCESS;
+ uint format;
+
+ vscsi->flags &= PRESERVE_FLAG_FIELDS;
+ vscsi->rsp_q_timer.timer_pops = 0;
+ vscsi->debit = 0;
+ vscsi->credit = 0;
+
+ rc = vio_enable_interrupts(vscsi->dma_dev);
+ if (rc) {
+ pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n",
+ rc);
+ return rc;
+ }
+
+ rc = ibmvscsis_check_init_msg(vscsi, &format);
+ if (rc) {
+ dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n",
+ rc);
+ return rc;
+ }
+
+ if (format == UNUSED_FORMAT) {
+ rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
+ switch (rc) {
+ case H_SUCCESS:
+ case H_DROPPED:
+ case H_CLOSED:
+ rc = ADAPT_SUCCESS;
+ break;
+
+ case H_PARAMETER:
+ case H_HARDWARE:
+ break;
+
+ default:
+ vscsi->state = UNDEFINED;
+ rc = H_HARDWARE;
+ break;
+ }
+ } else if (format == INIT_MSG) {
+ rc = ibmvscsis_handle_init_msg(vscsi);
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_reset_queue() - Reset CRQ Queue
+ * @vscsi: Pointer to our adapter structure
+ *
+ * This function calls h_free_crq and then calls h_reg_crq and does all
+ * of the bookkeeping to get us back to where we can communicate.
+ *
+ * Actually, we don't always call h_free_crq. A problem was discovered
+ * where one partition would close and reopen its queue, which would
+ * cause its partner to get a transport event, which would cause the
+ * partner to close and reopen its queue, which would cause the original
+ * partition to get a transport event, and so on. To prevent this, we
+ * don't actually close our queue if the client initiated the reset
+ * (i.e. we either got a transport event or we have detected that the
+ * client's queue is gone).
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process environment, called with interrupt lock held
+ */
+static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
+{
+ int bytes;
+ long rc = ADAPT_SUCCESS;
+
+ pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
+
+ /* don't reset, the client did it for us */
+ if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
+ vscsi->flags &= PRESERVE_FLAG_FIELDS;
+ vscsi->rsp_q_timer.timer_pops = 0;
+ vscsi->debit = 0;
+ vscsi->credit = 0;
+ vscsi->state = WAIT_CONNECTION;
+ vio_enable_interrupts(vscsi->dma_dev);
+ } else {
+ rc = ibmvscsis_free_command_q(vscsi);
+ if (rc == ADAPT_SUCCESS) {
+ vscsi->state = WAIT_CONNECTION;
+
+ bytes = vscsi->cmd_q.size * PAGE_SIZE;
+ rc = h_reg_crq(vscsi->dds.unit_id,
+ vscsi->cmd_q.crq_token, bytes);
+ if (rc == H_CLOSED || rc == H_SUCCESS) {
+ rc = ibmvscsis_establish_new_q(vscsi);
+ }
+
+ if (rc != ADAPT_SUCCESS) {
+ pr_debug("reset_queue: reg_crq rc %ld\n", rc);
+
+ vscsi->state = ERR_DISCONNECTED;
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ ibmvscsis_free_command_q(vscsi);
+ }
+ } else {
+ vscsi->state = ERR_DISCONNECTED;
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ }
+ }
+}
+
+/**
+ * ibmvscsis_free_cmd_resources() - Free command resources
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Command which is no longer in use
+ *
+ * Must be called with interrupt lock held.
+ */
+static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd)
+{
+ struct iu_entry *iue = cmd->iue;
+
+ switch (cmd->type) {
+ case TASK_MANAGEMENT:
+ case SCSI_CDB:
+ /*
+ * When the queue goes down this value is cleared, so it
+ * cannot be cleared in this general purpose function.
+ */
+ if (vscsi->debit)
+ vscsi->debit -= 1;
+ break;
+ case ADAPTER_MAD:
+ vscsi->flags &= ~PROCESSING_MAD;
+ break;
+ case UNSET_TYPE:
+ break;
+ default:
+ dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
+ cmd->type);
+ break;
+ }
+
+ cmd->iue = NULL;
+ list_add_tail(&cmd->list, &vscsi->free_cmd);
+ srp_iu_put(iue);
+
+ if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
+ list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
+ vscsi->flags &= ~WAIT_FOR_IDLE;
+ complete(&vscsi->wait_idle);
+ }
+}
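The list-empty check above is one half of a flag-plus-completion idiom: the last command to be freed clears WAIT_FOR_IDLE and completes wait_idle. For orientation only, the waiting side of that idiom usually looks like the sketch below; the helper name is hypothetical and not part of this patch (the remove() path uses the same shape with CFG_SLEEPING and the unconfig completion).

	/* Hypothetical waiter, not part of this patch */
	static void example_wait_for_idle(struct scsi_info *vscsi)
	{
		spin_lock_bh(&vscsi->intr_lock);
		reinit_completion(&vscsi->wait_idle);
		vscsi->flags |= WAIT_FOR_IDLE;
		spin_unlock_bh(&vscsi->intr_lock);

		wait_for_completion(&vscsi->wait_idle);
	}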
+
+/**
* ibmvscsis_trans_event() - Handle a Transport Event
* @vscsi: Pointer to our adapter structure
* @crq: Pointer to CRQ entry containing the Transport Event
@@ -863,10 +1006,6 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
TRANS_EVENT));
break;
- case PART_UP_WAIT_ENAB:
- vscsi->state = WAIT_ENABLED;
- break;
-
case SRP_PROCESSING:
if ((vscsi->debit > 0) ||
!list_empty(&vscsi->schedule_q) ||
@@ -895,7 +1034,7 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
}
}
- rc = vscsi->flags & SCHEDULE_DISCONNECT;
+ rc = vscsi->flags & SCHEDULE_DISCONNECT;
pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
vscsi->flags, vscsi->state, rc);
@@ -1066,16 +1205,28 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
free_qs = true;
switch (vscsi->state) {
+ case UNCONFIGURING:
+ ibmvscsis_free_command_q(vscsi);
+ dma_rmb();
+ isync();
+ if (vscsi->flags & CFG_SLEEPING) {
+ vscsi->flags &= ~CFG_SLEEPING;
+ complete(&vscsi->unconfig);
+ }
+ break;
case ERR_DISCONNECT_RECONNECT:
- ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION);
+ ibmvscsis_reset_queue(vscsi);
pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
break;
case ERR_DISCONNECT:
ibmvscsis_free_command_q(vscsi);
- vscsi->flags &= ~DISCONNECT_SCHEDULED;
+ vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED);
vscsi->flags |= RESPONSE_Q_DOWN;
- vscsi->state = ERR_DISCONNECTED;
+ if (vscsi->tport.enabled)
+ vscsi->state = ERR_DISCONNECTED;
+ else
+ vscsi->state = WAIT_ENABLED;
pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
vscsi->flags, vscsi->state);
break;
@@ -1220,7 +1371,7 @@ static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
* @iue: Information Unit containing the Adapter Info MAD request
*
* EXECUTION ENVIRONMENT:
- * Interrupt adpater lock is held
+ * Interrupt adapter lock is held
*/
static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
struct iu_entry *iue)
@@ -1620,8 +1771,8 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
be64_to_cpu(msg_hi),
be64_to_cpu(cmd->rsp.tag));
- pr_debug("send_messages: tag 0x%llx, rc %ld\n",
- be64_to_cpu(cmd->rsp.tag), rc);
+ pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
+ cmd, be64_to_cpu(cmd->rsp.tag), rc);
/* if all ok free up the command element resources */
if (rc == H_SUCCESS) {
@@ -1691,7 +1842,7 @@ static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
* @crq: Pointer to the CRQ entry containing the MAD request
*
* EXECUTION ENVIRONMENT:
- * Interrupt called with adapter lock held
+ * Interrupt, called with adapter lock held
*/
static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
@@ -1745,14 +1896,7 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
- if (be16_to_cpu(mad->length) < 0) {
- dev_err(&vscsi->dev, "mad: length is < 0\n");
- ibmvscsis_post_disconnect(vscsi,
- ERR_DISCONNECT_RECONNECT, 0);
- rc = SRP_VIOLATION;
- } else {
- rc = ibmvscsis_process_mad(vscsi, iue);
- }
+ rc = ibmvscsis_process_mad(vscsi, iue);
pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
rc);
@@ -1864,7 +2008,7 @@ static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
break;
case H_PERMISSION:
if (connection_broken(vscsi))
- flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
+ flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
rc);
ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
@@ -2187,156 +2331,6 @@ static long ibmvscsis_ping_response(struct scsi_info *vscsi)
}
/**
- * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
- * @vscsi: Pointer to our adapter structure
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
-{
- long rc = ADAPT_SUCCESS;
-
- switch (vscsi->state) {
- case NO_QUEUE:
- case ERR_DISCONNECT:
- case ERR_DISCONNECT_RECONNECT:
- case ERR_DISCONNECTED:
- case UNCONFIGURING:
- case UNDEFINED:
- rc = ERROR;
- break;
-
- case WAIT_CONNECTION:
- vscsi->state = CONNECTED;
- break;
-
- case WAIT_IDLE:
- case SRP_PROCESSING:
- case CONNECTED:
- case WAIT_ENABLED:
- case PART_UP_WAIT_ENAB:
- default:
- rc = ERROR;
- dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
- vscsi->state);
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
- break;
- }
-
- return rc;
-}
-
-/**
- * ibmvscsis_handle_init_msg() - Respond to an Init Message
- * @vscsi: Pointer to our adapter structure
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
-{
- long rc = ADAPT_SUCCESS;
-
- switch (vscsi->state) {
- case WAIT_ENABLED:
- vscsi->state = PART_UP_WAIT_ENAB;
- break;
-
- case WAIT_CONNECTION:
- rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
- switch (rc) {
- case H_SUCCESS:
- vscsi->state = CONNECTED;
- break;
-
- case H_PARAMETER:
- dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
- rc);
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
- break;
-
- case H_DROPPED:
- dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
- rc);
- rc = ERROR;
- ibmvscsis_post_disconnect(vscsi,
- ERR_DISCONNECT_RECONNECT, 0);
- break;
-
- case H_CLOSED:
- pr_warn("init_msg: failed to send, rc %ld\n", rc);
- rc = 0;
- break;
- }
- break;
-
- case UNDEFINED:
- rc = ERROR;
- break;
-
- case UNCONFIGURING:
- break;
-
- case PART_UP_WAIT_ENAB:
- case CONNECTED:
- case SRP_PROCESSING:
- case WAIT_IDLE:
- case NO_QUEUE:
- case ERR_DISCONNECT:
- case ERR_DISCONNECT_RECONNECT:
- case ERR_DISCONNECTED:
- default:
- rc = ERROR;
- dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
- vscsi->state);
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
- break;
- }
-
- return rc;
-}
-
-/**
- * ibmvscsis_init_msg() - Respond to an init message
- * @vscsi: Pointer to our adapter structure
- * @crq: Pointer to CRQ element containing the Init Message
- *
- * EXECUTION ENVIRONMENT:
- * Interrupt, interrupt lock held
- */
-static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
-{
- long rc = ADAPT_SUCCESS;
-
- pr_debug("init_msg: state 0x%hx\n", vscsi->state);
-
- rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
- (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
- 0);
- if (rc == H_SUCCESS) {
- vscsi->client_data.partition_number =
- be64_to_cpu(*(u64 *)vscsi->map_buf);
- pr_debug("init_msg, part num %d\n",
- vscsi->client_data.partition_number);
- } else {
- pr_debug("init_msg h_vioctl rc %ld\n", rc);
- rc = ADAPT_SUCCESS;
- }
-
- if (crq->format == INIT_MSG) {
- rc = ibmvscsis_handle_init_msg(vscsi);
- } else if (crq->format == INIT_COMPLETE_MSG) {
- rc = ibmvscsis_handle_init_compl_msg(vscsi);
- } else {
- rc = ERROR;
- dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
- (uint)crq->format);
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
- }
-
- return rc;
-}
-
-/**
* ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
* @vscsi: Pointer to our adapter structure
* @crq: Pointer to CRQ element containing the SRP request
@@ -2391,7 +2385,7 @@ static long ibmvscsis_parse_command(struct scsi_info *vscsi,
break;
case VALID_TRANS_EVENT:
- rc = ibmvscsis_trans_event(vscsi, crq);
+ rc = ibmvscsis_trans_event(vscsi, crq);
break;
case VALID_INIT_MSG:
@@ -2522,7 +2516,6 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
srp->tag);
goto fail;
- return;
}
cmd->rsp.sol_not = srp->sol_not;
@@ -2559,6 +2552,10 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
data_len, attr, dir, 0);
if (rc) {
dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
+ spin_lock_bh(&vscsi->intr_lock);
+ list_del(&cmd->list);
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ spin_unlock_bh(&vscsi->intr_lock);
goto fail;
}
return;
@@ -2638,6 +2635,9 @@ static void ibmvscsis_parse_task(struct scsi_info *vscsi,
if (rc) {
dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
rc);
+ spin_lock_bh(&vscsi->intr_lock);
+ list_del(&cmd->list);
+ spin_unlock_bh(&vscsi->intr_lock);
cmd->se_cmd.se_tmr_req->response =
TMR_FUNCTION_REJECTED;
}
@@ -2786,36 +2786,6 @@ static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
}
/**
- * ibmvscsis_check_q() - Helper function to Check Init Message Valid
- * @vscsi: Pointer to our adapter structure
- *
- * Checks if a initialize message was queued by the initiatior
- * while the timing window was open. This function is called from
- * probe after the CRQ is created and interrupts are enabled.
- * It would only be used by adapters who wait for some event before
- * completing the init handshake with the client. For ibmvscsi, this
- * event is waiting for the port to be enabled.
- *
- * EXECUTION ENVIRONMENT:
- * Process level only, interrupt lock held
- */
-static long ibmvscsis_check_q(struct scsi_info *vscsi)
-{
- uint format;
- long rc;
-
- rc = ibmvscsis_check_init_msg(vscsi, &format);
- if (rc)
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
- else if (format == UNUSED_FORMAT)
- vscsi->state = WAIT_ENABLED;
- else
- vscsi->state = PART_UP_WAIT_ENAB;
-
- return rc;
-}
-
-/**
* ibmvscsis_enable_change_state() - Set new state based on enabled status
* @vscsi: Pointer to our adapter structure
*
@@ -2826,77 +2796,19 @@ static long ibmvscsis_check_q(struct scsi_info *vscsi)
*/
static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
{
+ int bytes;
long rc = ADAPT_SUCCESS;
-handle_state_change:
- switch (vscsi->state) {
- case WAIT_ENABLED:
- rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
- switch (rc) {
- case H_SUCCESS:
- case H_DROPPED:
- case H_CLOSED:
- vscsi->state = WAIT_CONNECTION;
- rc = ADAPT_SUCCESS;
- break;
-
- case H_PARAMETER:
- break;
-
- case H_HARDWARE:
- break;
-
- default:
- vscsi->state = UNDEFINED;
- rc = H_HARDWARE;
- break;
- }
- break;
- case PART_UP_WAIT_ENAB:
- rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
- switch (rc) {
- case H_SUCCESS:
- vscsi->state = CONNECTED;
- rc = ADAPT_SUCCESS;
- break;
-
- case H_DROPPED:
- case H_CLOSED:
- vscsi->state = WAIT_ENABLED;
- goto handle_state_change;
-
- case H_PARAMETER:
- break;
-
- case H_HARDWARE:
- break;
-
- default:
- rc = H_HARDWARE;
- break;
- }
- break;
-
- case WAIT_CONNECTION:
- case WAIT_IDLE:
- case SRP_PROCESSING:
- case CONNECTED:
- rc = ADAPT_SUCCESS;
- break;
- /* should not be able to get here */
- case UNCONFIGURING:
- rc = ERROR;
- vscsi->state = UNDEFINED;
- break;
+ bytes = vscsi->cmd_q.size * PAGE_SIZE;
+ rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes);
+ if (rc == H_CLOSED || rc == H_SUCCESS) {
+ vscsi->state = WAIT_CONNECTION;
+ rc = ibmvscsis_establish_new_q(vscsi);
+ }
- /* driver should never allow this to happen */
- case ERR_DISCONNECT:
- case ERR_DISCONNECT_RECONNECT:
- default:
- dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n",
- vscsi->state);
- rc = ADAPT_SUCCESS;
- break;
+ if (rc != ADAPT_SUCCESS) {
+ vscsi->state = ERR_DISCONNECTED;
+ vscsi->flags |= RESPONSE_Q_DOWN;
}
return rc;
@@ -2916,7 +2828,6 @@ handle_state_change:
*/
static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
{
- long rc = 0;
int pages;
struct vio_dev *vdev = vscsi->dma_dev;
@@ -2940,22 +2851,7 @@ static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
return -ENOMEM;
}
- rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE);
- if (rc) {
- if (rc == H_CLOSED) {
- vscsi->state = WAIT_ENABLED;
- rc = 0;
- } else {
- dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- free_page((unsigned long)vscsi->cmd_q.base_addr);
- rc = -ENODEV;
- }
- } else {
- vscsi->state = WAIT_ENABLED;
- }
-
- return rc;
+ return 0;
}
/**
@@ -3270,7 +3166,7 @@ static void ibmvscsis_handle_crq(unsigned long data)
/*
* if we are in a path where we are waiting for all pending commands
* to complete because we received a transport event and anything in
- * the command queue is for a new connection, do nothing
+ * the command queue is for a new connection, do nothing
*/
if (TARGET_STOP(vscsi)) {
vio_enable_interrupts(vscsi->dma_dev);
@@ -3314,7 +3210,7 @@ cmd_work:
* everything but transport events on the queue
*
* need to decrement the queue index so we can
- * look at the elment again
+ * look at the element again
*/
if (vscsi->cmd_q.index)
vscsi->cmd_q.index -= 1;
@@ -3378,7 +3274,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
INIT_LIST_HEAD(&vscsi->waiting_rsp);
INIT_LIST_HEAD(&vscsi->active_q);
- snprintf(vscsi->tport.tport_name, 256, "%s", dev_name(&vdev->dev));
+ snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
+ dev_name(&vdev->dev));
pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
@@ -3393,6 +3290,9 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
strncat(vscsi->eye, vdev->name, MAX_EYE);
vscsi->dds.unit_id = vdev->unit_address;
+ strncpy(vscsi->dds.partition_name, partition_name,
+ sizeof(vscsi->dds.partition_name));
+ vscsi->dds.partition_num = partition_number;
spin_lock_bh(&ibmvscsis_dev_lock);
list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
@@ -3469,6 +3369,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
(unsigned long)vscsi);
init_completion(&vscsi->wait_idle);
+ init_completion(&vscsi->unconfig);
snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
vscsi->work_q = create_workqueue(wq_name);
@@ -3485,31 +3386,12 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
goto destroy_WQ;
}
- spin_lock_bh(&vscsi->intr_lock);
- vio_enable_interrupts(vdev);
- if (rc) {
- dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc);
- rc = -ENODEV;
- spin_unlock_bh(&vscsi->intr_lock);
- goto free_irq;
- }
-
- if (ibmvscsis_check_q(vscsi)) {
- rc = ERROR;
- dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc);
- spin_unlock_bh(&vscsi->intr_lock);
- goto disable_interrupt;
- }
- spin_unlock_bh(&vscsi->intr_lock);
+ vscsi->state = WAIT_ENABLED;
dev_set_drvdata(&vdev->dev, vscsi);
return 0;
-disable_interrupt:
- vio_disable_interrupts(vdev);
-free_irq:
- free_irq(vdev->irq, vscsi);
destroy_WQ:
destroy_workqueue(vscsi->work_q);
unmap_buf:
@@ -3543,10 +3425,11 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
- /*
- * TBD: Need to handle if there are commands on the waiting_rsp q
- * Actually, can there still be cmds outstanding to tcm?
- */
+ spin_lock_bh(&vscsi->intr_lock);
+ ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
+ vscsi->flags |= CFG_SLEEPING;
+ spin_unlock_bh(&vscsi->intr_lock);
+ wait_for_completion(&vscsi->unconfig);
vio_disable_interrupts(vdev);
free_irq(vdev->irq, vscsi);
@@ -3555,7 +3438,6 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
DMA_BIDIRECTIONAL);
kfree(vscsi->map_buf);
tasklet_kill(&vscsi->work_task);
- ibmvscsis_unregister_command_q(vscsi);
ibmvscsis_destroy_command_q(vscsi);
ibmvscsis_freetimer(vscsi);
ibmvscsis_free_cmds(vscsi);
@@ -3609,7 +3491,7 @@ static int ibmvscsis_get_system_info(void)
num = of_get_property(rootdn, "ibm,partition-no", NULL);
if (num)
- partition_number = *num;
+ partition_number = of_read_number(num, 1);
of_node_put(rootdn);
@@ -3903,18 +3785,22 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
}
if (tmp) {
- tport->enabled = true;
spin_lock_bh(&vscsi->intr_lock);
+ tport->enabled = true;
lrc = ibmvscsis_enable_change_state(vscsi);
if (lrc)
pr_err("enable_change_state failed, rc %ld state %d\n",
lrc, vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
} else {
+ spin_lock_bh(&vscsi->intr_lock);
tport->enabled = false;
+ /* This simulates the server going down */
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+ spin_unlock_bh(&vscsi->intr_lock);
}
- pr_debug("tpg_enable_store, state %d\n", vscsi->state);
+ pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state);
return count;
}
@@ -3983,10 +3869,10 @@ static struct attribute *ibmvscsis_dev_attrs[] = {
ATTRIBUTE_GROUPS(ibmvscsis_dev);
static struct class ibmvscsis_class = {
- .name = "ibmvscsis",
- .dev_release = ibmvscsis_dev_release,
- .class_attrs = ibmvscsis_class_attrs,
- .dev_groups = ibmvscsis_dev_groups,
+ .name = "ibmvscsis",
+ .dev_release = ibmvscsis_dev_release,
+ .class_attrs = ibmvscsis_class_attrs,
+ .dev_groups = ibmvscsis_dev_groups,
};
static struct vio_device_id ibmvscsis_device_table[] = {
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
index 981a0c992b6c..98b0ca79a5c5 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -204,8 +204,6 @@ struct scsi_info {
struct list_head waiting_rsp;
#define NO_QUEUE 0x00
#define WAIT_ENABLED 0X01
- /* driver has received an initialize command */
-#define PART_UP_WAIT_ENAB 0x02
#define WAIT_CONNECTION 0x04
/* have established a connection */
#define CONNECTED 0x08
@@ -259,6 +257,8 @@ struct scsi_info {
#define SCHEDULE_DISCONNECT 0x00400
/* disconnect handler is scheduled */
#define DISCONNECT_SCHEDULED 0x00800
+ /* remove function is sleeping */
+#define CFG_SLEEPING 0x01000
u32 flags;
/* adapter lock */
spinlock_t intr_lock;
@@ -287,6 +287,7 @@ struct scsi_info {
struct workqueue_struct *work_q;
struct completion wait_idle;
+ struct completion unconfig;
struct device dev;
struct vio_dev *dma_dev;
struct srp_target target;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 532474109624..835c59c777f2 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -186,16 +186,16 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
};
static const struct ipr_chip_t ipr_chip[] = {
- { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
- { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
+ { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
static int ipr_max_bus_speeds[] = {
@@ -9439,23 +9439,11 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
struct pci_dev *pdev = ioa_cfg->pdev;
+ int i;
- if (ioa_cfg->intr_flag == IPR_USE_MSI ||
- ioa_cfg->intr_flag == IPR_USE_MSIX) {
- int i;
- for (i = 0; i < ioa_cfg->nvectors; i++)
- free_irq(ioa_cfg->vectors_info[i].vec,
- &ioa_cfg->hrrq[i]);
- } else
- free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
-
- if (ioa_cfg->intr_flag == IPR_USE_MSI) {
- pci_disable_msi(pdev);
- ioa_cfg->intr_flag &= ~IPR_USE_MSI;
- } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
- pci_disable_msix(pdev);
- ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
- }
+ for (i = 0; i < ioa_cfg->nvectors; i++)
+ free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
+ pci_free_irq_vectors(pdev);
}
/**
@@ -9883,45 +9871,6 @@ static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
}
}
-static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
-{
- struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
- int i, vectors;
-
- for (i = 0; i < ARRAY_SIZE(entries); ++i)
- entries[i].entry = i;
-
- vectors = pci_enable_msix_range(ioa_cfg->pdev,
- entries, 1, ipr_number_of_msix);
- if (vectors < 0) {
- ipr_wait_for_pci_err_recovery(ioa_cfg);
- return vectors;
- }
-
- for (i = 0; i < vectors; i++)
- ioa_cfg->vectors_info[i].vec = entries[i].vector;
- ioa_cfg->nvectors = vectors;
-
- return 0;
-}
-
-static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
-{
- int i, vectors;
-
- vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
- if (vectors < 0) {
- ipr_wait_for_pci_err_recovery(ioa_cfg);
- return vectors;
- }
-
- for (i = 0; i < vectors; i++)
- ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
- ioa_cfg->nvectors = vectors;
-
- return 0;
-}
-
static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
@@ -9934,19 +9883,20 @@ static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
}
}
-static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
+static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
+ struct pci_dev *pdev)
{
int i, rc;
for (i = 1; i < ioa_cfg->nvectors; i++) {
- rc = request_irq(ioa_cfg->vectors_info[i].vec,
+ rc = request_irq(pci_irq_vector(pdev, i),
ipr_isr_mhrrq,
0,
ioa_cfg->vectors_info[i].desc,
&ioa_cfg->hrrq[i]);
if (rc) {
while (--i >= 0)
- free_irq(ioa_cfg->vectors_info[i].vec,
+ free_irq(pci_irq_vector(pdev, i),
&ioa_cfg->hrrq[i]);
return rc;
}
@@ -9984,8 +9934,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
* ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
* @pdev: PCI device struct
*
- * Description: The return value from pci_enable_msi_range() can not always be
- * trusted. This routine sets up and initiates a test interrupt to determine
+ * Description: This routine sets up and initiates a test interrupt to determine
* if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
*
@@ -9997,6 +9946,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
int rc;
volatile u32 int_reg;
unsigned long lock_flags = 0;
+ int irq = pci_irq_vector(pdev, 0);
ENTER;
@@ -10008,15 +9958,12 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
- else
- rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
+ rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
if (rc) {
- dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
+ dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
return rc;
} else if (ipr_debug)
- dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
+ dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
@@ -10033,10 +9980,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
- else
- free_irq(pdev->irq, ioa_cfg);
+ free_irq(irq, ioa_cfg);
LEAVE;
@@ -10060,6 +10004,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
int rc = PCIBIOS_SUCCESSFUL;
volatile u32 mask, uproc, interrupts;
unsigned long lock_flags, driver_lock_flags;
+ unsigned int irq_flag;
ENTER;
@@ -10175,18 +10120,18 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
}
- if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
- ipr_enable_msix(ioa_cfg) == 0)
- ioa_cfg->intr_flag = IPR_USE_MSIX;
- else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
- ipr_enable_msi(ioa_cfg) == 0)
- ioa_cfg->intr_flag = IPR_USE_MSI;
- else {
- ioa_cfg->intr_flag = IPR_USE_LSI;
- ioa_cfg->clear_isr = 1;
- ioa_cfg->nvectors = 1;
- dev_info(&pdev->dev, "Cannot enable MSI.\n");
+ irq_flag = PCI_IRQ_LEGACY;
+ if (ioa_cfg->ipr_chip->has_msi)
+ irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
+ rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
+ if (rc < 0) {
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ goto cleanup_nomem;
}
+ ioa_cfg->nvectors = rc;
+
+ if (!pdev->msi_enabled && !pdev->msix_enabled)
+ ioa_cfg->clear_isr = 1;
pci_set_master(pdev);
@@ -10199,33 +10144,23 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
}
}
- if (ioa_cfg->intr_flag == IPR_USE_MSI ||
- ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ if (pdev->msi_enabled || pdev->msix_enabled) {
rc = ipr_test_msi(ioa_cfg, pdev);
- if (rc == -EOPNOTSUPP) {
+ switch (rc) {
+ case 0:
+ dev_info(&pdev->dev,
+ "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
+ pdev->msix_enabled ? "-X" : "");
+ break;
+ case -EOPNOTSUPP:
ipr_wait_for_pci_err_recovery(ioa_cfg);
- if (ioa_cfg->intr_flag == IPR_USE_MSI) {
- ioa_cfg->intr_flag &= ~IPR_USE_MSI;
- pci_disable_msi(pdev);
- } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
- ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
- pci_disable_msix(pdev);
- }
+ pci_free_irq_vectors(pdev);
- ioa_cfg->intr_flag = IPR_USE_LSI;
ioa_cfg->nvectors = 1;
- }
- else if (rc)
+ ioa_cfg->clear_isr = 1;
+ break;
+ default:
goto out_msi_disable;
- else {
- if (ioa_cfg->intr_flag == IPR_USE_MSI)
- dev_info(&pdev->dev,
- "Request for %d MSIs succeeded with starting IRQ: %d\n",
- ioa_cfg->nvectors, pdev->irq);
- else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- dev_info(&pdev->dev,
- "Request for %d MSIXs succeeded.",
- ioa_cfg->nvectors);
}
}
@@ -10273,15 +10208,13 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->intr_flag == IPR_USE_MSI
- || ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ if (pdev->msi_enabled || pdev->msix_enabled) {
name_msi_vectors(ioa_cfg);
- rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
- 0,
+ rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
ioa_cfg->vectors_info[0].desc,
&ioa_cfg->hrrq[0]);
if (!rc)
- rc = ipr_request_other_msi_irqs(ioa_cfg);
+ rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
} else {
rc = request_irq(pdev->irq, ipr_isr,
IRQF_SHARED,
@@ -10323,10 +10256,7 @@ cleanup_nolog:
ipr_free_mem(ioa_cfg);
out_msi_disable:
ipr_wait_for_pci_err_recovery(ioa_cfg);
- if (ioa_cfg->intr_flag == IPR_USE_MSI)
- pci_disable_msi(pdev);
- else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- pci_disable_msix(pdev);
+ pci_free_irq_vectors(pdev);
cleanup_nomem:
iounmap(ipr_regs);
out_disable:
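The ipr changes above replace the driver's hand-rolled MSI/MSI-X bookkeeping with the generic PCI IRQ-vector API: pci_alloc_irq_vectors() returns the number of vectors it obtained, pci_irq_vector() maps a vector index to a Linux IRQ number, and a single pci_free_irq_vectors() call releases whatever was allocated. A minimal sketch of that pattern follows; my_handler, my_data and max_vectors are placeholders, not ipr symbols.

	int i, nvec;

	/* try MSI-X, then MSI, then legacy INTx, asking for 1..max_vectors */
	nvec = pci_alloc_irq_vectors(pdev, 1, max_vectors,
				     PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;		/* not even a legacy interrupt available */

	for (i = 0; i < nvec; i++) {
		int rc = request_irq(pci_irq_vector(pdev, i), my_handler, 0,
				     "my-driver", &my_data[i]);
		if (rc) {
			while (--i >= 0)
				free_irq(pci_irq_vector(pdev, i), &my_data[i]);
			pci_free_irq_vectors(pdev);
			return rc;
		}
	}

	/* teardown mirrors ipr_free_irqs(): free each IRQ, then the vectors */
	for (i = 0; i < nvec; i++)
		free_irq(pci_irq_vector(pdev, i), &my_data[i]);
	pci_free_irq_vectors(pdev);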
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 8995053d01b3..b7d2e98eb45b 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1413,10 +1413,7 @@ struct ipr_chip_cfg_t {
struct ipr_chip_t {
u16 vendor;
u16 device;
- u16 intr_type;
-#define IPR_USE_LSI 0x00
-#define IPR_USE_MSI 0x01
-#define IPR_USE_MSIX 0x02
+ bool has_msi;
u16 sis_type;
#define IPR_SIS32 0x00
#define IPR_SIS64 0x01
@@ -1593,11 +1590,9 @@ struct ipr_ioa_cfg {
struct ipr_cmnd **ipr_cmnd_list;
dma_addr_t *ipr_cmnd_list_dma;
- u16 intr_flag;
unsigned int nvectors;
struct {
- unsigned short vec;
char desc[22];
} vectors_info[IPR_MAX_MSIX_VECTORS];
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 02cb76fd4420..3419e1bcdff6 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -2241,9 +2241,6 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
uint8_t minor;
uint8_t subminor;
uint8_t *buffer;
- char hexDigits[] =
- { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C',
- 'D', 'E', 'F' };
METHOD_TRACE("ips_get_bios_version", 1);
@@ -2374,13 +2371,13 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
}
}
- ha->bios_version[0] = hexDigits[(major & 0xF0) >> 4];
+ ha->bios_version[0] = hex_asc_upper_hi(major);
ha->bios_version[1] = '.';
- ha->bios_version[2] = hexDigits[major & 0x0F];
- ha->bios_version[3] = hexDigits[subminor];
+ ha->bios_version[2] = hex_asc_upper_lo(major);
+ ha->bios_version[3] = hex_asc_upper_lo(subminor);
ha->bios_version[4] = '.';
- ha->bios_version[5] = hexDigits[(minor & 0xF0) >> 4];
- ha->bios_version[6] = hexDigits[minor & 0x0F];
+ ha->bios_version[5] = hex_asc_upper_hi(minor);
+ ha->bios_version[6] = hex_asc_upper_lo(minor);
ha->bios_version[7] = 0;
}
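The ips.c hunk above drops a private hexDigits[] table in favour of the kernel's standard nibble-to-ASCII helpers: hex_asc_upper_hi() converts a byte's high nibble and hex_asc_upper_lo() its low nibble, both to upper-case hex characters. A tiny illustration, with an arbitrary example value:

	u8 v = 0x4a;
	char hi = hex_asc_upper_hi(v);	/* '4' - high nibble of 0x4a */
	char lo = hex_asc_upper_lo(v);	/* 'A' - low nibble of 0x4a  */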
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 22a9bb1abae1..b3539928073c 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -295,7 +295,6 @@ enum sci_controller_states {
#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)
struct isci_pci_info {
- struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
struct isci_host *hosts[SCI_MAX_CONTROLLERS];
struct isci_orom *orom;
};
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 77128d680e3b..0b5b5db0d0f8 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -350,16 +350,12 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
*/
num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;
- for (i = 0; i < num_msix; i++)
- pci_info->msix_entries[i].entry = i;
-
- err = pci_enable_msix_exact(pdev, pci_info->msix_entries, num_msix);
- if (err)
+ err = pci_alloc_irq_vectors(pdev, num_msix, num_msix, PCI_IRQ_MSIX);
+ if (err < 0)
goto intx;
for (i = 0; i < num_msix; i++) {
int id = i / SCI_NUM_MSI_X_INT;
- struct msix_entry *msix = &pci_info->msix_entries[i];
irq_handler_t isr;
ihost = pci_info->hosts[id];
@@ -369,8 +365,8 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
else
isr = isci_msix_isr;
- err = devm_request_irq(&pdev->dev, msix->vector, isr, 0,
- DRV_NAME"-msix", ihost);
+ err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
+ isr, 0, DRV_NAME"-msix", ihost);
if (!err)
continue;
@@ -378,18 +374,19 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
while (i--) {
id = i / SCI_NUM_MSI_X_INT;
ihost = pci_info->hosts[id];
- msix = &pci_info->msix_entries[i];
- devm_free_irq(&pdev->dev, msix->vector, ihost);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
+ ihost);
}
- pci_disable_msix(pdev);
+ pci_free_irq_vectors(pdev);
goto intx;
}
return 0;
intx:
for_each_isci_host(i, ihost, pdev) {
- err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr,
- IRQF_SHARED, DRV_NAME"-intx", ihost);
+ err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 0),
+ isci_intx_isr, IRQF_SHARED, DRV_NAME"-intx",
+ ihost);
if (err)
break;
}
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
index 8ac646e5eddc..a2bbe46f8ccb 100644
--- a/drivers/scsi/isci/probe_roms.c
+++ b/drivers/scsi/isci/probe_roms.c
@@ -54,6 +54,7 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
len = pci_biosrom_size(pdev);
rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL);
if (!rom) {
+ pci_unmap_biosrom(oprom);
dev_warn(&pdev->dev,
"Unable to allocate memory for orom\n");
return NULL;
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 1910100638a2..e3f2a5359d71 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -66,6 +66,9 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
{
static const char * const strings[] = RNC_STATES;
+ if (state >= ARRAY_SIZE(strings))
+ return "UNKNOWN";
+
return strings[state];
}
#undef C
@@ -454,7 +457,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
* the device since it's being invalidated anyway */
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: SCIC Remote Node Context 0x%p was "
- "suspeneded by hardware while being "
+ "suspended by hardware while being "
"invalidated.\n", __func__, sci_rnc);
break;
default:
@@ -473,7 +476,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
* the device since it's being resumed anyway */
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: SCIC Remote Node Context 0x%p was "
- "suspeneded by hardware while being resumed.\n",
+ "suspended by hardware while being resumed.\n",
__func__, sci_rnc);
break;
default:
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index b709d2b20880..47f66e949745 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2473,7 +2473,7 @@ static void isci_request_process_response_iu(
"%s: resp_iu = %p "
"resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
"resp_iu->response_data_len = %x, "
- "resp_iu->sense_data_len = %x\nrepsonse data: ",
+ "resp_iu->sense_data_len = %x\nresponse data: ",
__func__,
resp_iu,
resp_iu->status,
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 880a9068ca12..6103231104da 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -68,10 +68,14 @@ static void fc_disc_stop_rports(struct fc_disc *disc)
lport = fc_disc_lport(disc);
- mutex_lock(&disc->disc_mutex);
- list_for_each_entry_rcu(rdata, &disc->rports, peers)
- lport->tt.rport_logoff(rdata);
- mutex_unlock(&disc->disc_mutex);
+ rcu_read_lock();
+ list_for_each_entry_rcu(rdata, &disc->rports, peers) {
+ if (kref_get_unless_zero(&rdata->kref)) {
+ fc_rport_logoff(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+ }
+ rcu_read_unlock();
}
/**
@@ -150,7 +154,7 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
break;
}
}
- lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
/*
* If not doing a complete rediscovery, do GPN_ID on
@@ -178,7 +182,7 @@ reject:
FC_DISC_DBG(disc, "Received a bad RSCN frame\n");
rjt_data.reason = ELS_RJT_LOGIC;
rjt_data.explan = ELS_EXPL_NONE;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(fp);
}
@@ -289,15 +293,19 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
* Skip ports which were never discovered. These are the dNS port
* and ports which were created by PLOGI.
*/
+ rcu_read_lock();
list_for_each_entry_rcu(rdata, &disc->rports, peers) {
- if (!rdata->disc_id)
+ if (!kref_get_unless_zero(&rdata->kref))
continue;
- if (rdata->disc_id == disc->disc_id)
- lport->tt.rport_login(rdata);
- else
- lport->tt.rport_logoff(rdata);
+ if (rdata->disc_id) {
+ if (rdata->disc_id == disc->disc_id)
+ fc_rport_login(rdata);
+ else
+ fc_rport_logoff(rdata);
+ }
+ kref_put(&rdata->kref, fc_rport_destroy);
}
-
+ rcu_read_unlock();
mutex_unlock(&disc->disc_mutex);
disc->disc_callback(lport, event);
mutex_lock(&disc->disc_mutex);
@@ -446,7 +454,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
if (ids.port_id != lport->port_id &&
ids.port_name != lport->wwpn) {
- rdata = lport->tt.rport_create(lport, ids.port_id);
+ rdata = fc_rport_create(lport, ids.port_id);
if (rdata) {
rdata->ids.port_name = ids.port_name;
rdata->disc_id = disc->disc_id;
@@ -592,7 +600,6 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
lport = rdata->local_port;
disc = &lport->disc;
- mutex_lock(&disc->disc_mutex);
if (PTR_ERR(fp) == -FC_EX_CLOSED)
goto out;
if (IS_ERR(fp))
@@ -607,37 +614,41 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
goto redisc;
pn = (struct fc_ns_gid_pn *)(cp + 1);
port_name = get_unaligned_be64(&pn->fn_wwpn);
+ mutex_lock(&rdata->rp_mutex);
if (rdata->ids.port_name == -1)
rdata->ids.port_name = port_name;
else if (rdata->ids.port_name != port_name) {
FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. "
"Port-id %6.6x wwpn %16.16llx\n",
rdata->ids.port_id, port_name);
- lport->tt.rport_logoff(rdata);
-
- new_rdata = lport->tt.rport_create(lport,
- rdata->ids.port_id);
+ mutex_unlock(&rdata->rp_mutex);
+ fc_rport_logoff(rdata);
+ mutex_lock(&lport->disc.disc_mutex);
+ new_rdata = fc_rport_create(lport, rdata->ids.port_id);
+ mutex_unlock(&lport->disc.disc_mutex);
if (new_rdata) {
new_rdata->disc_id = disc->disc_id;
- lport->tt.rport_login(new_rdata);
+ fc_rport_login(new_rdata);
}
goto out;
}
rdata->disc_id = disc->disc_id;
- lport->tt.rport_login(rdata);
+ mutex_unlock(&rdata->rp_mutex);
+ fc_rport_login(rdata);
} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
FC_DISC_DBG(disc, "GPN_ID rejected reason %x exp %x\n",
cp->ct_reason, cp->ct_explan);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
} else {
FC_DISC_DBG(disc, "GPN_ID unexpected response code %x\n",
ntohs(cp->ct_cmd));
redisc:
+ mutex_lock(&disc->disc_mutex);
fc_disc_restart(disc);
+ mutex_unlock(&disc->disc_mutex);
}
out:
- mutex_unlock(&disc->disc_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -678,7 +689,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
{
struct fc_rport_priv *rdata;
- rdata = lport->tt.rport_create(lport, dp->port_id);
+ rdata = fc_rport_create(lport, dp->port_id);
if (!rdata)
return -ENOMEM;
rdata->disc_id = 0;
@@ -708,7 +719,7 @@ static void fc_disc_stop(struct fc_lport *lport)
static void fc_disc_stop_final(struct fc_lport *lport)
{
fc_disc_stop(lport);
- lport->tt.rport_flush_queue();
+ fc_rport_flush_queue();
}
/**
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index c2384d501470..6384a98048af 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -67,7 +67,7 @@ struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type,
FC_FCTL_REQ, 0);
- return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
+ return fc_exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
}
EXPORT_SYMBOL(fc_elsct_send);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 16ca31ad5ec0..42bcf7f3a0f9 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -94,6 +94,7 @@ struct fc_exch_pool {
struct fc_exch_mgr {
struct fc_exch_pool __percpu *pool;
mempool_t *ep_pool;
+ struct fc_lport *lport;
enum fc_class class;
struct kref kref;
u16 min_xid;
@@ -362,8 +363,10 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
fc_exch_hold(ep); /* hold for timer */
if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
- msecs_to_jiffies(timer_msec)))
+ msecs_to_jiffies(timer_msec))) {
+ FC_EXCH_DBG(ep, "Exchange already queued\n");
fc_exch_release(ep);
+ }
}
/**
@@ -406,6 +409,8 @@ static int fc_exch_done_locked(struct fc_exch *ep)
return rc;
}
+static struct fc_exch fc_quarantine_exch;
+
/**
* fc_exch_ptr_get() - Return an exchange from an exchange pool
* @pool: Exchange Pool to get an exchange from
@@ -450,14 +455,17 @@ static void fc_exch_delete(struct fc_exch *ep)
/* update cache of free slot */
index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
- if (pool->left == FC_XID_UNKNOWN)
- pool->left = index;
- else if (pool->right == FC_XID_UNKNOWN)
- pool->right = index;
- else
- pool->next_index = index;
-
- fc_exch_ptr_set(pool, index, NULL);
+ if (!(ep->state & FC_EX_QUARANTINE)) {
+ if (pool->left == FC_XID_UNKNOWN)
+ pool->left = index;
+ else if (pool->right == FC_XID_UNKNOWN)
+ pool->right = index;
+ else
+ pool->next_index = index;
+ fc_exch_ptr_set(pool, index, NULL);
+ } else {
+ fc_exch_ptr_set(pool, index, &fc_quarantine_exch);
+ }
list_del(&ep->ex_list);
spin_unlock_bh(&pool->lock);
fc_exch_release(ep); /* drop hold for exch in mp */
@@ -525,8 +533,7 @@ out:
* Note: The frame will be freed either by a direct call to fc_frame_free(fp)
* or indirectly by calling libfc_function_template.frame_send().
*/
-static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
- struct fc_frame *fp)
+int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp)
{
struct fc_exch *ep;
int error;
@@ -536,6 +543,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
spin_unlock_bh(&ep->ex_lock);
return error;
}
+EXPORT_SYMBOL(fc_seq_send);
/**
* fc_seq_alloc() - Allocate a sequence for a given exchange
@@ -577,7 +585,7 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
* for a given sequence/exchange pair
* @sp: The sequence/exchange to get a new exchange for
*/
-static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
+struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
{
struct fc_exch *ep = fc_seq_exch(sp);
@@ -587,16 +595,16 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
return sp;
}
+EXPORT_SYMBOL(fc_seq_start_next);
/*
* Set the response handler for the exchange associated with a sequence.
*
* Note: May sleep if invoked from outside a response handler.
*/
-static void fc_seq_set_resp(struct fc_seq *sp,
- void (*resp)(struct fc_seq *, struct fc_frame *,
- void *),
- void *arg)
+void fc_seq_set_resp(struct fc_seq *sp,
+ void (*resp)(struct fc_seq *, struct fc_frame *, void *),
+ void *arg)
{
struct fc_exch *ep = fc_seq_exch(sp);
DEFINE_WAIT(wait);
@@ -615,12 +623,20 @@ static void fc_seq_set_resp(struct fc_seq *sp,
ep->arg = arg;
spin_unlock_bh(&ep->ex_lock);
}
+EXPORT_SYMBOL(fc_seq_set_resp);
/**
* fc_exch_abort_locked() - Abort an exchange
* @ep: The exchange to be aborted
* @timer_msec: The period of time to wait before aborting
*
+ * Abort an exchange and sequence. Generally called because of an
+ * exchange timeout or an abort from the upper layer.
+ *
+ * A timer_msec can be specified for the abort timeout; if a non-zero
+ * timer_msec value is specified, the exchange resp handler will be
+ * called with a timeout error if no response to the abort is received.
+ *
* Locking notes: Called with exch lock held
*
* Return value: 0 on success else error code
@@ -632,9 +648,13 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
struct fc_frame *fp;
int error;
+ FC_EXCH_DBG(ep, "exch: abort, time %d msecs\n", timer_msec);
if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
- ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))
+ ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
+ FC_EXCH_DBG(ep, "exch: already completed esb %x state %x\n",
+ ep->esb_stat, ep->state);
return -ENXIO;
+ }
/*
* Send the abort on a new sequence if possible.
@@ -680,8 +700,7 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
*
* Return value: 0 on success else error code
*/
-static int fc_seq_exch_abort(const struct fc_seq *req_sp,
- unsigned int timer_msec)
+int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
{
struct fc_exch *ep;
int error;
@@ -758,7 +777,7 @@ static void fc_exch_timeout(struct work_struct *work)
u32 e_stat;
int rc = 1;
- FC_EXCH_DBG(ep, "Exchange timed out\n");
+ FC_EXCH_DBG(ep, "Exchange timed out state %x\n", ep->state);
spin_lock_bh(&ep->ex_lock);
if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
@@ -821,14 +840,18 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
/* peek cache of free slot */
if (pool->left != FC_XID_UNKNOWN) {
- index = pool->left;
- pool->left = FC_XID_UNKNOWN;
- goto hit;
+ if (!WARN_ON(fc_exch_ptr_get(pool, pool->left))) {
+ index = pool->left;
+ pool->left = FC_XID_UNKNOWN;
+ goto hit;
+ }
}
if (pool->right != FC_XID_UNKNOWN) {
- index = pool->right;
- pool->right = FC_XID_UNKNOWN;
- goto hit;
+ if (!WARN_ON(fc_exch_ptr_get(pool, pool->right))) {
+ index = pool->right;
+ pool->right = FC_XID_UNKNOWN;
+ goto hit;
+ }
}
index = pool->next_index;
@@ -888,14 +911,19 @@ err:
* EM is selected when a NULL match function pointer is encountered
* or when a call to a match function returns true.
*/
-static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
- struct fc_frame *fp)
+static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
+ struct fc_frame *fp)
{
struct fc_exch_mgr_anchor *ema;
+ struct fc_exch *ep;
- list_for_each_entry(ema, &lport->ema_list, ema_list)
- if (!ema->match || ema->match(fp))
- return fc_exch_em_alloc(lport, ema->mp);
+ list_for_each_entry(ema, &lport->ema_list, ema_list) {
+ if (!ema->match || ema->match(fp)) {
+ ep = fc_exch_em_alloc(lport, ema->mp);
+ if (ep)
+ return ep;
+ }
+ }
return NULL;
}
@@ -906,14 +934,17 @@ static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
*/
static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
{
+ struct fc_lport *lport = mp->lport;
struct fc_exch_pool *pool;
struct fc_exch *ep = NULL;
u16 cpu = xid & fc_cpu_mask;
+ if (xid == FC_XID_UNKNOWN)
+ return NULL;
+
if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
- printk_ratelimited(KERN_ERR
- "libfc: lookup request for XID = %d, "
- "indicates invalid CPU %d\n", xid, cpu);
+ pr_err("host%u: lport %6.6x: xid %d invalid CPU %d\n:",
+ lport->host->host_no, lport->port_id, xid, cpu);
return NULL;
}
@@ -921,6 +952,10 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
pool = per_cpu_ptr(mp->pool, cpu);
spin_lock_bh(&pool->lock);
ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
+ if (ep == &fc_quarantine_exch) {
+ FC_LPORT_DBG(lport, "xid %x quarantined\n", xid);
+ ep = NULL;
+ }
if (ep) {
WARN_ON(ep->xid != xid);
fc_exch_hold(ep);
@@ -938,7 +973,7 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
*
* Note: May sleep if invoked from outside a response handler.
*/
-static void fc_exch_done(struct fc_seq *sp)
+void fc_exch_done(struct fc_seq *sp)
{
struct fc_exch *ep = fc_seq_exch(sp);
int rc;
@@ -951,6 +986,7 @@ static void fc_exch_done(struct fc_seq *sp)
if (!rc)
fc_exch_delete(ep);
}
+EXPORT_SYMBOL(fc_exch_done);
/**
* fc_exch_resp() - Allocate a new exchange for a response frame
@@ -1197,8 +1233,8 @@ static void fc_exch_set_addr(struct fc_exch *ep,
*
* The received frame is not freed.
*/
-static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
- struct fc_seq_els_data *els_data)
+void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
+ struct fc_seq_els_data *els_data)
{
switch (els_cmd) {
case ELS_LS_RJT:
@@ -1217,6 +1253,7 @@ static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
}
}
+EXPORT_SYMBOL_GPL(fc_seq_els_rsp_send);
/**
* fc_seq_send_last() - Send a sequence that is the last in the exchange
@@ -1258,8 +1295,10 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
*/
if (fc_sof_needs_ack(fr_sof(rx_fp))) {
fp = fc_frame_alloc(lport, 0);
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(ep, "Drop ACK request, out of memory\n");
return;
+ }
fh = fc_frame_header_get(fp);
fh->fh_r_ctl = FC_RCTL_ACK_1;
@@ -1312,13 +1351,18 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
struct fc_frame_header *rx_fh;
struct fc_frame_header *fh;
struct fc_ba_rjt *rp;
+ struct fc_seq *sp;
struct fc_lport *lport;
unsigned int f_ctl;
lport = fr_dev(rx_fp);
+ sp = fr_seq(rx_fp);
fp = fc_frame_alloc(lport, sizeof(*rp));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(fc_seq_exch(sp),
+ "Drop BA_RJT request, out of memory\n");
return;
+ }
fh = fc_frame_header_get(fp);
rx_fh = fc_frame_header_get(rx_fp);
@@ -1383,14 +1427,17 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
if (!ep)
goto reject;
+ FC_EXCH_DBG(ep, "exch: ABTS received\n");
fp = fc_frame_alloc(ep->lp, sizeof(*ap));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(ep, "Drop ABTS request, out of memory\n");
goto free;
+ }
spin_lock_bh(&ep->ex_lock);
if (ep->esb_stat & ESB_ST_COMPLETE) {
spin_unlock_bh(&ep->ex_lock);
-
+ FC_EXCH_DBG(ep, "exch: ABTS rejected, exchange complete\n");
fc_frame_free(fp);
goto reject;
}
@@ -1433,7 +1480,7 @@ reject:
* A reference will be held on the exchange/sequence for the caller, which
* must call fc_seq_release().
*/
-static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
+struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_exch_mgr_anchor *ema;
@@ -1447,15 +1494,17 @@ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
break;
return fr_seq(fp);
}
+EXPORT_SYMBOL(fc_seq_assign);
/**
* fc_seq_release() - Release the hold
* @sp: The sequence.
*/
-static void fc_seq_release(struct fc_seq *sp)
+void fc_seq_release(struct fc_seq *sp)
{
fc_exch_release(fc_seq_exch(sp));
}
+EXPORT_SYMBOL(fc_seq_release);
/**
* fc_exch_recv_req() - Handler for an incoming request
@@ -1491,7 +1540,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
* The upper-level protocol may request one later, if needed.
*/
if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
- return lport->tt.lport_recv(lport, fp);
+ return fc_lport_recv(lport, fp);
reject = fc_seq_lookup_recip(lport, mp, fp);
if (reject == FC_RJT_NONE) {
@@ -1512,7 +1561,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
* first.
*/
if (!fc_invoke_resp(ep, sp, fp))
- lport->tt.lport_recv(lport, fp);
+ fc_lport_recv(lport, fp);
fc_exch_release(ep); /* release from lookup */
} else {
FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
@@ -1562,9 +1611,6 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
if (fc_sof_is_init(sof)) {
sp->ssb_stat |= SSB_ST_RESP;
sp->id = fh->fh_seq_id;
- } else if (sp->id != fh->fh_seq_id) {
- atomic_inc(&mp->stats.seq_not_found);
- goto rel;
}
f_ctl = ntoh24(fh->fh_f_ctl);
@@ -1761,7 +1807,10 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
fc_frame_free(fp);
break;
case FC_RCTL_BA_ABTS:
- fc_exch_recv_abts(ep, fp);
+ if (ep)
+ fc_exch_recv_abts(ep, fp);
+ else
+ fc_frame_free(fp);
break;
default: /* ignore junk */
fc_frame_free(fp);
@@ -1784,11 +1833,16 @@ static void fc_seq_ls_acc(struct fc_frame *rx_fp)
struct fc_lport *lport;
struct fc_els_ls_acc *acc;
struct fc_frame *fp;
+ struct fc_seq *sp;
lport = fr_dev(rx_fp);
+ sp = fr_seq(rx_fp);
fp = fc_frame_alloc(lport, sizeof(*acc));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(fc_seq_exch(sp),
+ "exch: drop LS_ACC, out of memory\n");
return;
+ }
acc = fc_frame_payload_get(fp, sizeof(*acc));
memset(acc, 0, sizeof(*acc));
acc->la_cmd = ELS_LS_ACC;
@@ -1811,11 +1865,16 @@ static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
struct fc_lport *lport;
struct fc_els_ls_rjt *rjt;
struct fc_frame *fp;
+ struct fc_seq *sp;
lport = fr_dev(rx_fp);
+ sp = fr_seq(rx_fp);
fp = fc_frame_alloc(lport, sizeof(*rjt));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(fc_seq_exch(sp),
+ "exch: drop LS_RJT, out of memory\n");
return;
+ }
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
memset(rjt, 0, sizeof(*rjt));
rjt->er_cmd = ELS_LS_RJT;
@@ -1960,8 +2019,7 @@ static void fc_exch_els_rec(struct fc_frame *rfp)
enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
enum fc_els_rjt_explan explan;
u32 sid;
- u16 rxid;
- u16 oxid;
+ u16 xid, rxid, oxid;
lport = fr_dev(rfp);
rp = fc_frame_payload_get(rfp, sizeof(*rp));
@@ -1972,18 +2030,35 @@ static void fc_exch_els_rec(struct fc_frame *rfp)
rxid = ntohs(rp->rec_rx_id);
oxid = ntohs(rp->rec_ox_id);
- ep = fc_exch_lookup(lport,
- sid == fc_host_port_id(lport->host) ? oxid : rxid);
explan = ELS_EXPL_OXID_RXID;
- if (!ep)
+ if (sid == fc_host_port_id(lport->host))
+ xid = oxid;
+ else
+ xid = rxid;
+ if (xid == FC_XID_UNKNOWN) {
+ FC_LPORT_DBG(lport,
+ "REC request from %x: invalid rxid %x oxid %x\n",
+ sid, rxid, oxid);
+ goto reject;
+ }
+ ep = fc_exch_lookup(lport, xid);
+ if (!ep) {
+ FC_LPORT_DBG(lport,
+ "REC request from %x: rxid %x oxid %x not found\n",
+ sid, rxid, oxid);
goto reject;
+ }
+ FC_EXCH_DBG(ep, "REC request from %x: rxid %x oxid %x\n",
+ sid, rxid, oxid);
if (ep->oid != sid || oxid != ep->oxid)
goto rel;
if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
goto rel;
fp = fc_frame_alloc(lport, sizeof(*acc));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(ep, "Drop REC request, out of memory\n");
goto out;
+ }
acc = fc_frame_payload_get(fp, sizeof(*acc));
memset(acc, 0, sizeof(*acc));
@@ -2065,6 +2140,24 @@ cleanup:
* @arg: The argument to be passed to the response handler
* @timer_msec: The timeout period for the exchange
*
+ * The exchange response handler is set in this routine to the resp()
+ * function pointer. It can be called in two scenarios: if a timeout
+ * occurs or if a response frame is received for the exchange. The
+ * fc_frame pointer in the response handler will also indicate a
+ * timeout as an error using the IS_ERR related macros.
+ *
+ * The exchange destructor handler is also set in this routine.
+ * The destructor handler is invoked by the EM layer when the exchange
+ * is about to be freed; this can be used by the caller to free its
+ * resources along with the exchange.
+ *
+ * The arg is passed back to the resp and destructor handlers.
+ *
+ * The timeout value (in msec) for an exchange is set if a non-zero
+ * timer_msec argument is specified. The timer is canceled when
+ * it fires or when the exchange is done. The exchange timeout handler
+ * is registered by the EM layer.
+ *
* The frame pointer with some of the header's fields must be
* filled before calling this routine, those fields are:
*
@@ -2075,14 +2168,13 @@ cleanup:
* - frame control
* - parameter or relative offset
*/
-static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
- struct fc_frame *fp,
- void (*resp)(struct fc_seq *,
- struct fc_frame *fp,
- void *arg),
- void (*destructor)(struct fc_seq *,
- void *),
- void *arg, u32 timer_msec)
+struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
+ struct fc_frame *fp,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *fp,
+ void *arg),
+ void (*destructor)(struct fc_seq *, void *),
+ void *arg, u32 timer_msec)
{
struct fc_exch *ep;
struct fc_seq *sp = NULL;
@@ -2101,7 +2193,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
ep->resp = resp;
ep->destructor = destructor;
ep->arg = arg;
- ep->r_a_tov = FC_DEF_R_A_TOV;
+ ep->r_a_tov = lport->r_a_tov;
ep->lp = lport;
sp = &ep->seq;
@@ -2135,6 +2227,7 @@ err:
fc_exch_delete(ep);
return NULL;
}
+EXPORT_SYMBOL(fc_exch_seq_send);
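With fc_exch_seq_send() now exported, callers outside the EM can use it directly. A minimal, hedged caller sketch (not part of this patch; my_resp(), my_send() and struct my_ctx are illustrative names) of the resp()/IS_ERR() contract documented above:

struct my_ctx {				/* illustrative private context */
	int status;
};

static void my_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	struct my_ctx *ctx = arg;	/* the 'arg' passed at send time */

	if (IS_ERR(fp)) {		/* timeout or exchange error */
		ctx->status = PTR_ERR(fp);	/* e.g. -FC_EX_TIMEOUT */
		return;			/* no frame to free on error */
	}
	/* ... inspect the response payload here ... */
	fc_frame_free(fp);
}

static int my_send(struct fc_lport *lport, struct fc_frame *fp,
		   struct my_ctx *ctx)
{
	/* fp must already have d_id, s_id, type, f_ctl, etc. filled in */
	if (!fc_exch_seq_send(lport, fp, my_resp, NULL /* no destructor */,
			      ctx, 2 * lport->r_a_tov))
		return -ENOMEM;	/* in-tree callers treat fp as consumed here */
	return 0;
}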
/**
* fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
@@ -2176,6 +2269,7 @@ static void fc_exch_rrq(struct fc_exch *ep)
return;
retry:
+ FC_EXCH_DBG(ep, "exch: RRQ send failed\n");
spin_lock_bh(&ep->ex_lock);
if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
spin_unlock_bh(&ep->ex_lock);
@@ -2218,6 +2312,8 @@ static void fc_exch_els_rrq(struct fc_frame *fp)
if (!ep)
goto reject;
spin_lock_bh(&ep->ex_lock);
+ FC_EXCH_DBG(ep, "RRQ request from %x: xid %x rxid %x oxid %x\n",
+ sid, xid, ntohs(rp->rrq_rx_id), ntohs(rp->rrq_ox_id));
if (ep->oxid != ntohs(rp->rrq_ox_id))
goto unlock_reject;
if (ep->rxid != ntohs(rp->rrq_rx_id) &&
@@ -2385,6 +2481,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
return NULL;
mp->class = class;
+ mp->lport = lport;
/* adjust em exch xid range for offload */
mp->min_xid = min_xid;
@@ -2558,36 +2655,9 @@ EXPORT_SYMBOL(fc_exch_recv);
*/
int fc_exch_init(struct fc_lport *lport)
{
- if (!lport->tt.seq_start_next)
- lport->tt.seq_start_next = fc_seq_start_next;
-
- if (!lport->tt.seq_set_resp)
- lport->tt.seq_set_resp = fc_seq_set_resp;
-
- if (!lport->tt.exch_seq_send)
- lport->tt.exch_seq_send = fc_exch_seq_send;
-
- if (!lport->tt.seq_send)
- lport->tt.seq_send = fc_seq_send;
-
- if (!lport->tt.seq_els_rsp_send)
- lport->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
-
- if (!lport->tt.exch_done)
- lport->tt.exch_done = fc_exch_done;
-
if (!lport->tt.exch_mgr_reset)
lport->tt.exch_mgr_reset = fc_exch_mgr_reset;
- if (!lport->tt.seq_exch_abort)
- lport->tt.seq_exch_abort = fc_seq_exch_abort;
-
- if (!lport->tt.seq_assign)
- lport->tt.seq_assign = fc_seq_assign;
-
- if (!lport->tt.seq_release)
- lport->tt.seq_release = fc_seq_release;
-
return 0;
}
EXPORT_SYMBOL(fc_exch_init);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 5121272f28fd..0e67621477a8 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -122,6 +122,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
#define FC_HRD_ERROR 9
#define FC_CRC_ERROR 10
#define FC_TIMED_OUT 11
+#define FC_TRANS_RESET 12
/*
* Error recovery timeout values.
@@ -195,7 +196,7 @@ static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
* @seq: The sequence that the FCP packet is on (required by destructor API)
* @fsp: The FCP packet to be released
*
- * This routine is called by a destructor callback in the exch_seq_send()
+ * This routine is called by a destructor callback in the fc_exch_seq_send()
* routine of the libfc Transport Template. The 'struct fc_seq' is a required
* argument even though it is not used by this routine.
*
@@ -253,8 +254,21 @@ static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
*/
static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
{
- if (!(fsp->state & FC_SRB_COMPL))
+ if (!(fsp->state & FC_SRB_COMPL)) {
mod_timer(&fsp->timer, jiffies + delay);
+ fsp->timer_delay = delay;
+ }
+}
+
+static void fc_fcp_abort_done(struct fc_fcp_pkt *fsp)
+{
+ fsp->state |= FC_SRB_ABORTED;
+ fsp->state &= ~FC_SRB_ABORT_PENDING;
+
+ if (fsp->wait_for_comp)
+ complete(&fsp->tm_done);
+ else
+ fc_fcp_complete_locked(fsp);
}
/**
@@ -264,6 +278,8 @@ static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
*/
static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
{
+ int rc;
+
if (!fsp->seq_ptr)
return -EINVAL;
@@ -271,7 +287,16 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
put_cpu();
fsp->state |= FC_SRB_ABORT_PENDING;
- return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
+ rc = fc_seq_exch_abort(fsp->seq_ptr, 0);
+ /*
+ * fc_seq_exch_abort() might return -ENXIO if
+ * the sequence is already completed
+ */
+ if (rc == -ENXIO) {
+ fc_fcp_abort_done(fsp);
+ rc = 0;
+ }
+ return rc;
}
/**
@@ -283,16 +308,16 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
* fc_io_compl() will notify the SCSI-ml that the I/O is done.
* The SCSI-ml will retry the command.
*/
-static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
+static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp, int status_code)
{
if (fsp->seq_ptr) {
- fsp->lp->tt.exch_done(fsp->seq_ptr);
+ fc_exch_done(fsp->seq_ptr);
fsp->seq_ptr = NULL;
}
fsp->state &= ~FC_SRB_ABORT_PENDING;
fsp->io_status = 0;
- fsp->status_code = FC_ERROR;
+ fsp->status_code = status_code;
fc_fcp_complete_locked(fsp);
}
@@ -402,8 +427,6 @@ static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
if (!can_queue)
can_queue = 1;
lport->host->can_queue = can_queue;
- shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n"
- "Reducing can_queue to %d.\n", can_queue);
unlock:
spin_unlock_irqrestore(lport->host->host_lock, flags);
@@ -430,10 +453,29 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
put_cpu();
/* error case */
fc_fcp_can_queue_ramp_down(lport);
+ shost_printk(KERN_ERR, lport->host,
+ "libfc: Could not allocate frame, "
+ "reducing can_queue to %d.\n", lport->host->can_queue);
return NULL;
}
/**
+ * get_fsp_rec_tov() - Helper function to get REC_TOV
+ * @fsp: the FCP packet
+ *
+ * Returns rec tov in jiffies as max(rpriv->e_d_tov, FC_DEF_E_D_TOV) + 1 second
+ */
+static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
+{
+ struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data;
+ unsigned int e_d_tov = FC_DEF_E_D_TOV;
+
+ if (rpriv && rpriv->e_d_tov > e_d_tov)
+ e_d_tov = rpriv->e_d_tov;
+ return msecs_to_jiffies(e_d_tov) + HZ;
+}
+
+/**
* fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target
* @fsp: The FCP packet the data is on
* @fp: The data frame
@@ -536,8 +578,10 @@ crc_err:
* and completes the transfer, call the completion handler.
*/
if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
- fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
+ fsp->xfer_len == fsp->data_len - fsp->scsi_resid) {
+ FC_FCP_DBG(fsp, "complete out-of-order sequence\n");
fc_fcp_complete_locked(fsp);
+ }
return;
err:
fc_fcp_recovery(fsp, host_bcode);
@@ -609,7 +653,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
remaining = seq_blen;
fh_parm_offset = frame_offset = offset;
tlen = 0;
- seq = lport->tt.seq_start_next(seq);
+ seq = fc_seq_start_next(seq);
f_ctl = FC_FC_REL_OFF;
WARN_ON(!seq);
@@ -687,7 +731,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
/*
* send fragment using for a sequence.
*/
- error = lport->tt.seq_send(lport, seq, fp);
+ error = fc_seq_send(lport, seq, fp);
if (error) {
WARN_ON(1); /* send error should be rare */
return error;
@@ -727,15 +771,8 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
ba_done = 0;
}
- if (ba_done) {
- fsp->state |= FC_SRB_ABORTED;
- fsp->state &= ~FC_SRB_ABORT_PENDING;
-
- if (fsp->wait_for_comp)
- complete(&fsp->tm_done);
- else
- fc_fcp_complete_locked(fsp);
- }
+ if (ba_done)
+ fc_fcp_abort_done(fsp);
}
/**
@@ -764,8 +801,11 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
fh = fc_frame_header_get(fp);
r_ctl = fh->fh_r_ctl;
- if (lport->state != LPORT_ST_READY)
+ if (lport->state != LPORT_ST_READY) {
+ FC_FCP_DBG(fsp, "lport state %d, ignoring r_ctl %x\n",
+ lport->state, r_ctl);
goto out;
+ }
if (fc_fcp_lock_pkt(fsp))
goto out;
@@ -774,8 +814,10 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
goto unlock;
}
- if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
+ if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING)) {
+ FC_FCP_DBG(fsp, "command aborted, ignoring r_ctl %x\n", r_ctl);
goto unlock;
+ }
if (r_ctl == FC_RCTL_DD_DATA_DESC) {
/*
@@ -910,7 +952,16 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
* Wait a at least one jiffy to see if it is delivered.
* If this expires without data, we may do SRR.
*/
- fc_fcp_timer_set(fsp, 2);
+ if (fsp->lp->qfull) {
+ FC_FCP_DBG(fsp, "tgt %6.6x queue busy retry\n",
+ fsp->rport->port_id);
+ return;
+ }
+ FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx data underrun "
+ "len %x, data len %x\n",
+ fsp->rport->port_id,
+ fsp->xfer_len, expected_len, fsp->data_len);
+ fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
return;
}
fsp->status_code = FC_DATA_OVRRUN;
@@ -959,8 +1010,11 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
if (fsp->cdb_status == SAM_STAT_GOOD &&
fsp->xfer_len < fsp->data_len && !fsp->io_status &&
(!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
- fsp->xfer_len < fsp->data_len - fsp->scsi_resid))
+ fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
+ FC_FCP_DBG(fsp, "data underrun, xfer %zx data %x\n",
+ fsp->xfer_len, fsp->data_len);
fsp->status_code = FC_DATA_UNDRUN;
+ }
}
seq = fsp->seq_ptr;
@@ -970,7 +1024,7 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
struct fc_frame *conf_frame;
struct fc_seq *csp;
- csp = lport->tt.seq_start_next(seq);
+ csp = fc_seq_start_next(seq);
conf_frame = fc_fcp_frame_alloc(fsp->lp, 0);
if (conf_frame) {
f_ctl = FC_FC_SEQ_INIT;
@@ -979,10 +1033,10 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
ep->did, ep->sid,
FC_TYPE_FCP, f_ctl, 0);
- lport->tt.seq_send(lport, csp, conf_frame);
+ fc_seq_send(lport, csp, conf_frame);
}
}
- lport->tt.exch_done(seq);
+ fc_exch_done(seq);
}
/*
* Some resets driven by SCSI are not I/Os and do not have
@@ -1000,10 +1054,8 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
*/
static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
{
- struct fc_lport *lport = fsp->lp;
-
if (fsp->seq_ptr) {
- lport->tt.exch_done(fsp->seq_ptr);
+ fc_exch_done(fsp->seq_ptr);
fsp->seq_ptr = NULL;
}
fsp->status_code = error;
@@ -1116,19 +1168,6 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
}
/**
- * get_fsp_rec_tov() - Helper function to get REC_TOV
- * @fsp: the FCP packet
- *
- * Returns rec tov in jiffies as rpriv->e_d_tov + 1 second
- */
-static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
-{
- struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data;
-
- return msecs_to_jiffies(rpriv->e_d_tov) + HZ;
-}
-
-/**
* fc_fcp_cmd_send() - Send a FCP command
* @lport: The local port to send the command on
* @fsp: The FCP packet the command is on
@@ -1165,8 +1204,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
rpriv->local_port->port_id, FC_TYPE_FCP,
FC_FCTL_REQ, 0);
- seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy,
- fsp, 0);
+ seq = fc_exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
if (!seq) {
rc = -1;
goto unlock;
@@ -1196,7 +1234,7 @@ static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
return;
if (error == -FC_EX_CLOSED) {
- fc_fcp_retry_cmd(fsp);
+ fc_fcp_retry_cmd(fsp, FC_ERROR);
goto unlock;
}
@@ -1222,8 +1260,16 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
int rc = FAILED;
unsigned long ticks_left;
- if (fc_fcp_send_abort(fsp))
+ FC_FCP_DBG(fsp, "pkt abort state %x\n", fsp->state);
+ if (fc_fcp_send_abort(fsp)) {
+ FC_FCP_DBG(fsp, "failed to send abort\n");
return FAILED;
+ }
+
+ if (fsp->state & FC_SRB_ABORTED) {
+ FC_FCP_DBG(fsp, "target abort cmd completed\n");
+ return SUCCESS;
+ }
init_completion(&fsp->tm_done);
fsp->wait_for_comp = 1;
@@ -1301,7 +1347,7 @@ static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
spin_lock_bh(&fsp->scsi_pkt_lock);
if (fsp->seq_ptr) {
- lport->tt.exch_done(fsp->seq_ptr);
+ fc_exch_done(fsp->seq_ptr);
fsp->seq_ptr = NULL;
}
fsp->wait_for_comp = 0;
@@ -1355,7 +1401,7 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
if (fh->fh_type != FC_TYPE_BLS)
fc_fcp_resp(fsp, fp);
fsp->seq_ptr = NULL;
- fsp->lp->tt.exch_done(seq);
+ fc_exch_done(seq);
out_unlock:
fc_fcp_unlock_pkt(fsp);
out:
@@ -1394,6 +1440,15 @@ static void fc_fcp_timeout(unsigned long data)
if (fsp->cdb_cmd.fc_tm_flags)
goto unlock;
+ if (fsp->lp->qfull) {
+ FC_FCP_DBG(fsp, "fcp timeout, resetting timer delay %d\n",
+ fsp->timer_delay);
+ setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
+ fc_fcp_timer_set(fsp, fsp->timer_delay);
+ goto unlock;
+ }
+ FC_FCP_DBG(fsp, "fcp timeout, delay %d flags %x state %x\n",
+ fsp->timer_delay, rpriv->flags, fsp->state);
fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
@@ -1486,8 +1541,8 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
switch (rjt->er_reason) {
default:
- FC_FCP_DBG(fsp, "device %x unexpected REC reject "
- "reason %d expl %d\n",
+ FC_FCP_DBG(fsp,
+ "device %x invalid REC reject %d/%d\n",
fsp->rport->port_id, rjt->er_reason,
rjt->er_explan);
/* fall through */
@@ -1503,18 +1558,23 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
break;
case ELS_RJT_LOGIC:
case ELS_RJT_UNAB:
+ FC_FCP_DBG(fsp, "device %x REC reject %d/%d\n",
+ fsp->rport->port_id, rjt->er_reason,
+ rjt->er_explan);
/*
- * If no data transfer, the command frame got dropped
- * so we just retry. If data was transferred, we
- * lost the response but the target has no record,
- * so we abort and retry.
+ * If the response got lost or is stuck in the
+ * queue somewhere, we have no idea if and when
+ * the response will be received. So quarantine
+ * the xid and retry the command.
*/
- if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
- fsp->xfer_len == 0) {
- fc_fcp_retry_cmd(fsp);
+ if (rjt->er_explan == ELS_EXPL_OXID_RXID) {
+ struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
+ ep->state |= FC_EX_QUARANTINE;
+ fsp->state |= FC_SRB_ABORTED;
+ fc_fcp_retry_cmd(fsp, FC_TRANS_RESET);
break;
}
- fc_fcp_recovery(fsp, FC_ERROR);
+ fc_fcp_recovery(fsp, FC_TRANS_RESET);
break;
}
} else if (opcode == ELS_LS_ACC) {
@@ -1608,7 +1668,9 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
switch (error) {
case -FC_EX_CLOSED:
- fc_fcp_retry_cmd(fsp);
+ FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange closed\n",
+ fsp, fsp->rport->port_id);
+ fc_fcp_retry_cmd(fsp, FC_ERROR);
break;
default:
@@ -1622,8 +1684,8 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
* Assume REC or LS_ACC was lost.
* The exchange manager will have aborted REC, so retry.
*/
- FC_FCP_DBG(fsp, "REC fid %6.6x error error %d retry %d/%d\n",
- fsp->rport->port_id, error, fsp->recov_retry,
+ FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange timeout retry %d/%d\n",
+ fsp, fsp->rport->port_id, fsp->recov_retry,
FC_MAX_RECOV_RETRY);
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
@@ -1642,6 +1704,7 @@ out:
*/
static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
{
+ FC_FCP_DBG(fsp, "start recovery code %x\n", code);
fsp->status_code = code;
fsp->cdb_status = 0;
fsp->io_status = 0;
@@ -1668,7 +1731,6 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
struct fc_seq *seq;
struct fcp_srr *srr;
struct fc_frame *fp;
- unsigned int rec_tov;
rport = fsp->rport;
rpriv = rport->dd_data;
@@ -1692,10 +1754,9 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
rpriv->local_port->port_id, FC_TYPE_FCP,
FC_FCTL_REQ, 0);
- rec_tov = get_fsp_rec_tov(fsp);
- seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp,
- fc_fcp_pkt_destroy,
- fsp, jiffies_to_msecs(rec_tov));
+ seq = fc_exch_seq_send(lport, fp, fc_fcp_srr_resp,
+ fc_fcp_pkt_destroy,
+ fsp, get_fsp_rec_tov(fsp));
if (!seq)
goto retry;
@@ -1706,7 +1767,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */
return;
retry:
- fc_fcp_retry_cmd(fsp);
+ fc_fcp_retry_cmd(fsp, FC_TRANS_RESET);
}
/**
@@ -1730,9 +1791,9 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
fh = fc_frame_header_get(fp);
/*
- * BUG? fc_fcp_srr_error calls exch_done which would release
+ * BUG? fc_fcp_srr_error calls fc_exch_done which would release
* the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
- * then fc_exch_timeout would be sending an abort. The exch_done
+ * then fc_exch_timeout would be sending an abort. The fc_exch_done
* call by fc_fcp_srr_error would prevent fc_exch.c from seeing
* an abort response though.
*/
@@ -1753,7 +1814,7 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
}
fc_fcp_unlock_pkt(fsp);
out:
- fsp->lp->tt.exch_done(seq);
+ fc_exch_done(seq);
fc_frame_free(fp);
}
@@ -1768,20 +1829,22 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
goto out;
switch (PTR_ERR(fp)) {
case -FC_EX_TIMEOUT:
+ FC_FCP_DBG(fsp, "SRR timeout, retries %d\n", fsp->recov_retry);
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
else
fc_fcp_recovery(fsp, FC_TIMED_OUT);
break;
case -FC_EX_CLOSED: /* e.g., link failure */
+ FC_FCP_DBG(fsp, "SRR error, exchange closed\n");
/* fall through */
default:
- fc_fcp_retry_cmd(fsp);
+ fc_fcp_retry_cmd(fsp, FC_ERROR);
break;
}
fc_fcp_unlock_pkt(fsp);
out:
- fsp->lp->tt.exch_done(fsp->recov_seq);
+ fc_exch_done(fsp->recov_seq);
}
/**
@@ -1832,8 +1895,13 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
rpriv = rport->dd_data;
if (!fc_fcp_lport_queue_ready(lport)) {
- if (lport->qfull)
+ if (lport->qfull) {
fc_fcp_can_queue_ramp_down(lport);
+ shost_printk(KERN_ERR, lport->host,
+ "libfc: queue full, "
+ "reducing can_queue to %d.\n",
+ lport->host->can_queue);
+ }
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -1980,15 +2048,26 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
break;
case FC_CMD_ABORTED:
- FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
- "due to FC_CMD_ABORTED\n");
- sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
+ if (host_byte(sc_cmd->result) == DID_TIME_OUT)
+ FC_FCP_DBG(fsp, "Returning DID_TIME_OUT to scsi-ml "
+ "due to FC_CMD_ABORTED\n");
+ else {
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_CMD_ABORTED\n");
+ set_host_byte(sc_cmd, DID_ERROR);
+ }
+ sc_cmd->result |= fsp->io_status;
break;
case FC_CMD_RESET:
FC_FCP_DBG(fsp, "Returning DID_RESET to scsi-ml "
"due to FC_CMD_RESET\n");
sc_cmd->result = (DID_RESET << 16);
break;
+ case FC_TRANS_RESET:
+ FC_FCP_DBG(fsp, "Returning DID_SOFT_ERROR to scsi-ml "
+ "due to FC_TRANS_RESET\n");
+ sc_cmd->result = (DID_SOFT_ERROR << 16);
+ break;
case FC_HRD_ERROR:
FC_FCP_DBG(fsp, "Returning DID_NO_CONNECT to scsi-ml "
"due to FC_HRD_ERROR\n");
@@ -2142,7 +2221,7 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
fc_block_scsi_eh(sc_cmd);
- lport->tt.lport_reset(lport);
+ fc_lport_reset(lport);
wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
wait_tmo))
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index c11a638f32e6..d623d084b7ec 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -226,7 +226,7 @@ void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
sp = fr_seq(in_fp);
if (sp)
- fr_seq(fp) = fr_dev(in_fp)->tt.seq_start_next(sp);
+ fr_seq(fp) = fc_seq_start_next(sp);
fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset);
}
EXPORT_SYMBOL(fc_fill_reply_hdr);
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 50c71678a156..919736a74ffa 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -149,7 +149,7 @@ static const char *fc_lport_state_names[] = {
* @offset: The offset into the response data
*/
struct fc_bsg_info {
- struct fc_bsg_job *job;
+ struct bsg_job *job;
struct fc_lport *lport;
u16 rsp_code;
struct scatterlist *sg;
@@ -200,7 +200,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
"in the DNS or FDMI state, it's in the "
"%d state", rdata->ids.port_id,
lport->state);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
}
break;
case RPORT_EV_LOGO:
@@ -237,23 +237,26 @@ static const char *fc_lport_state(struct fc_lport *lport)
* @remote_fid: The FID of the ptp rport
* @remote_wwpn: The WWPN of the ptp rport
* @remote_wwnn: The WWNN of the ptp rport
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
*/
static void fc_lport_ptp_setup(struct fc_lport *lport,
u32 remote_fid, u64 remote_wwpn,
u64 remote_wwnn)
{
- mutex_lock(&lport->disc.disc_mutex);
if (lport->ptp_rdata) {
- lport->tt.rport_logoff(lport->ptp_rdata);
- kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+ fc_rport_logoff(lport->ptp_rdata);
+ kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
}
- lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->ptp_rdata = fc_rport_create(lport, remote_fid);
kref_get(&lport->ptp_rdata->kref);
lport->ptp_rdata->ids.port_name = remote_wwpn;
lport->ptp_rdata->ids.node_name = remote_wwnn;
mutex_unlock(&lport->disc.disc_mutex);
- lport->tt.rport_login(lport->ptp_rdata);
+ fc_rport_login(lport->ptp_rdata);
fc_lport_enter_ready(lport);
}
@@ -409,7 +412,7 @@ static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
fc_lport_state(lport));
- lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
fc_frame_free(fp);
}
@@ -478,7 +481,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
if (!req) {
rjt_data.reason = ELS_RJT_LOGIC;
rjt_data.explan = ELS_EXPL_NONE;
- lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
} else {
fmt = req->rnid_fmt;
len = sizeof(*rp);
@@ -518,7 +521,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
*/
static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
{
- lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
fc_lport_enter_reset(lport);
fc_frame_free(fp);
}
@@ -620,9 +623,9 @@ int fc_fabric_logoff(struct fc_lport *lport)
lport->tt.disc_stop_final(lport);
mutex_lock(&lport->lp_mutex);
if (lport->dns_rdata)
- lport->tt.rport_logoff(lport->dns_rdata);
+ fc_rport_logoff(lport->dns_rdata);
mutex_unlock(&lport->lp_mutex);
- lport->tt.rport_flush_queue();
+ fc_rport_flush_queue();
mutex_lock(&lport->lp_mutex);
fc_lport_enter_logo(lport);
mutex_unlock(&lport->lp_mutex);
@@ -899,7 +902,7 @@ static void fc_lport_recv_els_req(struct fc_lport *lport,
/*
* Check opcode.
*/
- recv = lport->tt.rport_recv_req;
+ recv = fc_rport_recv_req;
switch (fc_frame_payload_op(fp)) {
case ELS_FLOGI:
if (!lport->point_to_multipoint)
@@ -941,15 +944,14 @@ struct fc4_prov fc_lport_els_prov = {
};
/**
- * fc_lport_recv_req() - The generic lport request handler
+ * fc_lport_recv() - The generic lport request handler
* @lport: The lport that received the request
* @fp: The frame the request is in
*
* Locking Note: This function should not be called with the lport
* lock held because it may grab the lock.
*/
-static void fc_lport_recv_req(struct fc_lport *lport,
- struct fc_frame *fp)
+void fc_lport_recv(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fc_seq *sp = fr_seq(fp);
@@ -978,8 +980,9 @@ drop:
FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
fc_frame_free(fp);
if (sp)
- lport->tt.exch_done(sp);
+ fc_exch_done(sp);
}
+EXPORT_SYMBOL(fc_lport_recv);
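Now that fc_lport_recv() is exported, an LLD receive path can hand frames that are not bound to an existing exchange straight to the lport layer. A hedged sketch (not part of the patch; my_lld_recv() is an illustrative name):

static void my_lld_recv(struct fc_lport *lport, struct fc_frame *fp)
{
	/* libfc demultiplexes the request (ELS, CT, FCP) from here;
	 * frames belonging to an existing exchange still go through
	 * fc_exch_recv() as before. */
	fc_lport_recv(lport, fp);
}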
/**
* fc_lport_reset() - Reset a local port
@@ -1007,12 +1010,14 @@ EXPORT_SYMBOL(fc_lport_reset);
*/
static void fc_lport_reset_locked(struct fc_lport *lport)
{
- if (lport->dns_rdata)
- lport->tt.rport_logoff(lport->dns_rdata);
+ if (lport->dns_rdata) {
+ fc_rport_logoff(lport->dns_rdata);
+ lport->dns_rdata = NULL;
+ }
if (lport->ptp_rdata) {
- lport->tt.rport_logoff(lport->ptp_rdata);
- kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+ fc_rport_logoff(lport->ptp_rdata);
+ kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
lport->ptp_rdata = NULL;
}
@@ -1426,13 +1431,13 @@ static void fc_lport_enter_dns(struct fc_lport *lport)
fc_lport_state_enter(lport, LPORT_ST_DNS);
mutex_lock(&lport->disc.disc_mutex);
- rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
+ rdata = fc_rport_create(lport, FC_FID_DIR_SERV);
mutex_unlock(&lport->disc.disc_mutex);
if (!rdata)
goto err;
rdata->ops = &fc_lport_rport_ops;
- lport->tt.rport_login(rdata);
+ fc_rport_login(rdata);
return;
err:
@@ -1543,13 +1548,13 @@ static void fc_lport_enter_fdmi(struct fc_lport *lport)
fc_lport_state_enter(lport, LPORT_ST_FDMI);
mutex_lock(&lport->disc.disc_mutex);
- rdata = lport->tt.rport_create(lport, FC_FID_MGMT_SERV);
+ rdata = fc_rport_create(lport, FC_FID_MGMT_SERV);
mutex_unlock(&lport->disc.disc_mutex);
if (!rdata)
goto err;
rdata->ops = &fc_lport_rport_ops;
- lport->tt.rport_login(rdata);
+ fc_rport_login(rdata);
return;
err:
@@ -1772,7 +1777,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
if ((csp_flags & FC_SP_FT_FPORT) == 0) {
if (e_d_tov > lport->e_d_tov)
lport->e_d_tov = e_d_tov;
- lport->r_a_tov = 2 * e_d_tov;
+ lport->r_a_tov = 2 * lport->e_d_tov;
fc_lport_set_port_id(lport, did, fp);
printk(KERN_INFO "host%d: libfc: "
"Port (%6.6x) entered "
@@ -1784,8 +1789,10 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
get_unaligned_be64(
&flp->fl_wwnn));
} else {
- lport->e_d_tov = e_d_tov;
- lport->r_a_tov = r_a_tov;
+ if (e_d_tov > lport->e_d_tov)
+ lport->e_d_tov = e_d_tov;
+ if (r_a_tov > lport->r_a_tov)
+ lport->r_a_tov = r_a_tov;
fc_host_fabric_name(lport->host) =
get_unaligned_be64(&flp->fl_wwnn);
fc_lport_set_port_id(lport, did, fp);
@@ -1858,12 +1865,6 @@ EXPORT_SYMBOL(fc_lport_config);
*/
int fc_lport_init(struct fc_lport *lport)
{
- if (!lport->tt.lport_recv)
- lport->tt.lport_recv = fc_lport_recv_req;
-
- if (!lport->tt.lport_reset)
- lport->tt.lport_reset = fc_lport_reset;
-
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
fc_host_node_name(lport->host) = lport->wwnn;
fc_host_port_name(lport->host) = lport->wwpn;
@@ -1900,18 +1901,19 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
void *info_arg)
{
struct fc_bsg_info *info = info_arg;
- struct fc_bsg_job *job = info->job;
+ struct bsg_job *job = info->job;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct fc_lport *lport = info->lport;
struct fc_frame_header *fh;
size_t len;
void *buf;
if (IS_ERR(fp)) {
- job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
+ bsg_reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
-ECONNABORTED : -ETIMEDOUT;
job->reply_len = sizeof(uint32_t);
- job->state_flags |= FC_RQST_STATE_DONE;
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
kfree(info);
return;
}
@@ -1928,25 +1930,25 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
(unsigned short)fc_frame_payload_op(fp);
/* Save the reply status of the job */
- job->reply->reply_data.ctels_reply.status =
+ bsg_reply->reply_data.ctels_reply.status =
(cmd == info->rsp_code) ?
FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
}
- job->reply->reply_payload_rcv_len +=
+ bsg_reply->reply_payload_rcv_len +=
fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
&info->offset, NULL);
if (fr_eof(fp) == FC_EOF_T &&
(ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
(FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
- if (job->reply->reply_payload_rcv_len >
+ if (bsg_reply->reply_payload_rcv_len >
job->reply_payload.payload_len)
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
job->reply_payload.payload_len;
- job->reply->result = 0;
- job->state_flags |= FC_RQST_STATE_DONE;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
kfree(info);
}
fc_frame_free(fp);
@@ -1962,7 +1964,7 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
-static int fc_lport_els_request(struct fc_bsg_job *job,
+static int fc_lport_els_request(struct bsg_job *job,
struct fc_lport *lport,
u32 did, u32 tov)
{
@@ -2005,8 +2007,8 @@ static int fc_lport_els_request(struct fc_bsg_job *job,
info->nents = job->reply_payload.sg_cnt;
info->sg = job->reply_payload.sg_list;
- if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
- NULL, info, tov)) {
+ if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
+ NULL, info, tov)) {
kfree(info);
return -ECOMM;
}
@@ -2023,7 +2025,7 @@ static int fc_lport_els_request(struct fc_bsg_job *job,
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
-static int fc_lport_ct_request(struct fc_bsg_job *job,
+static int fc_lport_ct_request(struct bsg_job *job,
struct fc_lport *lport, u32 did, u32 tov)
{
struct fc_bsg_info *info;
@@ -2066,8 +2068,8 @@ static int fc_lport_ct_request(struct fc_bsg_job *job,
info->nents = job->reply_payload.sg_cnt;
info->sg = job->reply_payload.sg_list;
- if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
- NULL, info, tov)) {
+ if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
+ NULL, info, tov)) {
kfree(info);
return -ECOMM;
}
@@ -2079,25 +2081,27 @@ static int fc_lport_ct_request(struct fc_bsg_job *job,
* FC Passthrough requests
* @job: The BSG passthrough job
*/
-int fc_lport_bsg_request(struct fc_bsg_job *job)
+int fc_lport_bsg_request(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct request *rsp = job->req->next_rq;
- struct Scsi_Host *shost = job->shost;
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
struct fc_lport *lport = shost_priv(shost);
struct fc_rport *rport;
struct fc_rport_priv *rdata;
int rc = -EINVAL;
u32 did, tov;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (rsp)
rsp->resid_len = job->reply_payload.payload_len;
mutex_lock(&lport->lp_mutex);
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
- rport = job->rport;
+ rport = fc_bsg_to_rport(job);
if (!rport)
break;
@@ -2107,7 +2111,7 @@ int fc_lport_bsg_request(struct fc_bsg_job *job)
break;
case FC_BSG_RPT_CT:
- rport = job->rport;
+ rport = fc_bsg_to_rport(job);
if (!rport)
break;
@@ -2117,25 +2121,25 @@ int fc_lport_bsg_request(struct fc_bsg_job *job)
break;
case FC_BSG_HST_CT:
- did = ntoh24(job->request->rqst_data.h_ct.port_id);
+ did = ntoh24(bsg_request->rqst_data.h_ct.port_id);
if (did == FC_FID_DIR_SERV) {
rdata = lport->dns_rdata;
if (!rdata)
break;
tov = rdata->e_d_tov;
} else {
- rdata = lport->tt.rport_lookup(lport, did);
+ rdata = fc_rport_lookup(lport, did);
if (!rdata)
break;
tov = rdata->e_d_tov;
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
rc = fc_lport_ct_request(job, lport, did, tov);
break;
case FC_BSG_HST_ELS_NOLOGIN:
- did = ntoh24(job->request->rqst_data.h_els.port_id);
+ did = ntoh24(bsg_request->rqst_data.h_els.port_id);
rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
break;
}
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 97aeaddd600d..c991f3b822f8 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -44,6 +44,19 @@
* path this potential over-use of the mutex is acceptable.
*/
+/*
+ * RPORT REFERENCE COUNTING
+ *
+ * An rport reference should be taken when:
+ * - an rport is allocated
+ * - a workqueue item is scheduled
+ * - an ELS request is sent
+ * The reference should be dropped when:
+ * - the workqueue function has finished
+ * - the ELS response is handled
+ * - an rport is removed
+ */
+
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
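A condensed sketch (not part of the patch) of the reference-counting rule above, as applied by the converted call sites later in this file: take a reference before scheduling work, drop it again if the work was not actually queued, and let the work function drop its reference when it finishes.

	/* schedule side */
	kref_get(&rdata->kref);
	if (!queue_work(rport_event_queue, &rdata->event_work))
		kref_put(&rdata->kref, fc_rport_destroy);	/* not queued */

	/* in fc_rport_work(), once the event has been handled */
	kref_put(&rdata->kref, fc_rport_destroy);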
@@ -74,8 +87,8 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_recv_prlo_req(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_recv_logo_req(struct fc_lport *, struct fc_frame *);
static void fc_rport_timeout(struct work_struct *);
-static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
-static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
+static void fc_rport_error(struct fc_rport_priv *, int);
+static void fc_rport_error_retry(struct fc_rport_priv *, int);
static void fc_rport_work(struct work_struct *);
static const char *fc_rport_state_names[] = {
@@ -98,8 +111,8 @@ static const char *fc_rport_state_names[] = {
* The reference count of the fc_rport_priv structure is
* increased by one.
*/
-static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
- u32 port_id)
+struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
+ u32 port_id)
{
struct fc_rport_priv *rdata = NULL, *tmp_rdata;
@@ -113,6 +126,7 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
rcu_read_unlock();
return rdata;
}
+EXPORT_SYMBOL(fc_rport_lookup);
/**
* fc_rport_create() - Create a new remote port
@@ -123,12 +137,11 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
*
* Locking note: must be called with the disc_mutex held.
*/
-static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
- u32 port_id)
+struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
{
struct fc_rport_priv *rdata;
- rdata = lport->tt.rport_lookup(lport, port_id);
+ rdata = fc_rport_lookup(lport, port_id);
if (rdata)
return rdata;
@@ -158,18 +171,20 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
}
return rdata;
}
+EXPORT_SYMBOL(fc_rport_create);
/**
* fc_rport_destroy() - Free a remote port after last reference is released
* @kref: The remote port's kref
*/
-static void fc_rport_destroy(struct kref *kref)
+void fc_rport_destroy(struct kref *kref)
{
struct fc_rport_priv *rdata;
rdata = container_of(kref, struct fc_rport_priv, kref);
kfree_rcu(rdata, rcu);
}
+EXPORT_SYMBOL(fc_rport_destroy);
/**
* fc_rport_state() - Return a string identifying the remote port's state
@@ -242,6 +257,8 @@ static void fc_rport_state_enter(struct fc_rport_priv *rdata,
/**
* fc_rport_work() - Handler for remote port events in the rport_event_queue
* @work: Handle to the remote port being dequeued
+ *
+ * Reference counting: drops kref on return
*/
static void fc_rport_work(struct work_struct *work)
{
@@ -272,12 +289,14 @@ static void fc_rport_work(struct work_struct *work)
kref_get(&rdata->kref);
mutex_unlock(&rdata->rp_mutex);
- if (!rport)
+ if (!rport) {
+ FC_RPORT_DBG(rdata, "No rport!\n");
rport = fc_remote_port_add(lport->host, 0, &ids);
+ }
if (!rport) {
FC_RPORT_DBG(rdata, "Failed to add the rport\n");
- lport->tt.rport_logoff(rdata);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ fc_rport_logoff(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
}
mutex_lock(&rdata->rp_mutex);
@@ -303,7 +322,7 @@ static void fc_rport_work(struct work_struct *work)
FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
rdata->lld_event_callback(lport, rdata, event);
}
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
break;
case RPORT_EV_FAILED:
@@ -329,7 +348,8 @@ static void fc_rport_work(struct work_struct *work)
FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
rdata->lld_event_callback(lport, rdata, event);
}
- cancel_delayed_work_sync(&rdata->retry_work);
+ if (cancel_delayed_work_sync(&rdata->retry_work))
+ kref_put(&rdata->kref, fc_rport_destroy);
/*
* Reset any outstanding exchanges before freeing rport.
@@ -351,7 +371,7 @@ static void fc_rport_work(struct work_struct *work)
if (port_id == FC_FID_DIR_SERV) {
rdata->event = RPORT_EV_NONE;
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
} else if ((rdata->flags & FC_RP_STARTED) &&
rdata->major_retries <
lport->max_rport_retry_count) {
@@ -362,17 +382,21 @@ static void fc_rport_work(struct work_struct *work)
mutex_unlock(&rdata->rp_mutex);
} else {
FC_RPORT_DBG(rdata, "work delete\n");
+ mutex_lock(&lport->disc.disc_mutex);
list_del_rcu(&rdata->peers);
+ mutex_unlock(&lport->disc.disc_mutex);
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
} else {
/*
* Re-open for events. Reissue READY event if ready.
*/
rdata->event = RPORT_EV_NONE;
- if (rdata->rp_state == RPORT_ST_READY)
+ if (rdata->rp_state == RPORT_ST_READY) {
+ FC_RPORT_DBG(rdata, "work reopen\n");
fc_rport_enter_ready(rdata);
+ }
mutex_unlock(&rdata->rp_mutex);
}
break;
@@ -381,12 +405,21 @@ static void fc_rport_work(struct work_struct *work)
mutex_unlock(&rdata->rp_mutex);
break;
}
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
* fc_rport_login() - Start the remote port login state machine
* @rdata: The remote port to be logged in to
*
+ * Initiates the RP state machine. It is called from the LP module.
+ * This function will issue the following commands to the N_Port
+ * identified by the FC ID provided.
+ *
+ * - PLOGI
+ * - PRLI
+ * - RTV
+ *
* Locking Note: Called without the rport lock held. This
* function will hold the rport lock, call an _enter_*
* function and then unlock the rport.
@@ -395,10 +428,16 @@ static void fc_rport_work(struct work_struct *work)
* If it appears we are already logged in, ADISC is used to verify
* the setup.
*/
-static int fc_rport_login(struct fc_rport_priv *rdata)
+int fc_rport_login(struct fc_rport_priv *rdata)
{
mutex_lock(&rdata->rp_mutex);
+ if (rdata->flags & FC_RP_STARTED) {
+ FC_RPORT_DBG(rdata, "port already started\n");
+ mutex_unlock(&rdata->rp_mutex);
+ return 0;
+ }
+
rdata->flags |= FC_RP_STARTED;
switch (rdata->rp_state) {
case RPORT_ST_READY:
@@ -408,15 +447,20 @@ static int fc_rport_login(struct fc_rport_priv *rdata)
case RPORT_ST_DELETE:
FC_RPORT_DBG(rdata, "Restart deleted port\n");
break;
- default:
+ case RPORT_ST_INIT:
FC_RPORT_DBG(rdata, "Login to port\n");
fc_rport_enter_flogi(rdata);
break;
+ default:
+ FC_RPORT_DBG(rdata, "Login in progress, state %s\n",
+ fc_rport_state(rdata));
+ break;
}
mutex_unlock(&rdata->rp_mutex);
return 0;
}
+EXPORT_SYMBOL(fc_rport_login);
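A hedged usage sketch (not part of the patch): with fc_rport_create() and fc_rport_login() exported, a caller brings a remote port up much as fc_lport_enter_dns() does; my_rport_ops is an illustrative ops table.

	struct fc_rport_priv *rdata;

	mutex_lock(&lport->disc.disc_mutex);
	rdata = fc_rport_create(lport, port_id);	/* lookup or allocate */
	mutex_unlock(&lport->disc.disc_mutex);
	if (!rdata)
		return -ENOMEM;
	rdata->ops = &my_rport_ops;
	fc_rport_login(rdata);		/* kicks off PLOGI/PRLI/RTV */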
/**
* fc_rport_enter_delete() - Schedule a remote port to be deleted
@@ -431,6 +475,8 @@ static int fc_rport_login(struct fc_rport_priv *rdata)
* Set the new event so that the old pending event will not occur.
* Since we have the mutex, even if fc_rport_work() is already started,
* it'll see the new event.
+ *
+ * Reference counting: does not modify kref
*/
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
enum fc_rport_event event)
@@ -442,8 +488,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
fc_rport_state_enter(rdata, RPORT_ST_DELETE);
- if (rdata->event == RPORT_EV_NONE)
- queue_work(rport_event_queue, &rdata->event_work);
+ kref_get(&rdata->kref);
+ if (rdata->event == RPORT_EV_NONE &&
+ !queue_work(rport_event_queue, &rdata->event_work))
+ kref_put(&rdata->kref, fc_rport_destroy);
+
rdata->event = event;
}
@@ -455,7 +504,7 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
* function will hold the rport lock, call an _enter_*
* function and then unlock the rport.
*/
-static int fc_rport_logoff(struct fc_rport_priv *rdata)
+int fc_rport_logoff(struct fc_rport_priv *rdata)
{
struct fc_lport *lport = rdata->local_port;
u32 port_id = rdata->ids.port_id;
@@ -489,6 +538,7 @@ out:
mutex_unlock(&rdata->rp_mutex);
return 0;
}
+EXPORT_SYMBOL(fc_rport_logoff);
/**
* fc_rport_enter_ready() - Transition to the RPORT_ST_READY state
@@ -496,6 +546,8 @@ out:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: schedules workqueue, does not modify kref
*/
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
{
@@ -503,8 +555,11 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
FC_RPORT_DBG(rdata, "Port is Ready\n");
- if (rdata->event == RPORT_EV_NONE)
- queue_work(rport_event_queue, &rdata->event_work);
+ kref_get(&rdata->kref);
+ if (rdata->event == RPORT_EV_NONE &&
+ !queue_work(rport_event_queue, &rdata->event_work))
+ kref_put(&rdata->kref, fc_rport_destroy);
+
rdata->event = RPORT_EV_READY;
}
@@ -515,6 +570,8 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
* Locking Note: Called without the rport lock held. This
* function will hold the rport lock, call an _enter_*
* function and then unlock the rport.
+ *
+ * Reference counting: Drops kref on return.
*/
static void fc_rport_timeout(struct work_struct *work)
{
@@ -522,6 +579,7 @@ static void fc_rport_timeout(struct work_struct *work)
container_of(work, struct fc_rport_priv, retry_work.work);
mutex_lock(&rdata->rp_mutex);
+ FC_RPORT_DBG(rdata, "Port timeout, state %s\n", fc_rport_state(rdata));
switch (rdata->rp_state) {
case RPORT_ST_FLOGI:
@@ -547,23 +605,25 @@ static void fc_rport_timeout(struct work_struct *work)
}
mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
* fc_rport_error() - Error handler, called once retries have been exhausted
* @rdata: The remote port the error is happened on
- * @fp: The error code encapsulated in a frame pointer
+ * @err: The error code
*
* Locking Note: The rport lock is expected to be held before
* calling this routine
+ *
+ * Reference counting: does not modify kref
*/
-static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
+static void fc_rport_error(struct fc_rport_priv *rdata, int err)
{
struct fc_lport *lport = rdata->local_port;
- FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
- IS_ERR(fp) ? -PTR_ERR(fp) : 0,
- fc_rport_state(rdata), rdata->retries);
+ FC_RPORT_DBG(rdata, "Error %d in state %s, retries %d\n",
+ -err, fc_rport_state(rdata), rdata->retries);
switch (rdata->rp_state) {
case RPORT_ST_FLOGI:
@@ -595,36 +655,39 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
/**
* fc_rport_error_retry() - Handler for remote port state retries
* @rdata: The remote port whose state is to be retried
- * @fp: The error code encapsulated in a frame pointer
+ * @err: The error code
*
* If the error was an exchange timeout retry immediately,
* otherwise wait for E_D_TOV.
*
* Locking Note: The rport lock is expected to be held before
* calling this routine
+ *
+ * Reference counting: increments kref when scheduling retry_work
*/
-static void fc_rport_error_retry(struct fc_rport_priv *rdata,
- struct fc_frame *fp)
+static void fc_rport_error_retry(struct fc_rport_priv *rdata, int err)
{
- unsigned long delay = msecs_to_jiffies(FC_DEF_E_D_TOV);
+ unsigned long delay = msecs_to_jiffies(rdata->e_d_tov);
/* make sure this isn't an FC_EX_CLOSED error, never retry those */
- if (PTR_ERR(fp) == -FC_EX_CLOSED)
+ if (err == -FC_EX_CLOSED)
goto out;
if (rdata->retries < rdata->local_port->max_rport_retry_count) {
- FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
- PTR_ERR(fp), fc_rport_state(rdata));
+ FC_RPORT_DBG(rdata, "Error %d in state %s, retrying\n",
+ err, fc_rport_state(rdata));
rdata->retries++;
/* no additional delay on exchange timeouts */
- if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
+ if (err == -FC_EX_TIMEOUT)
delay = 0;
- schedule_delayed_work(&rdata->retry_work, delay);
+ kref_get(&rdata->kref);
+ if (!schedule_delayed_work(&rdata->retry_work, delay))
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
}
out:
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, err);
}
/**
@@ -684,8 +747,11 @@ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_lport *lport = rdata->local_port;
struct fc_els_flogi *flogi;
unsigned int r_a_tov;
+ u8 opcode;
+ int err = 0;
- FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", fc_els_resp_type(fp));
+ FC_RPORT_DBG(rdata, "Received a FLOGI %s\n",
+ IS_ERR(fp) ? "error" : fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
goto put;
@@ -701,18 +767,34 @@ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, PTR_ERR(fp));
goto err;
}
-
- if (fc_frame_payload_op(fp) != ELS_LS_ACC)
+ opcode = fc_frame_payload_op(fp);
+ if (opcode == ELS_LS_RJT) {
+ struct fc_els_ls_rjt *rjt;
+
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ FC_RPORT_DBG(rdata, "FLOGI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
+ err = -FC_EX_ELS_RJT;
goto bad;
- if (fc_rport_login_complete(rdata, fp))
+ } else if (opcode != ELS_LS_ACC) {
+ FC_RPORT_DBG(rdata, "FLOGI ELS invalid opcode %x\n", opcode);
+ err = -FC_EX_ELS_RJT;
goto bad;
+ }
+ if (fc_rport_login_complete(rdata, fp)) {
+ FC_RPORT_DBG(rdata, "FLOGI failed, no login\n");
+ err = -FC_EX_INV_LOGIN;
+ goto bad;
+ }
flogi = fc_frame_payload_get(fp, sizeof(*flogi));
- if (!flogi)
+ if (!flogi) {
+ err = -FC_EX_ALLOC_ERR;
goto bad;
+ }
r_a_tov = ntohl(flogi->fl_csp.sp_r_a_tov);
if (r_a_tov > rdata->r_a_tov)
rdata->r_a_tov = r_a_tov;
@@ -726,11 +808,11 @@ out:
err:
mutex_unlock(&rdata->rp_mutex);
put:
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
bad:
FC_RPORT_DBG(rdata, "Bad FLOGI response\n");
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, err);
goto out;
}
@@ -740,6 +822,8 @@ bad:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
{
@@ -756,20 +840,23 @@ static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
if (!fp)
- return fc_rport_error_retry(rdata, fp);
+ return fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
+ kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_FLOGI,
fc_rport_flogi_resp, rdata,
- 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
}
/**
* fc_rport_recv_flogi_req() - Handle Fabric Login (FLOGI) request in p-mp mode
* @lport: The local port that received the PLOGI request
* @rx_fp: The PLOGI request frame
+ *
+ * Reference counting: drops kref on return
*/
static void fc_rport_recv_flogi_req(struct fc_lport *lport,
struct fc_frame *rx_fp)
@@ -799,7 +886,7 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
goto reject;
}
- rdata = lport->tt.rport_lookup(lport, sid);
+ rdata = fc_rport_lookup(lport, sid);
if (!rdata) {
rjt_data.reason = ELS_RJT_FIP;
rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR;
@@ -824,8 +911,7 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
* RPORT wouldn;t have created and 'rport_lookup' would have
* failed anyway in that case.
*/
- if (lport->point_to_multipoint)
- break;
+ break;
case RPORT_ST_DELETE:
mutex_unlock(&rdata->rp_mutex);
rjt_data.reason = ELS_RJT_FIP;
@@ -867,20 +953,27 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
- if (rdata->ids.port_name < lport->wwpn)
- fc_rport_enter_plogi(rdata);
- else
- fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
+ /*
+ * Do not proceed with the state machine if our
+ * FLOGI has crossed with an FLOGI from the
+ * remote port; wait for the FLOGI response instead.
+ */
+ if (rdata->rp_state != RPORT_ST_FLOGI) {
+ if (rdata->ids.port_name < lport->wwpn)
+ fc_rport_enter_plogi(rdata);
+ else
+ fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
+ }
out:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
fc_frame_free(rx_fp);
return;
reject_put:
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
reject:
- lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(rx_fp);
}
@@ -904,10 +997,13 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
u16 cssp_seq;
u8 op;
- mutex_lock(&rdata->rp_mutex);
-
FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
if (rdata->rp_state != RPORT_ST_PLOGI) {
FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
"%s\n", fc_rport_state(rdata));
@@ -917,7 +1013,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, PTR_ERR(fp));
goto err;
}
@@ -939,14 +1035,20 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
rdata->max_seq = csp_seq;
rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
fc_rport_enter_prli(rdata);
- } else
- fc_rport_error_retry(rdata, fp);
+ } else {
+ struct fc_els_ls_rjt *rjt;
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
+ fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
+ }
out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
}
static bool
@@ -969,6 +1071,8 @@ fc_rport_compatible_roles(struct fc_lport *lport, struct fc_rport_priv *rdata)
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
{
@@ -990,17 +1094,18 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
if (!fp) {
FC_RPORT_DBG(rdata, "%s frame alloc failed\n", __func__);
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
rdata->e_d_tov = lport->e_d_tov;
+ kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
fc_rport_plogi_resp, rdata,
- 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
}
/**
@@ -1022,16 +1127,20 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_els_spp spp;
} *pp;
struct fc_els_spp temp_spp;
+ struct fc_els_ls_rjt *rjt;
struct fc4_prov *prov;
u32 roles = FC_RPORT_ROLE_UNKNOWN;
u32 fcp_parm = 0;
u8 op;
- u8 resp_code = 0;
-
- mutex_lock(&rdata->rp_mutex);
+ enum fc_els_spp_resp resp_code;
FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
if (rdata->rp_state != RPORT_ST_PRLI) {
FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
"%s\n", fc_rport_state(rdata));
@@ -1041,7 +1150,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, PTR_ERR(fp));
goto err;
}
@@ -1055,14 +1164,14 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
goto out;
resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
- FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n",
- pp->spp.spp_flags);
+ FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n",
+ pp->spp.spp_flags, pp->spp.spp_type);
rdata->spp_type = pp->spp.spp_type;
if (resp_code != FC_SPP_RESP_ACK) {
if (resp_code == FC_SPP_RESP_CONF)
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, -FC_EX_SEQ_ERR);
else
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
goto out;
}
if (pp->prli.prli_spp_len < sizeof(pp->spp))
@@ -1074,13 +1183,25 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
if (fcp_parm & FCP_SPPF_CONF_COMPL)
rdata->flags |= FC_RP_FLAGS_CONF_REQ;
- prov = fc_passive_prov[FC_TYPE_FCP];
+ /*
+ * Call prli provider if we should act as a target
+ */
+ prov = fc_passive_prov[rdata->spp_type];
if (prov) {
memset(&temp_spp, 0, sizeof(temp_spp));
prov->prli(rdata, pp->prli.prli_spp_len,
&pp->spp, &temp_spp);
}
-
+ /*
+ * Check if the image pair could be established
+ */
+ if (rdata->spp_type != FC_TYPE_FCP ||
+ !(pp->spp.spp_flags & FC_SPP_EST_IMG_PAIR)) {
+ /*
+ * Nope; we can't use this port as a target.
+ */
+ fcp_parm &= ~FCP_SPPF_TARG_FCN;
+ }
rdata->supported_classes = FC_COS_CLASS3;
if (fcp_parm & FCP_SPPF_INIT_FCN)
roles |= FC_RPORT_ROLE_FCP_INITIATOR;
@@ -1091,15 +1212,18 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
fc_rport_enter_rtv(rdata);
} else {
- FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
- fc_rport_error_retry(rdata, fp);
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
+ fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
}
out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -1108,6 +1232,8 @@ err:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
{
@@ -1128,6 +1254,15 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
return;
}
+ /*
+ * And if the local port does not support the initiator function
+ * there's no need to send a PRLI, either.
+ */
+ if (!(lport->service_params & FCP_SPPF_INIT_FCN)) {
+ fc_rport_enter_ready(rdata);
+ return;
+ }
+
FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
fc_rport_state(rdata));
@@ -1135,7 +1270,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(*pp));
if (!fp) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
@@ -1151,15 +1286,16 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
fc_host_port_id(lport->host), FC_TYPE_ELS,
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
- if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp,
- NULL, rdata, 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ kref_get(&rdata->kref);
+ if (!fc_exch_seq_send(lport, fp, fc_rport_prli_resp,
+ NULL, rdata, 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
}
/**
- * fc_rport_els_rtv_resp() - Handler for Request Timeout Value (RTV) responses
+ * fc_rport_rtv_resp() - Handler for Request Timeout Value (RTV) responses
* @sp: The sequence the RTV was on
* @fp: The RTV response frame
* @rdata_arg: The remote port that sent the RTV response
@@ -1176,10 +1312,13 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_rport_priv *rdata = rdata_arg;
u8 op;
- mutex_lock(&rdata->rp_mutex);
-
FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
if (rdata->rp_state != RPORT_ST_RTV) {
FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
"%s\n", fc_rport_state(rdata));
@@ -1189,7 +1328,7 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, PTR_ERR(fp));
goto err;
}
@@ -1205,13 +1344,15 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
tov = ntohl(rtv->rtv_r_a_tov);
if (tov == 0)
tov = 1;
- rdata->r_a_tov = tov;
+ if (tov > rdata->r_a_tov)
+ rdata->r_a_tov = tov;
tov = ntohl(rtv->rtv_e_d_tov);
if (toq & FC_ELS_RTV_EDRES)
tov /= 1000000;
if (tov == 0)
tov = 1;
- rdata->e_d_tov = tov;
+ if (tov > rdata->e_d_tov)
+ rdata->e_d_tov = tov;
}
}
@@ -1221,7 +1362,8 @@ out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -1230,6 +1372,8 @@ err:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
{
@@ -1243,16 +1387,52 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
if (!fp) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
+ kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
fc_rport_rtv_resp, rdata,
- 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+}
+
+/**
+ * fc_rport_recv_rtv_req() - Handler for Read Timeout Value (RTV) requests
+ * @rdata: The remote port that sent the RTV request
+ * @in_fp: The RTV request frame
+ *
+ * Locking Note: Called with the lport and rport locks held.
+ */
+static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
+ struct fc_frame *in_fp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+ struct fc_els_rtv_acc *rtv;
+ struct fc_seq_els_data rjt_data;
+
+ FC_RPORT_DBG(rdata, "Received RTV request\n");
+
+ fp = fc_frame_alloc(lport, sizeof(*rtv));
+ if (!fp) {
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_INSUF_RES;

+ fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+ goto drop;
+ }
+ rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+ rtv->rtv_cmd = ELS_LS_ACC;
+ rtv->rtv_r_a_tov = htonl(lport->r_a_tov);
+ rtv->rtv_e_d_tov = htonl(lport->e_d_tov);
+ rtv->rtv_toq = 0;
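+ /* rtv_toq of zero leaves the E_D_TOV resolution bit clear, i.e. e_d_tov is reported in milliseconds. (descriptive comment, not in the original patch) */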
+ fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+drop:
+ fc_frame_free(in_fp);
}
/**
@@ -1262,15 +1442,16 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
* @lport_arg: The local port
*/
static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
- void *lport_arg)
+ void *rdata_arg)
{
- struct fc_lport *lport = lport_arg;
+ struct fc_rport_priv *rdata = rdata_arg;
+ struct fc_lport *lport = rdata->local_port;
FC_RPORT_ID_DBG(lport, fc_seq_exch(sp)->did,
"Received a LOGO %s\n", fc_els_resp_type(fp));
- if (IS_ERR(fp))
- return;
- fc_frame_free(fp);
+ if (!IS_ERR(fp))
+ fc_frame_free(fp);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -1279,6 +1460,8 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
{
@@ -1291,8 +1474,10 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
if (!fp)
return;
- (void)lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
- fc_rport_logo_resp, lport, 0);
+ kref_get(&rdata->kref);
+ if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
+ fc_rport_logo_resp, rdata, 0))
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -1312,10 +1497,13 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_els_adisc *adisc;
u8 op;
- mutex_lock(&rdata->rp_mutex);
-
FC_RPORT_DBG(rdata, "Received a ADISC response\n");
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
if (rdata->rp_state != RPORT_ST_ADISC) {
FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n",
fc_rport_state(rdata));
@@ -1325,7 +1513,7 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, PTR_ERR(fp));
goto err;
}
@@ -1350,7 +1538,8 @@ out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -1359,6 +1548,8 @@ err:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
{
@@ -1372,15 +1563,16 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc));
if (!fp) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
+ kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
fc_rport_adisc_resp, rdata,
- 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
}
/**
@@ -1404,7 +1596,7 @@ static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
if (!adisc) {
rjt_data.reason = ELS_RJT_PROT;
rjt_data.explan = ELS_EXPL_INV_LEN;
- lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
goto drop;
}
@@ -1480,7 +1672,7 @@ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
goto out;
out_rjt:
- lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
out:
fc_frame_free(rx_fp);
}
@@ -1494,15 +1686,21 @@ out:
* The ELS opcode has already been validated by the caller.
*
* Locking Note: Called with the lport lock held.
+ *
+ * Reference counting: does not modify kref
*/
static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_rport_priv *rdata;
struct fc_seq_els_data els_data;
- rdata = lport->tt.rport_lookup(lport, fc_frame_sid(fp));
- if (!rdata)
+ rdata = fc_rport_lookup(lport, fc_frame_sid(fp));
+ if (!rdata) {
+ FC_RPORT_ID_DBG(lport, fc_frame_sid(fp),
+ "Received ELS 0x%02x from non-logged-in port\n",
+ fc_frame_payload_op(fp));
goto reject;
+ }
mutex_lock(&rdata->rp_mutex);
@@ -1512,9 +1710,21 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
case RPORT_ST_READY:
case RPORT_ST_ADISC:
break;
+ case RPORT_ST_PLOGI:
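+ /* A PRLI received while our own PLOGI is still outstanding is rejected with LS_RJT reason "busy". (descriptive comment, not in the original patch) */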
+ if (fc_frame_payload_op(fp) == ELS_PRLI) {
+ FC_RPORT_DBG(rdata, "Reject ELS PRLI "
+ "while in state %s\n",
+ fc_rport_state(rdata));
+ mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ goto busy;
+ }
default:
+ FC_RPORT_DBG(rdata,
+ "Reject ELS 0x%02x while in state %s\n",
+ fc_frame_payload_op(fp), fc_rport_state(rdata));
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
goto reject;
}
@@ -1529,30 +1739,41 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
fc_rport_recv_adisc_req(rdata, fp);
break;
case ELS_RRQ:
- lport->tt.seq_els_rsp_send(fp, ELS_RRQ, NULL);
+ fc_seq_els_rsp_send(fp, ELS_RRQ, NULL);
fc_frame_free(fp);
break;
case ELS_REC:
- lport->tt.seq_els_rsp_send(fp, ELS_REC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_REC, NULL);
fc_frame_free(fp);
break;
case ELS_RLS:
fc_rport_recv_rls_req(rdata, fp);
break;
+ case ELS_RTV:
+ fc_rport_recv_rtv_req(rdata, fp);
+ break;
default:
fc_frame_free(fp); /* can't happen */
break;
}
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
reject:
els_data.reason = ELS_RJT_UNAB;
els_data.explan = ELS_EXPL_PLOGI_REQD;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+ fc_frame_free(fp);
+ return;
+
+busy:
+ els_data.reason = ELS_RJT_BUSY;
+ els_data.explan = ELS_EXPL_NONE;
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
fc_frame_free(fp);
+ return;
}
/**
@@ -1561,8 +1782,10 @@ reject:
* @fp: The request frame
*
* Locking Note: Called with the lport lock held.
+ *
+ * Reference counting: does not modify kref
*/
-static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_seq_els_data els_data;
@@ -1588,16 +1811,18 @@ static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
case ELS_RRQ:
case ELS_REC:
case ELS_RLS:
+ case ELS_RTV:
fc_rport_recv_els_req(lport, fp);
break;
default:
els_data.reason = ELS_RJT_UNSUP;
els_data.explan = ELS_EXPL_NONE;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
fc_frame_free(fp);
break;
}
}
+EXPORT_SYMBOL(fc_rport_recv_req);
/**
* fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests
@@ -1605,6 +1830,8 @@ static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
* @rx_fp: The PLOGI request frame
*
* Locking Note: The rport lock is held before calling this function.
+ *
+ * Reference counting: increments kref on return
*/
static void fc_rport_recv_plogi_req(struct fc_lport *lport,
struct fc_frame *rx_fp)
@@ -1630,7 +1857,7 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
disc = &lport->disc;
mutex_lock(&disc->disc_mutex);
- rdata = lport->tt.rport_create(lport, sid);
+ rdata = fc_rport_create(lport, sid);
if (!rdata) {
mutex_unlock(&disc->disc_mutex);
rjt_data.reason = ELS_RJT_UNAB;
@@ -1718,7 +1945,7 @@ out:
return;
reject:
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(fp);
}
@@ -1744,7 +1971,6 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
unsigned int len;
unsigned int plen;
enum fc_els_spp_resp resp;
- enum fc_els_spp_resp passive;
struct fc_seq_els_data rjt_data;
struct fc4_prov *prov;
@@ -1794,15 +2020,21 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
resp = 0;
if (rspp->spp_type < FC_FC4_PROV_SIZE) {
+ enum fc_els_spp_resp active = 0, passive = 0;
+
prov = fc_active_prov[rspp->spp_type];
if (prov)
- resp = prov->prli(rdata, plen, rspp, spp);
+ active = prov->prli(rdata, plen, rspp, spp);
prov = fc_passive_prov[rspp->spp_type];
- if (prov) {
+ if (prov)
passive = prov->prli(rdata, plen, rspp, spp);
- if (!resp || passive == FC_SPP_RESP_ACK)
- resp = passive;
- }
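+ /* Prefer the passive provider's answer when the active provider gave none or the passive provider ACKed; otherwise use the active answer. (descriptive comment, not in the original patch) */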
+ if (!active || passive == FC_SPP_RESP_ACK)
+ resp = passive;
+ else
+ resp = active;
+ FC_RPORT_DBG(rdata, "PRLI rspp type %x "
+ "active %x passive %x\n",
+ rspp->spp_type, active, passive);
}
if (!resp) {
if (spp->spp_flags & FC_SPP_EST_IMG_PAIR)
@@ -1823,20 +2055,13 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
- switch (rdata->rp_state) {
- case RPORT_ST_PRLI:
- fc_rport_enter_ready(rdata);
- break;
- default:
- break;
- }
goto drop;
reject_len:
rjt_data.reason = ELS_RJT_PROT;
rjt_data.explan = ELS_EXPL_INV_LEN;
reject:
- lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
drop:
fc_frame_free(rx_fp);
}
@@ -1907,7 +2132,7 @@ reject_len:
rjt_data.reason = ELS_RJT_PROT;
rjt_data.explan = ELS_EXPL_INV_LEN;
reject:
- lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
drop:
fc_frame_free(rx_fp);
}
@@ -1919,17 +2144,19 @@ drop:
*
* Locking Note: The rport lock is expected to be held before calling
* this function.
+ *
+ * Reference counting: drops kref on return
*/
static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_rport_priv *rdata;
u32 sid;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
sid = fc_frame_sid(fp);
- rdata = lport->tt.rport_lookup(lport, sid);
+ rdata = fc_rport_lookup(lport, sid);
if (rdata) {
mutex_lock(&rdata->rp_mutex);
FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
@@ -1937,7 +2164,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
fc_rport_enter_delete(rdata, RPORT_EV_STOP);
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
} else
FC_RPORT_ID_DBG(lport, sid,
"Received LOGO from non-logged-in port\n");
@@ -1947,41 +2174,11 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
/**
* fc_rport_flush_queue() - Flush the rport_event_queue
*/
-static void fc_rport_flush_queue(void)
+void fc_rport_flush_queue(void)
{
flush_workqueue(rport_event_queue);
}
-
-/**
- * fc_rport_init() - Initialize the remote port layer for a local port
- * @lport: The local port to initialize the remote port layer for
- */
-int fc_rport_init(struct fc_lport *lport)
-{
- if (!lport->tt.rport_lookup)
- lport->tt.rport_lookup = fc_rport_lookup;
-
- if (!lport->tt.rport_create)
- lport->tt.rport_create = fc_rport_create;
-
- if (!lport->tt.rport_login)
- lport->tt.rport_login = fc_rport_login;
-
- if (!lport->tt.rport_logoff)
- lport->tt.rport_logoff = fc_rport_logoff;
-
- if (!lport->tt.rport_recv_req)
- lport->tt.rport_recv_req = fc_rport_recv_req;
-
- if (!lport->tt.rport_flush_queue)
- lport->tt.rport_flush_queue = fc_rport_flush_queue;
-
- if (!lport->tt.rport_destroy)
- lport->tt.rport_destroy = fc_rport_destroy;
-
- return 0;
-}
-EXPORT_SYMBOL(fc_rport_init);
+EXPORT_SYMBOL(fc_rport_flush_queue);
/**
* fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator.
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index b484859464f6..8a20b4e86224 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -648,6 +648,10 @@ struct lpfc_hba {
#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */
#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */
+#define HBA_FORCED_LINK_SPEED 0x40000 /*
+ * Firmware supports Forced Link Speed
+ * capability
+ */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -746,6 +750,8 @@ struct lpfc_hba {
uint32_t cfg_oas_priority;
uint32_t cfg_XLanePriority;
uint32_t cfg_enable_bg;
+ uint32_t cfg_prot_mask;
+ uint32_t cfg_prot_guard;
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
uint32_t cfg_aer_support;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index f1019908800e..c84775562c65 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2759,18 +2759,14 @@ LPFC_ATTR_R(enable_npiv, 1, 0, 1,
LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
"FCF Fast failover=1 Priority failover=2");
-int lpfc_enable_rrq = 2;
-module_param(lpfc_enable_rrq, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
-lpfc_param_show(enable_rrq);
/*
# lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
# 0x0 = disabled, XRI/OXID use not tracked.
# 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
# 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
*/
-lpfc_param_init(enable_rrq, 2, 0, 2);
-static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
+LPFC_ATTR_R(enable_rrq, 2, 0, 2,
+ "Enable RRQ functionality");
/*
# lpfc_suppress_link_up: Bring link up at initialization
@@ -2827,14 +2823,8 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
lpfc_txcmplq_hw_show, NULL);
-int lpfc_iocb_cnt = 2;
-module_param(lpfc_iocb_cnt, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_iocb_cnt,
+LPFC_ATTR_R(iocb_cnt, 2, 1, 5,
"Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
-lpfc_param_show(iocb_cnt);
-lpfc_param_init(iocb_cnt, 2, 1, 5);
-static DEVICE_ATTR(lpfc_iocb_cnt, S_IRUGO,
- lpfc_iocb_cnt_show, NULL);
/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
@@ -2887,9 +2877,9 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
if (val != LPFC_DEF_DEVLOSS_TMO)
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0407 Ignoring nodev_tmo module "
- "parameter because devloss_tmo is "
- "set.\n");
+ "0407 Ignoring lpfc_nodev_tmo module "
+ "parameter because lpfc_devloss_tmo "
+ "is set.\n");
return 0;
}
@@ -2948,8 +2938,8 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
if (vport->dev_loss_tmo_changed ||
(lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0401 Ignoring change to nodev_tmo "
- "because devloss_tmo is set.\n");
+ "0401 Ignoring change to lpfc_nodev_tmo "
+ "because lpfc_devloss_tmo is set.\n");
return 0;
}
if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
@@ -2964,7 +2954,7 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
return 0;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0403 lpfc_nodev_tmo attribute cannot be set to"
+ "0403 lpfc_nodev_tmo attribute cannot be set to "
"%d, allowed range is [%d, %d]\n",
val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
return -EINVAL;
@@ -3015,8 +3005,8 @@ lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0404 lpfc_devloss_tmo attribute cannot be set to"
- " %d, allowed range is [%d, %d]\n",
+ "0404 lpfc_devloss_tmo attribute cannot be set to "
+ "%d, allowed range is [%d, %d]\n",
val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
return -EINVAL;
}
@@ -3204,6 +3194,8 @@ LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
# Default value is 0.
*/
+LPFC_ATTR(topology, 0, 0, 6,
+ "Select Fibre Channel topology");
/**
* lpfc_topology_set - Set the adapters topology field
@@ -3281,11 +3273,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
phba->brd_no, val);
return -EINVAL;
}
-static int lpfc_topology = 0;
-module_param(lpfc_topology, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
+
lpfc_param_show(topology)
-lpfc_param_init(topology, 0, 0, 6)
static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
lpfc_topology_show, lpfc_topology_store);
@@ -3679,7 +3668,12 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
int nolip = 0;
const char *val_buf = buf;
int err;
- uint32_t prev_val;
+ uint32_t prev_val, if_type;
+
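+ /* When the firmware has the Forced Link Speed capability, the link speed cannot be changed through this attribute. (descriptive comment, not in the original patch) */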
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_2 &&
+ phba->hba_flag & HBA_FORCED_LINK_SPEED)
+ return -EPERM;
if (!strncmp(buf, "nolip ", strlen("nolip "))) {
nolip = 1;
@@ -3789,6 +3783,9 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
# 1 = aer supported and enabled (default)
# Value range is [0,1]. Default value is 1.
*/
+LPFC_ATTR(aer_support, 1, 0, 1,
+ "Enable PCIe device AER support");
+lpfc_param_show(aer_support)
/**
* lpfc_aer_support_store - Set the adapter for aer support
@@ -3871,46 +3868,6 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
return rc;
}
-static int lpfc_aer_support = 1;
-module_param(lpfc_aer_support, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
-lpfc_param_show(aer_support)
-
-/**
- * lpfc_aer_support_init - Set the initial adapters aer support flag
- * @phba: lpfc_hba pointer.
- * @val: enable aer or disable aer flag.
- *
- * Description:
- * If val is in a valid range [0,1], then set the adapter's initial
- * cfg_aer_support field. It will be up to the driver's probe_one
- * routine to determine whether the device's AER support can be set
- * or not.
- *
- * Notes:
- * If the value is not in range log a kernel error message, and
- * choose the default value of setting AER support and return.
- *
- * Returns:
- * zero if val saved.
- * -EINVAL val out of range
- **/
-static int
-lpfc_aer_support_init(struct lpfc_hba *phba, int val)
-{
- if (val == 0 || val == 1) {
- phba->cfg_aer_support = val;
- return 0;
- }
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2712 lpfc_aer_support attribute value %d out "
- "of range, allowed values are 0|1, setting it "
- "to default value of 1\n", val);
- /* By default, try to enable AER on a device */
- phba->cfg_aer_support = 1;
- return -EINVAL;
-}
-
static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
lpfc_aer_support_show, lpfc_aer_support_store);
@@ -4055,39 +4012,10 @@ lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
return rc;
}
-static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
-module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
-lpfc_param_show(sriov_nr_virtfn)
-
-/**
- * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
- * @phba: lpfc_hba pointer.
- * @val: link speed value.
- *
- * Description:
- * If val is in a valid range [0,255], then set the adapter's initial
- * cfg_sriov_nr_virtfn field. If it's greater than the maximum, the maximum
- * number shall be used instead. It will be up to the driver's probe_one
- * routine to determine whether the device's SR-IOV is supported or not.
- *
- * Returns:
- * zero if val saved.
- * -EINVAL val out of range
- **/
-static int
-lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
-{
- if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
- phba->cfg_sriov_nr_virtfn = val;
- return 0;
- }
+LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
+ "Enable PCIe device SR-IOV virtual fn");
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3017 Enabling %d virtual functions is not "
- "allowed.\n", val);
- return -EINVAL;
-}
+lpfc_param_show(sriov_nr_virtfn)
static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
@@ -4251,7 +4179,8 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3016 fcp_imax: %d out of range, using default\n", val);
+ "3016 lpfc_fcp_imax: %d out of range, using default\n",
+ val);
phba->cfg_fcp_imax = LPFC_DEF_IMAX;
return 0;
@@ -4401,8 +4330,8 @@ lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3326 fcp_cpu_map: %d out of range, using default\n",
- val);
+ "3326 lpfc_fcp_cpu_map: %d out of range, using "
+ "default\n", val);
phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
return 0;
@@ -4441,12 +4370,10 @@ LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
# to limit the I/O completion time to the parameter value.
# The value is set in milliseconds.
*/
-static int lpfc_max_scsicmpl_time;
-module_param(lpfc_max_scsicmpl_time, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_max_scsicmpl_time,
+LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000,
"Use command completion time to control queue depth");
+
lpfc_vport_param_show(max_scsicmpl_time);
-lpfc_vport_param_init(max_scsicmpl_time, 0, 0, 60000);
static int
lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
{
@@ -4691,12 +4618,15 @@ unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
# HBA supports DIX Type 1: Host to HBA Type 1 protection
#
*/
-unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
- SHOST_DIX_TYPE0_PROTECTION |
- SHOST_DIX_TYPE1_PROTECTION;
-
-module_param(lpfc_prot_mask, uint, S_IRUGO);
-MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
+LPFC_ATTR(prot_mask,
+ (SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION),
+ 0,
+ (SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION),
+ "T10-DIF host protection capabilities mask");
/*
# lpfc_prot_guard: i
@@ -4706,9 +4636,9 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
# - Default will result in registering capabilities for all guard types
#
*/
-unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
-module_param(lpfc_prot_guard, byte, S_IRUGO);
-MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
+LPFC_ATTR(prot_guard,
+ SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP,
+ "T10-DIF host protection guard type");
/*
* Delay initial NPort discovery when Clean Address bit is cleared in
@@ -5828,6 +5758,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_oas_flags = 0;
phba->cfg_oas_priority = 0;
lpfc_enable_bg_init(phba, lpfc_enable_bg);
+ lpfc_prot_mask_init(phba, lpfc_prot_mask);
+ lpfc_prot_guard_init(phba, lpfc_prot_guard);
if (phba->sli_rev == LPFC_SLI_REV4)
phba->cfg_poll = 0;
else
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 05dcc2abd541..7dca4d6a8883 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
+#include <linux/bsg-lib.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -97,7 +98,7 @@ struct lpfc_bsg_menlo {
#define TYPE_MENLO 4
struct bsg_job_data {
uint32_t type;
- struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */
+ struct bsg_job *set_job; /* job waiting for this iocb to finish */
union {
struct lpfc_bsg_event *evt;
struct lpfc_bsg_iocb iocb;
@@ -211,7 +212,7 @@ lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
- struct fc_bsg_buffer *bsg_buffers,
+ struct bsg_buffer *bsg_buffers,
unsigned int bytes_to_transfer, int to_buffers)
{
@@ -297,7 +298,8 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp, *rmp;
struct lpfc_nodelist *ndlp;
@@ -312,6 +314,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
+ bsg_reply = job->reply;
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
@@ -350,7 +353,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
}
} else {
rsp_size = rsp->un.genreq64.bdl.bdeSize;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
lpfc_bsg_copy_data(rmp, &job->reply_payload,
rsp_size, 0);
}
@@ -367,8 +370,9 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
@@ -378,12 +382,13 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
* @job: fc_bsg_job to handle
**/
static int
-lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
+lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata = job->rport->dd_data;
+ struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
struct lpfc_nodelist *ndlp = rdata->pnode;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct ulp_bde64 *bpl = NULL;
uint32_t timeout;
struct lpfc_iocbq *cmdiocbq = NULL;
@@ -398,7 +403,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
int iocb_stat;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
@@ -542,7 +547,7 @@ no_ndlp:
kfree(dd_data);
no_dd_data:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
return rc;
}
@@ -570,7 +575,8 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
IOCB_t *rsp;
struct lpfc_nodelist *ndlp;
struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
@@ -588,6 +594,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
+ bsg_reply = job->reply;
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
@@ -609,17 +616,17 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
if (job) {
if (rsp->ulpStatus == IOSTAT_SUCCESS) {
rsp_size = rsp->un.elsreq64.bdl.bdeSize;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
prsp->virt,
rsp_size);
} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sizeof(struct fc_bsg_ctels_reply);
/* LS_RJT data returned in word 4 */
rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
- els_reply = &job->reply->reply_data.ctels_reply;
+ els_reply = &bsg_reply->reply_data.ctels_reply;
els_reply->status = FC_CTELS_STATUS_REJECT;
els_reply->rjt_data.action = rjt_data[3];
els_reply->rjt_data.reason_code = rjt_data[2];
@@ -637,8 +644,9 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
@@ -648,12 +656,14 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
* @job: fc_bsg_job to handle
**/
static int
-lpfc_bsg_rport_els(struct fc_bsg_job *job)
+lpfc_bsg_rport_els(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata = job->rport->dd_data;
+ struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
struct lpfc_nodelist *ndlp = rdata->pnode;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t elscmd;
uint32_t cmdsize;
struct lpfc_iocbq *cmdiocbq;
@@ -664,7 +674,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
int rc = 0;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* verify the els command is not greater than the
* maximum ELS transfer size.
@@ -684,7 +694,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
goto no_dd_data;
}
- elscmd = job->request->rqst_data.r_els.els_code;
+ elscmd = bsg_request->rqst_data.r_els.els_code;
cmdsize = job->request_payload.payload_len;
if (!lpfc_nlp_get(ndlp)) {
@@ -771,7 +781,7 @@ free_dd_data:
no_dd_data:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
return rc;
}
@@ -917,7 +927,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
struct lpfc_hbq_entry *hbqe;
struct lpfc_sli_ct_request *ct_req;
- struct fc_bsg_job *job = NULL;
+ struct bsg_job *job = NULL;
+ struct fc_bsg_reply *bsg_reply;
struct bsg_job_data *dd_data = NULL;
unsigned long flags;
int size = 0;
@@ -1120,13 +1131,15 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dd_data->set_job = NULL;
lpfc_bsg_event_unref(evt);
if (job) {
- job->reply->reply_payload_rcv_len = size;
+ bsg_reply = job->reply;
+ bsg_reply->reply_payload_rcv_len = size;
/* make error code available to userspace */
- job->reply->result = 0;
+ bsg_reply->result = 0;
job->dd_data = NULL;
/* complete the job back to userspace */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
}
}
@@ -1187,10 +1200,11 @@ lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
* @job: SET_EVENT fc_bsg_job
**/
static int
-lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_set_event(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
struct set_ct_event *event_req;
struct lpfc_bsg_event *evt;
int rc = 0;
@@ -1208,7 +1222,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
}
event_req = (struct set_ct_event *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
FC_REG_EVENT_MASK);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -1271,10 +1285,12 @@ job_error:
* @job: GET_EVENT fc_bsg_job
**/
static int
-lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_get_event(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct get_ct_event *event_req;
struct get_ct_event_reply *event_reply;
struct lpfc_bsg_event *evt, *evt_next;
@@ -1292,10 +1308,10 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
}
event_req = (struct get_ct_event *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
event_reply = (struct get_ct_event_reply *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
if (evt->reg_id == event_req->ev_reg_id) {
@@ -1315,7 +1331,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
* an error indicating that there isn't any more
*/
if (evt_dat == NULL) {
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
rc = -ENOENT;
goto job_error;
}
@@ -1331,12 +1347,12 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
event_reply->type = evt_dat->type;
event_reply->immed_data = evt_dat->immed_dat;
if (evt_dat->len > 0)
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
evt_dat->data, evt_dat->len);
else
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (evt_dat) {
kfree(evt_dat->data);
@@ -1347,13 +1363,14 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
lpfc_bsg_event_unref(evt);
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
job->dd_data = NULL;
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
job_error:
job->dd_data = NULL;
- job->reply->result = rc;
+ bsg_reply->result = rc;
return rc;
}
@@ -1380,7 +1397,8 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp;
struct lpfc_nodelist *ndlp;
@@ -1411,6 +1429,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
/* Copy the completed job data or set the error status */
if (job) {
+ bsg_reply = job->reply;
if (rsp->ulpStatus) {
if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
@@ -1428,7 +1447,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
rc = -EACCES;
}
} else {
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
}
}
@@ -1442,8 +1461,9 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
@@ -1457,7 +1477,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
* @num_entry: Number of entries in the bde.
**/
static int
-lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
+lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
int num_entry)
{
@@ -1603,12 +1623,14 @@ no_dd_data:
* @job: SEND_MGMT_RESP fc_bsg_job
**/
static int
-lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
+lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
struct ulp_bde64 *bpl;
struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
int bpl_entries;
@@ -1618,7 +1640,7 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
int rc = 0;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
rc = -ERANGE;
@@ -1664,7 +1686,7 @@ send_mgmt_rsp_free_bmp:
kfree(bmp);
send_mgmt_rsp_exit:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
return rc;
}
@@ -1760,8 +1782,10 @@ lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
* All of this is done in-line.
*/
static int
-lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct diag_mode_set *loopback_mode;
uint32_t link_flags;
uint32_t timeout;
@@ -1771,7 +1795,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
int rc = 0;
/* no data to return just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len < sizeof(struct fc_bsg_request) +
sizeof(struct diag_mode_set)) {
@@ -1791,7 +1815,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
/* bring the link to diagnostic mode */
loopback_mode = (struct diag_mode_set *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
link_flags = loopback_mode->type;
timeout = loopback_mode->timeout * 100;
@@ -1864,10 +1888,11 @@ loopback_mode_exit:
job_error:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -2015,14 +2040,16 @@ lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
* loopback mode in order to perform a diagnostic loopback test.
*/
static int
-lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct diag_mode_set *loopback_mode;
uint32_t link_flags, timeout;
int i, rc = 0;
/* no data to return just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len < sizeof(struct fc_bsg_request) +
sizeof(struct diag_mode_set)) {
@@ -2054,7 +2081,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3129 Bring link to diagnostic state.\n");
loopback_mode = (struct diag_mode_set *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
link_flags = loopback_mode->type;
timeout = loopback_mode->timeout * 100;
@@ -2151,10 +2178,11 @@ loopback_mode_exit:
job_error:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -2166,17 +2194,17 @@ job_error:
* command from the user to proper driver action routines.
*/
static int
-lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
+lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
{
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
int rc;
- shost = job->shost;
+ shost = fc_bsg_to_shost(job);
if (!shost)
return -ENODEV;
- vport = (struct lpfc_vport *)job->shost->hostdata;
+ vport = shost_priv(shost);
if (!vport)
return -ENODEV;
phba = vport->phba;
@@ -2202,8 +2230,10 @@ lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
* command from the user to proper driver action routines.
*/
static int
-lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
+lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
@@ -2211,10 +2241,10 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
uint32_t timeout;
int rc, i;
- shost = job->shost;
+ shost = fc_bsg_to_shost(job);
if (!shost)
return -ENODEV;
- vport = (struct lpfc_vport *)job->shost->hostdata;
+ vport = shost_priv(shost);
if (!vport)
return -ENODEV;
phba = vport->phba;
@@ -2232,7 +2262,7 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
phba->link_flag &= ~LS_LOOPBACK_MODE;
spin_unlock_irq(&phba->hbalock);
loopback_mode_end_cmd = (struct diag_mode_set *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
timeout = loopback_mode_end_cmd->timeout * 100;
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
@@ -2263,10 +2293,11 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
loopback_mode_end_exit:
/* make return code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -2278,8 +2309,10 @@ loopback_mode_end_exit:
* application.
*/
static int
-lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
+lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
@@ -2292,12 +2325,12 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
struct diag_status *diag_status_reply;
int mbxstatus, rc = 0;
- shost = job->shost;
+ shost = fc_bsg_to_shost(job);
if (!shost) {
rc = -ENODEV;
goto job_error;
}
- vport = (struct lpfc_vport *)job->shost->hostdata;
+ vport = shost_priv(shost);
if (!vport) {
rc = -ENODEV;
goto job_error;
@@ -2335,7 +2368,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
goto job_error;
link_diag_test_cmd = (struct sli4_link_diag *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
@@ -2385,7 +2418,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
}
diag_status_reply = (struct diag_status *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
if (job->reply_len <
sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
@@ -2413,10 +2446,11 @@ link_diag_test_exit:
job_error:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -2982,9 +3016,10 @@ err_post_rxbufs_exit:
* of loopback mode.
**/
static int
-lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
+lpfc_bsg_diag_loopback_run(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_hba *phba = vport->phba;
struct lpfc_bsg_event *evt;
struct event_data *evdat;
@@ -3012,7 +3047,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
uint32_t total_mem;
/* in case no data is returned, return just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
@@ -3237,11 +3272,11 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
rc = IOCB_SUCCESS;
/* skip over elx loopback header */
rx_databuf += ELX_LOOPBACK_HEADER_SZ;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
rx_databuf, size);
- job->reply->reply_payload_rcv_len = size;
+ bsg_reply->reply_payload_rcv_len = size;
}
}
@@ -3271,11 +3306,12 @@ err_loopback_test_exit:
loopback_test_exit:
kfree(dataout);
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
/* complete the job back to userspace if no error */
if (rc == IOCB_SUCCESS)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -3284,9 +3320,10 @@ loopback_test_exit:
* @job: GET_DFC_REV fc_bsg_job
**/
static int
-lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
+lpfc_bsg_get_dfc_rev(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_hba *phba = vport->phba;
struct get_mgmt_rev_reply *event_reply;
int rc = 0;
@@ -3301,7 +3338,7 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
}
event_reply = (struct get_mgmt_rev_reply *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
if (job->reply_len <
sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
@@ -3315,9 +3352,10 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
job_error:
- job->reply->result = rc;
+ bsg_reply->result = rc;
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -3336,7 +3374,8 @@ static void
lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
+ struct bsg_job *job;
uint32_t size;
unsigned long flags;
uint8_t *pmb, *pmb_buf;
@@ -3364,8 +3403,9 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/* Copy the mailbox data to the job if it is still active */
if (job) {
+ bsg_reply = job->reply;
size = job->reply_payload.payload_len;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmb_buf, size);
@@ -3379,8 +3419,9 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
@@ -3510,11 +3551,12 @@ lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
* This is routine handles BSG job for mailbox commands completions with
* multiple external buffers.
**/
-static struct fc_bsg_job *
+static struct bsg_job *
lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
uint8_t *pmb, *pmb_buf;
unsigned long flags;
uint32_t size;
@@ -3529,6 +3571,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
+ bsg_reply = job->reply;
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
@@ -3559,13 +3602,13 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
if (job) {
size = job->reply_payload.payload_len;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmb_buf, size);
/* result for successful */
- job->reply->result = 0;
+ bsg_reply->result = 0;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2937 SLI_CONFIG ext-buffer maibox command "
@@ -3603,7 +3646,8 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
static void
lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
@@ -3623,9 +3667,11 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
/* if the job is still active, call job done */
- if (job)
- job->job_done(job);
-
+ if (job) {
+ bsg_reply = job->reply;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ }
return;
}
@@ -3640,7 +3686,8 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
static void
lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
@@ -3658,8 +3705,11 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
lpfc_bsg_mbox_ext_session_reset(phba);
/* if the job is still active, call job done */
- if (job)
- job->job_done(job);
+ if (job) {
+ bsg_reply = job->reply;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ }
return;
}
@@ -3768,10 +3818,11 @@ lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
* non-embedded external buffers.
**/
static int
-lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
enum nemb_type nemb_tp,
struct lpfc_dmabuf *dmabuf)
{
+ struct fc_bsg_request *bsg_request = job->request;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
struct dfc_mbox_req *mbox_req;
struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
@@ -3784,7 +3835,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
int rc, i;
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* pointer to the start of mailbox command */
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
@@ -3955,10 +4006,12 @@ job_error:
* non-embedded external buffers.
**/
static int
-lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
enum nemb_type nemb_tp,
struct lpfc_dmabuf *dmabuf)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct dfc_mbox_req *mbox_req;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
uint32_t ext_buf_cnt;
@@ -3969,7 +4022,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
int rc = SLI_CONFIG_NOT_HANDLED, i;
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* pointer to the start of mailbox command */
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
@@ -4096,8 +4149,9 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* wait for additional external buffers */
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return SLI_CONFIG_HANDLED;
job_error:
@@ -4119,7 +4173,7 @@ job_error:
* with embedded subsystem 0x1 and opcodes with external HBDs.
**/
static int
-lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
struct lpfc_sli_config_mbox *sli_cfg_mbx;
@@ -4268,8 +4322,9 @@ lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
* user space through BSG.
**/
static int
-lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
{
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
struct lpfc_dmabuf *dmabuf;
uint8_t *pbuf;
@@ -4307,7 +4362,7 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
dmabuf, index);
pbuf = (uint8_t *)dmabuf->virt;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pbuf, size);
@@ -4321,8 +4376,9 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
lpfc_bsg_mbox_ext_session_reset(phba);
}
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return SLI_CONFIG_HANDLED;
}
@@ -4336,9 +4392,10 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
* from user space through BSG.
**/
static int
-lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct bsg_job_data *dd_data = NULL;
LPFC_MBOXQ_t *pmboxq = NULL;
MAILBOX_t *pmb;
@@ -4436,8 +4493,9 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
/* wait for additional external buffers */
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return SLI_CONFIG_HANDLED;
job_error:
@@ -4457,7 +4515,7 @@ job_error:
* command with multiple non-embedded external buffers.
**/
static int
-lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
int rc;
@@ -4502,14 +4560,15 @@ lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
* (0x9B) mailbox commands and external buffers.
**/
static int
-lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
+ struct fc_bsg_request *bsg_request = job->request;
struct dfc_mbox_req *mbox_req;
int rc = SLI_CONFIG_NOT_HANDLED;
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* mbox command with/without single external buffer */
if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
@@ -4579,9 +4638,11 @@ sli_cfg_ext_error:
* let our completion handler finish the command.
**/
static int
-lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_vport *vport)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
/* a 4k buffer to hold the mb and extended data from/to the bsg */
@@ -4600,7 +4661,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
uint32_t size;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* sanity check to protect driver */
if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
@@ -4619,7 +4680,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* check if requested extended data lengths are valid */
if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
@@ -4841,7 +4902,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* job finished, copy the data */
memcpy(pmbx, pmb, sizeof(*pmb));
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmbx, size);
@@ -4870,15 +4931,17 @@ job_cont:
* @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
**/
static int
-lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
+lpfc_bsg_mbox_cmd(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_hba *phba = vport->phba;
struct dfc_mbox_req *mbox_req;
int rc = 0;
/* mix-and-match backward compatibility */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -4889,7 +4952,7 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
sizeof(struct fc_bsg_request)),
(int)sizeof(struct dfc_mbox_req));
mbox_req = (struct dfc_mbox_req *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
mbox_req->extMboxTag = 0;
mbox_req->extSeqNum = 0;
}
@@ -4898,15 +4961,16 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
if (rc == 0) {
/* job done */
- job->reply->result = 0;
+ bsg_reply->result = 0;
job->dd_data = NULL;
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
} else if (rc == 1)
/* job submitted, will complete later*/
rc = 0; /* return zero, no error */
else {
/* some error occurred */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
}
@@ -4936,7 +5000,8 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp, *rmp;
struct lpfc_bsg_menlo *menlo;
@@ -4956,6 +5021,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
+ bsg_reply = job->reply;
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
@@ -4970,7 +5036,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
*/
menlo_resp = (struct menlo_response *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
menlo_resp->xri = rsp->ulpContext;
if (rsp->ulpStatus) {
if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
@@ -4990,7 +5056,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
}
} else {
rsp_size = rsp->un.genreq64.bdl.bdeSize;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
lpfc_bsg_copy_data(rmp, &job->reply_payload,
rsp_size, 0);
}
@@ -5007,8 +5073,9 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
/* Complete the job if active */
if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
@@ -5024,9 +5091,11 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
* supplied in the menlo request header xri field.
**/
static int
-lpfc_menlo_cmd(struct fc_bsg_job *job)
+lpfc_menlo_cmd(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocbq;
IOCB_t *cmd;
@@ -5039,7 +5108,7 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
struct ulp_bde64 *bpl = NULL;
/* in case no data is returned return just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) +
@@ -5069,7 +5138,7 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
}
menlo_cmd = (struct menlo_command *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
@@ -5180,19 +5249,65 @@ free_dd:
kfree(dd_data);
no_dd_data:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
return rc;
}
+static int
+lpfc_forced_link_speed(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct forced_link_speed_support_reply *forced_reply;
+ int rc = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct get_forced_link_speed_support)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "0048 Received FORCED_LINK_SPEED request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ forced_reply = (struct forced_link_speed_support_reply *)
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+ if (job->reply_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct forced_link_speed_support_reply)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "0049 Received FORCED_LINK_SPEED reply below "
+ "minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
+ ? LPFC_FORCED_LINK_SPEED_SUPPORTED
+ : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
+job_error:
+ bsg_reply->result = rc;
+ if (rc == 0)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return rc;
+}
+
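Note (illustrative only, not part of the patch): the conversion in this file consistently replaces the removed fc_bsg_job callbacks with the bsg-lib helpers. A minimal sketch of the resulting completion pattern, assuming the usual bsg_job_done() from <linux/bsg-lib.h> and the fc_bsg_reply layout from scsi_transport_fc.h:

	/* Complete a vendor job the bsg-lib way; 'result' is the driver status. */
	static void complete_vendor_job_sketch(struct bsg_job *job, int result)
	{
		struct fc_bsg_reply *bsg_reply = job->reply;

		bsg_reply->result = result;
		/* replaces the old: job->reply->result = result; job->job_done(job); */
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}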
/**
* lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
* @job: fc_bsg_job to handle
**/
static int
-lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
+lpfc_bsg_hst_vendor(struct bsg_job *job)
{
- int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
int rc;
switch (command) {
@@ -5227,11 +5342,14 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
case LPFC_BSG_VENDOR_MENLO_DATA:
rc = lpfc_menlo_cmd(job);
break;
+ case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
+ rc = lpfc_forced_link_speed(job);
+ break;
default:
rc = -EINVAL;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
break;
}
@@ -5243,12 +5361,14 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
* @job: fc_bsg_job to handle
**/
int
-lpfc_bsg_request(struct fc_bsg_job *job)
+lpfc_bsg_request(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t msgcode;
int rc;
- msgcode = job->request->msgcode;
+ msgcode = bsg_request->msgcode;
switch (msgcode) {
case FC_BSG_HST_VENDOR:
rc = lpfc_bsg_hst_vendor(job);
@@ -5261,9 +5381,9 @@ lpfc_bsg_request(struct fc_bsg_job *job)
break;
default:
rc = -EINVAL;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
break;
}
@@ -5278,9 +5398,9 @@ lpfc_bsg_request(struct fc_bsg_job *job)
* the waiting function which will handle passing the error back to userspace
**/
int
-lpfc_bsg_timeout(struct fc_bsg_job *job)
+lpfc_bsg_timeout(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index e557bcdbcb19..f2247aa4fa17 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -35,6 +35,7 @@
#define LPFC_BSG_VENDOR_MENLO_DATA 9
#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
+#define LPFC_BSG_VENDOR_FORCED_LINK_SPEED 14
struct set_ct_event {
uint32_t command;
@@ -284,6 +285,15 @@ struct lpfc_sli_config_mbox {
} un;
};
+#define LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED 0
+#define LPFC_FORCED_LINK_SPEED_SUPPORTED 1
+struct get_forced_link_speed_support {
+ uint32_t command;
+};
+struct forced_link_speed_support_reply {
+ uint8_t supported;
+};
+
/* driver only */
#define SLI_CONFIG_NOT_HANDLED 0
#define SLI_CONFIG_HANDLED 1
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index bd7576d452f2..15d2bfdf582d 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -397,8 +397,6 @@ extern spinlock_t _dump_buf_lock;
extern int _dump_buf_done;
extern spinlock_t pgcnt_lock;
extern unsigned int pgcnt;
-extern unsigned int lpfc_prot_mask;
-extern unsigned char lpfc_prot_guard;
extern unsigned int lpfc_fcp_look_ahead;
/* Interface exported by fabric iocb scheduler */
@@ -431,8 +429,8 @@ struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
#define HBA_EVENT_LINK_DOWN 3
/* functions to support SGIOv4/bsg interface */
-int lpfc_bsg_request(struct fc_bsg_job *);
-int lpfc_bsg_timeout(struct fc_bsg_job *);
+int lpfc_bsg_request(struct bsg_job *);
+int lpfc_bsg_timeout(struct bsg_job *);
int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b7d54bfb1df9..236e4e51d161 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -7610,7 +7610,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* reject till our FLOGI completes */
if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
(cmd != ELS_CMD_FLOGI)) {
- rjt_err = LSRJT_UNABLE_TPC;
+ rjt_err = LSRJT_LOGICAL_BSY;
rjt_exp = LSEXP_NOTHING_MORE;
goto lsrjt;
}
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index ee8022737591..5646699b0516 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -921,6 +921,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D
#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
#define LPFC_MBOX_OPCODE_GET_VPD_DATA 0x5B
+#define LPFC_MBOX_OPCODE_SET_HOST_DATA 0x5D
#define LPFC_MBOX_OPCODE_SEND_ACTIVATION 0x73
#define LPFC_MBOX_OPCODE_RESET_LICENSES 0x74
#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
@@ -2289,6 +2290,9 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
+#define lpfc_mbx_rd_conf_link_speed_SHIFT 16
+#define lpfc_mbx_rd_conf_link_speed_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_link_speed_WORD word6
uint32_t rsvd_7;
uint32_t rsvd_8;
uint32_t word9;
@@ -2919,6 +2923,16 @@ struct lpfc_mbx_set_feature {
};
+#define LPFC_SET_HOST_OS_DRIVER_VERSION 0x2
+struct lpfc_mbx_set_host_data {
+#define LPFC_HOST_OS_DRIVER_VERSION_SIZE 48
+ struct mbox_header header;
+ uint32_t param_id;
+ uint32_t param_len;
+ uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];
+};
+
+
struct lpfc_mbx_get_sli4_parameters {
struct mbox_header header;
struct lpfc_sli4_parameters sli4_parameters;
@@ -3313,6 +3327,7 @@ struct lpfc_mqe {
struct lpfc_mbx_get_port_name get_port_name;
struct lpfc_mbx_set_feature set_feature;
struct lpfc_mbx_memory_dump_type3 mem_dump_type3;
+ struct lpfc_mbx_set_host_data set_host_data;
struct lpfc_mbx_nop nop;
} un;
};
@@ -3981,7 +3996,8 @@ union lpfc_wqe128 {
struct gen_req64_wqe gen_req;
};
-#define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001
+#define LPFC_GROUP_OJECT_MAGIC_G5 0xfeaa0001
+#define LPFC_GROUP_OJECT_MAGIC_G6 0xfeaa0003
#define LPFC_FILE_TYPE_GROUP 0xf7
#define LPFC_FILE_ID_GROUP 0xa2
struct lpfc_grp_hdr {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 734a0428ef0e..4776fd85514f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6279,34 +6279,36 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
uint32_t old_guard;
int pagecnt = 10;
- if (lpfc_prot_mask && lpfc_prot_guard) {
+ if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"1478 Registering BlockGuard with the "
"SCSI layer\n");
- old_mask = lpfc_prot_mask;
- old_guard = lpfc_prot_guard;
+ old_mask = phba->cfg_prot_mask;
+ old_guard = phba->cfg_prot_guard;
/* Only allow supported values */
- lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
+ phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
SHOST_DIX_TYPE0_PROTECTION |
SHOST_DIX_TYPE1_PROTECTION);
- lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC);
+ phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
+ SHOST_DIX_GUARD_CRC);
/* DIF Type 1 protection for profiles AST1/C1 is end to end */
- if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
- lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
+ if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
+ phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
- if (lpfc_prot_mask && lpfc_prot_guard) {
- if ((old_mask != lpfc_prot_mask) ||
- (old_guard != lpfc_prot_guard))
+ if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
+ if ((old_mask != phba->cfg_prot_mask) ||
+ (old_guard != phba->cfg_prot_guard))
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1475 Registering BlockGuard with the "
"SCSI layer: mask %d guard %d\n",
- lpfc_prot_mask, lpfc_prot_guard);
+ phba->cfg_prot_mask,
+ phba->cfg_prot_guard);
- scsi_host_set_prot(shost, lpfc_prot_mask);
- scsi_host_set_guard(shost, lpfc_prot_guard);
+ scsi_host_set_prot(shost, phba->cfg_prot_mask);
+ scsi_host_set_guard(shost, phba->cfg_prot_guard);
} else
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1479 Not Registering BlockGuard with the SCSI "
@@ -6929,6 +6931,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
struct lpfc_mbx_get_func_cfg *get_func_cfg;
struct lpfc_rsrc_desc_fcfcoe *desc;
char *pdesc_0;
+ uint16_t forced_link_speed;
+ uint32_t if_type;
int length, i, rc = 0, rc2;
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -7022,6 +7026,58 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
if (rc)
goto read_cfg_out;
+ /* Update link speed if forced link speed is supported */
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ forced_link_speed =
+ bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
+ if (forced_link_speed) {
+ phba->hba_flag |= HBA_FORCED_LINK_SPEED;
+
+ switch (forced_link_speed) {
+ case LINK_SPEED_1G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_1G;
+ break;
+ case LINK_SPEED_2G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_2G;
+ break;
+ case LINK_SPEED_4G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_4G;
+ break;
+ case LINK_SPEED_8G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_8G;
+ break;
+ case LINK_SPEED_10G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_10G;
+ break;
+ case LINK_SPEED_16G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_16G;
+ break;
+ case LINK_SPEED_32G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_32G;
+ break;
+ case 0xffff:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_AUTO;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0047 Unrecognized link "
+ "speed : %d\n",
+ forced_link_speed);
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_AUTO;
+ }
+ }
+ }
+
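Aside (hypothetical alternative, not in the patch): the switch above maps the READ_CONFIG forced speed 1:1 onto the driver's user link speed values, with 0xffff and unknown values falling back to auto. The same mapping could be expressed table-driven; the entries below reuse only identifiers that appear in the hunk above:

	static const struct {
		uint16_t fw;		/* value reported by READ_CONFIG */
		uint32_t user;		/* corresponding cfg_link_speed value */
	} forced_speed_map[] = {
		{ LINK_SPEED_1G,  LPFC_USER_LINK_SPEED_1G  },
		{ LINK_SPEED_2G,  LPFC_USER_LINK_SPEED_2G  },
		{ LINK_SPEED_4G,  LPFC_USER_LINK_SPEED_4G  },
		{ LINK_SPEED_8G,  LPFC_USER_LINK_SPEED_8G  },
		{ LINK_SPEED_10G, LPFC_USER_LINK_SPEED_10G },
		{ LINK_SPEED_16G, LPFC_USER_LINK_SPEED_16G },
		{ LINK_SPEED_32G, LPFC_USER_LINK_SPEED_32G },
	};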
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
length = phba->sli4_hba.max_cfg_param.max_xri -
lpfc_sli4_get_els_iocb_cnt(phba);
@@ -7256,6 +7312,7 @@ int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
+ uint32_t wqesize;
int idx;
/*
@@ -7340,15 +7397,10 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.fcp_cq[idx] = qdesc;
/* Create Fast Path FCP WQs */
- if (phba->fcp_embed_io) {
- qdesc = lpfc_sli4_queue_alloc(phba,
- LPFC_WQE128_SIZE,
- LPFC_WQE128_DEF_COUNT);
- } else {
- qdesc = lpfc_sli4_queue_alloc(phba,
- phba->sli4_hba.wq_esize,
- phba->sli4_hba.wq_ecount);
- }
+ wqesize = (phba->fcp_embed_io) ?
+ LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
+ qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
+ phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0503 Failed allocate fast-path FCP "
@@ -10260,6 +10312,7 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
int i, rc = 0;
struct lpfc_dmabuf *dmabuf, *next;
uint32_t offset = 0, temp_offset = 0;
+ uint32_t magic_number, ftype, fid, fsize;
/* It can be null in no-wait mode, sanity check */
if (!fw) {
@@ -10268,18 +10321,19 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
}
image = (struct lpfc_grp_hdr *)fw->data;
+ magic_number = be32_to_cpu(image->magic_number);
+ ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
+ fid = bf_get_be32(lpfc_grp_hdr_id, image);
+ fsize = be32_to_cpu(image->size);
+
INIT_LIST_HEAD(&dma_buffer_list);
- if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
- (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
- LPFC_FILE_TYPE_GROUP) ||
- (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
- (be32_to_cpu(image->size) != fw->size)) {
+ if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 &&
+ magic_number != LPFC_GROUP_OJECT_MAGIC_G6) ||
+ ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3022 Invalid FW image found. "
- "Magic:%x Type:%x ID:%x\n",
- be32_to_cpu(image->magic_number),
- bf_get_be32(lpfc_grp_hdr_file_type, image),
- bf_get_be32(lpfc_grp_hdr_id, image));
+ "Magic:%x Type:%x ID:%x Size %d %zd\n",
+ magic_number, ftype, fid, fsize, fw->size);
rc = -EINVAL;
goto release_out;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index d197aa176dee..ad350d969bdc 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -413,15 +413,13 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
* struct fcp_cmnd, struct fcp_rsp and the number of bde's
* necessary to support the sg_tablesize.
*/
- psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+ psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool,
GFP_KERNEL, &psb->dma_handle);
if (!psb->data) {
kfree(psb);
break;
}
- /* Initialize virtual ptrs to dma_buf region. */
- memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
/* Allocate iotag for psb->cur_iocbq. */
iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
@@ -607,7 +605,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
}
/**
- * lpfc_sli4_post_scsi_sgl_list - Psot blocks of scsi buffer sgls from a list
+ * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
* @phba: pointer to lpfc hba data structure.
* @post_sblist: pointer to the scsi buffer list.
*
@@ -736,7 +734,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
}
/**
- * lpfc_sli4_repost_scsi_sgl_list - Repsot all the allocated scsi buffer sgls
+ * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
* @phba: pointer to lpfc hba data structure.
*
* This routine walks the list of scsi buffers that have been allocated and
@@ -821,13 +819,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
* for the struct fcp_cmnd, struct fcp_rsp and the number
* of bde's necessary to support the sg_tablesize.
*/
- psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+ psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool,
GFP_KERNEL, &psb->dma_handle);
if (!psb->data) {
kfree(psb);
break;
}
- memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
/*
* 4K Page alignment is CRITICAL to BlockGuard, double check
@@ -857,7 +854,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
psb->data, psb->dma_handle);
kfree(psb);
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "3368 Failed to allocated IOTAG for"
+ "3368 Failed to allocate IOTAG for"
" XRI:0x%x\n", lxri);
lpfc_sli4_free_xri(phba, lxri);
break;
@@ -1136,7 +1133,7 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
*
* This routine does the pci dma mapping for scatter-gather list of scsi cmnd
* field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
- * through sg elements and format the bdea. This routine also initializes all
+ * through sg elements and format the bde. This routine also initializes all
* IOCB fields which are dependent on scsi command request buffer.
*
* Return codes:
@@ -1269,13 +1266,16 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-/* Return if if error injection is detected by Initiator */
+/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT 0x1
-/* Return if if error injection is detected by Target */
+/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT 0x2
-/* Return if if swapping CSUM<-->CRC is required for error injection */
+/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP 0x10
-/* Return if disabling Guard/Ref/App checking is required for error injection */
+/**
+ * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
+ * error injection
+ **/
#define BG_ERR_CHECK 0x20
/**
@@ -4139,13 +4139,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
- /* The sdev is not guaranteed to be valid post scsi_done upcall. */
- cmd->scsi_done(cmd);
-
spin_lock_irqsave(&phba->hbalock, flags);
lpfc_cmd->pCmd = NULL;
spin_unlock_irqrestore(&phba->hbalock, flags);
+ /* The sdev is not guaranteed to be valid post scsi_done upcall. */
+ cmd->scsi_done(cmd);
+
/*
* If there is a thread waiting for command completion
* wake up the thread.
@@ -4822,7 +4822,7 @@ wait_for_cmpl:
ret = FAILED;
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0748 abort handler timed out waiting "
- "for abortng I/O (xri:x%x) to complete: "
+ "for aborting I/O (xri:x%x) to complete: "
"ret %#x, ID %d, LUN %llu\n",
iocb->sli4_xritag, ret,
cmnd->device->id, cmnd->device->lun);
@@ -4945,26 +4945,30 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
* 0x2002 - Success.
**/
static int
-lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
- unsigned tgt_id, uint64_t lun_id,
- uint8_t task_mgmt_cmd)
+lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
+ unsigned int tgt_id, uint64_t lun_id,
+ uint8_t task_mgmt_cmd)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_scsi_buf *lpfc_cmd;
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *iocbqrsp;
- struct lpfc_nodelist *pnode = rdata->pnode;
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *pnode;
int ret;
int status;
- if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
return FAILED;
+ pnode = rdata->pnode;
- lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
+ lpfc_cmd = lpfc_get_scsi_buf(phba, pnode);
if (lpfc_cmd == NULL)
return FAILED;
lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
lpfc_cmd->rdata = rdata;
+ lpfc_cmd->pCmd = cmnd;
status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
task_mgmt_cmd);
@@ -5171,7 +5175,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
- status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
+ status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
FCP_LUN_RESET);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5249,7 +5253,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
- status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
+ status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
FCP_TARGET_RESET);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5328,7 +5332,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
if (!match)
continue;
- status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
+ status = lpfc_send_taskmgmt(vport, cmnd,
i, 0, FCP_TARGET_RESET);
if (status != SUCCESS) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f4f77c5b0c83..4faa7672fc1d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -47,6 +47,7 @@
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
+#include "lpfc_version.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
@@ -2678,15 +2679,16 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
- list_del_init(&cmd_iocb->list);
if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+ /* remove from txcmpl queue list */
+ list_del_init(&cmd_iocb->list);
cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ return cmd_iocb;
}
- return cmd_iocb;
}
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0317 iotag x%x is out off "
+ "0317 iotag x%x is out of "
"range: max iotag x%x wd0 x%x\n",
iotag, phba->sli.last_iotag,
*(((uint32_t *) &prspiocb->iocb) + 7));
@@ -2721,8 +2723,9 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
return cmd_iocb;
}
}
+
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0372 iotag x%x is out off range: max iotag (x%x)\n",
+ "0372 iotag x%x is out of range: max iotag (x%x)\n",
iotag, phba->sli.last_iotag);
return NULL;
}
@@ -6291,6 +6294,25 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
return 0;
}
+void
+lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+ uint32_t len;
+
+ len = sizeof(struct lpfc_mbx_set_host_data) -
+ sizeof(struct lpfc_sli4_cfg_mhdr);
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
+ LPFC_SLI4_MBX_EMBED);
+
+ mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
+ mbox->u.mqe.un.set_host_data.param_len = 8;
+ snprintf(mbox->u.mqe.un.set_host_data.data,
+ LPFC_HOST_OS_DRIVER_VERSION_SIZE,
+ "Linux %s v"LPFC_DRIVER_VERSION,
+ (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
+}
+
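For illustration, the SET_HOST_DATA payload built above carries a short version string. A self-contained user-space mock-up of the same snprintf(), using the LPFC_DRIVER_VERSION bumped in the lpfc_version.h hunk below, would be:

	#include <stdio.h>

	#define LPFC_DRIVER_VERSION "11.2.0.2"
	#define LPFC_HOST_OS_DRIVER_VERSION_SIZE 48

	int main(void)
	{
		char data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];

		/* Mirrors the format used in lpfc_set_host_data() for a plain FC port. */
		snprintf(data, sizeof(data), "Linux %s v" LPFC_DRIVER_VERSION, "FC");
		printf("%s\n", data);	/* prints: Linux FC v11.2.0.2 */
		return 0;
	}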
/**
* lpfc_sli4_hba_setup - SLI4 device initialization PCI function
* @phba: Pointer to HBA context object.
@@ -6542,6 +6564,15 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
goto out_free_mbox;
}
+ lpfc_set_host_data(phba, mboxq);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "2134 Failed to set host os driver version %x",
+ rc);
+ }
+
/* Read the port's service parameters. */
rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
if (rc) {
@@ -11781,6 +11812,8 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
/* Look up the ELS command IOCB and create pseudo response IOCB */
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ /* Put the iocb back on the txcmplq */
+ lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
if (unlikely(!cmdiocbq)) {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c9bf20eb7223..50bfc43ebcb0 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.2.0.0."
+#define LPFC_DRIVER_VERSION "11.2.0.2"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index a590089b9397..ccb68d12692c 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -28,17 +28,15 @@
/* Definitions for the core NCR5380 driver. */
-#define NCR5380_implementation_fields unsigned char *pdma_base; \
- int pdma_residual
+#define NCR5380_implementation_fields int pdma_residual
-#define NCR5380_read(reg) macscsi_read(instance, reg)
-#define NCR5380_write(reg, value) macscsi_write(instance, reg, value)
+#define NCR5380_read(reg) in_8(hostdata->io + ((reg) << 4))
+#define NCR5380_write(reg, value) out_8(hostdata->io + ((reg) << 4), value)
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
- macscsi_dma_xfer_len(instance, cmd)
+#define NCR5380_dma_xfer_len macscsi_dma_xfer_len
#define NCR5380_dma_recv_setup macscsi_pread
#define NCR5380_dma_send_setup macscsi_pwrite
-#define NCR5380_dma_residual(instance) (hostdata->pdma_residual)
+#define NCR5380_dma_residual macscsi_dma_residual
#define NCR5380_intr macscsi_intr
#define NCR5380_queue_command macscsi_queue_command
@@ -61,20 +59,6 @@ module_param(setup_hostid, int, 0);
static int setup_toshiba_delay = -1;
module_param(setup_toshiba_delay, int, 0);
-/*
- * NCR 5380 register access functions
- */
-
-static inline char macscsi_read(struct Scsi_Host *instance, int reg)
-{
- return in_8(instance->base + (reg << 4));
-}
-
-static inline void macscsi_write(struct Scsi_Host *instance, int reg, int value)
-{
- out_8(instance->base + (reg << 4), value);
-}
-
#ifndef MODULE
static int __init mac_scsi_setup(char *str)
{
@@ -167,16 +151,15 @@ __asm__ __volatile__ \
: "0"(s), "1"(d), "2"(n) \
: "d0")
-static int macscsi_pread(struct Scsi_Host *instance,
- unsigned char *dst, int len)
+static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
+ unsigned char *dst, int len)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- unsigned char *s = hostdata->pdma_base + (INPUT_DATA_REG << 4);
+ unsigned char *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
unsigned char *d = dst;
int n = len;
int transferred;
- while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+ while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ | BASR_PHASE_MATCH,
BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
CP_IO_TO_MEM(s, d, n);
@@ -189,23 +172,23 @@ static int macscsi_pread(struct Scsi_Host *instance,
return 0;
/* Target changed phase early? */
- if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ,
+ if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
scmd_printk(KERN_ERR, hostdata->connected,
"%s: !REQ and !ACK\n", __func__);
if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
return 0;
- dsprintk(NDEBUG_PSEUDO_DMA, instance,
+ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
"%s: bus error (%d/%d)\n", __func__, transferred, len);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
d = dst + transferred;
n = len - transferred;
}
scmd_printk(KERN_ERR, hostdata->connected,
"%s: phase mismatch or !DRQ\n", __func__);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
return -1;
}
@@ -270,16 +253,15 @@ __asm__ __volatile__ \
: "0"(s), "1"(d), "2"(n) \
: "d0")
-static int macscsi_pwrite(struct Scsi_Host *instance,
- unsigned char *src, int len)
+static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
+ unsigned char *src, int len)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char *s = src;
- unsigned char *d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4);
+ unsigned char *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
int n = len;
int transferred;
- while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+ while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ | BASR_PHASE_MATCH,
BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
CP_MEM_TO_IO(s, d, n);
@@ -288,7 +270,7 @@ static int macscsi_pwrite(struct Scsi_Host *instance,
hostdata->pdma_residual = len - transferred;
/* Target changed phase early? */
- if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ,
+ if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
scmd_printk(KERN_ERR, hostdata->connected,
"%s: !REQ and !ACK\n", __func__);
@@ -297,7 +279,7 @@ static int macscsi_pwrite(struct Scsi_Host *instance,
/* No bus error. */
if (n == 0) {
- if (NCR5380_poll_politely(instance, TARGET_COMMAND_REG,
+ if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
TCR_LAST_BYTE_SENT,
TCR_LAST_BYTE_SENT, HZ / 64) < 0)
scmd_printk(KERN_ERR, hostdata->connected,
@@ -305,25 +287,23 @@ static int macscsi_pwrite(struct Scsi_Host *instance,
return 0;
}
- dsprintk(NDEBUG_PSEUDO_DMA, instance,
+ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
"%s: bus error (%d/%d)\n", __func__, transferred, len);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
s = src + transferred;
n = len - transferred;
}
scmd_printk(KERN_ERR, hostdata->connected,
"%s: phase mismatch or !DRQ\n", __func__);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
return -1;
}
-static int macscsi_dma_xfer_len(struct Scsi_Host *instance,
+static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
cmd->SCp.this_residual < 16)
return 0;
@@ -331,6 +311,11 @@ static int macscsi_dma_xfer_len(struct Scsi_Host *instance,
return cmd->SCp.this_residual;
}
+static int macscsi_dma_residual(struct NCR5380_hostdata *hostdata)
+{
+ return hostdata->pdma_residual;
+}
+
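Aside (illustrative only): with the hooks now taking the host-private data, the NCR5380_read/NCR5380_write macros defined at the top of this file expand to plain MMIO accesses on hostdata->io with a 16-byte register stride (the << 4 shift). Roughly:

	/* What NCR5380_read(STATUS_REG) becomes after this change; in_8() is the
	 * m68k MMIO read helper this driver already used before the conversion.
	 */
	static inline unsigned char mac5380_read_status(struct NCR5380_hostdata *hostdata)
	{
		return in_8(hostdata->io + (STATUS_REG << 4));
	}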
#include "NCR5380.c"
#define DRV_MODULE_NAME "mac_scsi"
@@ -356,6 +341,7 @@ static struct scsi_host_template mac_scsi_template = {
static int __init mac_scsi_probe(struct platform_device *pdev)
{
struct Scsi_Host *instance;
+ struct NCR5380_hostdata *hostdata;
int error;
int host_flags = 0;
struct resource *irq, *pio_mem, *pdma_mem = NULL;
@@ -388,17 +374,18 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
if (!instance)
return -ENOMEM;
- instance->base = pio_mem->start;
if (irq)
instance->irq = irq->start;
else
instance->irq = NO_IRQ;
- if (pdma_mem && setup_use_pdma) {
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ hostdata = shost_priv(instance);
+ hostdata->base = pio_mem->start;
+ hostdata->io = (void *)pio_mem->start;
- hostdata->pdma_base = (unsigned char *)pdma_mem->start;
- } else
+ if (pdma_mem && setup_use_pdma)
+ hostdata->pdma_io = (void *)pdma_mem->start;
+ else
host_flags |= FLAG_NO_PSEUDO_DMA;
host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 3aaea713bf37..fdd519c1dd57 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.811.02.00-rc1"
-#define MEGASAS_RELDATE "April 12, 2016"
+#define MEGASAS_VERSION "06.812.07.00-rc1"
+#define MEGASAS_RELDATE "August 22, 2016"
/*
* Device IDs
@@ -1429,6 +1429,8 @@ enum FW_BOOT_CONTEXT {
#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14
#define MR_MAX_MSIX_REG_ARRAY 16
#define MR_RDPQ_MODE_OFFSET 0X00800000
+#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000
+
/*
* register set for both 1068 and 1078 controllers
* structure extended for 1078 registers
@@ -2118,7 +2120,6 @@ struct megasas_instance {
u32 ctrl_context_pages;
struct megasas_ctrl_info *ctrl_info;
unsigned int msix_vectors;
- struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES];
struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
u64 map_id;
u64 pd_seq_map_id;
@@ -2140,6 +2141,7 @@ struct megasas_instance {
u8 is_imr;
u8 is_rdpq;
bool dev_handle;
+ bool fw_sync_cache_support;
};
struct MR_LD_VF_MAP {
u32 size;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index d8b1fbd4c8aa..6484c382f670 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1700,11 +1700,8 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
goto out_done;
}
- /*
- * FW takes care of flush cache on its own for Virtual Disk.
- * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
- */
- if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
+ if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd) &&
+ (!instance->fw_sync_cache_support)) {
scmd->result = DID_OK << 16;
goto out_done;
}
@@ -4840,7 +4837,7 @@ fail_alloc_cmds:
}
/*
- * megasas_setup_irqs_msix - register legacy interrupts.
+ * megasas_setup_irqs_ioapic - register legacy interrupts.
* @instance: Adapter soft state
*
* Do not enable interrupt, only setup ISRs.
@@ -4855,8 +4852,9 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance)
pdev = instance->pdev;
instance->irq_context[0].instance = instance;
instance->irq_context[0].MSIxIndex = 0;
- if (request_irq(pdev->irq, instance->instancet->service_isr,
- IRQF_SHARED, "megasas", &instance->irq_context[0])) {
+ if (request_irq(pci_irq_vector(pdev, 0),
+ instance->instancet->service_isr, IRQF_SHARED,
+ "megasas", &instance->irq_context[0])) {
dev_err(&instance->pdev->dev,
"Failed to register IRQ from %s %d\n",
__func__, __LINE__);
@@ -4877,28 +4875,23 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance)
static int
megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
{
- int i, j, cpu;
+ int i, j;
struct pci_dev *pdev;
pdev = instance->pdev;
/* Try MSI-x */
- cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < instance->msix_vectors; i++) {
instance->irq_context[i].instance = instance;
instance->irq_context[i].MSIxIndex = i;
- if (request_irq(instance->msixentry[i].vector,
+ if (request_irq(pci_irq_vector(pdev, i),
instance->instancet->service_isr, 0, "megasas",
&instance->irq_context[i])) {
dev_err(&instance->pdev->dev,
"Failed to register IRQ for vector %d.\n", i);
- for (j = 0; j < i; j++) {
- if (smp_affinity_enable)
- irq_set_affinity_hint(
- instance->msixentry[j].vector, NULL);
- free_irq(instance->msixentry[j].vector,
- &instance->irq_context[j]);
- }
+ for (j = 0; j < i; j++)
+ free_irq(pci_irq_vector(pdev, j),
+ &instance->irq_context[j]);
/* Retry irq register for IO_APIC*/
instance->msix_vectors = 0;
if (is_probe)
@@ -4906,14 +4899,6 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
else
return -1;
}
- if (smp_affinity_enable) {
- if (irq_set_affinity_hint(instance->msixentry[i].vector,
- get_cpu_mask(cpu)))
- dev_err(&instance->pdev->dev,
- "Failed to set affinity hint"
- " for cpu %d\n", cpu);
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
}
return 0;
}
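Note (sketch only, not part of the patch): the msixentry[] bookkeeping and manual irq_set_affinity_hint() calls are replaced by pci_alloc_irq_vectors()/pci_irq_vector(), with PCI_IRQ_AFFINITY taking over the affinity spreading. A trimmed-down version of the resulting pattern, with the handler and per-vector cookie left as placeholders:

	static int setup_msix_sketch(struct pci_dev *pdev, unsigned int want,
				     irq_handler_t handler, void *cookie)
	{
		int nvec, i;

		nvec = pci_alloc_irq_vectors(pdev, 1, want,
					     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
		if (nvec < 0)
			return nvec;

		for (i = 0; i < nvec; i++) {
			/* the real driver passes a per-vector irq_context here */
			if (request_irq(pci_irq_vector(pdev, i), handler, 0,
					"megasas", cookie))
				goto fail;
		}
		return nvec;
	fail:
		while (--i >= 0)
			free_irq(pci_irq_vector(pdev, i), cookie);
		pci_free_irq_vectors(pdev);
		return -EBUSY;
	}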
@@ -4930,14 +4915,12 @@ megasas_destroy_irqs(struct megasas_instance *instance) {
if (instance->msix_vectors)
for (i = 0; i < instance->msix_vectors; i++) {
- if (smp_affinity_enable)
- irq_set_affinity_hint(
- instance->msixentry[i].vector, NULL);
- free_irq(instance->msixentry[i].vector,
+ free_irq(pci_irq_vector(instance->pdev, i),
&instance->irq_context[i]);
}
else
- free_irq(instance->pdev->irq, &instance->irq_context[0]);
+ free_irq(pci_irq_vector(instance->pdev, 0),
+ &instance->irq_context[0]);
}
/**
@@ -5095,6 +5078,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
0x4000000) >> 0x1a;
if (msix_enable && !msix_disable) {
+ int irq_flags = PCI_IRQ_MSIX;
+
scratch_pad_2 = readl
(&instance->reg_set->outbound_scratch_pad_2);
/* Check max MSI-X vectors */
@@ -5131,15 +5116,18 @@ static int megasas_init_fw(struct megasas_instance *instance)
/* Don't bother allocating more MSI-X vectors than cpus */
instance->msix_vectors = min(instance->msix_vectors,
(unsigned int)num_online_cpus());
- for (i = 0; i < instance->msix_vectors; i++)
- instance->msixentry[i].entry = i;
- i = pci_enable_msix_range(instance->pdev, instance->msixentry,
- 1, instance->msix_vectors);
+ if (smp_affinity_enable)
+ irq_flags |= PCI_IRQ_AFFINITY;
+ i = pci_alloc_irq_vectors(instance->pdev, 1,
+ instance->msix_vectors, irq_flags);
if (i > 0)
instance->msix_vectors = i;
else
instance->msix_vectors = 0;
}
+ i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
+ if (i < 0)
+ goto fail_setup_irqs;
dev_info(&instance->pdev->dev,
"firmware supports msix\t: (%d)", fw_msix_count);
@@ -5152,11 +5140,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
- if (instance->msix_vectors ?
- megasas_setup_irqs_msix(instance, 1) :
- megasas_setup_irqs_ioapic(instance))
- goto fail_setup_irqs;
-
instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
GFP_KERNEL);
if (instance->ctrl_info == NULL)
@@ -5172,6 +5155,10 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (instance->instancet->init_adapter(instance))
goto fail_init_adapter;
+ if (instance->msix_vectors ?
+ megasas_setup_irqs_msix(instance, 1) :
+ megasas_setup_irqs_ioapic(instance))
+ goto fail_init_adapter;
instance->instancet->enable_intr(instance);
@@ -5315,7 +5302,7 @@ fail_init_adapter:
megasas_destroy_irqs(instance);
fail_setup_irqs:
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
instance->msix_vectors = 0;
fail_ready_state:
kfree(instance->ctrl_info);
@@ -5584,7 +5571,6 @@ static int megasas_io_attach(struct megasas_instance *instance)
/*
* Export parameters required by SCSI mid-layer
*/
- host->irq = instance->pdev->irq;
host->unique_id = instance->unique_id;
host->can_queue = instance->max_scsi_cmds;
host->this_id = instance->init_id;
@@ -5947,7 +5933,7 @@ fail_io_attach:
else
megasas_release_mfi(instance);
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
fail_init_mfi:
fail_alloc_dma_buf:
if (instance->evt_detail)
@@ -6105,7 +6091,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
megasas_destroy_irqs(instance);
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
pci_save_state(pdev);
pci_disable_device(pdev);
@@ -6125,6 +6111,7 @@ megasas_resume(struct pci_dev *pdev)
int rval;
struct Scsi_Host *host;
struct megasas_instance *instance;
+ int irq_flags = PCI_IRQ_LEGACY;
instance = pci_get_drvdata(pdev);
host = instance->host;
@@ -6160,9 +6147,15 @@ megasas_resume(struct pci_dev *pdev)
goto fail_ready_state;
/* Now re-enable MSI-X */
- if (instance->msix_vectors &&
- pci_enable_msix_exact(instance->pdev, instance->msixentry,
- instance->msix_vectors))
+ if (instance->msix_vectors) {
+ irq_flags = PCI_IRQ_MSIX;
+ if (smp_affinity_enable)
+ irq_flags |= PCI_IRQ_AFFINITY;
+ }
+ rval = pci_alloc_irq_vectors(instance->pdev, 1,
+ instance->msix_vectors ?
+ instance->msix_vectors : 1, irq_flags);
+ if (rval < 0)
goto fail_reenable_msix;
if (instance->ctrl_context) {
@@ -6245,6 +6238,34 @@ fail_reenable_msix:
#define megasas_resume NULL
#endif
+static inline int
+megasas_wait_for_adapter_operational(struct megasas_instance *instance)
+{
+ int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
+ int i;
+
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
+ return 1;
+
+ for (i = 0; i < wait_time; i++) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
+ break;
+
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
+ dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
+
+ msleep(1000);
+ }
+
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+ dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n",
+ __func__);
+ return 1;
+ }
+
+ return 0;
+}
+
/**
* megasas_detach_one - PCI hot"un"plug entry point
* @pdev: PCI device structure
@@ -6269,9 +6290,14 @@ static void megasas_detach_one(struct pci_dev *pdev)
if (instance->fw_crash_state != UNAVAILABLE)
megasas_free_host_crash_buffer(instance);
scsi_remove_host(instance->host);
+
+ if (megasas_wait_for_adapter_operational(instance))
+ goto skip_firing_dcmds;
+
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+skip_firing_dcmds:
/* cancel the delayed work if this work still in queue*/
if (instance->ev != NULL) {
struct megasas_aen_event *ev = instance->ev;
@@ -6302,7 +6328,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
megasas_destroy_irqs(instance);
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
if (instance->ctrl_context) {
megasas_release_fusion(instance);
@@ -6385,13 +6411,19 @@ static void megasas_shutdown(struct pci_dev *pdev)
struct megasas_instance *instance = pci_get_drvdata(pdev);
instance->unload = 1;
+
+ if (megasas_wait_for_adapter_operational(instance))
+ goto skip_firing_dcmds;
+
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+
+skip_firing_dcmds:
instance->instancet->disable_intr(instance);
megasas_destroy_irqs(instance);
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
}
/**
@@ -6752,8 +6784,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
- dev_err(&instance->pdev->dev, "timed out while"
- "waiting for HBA to recover\n");
+ dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
error = -ENODEV;
goto out_up;
}
@@ -6821,8 +6852,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
spin_lock_irqsave(&instance->hba_lock, flags);
if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
- dev_err(&instance->pdev->dev, "timed out while waiting"
- "for HBA to recover\n");
+ dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
return -ENODEV;
}
spin_unlock_irqrestore(&instance->hba_lock, flags);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index e413113c86ac..f237d0003df3 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -782,7 +782,8 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
- pd = MR_ArPdGet(arRef, physArm + 1, map);
+ physArm = physArm + 1;
+ pd = MR_ArPdGet(arRef, physArm, map);
if (pd != MR_PD_INVALID)
*pDevHandle = MR_PdDevHandleGet(pd, map);
}
@@ -879,7 +880,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
/* Get alternate Pd. */
- pd = MR_ArPdGet(arRef, physArm + 1, map);
+ physArm = physArm + 1;
+ pd = MR_ArPdGet(arRef, physArm, map);
if (pd != MR_PD_INVALID)
/* Get dev handle from Pd */
*pDevHandle = MR_PdDevHandleGet(pd, map);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 52d8bbf7feb5..24778ba4b6e8 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -748,6 +748,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
goto fail_fw_init;
}
+ instance->fw_sync_cache_support = (scratch_pad_2 &
+ MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
+ dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
+ instance->fw_sync_cache_support ? "Yes" : "No");
+
IOCInitMessage =
dma_alloc_coherent(&instance->pdev->dev,
sizeof(struct MPI2_IOC_INIT_REQUEST),
@@ -2000,6 +2005,8 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
pRAID_Context->regLockFlags |=
(MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
+ pRAID_Context->Type = MPI2_TYPE_CUDA;
+ pRAID_Context->nseg = 0x1;
} else if (fusion->fast_path_io) {
pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
pRAID_Context->configSeqNum = 0;
@@ -2035,12 +2042,10 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
pRAID_Context->timeoutValue =
cpu_to_le16((os_timeout_value > timeout_limit) ?
timeout_limit : os_timeout_value);
- if (fusion->adapter_type == INVADER_SERIES) {
- pRAID_Context->Type = MPI2_TYPE_CUDA;
- pRAID_Context->nseg = 0x1;
+ if (fusion->adapter_type == INVADER_SERIES)
io_request->IoFlags |=
cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
- }
+
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -2463,12 +2468,15 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
/* Start collecting crash, if DMA bit is done */
if ((fw_state == MFI_STATE_FAULT) && dma_state)
schedule_work(&instance->crash_init);
- else if (fw_state == MFI_STATE_FAULT)
- schedule_work(&instance->work_init);
+ else if (fw_state == MFI_STATE_FAULT) {
+ if (instance->unload == 0)
+ schedule_work(&instance->work_init);
+ }
} else if (fw_state == MFI_STATE_FAULT) {
dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt "
"for scsi%d\n", instance->host->host_no);
- schedule_work(&instance->work_init);
+ if (instance->unload == 0)
+ schedule_work(&instance->work_init);
}
}
@@ -2823,6 +2831,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
"will reset adapter scsi%d.\n",
instance->host->host_no);
+ *convert = 1;
retval = 1;
}
out:
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 95356a82ee99..fa61baf7c74d 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -478,6 +478,13 @@ typedef struct _MPI2_CONFIG_REPLY {
#define MPI26_MFGPAGE_DEVID_SAS3324_3 (0x00C2)
#define MPI26_MFGPAGE_DEVID_SAS3324_4 (0x00C3)
+#define MPI26_MFGPAGE_DEVID_SAS3516 (0x00AA)
+#define MPI26_MFGPAGE_DEVID_SAS3516_1 (0x00AB)
+#define MPI26_MFGPAGE_DEVID_SAS3416 (0x00AC)
+#define MPI26_MFGPAGE_DEVID_SAS3508 (0x00AD)
+#define MPI26_MFGPAGE_DEVID_SAS3508_1 (0x00AE)
+#define MPI26_MFGPAGE_DEVID_SAS3408 (0x00AF)
+
/*Manufacturing Page 0 */
typedef struct _MPI2_CONFIG_PAGE_MAN_0 {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index a1a5ceb42ce6..f00ef88a378a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -849,7 +849,7 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
ack_request->EventContext = mpi_reply->EventContext;
ack_request->VF_ID = 0; /* TODO */
ack_request->VP_ID = 0;
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
out:
@@ -1078,7 +1078,7 @@ _base_interrupt(int irq, void *bus_id)
* new reply host index value in ReplyPostIndex Field and msix_index
* value in MSIxIndex field.
*/
- if (ioc->msix96_vector)
+ if (ioc->combined_reply_queue)
writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
MPI2_RPHI_MSIX_INDEX_SHIFT),
ioc->replyPostRegisterIndex[msix_index/8]);
@@ -1959,7 +1959,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
struct msix_entry *entries, *a;
int r;
- int i;
+ int i, local_max_msix_vectors;
u8 try_msix = 0;
if (msix_disable == -1 || msix_disable == 0)
@@ -1979,13 +1979,15 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc->cpu_count, max_msix_vectors);
if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
- max_msix_vectors = 8;
+ local_max_msix_vectors = 8;
+ else
+ local_max_msix_vectors = max_msix_vectors;
- if (max_msix_vectors > 0) {
- ioc->reply_queue_count = min_t(int, max_msix_vectors,
+ if (local_max_msix_vectors > 0) {
+ ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
ioc->reply_queue_count);
ioc->msix_vector_count = ioc->reply_queue_count;
- } else if (max_msix_vectors == 0)
+ } else if (local_max_msix_vectors == 0)
goto try_ioapic;
if (ioc->msix_vector_count < ioc->cpu_count)
@@ -2050,7 +2052,7 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
_base_free_irq(ioc);
_base_disable_msix(ioc);
- if (ioc->msix96_vector) {
+ if (ioc->combined_reply_queue) {
kfree(ioc->replyPostRegisterIndex);
ioc->replyPostRegisterIndex = NULL;
}
@@ -2160,7 +2162,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
/* Use the Combined reply queue feature only for SAS3 C0 & higher
* revision HBAs and also only when reply queue count is greater than 8
*/
- if (ioc->msix96_vector && ioc->reply_queue_count > 8) {
+ if (ioc->combined_reply_queue && ioc->reply_queue_count > 8) {
/* Determine the Supplemental Reply Post Host Index Registers
* Addresse. Supplemental Reply Post Host Index Registers
* starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
@@ -2168,7 +2170,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
* MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one.
*/
ioc->replyPostRegisterIndex = kcalloc(
- MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT,
+ ioc->combined_reply_index_count,
sizeof(resource_size_t *), GFP_KERNEL);
if (!ioc->replyPostRegisterIndex) {
dfailprintk(ioc, printk(MPT3SAS_FMT
@@ -2178,14 +2180,14 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
goto out_fail;
}
- for (i = 0; i < MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT; i++) {
+ for (i = 0; i < ioc->combined_reply_index_count; i++) {
ioc->replyPostRegisterIndex[i] = (resource_size_t *)
((u8 *)&ioc->chip->Doorbell +
MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
(i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
}
} else
- ioc->msix96_vector = 0;
+ ioc->combined_reply_queue = 0;
if (ioc->is_warpdrive) {
ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
@@ -2462,15 +2464,15 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
#endif
/**
- * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
+ * _base_put_smid_scsi_io - send SCSI_IO request to firmware
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
*
* Return nothing.
*/
-void
-mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
+static void
+_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
{
Mpi2RequestDescriptorUnion_t descriptor;
u64 *request = (u64 *)&descriptor;
@@ -2486,15 +2488,15 @@ mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
}
/**
- * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
+ * _base_put_smid_fast_path - send fast path request to firmware
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
*
* Return nothing.
*/
-void
-mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+static void
+_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 handle)
{
Mpi2RequestDescriptorUnion_t descriptor;
@@ -2511,14 +2513,14 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
}
/**
- * mpt3sas_base_put_smid_hi_priority - send Task Managment request to firmware
+ * _base_put_smid_hi_priority - send Task Management request to firmware
* @ioc: per adapter object
* @smid: system request message index
* @msix_task: msix_task will be same as msix of IO in case of task abort else 0.
* Return nothing.
*/
-void
-mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+static void
+_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 msix_task)
{
Mpi2RequestDescriptorUnion_t descriptor;
@@ -2535,14 +2537,14 @@ mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
}
/**
- * mpt3sas_base_put_smid_default - Default, primarily used for config pages
+ * _base_put_smid_default - Default, primarily used for config pages
* @ioc: per adapter object
* @smid: system request message index
*
* Return nothing.
*/
-void
-mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+static void
+_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
Mpi2RequestDescriptorUnion_t descriptor;
u64 *request = (u64 *)&descriptor;
@@ -2557,6 +2559,95 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
}
/**
+ * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
+ * Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle, unused in this function, for function type match
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
+ descriptor.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_fast_path_atomic - send fast path request to firmware
+ * using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle, unused in this function, for function type match
+ * Return nothing.
+ */
+static void
+_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
+ descriptor.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_hi_priority_atomic - send Task Management request to
+ * firmware using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_task: msix_task will be same as the msix of the IO in case of task abort, else 0
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 msix_task)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+ descriptor.MSIxIndex = msix_task;
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_default_atomic - Default, primarily used for config pages,
+ * using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ descriptor.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
* _base_display_OEMs_branding - Display branding string
* @ioc: per adapter object
*
@@ -4070,7 +4161,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
ioc->ioc_link_reset_in_progress = 1;
init_completion(&ioc->base_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
@@ -4170,7 +4261,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
ioc->base_cmds.smid = smid;
memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
init_completion(&ioc->base_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -4355,6 +4446,8 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
if ((facts->IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
ioc->rdpq_array_capable = 1;
+ if (facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
+ ioc->atomic_desc_capable = 1;
facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
facts->IOCRequestFrameSize =
le16_to_cpu(mpi_reply.IOCRequestFrameSize);
@@ -4582,7 +4675,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
init_completion(&ioc->port_enable_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -4645,7 +4738,7 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
return 0;
}
@@ -4764,7 +4857,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
mpi_request->EventMasks[i] =
cpu_to_le32(ioc->event_masks[i]);
init_completion(&ioc->base_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -5138,7 +5231,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
/* initialize reply post host index */
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
- if (ioc->msix96_vector)
+ if (ioc->combined_reply_queue)
writel((reply_q->msix_index & 7)<<
MPI2_RPHI_MSIX_INDEX_SHIFT,
ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
@@ -5280,9 +5373,23 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->build_sg = &_base_build_sg_ieee;
ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
+
break;
}
+ if (ioc->atomic_desc_capable) {
+ ioc->put_smid_default = &_base_put_smid_default_atomic;
+ ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
+ ioc->put_smid_fast_path = &_base_put_smid_fast_path_atomic;
+ ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic;
+ } else {
+ ioc->put_smid_default = &_base_put_smid_default;
+ ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
+ ioc->put_smid_fast_path = &_base_put_smid_fast_path;
+ ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
+ }
+
+
/*
* These function pointers are for other requests that don't
* require IEEE scatter gather elements.
@@ -5332,6 +5439,21 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
goto out_free_resources;
}
+ /* allocate memory for pending OS device add list */
+ ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
+ if (ioc->facts.MaxDevHandle % 8)
+ ioc->pend_os_device_add_sz++;
+ ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
+ GFP_KERNEL);
+ if (!ioc->pend_os_device_add)
+ goto out_free_resources;
+
+ ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
+ ioc->device_remove_in_progress =
+ kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
+ if (!ioc->device_remove_in_progress)
+ goto out_free_resources;
+
ioc->fwfault_debug = mpt3sas_fwfault_debug;
/* base internal command bits */
@@ -5414,6 +5536,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
kfree(ioc->reply_post_host_index);
kfree(ioc->pd_handles);
kfree(ioc->blocking_handles);
+ kfree(ioc->device_remove_in_progress);
+ kfree(ioc->pend_os_device_add);
kfree(ioc->tm_cmds.reply);
kfree(ioc->transport_cmds.reply);
kfree(ioc->scsih_cmds.reply);
@@ -5455,6 +5579,8 @@ mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
kfree(ioc->reply_post_host_index);
kfree(ioc->pd_handles);
kfree(ioc->blocking_handles);
+ kfree(ioc->device_remove_in_progress);
+ kfree(ioc->pend_os_device_add);
kfree(ioc->pfacts);
kfree(ioc->ctl_cmds.reply);
kfree(ioc->ctl_cmds.sense);
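
The mpt3sas_base.c hunks above add *_atomic posting helpers that pack the request flags, MSI-X index and SMID into a 4-byte Mpi26AtomicRequestDescriptor_t and post it with a single writel(), and those helpers are selected when the IOC reports atomic-descriptor capability. The user-space sketch below only mimics that 32-bit versus 64-bit posting idea; the structures, the fake_post_*() helpers and the mmio stand-ins are invented for illustration and are not the driver's code.

/* Build: cc -o atomic_post atomic_post.c */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct atomic_desc {            /* 4 bytes, like the atomic descriptor */
        uint8_t  request_flags;
        uint8_t  msix_index;
        uint16_t smid;
};

struct default_desc {           /* 8 bytes, like the legacy descriptor */
        uint8_t  request_flags;
        uint8_t  msix_index;
        uint16_t smid;
        uint16_t lmid;
        uint16_t reserved;
};

static uint32_t mmio32;         /* stand-ins for the two doorbell registers */
static uint64_t mmio64;

static void fake_post_atomic(uint8_t flags, uint8_t msix, uint16_t smid)
{
        struct atomic_desc d = { flags, msix, smid };
        uint32_t word;

        memcpy(&word, &d, sizeof(word));    /* one 32-bit store, like writel() */
        mmio32 = word;
}

static void fake_post_default(uint8_t flags, uint8_t msix, uint16_t smid)
{
        struct default_desc d = { flags, msix, smid, 0, 0 };
        uint64_t word;

        memcpy(&word, &d, sizeof(word));    /* 64-bit store, like _base_writeq() */
        mmio64 = word;
}

int main(void)
{
        fake_post_atomic(0x00, 3, 42);      /* flags, msix index, smid (illustrative values) */
        fake_post_default(0x02, 3, 42);
        printf("atomic word 0x%08x, default word 0x%016llx\n",
               (unsigned)mmio32, (unsigned long long)mmio64);
        return 0;
}
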
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 3e71bc1b4a80..8de0eda8cd00 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -73,9 +73,9 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "13.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 13
-#define MPT3SAS_MINOR_VERSION 100
+#define MPT3SAS_DRIVER_VERSION "14.101.00.00"
+#define MPT3SAS_MAJOR_VERSION 14
+#define MPT3SAS_MINOR_VERSION 101
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -300,8 +300,9 @@
* There are twelve Supplemental Reply Post Host Index Registers
* and each register is at offset 0x10 bytes from the previous one.
*/
-#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT 12
-#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
+#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12
+#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16
+#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
/* OEM Identifiers */
#define MFG10_OEM_ID_INVALID (0x00000000)
@@ -375,7 +376,6 @@ struct MPT3SAS_TARGET {
* per device private data
*/
#define MPT_DEVICE_FLAGS_INIT 0x01
-#define MPT_DEVICE_TLR_ON 0x02
#define MFG_PAGE10_HIDE_SSDS_MASK (0x00000003)
#define MFG_PAGE10_HIDE_ALL_DISKS (0x00)
@@ -736,7 +736,10 @@ typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER *ioc, void *psge,
typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc,
void *paddr);
-
+/* To support atomic and non-atomic descriptors */
+typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 funcdep);
+typedef void (*PUT_SMID_DEFAULT) (struct MPT3SAS_ADAPTER *ioc, u16 smid);
/* IOC Facts and Port Facts converted from little endian to cpu */
union mpi3_version_union {
@@ -1079,6 +1082,9 @@ struct MPT3SAS_ADAPTER {
void *pd_handles;
u16 pd_handles_sz;
+ void *pend_os_device_add;
+ u16 pend_os_device_add_sz;
+
/* config page */
u16 config_page_sz;
void *config_page;
@@ -1156,7 +1162,8 @@ struct MPT3SAS_ADAPTER {
u8 reply_queue_count;
struct list_head reply_queue_list;
- u8 msix96_vector;
+ u8 combined_reply_queue;
+ u8 combined_reply_index_count;
/* reply post register index */
resource_size_t **replyPostRegisterIndex;
@@ -1187,6 +1194,15 @@ struct MPT3SAS_ADAPTER {
struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi;
+ void *device_remove_in_progress;
+ u16 device_remove_in_progress_sz;
+ u8 is_gen35_ioc;
+ u8 atomic_desc_capable;
+ PUT_SMID_IO_FP_HIP put_smid_scsi_io;
+ PUT_SMID_IO_FP_HIP put_smid_fast_path;
+ PUT_SMID_IO_FP_HIP put_smid_hi_priority;
+ PUT_SMID_DEFAULT put_smid_default;
+
};
typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1232,13 +1248,6 @@ u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid);
-void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid,
- u16 handle);
-void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
- u16 handle);
-void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc,
- u16 smid, u16 msix_task);
-void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
void mpt3sas_base_initialize_callback_handler(void);
u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func);
void mpt3sas_base_release_callback_handler(u8 cb_idx);
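
The PUT_SMID_IO_FP_HIP and PUT_SMID_DEFAULT typedefs above let each adapter carry its own descriptor-posting callbacks, bound once at attach time according to atomic_desc_capable, so callers never branch on the capability. A self-contained sketch of that dispatch pattern follows; the fake_ioc structure and fake_attach() helper are invented names, not driver interfaces.

#include <stdint.h>
#include <stdio.h>

struct fake_ioc;
typedef void (*put_smid_default_fn)(struct fake_ioc *ioc, uint16_t smid);

struct fake_ioc {
        int atomic_desc_capable;
        put_smid_default_fn put_smid_default;
};

static void put_default_legacy(struct fake_ioc *ioc, uint16_t smid)
{
        (void)ioc;
        printf("64-bit descriptor post, smid %u\n", smid);
}

static void put_default_atomic(struct fake_ioc *ioc, uint16_t smid)
{
        (void)ioc;
        printf("32-bit atomic descriptor post, smid %u\n", smid);
}

static void fake_attach(struct fake_ioc *ioc)
{
        /* mirrors the one-time selection done at attach time */
        ioc->put_smid_default = ioc->atomic_desc_capable ?
                put_default_atomic : put_default_legacy;
}

int main(void)
{
        struct fake_ioc ioc = { .atomic_desc_capable = 1 };

        fake_attach(&ioc);
        ioc.put_smid_default(&ioc, 7);   /* callers just invoke the pointer */
        return 0;
}
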
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index cebfd734fd76..dd6270125614 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -384,7 +384,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
_config_display_some_debug(ioc, smid, "config_request", NULL);
init_completion(&ioc->config_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 26cdc127ac89..050bd788ad02 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -654,6 +654,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
size_t data_in_sz = 0;
long ret;
u16 wait_state_count;
+ u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
issue_reset = 0;
@@ -738,10 +739,13 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
data_in_sz = karg.data_in_size;
if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
- mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
- if (!le16_to_cpu(mpi_request->FunctionDependent1) ||
- le16_to_cpu(mpi_request->FunctionDependent1) >
- ioc->facts.MaxDevHandle) {
+ mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+ mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT ||
+ mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH) {
+
+ device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
+ if (!device_handle || (device_handle >
+ ioc->facts.MaxDevHandle)) {
ret = -EINVAL;
mpt3sas_base_free_smid(ioc, smid);
goto out;
@@ -797,14 +801,20 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
scsiio_request->SenseBufferLowAddress =
mpt3sas_base_get_sense_buffer_dma(ioc, smid);
memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
+ if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ ioc->name, device_handle));
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
-
if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
- mpt3sas_base_put_smid_scsi_io(ioc, smid,
- le16_to_cpu(mpi_request->FunctionDependent1));
+ ioc->put_smid_scsi_io(ioc, smid, device_handle);
else
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_SCSI_TASK_MGMT:
@@ -827,11 +837,19 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
}
+ if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ ioc->name, device_handle));
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
tm_request->DevHandle));
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
- mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+ ioc->put_smid_hi_priority(ioc, smid, 0);
break;
}
case MPI2_FUNCTION_SMP_PASSTHROUGH:
@@ -862,16 +880,30 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
data_in_sz);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_SATA_PASSTHROUGH:
+ {
+ if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ ioc->name, device_handle));
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+ ioc->put_smid_default(ioc, smid);
+ break;
+ }
case MPI2_FUNCTION_FW_DOWNLOAD:
case MPI2_FUNCTION_FW_UPLOAD:
{
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
data_in_sz);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_TOOLBOX:
@@ -886,7 +918,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
}
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
@@ -905,7 +937,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
default:
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
@@ -1064,7 +1096,10 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
break;
case MPI25_VERSION:
case MPI26_VERSION:
- karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
+ if (ioc->is_gen35_ioc)
+ karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35;
+ else
+ karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
break;
}
@@ -1491,7 +1526,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
cpu_to_le32(ioc->product_specific[buffer_type][i]);
init_completion(&ioc->ctl_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
@@ -1838,7 +1873,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
mpi_request->VP_ID = 0;
init_completion(&ioc->ctl_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
@@ -2105,7 +2140,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
mpi_request->VP_ID = 0;
init_completion(&ioc->ctl_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
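
The mpt3sas_ctl.c hunks above make the ioctl path look up the device handle once and reject SCSI I/O, task management and SATA pass-through requests whose handle is marked in the device_remove_in_progress bitmap. The sketch below mimics that guard with a plain user-space bitmap; the handle values, sizes and helper names are invented, and the driver itself uses test_bit()/set_bit() on the kzalloc'd buffer added in this series.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_DEV_HANDLE 1024
#define BITS_PER_WORD  (CHAR_BIT * sizeof(unsigned long))

static unsigned long remove_in_progress[MAX_DEV_HANDLE / BITS_PER_WORD];

static void mark_removing(uint16_t handle)          /* ~ set_bit() */
{
        remove_in_progress[handle / BITS_PER_WORD] |=
                1UL << (handle % BITS_PER_WORD);
}

static int is_removing(uint16_t handle)             /* ~ test_bit() */
{
        return !!(remove_in_progress[handle / BITS_PER_WORD] &
                  (1UL << (handle % BITS_PER_WORD)));
}

static int issue_passthrough(uint16_t handle)
{
        if (is_removing(handle)) {
                fprintf(stderr,
                        "handle(0x%04x): rejected, removal in progress\n",
                        handle);
                return -1;               /* the driver frees the smid and returns -EINVAL */
        }
        printf("handle(0x%04x): request posted\n", handle);
        return 0;
}

int main(void)
{
        mark_removing(0x0009);           /* e.g. a target reset was sent for this handle */
        issue_passthrough(0x0009);       /* rejected */
        issue_passthrough(0x000a);       /* allowed */
        return 0;
}
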
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 89408356d252..f3e17a8c1b07 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -143,6 +143,7 @@ struct mpt3_ioctl_pci_info {
#define MPT2_IOCTL_INTERFACE_SAS2 (0x04)
#define MPT2_IOCTL_INTERFACE_SAS2_SSS6200 (0x05)
#define MPT3_IOCTL_INTERFACE_SAS3 (0x06)
+#define MPT3_IOCTL_INTERFACE_SAS35 (0x07)
#define MPT2_IOCTL_VERSION_LENGTH (32)
/**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 1c4744e78173..5c8f75247d73 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -423,7 +423,7 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
return 0;
}
- /* we hit this becuase the given parent handle doesn't exist */
+ /* we hit this because the given parent handle doesn't exist */
if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
return -ENXIO;
@@ -788,6 +788,11 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
list_add_tail(&sas_device->list, &ioc->sas_device_list);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (ioc->hide_drives) {
+ clear_bit(sas_device->handle, ioc->pend_os_device_add);
+ return;
+ }
+
if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
sas_device->sas_address_parent)) {
_scsih_sas_device_remove(ioc, sas_device);
@@ -803,7 +808,8 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
sas_device->sas_address_parent);
_scsih_sas_device_remove(ioc, sas_device);
}
- }
+ } else
+ clear_bit(sas_device->handle, ioc->pend_os_device_add);
}
/**
@@ -1517,7 +1523,7 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
/*
* raid transport support -
* Enabled for SLES11 and newer, in older kernels the driver will panic when
- * unloading the driver followed by a load - I beleive that the subroutine
+ * unloading the driver followed by a load - I believe that the subroutine
* raid_class_release() is not cleaning up properly.
*/
@@ -2279,7 +2285,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
msix_task = scsi_lookup->msix_io;
else
msix_task = 0;
- mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
+ ioc->put_smid_hi_priority(ioc, smid, msix_task);
wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -2837,7 +2843,7 @@ _scsih_internal_device_block(struct scsi_device *sdev,
if (r == -EINVAL)
sdev_printk(KERN_WARNING, sdev,
"device_block failed with return(%d) for handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle, r);
+ r, sas_device_priv_data->sas_target->handle);
}
/**
@@ -2867,20 +2873,20 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
sdev_printk(KERN_WARNING, sdev,
"device_unblock failed with return(%d) for handle(0x%04x) "
"performing a block followed by an unblock\n",
- sas_device_priv_data->sas_target->handle, r);
+ r, sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 1;
r = scsi_internal_device_block(sdev);
if (r)
sdev_printk(KERN_WARNING, sdev, "retried device_block "
"failed with return(%d) for handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle, r);
+ r, sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 0;
r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
if (r)
sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
" failed with return(%d) for handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle, r);
+ r, sas_device_priv_data->sas_target->handle);
}
}
@@ -2942,7 +2948,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
* @ioc: per adapter object
* @handle: device handle
*
- * During device pull we need to appropiately set the sdev state.
+ * During device pull we need to appropriately set the sdev state.
*/
static void
_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
@@ -2971,7 +2977,7 @@ _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
* @handle: device handle
*
- * During device pull we need to appropiately set the sdev state.
+ * During device pull we need to appropriately set the sdev state.
*/
static void
_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -3138,6 +3144,8 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if (test_bit(handle, ioc->pd_handles))
return;
+ clear_bit(handle, ioc->pend_os_device_add);
+
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
if (sas_device && sas_device->starget &&
@@ -3192,7 +3200,8 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+ set_bit(handle, ioc->device_remove_in_progress);
+ ioc->put_smid_hi_priority(ioc, smid, 0);
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
out:
@@ -3291,7 +3300,7 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
mpi_request->DevHandle = mpi_request_tm->DevHandle;
- mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);
+ ioc->put_smid_default(ioc, smid_sas_ctrl);
return _scsih_check_for_pending_tm(ioc, smid);
}
@@ -3326,6 +3335,11 @@ _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
le16_to_cpu(mpi_reply->IOCStatus),
le32_to_cpu(mpi_reply->IOCLogInfo)));
+ if (le16_to_cpu(mpi_reply->IOCStatus) ==
+ MPI2_IOCSTATUS_SUCCESS) {
+ clear_bit(le16_to_cpu(mpi_reply->DevHandle),
+ ioc->device_remove_in_progress);
+ }
} else {
pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
@@ -3381,7 +3395,7 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+ ioc->put_smid_hi_priority(ioc, smid, 0);
}
/**
@@ -3473,7 +3487,7 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event,
ack_request->EventContext = event_context;
ack_request->VF_ID = 0; /* TODO */
ack_request->VP_ID = 0;
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
}
/**
@@ -3530,7 +3544,7 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
mpi_request->DevHandle = handle;
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
}
/**
@@ -3930,7 +3944,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
* _scsih_setup_eedp - setup MPI request for EEDP transfer
* @ioc: per adapter object
* @scmd: pointer to scsi command object
- * @mpi_request: pointer to the SCSI_IO reqest message frame
+ * @mpi_request: pointer to the SCSI_IO request message frame
*
* Supporting protection 1 and 3.
*
@@ -3983,6 +3997,9 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
mpi_request_3v->EEDPBlockSize =
cpu_to_le16(scmd->device->sector_size);
+
+ if (ioc->is_gen35_ioc)
+ eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
}
@@ -4084,7 +4101,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
- /* device busy with task managment */
+ /* device busy with task management */
} else if (sas_target_priv_data->tm_busy ||
sas_device_priv_data->block)
return SCSI_MLQUEUE_DEVICE_BUSY;
@@ -4154,12 +4171,12 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
MPI25_SCSIIO_IOFLAGS_FAST_PATH);
- mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
+ ioc->put_smid_fast_path(ioc, smid, handle);
} else
- mpt3sas_base_put_smid_scsi_io(ioc, smid,
+ ioc->put_smid_scsi_io(ioc, smid,
le16_to_cpu(mpi_request->DevHandle));
} else
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
return 0;
out:
@@ -4658,7 +4675,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
mpi_request->DevHandle =
cpu_to_le16(sas_device_priv_data->sas_target->handle);
- mpt3sas_base_put_smid_scsi_io(ioc, smid,
+ ioc->put_smid_scsi_io(ioc, smid,
sas_device_priv_data->sas_target->handle);
return 0;
}
@@ -5383,10 +5400,10 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
sas_device->handle, handle);
sas_target_priv_data->handle = handle;
sas_device->handle = handle;
- if (sas_device_pg0.Flags &
+ if (le16_to_cpu(sas_device_pg0.Flags) &
MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
sas_device->enclosure_level =
- le16_to_cpu(sas_device_pg0.EnclosureLevel);
+ sas_device_pg0.EnclosureLevel;
memcpy(sas_device->connector_name,
sas_device_pg0.ConnectorName, 4);
sas_device->connector_name[4] = '\0';
@@ -5465,6 +5482,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
if (!(_scsih_is_end_device(device_info)))
return -1;
+ set_bit(handle, ioc->pend_os_device_add);
sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
/* check if device is present */
@@ -5483,6 +5501,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
sas_device = mpt3sas_get_sdev_by_addr(ioc,
sas_address);
if (sas_device) {
+ clear_bit(handle, ioc->pend_os_device_add);
sas_device_put(sas_device);
return -1;
}
@@ -5513,9 +5532,10 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
- if (sas_device_pg0.Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+ if (le16_to_cpu(sas_device_pg0.Flags)
+ & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
sas_device->enclosure_level =
- le16_to_cpu(sas_device_pg0.EnclosureLevel);
+ sas_device_pg0.EnclosureLevel;
memcpy(sas_device->connector_name,
sas_device_pg0.ConnectorName, 4);
sas_device->connector_name[4] = '\0';
@@ -5806,6 +5826,9 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
_scsih_check_device(ioc, sas_address, handle,
phy_number, link_rate);
+ if (!test_bit(handle, ioc->pend_os_device_add))
+ break;
+
case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
@@ -6267,7 +6290,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
handle, phys_disk_num));
init_completion(&ioc->scsih_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -6320,7 +6343,7 @@ _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
{
sdev->no_uld_attach = no_uld_attach ? 1 : 0;
sdev_printk(KERN_INFO, sdev, "%s raid component\n",
- sdev->no_uld_attach ? "hidding" : "exposing");
+ sdev->no_uld_attach ? "hiding" : "exposing");
WARN_ON(scsi_device_reprobe(sdev));
}
@@ -7050,7 +7073,7 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
if (sas_device_pg0->Flags &
MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
sas_device->enclosure_level =
- le16_to_cpu(sas_device_pg0->EnclosureLevel);
+ sas_device_pg0->EnclosureLevel;
memcpy(&sas_device->connector_name[0],
&sas_device_pg0->ConnectorName[0], 4);
} else {
@@ -7112,6 +7135,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
sas_device_pg0.SASAddress =
le64_to_cpu(sas_device_pg0.SASAddress);
sas_device_pg0.Slot = le16_to_cpu(sas_device_pg0.Slot);
+ sas_device_pg0.Flags = le16_to_cpu(sas_device_pg0.Flags);
_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
}
@@ -7723,6 +7747,9 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
complete(&ioc->tm_cmds.done);
}
+ memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
+ memset(ioc->device_remove_in_progress, 0,
+ ioc->device_remove_in_progress_sz);
_scsih_fw_event_cleanup_queue(ioc);
_scsih_flush_running_cmds(ioc);
break;
@@ -8113,7 +8140,7 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
if (!ioc->hide_ir_msg)
pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
init_completion(&ioc->scsih_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -8654,6 +8681,12 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
case MPI26_MFGPAGE_DEVID_SAS3324_2:
case MPI26_MFGPAGE_DEVID_SAS3324_3:
case MPI26_MFGPAGE_DEVID_SAS3324_4:
+ case MPI26_MFGPAGE_DEVID_SAS3508:
+ case MPI26_MFGPAGE_DEVID_SAS3508_1:
+ case MPI26_MFGPAGE_DEVID_SAS3408:
+ case MPI26_MFGPAGE_DEVID_SAS3516:
+ case MPI26_MFGPAGE_DEVID_SAS3516_1:
+ case MPI26_MFGPAGE_DEVID_SAS3416:
return MPI26_VERSION;
}
return 0;
@@ -8722,10 +8755,29 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->hba_mpi_version_belonged = hba_mpi_version;
ioc->id = mpt3_ids++;
sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
+ switch (pdev->device) {
+ case MPI26_MFGPAGE_DEVID_SAS3508:
+ case MPI26_MFGPAGE_DEVID_SAS3508_1:
+ case MPI26_MFGPAGE_DEVID_SAS3408:
+ case MPI26_MFGPAGE_DEVID_SAS3516:
+ case MPI26_MFGPAGE_DEVID_SAS3516_1:
+ case MPI26_MFGPAGE_DEVID_SAS3416:
+ ioc->is_gen35_ioc = 1;
+ break;
+ default:
+ ioc->is_gen35_ioc = 0;
+ }
if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
- (ioc->hba_mpi_version_belonged == MPI26_VERSION))
- ioc->msix96_vector = 1;
+ (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
+ ioc->combined_reply_queue = 1;
+ if (ioc->is_gen35_ioc)
+ ioc->combined_reply_index_count =
+ MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
+ else
+ ioc->combined_reply_index_count =
+ MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
+ }
break;
default:
return -ENODEV;
@@ -9128,6 +9180,19 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
PCI_ANY_ID, PCI_ANY_ID },
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
PCI_ANY_ID, PCI_ANY_ID },
+ /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
+ PCI_ANY_ID, PCI_ANY_ID },
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
@@ -9168,7 +9233,7 @@ scsih_init(void)
/* queuecommand callback hander */
scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
- /* task managment callback handler */
+ /* task management callback handler */
tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
/* base internal commands callback handler */
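
One detail in the scsih hunks above is an endianness fix: the 16-bit Flags word from SAS Device Page 0 must be converted with le16_to_cpu() before testing the enclosure-level-valid bit, while EnclosureLevel itself is a single byte and must not be byte-swapped. The short stand-alone illustration below uses an invented flag constant and a raw byte buffer in place of Mpi2SasDevicePage0_t.

#include <stdint.h>
#include <stdio.h>

#define FLAGS_ENCL_LEVEL_VALID 0x0002    /* stand-in for the MPI2 flag bit */

int main(void)
{
        /* Raw bytes as a little-endian device would return them:
         * Flags = 0x0002 stored LE, followed by a one-byte EnclosureLevel of 3. */
        uint8_t wire[3] = { 0x02, 0x00, 0x03 };

        uint16_t flags = (uint16_t)(wire[0] | (wire[1] << 8));  /* le16_to_cpu */
        uint8_t level  = wire[2];        /* single byte: no conversion needed */

        if (flags & FLAGS_ENCL_LEVEL_VALID)
                printf("enclosure level valid: %u\n", (unsigned)level);
        return 0;
}
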
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index b74faf1a69b2..7f1d5785bc30 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -392,7 +392,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
"report_manufacture - send to sas_addr(0x%016llx)\n",
ioc->name, (unsigned long long)sas_address));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1198,7 +1198,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
ioc->name, (unsigned long long)phy->identify.sas_address,
phy->number));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1514,7 +1514,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
ioc->name, (unsigned long long)phy->identify.sas_address,
phy->number, phy_operation));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -2032,7 +2032,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
"%s - sending smp request\n", ioc->name, __func__));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 4c57d9abce7b..7de5d8d75480 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -668,7 +668,7 @@ static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
{
u32 tmp;
tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3));
- if (tmp && 1 << (slot_idx % 32)) {
+ if (tmp & 1 << (slot_idx % 32)) {
mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx);
mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
1 << (slot_idx % 32));
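
The one-character mv_94xx.c change above replaces a logical AND with a bitwise AND, so the register value is actually tested against the slot's bit instead of merely being checked for "non-zero". The short program below shows the difference with made-up values.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t tmp = 0x00000004;       /* only slot 2 is active */
        int slot_idx = 5;

        printf("logical &&: %d (wrong: fires for any non-zero tmp)\n",
               tmp && 1 << (slot_idx % 32));
        printf("bitwise  &: %d (correct: slot 5 is not active)\n",
               !!(tmp & 1 << (slot_idx % 32)));
        return 0;
}
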
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 845affa112f7..337982cf3d63 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3787,11 +3787,11 @@ static long pmcraid_ioctl_passthrough(
direction);
if (rc) {
pmcraid_err("couldn't build passthrough ioadls\n");
- goto out_free_buffer;
+ goto out_free_cmd;
}
} else if (request_size < 0) {
rc = -EINVAL;
- goto out_free_buffer;
+ goto out_free_cmd;
}
/* If data is being written into the device, copy the data from user
@@ -3908,6 +3908,8 @@ out_handle_response:
out_free_sglist:
pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
+
+out_free_cmd:
pmcraid_return_cmd(cmd);
out_free_buffer:
@@ -6018,8 +6020,10 @@ static int __init pmcraid_init(void)
error = pmcraid_netlink_init();
- if (error)
+ if (error) {
+ class_destroy(pmcraid_class);
goto out_unreg_chrdev;
+ }
error = pci_register_driver(&pmcraid_driver);
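
The pmcraid fixes above restore the error-unwind order: a failed ioadl build now releases the command block through the new out_free_cmd label before the buffer is freed, and a failed pmcraid_netlink_init() now destroys the class it had just created. A generic sketch of that layered goto-cleanup pattern follows; the resources, sizes and labels here are illustrative only.

#include <stdio.h>
#include <stdlib.h>

static int do_passthrough(size_t request_size)
{
        void *buffer, *cmd;
        int rc = 0;

        buffer = malloc(64);             /* ~ the ioctl buffer */
        if (!buffer)
                return -1;

        cmd = malloc(32);                /* ~ pmcraid_get_free_cmd() */
        if (!cmd) {
                rc = -1;
                goto out_free_buffer;
        }

        if (request_size == 0) {         /* ~ "couldn't build passthrough ioadls" */
                rc = -1;
                goto out_free_cmd;       /* release cmd too, not just the buffer */
        }

        printf("request of %zu bytes issued\n", request_size);

out_free_cmd:
        free(cmd);
out_free_buffer:
        free(buffer);
        return rc;
}

int main(void)
{
        do_passthrough(0);               /* exercises the failure path */
        do_passthrough(128);
        return 0;
}
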
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 643014f82f7d..1bf8061ff803 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1,4 +1,4 @@
-/*
+ /*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
*
@@ -9,6 +9,7 @@
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
+#include <linux/bsg-lib.h>
/* BSG support for ELS/CT pass through */
void
@@ -16,10 +17,12 @@ qla2x00_bsg_job_done(void *data, void *ptr, int res)
{
srb_t *sp = (srb_t *)ptr;
struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
- bsg_job->reply->result = res;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = res;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
sp->free(vha, sp);
}
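
This and the remaining qla2xxx hunks convert the driver from the old fc_bsg_job interface to bsg-lib: the reply is reached through bsg_job->reply, the host and rport come from fc_bsg_to_shost()/fc_bsg_to_rport(), and a job is completed with bsg_job_done(job, result, reply_payload_rcv_len) instead of a per-job job_done() callback. The fragment below only mimics the shape of that completion path with invented fake_* types; it is not the <linux/bsg-lib.h> API.

#include <stdio.h>

struct fake_bsg_reply {
        int          result;
        unsigned int reply_payload_rcv_len;
};

struct fake_bsg_job {
        struct fake_bsg_reply *reply;
};

static void fake_bsg_job_done(struct fake_bsg_job *job, int result,
                              unsigned int rcv_len)
{
        (void)job;
        printf("job completed: result=%d rcv_len=%u\n", result, rcv_len);
}

static void complete_job(struct fake_bsg_job *job, int res)
{
        struct fake_bsg_reply *reply = job->reply;   /* like bsg_job->reply */

        reply->result = res;                         /* was bsg_job->reply->result */
        fake_bsg_job_done(job, reply->result, reply->reply_payload_rcv_len);
}

int main(void)
{
        struct fake_bsg_reply reply = { 0, 0 };
        struct fake_bsg_job job = { &reply };

        complete_job(&job, 0 /* DID_OK */);
        return 0;
}
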
@@ -28,13 +31,15 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
{
srb_t *sp = (srb_t *)ptr;
struct scsi_qla_host *vha = sp->fcport->vha;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+
struct qla_hw_data *ha = vha->hw;
struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
if (sp->type == SRB_FXIOCB_BCMD) {
piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
- &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
dma_unmap_sg(&ha->pdev->dev,
@@ -116,9 +121,11 @@ qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
}
static int
-qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
+qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int ret = 0;
@@ -131,7 +138,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
}
/* Get the sub command */
- oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
/* Only set config is allowed if config memory is not allocated */
if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
@@ -145,10 +152,10 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
ha->fcp_prio_cfg->attributes &=
~FCP_PRIO_ATTR_ENABLE;
qla24xx_update_all_fcp_prio(vha);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
} else {
ret = -EINVAL;
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
goto exit_fcp_prio_cfg;
}
break;
@@ -160,10 +167,10 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
ha->fcp_prio_cfg->attributes |=
FCP_PRIO_ATTR_ENABLE;
qla24xx_update_all_fcp_prio(vha);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
} else {
ret = -EINVAL;
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
goto exit_fcp_prio_cfg;
}
}
@@ -173,12 +180,12 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
len = bsg_job->reply_payload.payload_len;
if (!len || len > FCP_PRIO_CFG_SIZE) {
ret = -EINVAL;
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
goto exit_fcp_prio_cfg;
}
- bsg_job->reply->result = DID_OK;
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->result = DID_OK;
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
@@ -189,7 +196,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
case QLFC_FCP_PRIO_SET_CONFIG:
len = bsg_job->request_payload.payload_len;
if (!len || len > FCP_PRIO_CFG_SIZE) {
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
ret = -EINVAL;
goto exit_fcp_prio_cfg;
}
@@ -200,7 +207,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x7050,
"Unable to allocate memory for fcp prio "
"config data (%x).\n", FCP_PRIO_CFG_SIZE);
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
ret = -ENOMEM;
goto exit_fcp_prio_cfg;
}
@@ -215,7 +222,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
if (!qla24xx_fcp_prio_cfg_valid(vha,
(struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
ret = -EINVAL;
/* If buffer was invalid, then
* fcp_prio_cfg is of no use
@@ -229,7 +236,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
ha->flags.fcp_prio_enabled = 1;
qla24xx_update_all_fcp_prio(vha);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
break;
default:
ret = -EINVAL;
@@ -237,13 +244,15 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
}
exit_fcp_prio_cfg:
if (!ret)
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return ret;
}
static int
-qla2x00_process_els(struct fc_bsg_job *bsg_job)
+qla2x00_process_els(struct bsg_job *bsg_job)
{
+ struct fc_bsg_request *bsg_request = bsg_job->request;
struct fc_rport *rport;
fc_port_t *fcport = NULL;
struct Scsi_Host *host;
@@ -255,15 +264,15 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
int rval = (DRIVER_ERROR << 16);
uint16_t nextlid = 0;
- if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
- rport = bsg_job->rport;
+ if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
+ rport = fc_bsg_to_rport(bsg_job);
fcport = *(fc_port_t **) rport->dd_data;
host = rport_to_shost(rport);
vha = shost_priv(host);
ha = vha->hw;
type = "FC_BSG_RPT_ELS";
} else {
- host = bsg_job->shost;
+ host = fc_bsg_to_shost(bsg_job);
vha = shost_priv(host);
ha = vha->hw;
type = "FC_BSG_HST_ELS_NOLOGIN";
@@ -296,7 +305,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
}
/* ELS request for rport */
- if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
+ if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
/* make sure the rport is logged in,
* if not perform fabric login
*/
@@ -322,11 +331,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
fcport->d_id.b.al_pa =
- bsg_job->request->rqst_data.h_els.port_id[0];
+ bsg_request->rqst_data.h_els.port_id[0];
fcport->d_id.b.area =
- bsg_job->request->rqst_data.h_els.port_id[1];
+ bsg_request->rqst_data.h_els.port_id[1];
fcport->d_id.b.domain =
- bsg_job->request->rqst_data.h_els.port_id[2];
+ bsg_request->rqst_data.h_els.port_id[2];
fcport->loop_id =
(fcport->d_id.b.al_pa == 0xFD) ?
NPH_FABRIC_CONTROLLER : NPH_F_PORT;
@@ -366,11 +375,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
}
sp->type =
- (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
- SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
+ (bsg_request->msgcode == FC_BSG_RPT_ELS ?
+ SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
sp->name =
- (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
- "bsg_els_rpt" : "bsg_els_hst");
+ (bsg_request->msgcode == FC_BSG_RPT_ELS ?
+ "bsg_els_rpt" : "bsg_els_hst");
sp->u.bsg_job = bsg_job;
sp->free = qla2x00_bsg_sp_free;
sp->done = qla2x00_bsg_job_done;
@@ -378,7 +387,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
ql_dbg(ql_dbg_user, vha, 0x700a,
"bsg rqst type: %s els type: %x - loop-id=%x "
"portid=%-2x%02x%02x.\n", type,
- bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
+ bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
rval = qla2x00_start_sp(sp);
@@ -399,7 +408,7 @@ done_unmap_sg:
goto done_free_fcport;
done_free_fcport:
- if (bsg_job->request->msgcode == FC_BSG_RPT_ELS)
+ if (bsg_request->msgcode == FC_BSG_RPT_ELS)
kfree(fcport);
done:
return rval;
@@ -420,10 +429,11 @@ qla24xx_calc_ct_iocbs(uint16_t dsds)
}
static int
-qla2x00_process_ct(struct fc_bsg_job *bsg_job)
+qla2x00_process_ct(struct bsg_job *bsg_job)
{
srb_t *sp;
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = (DRIVER_ERROR << 16);
@@ -469,7 +479,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
}
loop_id =
- (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
+ (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
>> 24;
switch (loop_id) {
case 0xFC:
@@ -500,9 +510,9 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
- fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
- fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
- fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
+ fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
+ fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
+ fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
fcport->loop_id = loop_id;
/* Alloc SRB structure */
@@ -524,7 +534,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
ql_dbg(ql_dbg_user, vha, 0x7016,
"bsg rqst type: %s else type: %x - "
"loop-id=%x portid=%02x%02x%02x.\n", type,
- (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
+ (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
@@ -697,9 +707,11 @@ done_set_internal:
}
static int
-qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
+qla2x00_process_loopback(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval;
@@ -780,9 +792,9 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
elreq.rcv_dma = rsp_data_dma;
elreq.transfer_size = req_data_len;
- elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
elreq.iteration_count =
- bsg_job->request->rqst_data.h_vendor.vendor_cmd[2];
+ bsg_request->rqst_data.h_vendor.vendor_cmd[2];
if (atomic_read(&vha->loop_state) == LOOP_READY &&
(ha->current_topology == ISP_CFG_F ||
@@ -896,12 +908,12 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
"Vendor request %s failed.\n", type);
rval = 0;
- bsg_job->reply->result = (DID_ERROR << 16);
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = (DID_ERROR << 16);
+ bsg_reply->reply_payload_rcv_len = 0;
} else {
ql_dbg(ql_dbg_user, vha, 0x702d,
"Vendor request %s completed.\n", type);
- bsg_job->reply->result = (DID_OK << 16);
+ bsg_reply->result = (DID_OK << 16);
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, rsp_data,
rsp_data_len);
@@ -930,14 +942,17 @@ done_unmap_req_sg:
bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!rval)
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla84xx_reset(struct fc_bsg_job *bsg_job)
+qla84xx_reset(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -948,7 +963,7 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
return -EINVAL;
}
- flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
@@ -960,17 +975,20 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
} else {
ql_dbg(ql_dbg_user, vha, 0x7031,
"Vendor request 84xx reset completed.\n");
- bsg_job->reply->result = DID_OK;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return rval;
}
static int
-qla84xx_updatefw(struct fc_bsg_job *bsg_job)
+qla84xx_updatefw(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct verify_chip_entry_84xx *mn = NULL;
@@ -1027,7 +1045,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
goto done_free_fw_buf;
}
- flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
memset(mn, 0, sizeof(struct access_chip_84xx));
@@ -1059,7 +1077,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
"Vendor request 84xx updatefw completed.\n");
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
}
dma_pool_free(ha->s_dma_pool, mn, mn_dma);
@@ -1072,14 +1090,17 @@ done_unmap_sg:
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!rval)
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
+qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct access_chip_84xx *mn = NULL;
@@ -1107,7 +1128,7 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
memset(mn, 0, sizeof(struct access_chip_84xx));
mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
mn->entry_count = 1;
- ql84_mgmt = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
+ ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
switch (ql84_mgmt->mgmt.cmd) {
case QLA84_MGMT_READ_MEM:
case QLA84_MGMT_GET_INFO:
@@ -1239,11 +1260,11 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
"Vendor request 84xx mgmt completed.\n");
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
(ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
@@ -1267,14 +1288,17 @@ exit_mgmt:
dma_pool_free(ha->s_dma_pool, mn, mn_dma);
if (!rval)
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla24xx_iidma(struct fc_bsg_job *bsg_job)
+qla24xx_iidma(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
int rval = 0;
struct qla_port_param *port_param = NULL;
@@ -1288,7 +1312,7 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
return -EINVAL;
}
- port_param = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
+ port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
ql_log(ql_log_warn, vha, 0x7048,
"Invalid destination type.\n");
@@ -1343,24 +1367,26 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
sizeof(struct qla_port_param);
- rsp_ptr = ((uint8_t *)bsg_job->reply) +
+ rsp_ptr = ((uint8_t *)bsg_reply) +
sizeof(struct fc_bsg_reply);
memcpy(rsp_ptr, port_param,
sizeof(struct qla_port_param));
}
- bsg_job->reply->result = DID_OK;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return rval;
}
static int
-qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
+qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
uint8_t is_update)
{
+ struct fc_bsg_request *bsg_request = bsg_job->request;
uint32_t start = 0;
int valid = 0;
struct qla_hw_data *ha = vha->hw;
@@ -1368,7 +1394,7 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
if (unlikely(pci_channel_offline(ha->pdev)))
return -EINVAL;
- start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
if (start > ha->optrom_size) {
ql_log(ql_log_warn, vha, 0x7055,
"start %d > optrom_size %d.\n", start, ha->optrom_size);
@@ -1427,9 +1453,10 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
}
static int
-qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
+qla2x00_read_optrom(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1451,20 +1478,22 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
ha->optrom_region_size);
- bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
- bsg_job->reply->result = DID_OK;
+ bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
+ bsg_reply->result = DID_OK;
vfree(ha->optrom_buffer);
ha->optrom_buffer = NULL;
ha->optrom_state = QLA_SWAITING;
mutex_unlock(&ha->optrom_mutex);
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
+qla2x00_update_optrom(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1486,19 +1515,21 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
ha->optrom_region_start, ha->optrom_region_size);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
vfree(ha->optrom_buffer);
ha->optrom_buffer = NULL;
ha->optrom_state = QLA_SWAITING;
mutex_unlock(&ha->optrom_mutex);
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
+qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1509,7 +1540,7 @@ qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1525,30 +1556,32 @@ qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
image->field_address.device, image->field_address.offset,
sizeof(image->field_info), image->field_address.option);
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
image++;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
+qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1557,7 +1590,7 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1571,7 +1604,7 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
sr->status_reg = *sfp;
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
@@ -1579,24 +1612,26 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->reply_payload_rcv_len = sizeof(*sr);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
+qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1605,7 +1640,7 @@ qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1619,28 +1654,30 @@ qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
sizeof(sr->status_reg), sr->field_address.option);
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
+qla2x00_write_i2c(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1649,7 +1686,7 @@ qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1662,28 +1699,30 @@ qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
i2c->device, i2c->offset, i2c->length, i2c->option);
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
+qla2x00_read_i2c(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1692,7 +1731,7 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1704,7 +1743,7 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
i2c->device, i2c->offset, i2c->length, i2c->option);
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
@@ -1713,24 +1752,26 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
+qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
uint32_t rval = EXT_STATUS_OK;
@@ -1895,19 +1936,21 @@ done:
/* Return an error vendor specific response
* and complete the bsg request
*/
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->reply_payload_rcv_len = 0;
- bsg_job->reply->result = (DID_OK) << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = (DID_OK) << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
/* Always return success, vendor rsp carries correct status */
return 0;
}
static int
-qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
+qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = (DRIVER_ERROR << 16);
@@ -1919,7 +1962,7 @@ qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
/* Copy the IOCB specific information */
piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
- &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
/* Dump the vendor information */
ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
@@ -2027,9 +2070,10 @@ done:
}
static int
-qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
+qla26xx_serdes_op(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
int rval = 0;
struct qla_serdes_reg sr;
@@ -2042,13 +2086,13 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
switch (sr.cmd) {
case INT_SC_SERDES_WRITE_REG:
rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
break;
case INT_SC_SERDES_READ_REG:
rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
- bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
+ bsg_reply->reply_payload_rcv_len = sizeof(sr);
break;
default:
ql_dbg(ql_dbg_user, vha, 0x708c,
@@ -2057,19 +2101,21 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
break;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
rval ? EXT_STATUS_MAILBOX : 0;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla8044_serdes_op(struct fc_bsg_job *bsg_job)
+qla8044_serdes_op(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
int rval = 0;
struct qla_serdes_reg_ex sr;
@@ -2082,13 +2128,13 @@ qla8044_serdes_op(struct fc_bsg_job *bsg_job)
switch (sr.cmd) {
case INT_SC_SERDES_WRITE_REG:
rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
break;
case INT_SC_SERDES_READ_REG:
rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
- bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
+ bsg_reply->reply_payload_rcv_len = sizeof(sr);
break;
default:
ql_dbg(ql_dbg_user, vha, 0x70cf,
@@ -2097,19 +2143,21 @@ qla8044_serdes_op(struct fc_bsg_job *bsg_job)
break;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
rval ? EXT_STATUS_MAILBOX : 0;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla27xx_get_flash_upd_cap(struct fc_bsg_job *bsg_job)
+qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct qla_flash_update_caps cap;
@@ -2125,21 +2173,23 @@ qla27xx_get_flash_upd_cap(struct fc_bsg_job *bsg_job)
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
- bsg_job->reply->reply_payload_rcv_len = sizeof(cap);
+ bsg_reply->reply_payload_rcv_len = sizeof(cap);
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla27xx_set_flash_upd_cap(struct fc_bsg_job *bsg_job)
+qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
uint64_t online_fw_attr = 0;
@@ -2158,32 +2208,34 @@ qla27xx_set_flash_upd_cap(struct fc_bsg_job *bsg_job)
(uint64_t)ha->fw_attributes;
if (online_fw_attr != cap.capabilities) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_INVALID_PARAM;
return -EINVAL;
}
if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_INVALID_PARAM;
return -EINVAL;
}
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla27xx_get_bbcr_data(struct fc_bsg_job *bsg_job)
+qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct qla_bbcr_data bbcr;
@@ -2227,27 +2279,30 @@ qla27xx_get_bbcr_data(struct fc_bsg_job *bsg_job)
done:
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
- bsg_job->reply->reply_payload_rcv_len = sizeof(bbcr);
+ bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
+qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
struct link_statistics *stats = NULL;
dma_addr_t stats_dma;
int rval;
- uint32_t *cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd;
+ uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
if (test_bit(UNLOADING, &vha->dpc_flags))
@@ -2281,13 +2336,14 @@ qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
}
- bsg_job->reply->reply_payload_rcv_len = sizeof(*stats);
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_payload_rcv_len = sizeof(*stats);
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
- bsg_job->reply_len = sizeof(*bsg_job->reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_job->reply_len = sizeof(*bsg_reply);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
stats, stats_dma);
@@ -2296,9 +2352,10 @@ qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
}
static int
-qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job)
+qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
int rval;
struct qla_dport_diag *dd;
@@ -2323,13 +2380,14 @@ qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job)
bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
}
- bsg_job->reply->reply_payload_rcv_len = sizeof(*dd);
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_payload_rcv_len = sizeof(*dd);
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
- bsg_job->reply_len = sizeof(*bsg_job->reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_job->reply_len = sizeof(*bsg_reply);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
kfree(dd);
@@ -2337,9 +2395,11 @@ qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job)
}
static int
-qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
+qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
{
- switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+
+ switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
case QL_VND_LOOPBACK:
return qla2x00_process_loopback(bsg_job);
@@ -2413,36 +2473,38 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
}
int
-qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
+qla24xx_bsg_request(struct bsg_job *bsg_job)
{
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
int ret = -EINVAL;
struct fc_rport *rport;
struct Scsi_Host *host;
scsi_qla_host_t *vha;
/* In case no data transferred. */
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
- if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
- rport = bsg_job->rport;
+ if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
+ rport = fc_bsg_to_rport(bsg_job);
host = rport_to_shost(rport);
vha = shost_priv(host);
} else {
- host = bsg_job->shost;
+ host = fc_bsg_to_shost(bsg_job);
vha = shost_priv(host);
}
if (qla2x00_reset_active(vha)) {
ql_dbg(ql_dbg_user, vha, 0x709f,
"BSG: ISP abort active/needed -- cmd=%d.\n",
- bsg_job->request->msgcode);
+ bsg_request->msgcode);
return -EBUSY;
}
ql_dbg(ql_dbg_user, vha, 0x7000,
- "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
+ "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
- switch (bsg_job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
case FC_BSG_HST_ELS_NOLOGIN:
ret = qla2x00_process_els(bsg_job);
@@ -2464,9 +2526,10 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
}
int
-qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
+qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
- scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
int cnt, que;
@@ -2494,13 +2557,13 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
"mbx abort_command "
"failed.\n");
bsg_job->req->errors =
- bsg_job->reply->result = -EIO;
+ bsg_reply->result = -EIO;
} else {
ql_dbg(ql_dbg_user, vha, 0x708a,
"mbx abort_command "
"success.\n");
bsg_job->req->errors =
- bsg_job->reply->result = 0;
+ bsg_reply->result = 0;
}
spin_lock_irqsave(&ha->hardware_lock, flags);
goto done;
@@ -2510,7 +2573,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
- bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
+ bsg_job->req->errors = bsg_reply->result = -ENXIO;
return 0;
done:
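The qla_bsg.c hunks above all follow one pattern: each handler's parameter changes from struct fc_bsg_job to the generic struct bsg_job, the FC request/reply and the Scsi_Host are derived through helpers instead of direct members, and completion goes through bsg_job_done() rather than the old job->job_done() callback. A minimal, hypothetical vendor-command handler written against that pattern (the function name and debug id follow the driver's conventions but are illustrative only; the surrounding qla2xxx headers are assumed):

static int qla2x00_example_vnd_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	uint32_t subcode = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	ql_dbg(ql_dbg_user, vha, 0x7000, "example subcode=0x%x.\n", subcode);

	/* No payload is returned; report success through the FC reply. */
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = 0;
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}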
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 73b12e41d992..5236e3f2a06a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -403,7 +403,7 @@ typedef struct srb {
int iocbs;
union {
struct srb_iocb iocb_cmd;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
struct srb_cmd scmd;
} u;
void (*done)(void *, void *, int);
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 6ca00813c71f..c51d9f3359e3 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -733,8 +733,8 @@ extern int qla82xx_read_temperature(scsi_qla_host_t *);
extern int qla8044_read_temperature(scsi_qla_host_t *);
/* BSG related functions */
-extern int qla24xx_bsg_request(struct fc_bsg_job *);
-extern int qla24xx_bsg_timeout(struct fc_bsg_job *);
+extern int qla24xx_bsg_request(struct bsg_job *);
+extern int qla24xx_bsg_timeout(struct bsg_job *);
extern int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t);
extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
dma_addr_t, size_t, uint32_t);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index b41265a75ed5..221ad8907893 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2197,7 +2197,8 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
els_iocb->entry_type = ELS_IOCB_TYPE;
els_iocb->entry_count = 1;
@@ -2212,8 +2213,8 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->opcode =
sp->type == SRB_ELS_CMD_RPT ?
- bsg_job->request->rqst_data.r_els.els_code :
- bsg_job->request->rqst_data.h_els.command_code;
+ bsg_request->rqst_data.r_els.els_code :
+ bsg_request->rqst_data.h_els.command_code;
els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
els_iocb->port_id[1] = sp->fcport->d_id.b.area;
els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
@@ -2250,7 +2251,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
int loop_iterartion = 0;
int entry_count = 1;
@@ -2327,7 +2328,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
int loop_iterartion = 0;
int entry_count = 1;
@@ -2833,7 +2834,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
struct scatterlist *sg;
int index;
int entry_count = 1;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
/*Update entry type to indicate bidir command */
*((uint32_t *)(&cmd_pkt->entry_type)) =
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 068c4e47fac9..19f18485a854 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1356,7 +1356,8 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
const char func[] = "CT_IOCB";
const char *type;
srb_t *sp;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
+ struct fc_bsg_reply *bsg_reply;
uint16_t comp_status;
int res;
@@ -1365,6 +1366,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
return;
bsg_job = sp->u.bsg_job;
+ bsg_reply = bsg_job->reply;
type = "ct pass-through";
@@ -1373,32 +1375,32 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
* fc payload to the caller
*/
- bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
if (comp_status != CS_COMPLETE) {
if (comp_status == CS_DATA_UNDERRUN) {
res = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
ql_log(ql_log_warn, vha, 0x5048,
"CT pass-through-%s error "
"comp_status-status=0x%x total_byte = 0x%x.\n",
type, comp_status,
- bsg_job->reply->reply_payload_rcv_len);
+ bsg_reply->reply_payload_rcv_len);
} else {
ql_log(ql_log_warn, vha, 0x5049,
"CT pass-through-%s error "
"comp_status-status=0x%x.\n", type, comp_status);
res = DID_ERROR << 16;
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
}
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
(uint8_t *)pkt, sizeof(*pkt));
} else {
res = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
bsg_job->reply_len = 0;
}
@@ -1413,7 +1415,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
const char func[] = "ELS_CT_IOCB";
const char *type;
srb_t *sp;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
+ struct fc_bsg_reply *bsg_reply;
uint16_t comp_status;
uint32_t fw_status[3];
uint8_t* fw_sts_ptr;
@@ -1423,6 +1426,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
if (!sp)
return;
bsg_job = sp->u.bsg_job;
+ bsg_reply = bsg_job->reply;
type = NULL;
switch (sp->type) {
@@ -1452,13 +1456,13 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
* fc payload to the caller
*/
- bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
if (comp_status != CS_COMPLETE) {
if (comp_status == CS_DATA_UNDERRUN) {
res = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
ql_dbg(ql_dbg_user, vha, 0x503f,
@@ -1480,7 +1484,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(((struct els_sts_entry_24xx *)
pkt)->error_subcode_2));
res = DID_ERROR << 16;
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
}
@@ -1489,7 +1493,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
}
else {
res = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
+ bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
bsg_job->reply_len = 0;
}
@@ -1904,7 +1908,9 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
uint16_t scsi_status;
uint16_t thread_id;
uint32_t rval = EXT_STATUS_OK;
- struct fc_bsg_job *bsg_job = NULL;
+ struct bsg_job *bsg_job = NULL;
+ struct fc_bsg_request *bsg_request;
+ struct fc_bsg_reply *bsg_reply;
sts_entry_t *sts;
struct sts_entry_24xx *sts24;
sts = (sts_entry_t *) pkt;
@@ -1919,11 +1925,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
}
sp = req->outstanding_cmds[index];
- if (sp) {
- /* Free outstanding command slot. */
- req->outstanding_cmds[index] = NULL;
- bsg_job = sp->u.bsg_job;
- } else {
+ if (!sp) {
ql_log(ql_log_warn, vha, 0x70b0,
"Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
req->id, index);
@@ -1932,6 +1934,12 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
return;
}
+ /* Free outstanding command slot. */
+ req->outstanding_cmds[index] = NULL;
+ bsg_job = sp->u.bsg_job;
+ bsg_request = bsg_job->request;
+ bsg_reply = bsg_job->reply;
+
if (IS_FWI2_CAPABLE(ha)) {
comp_status = le16_to_cpu(sts24->comp_status);
scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
@@ -1940,14 +1948,14 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
}
- thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
switch (comp_status) {
case CS_COMPLETE:
if (scsi_status == 0) {
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
vha->qla_stats.input_bytes +=
- bsg_job->reply->reply_payload_rcv_len;
+ bsg_reply->reply_payload_rcv_len;
vha->qla_stats.input_requests++;
rval = EXT_STATUS_OK;
}
@@ -2028,11 +2036,11 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
rval = EXT_STATUS_ERR;
break;
}
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
done:
/* Return the vendor specific reply to API */
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
/* Always return DID_OK, bsg will send the vendor specific response
* in this case only */
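The interrupt-path hunks in qla_isr.c above apply the same conversion: the completion routine fetches the srb, pulls the generic job out of sp->u.bsg_job, caches bsg_job->reply once, and fills the FC reply before handing the result back through sp->done(). A condensed, hypothetical sketch of that shape (the function name is illustrative and error decoding is omitted):

static void qla2x00_example_ct_done(scsi_qla_host_t *vha, srb_t *sp,
    uint16_t comp_status)
{
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	int res;

	/* Leave ELS/CT payload decoding to the caller, as the driver does. */
	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	if (comp_status == CS_COMPLETE) {
		res = DID_OK << 16;
		bsg_reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
	} else {
		res = DID_ERROR << 16;
		bsg_reply->reply_payload_rcv_len = 0;
	}
	sp->done(vha, sp, res);
}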
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 15dff7099955..02f1de18bc2b 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
+#include <linux/bsg-lib.h>
#include <scsi/scsi_tcq.h>
#include <linux/utsname.h>
@@ -2206,7 +2207,8 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
{
const char func[] = "IOSB_IOCB";
srb_t *sp;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
+ struct fc_bsg_reply *bsg_reply;
struct srb_iocb *iocb_job;
int res;
struct qla_mt_iocb_rsp_fx00 fstatus;
@@ -2226,6 +2228,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
pkt->dataword_r;
} else {
bsg_job = sp->u.bsg_job;
+ bsg_reply = bsg_job->reply;
memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));
@@ -2257,8 +2260,8 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
sp->fcport->vha, 0x5074,
(uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
- res = bsg_job->reply->result = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len =
+ res = bsg_reply->result = DID_OK << 16;
+ bsg_reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
}
sp->done(vha, sp, res);
@@ -3252,7 +3255,8 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
{
struct srb_iocb *fxio = &sp->u.iocb_cmd;
struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
+ struct fc_bsg_request *bsg_request;
struct fxdisc_entry_fx00 fx_iocb;
uint8_t entry_cnt = 1;
@@ -3301,8 +3305,9 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
} else {
struct scatterlist *sg;
bsg_job = sp->u.bsg_job;
+ bsg_request = bsg_job->request;
piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
- &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
fx_iocb.func_num = piocb_rqst->func_type;
fx_iocb.adapid = piocb_rqst->adapid;
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index a7cfc270bd08..aeebefb1e9f8 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -409,18 +409,9 @@ struct qla4_8xxx_legacy_intr_set {
/* MSI-X Support */
-#define QLA_MSIX_DEFAULT 0x00
-#define QLA_MSIX_RSP_Q 0x01
-
+#define QLA_MSIX_DEFAULT 0
+#define QLA_MSIX_RSP_Q 1
#define QLA_MSIX_ENTRIES 2
-#define QLA_MIDX_DEFAULT 0
-#define QLA_MIDX_RSP_Q 1
-
-struct ql4_msix_entry {
- int have_irq;
- uint16_t msix_vector;
- uint16_t msix_entry;
-};
/*
* ISP Operations
@@ -572,9 +563,6 @@ struct scsi_qla_host {
#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */
#define AF_HA_REMOVAL 12 /* 0x00001000 */
-#define AF_INTx_ENABLED 15 /* 0x00008000 */
-#define AF_MSI_ENABLED 16 /* 0x00010000 */
-#define AF_MSIX_ENABLED 17 /* 0x00020000 */
#define AF_MBOX_COMMAND_NOPOLL 18 /* 0x00040000 */
#define AF_FW_RECOVERY 19 /* 0x00080000 */
#define AF_EEH_BUSY 20 /* 0x00100000 */
@@ -762,8 +750,6 @@ struct scsi_qla_host {
struct isp_operations *isp_ops;
struct ql82xx_hw_data hw;
- struct ql4_msix_entry msix_entries[QLA_MSIX_ENTRIES];
-
uint32_t nx_dev_init_timeout;
uint32_t nx_reset_timeout;
void *fw_dump;
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 2559144f5475..bce96a58f14e 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -134,7 +134,6 @@ int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha);
void qla4_82xx_enable_intrs(struct scsi_qla_host *ha);
void qla4_82xx_disable_intrs(struct scsi_qla_host *ha);
int qla4_8xxx_enable_msix(struct scsi_qla_host *ha);
-void qla4_8xxx_disable_msix(struct scsi_qla_host *ha);
irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id);
irqreturn_t qla4_8xxx_default_intr_handler(int irq, void *dev_id);
irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id);
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 4f9c0f2be89d..d2cd33d8d67f 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1107,7 +1107,7 @@ static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha,
DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
if (is_qla8022(ha)) {
writel(0, &ha->qla4_82xx_reg->host_int);
- if (test_bit(AF_INTx_ENABLED, &ha->flags))
+ if (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled)
qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
0xfbff);
}
@@ -1564,19 +1564,18 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
try_msi:
/* Trying MSI */
- ret = pci_enable_msi(ha->pdev);
- if (!ret) {
+ ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret > 0) {
ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
0, DRIVER_NAME, ha);
if (!ret) {
DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
- set_bit(AF_MSI_ENABLED, &ha->flags);
goto irq_attached;
} else {
ql4_printk(KERN_WARNING, ha,
"MSI: Failed to reserve interrupt %d "
"already in use.\n", ha->pdev->irq);
- pci_disable_msi(ha->pdev);
+ pci_free_irq_vectors(ha->pdev);
}
}
@@ -1592,7 +1591,6 @@ try_intx:
IRQF_SHARED, DRIVER_NAME, ha);
if (!ret) {
DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
- set_bit(AF_INTx_ENABLED, &ha->flags);
goto irq_attached;
} else {
@@ -1614,14 +1612,11 @@ irq_not_attached:
void qla4xxx_free_irqs(struct scsi_qla_host *ha)
{
- if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) {
- if (test_bit(AF_MSIX_ENABLED, &ha->flags)) {
- qla4_8xxx_disable_msix(ha);
- } else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
- free_irq(ha->pdev->irq, ha);
- pci_disable_msi(ha->pdev);
- } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags)) {
- free_irq(ha->pdev->irq, ha);
- }
- }
+ if (!test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
+ return;
+
+ if (ha->pdev->msix_enabled)
+ free_irq(pci_irq_vector(ha->pdev, 1), ha);
+ free_irq(pci_irq_vector(ha->pdev, 0), ha);
+ pci_free_irq_vectors(ha->pdev);
}
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index c291fdff1b33..1da04f323d38 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -2032,10 +2032,7 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
ptid = (uint16_t *)&fw_ddb_entry->isid[1];
*ptid = cpu_to_le16((uint16_t)ddb_entry->sess->target_id);
- DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%02x%02x%02x%02x%02x%02x]\n",
- fw_ddb_entry->isid[5], fw_ddb_entry->isid[4],
- fw_ddb_entry->isid[3], fw_ddb_entry->isid[2],
- fw_ddb_entry->isid[1], fw_ddb_entry->isid[0]));
+ DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%pmR]\n", fw_ddb_entry->isid));
iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options);
memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias));
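This hunk, like the ql4_nx.c and ql4_os.c hunks further down, replaces hand-rolled byte-by-byte hex printing with the kernel's %pM family of printk extensions: %pM prints six bytes colon-separated, %pm drops the separators, and an R suffix reverses the byte order, which matches the old isid[5]..isid[0] ordering. A small illustration with a made-up ISID value:

u8 isid[6] = { 0x02, 0x3d, 0x00, 0x00, 0x00, 0x01 };	/* example value only */

pr_info("ISID [%pmR]\n", isid);	/* "ISID [010000003d02]" */
pr_info("ISID [%pm]\n", isid);	/* "ISID [023d00000001]" */
pr_info("mac %pM\n", isid);	/* "mac 02:3d:00:00:00:01" */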
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 06ddd13cb7cc..e91abb327745 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -3945,7 +3945,7 @@ void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
ha->isp_ops->interrupt_service_routine(ha, intr_status);
if (test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
- test_bit(AF_INTx_ENABLED, &ha->flags))
+ (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled))
qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
0xfbff);
}
@@ -4094,12 +4094,8 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
ha->phy_port_num = sys_info->port_num;
ha->iscsi_pci_func_cnt = sys_info->iscsi_pci_func_cnt;
- DEBUG2(printk("scsi%ld: %s: "
- "mac %02x:%02x:%02x:%02x:%02x:%02x "
- "serial %s\n", ha->host_no, __func__,
- ha->my_mac[0], ha->my_mac[1], ha->my_mac[2],
- ha->my_mac[3], ha->my_mac[4], ha->my_mac[5],
- ha->serial_number));
+ DEBUG2(printk("scsi%ld: %s: mac %pM serial %s\n",
+ ha->host_no, __func__, ha->my_mac, ha->serial_number));
status = QLA_SUCCESS;
@@ -4178,78 +4174,37 @@ qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
spin_unlock_irq(&ha->hardware_lock);
}
-struct ql4_init_msix_entry {
- uint16_t entry;
- uint16_t index;
- const char *name;
- irq_handler_t handler;
-};
-
-static struct ql4_init_msix_entry qla4_8xxx_msix_entries[QLA_MSIX_ENTRIES] = {
- { QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
- "qla4xxx (default)",
- (irq_handler_t)qla4_8xxx_default_intr_handler },
- { QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
- "qla4xxx (rsp_q)", (irq_handler_t)qla4_8xxx_msix_rsp_q },
-};
-
-void
-qla4_8xxx_disable_msix(struct scsi_qla_host *ha)
-{
- int i;
- struct ql4_msix_entry *qentry;
-
- for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
- qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index];
- if (qentry->have_irq) {
- free_irq(qentry->msix_vector, ha);
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n",
- __func__, qla4_8xxx_msix_entries[i].name));
- }
- }
- pci_disable_msix(ha->pdev);
- clear_bit(AF_MSIX_ENABLED, &ha->flags);
-}
-
int
qla4_8xxx_enable_msix(struct scsi_qla_host *ha)
{
- int i, ret;
- struct msix_entry entries[QLA_MSIX_ENTRIES];
- struct ql4_msix_entry *qentry;
-
- for (i = 0; i < QLA_MSIX_ENTRIES; i++)
- entries[i].entry = qla4_8xxx_msix_entries[i].entry;
+ int ret;
- ret = pci_enable_msix_exact(ha->pdev, entries, ARRAY_SIZE(entries));
- if (ret) {
+ ret = pci_alloc_irq_vectors(ha->pdev, QLA_MSIX_ENTRIES,
+ QLA_MSIX_ENTRIES, PCI_IRQ_MSIX);
+ if (ret < 0) {
ql4_printk(KERN_WARNING, ha,
"MSI-X: Failed to enable support -- %d/%d\n",
QLA_MSIX_ENTRIES, ret);
- goto msix_out;
- }
- set_bit(AF_MSIX_ENABLED, &ha->flags);
-
- for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
- qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index];
- qentry->msix_vector = entries[i].vector;
- qentry->msix_entry = entries[i].entry;
- qentry->have_irq = 0;
- ret = request_irq(qentry->msix_vector,
- qla4_8xxx_msix_entries[i].handler, 0,
- qla4_8xxx_msix_entries[i].name, ha);
- if (ret) {
- ql4_printk(KERN_WARNING, ha,
- "MSI-X: Unable to register handler -- %x/%d.\n",
- qla4_8xxx_msix_entries[i].index, ret);
- qla4_8xxx_disable_msix(ha);
- goto msix_out;
- }
- qentry->have_irq = 1;
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n",
- __func__, qla4_8xxx_msix_entries[i].name));
+ return ret;
}
-msix_out:
+
+ ret = request_irq(pci_irq_vector(ha->pdev, 0),
+ qla4_8xxx_default_intr_handler, 0, "qla4xxx (default)",
+ ha);
+ if (ret)
+ goto out_free_vectors;
+
+ ret = request_irq(pci_irq_vector(ha->pdev, 1),
+ qla4_8xxx_msix_rsp_q, 0, "qla4xxx (rsp_q)", ha);
+ if (ret)
+ goto out_free_default_irq;
+
+ return 0;
+
+out_free_default_irq:
+ free_irq(pci_irq_vector(ha->pdev, 0), ha);
+out_free_vectors:
+ pci_free_irq_vectors(ha->pdev);
return ret;
}
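The ql4_isr.c and ql4_nx.c changes above drop the driver-private MSI/MSI-X bookkeeping (the AF_MSI*/AF_INTx flags and the ql4_msix_entry array) in favour of the managed PCI IRQ-vector API: pci_alloc_irq_vectors() selects the vector type, pci_irq_vector() maps an index to a Linux IRQ number, pdev->msi_enabled/msix_enabled replace the flag bits, and pci_free_irq_vectors() tears everything down. A generic probe-time sketch of that pattern; the exdrv_* names are hypothetical and the two handlers are assumed to be defined elsewhere:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t exdrv_default_handler(int irq, void *dev_id);
static irqreturn_t exdrv_rsp_q_handler(int irq, void *dev_id);

static int exdrv_setup_irqs(struct pci_dev *pdev, void *drvdata)
{
	int nvec, ret;

	/* Ask for up to two vectors, falling back from MSI-X to MSI to INTx. */
	nvec = pci_alloc_irq_vectors(pdev, 1, 2,
	    PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	ret = request_irq(pci_irq_vector(pdev, 0), exdrv_default_handler,
	    0, "exdrv (default)", drvdata);
	if (ret)
		goto out_free_vectors;

	if (nvec > 1) {
		ret = request_irq(pci_irq_vector(pdev, 1), exdrv_rsp_q_handler,
		    0, "exdrv (rsp_q)", drvdata);
		if (ret)
			goto out_free_default;
	}
	return 0;

out_free_default:
	free_irq(pci_irq_vector(pdev, 0), drvdata);
out_free_vectors:
	pci_free_irq_vectors(pdev);
	return ret;
}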
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 01c3610a60cf..9fbb33fc90c7 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -6304,13 +6304,9 @@ static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
* ISID would not match firmware generated ISID.
*/
if (is_isid_compare) {
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
- "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
- __func__, old_tddb->isid[5], old_tddb->isid[4],
- old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
- old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
- new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
- new_tddb->isid[0]));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: old ISID [%pmR] New ISID [%pmR]\n",
+ __func__, old_tddb->isid, new_tddb->isid));
if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
sizeof(old_tddb->isid)))
@@ -7925,10 +7921,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
break;
case ISCSI_FLASHNODE_ISID:
- rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
- fnode_sess->isid[0], fnode_sess->isid[1],
- fnode_sess->isid[2], fnode_sess->isid[3],
- fnode_sess->isid[4], fnode_sess->isid[5]);
+ rc = sprintf(buf, "%pm\n", fnode_sess->isid);
break;
case ISCSI_FLASHNODE_TSID:
rc = sprintf(buf, "%u\n", fnode_sess->tsid);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 246456925335..28fea83ae2fe 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -220,8 +220,6 @@ static struct {
{"NAKAMICH", "MJ-5.16S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"NEC", "PD-1 ODX654P", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"NEC", "iStorage", NULL, BLIST_REPORTLUN2},
- {"NETAPP", "LUN C-Mode", NULL, BLIST_SYNC_ALUA},
- {"NETAPP", "INF-01-00", NULL, BLIST_SYNC_ALUA},
{"NRC", "MBR-7", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"NRC", "MBR-7.4", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9a8ccff1121f..c35b6de4ca64 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1998,6 +1998,15 @@ static void scsi_exit_request(void *data, struct request *rq,
kfree(cmd->sense_buffer);
}
+static int scsi_map_queues(struct blk_mq_tag_set *set)
+{
+ struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
+
+ if (shost->hostt->map_queues)
+ return shost->hostt->map_queues(shost);
+ return blk_mq_map_queues(set);
+}
+
static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
struct device *host_dev;
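The new scsi_map_queues() helper above, together with the .map_queues entry added to scsi_mq_ops in the next hunk, lets a host template supply its own blk-mq queue mapping while every other driver keeps the block layer default. A hypothetical LLD hook wired through that plumbing; a real driver would spread hardware queues according to its IRQ affinity, but the default mapping keeps the sketch short:

#include <linux/blk-mq.h>
#include <scsi/scsi_host.h>

static int exdrv_map_queues(struct Scsi_Host *shost)
{
	/* Reuse the core's CPU-to-queue mapping for brevity. */
	return blk_mq_map_queues(&shost->tag_set);
}

static struct scsi_host_template exdrv_template = {
	.name		= "exdrv",
	.map_queues	= exdrv_map_queues,
	/* remaining fields omitted in this sketch */
};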
@@ -2090,6 +2099,7 @@ static struct blk_mq_ops scsi_mq_ops = {
.timeout = scsi_timeout,
.init_request = scsi_init_request,
.exit_request = scsi_exit_request,
+ .map_queues = scsi_map_queues,
};
struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
@@ -2732,6 +2742,39 @@ void sdev_evt_send_simple(struct scsi_device *sdev,
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
/**
+ * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
+ * @sdev: SCSI device to count the number of scsi_request_fn() callers for.
+ */
+static int scsi_request_fn_active(struct scsi_device *sdev)
+{
+ struct request_queue *q = sdev->request_queue;
+ int request_fn_active;
+
+ WARN_ON_ONCE(sdev->host->use_blk_mq);
+
+ spin_lock_irq(q->queue_lock);
+ request_fn_active = q->request_fn_active;
+ spin_unlock_irq(q->queue_lock);
+
+ return request_fn_active;
+}
+
+/**
+ * scsi_wait_for_queuecommand() - wait for ongoing queuecommand() calls
+ * @sdev: SCSI device pointer.
+ *
+ * Wait until the ongoing shost->hostt->queuecommand() calls that are
+ * invoked from scsi_request_fn() have finished.
+ */
+static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
+{
+ WARN_ON_ONCE(sdev->host->use_blk_mq);
+
+ while (scsi_request_fn_active(sdev))
+ msleep(20);
+}
+
+/**
* scsi_device_quiesce - Block user issued commands.
* @sdev: scsi device to quiesce.
*
@@ -2815,8 +2858,7 @@ EXPORT_SYMBOL(scsi_target_resume);
* @sdev: device to block
*
* Block request made by scsi lld's to temporarily stop all
- * scsi commands on the specified device. Called from interrupt
- * or normal process context.
+ * scsi commands on the specified device. May sleep.
*
* Returns zero if successful or error if not
*
@@ -2825,6 +2867,10 @@ EXPORT_SYMBOL(scsi_target_resume);
* (which must be a legal transition). When the device is in this
* state, all commands are deferred until the scsi lld reenables
* the device with scsi_device_unblock or device_block_tmo fires.
+ *
+ * To do: avoid scsi_send_eh_cmnd() calling queuecommand() after
+ * scsi_internal_device_block() has blocked a SCSI device, and also
+ * remove the rport mutex lock and unlock calls from srp_queuecommand().
*/
int
scsi_internal_device_block(struct scsi_device *sdev)
@@ -2852,6 +2898,7 @@ scsi_internal_device_block(struct scsi_device *sdev)
spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
+ scsi_wait_for_queuecommand(sdev);
}
return 0;
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 0f3a3869524b..03577bde6ac5 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -30,6 +30,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/bsg-lib.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
@@ -2592,7 +2593,7 @@ fc_rport_final_delete(struct work_struct *work)
/**
- * fc_rport_create - allocates and creates a remote FC port.
+ * fc_remote_port_create - allocates and creates a remote FC port.
* @shost: scsi host the remote port is connected to.
* @channel: Channel on shost port connected to.
* @ids: The world wide names, fc address, and FC4 port
@@ -2605,8 +2606,8 @@ fc_rport_final_delete(struct work_struct *work)
* This routine assumes no locks are held on entry.
*/
static struct fc_rport *
-fc_rport_create(struct Scsi_Host *shost, int channel,
- struct fc_rport_identifiers *ids)
+fc_remote_port_create(struct Scsi_Host *shost, int channel,
+ struct fc_rport_identifiers *ids)
{
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
struct fc_internal *fci = to_fc_internal(shost->transportt);
@@ -2914,7 +2915,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
spin_unlock_irqrestore(shost->host_lock, flags);
/* No consistent binding found - create new remote port entry */
- rport = fc_rport_create(shost, channel, ids);
+ rport = fc_remote_port_create(shost, channel, ids);
return rport;
}
@@ -3554,81 +3555,6 @@ fc_vport_sched_delete(struct work_struct *work)
* BSG support
*/
-
-/**
- * fc_destroy_bsgjob - routine to teardown/delete a fc bsg job
- * @job: fc_bsg_job that is to be torn down
- */
-static void
-fc_destroy_bsgjob(struct fc_bsg_job *job)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&job->job_lock, flags);
- if (job->ref_cnt) {
- spin_unlock_irqrestore(&job->job_lock, flags);
- return;
- }
- spin_unlock_irqrestore(&job->job_lock, flags);
-
- put_device(job->dev); /* release reference for the request */
-
- kfree(job->request_payload.sg_list);
- kfree(job->reply_payload.sg_list);
- kfree(job);
-}
-
-/**
- * fc_bsg_jobdone - completion routine for bsg requests that the LLD has
- * completed
- * @job: fc_bsg_job that is complete
- */
-static void
-fc_bsg_jobdone(struct fc_bsg_job *job)
-{
- struct request *req = job->req;
- struct request *rsp = req->next_rq;
- int err;
-
- err = job->req->errors = job->reply->result;
-
- if (err < 0)
- /* we're only returning the result field in the reply */
- job->req->sense_len = sizeof(uint32_t);
- else
- job->req->sense_len = job->reply_len;
-
- /* we assume all request payload was transferred, residual == 0 */
- req->resid_len = 0;
-
- if (rsp) {
- WARN_ON(job->reply->reply_payload_rcv_len > rsp->resid_len);
-
- /* set reply (bidi) residual */
- rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
- rsp->resid_len);
- }
- blk_complete_request(req);
-}
-
-/**
- * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests
- * @rq: BSG request that holds the job to be destroyed
- */
-static void fc_bsg_softirq_done(struct request *rq)
-{
- struct fc_bsg_job *job = rq->special;
- unsigned long flags;
-
- spin_lock_irqsave(&job->job_lock, flags);
- job->state_flags |= FC_RQST_STATE_DONE;
- job->ref_cnt--;
- spin_unlock_irqrestore(&job->job_lock, flags);
-
- blk_end_request_all(rq, rq->errors);
- fc_destroy_bsgjob(job);
-}
-
/**
* fc_bsg_job_timeout - handler for when a bsg request times out
* @req: request that timed out
@@ -3636,27 +3562,22 @@ static void fc_bsg_softirq_done(struct request *rq)
static enum blk_eh_timer_return
fc_bsg_job_timeout(struct request *req)
{
- struct fc_bsg_job *job = (void *) req->special;
- struct Scsi_Host *shost = job->shost;
+ struct bsg_job *job = (void *) req->special;
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct fc_rport *rport = fc_bsg_to_rport(job);
struct fc_internal *i = to_fc_internal(shost->transportt);
- unsigned long flags;
- int err = 0, done = 0;
+ int err = 0, inflight = 0;
- if (job->rport && job->rport->port_state == FC_PORTSTATE_BLOCKED)
+ if (rport && rport->port_state == FC_PORTSTATE_BLOCKED)
return BLK_EH_RESET_TIMER;
- spin_lock_irqsave(&job->job_lock, flags);
- if (job->state_flags & FC_RQST_STATE_DONE)
- done = 1;
- else
- job->ref_cnt++;
- spin_unlock_irqrestore(&job->job_lock, flags);
+ inflight = bsg_job_get(job);
- if (!done && i->f->bsg_timeout) {
+ if (inflight && i->f->bsg_timeout) {
/* call LLDD to abort the i/o as it has timed out */
err = i->f->bsg_timeout(job);
if (err == -EAGAIN) {
- job->ref_cnt--;
+ bsg_job_put(job);
return BLK_EH_RESET_TIMER;
} else if (err)
printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
@@ -3664,126 +3585,33 @@ fc_bsg_job_timeout(struct request *req)
}
/* the blk_end_sync_io() doesn't check the error */
- if (done)
+ if (!inflight)
return BLK_EH_NOT_HANDLED;
else
return BLK_EH_HANDLED;
}
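With the kref-backed bsg_job, the transport code above pins the job with bsg_job_get() before calling into the LLD's bsg_timeout() hook and drops the reference again when the LLD asks for the timer to be restarted. The hook itself keeps its old contract; a minimal, hypothetical LLD implementation:

static int exdrv_bsg_timeout(struct bsg_job *job)
{
	struct Scsi_Host *shost = fc_bsg_to_shost(job);

	shost_printk(KERN_WARNING, shost, "aborting timed-out bsg job\n");

	/* Returning -EAGAIN would make fc_bsg_job_timeout() put the job
	 * reference back and restart the timer; 0 means the abort was issued. */
	return 0;
}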
-static int
-fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
-{
- size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
-
- BUG_ON(!req->nr_phys_segments);
-
- buf->sg_list = kzalloc(sz, GFP_KERNEL);
- if (!buf->sg_list)
- return -ENOMEM;
- sg_init_table(buf->sg_list, req->nr_phys_segments);
- buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
- buf->payload_len = blk_rq_bytes(req);
- return 0;
-}
-
-
-/**
- * fc_req_to_bsgjob - Allocate/create the fc_bsg_job structure for the
- * bsg request
- * @shost: SCSI Host corresponding to the bsg object
- * @rport: (optional) FC Remote Port corresponding to the bsg object
- * @req: BSG request that needs a job structure
- */
-static int
-fc_req_to_bsgjob(struct Scsi_Host *shost, struct fc_rport *rport,
- struct request *req)
-{
- struct fc_internal *i = to_fc_internal(shost->transportt);
- struct request *rsp = req->next_rq;
- struct fc_bsg_job *job;
- int ret;
-
- BUG_ON(req->special);
-
- job = kzalloc(sizeof(struct fc_bsg_job) + i->f->dd_bsg_size,
- GFP_KERNEL);
- if (!job)
- return -ENOMEM;
-
- /*
- * Note: this is a bit silly.
- * The request gets formatted as a SGIO v4 ioctl request, which
- * then gets reformatted as a blk request, which then gets
- * reformatted as a fc bsg request. And on completion, we have
- * to wrap return results such that SGIO v4 thinks it was a scsi
- * status. I hope this was all worth it.
- */
-
- req->special = job;
- job->shost = shost;
- job->rport = rport;
- job->req = req;
- if (i->f->dd_bsg_size)
- job->dd_data = (void *)&job[1];
- spin_lock_init(&job->job_lock);
- job->request = (struct fc_bsg_request *)req->cmd;
- job->request_len = req->cmd_len;
- job->reply = req->sense;
- job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
- * allocated */
- if (req->bio) {
- ret = fc_bsg_map_buffer(&job->request_payload, req);
- if (ret)
- goto failjob_rls_job;
- }
- if (rsp && rsp->bio) {
- ret = fc_bsg_map_buffer(&job->reply_payload, rsp);
- if (ret)
- goto failjob_rls_rqst_payload;
- }
- job->job_done = fc_bsg_jobdone;
- if (rport)
- job->dev = &rport->dev;
- else
- job->dev = &shost->shost_gendev;
- get_device(job->dev); /* take a reference for the request */
-
- job->ref_cnt = 1;
-
- return 0;
-
-
-failjob_rls_rqst_payload:
- kfree(job->request_payload.sg_list);
-failjob_rls_job:
- kfree(job);
- return -ENOMEM;
-}
-
-
-enum fc_dispatch_result {
- FC_DISPATCH_BREAK, /* on return, q is locked, break from q loop */
- FC_DISPATCH_LOCKED, /* on return, q is locked, continue on */
- FC_DISPATCH_UNLOCKED, /* on return, q is unlocked, continue on */
-};
-
-
/**
* fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD
- * @q: fc host request queue
* @shost: scsi host rport attached to
* @job: bsg job to be processed
*/
-static enum fc_dispatch_result
-fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
- struct fc_bsg_job *job)
+static int fc_bsg_host_dispatch(struct Scsi_Host *shost, struct bsg_job *job)
{
struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
int ret;
+ /* check if we really have all the request data needed */
+ if (job->request_len < cmdlen) {
+ ret = -ENOMSG;
+ goto fail_host_msg;
+ }
+
/* Validate the host command */
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_HST_ADD_RPORT:
cmdlen += sizeof(struct fc_bsg_host_add_rport);
break;
@@ -3815,7 +3643,7 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
case FC_BSG_HST_VENDOR:
cmdlen += sizeof(struct fc_bsg_host_vendor);
if ((shost->hostt->vendor_id == 0L) ||
- (job->request->rqst_data.h_vendor.vendor_id !=
+ (bsg_request->rqst_data.h_vendor.vendor_id !=
shost->hostt->vendor_id)) {
ret = -ESRCH;
goto fail_host_msg;
@@ -3827,24 +3655,19 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
goto fail_host_msg;
}
- /* check if we really have all the request data needed */
- if (job->request_len < cmdlen) {
- ret = -ENOMSG;
- goto fail_host_msg;
- }
-
ret = i->f->bsg_request(job);
if (!ret)
- return FC_DISPATCH_UNLOCKED;
+ return 0;
fail_host_msg:
/* return the errno failure code as the only status */
BUG_ON(job->reply_len < sizeof(uint32_t));
- job->reply->reply_payload_rcv_len = 0;
- job->reply->result = ret;
+ bsg_reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = ret;
job->reply_len = sizeof(uint32_t);
- fc_bsg_jobdone(job);
- return FC_DISPATCH_UNLOCKED;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return 0;
}
@@ -3855,34 +3678,38 @@ fail_host_msg:
static void
fc_bsg_goose_queue(struct fc_rport *rport)
{
- if (!rport->rqst_q)
+ struct request_queue *q = rport->rqst_q;
+ unsigned long flags;
+
+ if (!q)
return;
- /*
- * This get/put dance makes no sense
- */
- get_device(&rport->dev);
- blk_run_queue_async(rport->rqst_q);
- put_device(&rport->dev);
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_run_queue_async(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
}
/**
* fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
- * @q: rport request queue
* @shost: scsi host rport attached to
- * @rport: rport request destined to
* @job: bsg job to be processed
*/
-static enum fc_dispatch_result
-fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost,
- struct fc_rport *rport, struct fc_bsg_job *job)
+static int fc_bsg_rport_dispatch(struct Scsi_Host *shost, struct bsg_job *job)
{
struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
int ret;
+ /* check if we really have all the request data needed */
+ if (job->request_len < cmdlen) {
+ ret = -ENOMSG;
+ goto fail_rport_msg;
+ }
+
/* Validate the rport command */
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
cmdlen += sizeof(struct fc_bsg_rport_els);
goto check_bidi;
@@ -3902,133 +3729,31 @@ check_bidi:
goto fail_rport_msg;
}
- /* check if we really have all the request data needed */
- if (job->request_len < cmdlen) {
- ret = -ENOMSG;
- goto fail_rport_msg;
- }
-
ret = i->f->bsg_request(job);
if (!ret)
- return FC_DISPATCH_UNLOCKED;
+ return 0;
fail_rport_msg:
/* return the errno failure code as the only status */
BUG_ON(job->reply_len < sizeof(uint32_t));
- job->reply->reply_payload_rcv_len = 0;
- job->reply->result = ret;
+ bsg_reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = ret;
job->reply_len = sizeof(uint32_t);
- fc_bsg_jobdone(job);
- return FC_DISPATCH_UNLOCKED;
-}
-
-
-/**
- * fc_bsg_request_handler - generic handler for bsg requests
- * @q: request queue to manage
- * @shost: Scsi_Host related to the bsg object
- * @rport: FC remote port related to the bsg object (optional)
- * @dev: device structure for bsg object
- */
-static void
-fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
- struct fc_rport *rport, struct device *dev)
-{
- struct request *req;
- struct fc_bsg_job *job;
- enum fc_dispatch_result ret;
-
- if (!get_device(dev))
- return;
-
- while (1) {
- if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
- !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
- break;
-
- req = blk_fetch_request(q);
- if (!req)
- break;
-
- if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) {
- req->errors = -ENXIO;
- spin_unlock_irq(q->queue_lock);
- blk_end_request_all(req, -ENXIO);
- spin_lock_irq(q->queue_lock);
- continue;
- }
-
- spin_unlock_irq(q->queue_lock);
-
- ret = fc_req_to_bsgjob(shost, rport, req);
- if (ret) {
- req->errors = ret;
- blk_end_request_all(req, ret);
- spin_lock_irq(q->queue_lock);
- continue;
- }
-
- job = req->special;
-
- /* check if we have the msgcode value at least */
- if (job->request_len < sizeof(uint32_t)) {
- BUG_ON(job->reply_len < sizeof(uint32_t));
- job->reply->reply_payload_rcv_len = 0;
- job->reply->result = -ENOMSG;
- job->reply_len = sizeof(uint32_t);
- fc_bsg_jobdone(job);
- spin_lock_irq(q->queue_lock);
- continue;
- }
-
- /* the dispatch routines will unlock the queue_lock */
- if (rport)
- ret = fc_bsg_rport_dispatch(q, shost, rport, job);
- else
- ret = fc_bsg_host_dispatch(q, shost, job);
-
- /* did dispatcher hit state that can't process any more */
- if (ret == FC_DISPATCH_BREAK)
- break;
-
- /* did dispatcher had released the lock */
- if (ret == FC_DISPATCH_UNLOCKED)
- spin_lock_irq(q->queue_lock);
- }
-
- spin_unlock_irq(q->queue_lock);
- put_device(dev);
- spin_lock_irq(q->queue_lock);
-}
-
-
-/**
- * fc_bsg_host_handler - handler for bsg requests for a fc host
- * @q: fc host request queue
- */
-static void
-fc_bsg_host_handler(struct request_queue *q)
-{
- struct Scsi_Host *shost = q->queuedata;
-
- fc_bsg_request_handler(q, shost, NULL, &shost->shost_gendev);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return 0;
}
-
-/**
- * fc_bsg_rport_handler - handler for bsg requests for a fc rport
- * @q: rport request queue
- */
-static void
-fc_bsg_rport_handler(struct request_queue *q)
+static int fc_bsg_dispatch(struct bsg_job *job)
{
- struct fc_rport *rport = q->queuedata;
- struct Scsi_Host *shost = rport_to_shost(rport);
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
- fc_bsg_request_handler(q, shost, rport, &rport->dev);
+ if (scsi_is_fc_rport(job->dev))
+ return fc_bsg_rport_dispatch(shost, job);
+ else
+ return fc_bsg_host_dispatch(shost, job);
}
-
/**
* fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
* @shost: shost for fc_host
@@ -4051,33 +3776,42 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
snprintf(bsg_name, sizeof(bsg_name),
"fc_host%d", shost->host_no);
- q = __scsi_alloc_queue(shost, fc_bsg_host_handler);
+ q = __scsi_alloc_queue(shost, bsg_request_fn);
if (!q) {
- printk(KERN_ERR "fc_host%d: bsg interface failed to "
- "initialize - no request queue\n",
- shost->host_no);
+ dev_err(dev,
+ "fc_host%d: bsg interface failed to initialize - no request queue\n",
+ shost->host_no);
return -ENOMEM;
}
- q->queuedata = shost;
- queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
- blk_queue_softirq_done(q, fc_bsg_softirq_done);
- blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
- blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
-
- err = bsg_register_queue(q, dev, bsg_name, NULL);
+ err = bsg_setup_queue(dev, q, bsg_name, fc_bsg_dispatch,
+ i->f->dd_bsg_size);
if (err) {
- printk(KERN_ERR "fc_host%d: bsg interface failed to "
- "initialize - register queue\n",
- shost->host_no);
+ dev_err(dev,
+ "fc_host%d: bsg interface failed to initialize - setup queue\n",
+ shost->host_no);
blk_cleanup_queue(q);
return err;
}
-
+ blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
+ blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
fc_host->rqst_q = q;
return 0;
}
+static int fc_bsg_rport_prep(struct request_queue *q, struct request *req)
+{
+ struct fc_rport *rport = dev_to_rport(q->queuedata);
+
+ if (rport->port_state == FC_PORTSTATE_BLOCKED &&
+ !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
+ return BLKPREP_DEFER;
+
+ if (rport->port_state != FC_PORTSTATE_ONLINE)
+ return BLKPREP_KILL;
+
+ return BLKPREP_OK;
+}
/**
* fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
@@ -4097,29 +3831,22 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
if (!i->f->bsg_request)
return -ENOTSUPP;
- q = __scsi_alloc_queue(shost, fc_bsg_rport_handler);
+ q = __scsi_alloc_queue(shost, bsg_request_fn);
if (!q) {
- printk(KERN_ERR "%s: bsg interface failed to "
- "initialize - no request queue\n",
- dev->kobj.name);
+ dev_err(dev, "bsg interface failed to initialize - no request queue\n");
return -ENOMEM;
}
- q->queuedata = rport;
- queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
- blk_queue_softirq_done(q, fc_bsg_softirq_done);
- blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
- blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
-
- err = bsg_register_queue(q, dev, NULL, NULL);
+ err = bsg_setup_queue(dev, q, NULL, fc_bsg_dispatch, i->f->dd_bsg_size);
if (err) {
- printk(KERN_ERR "%s: bsg interface failed to "
- "initialize - register queue\n",
- dev->kobj.name);
+ dev_err(dev, "failed to setup bsg queue\n");
blk_cleanup_queue(q);
return err;
}
+ blk_queue_prep_rq(q, fc_bsg_rport_prep);
+ blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
+ blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
rport->rqst_q = q;
return 0;
}
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index e3cd3ece4412..b87a78673f65 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -24,7 +24,6 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/delay.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -115,21 +114,12 @@ static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
NULL, NULL, NULL);
-#define SRP_PID(p) \
- (p)->port_id[0], (p)->port_id[1], (p)->port_id[2], (p)->port_id[3], \
- (p)->port_id[4], (p)->port_id[5], (p)->port_id[6], (p)->port_id[7], \
- (p)->port_id[8], (p)->port_id[9], (p)->port_id[10], (p)->port_id[11], \
- (p)->port_id[12], (p)->port_id[13], (p)->port_id[14], (p)->port_id[15]
-
-#define SRP_PID_FMT "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:" \
- "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"
-
static ssize_t
show_srp_rport_id(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_rport *rport = transport_class_to_srp_rport(dev);
- return sprintf(buf, SRP_PID_FMT "\n", SRP_PID(rport));
+ return sprintf(buf, "%16phC\n", rport->port_id);
}
static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL);
@@ -402,36 +392,6 @@ static void srp_reconnect_work(struct work_struct *work)
}
}
-/**
- * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
- * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
- *
- * To do: add support for scsi-mq in this function.
- */
-static int scsi_request_fn_active(struct Scsi_Host *shost)
-{
- struct scsi_device *sdev;
- struct request_queue *q;
- int request_fn_active = 0;
-
- shost_for_each_device(sdev, shost) {
- q = sdev->request_queue;
-
- spin_lock_irq(q->queue_lock);
- request_fn_active += q->request_fn_active;
- spin_unlock_irq(q->queue_lock);
- }
-
- return request_fn_active;
-}
-
-/* Wait until ongoing shost->hostt->queuecommand() calls have finished. */
-static void srp_wait_for_queuecommand(struct Scsi_Host *shost)
-{
- while (scsi_request_fn_active(shost))
- msleep(20);
-}
-
static void __rport_fail_io_fast(struct srp_rport *rport)
{
struct Scsi_Host *shost = rport_to_shost(rport);
@@ -441,14 +401,17 @@ static void __rport_fail_io_fast(struct srp_rport *rport)
if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
return;
+ /*
+ * Call scsi_target_block() to wait for ongoing shost->queuecommand()
+ * calls before invoking i->f->terminate_rport_io().
+ */
+ scsi_target_block(rport->dev.parent);
scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
/* Involve the LLD if possible to terminate all I/O on the rport. */
i = to_srp_internal(shost->transportt);
- if (i->f->terminate_rport_io) {
- srp_wait_for_queuecommand(shost);
+ if (i->f->terminate_rport_io)
i->f->terminate_rport_io(rport);
- }
}
/**
@@ -576,7 +539,6 @@ int srp_reconnect_rport(struct srp_rport *rport)
if (res)
goto out;
scsi_target_block(&shost->shost_gendev);
- srp_wait_for_queuecommand(shost);
res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
pr_debug("%s (state %d): transport.reconnect() returned %d\n",
dev_name(&shost->shost_gendev), rport->state, res);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 079c2d9759fb..1622e23138e0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2465,9 +2465,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
if (sdkp->first_scan || old_wp != sdkp->write_prot) {
sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
sdkp->write_prot ? "on" : "off");
- sd_printk(KERN_DEBUG, sdkp,
- "Mode Sense: %02x %02x %02x %02x\n",
- buffer[0], buffer[1], buffer[2], buffer[3]);
+ sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer);
}
}
}
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 07b6444d3e0a..b673825f46b5 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -929,8 +929,6 @@ struct pqi_ctrl_info {
int max_msix_vectors;
int num_msix_vectors_enabled;
int num_msix_vectors_initialized;
- u32 msix_vectors[PQI_MAX_MSIX_VECTORS];
- void *intr_data[PQI_MAX_MSIX_VECTORS];
int event_irq;
struct Scsi_Host *scsi_host;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index a535b2661f38..8702d9cf8040 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -25,6 +25,7 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/cciss_ioctl.h>
+#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -2887,19 +2888,19 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
{
+ struct pci_dev *pdev = ctrl_info->pci_dev;
int i;
int rc;
- ctrl_info->event_irq = ctrl_info->msix_vectors[0];
+ ctrl_info->event_irq = pci_irq_vector(pdev, 0);
for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
- rc = request_irq(ctrl_info->msix_vectors[i],
- pqi_irq_handler, 0,
- DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
+ rc = request_irq(pci_irq_vector(pdev, i), pqi_irq_handler, 0,
+ DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
if (rc) {
- dev_err(&ctrl_info->pci_dev->dev,
+ dev_err(&pdev->dev,
"irq %u init failed with error %d\n",
- ctrl_info->msix_vectors[i], rc);
+ pci_irq_vector(pdev, i), rc);
return rc;
}
ctrl_info->num_msix_vectors_initialized++;
@@ -2908,72 +2909,23 @@ static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
return 0;
}
-static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
-{
- int i;
-
- for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
- free_irq(ctrl_info->msix_vectors[i],
- ctrl_info->intr_data[i]);
-}
-
static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
- unsigned int i;
- int max_vectors;
- int num_vectors_enabled;
- struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];
-
- max_vectors = ctrl_info->num_queue_groups;
-
- for (i = 0; i < max_vectors; i++)
- msix_entries[i].entry = i;
-
- num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
- msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);
+ int ret;
- if (num_vectors_enabled < 0) {
+ ret = pci_alloc_irq_vectors(ctrl_info->pci_dev,
+ PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ if (ret < 0) {
dev_err(&ctrl_info->pci_dev->dev,
- "MSI-X init failed with error %d\n",
- num_vectors_enabled);
- return num_vectors_enabled;
- }
-
- ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
- for (i = 0; i < num_vectors_enabled; i++) {
- ctrl_info->msix_vectors[i] = msix_entries[i].vector;
- ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
+ "MSI-X init failed with error %d\n", ret);
+ return ret;
}
+ ctrl_info->num_msix_vectors_enabled = ret;
return 0;
}
-static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
-{
- int i;
- int rc;
- int cpu;
-
- cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
- rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
- get_cpu_mask(cpu));
- if (rc)
- dev_err(&ctrl_info->pci_dev->dev,
- "error %d setting affinity hint for irq vector %u\n",
- rc, ctrl_info->msix_vectors[i]);
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
-}
-
-static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
-{
- int i;
-
- for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
- irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
-}
-
static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
{
unsigned int i;
@@ -4743,6 +4695,13 @@ static int pqi_slave_configure(struct scsi_device *sdev)
return 0;
}
+static int pqi_map_queues(struct Scsi_Host *shost)
+{
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+ return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
+}
+
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
void __user *arg)
{
@@ -5130,6 +5089,7 @@ static struct scsi_host_template pqi_driver_template = {
.ioctl = pqi_ioctl,
.slave_alloc = pqi_slave_alloc,
.slave_configure = pqi_slave_configure,
+ .map_queues = pqi_map_queues,
.sdev_attrs = pqi_sdev_attrs,
.shost_attrs = pqi_shost_attrs,
};
@@ -5159,7 +5119,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
shost->cmd_per_lun = shost->can_queue;
shost->sg_tablesize = ctrl_info->sg_tablesize;
shost->transportt = pqi_sas_transport_template;
- shost->irq = ctrl_info->msix_vectors[0];
+ shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
shost->unique_id = shost->irq;
shost->nr_hw_queues = ctrl_info->num_queue_groups;
shost->hostdata[0] = (unsigned long)ctrl_info;
@@ -5409,8 +5369,6 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
if (rc)
return rc;
- pqi_irq_set_affinity_hint(ctrl_info);
-
rc = pqi_create_queues(ctrl_info);
if (rc)
return rc;
@@ -5557,10 +5515,14 @@ static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
{
- pqi_irq_unset_affinity_hint(ctrl_info);
- pqi_free_irqs(ctrl_info);
- if (ctrl_info->num_msix_vectors_enabled)
- pci_disable_msix(ctrl_info->pci_dev);
+ int i;
+
+ for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
+ free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
+ &ctrl_info->queue_groups[i]);
+ }
+
+ pci_free_irq_vectors(ctrl_info->pci_dev);
}
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 8ccfc9ea874b..05526b71541b 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1495,9 +1495,9 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
if (sg_count) {
if (sg_count > MAX_PAGE_BUFFER_COUNT) {
- payload_sz = (sg_count * sizeof(void *) +
+ payload_sz = (sg_count * sizeof(u64) +
sizeof(struct vmbus_packet_mpb_array));
- payload = kmalloc(payload_sz, GFP_ATOMIC);
+ payload = kzalloc(payload_sz, GFP_ATOMIC);
if (!payload)
return SCSI_MLQUEUE_DEVICE_BUSY;
}
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 3c4c07038948..88db6992420e 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -43,20 +43,18 @@
#define NCR5380_implementation_fields /* none */
-#define NCR5380_read(reg) sun3scsi_read(reg)
-#define NCR5380_write(reg, value) sun3scsi_write(reg, value)
+#define NCR5380_read(reg) in_8(hostdata->io + (reg))
+#define NCR5380_write(reg, value) out_8(hostdata->io + (reg), value)
#define NCR5380_queue_command sun3scsi_queue_command
#define NCR5380_bus_reset sun3scsi_bus_reset
#define NCR5380_abort sun3scsi_abort
#define NCR5380_info sun3scsi_info
-#define NCR5380_dma_recv_setup(instance, data, count) (count)
-#define NCR5380_dma_send_setup(instance, data, count) (count)
-#define NCR5380_dma_residual(instance) \
- sun3scsi_dma_residual(instance)
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
- sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd)
+#define NCR5380_dma_xfer_len sun3scsi_dma_xfer_len
+#define NCR5380_dma_recv_setup sun3scsi_dma_count
+#define NCR5380_dma_send_setup sun3scsi_dma_count
+#define NCR5380_dma_residual sun3scsi_dma_residual
#define NCR5380_acquire_dma_irq(instance) (1)
#define NCR5380_release_dma_irq(instance)
@@ -82,7 +80,6 @@ module_param(setup_hostid, int, 0);
#define SUN3_DVMA_BUFSIZE 0xe000
static struct scsi_cmnd *sun3_dma_setup_done;
-static unsigned char *sun3_scsi_regp;
static volatile struct sun3_dma_regs *dregs;
static struct sun3_udc_regs *udc_regs;
static unsigned char *sun3_dma_orig_addr;
@@ -90,20 +87,6 @@ static unsigned long sun3_dma_orig_count;
static int sun3_dma_active;
static unsigned long last_residual;
-/*
- * NCR 5380 register access functions
- */
-
-static inline unsigned char sun3scsi_read(int reg)
-{
- return in_8(sun3_scsi_regp + reg);
-}
-
-static inline void sun3scsi_write(int reg, int value)
-{
- out_8(sun3_scsi_regp + reg, value);
-}
-
#ifndef SUN3_SCSI_VME
/* dma controller register access functions */
@@ -158,8 +141,8 @@ static irqreturn_t scsi_sun3_intr(int irq, void *dev)
}
/* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */
-static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance,
- void *data, unsigned long count, int write_flag)
+static int sun3scsi_dma_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count, int write_flag)
{
void *addr;
@@ -211,9 +194,10 @@ static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance,
dregs->csr |= CSR_FIFO;
if(dregs->fifo_count != count) {
- shost_printk(KERN_ERR, instance, "FIFO mismatch %04x not %04x\n",
+ shost_printk(KERN_ERR, hostdata->host,
+ "FIFO mismatch %04x not %04x\n",
dregs->fifo_count, (unsigned int) count);
- NCR5380_dprint(NDEBUG_DMA, instance);
+ NCR5380_dprint(NDEBUG_DMA, hostdata->host);
}
/* setup udc */
@@ -248,14 +232,34 @@ static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance,
}
-static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance)
+static int sun3scsi_dma_count(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return count;
+}
+
+static inline int sun3scsi_dma_recv_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return sun3scsi_dma_setup(hostdata, data, count, 0);
+}
+
+static inline int sun3scsi_dma_send_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return sun3scsi_dma_setup(hostdata, data, count, 1);
+}
+
+static int sun3scsi_dma_residual(struct NCR5380_hostdata *hostdata)
{
return last_residual;
}
-static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted_len,
- struct scsi_cmnd *cmd)
+static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
+ struct scsi_cmnd *cmd)
{
+ int wanted_len = cmd->SCp.this_residual;
+
if (wanted_len < DMA_MIN_SIZE || cmd->request->cmd_type != REQ_TYPE_FS)
return 0;
@@ -428,9 +432,10 @@ static struct scsi_host_template sun3_scsi_template = {
static int __init sun3_scsi_probe(struct platform_device *pdev)
{
struct Scsi_Host *instance;
+ struct NCR5380_hostdata *hostdata;
int error;
struct resource *irq, *mem;
- unsigned char *ioaddr;
+ void __iomem *ioaddr;
int host_flags = 0;
#ifdef SUN3_SCSI_VME
int i;
@@ -493,8 +498,6 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
}
#endif
- sun3_scsi_regp = ioaddr;
-
instance = scsi_host_alloc(&sun3_scsi_template,
sizeof(struct NCR5380_hostdata));
if (!instance) {
@@ -502,9 +505,12 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
goto fail_alloc;
}
- instance->io_port = (unsigned long)ioaddr;
instance->irq = irq->start;
+ hostdata = shost_priv(instance);
+ hostdata->base = mem->start;
+ hostdata->io = ioaddr;
+
error = NCR5380_init(instance, host_flags);
if (error)
goto fail_init;
@@ -552,13 +558,15 @@ fail_init:
fail_alloc:
if (udc_regs)
dvma_free(udc_regs);
- iounmap(sun3_scsi_regp);
+ iounmap(ioaddr);
return error;
}
static int __exit sun3_scsi_remove(struct platform_device *pdev)
{
struct Scsi_Host *instance = platform_get_drvdata(pdev);
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ void __iomem *ioaddr = hostdata->io;
scsi_remove_host(instance);
free_irq(instance->irq, instance);
@@ -566,7 +574,7 @@ static int __exit sun3_scsi_remove(struct platform_device *pdev)
scsi_host_put(instance);
if (udc_regs)
dvma_free(udc_regs);
- iounmap(sun3_scsi_regp);
+ iounmap(ioaddr);
return 0;
}
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 3aedf73f1131..aa43bfea0d00 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1094,10 +1094,12 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
* ufs_qcom_setup_clocks - enables/disable clocks
* @hba: host controller instance
* @on: If true, enable clocks else disable them.
+ * @status: PRE_CHANGE or POST_CHANGE notify
*
* Returns 0 on success, non-zero on failure.
*/
-static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
+static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
@@ -1111,18 +1113,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
if (!host)
return 0;
- if (on) {
- err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
- if (err)
- goto out;
+ if (on && (status == POST_CHANGE)) {
+ phy_power_on(host->generic_phy);
- err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
- if (err) {
- dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
- __func__, err);
- ufs_qcom_phy_disable_iface_clk(host->generic_phy);
- goto out;
- }
/* enable the device ref clock for HS mode*/
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
@@ -1130,14 +1123,15 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
if (vote == host->bus_vote.min_bw_vote)
ufs_qcom_update_bus_bw_vote(host);
- } else {
-
- /* M-PHY RMMI interface clocks can be turned off */
- ufs_qcom_phy_disable_iface_clk(host->generic_phy);
- if (!ufs_qcom_is_link_active(hba))
+ } else if (!on && (status == PRE_CHANGE)) {
+ if (!ufs_qcom_is_link_active(hba)) {
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
+ /* powering off PHY during aggressive clk gating */
+ phy_power_off(host->generic_phy);
+ }
+
vote = host->bus_vote.min_bw_vote;
}
@@ -1146,7 +1140,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
dev_err(hba->dev, "%s: set bus vote failed %d\n",
__func__, err);
-out:
return err;
}
@@ -1204,12 +1197,12 @@ static int ufs_qcom_init(struct ufs_hba *hba)
if (IS_ERR(host->generic_phy)) {
err = PTR_ERR(host->generic_phy);
dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
- goto out;
+ goto out_variant_clear;
}
err = ufs_qcom_bus_register(host);
if (err)
- goto out_host_free;
+ goto out_variant_clear;
ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
&host->hw_ver.minor, &host->hw_ver.step);
@@ -1254,7 +1247,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_set_caps(hba);
ufs_qcom_advertise_quirks(hba);
- ufs_qcom_setup_clocks(hba, true);
+ ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
ufs_qcom_hosts[hba->dev->id] = host;
@@ -1274,8 +1267,7 @@ out_disable_phy:
phy_power_off(host->generic_phy);
out_unregister_bus:
phy_exit(host->generic_phy);
-out_host_free:
- devm_kfree(dev, host);
+out_variant_clear:
ufshcd_set_variant(hba, NULL);
out:
return err;
@@ -1287,6 +1279,7 @@ static void ufs_qcom_exit(struct ufs_hba *hba)
ufs_qcom_disable_lane_clks(host);
phy_power_off(host->generic_phy);
+ phy_exit(host->generic_phy);
}
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 845b874e2977..8e6709a3fb6b 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -46,6 +46,7 @@
#define QUERY_DESC_HDR_SIZE 2
#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
(sizeof(struct utp_upiu_header)))
+#define RESPONSE_UPIU_SENSE_DATA_LENGTH 18
#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
@@ -162,7 +163,7 @@ enum desc_header_offset {
};
enum ufs_desc_max_size {
- QUERY_DESC_DEVICE_MAX_SIZE = 0x1F,
+ QUERY_DESC_DEVICE_MAX_SIZE = 0x40,
QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90,
QUERY_DESC_UNIT_MAX_SIZE = 0x23,
QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06,
@@ -416,7 +417,7 @@ struct utp_cmd_rsp {
__be32 residual_transfer_count;
__be32 reserved[4];
__be16 sense_data_len;
- u8 sense_data[18];
+ u8 sense_data[RESPONSE_UPIU_SENSE_DATA_LENGTH];
};
/**
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index 22f881e9253a..f7983058f3f7 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -128,6 +128,13 @@ struct ufs_dev_fix {
*/
#define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM (1 << 6)
+/*
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE; enabling this quirk ensures this.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE (1 << 7)
+
+
struct ufs_hba;
void ufs_advertise_fixup_device(struct ufs_hba *hba);
@@ -140,6 +147,8 @@ static struct ufs_dev_fix ufs_fixups[] = {
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_NO_FASTAUTO),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index d15eaa466c59..52b546fb509b 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -104,6 +104,7 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
ufshcd_remove(hba);
+ ufshcd_dealloc_host(hba);
}
/**
@@ -147,6 +148,7 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = ufshcd_init(hba, mmio_base, pdev->irq);
if (err) {
dev_err(&pdev->dev, "Initialization failed\n");
+ ufshcd_dealloc_host(hba);
return err;
}
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index db53f38da864..a72a4ba78125 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -163,7 +163,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
if (ret) {
dev_err(dev, "%s: unable to find %s err %d\n",
__func__, prop_name, ret);
- goto out_free;
+ goto out;
}
vreg->min_uA = 0;
@@ -185,9 +185,6 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
goto out;
-out_free:
- devm_kfree(dev, vreg);
- vreg = NULL;
out:
if (!ret)
*out_vreg = vreg;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index cf549871c1ee..ef8548c3a423 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -45,6 +45,8 @@
#include "ufs_quirks.h"
#include "unipro.h"
+#define UFSHCD_REQ_SENSE_SIZE 18
+
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK)
@@ -57,15 +59,9 @@
#define NOP_OUT_TIMEOUT 30 /* msecs */
/* Query request retries */
-#define QUERY_REQ_RETRIES 10
+#define QUERY_REQ_RETRIES 3
/* Query request timeout */
-#define QUERY_REQ_TIMEOUT 30 /* msec */
-/*
- * Query request timeout for fDeviceInit flag
- * fDeviceInit query response time for some devices is too large that default
- * QUERY_REQ_TIMEOUT may not be enough for such devices.
- */
-#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */
+#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
/* Task management command timeout */
#define TM_CMD_TIMEOUT 100 /* msecs */
@@ -123,6 +119,7 @@ enum {
UFSHCD_STATE_RESET,
UFSHCD_STATE_ERROR,
UFSHCD_STATE_OPERATIONAL,
+ UFSHCD_STATE_EH_SCHEDULED,
};
/* UFSHCD error handling flags */
@@ -598,6 +595,20 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
return false;
}
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+ if (ufshcd_is_clkscaling_enabled(hba)) {
+ devfreq_suspend_device(hba->devfreq);
+ hba->clk_scaling.window_start_t = 0;
+ }
+}
+
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
+{
+ if (ufshcd_is_clkscaling_enabled(hba))
+ devfreq_resume_device(hba->devfreq);
+}
+
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
@@ -631,8 +642,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
hba->clk_gating.is_suspended = false;
}
unblock_reqs:
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_resume_device(hba->devfreq);
+ ufshcd_resume_clkscaling(hba);
scsi_unblock_requests(hba->host);
}
@@ -660,6 +670,21 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
start:
switch (hba->clk_gating.state) {
case CLKS_ON:
+ /*
+ * Wait for the ungate work to complete if in progress.
+ * Though the clocks may be in ON state, the link could
+ * still be in hibern8 state if hibern8 is allowed
+ * during clock gating.
+ * Make sure we also exit the hibern8 state, in addition
+ * to the clocks being ON.
+ */
+ if (ufshcd_can_hibern8_during_gating(hba) &&
+ ufshcd_is_link_hibern8(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ flush_work(&hba->clk_gating.ungate_work);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ goto start;
+ }
break;
case REQ_CLKS_OFF:
if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
@@ -709,7 +734,14 @@ static void ufshcd_gate_work(struct work_struct *work)
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_gating.is_suspended) {
+ /*
+ * In case you are here to cancel this work, the gating state
+ * would be marked as REQ_CLKS_ON. In this case, save time by
+ * skipping the gating work and exiting after changing the clock
+ * state to CLKS_ON.
+ */
+ if (hba->clk_gating.is_suspended ||
+ (hba->clk_gating.state == REQ_CLKS_ON)) {
hba->clk_gating.state = CLKS_ON;
goto rel_lock;
}
@@ -731,10 +763,7 @@ static void ufshcd_gate_work(struct work_struct *work)
ufshcd_set_link_hibern8(hba);
}
- if (ufshcd_is_clkscaling_enabled(hba)) {
- devfreq_suspend_device(hba->devfreq);
- hba->clk_scaling.window_start_t = 0;
- }
+ ufshcd_suspend_clkscaling(hba);
if (!ufshcd_is_link_active(hba))
ufshcd_setup_clocks(hba, false);
@@ -878,6 +907,8 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ /* Make sure that doorbell is committed immediately */
+ wmb();
}
/**
@@ -889,10 +920,14 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
int len;
if (lrbp->sense_buffer &&
ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
+ int len_to_copy;
+
len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
+ len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
+
memcpy(lrbp->sense_buffer,
lrbp->ucd_rsp_ptr->sr.sense_data,
- min_t(int, len, SCSI_SENSE_BUFFERSIZE));
+ min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
}
}
@@ -1088,7 +1123,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
*
* Returns 0 in case of success, non-zero value in case of failure
*/
-static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
+static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
struct ufshcd_sg_entry *prd_table;
struct scatterlist *sg;
@@ -1102,8 +1137,13 @@ static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
return sg_segments;
if (sg_segments) {
- lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16((u16) (sg_segments));
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16)(sg_segments *
+ sizeof(struct ufshcd_sg_entry)));
+ else
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16) (sg_segments));
prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
@@ -1410,6 +1450,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
break;
+ case UFSHCD_STATE_EH_SCHEDULED:
case UFSHCD_STATE_RESET:
err = SCSI_MLQUEUE_HOST_BUSY;
goto out_unlock;
@@ -1457,7 +1498,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
- lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
+ lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
lrbp->sense_buffer = cmd->sense_buffer;
lrbp->task_tag = tag;
lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
@@ -1465,15 +1506,18 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_comp_scsi_upiu(hba, lrbp);
- err = ufshcd_map_sg(lrbp);
+ err = ufshcd_map_sg(hba, lrbp);
if (err) {
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
goto out;
}
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
/* issue command to the controller */
spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
ufshcd_send_command(hba, tag);
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1581,6 +1625,8 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
msecs_to_jiffies(max_timeout));
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
hba->dev_cmd.complete = NULL;
if (likely(time_left)) {
@@ -1683,6 +1729,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
ufshcd_send_command(hba, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1789,9 +1836,6 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
goto out_unlock;
}
- if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
- timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
-
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
if (err) {
@@ -1861,8 +1905,8 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
if (err) {
- dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
- __func__, opcode, idn, err);
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode, idn, index, err);
goto out_unlock;
}
@@ -1961,8 +2005,8 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
if (err) {
- dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
- __func__, opcode, idn, err);
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode, idn, index, err);
goto out_unlock;
}
@@ -2055,18 +2099,41 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
desc_id, desc_index, 0, desc_buf,
&buff_len);
- if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
- (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
- ufs_query_desc_max_size[desc_id])
- || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
- dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
- __func__, desc_id, param_offset, buff_len, ret);
- if (!ret)
- ret = -EINVAL;
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
+ __func__, desc_id, desc_index, param_offset, ret);
goto out;
}
+ /* Sanity check */
+ if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+ dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
+ __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * While reading variable size descriptors (like string descriptor),
+ * some UFS devices may report the "LENGTH" (field in "Transaction
+ * Specific fields" of Query Response UPIU) same as what was requested
+ * in Query Request UPIU instead of reporting the actual size of the
+ * variable size descriptor.
+ * It is safe to ignore the "LENGTH" field for variable size descriptors
+ * as we can always derive the length of the descriptor from the
+ * descriptor header fields. Hence this change imposes the length
+ * match check only for fixed size descriptors (for which we always
+ * request the correct size as part of Query Request UPIU).
+ */
+ if ((desc_id != QUERY_DESC_IDN_STRING) &&
+ (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
+ dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
+ __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
+ ret = -EINVAL;
+ goto out;
+ }
+
if (is_kmalloc)
memcpy(param_read_buf, &desc_buf[param_offset], param_size);
out:
@@ -2088,7 +2155,18 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
u8 *buf,
u32 size)
{
- return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+ int err = 0;
+ int retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ /* Read descriptor */
+ err = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+ if (!err)
+ break;
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+ }
+
+ return err;
}
int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
@@ -2320,12 +2398,21 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
/* Response upiu and prdt offset should be in double words */
- utrdlp[i].response_upiu_offset =
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
+ utrdlp[i].response_upiu_offset =
+ cpu_to_le16(response_offset);
+ utrdlp[i].prd_table_offset =
+ cpu_to_le16(prdt_offset);
+ utrdlp[i].response_upiu_length =
+ cpu_to_le16(ALIGNED_UPIU_SIZE);
+ } else {
+ utrdlp[i].response_upiu_offset =
cpu_to_le16((response_offset >> 2));
- utrdlp[i].prd_table_offset =
+ utrdlp[i].prd_table_offset =
cpu_to_le16((prdt_offset >> 2));
- utrdlp[i].response_upiu_length =
+ utrdlp[i].response_upiu_length =
cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
+ }
hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
hba->lrb[i].ucd_req_ptr =
@@ -2429,10 +2516,10 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
} while (ret && peer && --retries);
- if (!retries)
+ if (ret)
dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
- set, UIC_GET_ATTR_ID(attr_sel), mib_val,
- retries);
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+ UFS_UIC_COMMAND_RETRIES - retries);
return ret;
}
@@ -2496,9 +2583,10 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
get, UIC_GET_ATTR_ID(attr_sel), ret);
} while (ret && peer && --retries);
- if (!retries)
+ if (ret)
dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
- get, UIC_GET_ATTR_ID(attr_sel), retries);
+ get, UIC_GET_ATTR_ID(attr_sel),
+ UFS_UIC_COMMAND_RETRIES - retries);
if (mib_val && !ret)
*mib_val = uic_cmd.argument3;
@@ -2651,6 +2739,8 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
int ret;
struct uic_command uic_cmd = {0};
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
+
uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
@@ -2664,7 +2754,9 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
*/
if (ufshcd_link_recovery(hba))
ret = -ENOLINK;
- }
+ } else
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
+ POST_CHANGE);
return ret;
}
@@ -2687,13 +2779,17 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
struct uic_command uic_cmd = {0};
int ret;
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
+
uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
if (ret) {
dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
__func__, ret);
ret = ufshcd_link_recovery(hba);
- }
+ } else
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
+ POST_CHANGE);
return ret;
}
@@ -2725,8 +2821,8 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
if (hba->max_pwr_info.is_valid)
return 0;
- pwr_info->pwr_tx = FASTAUTO_MODE;
- pwr_info->pwr_rx = FASTAUTO_MODE;
+ pwr_info->pwr_tx = FAST_MODE;
+ pwr_info->pwr_rx = FAST_MODE;
pwr_info->hs_rate = PA_HS_MODE_B;
/* Get the connected lane count */
@@ -2757,7 +2853,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
__func__, pwr_info->gear_rx);
return -EINVAL;
}
- pwr_info->pwr_rx = SLOWAUTO_MODE;
+ pwr_info->pwr_rx = SLOW_MODE;
}
ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
@@ -2770,7 +2866,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
__func__, pwr_info->gear_tx);
return -EINVAL;
}
- pwr_info->pwr_tx = SLOWAUTO_MODE;
+ pwr_info->pwr_tx = SLOW_MODE;
}
hba->max_pwr_info.is_valid = true;
@@ -3090,7 +3186,16 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
{
int ret;
int retries = DME_LINKSTARTUP_RETRIES;
+ bool link_startup_again = false;
+
+ /*
+ * If UFS device isn't active then we will have to issue link startup
+ * 2 times to make sure the device state move to active.
+ */
+ if (!ufshcd_is_ufs_dev_active(hba))
+ link_startup_again = true;
+link_startup:
do {
ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
@@ -3116,6 +3221,12 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
/* failed to get the link up... retire */
goto out;
+ if (link_startup_again) {
+ link_startup_again = false;
+ retries = DME_LINKSTARTUP_RETRIES;
+ goto link_startup;
+ }
+
if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
ret = ufshcd_disable_device_tx_lcc(hba);
if (ret)
@@ -3181,16 +3292,24 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev)
{
int ret = 0;
u8 lun_qdepth;
+ int retries;
struct ufs_hba *hba;
hba = shost_priv(sdev->host);
lun_qdepth = hba->nutrs;
- ret = ufshcd_read_unit_desc_param(hba,
- ufshcd_scsi_to_upiu_lun(sdev->lun),
- UNIT_DESC_PARAM_LU_Q_DEPTH,
- &lun_qdepth,
- sizeof(lun_qdepth));
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ /* Read descriptor */
+ ret = ufshcd_read_unit_desc_param(hba,
+ ufshcd_scsi_to_upiu_lun(sdev->lun),
+ UNIT_DESC_PARAM_LU_Q_DEPTH,
+ &lun_qdepth,
+ sizeof(lun_qdepth));
+ if (!ret || ret == -ENOTSUPP)
+ break;
+
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, ret);
+ }
/* Some WLUN doesn't support unit descriptor */
if (ret == -EOPNOTSUPP)
@@ -4097,6 +4216,17 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
{
u32 reg;
+ /* PHY layer lane error */
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+ /* Ignore LINERESET indication, as this is not an error */
+ if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
+ (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK))
+ /*
+ * To know whether this error is fatal or not, DB timeout
+ * must be checked but this error is handled separately.
+ */
+ dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
+
/* PA_INIT_ERROR is fatal and needs UIC reset */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
@@ -4158,7 +4288,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
/* block commands from scsi mid-layer */
scsi_block_requests(hba->host);
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
schedule_work(&hba->eh_work);
}
}
@@ -4311,6 +4441,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
task_req_upiup->input_param1 = cpu_to_be32(lun_id);
task_req_upiup->input_param2 = cpu_to_be32(task_id);
+ ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
+
/* send command to the controller */
__set_bit(free_slot, &hba->outstanding_tasks);
@@ -4318,6 +4450,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
wmb();
ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+ /* Make sure that doorbell is committed immediately */
+ wmb();
spin_unlock_irqrestore(host->host_lock, flags);
@@ -4722,6 +4856,24 @@ out:
return icc_level;
}
+static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
+{
+ int ret = 0;
+ int retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ /* write attribute */
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
+ if (!ret)
+ break;
+
+ dev_dbg(hba->dev, "%s: failed with error %d\n", __func__, ret);
+ }
+
+ return ret;
+}
+
static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
int ret;
@@ -4742,9 +4894,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
__func__, hba->init_prefetch_data.icc_level);
- ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
- &hba->init_prefetch_data.icc_level);
+ ret = ufshcd_set_icc_levels_attr(hba,
+ hba->init_prefetch_data.icc_level);
if (ret)
dev_err(hba->dev,
@@ -4965,6 +5116,76 @@ out:
return ret;
}
+/**
+ * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
+ * less than device PA_TACTIVATE time.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE; we need to enable the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
+ * for such devices.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
+{
+ int ret = 0;
+ u32 granularity, peer_granularity;
+ u32 pa_tactivate, peer_pa_tactivate;
+ u32 pa_tactivate_us, peer_pa_tactivate_us;
+ u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &granularity);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &peer_granularity);
+ if (ret)
+ goto out;
+
+ if ((granularity < PA_GRANULARITY_MIN_VAL) ||
+ (granularity > PA_GRANULARITY_MAX_VAL)) {
+ dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
+ __func__, granularity);
+ return -EINVAL;
+ }
+
+ if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
+ (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
+ dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
+ __func__, peer_granularity);
+ return -EINVAL;
+ }
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ &peer_pa_tactivate);
+ if (ret)
+ goto out;
+
+ pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
+ peer_pa_tactivate_us = peer_pa_tactivate *
+ gran_to_us_table[peer_granularity - 1];
+
+ if (pa_tactivate_us > peer_pa_tactivate_us) {
+ u32 new_peer_pa_tactivate;
+
+ new_peer_pa_tactivate = pa_tactivate_us /
+ gran_to_us_table[peer_granularity - 1];
+ new_peer_pa_tactivate++;
+ ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ new_peer_pa_tactivate);
+ }
+
+out:
+ return ret;
+}
+
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
@@ -4975,6 +5196,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
/* set 1ms timeout for PA_TACTIVATE */
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
+ ufshcd_quirk_tune_host_pa_tactivate(hba);
}
/**
@@ -5027,9 +5251,11 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
__func__);
} else {
ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
- if (ret)
+ if (ret) {
dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
__func__, ret);
+ goto out;
+ }
}
/* set the state as operational after switching to desired gear */
@@ -5062,8 +5288,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
hba->is_init_prefetch = true;
/* Resume devfreq after UFS device is detected */
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_resume_device(hba->devfreq);
+ ufshcd_resume_clkscaling(hba);
out:
/*
@@ -5389,6 +5614,10 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
if (!head || list_empty(head))
goto out;
+ ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
+ if (ret)
+ return ret;
+
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
@@ -5410,7 +5639,10 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
}
}
- ret = ufshcd_vops_setup_clocks(hba, on);
+ ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
+ if (ret)
+ return ret;
+
out:
if (ret) {
list_for_each_entry(clki, head, list) {
@@ -5500,8 +5732,6 @@ static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
if (!hba->vops)
return;
- ufshcd_vops_setup_clocks(hba, false);
-
ufshcd_vops_setup_regulators(hba, false);
ufshcd_vops_exit(hba);
@@ -5564,6 +5794,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
if (hba->is_powered) {
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
+ ufshcd_suspend_clkscaling(hba);
ufshcd_setup_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
hba->is_powered = false;
@@ -5577,19 +5808,19 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
0,
0,
0,
- SCSI_SENSE_BUFFERSIZE,
+ UFSHCD_REQ_SENSE_SIZE,
0};
char *buffer;
int ret;
- buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+ buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
goto out;
}
ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
- SCSI_SENSE_BUFFERSIZE, NULL,
+ UFSHCD_REQ_SENSE_SIZE, NULL,
msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM);
if (ret)
pr_err("%s: failed with err %d\n", __func__, ret);
@@ -5766,7 +5997,6 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
!hba->dev_info.is_lu_power_on_wp) {
ret = ufshcd_setup_vreg(hba, true);
} else if (!ufshcd_is_ufs_dev_active(hba)) {
- ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
if (!ret && !ufshcd_is_link_active(hba)) {
ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
if (ret)
@@ -5775,6 +6005,7 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
if (ret)
goto vccq_lpm;
}
+ ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
}
goto out;
@@ -5839,6 +6070,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_hold(hba, false);
hba->clk_gating.is_suspended = true;
+ ufshcd_suspend_clkscaling(hba);
+
if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
req_link_state == UIC_LINK_ACTIVE_STATE) {
goto disable_clks;
@@ -5846,12 +6079,12 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
(req_link_state == hba->uic_link_state))
- goto out;
+ goto enable_gating;
/* UFS device & link must be active before we enter in this function */
if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
ret = -EINVAL;
- goto out;
+ goto enable_gating;
}
if (ufshcd_is_runtime_pm(pm_op)) {
@@ -5888,15 +6121,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
disable_clks:
/*
- * The clock scaling needs access to controller registers. Hence, Wait
- * for pending clock scaling work to be done before clocks are
- * turned off.
- */
- if (ufshcd_is_clkscaling_enabled(hba)) {
- devfreq_suspend_device(hba->devfreq);
- hba->clk_scaling.window_start_t = 0;
- }
- /*
* Call vendor specific suspend callback. As these callbacks may access
* vendor specific host controller register space call them before the
* host clocks are ON.
@@ -5905,10 +6129,6 @@ disable_clks:
if (ret)
goto set_link_active;
- ret = ufshcd_vops_setup_clocks(hba, false);
- if (ret)
- goto vops_resume;
-
if (!ufshcd_is_link_active(hba))
ufshcd_setup_clocks(hba, false);
else
@@ -5925,9 +6145,8 @@ disable_clks:
ufshcd_hba_vreg_set_lpm(hba);
goto out;
-vops_resume:
- ufshcd_vops_resume(hba, pm_op);
set_link_active:
+ ufshcd_resume_clkscaling(hba);
ufshcd_vreg_set_hpm(hba);
if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
ufshcd_set_link_active(hba);
@@ -5937,6 +6156,7 @@ set_dev_active:
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
enable_gating:
+ ufshcd_resume_clkscaling(hba);
hba->clk_gating.is_suspended = false;
ufshcd_release(hba);
out:
@@ -6015,8 +6235,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_urgent_bkops(hba);
hba->clk_gating.is_suspended = false;
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_resume_device(hba->devfreq);
+ ufshcd_resume_clkscaling(hba);
/* Schedule clock gating in case of no access to UFS device yet */
ufshcd_release(hba);
@@ -6030,6 +6249,7 @@ disable_vreg:
ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
ufshcd_disable_irq(hba);
+ ufshcd_suspend_clkscaling(hba);
ufshcd_setup_clocks(hba, false);
out:
hba->pm_op_in_progress = 0;
@@ -6052,16 +6272,13 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
if (!hba || !hba->is_powered)
return 0;
- if (pm_runtime_suspended(hba->dev)) {
- if (hba->rpm_lvl == hba->spm_lvl)
- /*
- * There is possibility that device may still be in
- * active state during the runtime suspend.
- */
- if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
- hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
- goto out;
+ if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+ hba->curr_dev_pwr_mode) &&
+ (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+ hba->uic_link_state))
+ goto out;
+ if (pm_runtime_suspended(hba->dev)) {
/*
* UFS device and/or UFS link low power states during runtime
* suspend seems to be different than what is expected during
@@ -6092,7 +6309,10 @@ EXPORT_SYMBOL(ufshcd_system_suspend);
int ufshcd_system_resume(struct ufs_hba *hba)
{
- if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered || pm_runtime_suspended(hba->dev))
/*
* Let the runtime resume take care of resuming
* if runtime suspended.
@@ -6113,7 +6333,10 @@ EXPORT_SYMBOL(ufshcd_system_resume);
*/
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
- if (!hba || !hba->is_powered)
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered)
return 0;
return ufshcd_suspend(hba, UFS_RUNTIME_PM);
@@ -6143,10 +6366,13 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
*/
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
- if (!hba || !hba->is_powered)
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered)
return 0;
- else
- return ufshcd_resume(hba, UFS_RUNTIME_PM);
+
+ return ufshcd_resume(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
@@ -6198,11 +6424,7 @@ void ufshcd_remove(struct ufs_hba *hba)
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba, true);
- scsi_host_put(hba->host);
-
ufshcd_exit_clk_gating(hba);
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_remove_device(hba->devfreq);
ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -6324,15 +6546,47 @@ static int ufshcd_devfreq_target(struct device *dev,
{
int err = 0;
struct ufs_hba *hba = dev_get_drvdata(dev);
+ bool release_clk_hold = false;
+ unsigned long irq_flags;
if (!ufshcd_is_clkscaling_enabled(hba))
return -EINVAL;
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (ufshcd_eh_in_progress(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return 0;
+ }
+
+ if (ufshcd_is_clkgating_allowed(hba) &&
+ (hba->clk_gating.state != CLKS_ON)) {
+ if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+ /* hold the vote until the scaling work is completed */
+ hba->clk_gating.active_reqs++;
+ release_clk_hold = true;
+ hba->clk_gating.state = CLKS_ON;
+ } else {
+ /*
+ * Clock gating work seems to be running in parallel
+ * hence skip scaling work to avoid deadlock between
+ * current scaling work and gating work.
+ */
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return 0;
+ }
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
if (*freq == UINT_MAX)
err = ufshcd_scale_clks(hba, true);
else if (*freq == 0)
err = ufshcd_scale_clks(hba, false);
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (release_clk_hold)
+ __ufshcd_release(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
return err;
}
@@ -6498,7 +6752,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
}
if (ufshcd_is_clkscaling_enabled(hba)) {
- hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
+ hba->devfreq = devm_devfreq_add_device(dev, &ufs_devfreq_profile,
"simple_ondemand", NULL);
if (IS_ERR(hba->devfreq)) {
dev_err(hba->dev, "Unable to register with devfreq %ld\n",
@@ -6507,18 +6761,19 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto out_remove_scsi_host;
}
/* Suspend devfreq until the UFS device is detected */
- devfreq_suspend_device(hba->devfreq);
- hba->clk_scaling.window_start_t = 0;
+ ufshcd_suspend_clkscaling(hba);
}
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
/*
- * The device-initialize-sequence hasn't been invoked yet.
- * Set the device to power-off state
+	 * We assume the device was not put into a sleep/power-down
+	 * state by the boot stage before the kernel took over.
+	 * This assumption avoids doing link startup twice during
+	 * ufshcd_probe_hba().
*/
- ufshcd_set_ufs_dev_poweroff(hba);
+ ufshcd_set_ufs_dev_active(hba);
async_schedule(ufshcd_async_scan, hba);
@@ -6530,7 +6785,6 @@ exit_gating:
ufshcd_exit_clk_gating(hba);
out_disable:
hba->is_irq_enabled = false;
- scsi_host_put(host);
ufshcd_hba_exit(hba);
out_error:
return err;
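Editor's note on the hunks above: switching to devm_devfreq_add_device() makes the devfreq instance a managed resource, which is why the explicit devfreq_remove_device() call was dropped from ufshcd_remove() earlier in this patch. A minimal sketch of the managed registration, assuming the ufs_devfreq_profile shown in the hunk:

    	/*
    	 * Sketch only: managed devfreq registration as used above. The
    	 * devm_* resource is released automatically on driver detach, so
    	 * no devfreq_remove_device() is needed in the remove path.
    	 */
    	hba->devfreq = devm_devfreq_add_device(hba->dev, &ufs_devfreq_profile,
    					       "simple_ondemand", NULL);
    	if (IS_ERR(hba->devfreq))
    		return PTR_ERR(hba->devfreq);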
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 430bef111293..7d9ff22acfea 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -261,6 +261,12 @@ struct ufs_pwr_mode_info {
* @pwr_change_notify: called before and after a power mode change
 * is carried out to allow vendor specific capabilities
* to be set.
+ * @setup_xfer_req: called before any transfer request is issued
+ *                  to allow vendor specific configuration of the request
+ * @setup_task_mgmt: called before any task management request is issued
+ *                   to allow vendor specific configuration of the request
+ * @hibern8_notify: called before and after hibern8 enter/exit
+ *                  to allow vendor specific configuration
* @suspend: called during host controller PM callback
* @resume: called during host controller PM callback
* @dbg_register_dump: used to dump controller debug information
@@ -273,7 +279,8 @@ struct ufs_hba_variant_ops {
u32 (*get_ufs_hci_version)(struct ufs_hba *);
int (*clk_scale_notify)(struct ufs_hba *, bool,
enum ufs_notify_change_status);
- int (*setup_clocks)(struct ufs_hba *, bool);
+ int (*setup_clocks)(struct ufs_hba *, bool,
+ enum ufs_notify_change_status);
int (*setup_regulators)(struct ufs_hba *, bool);
int (*hce_enable_notify)(struct ufs_hba *,
enum ufs_notify_change_status);
@@ -283,6 +290,10 @@ struct ufs_hba_variant_ops {
enum ufs_notify_change_status status,
struct ufs_pa_layer_attr *,
struct ufs_pa_layer_attr *);
+ void (*setup_xfer_req)(struct ufs_hba *, int, bool);
+ void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
+ void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
+ enum ufs_notify_change_status);
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
@@ -474,6 +485,12 @@ struct ufs_hba {
*/
#define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION UFS_BIT(5)
+ /*
+ * This quirk needs to be enabled if the host controller interprets
+ * the values of PRDTO and PRDTL in the UTRD with byte granularity.
+ */
+ #define UFSHCD_QUIRK_PRDT_BYTE_GRAN UFS_BIT(7)
+
unsigned int quirks; /* Deviations from standard UFSHCI spec. */
/* Device deviations from standard UFS device spec. */
@@ -755,10 +772,11 @@ static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
return 0;
}
-static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on)
+static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
{
if (hba->vops && hba->vops->setup_clocks)
- return hba->vops->setup_clocks(hba, on);
+ return hba->vops->setup_clocks(hba, on, status);
return 0;
}
@@ -799,6 +817,28 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
return -ENOTSUPP;
}
+static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
+ bool is_scsi_cmd)
+{
+ if (hba->vops && hba->vops->setup_xfer_req)
+ return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
+}
+
+static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
+ int tag, u8 tm_function)
+{
+ if (hba->vops && hba->vops->setup_task_mgmt)
+ return hba->vops->setup_task_mgmt(hba, tag, tm_function);
+}
+
+static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
+ enum uic_cmd_dme cmd,
+ enum ufs_notify_change_status status)
+{
+ if (hba->vops && hba->vops->hibern8_notify)
+ return hba->vops->hibern8_notify(hba, cmd, status);
+}
+
static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
if (hba->vops && hba->vops->suspend)
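Editor's note on the wrappers above: each optional vop is guarded by a NULL check, so vendors only fill in the hooks they need. A hypothetical variant driver wiring up the new hooks might look like the sketch below; the acme_* names and callback bodies are assumptions, only the ops fields come from this patch.

    static void acme_setup_xfer_req(struct ufs_hba *hba, int tag, bool is_scsi_cmd)
    {
    	/* e.g. program a vendor doorbell or QoS hint for this tag */
    }

    static void acme_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd,
    				enum ufs_notify_change_status status)
    {
    	/* e.g. gate vendor PHY clocks once hibern8 entry has completed */
    }

    static const struct ufs_hba_variant_ops acme_ufs_vops = {
    	.name		= "acme-ufs",
    	.setup_xfer_req	= acme_setup_xfer_req,
    	.hibern8_notify	= acme_hibern8_notify,
    };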
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 9599741ff606..5d978867be57 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -83,6 +83,8 @@ enum {
MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
};
+#define UFS_MASK(mask, offset) ((mask) << (offset))
+
/* UFS Version 08h */
#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0)
#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16)
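For illustration only (not part of this patch): the new UFS_MASK() helper builds shifted masks, so the version fields above can be extracted from the raw VER register value like this:

    static inline u16 ufs_minor_version(u32 ver)
    {
    	return ver & MINOR_VERSION_NUM_MASK;		/* bits 15:0 */
    }

    static inline u16 ufs_major_version(u32 ver)
    {
    	return (ver & MAJOR_VERSION_NUM_MASK) >> 16;	/* bits 31:16 */
    }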
@@ -166,6 +168,7 @@ enum {
/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
#define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31)
#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
+#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31)
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index eff8b5675575..23129d7b2678 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -123,6 +123,7 @@
#define PA_MAXRXHSGEAR 0x1587
#define PA_RXHSUNTERMCAP 0x15A5
#define PA_RXLSTERMCAP 0x15A6
+#define PA_GRANULARITY 0x15AA
#define PA_PACPREQTIMEOUT 0x1590
#define PA_PACPREQEOBTIMEOUT 0x1591
#define PA_HIBERN8TIME 0x15A7
@@ -158,6 +159,9 @@
#define VS_DEBUGOMC 0xD09E
#define VS_POWERSTATE 0xD083
+#define PA_GRANULARITY_MIN_VAL 1
+#define PA_GRANULARITY_MAX_VAL 6
+
/* PHY Adapter Protocol Constants */
#define PA_MAXDATALANES 4
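Illustrative sketch (an assumption, not part of this patch): a driver could use the new PA_GRANULARITY bounds to validate the attribute read over DME before relying on it:

    	u32 granularity;

    	if (ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity) ||
    	    granularity < PA_GRANULARITY_MIN_VAL ||
    	    granularity > PA_GRANULARITY_MAX_VAL)
    		granularity = PA_GRANULARITY_MIN_VAL;	/* fall back to a sane value */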
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 9dc8687bf048..9aa1fe1fc939 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -79,10 +79,13 @@
struct vscsifrnt_shadow {
/* command between backend and frontend */
unsigned char act;
+ uint8_t nr_segments;
uint16_t rqid;
+ uint16_t ref_rqid;
unsigned int nr_grants; /* number of grants in gref[] */
struct scsiif_request_segment *sg; /* scatter/gather elements */
+ struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
/* Do reset or abort function. */
wait_queue_head_t wq_reset; /* reset work queue */
@@ -172,68 +175,90 @@ static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
scsifront_wake_up(info);
}
-static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info)
+static int scsifront_do_request(struct vscsifrnt_info *info,
+ struct vscsifrnt_shadow *shadow)
{
struct vscsiif_front_ring *ring = &(info->ring);
struct vscsiif_request *ring_req;
+ struct scsi_cmnd *sc = shadow->sc;
uint32_t id;
+ int i, notify;
+
+ if (RING_FULL(&info->ring))
+ return -EBUSY;
id = scsifront_get_rqid(info); /* use id in response */
if (id >= VSCSIIF_MAX_REQS)
- return NULL;
+ return -EBUSY;
- ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
+ info->shadow[id] = shadow;
+ shadow->rqid = id;
+ ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
ring->req_prod_pvt++;
- ring_req->rqid = (uint16_t)id;
+ ring_req->rqid = id;
+ ring_req->act = shadow->act;
+ ring_req->ref_rqid = shadow->ref_rqid;
+ ring_req->nr_segments = shadow->nr_segments;
- return ring_req;
-}
+ ring_req->id = sc->device->id;
+ ring_req->lun = sc->device->lun;
+ ring_req->channel = sc->device->channel;
+ ring_req->cmd_len = sc->cmd_len;
-static void scsifront_do_request(struct vscsifrnt_info *info)
-{
- struct vscsiif_front_ring *ring = &(info->ring);
- int notify;
+ BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
+
+ memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
+
+ ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
+ ring_req->timeout_per_command = sc->request->timeout / HZ;
+
+ for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
+ ring_req->seg[i] = shadow->seg[i];
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
if (notify)
notify_remote_via_irq(info->irq);
+
+ return 0;
}
-static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id)
+static void scsifront_gnttab_done(struct vscsifrnt_info *info,
+ struct vscsifrnt_shadow *shadow)
{
- struct vscsifrnt_shadow *s = info->shadow[id];
int i;
- if (s->sc->sc_data_direction == DMA_NONE)
+ if (shadow->sc->sc_data_direction == DMA_NONE)
return;
- for (i = 0; i < s->nr_grants; i++) {
- if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
+ for (i = 0; i < shadow->nr_grants; i++) {
+ if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
"grant still in use by backend\n");
BUG();
}
- gnttab_end_foreign_access(s->gref[i], 0, 0UL);
+ gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
}
- kfree(s->sg);
+ kfree(shadow->sg);
}
static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
struct vscsiif_response *ring_rsp)
{
+ struct vscsifrnt_shadow *shadow;
struct scsi_cmnd *sc;
uint32_t id;
uint8_t sense_len;
id = ring_rsp->rqid;
- sc = info->shadow[id]->sc;
+ shadow = info->shadow[id];
+ sc = shadow->sc;
BUG_ON(sc == NULL);
- scsifront_gnttab_done(info, id);
+ scsifront_gnttab_done(info, shadow);
scsifront_put_rqid(info, id);
sc->result = ring_rsp->rslt;
@@ -366,7 +391,6 @@ static void scsifront_finish_all(struct vscsifrnt_info *info)
static int map_data_for_request(struct vscsifrnt_info *info,
struct scsi_cmnd *sc,
- struct vscsiif_request *ring_req,
struct vscsifrnt_shadow *shadow)
{
grant_ref_t gref_head;
@@ -379,7 +403,6 @@ static int map_data_for_request(struct vscsifrnt_info *info,
struct scatterlist *sg;
struct scsiif_request_segment *seg;
- ring_req->nr_segments = 0;
if (sc->sc_data_direction == DMA_NONE || !data_len)
return 0;
@@ -398,7 +421,7 @@ static int map_data_for_request(struct vscsifrnt_info *info,
if (!shadow->sg)
return -ENOMEM;
}
- seg = shadow->sg ? : ring_req->seg;
+ seg = shadow->sg ? : shadow->seg;
err = gnttab_alloc_grant_references(seg_grants + data_grants,
&gref_head);
@@ -423,9 +446,9 @@ static int map_data_for_request(struct vscsifrnt_info *info,
info->dev->otherend_id,
xen_page_to_gfn(page), 1);
shadow->gref[ref_cnt] = ref;
- ring_req->seg[ref_cnt].gref = ref;
- ring_req->seg[ref_cnt].offset = (uint16_t)off;
- ring_req->seg[ref_cnt].length = (uint16_t)bytes;
+ shadow->seg[ref_cnt].gref = ref;
+ shadow->seg[ref_cnt].offset = (uint16_t)off;
+ shadow->seg[ref_cnt].length = (uint16_t)bytes;
page++;
len -= bytes;
@@ -473,44 +496,14 @@ static int map_data_for_request(struct vscsifrnt_info *info,
}
if (seg_grants)
- ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
+ shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
else
- ring_req->nr_segments = (uint8_t)ref_cnt;
+ shadow->nr_segments = (uint8_t)ref_cnt;
shadow->nr_grants = ref_cnt;
return 0;
}
-static struct vscsiif_request *scsifront_command2ring(
- struct vscsifrnt_info *info, struct scsi_cmnd *sc,
- struct vscsifrnt_shadow *shadow)
-{
- struct vscsiif_request *ring_req;
-
- memset(shadow, 0, sizeof(*shadow));
-
- ring_req = scsifront_pre_req(info);
- if (!ring_req)
- return NULL;
-
- info->shadow[ring_req->rqid] = shadow;
- shadow->rqid = ring_req->rqid;
-
- ring_req->id = sc->device->id;
- ring_req->lun = sc->device->lun;
- ring_req->channel = sc->device->channel;
- ring_req->cmd_len = sc->cmd_len;
-
- BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
-
- memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
-
- ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
- ring_req->timeout_per_command = sc->request->timeout / HZ;
-
- return ring_req;
-}
-
static int scsifront_enter(struct vscsifrnt_info *info)
{
if (info->pause)
@@ -536,36 +529,25 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *sc)
{
struct vscsifrnt_info *info = shost_priv(shost);
- struct vscsiif_request *ring_req;
struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
unsigned long flags;
int err;
- uint16_t rqid;
+
+ sc->result = 0;
+ memset(shadow, 0, sizeof(*shadow));
+
+ shadow->sc = sc;
+ shadow->act = VSCSIIF_ACT_SCSI_CDB;
spin_lock_irqsave(shost->host_lock, flags);
if (scsifront_enter(info)) {
spin_unlock_irqrestore(shost->host_lock, flags);
return SCSI_MLQUEUE_HOST_BUSY;
}
- if (RING_FULL(&info->ring))
- goto busy;
-
- ring_req = scsifront_command2ring(info, sc, shadow);
- if (!ring_req)
- goto busy;
-
- sc->result = 0;
-
- rqid = ring_req->rqid;
- ring_req->act = VSCSIIF_ACT_SCSI_CDB;
- shadow->sc = sc;
- shadow->act = VSCSIIF_ACT_SCSI_CDB;
-
- err = map_data_for_request(info, sc, ring_req, shadow);
+ err = map_data_for_request(info, sc, shadow);
if (err < 0) {
pr_debug("%s: err %d\n", __func__, err);
- scsifront_put_rqid(info, rqid);
scsifront_return(info);
spin_unlock_irqrestore(shost->host_lock, flags);
if (err == -ENOMEM)
@@ -575,7 +557,11 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
return 0;
}
- scsifront_do_request(info);
+ if (scsifront_do_request(info, shadow)) {
+ scsifront_gnttab_done(info, shadow);
+ goto busy;
+ }
+
scsifront_return(info);
spin_unlock_irqrestore(shost->host_lock, flags);
@@ -598,26 +584,30 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
struct Scsi_Host *host = sc->device->host;
struct vscsifrnt_info *info = shost_priv(host);
struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
- struct vscsiif_request *ring_req;
int err = 0;
- shadow = kmalloc(sizeof(*shadow), GFP_NOIO);
+ shadow = kzalloc(sizeof(*shadow), GFP_NOIO);
if (!shadow)
return FAILED;
+ shadow->act = act;
+ shadow->rslt_reset = RSLT_RESET_WAITING;
+ shadow->sc = sc;
+ shadow->ref_rqid = s->rqid;
+ init_waitqueue_head(&shadow->wq_reset);
+
spin_lock_irq(host->host_lock);
for (;;) {
- if (!RING_FULL(&info->ring)) {
- ring_req = scsifront_command2ring(info, sc, shadow);
- if (ring_req)
- break;
- }
- if (err || info->pause) {
- spin_unlock_irq(host->host_lock);
- kfree(shadow);
- return FAILED;
- }
+ if (scsifront_enter(info))
+ goto fail;
+
+ if (!scsifront_do_request(info, shadow))
+ break;
+
+ scsifront_return(info);
+ if (err)
+ goto fail;
info->wait_ring_available = 1;
spin_unlock_irq(host->host_lock);
err = wait_event_interruptible(info->wq_sync,
@@ -625,22 +615,6 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
spin_lock_irq(host->host_lock);
}
- if (scsifront_enter(info)) {
- spin_unlock_irq(host->host_lock);
- return FAILED;
- }
-
- ring_req->act = act;
- ring_req->ref_rqid = s->rqid;
-
- shadow->act = act;
- shadow->rslt_reset = RSLT_RESET_WAITING;
- init_waitqueue_head(&shadow->wq_reset);
-
- ring_req->nr_segments = 0;
-
- scsifront_do_request(info);
-
spin_unlock_irq(host->host_lock);
err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset);
spin_lock_irq(host->host_lock);
@@ -659,6 +633,11 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
scsifront_return(info);
spin_unlock_irq(host->host_lock);
return err;
+
+fail:
+ spin_unlock_irq(host->host_lock);
+ kfree(shadow);
+ return FAILED;
}
static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
@@ -1060,13 +1039,9 @@ static void scsifront_read_backend_params(struct xenbus_device *dev,
struct vscsifrnt_info *info)
{
unsigned int sg_grant, nr_segs;
- int ret;
struct Scsi_Host *host = info->host;
- ret = xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg-grant", "%u",
- &sg_grant);
- if (ret != 1)
- sg_grant = 0;
+ sg_grant = xenbus_read_unsigned(dev->otherend, "feature-sg-grant", 0);
nr_segs = min_t(unsigned int, sg_grant, SG_ALL);
nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE);
nr_segs = min_t(unsigned int, nr_segs,
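Editor's note on the hunk above: xenbus_read_unsigned() folds the parse-or-default dance into a single call, which is what lets the local ret variable go away. Roughly, the equivalence is:

    	/* before: open-coded default when the node is absent or unparsable */
    	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg-grant",
    			 "%u", &sg_grant) != 1)
    		sg_grant = 0;

    	/* after: the default value (0) is passed to the helper directly */
    	sg_grant = xenbus_read_unsigned(dev->otherend, "feature-sg-grant", 0);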
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index b7995474148c..ec4aa252d6e8 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -67,6 +67,13 @@ config SPI_ATH79
This enables support for the SPI controller present on the
Atheros AR71XX/AR724X/AR913X SoCs.
+config SPI_ARMADA_3700
+ tristate "Marvell Armada 3700 SPI Controller"
+ depends on (ARCH_MVEBU && OF) || COMPILE_TEST
+ help
+ This enables support for the SPI controller present on the
+ Marvell Armada 3700 SoCs.
+
config SPI_ATMEL
tristate "Atmel SPI Controller"
depends on HAS_DMA
@@ -264,6 +271,12 @@ config SPI_FALCON
has only been tested with m25p80 type chips. The hardware has no
support for other types of SPI peripherals.
+config SPI_FSL_LPSPI
+ tristate "Freescale i.MX LPSPI controller"
+ depends on ARCH_MXC || COMPILE_TEST
+ help
+ This enables Freescale i.MX LPSPI controllers in master mode.
+
config SPI_GPIO
tristate "GPIO-based bitbanging SPI Master"
depends on GPIOLIB || COMPILE_TEST
@@ -373,7 +386,6 @@ config SPI_FSL_DSPI
config SPI_FSL_ESPI
tristate "Freescale eSPI controller"
depends on FSL_SOC
- select SPI_FSL_LIB
help
This enables using the Freescale eSPI controllers in master mode.
From MPC8536, 85xx platform uses the controller, and all P10xx,
@@ -451,7 +463,8 @@ config SPI_ORION
tristate "Orion SPI master"
depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST
help
- This enables using the SPI master controller on the Orion chips.
+ This enables using the SPI master controller on the Orion
+ and MVEBU chips.
config SPI_PIC32
tristate "Microchip PIC32 series SPI"
@@ -553,7 +566,7 @@ config SPI_S3C24XX_FIQ
config SPI_S3C64XX
tristate "Samsung S3C64XX series type SPI"
- depends on (PLAT_SAMSUNG || ARCH_EXYNOS)
+ depends on (PLAT_SAMSUNG || ARCH_EXYNOS || COMPILE_TEST)
help
SPI driver for Samsung S3C64XX and newer SoCs.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index aa939d955521..7a6b64662c82 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o
# SPI master controller drivers (bus)
obj-$(CONFIG_SPI_ALTERA) += spi-altera.o
+obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o
obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
@@ -43,6 +44,7 @@ obj-$(CONFIG_SPI_FSL_CPM) += spi-fsl-cpm.o
obj-$(CONFIG_SPI_FSL_DSPI) += spi-fsl-dspi.o
obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o
obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o
+obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o
obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
new file mode 100644
index 000000000000..e89da0af45d2
--- /dev/null
+++ b/drivers/spi/spi-armada-3700.c
@@ -0,0 +1,923 @@
+/*
+ * Marvell Armada-3700 SPI controller driver
+ *
+ * Copyright (C) 2016 Marvell Ltd.
+ *
+ * Author: Wilson Ding <dingwei@marvell.com>
+ * Author: Romain Perier <romain.perier@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/spi/spi.h>
+
+#define DRIVER_NAME "armada_3700_spi"
+
+#define A3700_SPI_TIMEOUT 10
+
+/* SPI Register Offsets */
+#define A3700_SPI_IF_CTRL_REG 0x00
+#define A3700_SPI_IF_CFG_REG 0x04
+#define A3700_SPI_DATA_OUT_REG 0x08
+#define A3700_SPI_DATA_IN_REG 0x0C
+#define A3700_SPI_IF_INST_REG 0x10
+#define A3700_SPI_IF_ADDR_REG 0x14
+#define A3700_SPI_IF_RMODE_REG 0x18
+#define A3700_SPI_IF_HDR_CNT_REG 0x1C
+#define A3700_SPI_IF_DIN_CNT_REG 0x20
+#define A3700_SPI_IF_TIME_REG 0x24
+#define A3700_SPI_INT_STAT_REG 0x28
+#define A3700_SPI_INT_MASK_REG 0x2C
+
+/* A3700_SPI_IF_CTRL_REG */
+#define A3700_SPI_EN BIT(16)
+#define A3700_SPI_ADDR_NOT_CONFIG BIT(12)
+#define A3700_SPI_WFIFO_OVERFLOW BIT(11)
+#define A3700_SPI_WFIFO_UNDERFLOW BIT(10)
+#define A3700_SPI_RFIFO_OVERFLOW BIT(9)
+#define A3700_SPI_RFIFO_UNDERFLOW BIT(8)
+#define A3700_SPI_WFIFO_FULL BIT(7)
+#define A3700_SPI_WFIFO_EMPTY BIT(6)
+#define A3700_SPI_RFIFO_FULL BIT(5)
+#define A3700_SPI_RFIFO_EMPTY BIT(4)
+#define A3700_SPI_WFIFO_RDY BIT(3)
+#define A3700_SPI_RFIFO_RDY BIT(2)
+#define A3700_SPI_XFER_RDY BIT(1)
+#define A3700_SPI_XFER_DONE BIT(0)
+
+/* A3700_SPI_IF_CFG_REG */
+#define A3700_SPI_WFIFO_THRS BIT(28)
+#define A3700_SPI_RFIFO_THRS BIT(24)
+#define A3700_SPI_AUTO_CS BIT(20)
+#define A3700_SPI_DMA_RD_EN BIT(18)
+#define A3700_SPI_FIFO_MODE BIT(17)
+#define A3700_SPI_SRST BIT(16)
+#define A3700_SPI_XFER_START BIT(15)
+#define A3700_SPI_XFER_STOP BIT(14)
+#define A3700_SPI_INST_PIN BIT(13)
+#define A3700_SPI_ADDR_PIN BIT(12)
+#define A3700_SPI_DATA_PIN1 BIT(11)
+#define A3700_SPI_DATA_PIN0 BIT(10)
+#define A3700_SPI_FIFO_FLUSH BIT(9)
+#define A3700_SPI_RW_EN BIT(8)
+#define A3700_SPI_CLK_POL BIT(7)
+#define A3700_SPI_CLK_PHA BIT(6)
+#define A3700_SPI_BYTE_LEN BIT(5)
+#define A3700_SPI_CLK_PRESCALE BIT(0)
+#define A3700_SPI_CLK_PRESCALE_MASK (0x1f)
+
+#define A3700_SPI_WFIFO_THRS_BIT 28
+#define A3700_SPI_RFIFO_THRS_BIT 24
+#define A3700_SPI_FIFO_THRS_MASK 0x7
+
+#define A3700_SPI_DATA_PIN_MASK 0x3
+
+/* A3700_SPI_IF_HDR_CNT_REG */
+#define A3700_SPI_DUMMY_CNT_BIT 12
+#define A3700_SPI_DUMMY_CNT_MASK 0x7
+#define A3700_SPI_RMODE_CNT_BIT 8
+#define A3700_SPI_RMODE_CNT_MASK 0x3
+#define A3700_SPI_ADDR_CNT_BIT 4
+#define A3700_SPI_ADDR_CNT_MASK 0x7
+#define A3700_SPI_INSTR_CNT_BIT 0
+#define A3700_SPI_INSTR_CNT_MASK 0x3
+
+/* A3700_SPI_IF_TIME_REG */
+#define A3700_SPI_CLK_CAPT_EDGE BIT(7)
+
+/* Flags and macros for struct a3700_spi */
+#define A3700_INSTR_CNT 1
+#define A3700_ADDR_CNT 3
+#define A3700_DUMMY_CNT 1
+
+struct a3700_spi {
+ struct spi_master *master;
+ void __iomem *base;
+ struct clk *clk;
+ unsigned int irq;
+ unsigned int flags;
+ bool xmit_data;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ size_t buf_len;
+ u8 byte_len;
+ u32 wait_mask;
+ struct completion done;
+ u32 addr_cnt;
+ u32 instr_cnt;
+ size_t hdr_cnt;
+};
+
+static u32 spireg_read(struct a3700_spi *a3700_spi, u32 offset)
+{
+ return readl(a3700_spi->base + offset);
+}
+
+static void spireg_write(struct a3700_spi *a3700_spi, u32 offset, u32 data)
+{
+ writel(data, a3700_spi->base + offset);
+}
+
+static void a3700_spi_auto_cs_unset(struct a3700_spi *a3700_spi)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val &= ~A3700_SPI_AUTO_CS;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static void a3700_spi_activate_cs(struct a3700_spi *a3700_spi, unsigned int cs)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+ val |= (A3700_SPI_EN << cs);
+ spireg_write(a3700_spi, A3700_SPI_IF_CTRL_REG, val);
+}
+
+static void a3700_spi_deactivate_cs(struct a3700_spi *a3700_spi,
+ unsigned int cs)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+ val &= ~(A3700_SPI_EN << cs);
+ spireg_write(a3700_spi, A3700_SPI_IF_CTRL_REG, val);
+}
+
+static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
+ unsigned int pin_mode)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val &= ~(A3700_SPI_INST_PIN | A3700_SPI_ADDR_PIN);
+ val &= ~(A3700_SPI_DATA_PIN0 | A3700_SPI_DATA_PIN1);
+
+ switch (pin_mode) {
+ case 1:
+ break;
+ case 2:
+ val |= A3700_SPI_DATA_PIN0;
+ break;
+ case 4:
+ val |= A3700_SPI_DATA_PIN1;
+ break;
+ default:
+		dev_err(&a3700_spi->master->dev, "wrong pin mode %u\n", pin_mode);
+ return -EINVAL;
+ }
+
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ return 0;
+}
+
+static void a3700_spi_fifo_mode_set(struct a3700_spi *a3700_spi)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val |= A3700_SPI_FIFO_MODE;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static void a3700_spi_mode_set(struct a3700_spi *a3700_spi,
+ unsigned int mode_bits)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+
+ if (mode_bits & SPI_CPOL)
+ val |= A3700_SPI_CLK_POL;
+ else
+ val &= ~A3700_SPI_CLK_POL;
+
+ if (mode_bits & SPI_CPHA)
+ val |= A3700_SPI_CLK_PHA;
+ else
+ val &= ~A3700_SPI_CLK_PHA;
+
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
+ unsigned int speed_hz, u16 mode)
+{
+ u32 val;
+ u32 prescale;
+
+ prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz);
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val = val & ~A3700_SPI_CLK_PRESCALE_MASK;
+
+ val = val | (prescale & A3700_SPI_CLK_PRESCALE_MASK);
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ if (prescale <= 2) {
+ val = spireg_read(a3700_spi, A3700_SPI_IF_TIME_REG);
+ val |= A3700_SPI_CLK_CAPT_EDGE;
+ spireg_write(a3700_spi, A3700_SPI_IF_TIME_REG, val);
+ }
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val &= ~(A3700_SPI_CLK_POL | A3700_SPI_CLK_PHA);
+
+ if (mode & SPI_CPOL)
+ val |= A3700_SPI_CLK_POL;
+
+ if (mode & SPI_CPHA)
+ val |= A3700_SPI_CLK_PHA;
+
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static void a3700_spi_bytelen_set(struct a3700_spi *a3700_spi, unsigned int len)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ if (len == 4)
+ val |= A3700_SPI_BYTE_LEN;
+ else
+ val &= ~A3700_SPI_BYTE_LEN;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ a3700_spi->byte_len = len;
+}
+
+static int a3700_spi_fifo_flush(struct a3700_spi *a3700_spi)
+{
+ int timeout = A3700_SPI_TIMEOUT;
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val |= A3700_SPI_FIFO_FLUSH;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ while (--timeout) {
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ if (!(val & A3700_SPI_FIFO_FLUSH))
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int a3700_spi_init(struct a3700_spi *a3700_spi)
+{
+ struct spi_master *master = a3700_spi->master;
+ u32 val;
+ int i, ret = 0;
+
+ /* Reset SPI unit */
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val |= A3700_SPI_SRST;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ udelay(A3700_SPI_TIMEOUT);
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val &= ~A3700_SPI_SRST;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ /* Disable AUTO_CS and deactivate all chip-selects */
+ a3700_spi_auto_cs_unset(a3700_spi);
+ for (i = 0; i < master->num_chipselect; i++)
+ a3700_spi_deactivate_cs(a3700_spi, i);
+
+ /* Enable FIFO mode */
+ a3700_spi_fifo_mode_set(a3700_spi);
+
+ /* Set SPI mode */
+ a3700_spi_mode_set(a3700_spi, master->mode_bits);
+
+ /* Reset counters */
+ spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, 0);
+ spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG, 0);
+
+ /* Mask the interrupts and clear cause bits */
+ spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
+ spireg_write(a3700_spi, A3700_SPI_INT_STAT_REG, ~0U);
+
+ return ret;
+}
+
+static irqreturn_t a3700_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct a3700_spi *a3700_spi;
+ u32 cause;
+
+ a3700_spi = spi_master_get_devdata(master);
+
+ /* Get interrupt causes */
+ cause = spireg_read(a3700_spi, A3700_SPI_INT_STAT_REG);
+
+ if (!cause || !(a3700_spi->wait_mask & cause))
+ return IRQ_NONE;
+
+ /* mask and acknowledge the SPI interrupts */
+ spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
+ spireg_write(a3700_spi, A3700_SPI_INT_STAT_REG, cause);
+
+ /* Wake up the transfer */
+ if (a3700_spi->wait_mask & cause)
+ complete(&a3700_spi->done);
+
+ return IRQ_HANDLED;
+}
+
+static bool a3700_spi_wait_completion(struct spi_device *spi)
+{
+ struct a3700_spi *a3700_spi;
+ unsigned int timeout;
+ unsigned int ctrl_reg;
+ unsigned long timeout_jiffies;
+
+ a3700_spi = spi_master_get_devdata(spi->master);
+
+	/*
+	 * The SPI interrupt is edge-triggered: it fires only when a status
+	 * bit changes from '0' to '1'. So before starting to wait for an
+	 * interrupt, check the status bit in the control register first;
+	 * if it is already set, there is no need to wait.
+	 */
+ ctrl_reg = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+ if (a3700_spi->wait_mask & ctrl_reg)
+ return true;
+
+ reinit_completion(&a3700_spi->done);
+
+ spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG,
+ a3700_spi->wait_mask);
+
+ timeout_jiffies = msecs_to_jiffies(A3700_SPI_TIMEOUT);
+ timeout = wait_for_completion_timeout(&a3700_spi->done,
+ timeout_jiffies);
+
+ a3700_spi->wait_mask = 0;
+
+ if (timeout)
+ return true;
+
+	/*
+	 * The interrupt may fire right after the status bits were checked
+	 * above but before wait_for_completion_timeout() starts waiting.
+	 * To avoid missing it, check the status bits in the control
+	 * register once more; if the expected bit is already set, consider
+	 * the interrupt received and return true.
+	 */
+ ctrl_reg = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+ if (a3700_spi->wait_mask & ctrl_reg)
+ return true;
+
+ spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
+
+ return true;
+}
+
+static bool a3700_spi_transfer_wait(struct spi_device *spi,
+ unsigned int bit_mask)
+{
+ struct a3700_spi *a3700_spi;
+
+ a3700_spi = spi_master_get_devdata(spi->master);
+ a3700_spi->wait_mask = bit_mask;
+
+ return a3700_spi_wait_completion(spi);
+}
+
+static void a3700_spi_fifo_thres_set(struct a3700_spi *a3700_spi,
+ unsigned int bytes)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val &= ~(A3700_SPI_FIFO_THRS_MASK << A3700_SPI_RFIFO_THRS_BIT);
+ val |= (bytes - 1) << A3700_SPI_RFIFO_THRS_BIT;
+ val &= ~(A3700_SPI_FIFO_THRS_MASK << A3700_SPI_WFIFO_THRS_BIT);
+ val |= (7 - bytes) << A3700_SPI_WFIFO_THRS_BIT;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static void a3700_spi_transfer_setup(struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct a3700_spi *a3700_spi;
+ unsigned int byte_len;
+
+ a3700_spi = spi_master_get_devdata(spi->master);
+
+ a3700_spi_clock_set(a3700_spi, xfer->speed_hz, spi->mode);
+
+ byte_len = xfer->bits_per_word >> 3;
+
+ a3700_spi_fifo_thres_set(a3700_spi, byte_len);
+}
+
+static void a3700_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct a3700_spi *a3700_spi = spi_master_get_devdata(spi->master);
+
+ if (!enable)
+ a3700_spi_activate_cs(a3700_spi, spi->chip_select);
+ else
+ a3700_spi_deactivate_cs(a3700_spi, spi->chip_select);
+}
+
+static void a3700_spi_header_set(struct a3700_spi *a3700_spi)
+{
+ u32 instr_cnt = 0, addr_cnt = 0, dummy_cnt = 0;
+ u32 val = 0;
+
+ /* Clear the header registers */
+ spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, 0);
+ spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, 0);
+ spireg_write(a3700_spi, A3700_SPI_IF_RMODE_REG, 0);
+
+ /* Set header counters */
+ if (a3700_spi->tx_buf) {
+ if (a3700_spi->buf_len <= a3700_spi->instr_cnt) {
+ instr_cnt = a3700_spi->buf_len;
+ } else if (a3700_spi->buf_len <= (a3700_spi->instr_cnt +
+ a3700_spi->addr_cnt)) {
+ instr_cnt = a3700_spi->instr_cnt;
+ addr_cnt = a3700_spi->buf_len - instr_cnt;
+ } else if (a3700_spi->buf_len <= a3700_spi->hdr_cnt) {
+ instr_cnt = a3700_spi->instr_cnt;
+ addr_cnt = a3700_spi->addr_cnt;
+			/*
+			 * Handle the normal write case with 1 byte of data.
+			 */
+ if (!a3700_spi->tx_buf[instr_cnt + addr_cnt])
+ dummy_cnt = a3700_spi->buf_len - instr_cnt -
+ addr_cnt;
+ }
+ val |= ((instr_cnt & A3700_SPI_INSTR_CNT_MASK)
+ << A3700_SPI_INSTR_CNT_BIT);
+ val |= ((addr_cnt & A3700_SPI_ADDR_CNT_MASK)
+ << A3700_SPI_ADDR_CNT_BIT);
+ val |= ((dummy_cnt & A3700_SPI_DUMMY_CNT_MASK)
+ << A3700_SPI_DUMMY_CNT_BIT);
+ }
+ spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val);
+
+ /* Update the buffer length to be transferred */
+ a3700_spi->buf_len -= (instr_cnt + addr_cnt + dummy_cnt);
+
+ /* Set Instruction */
+ val = 0;
+ while (instr_cnt--) {
+ val = (val << 8) | a3700_spi->tx_buf[0];
+ a3700_spi->tx_buf++;
+ }
+ spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, val);
+
+ /* Set Address */
+ val = 0;
+ while (addr_cnt--) {
+ val = (val << 8) | a3700_spi->tx_buf[0];
+ a3700_spi->tx_buf++;
+ }
+ spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val);
+}
+
+static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+ return (val & A3700_SPI_WFIFO_FULL);
+}
+
+static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi)
+{
+ u32 val;
+ int i = 0;
+
+ while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) {
+ val = 0;
+ if (a3700_spi->buf_len >= 4) {
+ val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf);
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
+
+ a3700_spi->buf_len -= 4;
+ a3700_spi->tx_buf += 4;
+ } else {
+			/*
+			 * If fewer than 4 bytes remain, pad the 32-bit word
+			 * with all ones so the byte lanes beyond the end of
+			 * the buffer do not carry unintended data.
+			 */
+ val = GENMASK(31, 0);
+ while (a3700_spi->buf_len) {
+ val &= ~(0xff << (8 * i));
+ val |= *a3700_spi->tx_buf++ << (8 * i);
+ i++;
+ a3700_spi->buf_len--;
+
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG,
+ val);
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int a3700_is_rfifo_empty(struct a3700_spi *a3700_spi)
+{
+ u32 val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+
+ return (val & A3700_SPI_RFIFO_EMPTY);
+}
+
+static int a3700_spi_fifo_read(struct a3700_spi *a3700_spi)
+{
+ u32 val;
+
+ while (!a3700_is_rfifo_empty(a3700_spi) && a3700_spi->buf_len) {
+ val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
+ if (a3700_spi->buf_len >= 4) {
+ u32 data = le32_to_cpu(val);
+ memcpy(a3700_spi->rx_buf, &data, 4);
+
+ a3700_spi->buf_len -= 4;
+ a3700_spi->rx_buf += 4;
+ } else {
+			/*
+			 * When fewer than 4 bytes remain, copy only the
+			 * remaining rx buffer bytes to avoid writing past
+			 * the end of the buffer.
+			 */
+ while (a3700_spi->buf_len) {
+ *a3700_spi->rx_buf = val & 0xff;
+ val >>= 8;
+
+ a3700_spi->buf_len--;
+ a3700_spi->rx_buf++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void a3700_spi_transfer_abort_fifo(struct a3700_spi *a3700_spi)
+{
+ int timeout = A3700_SPI_TIMEOUT;
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val |= A3700_SPI_XFER_STOP;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ while (--timeout) {
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ if (!(val & A3700_SPI_XFER_START))
+ break;
+ udelay(1);
+ }
+
+ a3700_spi_fifo_flush(a3700_spi);
+
+ val &= ~A3700_SPI_XFER_STOP;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static int a3700_spi_prepare_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ struct spi_device *spi = message->spi;
+ int ret;
+
+ ret = clk_enable(a3700_spi->clk);
+ if (ret) {
+ dev_err(&spi->dev, "failed to enable clk with error %d\n", ret);
+ return ret;
+ }
+
+ /* Flush the FIFOs */
+ ret = a3700_spi_fifo_flush(a3700_spi);
+ if (ret)
+ return ret;
+
+ a3700_spi_bytelen_set(a3700_spi, 4);
+
+ return 0;
+}
+
+static int a3700_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ int ret = 0, timeout = A3700_SPI_TIMEOUT;
+ unsigned int nbits = 0;
+ u32 val;
+
+ a3700_spi_transfer_setup(spi, xfer);
+
+ a3700_spi->tx_buf = xfer->tx_buf;
+ a3700_spi->rx_buf = xfer->rx_buf;
+ a3700_spi->buf_len = xfer->len;
+
+ /* SPI transfer headers */
+ a3700_spi_header_set(a3700_spi);
+
+ if (xfer->tx_buf)
+ nbits = xfer->tx_nbits;
+ else if (xfer->rx_buf)
+ nbits = xfer->rx_nbits;
+
+ a3700_spi_pin_mode_set(a3700_spi, nbits);
+
+ if (xfer->rx_buf) {
+ /* Set read data length */
+ spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG,
+ a3700_spi->buf_len);
+ /* Start READ transfer */
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val &= ~A3700_SPI_RW_EN;
+ val |= A3700_SPI_XFER_START;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+ } else if (xfer->tx_buf) {
+ /* Start Write transfer */
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val |= (A3700_SPI_XFER_START | A3700_SPI_RW_EN);
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+		/*
+		 * Set xmit_data when there is payload data to be written to
+		 * the SPI device; if the instruction in SPI_INSTR requires
+		 * no payload, the flag stays false.
+		 */
+ a3700_spi->xmit_data = (a3700_spi->buf_len != 0);
+ }
+
+ while (a3700_spi->buf_len) {
+ if (a3700_spi->tx_buf) {
+ /* Wait wfifo ready */
+ if (!a3700_spi_transfer_wait(spi,
+ A3700_SPI_WFIFO_RDY)) {
+ dev_err(&spi->dev,
+ "wait wfifo ready timed out\n");
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+ /* Fill up the wfifo */
+ ret = a3700_spi_fifo_write(a3700_spi);
+ if (ret)
+ goto error;
+ } else if (a3700_spi->rx_buf) {
+ /* Wait rfifo ready */
+ if (!a3700_spi_transfer_wait(spi,
+ A3700_SPI_RFIFO_RDY)) {
+ dev_err(&spi->dev,
+ "wait rfifo ready timed out\n");
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+ /* Drain out the rfifo */
+ ret = a3700_spi_fifo_read(a3700_spi);
+ if (ret)
+ goto error;
+ }
+ }
+
+ /*
+ * Stop a write transfer in fifo mode:
+ * - wait all the bytes in wfifo to be shifted out
+ * - set XFER_STOP bit
+ * - wait XFER_START bit clear
+ * - clear XFER_STOP bit
+ * Stop a read transfer in fifo mode:
+ * - the hardware is to reset the XFER_START bit
+ * after the number of bytes indicated in DIN_CNT
+ * register
+ * - just wait XFER_START bit clear
+ */
+ if (a3700_spi->tx_buf) {
+ if (a3700_spi->xmit_data) {
+			/*
+			 * If data were written to the SPI device, wait until
+			 * SPI_WFIFO_EMPTY is set, i.e. all data have been
+			 * shifted out of the write FIFO.
+			 */
+ if (!a3700_spi_transfer_wait(spi,
+ A3700_SPI_WFIFO_EMPTY)) {
+ dev_err(&spi->dev, "wait wfifo empty timed out\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+			/*
+			 * If the instruction in SPI_INSTR requires no data to
+			 * be written to the SPI device, wait until XFER_RDY is
+			 * set, i.e. the SPI interface is idle.
+			 */
+ if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) {
+ dev_err(&spi->dev, "wait xfer ready timed out\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val |= A3700_SPI_XFER_STOP;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+ }
+
+ while (--timeout) {
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ if (!(val & A3700_SPI_XFER_START))
+ break;
+ udelay(1);
+ }
+
+ if (timeout == 0) {
+ dev_err(&spi->dev, "wait transfer start clear timed out\n");
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+
+ val &= ~A3700_SPI_XFER_STOP;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+ goto out;
+
+error:
+ a3700_spi_transfer_abort_fifo(a3700_spi);
+out:
+ spi_finalize_current_transfer(master);
+
+ return ret;
+}
+
+static int a3700_spi_unprepare_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+
+ clk_disable(a3700_spi->clk);
+
+ return 0;
+}
+
+static const struct of_device_id a3700_spi_dt_ids[] = {
+ { .compatible = "marvell,armada-3700-spi", .data = NULL },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, a3700_spi_dt_ids);
+
+static int a3700_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *of_node = dev->of_node;
+ struct resource *res;
+ struct spi_master *master;
+ struct a3700_spi *spi;
+ u32 num_cs = 0;
+ int ret = 0;
+
+ master = spi_alloc_master(dev, sizeof(*spi));
+ if (!master) {
+ dev_err(dev, "master allocation failed\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (of_property_read_u32(of_node, "num-cs", &num_cs)) {
+ dev_err(dev, "could not find num-cs\n");
+ ret = -ENXIO;
+ goto error;
+ }
+
+ master->bus_num = pdev->id;
+ master->dev.of_node = of_node;
+ master->mode_bits = SPI_MODE_3;
+ master->num_chipselect = num_cs;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(32);
+ master->prepare_message = a3700_spi_prepare_message;
+ master->transfer_one = a3700_spi_transfer_one;
+ master->unprepare_message = a3700_spi_unprepare_message;
+ master->set_cs = a3700_spi_set_cs;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+	master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
+			      SPI_RX_QUAD | SPI_TX_QUAD);
+
+ platform_set_drvdata(pdev, master);
+
+ spi = spi_master_get_devdata(master);
+ memset(spi, 0, sizeof(struct a3700_spi));
+
+ spi->master = master;
+ spi->instr_cnt = A3700_INSTR_CNT;
+ spi->addr_cnt = A3700_ADDR_CNT;
+ spi->hdr_cnt = A3700_INSTR_CNT + A3700_ADDR_CNT +
+ A3700_DUMMY_CNT;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ spi->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(spi->base)) {
+ ret = PTR_ERR(spi->base);
+ goto error;
+ }
+
+ spi->irq = platform_get_irq(pdev, 0);
+ if (spi->irq < 0) {
+ dev_err(dev, "could not get irq: %d\n", spi->irq);
+ ret = -ENXIO;
+ goto error;
+ }
+
+ init_completion(&spi->done);
+
+ spi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(spi->clk)) {
+ dev_err(dev, "could not find clk: %ld\n", PTR_ERR(spi->clk));
+ goto error;
+ }
+
+ ret = clk_prepare(spi->clk);
+ if (ret) {
+ dev_err(dev, "could not prepare clk: %d\n", ret);
+ goto error;
+ }
+
+ ret = a3700_spi_init(spi);
+ if (ret)
+ goto error_clk;
+
+ ret = devm_request_irq(dev, spi->irq, a3700_spi_interrupt, 0,
+ dev_name(dev), master);
+ if (ret) {
+ dev_err(dev, "could not request IRQ: %d\n", ret);
+ goto error_clk;
+ }
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret) {
+ dev_err(dev, "Failed to register master\n");
+ goto error_clk;
+ }
+
+ return 0;
+
+error_clk:
+ clk_disable_unprepare(spi->clk);
+error:
+ spi_master_put(master);
+out:
+ return ret;
+}
+
+static int a3700_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct a3700_spi *spi = spi_master_get_devdata(master);
+
+ clk_unprepare(spi->clk);
+ spi_master_put(master);
+
+ return 0;
+}
+
+static struct platform_driver a3700_spi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(a3700_spi_dt_ids),
+ },
+ .probe = a3700_spi_probe,
+ .remove = a3700_spi_remove,
+};
+
+module_platform_driver(a3700_spi_driver);
+
+MODULE_DESCRIPTION("Armada-3700 SPI driver");
+MODULE_AUTHOR("Wilson Ding <dingwei@marvell.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
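Editor's note on the new driver: a3700_spi_clock_set() derives the clock prescaler as DIV_ROUND_UP(parent clock rate, requested speed) and enables capture-edge sampling only when the prescaler ends up at 2 or less. A worked sketch with assumed example rates (the 160 MHz figure is an illustration, not taken from the driver):

    /*
     * With an assumed 160 MHz parent clock:
     *   20 MHz requested -> DIV_ROUND_UP(160000000, 20000000) = 8,
     *       A3700_SPI_CLK_CAPT_EDGE stays clear;
     *  100 MHz requested -> DIV_ROUND_UP(160000000, 100000000) = 2,
     *       A3700_SPI_CLK_CAPT_EDGE is set, as in a3700_spi_clock_set().
     */
    static unsigned int a3700_example_prescale(unsigned long parent_rate,
    					   unsigned int speed_hz)
    {
    	return DIV_ROUND_UP(parent_rate, speed_hz) & A3700_SPI_CLK_PRESCALE_MASK;
    }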
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 6165bf21d427..f369174fbd88 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -304,6 +304,7 @@ static const struct of_device_id ath79_spi_of_match[] = {
{ .compatible = "qca,ar7100-spi", },
{ },
};
+MODULE_DEVICE_TABLE(of, ath79_spi_of_match);
static struct platform_driver ath79_spi_driver = {
.probe = ath79_spi_probe,
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 8feac599e9ab..0e7712bac3b6 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
@@ -264,17 +265,6 @@
#define AUTOSUSPEND_TIMEOUT 2000
-struct atmel_spi_dma {
- struct dma_chan *chan_rx;
- struct dma_chan *chan_tx;
- struct scatterlist sgrx;
- struct scatterlist sgtx;
- struct dma_async_tx_descriptor *data_desc_rx;
- struct dma_async_tx_descriptor *data_desc_tx;
-
- struct at_dma_slave dma_slave;
-};
-
struct atmel_spi_caps {
bool is_spi2;
bool has_wdrbt;
@@ -295,6 +285,7 @@ struct atmel_spi {
int irq;
struct clk *clk;
struct platform_device *pdev;
+ unsigned long spi_clk;
struct spi_transfer *current_transfer;
int current_remaining_bytes;
@@ -302,17 +293,11 @@ struct atmel_spi {
struct completion xfer_completion;
- /* scratch buffer */
- void *buffer;
- dma_addr_t buffer_dma;
-
struct atmel_spi_caps caps;
bool use_dma;
bool use_pdc;
bool use_cs_gpios;
- /* dmaengine data */
- struct atmel_spi_dma dma;
bool keep_cs;
bool cs_active;
@@ -326,7 +311,7 @@ struct atmel_spi_device {
u32 csr;
};
-#define BUFFER_SIZE PAGE_SIZE
+#define SPI_MAX_DMA_XFER 65535 /* true for both PDC and DMA */
#define INVALID_DMA_ADDRESS 0xffffffff
/*
@@ -456,10 +441,20 @@ static inline bool atmel_spi_use_dma(struct atmel_spi *as,
return as->use_dma && xfer->len >= DMA_MIN_BYTES;
}
+static bool atmel_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+
+ return atmel_spi_use_dma(as, xfer);
+}
+
static int atmel_spi_dma_slave_config(struct atmel_spi *as,
struct dma_slave_config *slave_config,
u8 bits_per_word)
{
+ struct spi_master *master = platform_get_drvdata(as->pdev);
int err = 0;
if (bits_per_word > 8) {
@@ -491,7 +486,7 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as,
* path works the same whether FIFOs are available (and enabled) or not.
*/
slave_config->direction = DMA_MEM_TO_DEV;
- if (dmaengine_slave_config(as->dma.chan_tx, slave_config)) {
+ if (dmaengine_slave_config(master->dma_tx, slave_config)) {
dev_err(&as->pdev->dev,
"failed to configure tx dma channel\n");
err = -EINVAL;
@@ -506,7 +501,7 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as,
* enabled) or not.
*/
slave_config->direction = DMA_DEV_TO_MEM;
- if (dmaengine_slave_config(as->dma.chan_rx, slave_config)) {
+ if (dmaengine_slave_config(master->dma_rx, slave_config)) {
dev_err(&as->pdev->dev,
"failed to configure rx dma channel\n");
err = -EINVAL;
@@ -515,7 +510,8 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as,
return err;
}
-static int atmel_spi_configure_dma(struct atmel_spi *as)
+static int atmel_spi_configure_dma(struct spi_master *master,
+ struct atmel_spi *as)
{
struct dma_slave_config slave_config;
struct device *dev = &as->pdev->dev;
@@ -525,26 +521,26 @@ static int atmel_spi_configure_dma(struct atmel_spi *as)
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- as->dma.chan_tx = dma_request_slave_channel_reason(dev, "tx");
- if (IS_ERR(as->dma.chan_tx)) {
- err = PTR_ERR(as->dma.chan_tx);
+ master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+ if (IS_ERR(master->dma_tx)) {
+ err = PTR_ERR(master->dma_tx);
if (err == -EPROBE_DEFER) {
dev_warn(dev, "no DMA channel available at the moment\n");
- return err;
+ goto error_clear;
}
dev_err(dev,
"DMA TX channel not available, SPI unable to use DMA\n");
err = -EBUSY;
- goto error;
+ goto error_clear;
}
/*
* No reason to check EPROBE_DEFER here since we have already requested
* tx channel. If it fails here, it's for another reason.
*/
- as->dma.chan_rx = dma_request_slave_channel(dev, "rx");
+ master->dma_rx = dma_request_slave_channel(dev, "rx");
- if (!as->dma.chan_rx) {
+ if (!master->dma_rx) {
dev_err(dev,
"DMA RX channel not available, SPI unable to use DMA\n");
err = -EBUSY;
@@ -557,31 +553,38 @@ static int atmel_spi_configure_dma(struct atmel_spi *as)
dev_info(&as->pdev->dev,
"Using %s (tx) and %s (rx) for DMA transfers\n",
- dma_chan_name(as->dma.chan_tx),
- dma_chan_name(as->dma.chan_rx));
+ dma_chan_name(master->dma_tx),
+ dma_chan_name(master->dma_rx));
+
return 0;
error:
- if (as->dma.chan_rx)
- dma_release_channel(as->dma.chan_rx);
- if (!IS_ERR(as->dma.chan_tx))
- dma_release_channel(as->dma.chan_tx);
+ if (master->dma_rx)
+ dma_release_channel(master->dma_rx);
+ if (!IS_ERR(master->dma_tx))
+ dma_release_channel(master->dma_tx);
+error_clear:
+ master->dma_tx = master->dma_rx = NULL;
return err;
}
-static void atmel_spi_stop_dma(struct atmel_spi *as)
+static void atmel_spi_stop_dma(struct spi_master *master)
{
- if (as->dma.chan_rx)
- dmaengine_terminate_all(as->dma.chan_rx);
- if (as->dma.chan_tx)
- dmaengine_terminate_all(as->dma.chan_tx);
+ if (master->dma_rx)
+ dmaengine_terminate_all(master->dma_rx);
+ if (master->dma_tx)
+ dmaengine_terminate_all(master->dma_tx);
}
-static void atmel_spi_release_dma(struct atmel_spi *as)
+static void atmel_spi_release_dma(struct spi_master *master)
{
- if (as->dma.chan_rx)
- dma_release_channel(as->dma.chan_rx);
- if (as->dma.chan_tx)
- dma_release_channel(as->dma.chan_tx);
+ if (master->dma_rx) {
+ dma_release_channel(master->dma_rx);
+ master->dma_rx = NULL;
+ }
+ if (master->dma_tx) {
+ dma_release_channel(master->dma_tx);
+ master->dma_tx = NULL;
+ }
}
/* This function is called by the DMA driver from tasklet context */
@@ -611,14 +614,10 @@ static void atmel_spi_next_xfer_single(struct spi_master *master,
cpu_relax();
}
- if (xfer->tx_buf) {
- if (xfer->bits_per_word > 8)
- spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
- else
- spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
- } else {
- spi_writel(as, TDR, 0);
- }
+ if (xfer->bits_per_word > 8)
+ spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
+ else
+ spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
dev_dbg(master->dev.parent,
" start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
@@ -665,17 +664,12 @@ static void atmel_spi_next_xfer_fifo(struct spi_master *master,
/* Fill TX FIFO */
while (num_data >= 2) {
- if (xfer->tx_buf) {
- if (xfer->bits_per_word > 8) {
- td0 = *words++;
- td1 = *words++;
- } else {
- td0 = *bytes++;
- td1 = *bytes++;
- }
+ if (xfer->bits_per_word > 8) {
+ td0 = *words++;
+ td1 = *words++;
} else {
- td0 = 0;
- td1 = 0;
+ td0 = *bytes++;
+ td1 = *bytes++;
}
spi_writel(as, TDR, (td1 << 16) | td0);
@@ -683,14 +677,10 @@ static void atmel_spi_next_xfer_fifo(struct spi_master *master,
}
if (num_data) {
- if (xfer->tx_buf) {
- if (xfer->bits_per_word > 8)
- td0 = *words++;
- else
- td0 = *bytes++;
- } else {
- td0 = 0;
- }
+ if (xfer->bits_per_word > 8)
+ td0 = *words++;
+ else
+ td0 = *bytes++;
spi_writew(as, TDR, td0);
num_data--;
@@ -730,13 +720,12 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
u32 *plen)
{
struct atmel_spi *as = spi_master_get_devdata(master);
- struct dma_chan *rxchan = as->dma.chan_rx;
- struct dma_chan *txchan = as->dma.chan_tx;
+ struct dma_chan *rxchan = master->dma_rx;
+ struct dma_chan *txchan = master->dma_tx;
struct dma_async_tx_descriptor *rxdesc;
struct dma_async_tx_descriptor *txdesc;
struct dma_slave_config slave_config;
dma_cookie_t cookie;
- u32 len = *plen;
dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n");
@@ -747,44 +736,22 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
/* release lock for DMA operations */
atmel_spi_unlock(as);
- /* prepare the RX dma transfer */
- sg_init_table(&as->dma.sgrx, 1);
- if (xfer->rx_buf) {
- as->dma.sgrx.dma_address = xfer->rx_dma + xfer->len - *plen;
- } else {
- as->dma.sgrx.dma_address = as->buffer_dma;
- if (len > BUFFER_SIZE)
- len = BUFFER_SIZE;
- }
-
- /* prepare the TX dma transfer */
- sg_init_table(&as->dma.sgtx, 1);
- if (xfer->tx_buf) {
- as->dma.sgtx.dma_address = xfer->tx_dma + xfer->len - *plen;
- } else {
- as->dma.sgtx.dma_address = as->buffer_dma;
- if (len > BUFFER_SIZE)
- len = BUFFER_SIZE;
- memset(as->buffer, 0, len);
- }
-
- sg_dma_len(&as->dma.sgtx) = len;
- sg_dma_len(&as->dma.sgrx) = len;
-
- *plen = len;
+ *plen = xfer->len;
if (atmel_spi_dma_slave_config(as, &slave_config,
xfer->bits_per_word))
goto err_exit;
/* Send both scatterlists */
- rxdesc = dmaengine_prep_slave_sg(rxchan, &as->dma.sgrx, 1,
+ rxdesc = dmaengine_prep_slave_sg(rxchan,
+ xfer->rx_sg.sgl, xfer->rx_sg.nents,
DMA_FROM_DEVICE,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!rxdesc)
goto err_dma;
- txdesc = dmaengine_prep_slave_sg(txchan, &as->dma.sgtx, 1,
+ txdesc = dmaengine_prep_slave_sg(txchan,
+ xfer->tx_sg.sgl, xfer->tx_sg.nents,
DMA_TO_DEVICE,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txdesc)
@@ -818,7 +785,7 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
err_dma:
spi_writel(as, IDR, SPI_BIT(OVRES));
- atmel_spi_stop_dma(as);
+ atmel_spi_stop_dma(master);
err_exit:
atmel_spi_lock(as);
return -ENOMEM;
@@ -830,30 +797,10 @@ static void atmel_spi_next_xfer_data(struct spi_master *master,
dma_addr_t *rx_dma,
u32 *plen)
{
- struct atmel_spi *as = spi_master_get_devdata(master);
- u32 len = *plen;
-
- /* use scratch buffer only when rx or tx data is unspecified */
- if (xfer->rx_buf)
- *rx_dma = xfer->rx_dma + xfer->len - *plen;
- else {
- *rx_dma = as->buffer_dma;
- if (len > BUFFER_SIZE)
- len = BUFFER_SIZE;
- }
-
- if (xfer->tx_buf)
- *tx_dma = xfer->tx_dma + xfer->len - *plen;
- else {
- *tx_dma = as->buffer_dma;
- if (len > BUFFER_SIZE)
- len = BUFFER_SIZE;
- memset(as->buffer, 0, len);
- dma_sync_single_for_device(&as->pdev->dev,
- as->buffer_dma, len, DMA_TO_DEVICE);
- }
-
- *plen = len;
+ *rx_dma = xfer->rx_dma + xfer->len - *plen;
+ *tx_dma = xfer->tx_dma + xfer->len - *plen;
+ if (*plen > master->max_dma_len)
+ *plen = master->max_dma_len;
}
static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
@@ -864,7 +811,7 @@ static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
unsigned long bus_hz;
/* v1 chips start out at half the peripheral bus speed. */
- bus_hz = clk_get_rate(as->clk);
+ bus_hz = as->spi_clk;
if (!atmel_spi_is_v2(as))
bus_hz /= 2;
@@ -1025,16 +972,12 @@ atmel_spi_pump_single_data(struct atmel_spi *as, struct spi_transfer *xfer)
u16 *rxp16;
unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
- if (xfer->rx_buf) {
- if (xfer->bits_per_word > 8) {
- rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
- *rxp16 = spi_readl(as, RDR);
- } else {
- rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
- *rxp = spi_readl(as, RDR);
- }
+ if (xfer->bits_per_word > 8) {
+ rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
+ *rxp16 = spi_readl(as, RDR);
} else {
- spi_readl(as, RDR);
+ rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
+ *rxp = spi_readl(as, RDR);
}
if (xfer->bits_per_word > 8) {
if (as->current_remaining_bytes > 2)
@@ -1073,12 +1016,10 @@ atmel_spi_pump_fifo_data(struct atmel_spi *as, struct spi_transfer *xfer)
/* Read data */
while (num_data) {
rd = spi_readl(as, RDR);
- if (xfer->rx_buf) {
- if (xfer->bits_per_word > 8)
- *words++ = rd;
- else
- *bytes++ = rd;
- }
+ if (xfer->bits_per_word > 8)
+ *words++ = rd;
+ else
+ *bytes++ = rd;
num_data--;
}
}
@@ -1204,7 +1145,6 @@ static int atmel_spi_setup(struct spi_device *spi)
u32 csr;
unsigned int bits = spi->bits_per_word;
unsigned int npcs_pin;
- int ret;
as = spi_master_get_devdata(spi->master);
@@ -1247,16 +1187,9 @@ static int atmel_spi_setup(struct spi_device *spi)
if (!asd)
return -ENOMEM;
- if (as->use_cs_gpios) {
- ret = gpio_request(npcs_pin, dev_name(&spi->dev));
- if (ret) {
- kfree(asd);
- return ret;
- }
-
+ if (as->use_cs_gpios)
gpio_direction_output(npcs_pin,
!(spi->mode & SPI_CS_HIGH));
- }
asd->npcs_pin = npcs_pin;
spi->controller_state = asd;
@@ -1307,7 +1240,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
* better fault reporting.
*/
if ((!msg->is_dma_mapped)
- && (atmel_spi_use_dma(as, xfer) || as->use_pdc)) {
+ && as->use_pdc) {
if (atmel_spi_dma_map_xfer(as, xfer) < 0)
return -ENOMEM;
}
@@ -1380,11 +1313,11 @@ static int atmel_spi_one_transfer(struct spi_master *master,
spi_readl(as, SR);
} else if (atmel_spi_use_dma(as, xfer)) {
- atmel_spi_stop_dma(as);
+ atmel_spi_stop_dma(master);
}
if (!msg->is_dma_mapped
- && (atmel_spi_use_dma(as, xfer) || as->use_pdc))
+ && as->use_pdc)
atmel_spi_dma_unmap_xfer(master, xfer);
return 0;
@@ -1395,7 +1328,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
}
if (!msg->is_dma_mapped
- && (atmel_spi_use_dma(as, xfer) || as->use_pdc))
+ && as->use_pdc)
atmel_spi_dma_unmap_xfer(master, xfer);
if (xfer->delay_usecs)
@@ -1471,13 +1404,11 @@ msg_done:
static void atmel_spi_cleanup(struct spi_device *spi)
{
struct atmel_spi_device *asd = spi->controller_state;
- unsigned gpio = (unsigned long) spi->controller_data;
if (!asd)
return;
spi->controller_state = NULL;
- gpio_free(gpio);
kfree(asd);
}
@@ -1499,6 +1430,39 @@ static void atmel_get_caps(struct atmel_spi *as)
}
/*-------------------------------------------------------------------------*/
+static int atmel_spi_gpio_cs(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ struct device_node *np = master->dev.of_node;
+ int i;
+ int ret = 0;
+ int nb = 0;
+
+ if (!as->use_cs_gpios)
+ return 0;
+
+ if (!np)
+ return 0;
+
+ nb = of_gpio_named_count(np, "cs-gpios");
+ for (i = 0; i < nb; i++) {
+ int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
+ "cs-gpios", i);
+
+ if (cs_gpio == -EPROBE_DEFER)
+ return cs_gpio;
+
+ if (gpio_is_valid(cs_gpio)) {
+ ret = devm_gpio_request(&pdev->dev, cs_gpio,
+ dev_name(&pdev->dev));
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
static int atmel_spi_probe(struct platform_device *pdev)
{
@@ -1537,29 +1501,23 @@ static int atmel_spi_probe(struct platform_device *pdev)
master->bus_num = pdev->id;
master->num_chipselect = master->dev.of_node ? 0 : 4;
master->setup = atmel_spi_setup;
+ master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
master->transfer_one_message = atmel_spi_transfer_one_message;
master->cleanup = atmel_spi_cleanup;
master->auto_runtime_pm = true;
+ master->max_dma_len = SPI_MAX_DMA_XFER;
+ master->can_dma = atmel_spi_can_dma;
platform_set_drvdata(pdev, master);
as = spi_master_get_devdata(master);
- /*
- * Scratch buffer is used for throwaway rx and tx data.
- * It's coherent to minimize dcache pollution.
- */
- as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
- &as->buffer_dma, GFP_KERNEL);
- if (!as->buffer)
- goto out_free;
-
spin_lock_init(&as->lock);
as->pdev = pdev;
as->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(as->regs)) {
ret = PTR_ERR(as->regs);
- goto out_free_buffer;
+ goto out_unmap_regs;
}
as->phybase = regs->start;
as->irq = irq;
@@ -1577,14 +1535,19 @@ static int atmel_spi_probe(struct platform_device *pdev)
master->num_chipselect = 4;
}
+ ret = atmel_spi_gpio_cs(pdev);
+ if (ret)
+ goto out_unmap_regs;
+
as->use_dma = false;
as->use_pdc = false;
if (as->caps.has_dma_support) {
- ret = atmel_spi_configure_dma(as);
- if (ret == 0)
+ ret = atmel_spi_configure_dma(master, as);
+ if (ret == 0) {
as->use_dma = true;
- else if (ret == -EPROBE_DEFER)
+ } else if (ret == -EPROBE_DEFER) {
return ret;
+ }
} else {
as->use_pdc = true;
}
@@ -1606,6 +1569,9 @@ static int atmel_spi_probe(struct platform_device *pdev)
ret = clk_prepare_enable(clk);
if (ret)
goto out_free_irq;
+
+ as->spi_clk = clk_get_rate(clk);
+
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
if (as->caps.has_wdrbt) {
@@ -1626,10 +1592,6 @@ static int atmel_spi_probe(struct platform_device *pdev)
spi_writel(as, CR, SPI_BIT(FIFOEN));
}
- /* go! */
- dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
- (unsigned long)regs->start, irq);
-
pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
@@ -1639,6 +1601,10 @@ static int atmel_spi_probe(struct platform_device *pdev)
if (ret)
goto out_free_dma;
+ /* go! */
+ dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
+ (unsigned long)regs->start, irq);
+
return 0;
out_free_dma:
@@ -1646,16 +1612,13 @@ out_free_dma:
pm_runtime_set_suspended(&pdev->dev);
if (as->use_dma)
- atmel_spi_release_dma(as);
+ atmel_spi_release_dma(master);
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
clk_disable_unprepare(clk);
out_free_irq:
out_unmap_regs:
-out_free_buffer:
- dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
- as->buffer_dma);
out_free:
spi_master_put(master);
return ret;
@@ -1671,8 +1634,8 @@ static int atmel_spi_remove(struct platform_device *pdev)
/* reset the hardware and block queue progress */
spin_lock_irq(&as->lock);
if (as->use_dma) {
- atmel_spi_stop_dma(as);
- atmel_spi_release_dma(as);
+ atmel_spi_stop_dma(master);
+ atmel_spi_release_dma(master);
}
spi_writel(as, CR, SPI_BIT(SWRST));
@@ -1680,9 +1643,6 @@ static int atmel_spi_remove(struct platform_device *pdev)
spi_readl(as, SR);
spin_unlock_irq(&as->lock);
- dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
- as->buffer_dma);
-
clk_disable_unprepare(as->clk);
pm_runtime_put_noidle(&pdev->dev);
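A minimal client-side sketch of why the coherent scratch buffer can go away once the probe above sets SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX: with those flags the SPI core is expected to map dummy buffers for whichever direction a transfer omits. The function name below is made up for illustration and is not part of this patch.

static int example_write_only(struct spi_device *spi, const void *cmd, size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf = cmd,	/* rx_buf left NULL: the core supplies a dummy buffer */
		.len = len,
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	return spi_sync(spi, &msg);	/* blocking helper from linux/spi/spi.h */
}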
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 2b1456e5e221..319225d7e761 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -574,6 +574,7 @@ static const struct of_device_id spi_engine_match_table[] = {
{ .compatible = "adi,axi-spi-engine-1.00.a" },
{ },
};
+MODULE_DEVICE_TABLE(of, spi_engine_match_table);
static struct platform_driver spi_engine_driver = {
.probe = spi_engine_probe,
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 27960e46135d..b715a26a9148 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -502,6 +502,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
master->handle_err = dw_spi_handle_err;
master->max_speed_hz = dws->max_freq;
master->dev.of_node = dev->of_node;
+ master->flags = SPI_MASTER_GPIO_SS;
/* Basic HW init */
spi_hw_init(dev, dws);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index a67b0ff6a362..14c8e7ce1913 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -15,6 +15,8 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -40,6 +42,7 @@
#define TRAN_STATE_WORD_ODD_NUM 0x04
#define DSPI_FIFO_SIZE 4
+#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
#define SPI_MCR 0x00
#define SPI_MCR_MASTER (1 << 31)
@@ -72,6 +75,11 @@
#define SPI_SR_TCFQF 0x80000000
#define SPI_SR_CLEAR 0xdaad0000
+#define SPI_RSER_TFFFE BIT(25)
+#define SPI_RSER_TFFFD BIT(24)
+#define SPI_RSER_RFDFE BIT(17)
+#define SPI_RSER_RFDFD BIT(16)
+
#define SPI_RSER 0x30
#define SPI_RSER_EOQFE 0x10000000
#define SPI_RSER_TCFQE 0x80000000
@@ -109,6 +117,8 @@
#define SPI_TCR_TCNT_MAX 0x10000
+#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
+
struct chip_data {
u32 mcr_val;
u32 ctar_val;
@@ -118,6 +128,7 @@ struct chip_data {
enum dspi_trans_mode {
DSPI_EOQ_MODE = 0,
DSPI_TCFQ_MODE,
+ DSPI_DMA_MODE,
};
struct fsl_dspi_devtype_data {
@@ -126,7 +137,7 @@ struct fsl_dspi_devtype_data {
};
static const struct fsl_dspi_devtype_data vf610_data = {
- .trans_mode = DSPI_EOQ_MODE,
+ .trans_mode = DSPI_DMA_MODE,
.max_clock_factor = 2,
};
@@ -140,6 +151,23 @@ static const struct fsl_dspi_devtype_data ls2085a_data = {
.max_clock_factor = 8,
};
+struct fsl_dspi_dma {
+ /* Length of the current transfer, in 32-bit (DSPI_FIFO_SIZE-byte) FIFO entries */
+ u32 curr_xfer_len;
+
+ u32 *tx_dma_buf;
+ struct dma_chan *chan_tx;
+ dma_addr_t tx_dma_phys;
+ struct completion cmd_tx_complete;
+ struct dma_async_tx_descriptor *tx_desc;
+
+ u32 *rx_dma_buf;
+ struct dma_chan *chan_rx;
+ dma_addr_t rx_dma_phys;
+ struct completion cmd_rx_complete;
+ struct dma_async_tx_descriptor *rx_desc;
+};
+
struct fsl_dspi {
struct spi_master *master;
struct platform_device *pdev;
@@ -166,8 +194,11 @@ struct fsl_dspi {
u32 waitflags;
u32 spi_tcnt;
+ struct fsl_dspi_dma *dma;
};
+static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word);
+
static inline int is_double_byte_mode(struct fsl_dspi *dspi)
{
unsigned int val;
@@ -177,6 +208,255 @@ static inline int is_double_byte_mode(struct fsl_dspi *dspi)
return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
}
+static void dspi_tx_dma_callback(void *arg)
+{
+ struct fsl_dspi *dspi = arg;
+ struct fsl_dspi_dma *dma = dspi->dma;
+
+ complete(&dma->cmd_tx_complete);
+}
+
+static void dspi_rx_dma_callback(void *arg)
+{
+ struct fsl_dspi *dspi = arg;
+ struct fsl_dspi_dma *dma = dspi->dma;
+ int rx_word;
+ int i;
+ u16 d;
+
+ rx_word = is_double_byte_mode(dspi);
+
+ if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) {
+ for (i = 0; i < dma->curr_xfer_len; i++) {
+ d = dspi->dma->rx_dma_buf[i];
+ rx_word ? (*(u16 *)dspi->rx = d) :
+ (*(u8 *)dspi->rx = d);
+ dspi->rx += rx_word + 1;
+ }
+ }
+
+ complete(&dma->cmd_rx_complete);
+}
+
+static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
+{
+ struct fsl_dspi_dma *dma = dspi->dma;
+ struct device *dev = &dspi->pdev->dev;
+ int time_left;
+ int tx_word;
+ int i;
+
+ tx_word = is_double_byte_mode(dspi);
+
+ for (i = 0; i < dma->curr_xfer_len; i++) {
+ dspi->dma->tx_dma_buf[i] = dspi_data_to_pushr(dspi, tx_word);
+ if ((dspi->cs_change) && (!dspi->len))
+ dspi->dma->tx_dma_buf[i] &= ~SPI_PUSHR_CONT;
+ }
+
+ dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
+ dma->tx_dma_phys,
+ dma->curr_xfer_len *
+ DMA_SLAVE_BUSWIDTH_4_BYTES,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!dma->tx_desc) {
+ dev_err(dev, "Not able to get desc for DMA xfer\n");
+ return -EIO;
+ }
+
+ dma->tx_desc->callback = dspi_tx_dma_callback;
+ dma->tx_desc->callback_param = dspi;
+ if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
+ dev_err(dev, "DMA submit failed\n");
+ return -EINVAL;
+ }
+
+ dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
+ dma->rx_dma_phys,
+ dma->curr_xfer_len *
+ DMA_SLAVE_BUSWIDTH_4_BYTES,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!dma->rx_desc) {
+ dev_err(dev, "Not able to get desc for DMA xfer\n");
+ return -EIO;
+ }
+
+ dma->rx_desc->callback = dspi_rx_dma_callback;
+ dma->rx_desc->callback_param = dspi;
+ if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
+ dev_err(dev, "DMA submit failed\n");
+ return -EINVAL;
+ }
+
+ reinit_completion(&dspi->dma->cmd_rx_complete);
+ reinit_completion(&dspi->dma->cmd_tx_complete);
+
+ dma_async_issue_pending(dma->chan_rx);
+ dma_async_issue_pending(dma->chan_tx);
+
+ time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
+ DMA_COMPLETION_TIMEOUT);
+ if (time_left == 0) {
+ dev_err(dev, "DMA tx timeout\n");
+ dmaengine_terminate_all(dma->chan_tx);
+ dmaengine_terminate_all(dma->chan_rx);
+ return -ETIMEDOUT;
+ }
+
+ time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
+ DMA_COMPLETION_TIMEOUT);
+ if (time_left == 0) {
+ dev_err(dev, "DMA rx timeout\n");
+ dmaengine_terminate_all(dma->chan_tx);
+ dmaengine_terminate_all(dma->chan_rx);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int dspi_dma_xfer(struct fsl_dspi *dspi)
+{
+ struct fsl_dspi_dma *dma = dspi->dma;
+ struct device *dev = &dspi->pdev->dev;
+ int curr_remaining_bytes;
+ int bytes_per_buffer;
+ int word = 1;
+ int ret = 0;
+
+ if (is_double_byte_mode(dspi))
+ word = 2;
+ curr_remaining_bytes = dspi->len;
+ bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
+ while (curr_remaining_bytes) {
+ /* Limit the transfer to what fits in one DMA buffer */
+ dma->curr_xfer_len = curr_remaining_bytes / word;
+ if (dma->curr_xfer_len > bytes_per_buffer)
+ dma->curr_xfer_len = bytes_per_buffer;
+
+ ret = dspi_next_xfer_dma_submit(dspi);
+ if (ret) {
+ dev_err(dev, "DMA transfer failed\n");
+ goto exit;
+
+ } else {
+ curr_remaining_bytes -= dma->curr_xfer_len * word;
+ if (curr_remaining_bytes < 0)
+ curr_remaining_bytes = 0;
+ }
+ }
+
+exit:
+ return ret;
+}
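To put numbers on the chunking loop above (taking the defines from this patch): DSPI_DMA_BUFSIZE is 4 * 1024 = 4096 bytes and DSPI_FIFO_SIZE is 4, so bytes_per_buffer is 1024 PUSHR entries per DMA round. A hypothetical 2500-byte transfer in 8-bit mode (word = 1) therefore runs as three rounds of 1024, 1024 and 452 entries; the same 2500 bytes in 16-bit mode (word = 2) run as 1024 entries (2048 bytes) followed by 226 entries (452 bytes).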
+
+static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
+{
+ struct fsl_dspi_dma *dma;
+ struct dma_slave_config cfg;
+ struct device *dev = &dspi->pdev->dev;
+ int ret;
+
+ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ dma->chan_rx = dma_request_slave_channel(dev, "rx");
+ if (!dma->chan_rx) {
+ dev_err(dev, "rx dma channel not available\n");
+ ret = -ENODEV;
+ return ret;
+ }
+
+ dma->chan_tx = dma_request_slave_channel(dev, "tx");
+ if (!dma->chan_tx) {
+ dev_err(dev, "tx dma channel not available\n");
+ ret = -ENODEV;
+ goto err_tx_channel;
+ }
+
+ dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
+ &dma->tx_dma_phys, GFP_KERNEL);
+ if (!dma->tx_dma_buf) {
+ ret = -ENOMEM;
+ goto err_tx_dma_buf;
+ }
+
+ dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
+ &dma->rx_dma_phys, GFP_KERNEL);
+ if (!dma->rx_dma_buf) {
+ ret = -ENOMEM;
+ goto err_rx_dma_buf;
+ }
+
+ cfg.src_addr = phy_addr + SPI_POPR;
+ cfg.dst_addr = phy_addr + SPI_PUSHR;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = 1;
+ cfg.dst_maxburst = 1;
+
+ cfg.direction = DMA_DEV_TO_MEM;
+ ret = dmaengine_slave_config(dma->chan_rx, &cfg);
+ if (ret) {
+ dev_err(dev, "can't configure rx dma channel\n");
+ ret = -EINVAL;
+ goto err_slave_config;
+ }
+
+ cfg.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(dma->chan_tx, &cfg);
+ if (ret) {
+ dev_err(dev, "can't configure tx dma channel\n");
+ ret = -EINVAL;
+ goto err_slave_config;
+ }
+
+ dspi->dma = dma;
+ init_completion(&dma->cmd_tx_complete);
+ init_completion(&dma->cmd_rx_complete);
+
+ return 0;
+
+err_slave_config:
+ dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
+ dma->rx_dma_buf, dma->rx_dma_phys);
+err_rx_dma_buf:
+ dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
+ dma->tx_dma_buf, dma->tx_dma_phys);
+err_tx_dma_buf:
+ dma_release_channel(dma->chan_tx);
+err_tx_channel:
+ dma_release_channel(dma->chan_rx);
+
+ devm_kfree(dev, dma);
+ dspi->dma = NULL;
+
+ return ret;
+}
+
+static void dspi_release_dma(struct fsl_dspi *dspi)
+{
+ struct fsl_dspi_dma *dma = dspi->dma;
+ struct device *dev = &dspi->pdev->dev;
+
+ if (dma) {
+ if (dma->chan_tx) {
+ dma_unmap_single(dev, dma->tx_dma_phys,
+ DSPI_DMA_BUFSIZE, DMA_TO_DEVICE);
+ dma_release_channel(dma->chan_tx);
+ }
+
+ if (dma->chan_rx) {
+ dma_unmap_single(dev, dma->rx_dma_phys,
+ DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE);
+ dma_release_channel(dma->chan_rx);
+ }
+ }
+}
+
static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
unsigned long clkrate)
{
@@ -425,6 +705,12 @@ static int dspi_transfer_one_message(struct spi_master *master,
regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
dspi_tcfq_write(dspi);
break;
+ case DSPI_DMA_MODE:
+ regmap_write(dspi->regmap, SPI_RSER,
+ SPI_RSER_TFFFE | SPI_RSER_TFFFD |
+ SPI_RSER_RFDFE | SPI_RSER_RFDFD);
+ status = dspi_dma_xfer(dspi);
+ break;
default:
dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
trans_mode);
@@ -432,9 +718,13 @@ static int dspi_transfer_one_message(struct spi_master *master,
goto out;
}
- if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
- dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
- dspi->waitflags = 0;
+ if (trans_mode != DSPI_DMA_MODE) {
+ if (wait_event_interruptible(dspi->waitq,
+ dspi->waitflags))
+ dev_err(&dspi->pdev->dev,
+ "wait transfer complete fail!\n");
+ dspi->waitflags = 0;
+ }
if (transfer->delay_usecs)
udelay(transfer->delay_usecs);
@@ -740,6 +1030,13 @@ static int dspi_probe(struct platform_device *pdev)
if (ret)
goto out_master_put;
+ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
+ if (dspi_request_dma(dspi, res->start)) {
+ dev_err(&pdev->dev, "can't get dma channels\n");
+ goto out_clk_put;
+ }
+ }
+
master->max_speed_hz =
clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
@@ -768,6 +1065,7 @@ static int dspi_remove(struct platform_device *pdev)
struct fsl_dspi *dspi = spi_master_get_devdata(master);
/* Disconnect from the SPI framework */
+ dspi_release_dma(dspi);
clk_disable_unprepare(dspi->clk);
spi_unregister_master(dspi->master);
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 2c175b9495f7..1d332e23f6ed 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -23,8 +23,6 @@
#include <linux/pm_runtime.h>
#include <sysdev/fsl_soc.h>
-#include "spi-fsl-lib.h"
-
/* eSPI Controller registers */
#define ESPI_SPMODE 0x00 /* eSPI mode register */
#define ESPI_SPIE 0x04 /* eSPI event register */
@@ -54,8 +52,11 @@
#define CSMODE_AFT(x) ((x) << 8)
#define CSMODE_CG(x) ((x) << 3)
+#define FSL_ESPI_FIFO_SIZE 32
+#define FSL_ESPI_RXTHR 15
+
/* Default mode/csmode for eSPI controller */
-#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(3))
+#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(FSL_ESPI_RXTHR))
#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \
| CSMODE_AFT(0) | CSMODE_CG(1))
@@ -90,219 +91,342 @@
#define AUTOSUSPEND_TIMEOUT 2000
-static inline u32 fsl_espi_read_reg(struct mpc8xxx_spi *mspi, int offset)
+struct fsl_espi {
+ struct device *dev;
+ void __iomem *reg_base;
+
+ struct list_head *m_transfers;
+ struct spi_transfer *tx_t;
+ unsigned int tx_pos;
+ bool tx_done;
+ struct spi_transfer *rx_t;
+ unsigned int rx_pos;
+ bool rx_done;
+
+ bool swab;
+ unsigned int rxskip;
+
+ spinlock_t lock;
+
+ u32 spibrg; /* SPIBRG input clock */
+
+ struct completion done;
+};
+
+struct fsl_espi_cs {
+ u32 hw_mode;
+};
+
+static inline u32 fsl_espi_read_reg(struct fsl_espi *espi, int offset)
{
- return ioread32be(mspi->reg_base + offset);
+ return ioread32be(espi->reg_base + offset);
}
-static inline u8 fsl_espi_read_reg8(struct mpc8xxx_spi *mspi, int offset)
+static inline u16 fsl_espi_read_reg16(struct fsl_espi *espi, int offset)
{
- return ioread8(mspi->reg_base + offset);
+ return ioread16be(espi->reg_base + offset);
}
-static inline void fsl_espi_write_reg(struct mpc8xxx_spi *mspi, int offset,
- u32 val)
+static inline u8 fsl_espi_read_reg8(struct fsl_espi *espi, int offset)
{
- iowrite32be(val, mspi->reg_base + offset);
+ return ioread8(espi->reg_base + offset);
}
-static inline void fsl_espi_write_reg8(struct mpc8xxx_spi *mspi, int offset,
- u8 val)
+static inline void fsl_espi_write_reg(struct fsl_espi *espi, int offset,
+ u32 val)
{
- iowrite8(val, mspi->reg_base + offset);
+ iowrite32be(val, espi->reg_base + offset);
}
-static void fsl_espi_copy_to_buf(struct spi_message *m,
- struct mpc8xxx_spi *mspi)
+static inline void fsl_espi_write_reg16(struct fsl_espi *espi, int offset,
+ u16 val)
{
- struct spi_transfer *t;
- u8 *buf = mspi->local_buf;
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->tx_buf)
- memcpy(buf, t->tx_buf, t->len);
- else
- memset(buf, 0, t->len);
- buf += t->len;
- }
+ iowrite16be(val, espi->reg_base + offset);
}
-static void fsl_espi_copy_from_buf(struct spi_message *m,
- struct mpc8xxx_spi *mspi)
+static inline void fsl_espi_write_reg8(struct fsl_espi *espi, int offset,
+ u8 val)
{
- struct spi_transfer *t;
- u8 *buf = mspi->local_buf;
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->rx_buf)
- memcpy(t->rx_buf, buf, t->len);
- buf += t->len;
- }
+ iowrite8(val, espi->reg_base + offset);
}
static int fsl_espi_check_message(struct spi_message *m)
{
- struct mpc8xxx_spi *mspi = spi_master_get_devdata(m->spi->master);
+ struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
struct spi_transfer *t, *first;
if (m->frame_length > SPCOM_TRANLEN_MAX) {
- dev_err(mspi->dev, "message too long, size is %u bytes\n",
+ dev_err(espi->dev, "message too long, size is %u bytes\n",
m->frame_length);
return -EMSGSIZE;
}
first = list_first_entry(&m->transfers, struct spi_transfer,
transfer_list);
+
list_for_each_entry(t, &m->transfers, transfer_list) {
if (first->bits_per_word != t->bits_per_word ||
first->speed_hz != t->speed_hz) {
- dev_err(mspi->dev, "bits_per_word/speed_hz should be the same for all transfers\n");
+ dev_err(espi->dev, "bits_per_word/speed_hz should be the same for all transfers\n");
return -EINVAL;
}
}
+ /* ESPI supports MSB-first transfers for word size 8 / 16 only */
+ if (!(m->spi->mode & SPI_LSB_FIRST) && first->bits_per_word != 8 &&
+ first->bits_per_word != 16) {
+ dev_err(espi->dev,
+ "MSB-first transfer not supported for wordsize %u\n",
+ first->bits_per_word);
+ return -EINVAL;
+ }
+
return 0;
}
-static void fsl_espi_change_mode(struct spi_device *spi)
+static unsigned int fsl_espi_check_rxskip_mode(struct spi_message *m)
{
- struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
- struct spi_mpc8xxx_cs *cs = spi->controller_state;
- u32 tmp;
- unsigned long flags;
-
- /* Turn off IRQs locally to minimize time that SPI is disabled. */
- local_irq_save(flags);
-
- /* Turn off SPI unit prior changing mode */
- tmp = fsl_espi_read_reg(mspi, ESPI_SPMODE);
- fsl_espi_write_reg(mspi, ESPI_SPMODE, tmp & ~SPMODE_ENABLE);
- fsl_espi_write_reg(mspi, ESPI_SPMODEx(spi->chip_select),
- cs->hw_mode);
- fsl_espi_write_reg(mspi, ESPI_SPMODE, tmp);
-
- local_irq_restore(flags);
+ struct spi_transfer *t;
+ unsigned int i = 0, rxskip = 0;
+
+ /*
+ * prerequisites for ESPI rxskip mode:
+ * - message has two transfers
+ * - first transfer is a write and second is a read
+ *
+ * In addition the current low-level transfer mechanism requires
+ * that the rxskip bytes fit into the TX FIFO. Else the transfer
+ * would hang because after the first FSL_ESPI_FIFO_SIZE bytes
+ * the TX FIFO isn't re-filled.
+ */
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (i == 0) {
+ if (!t->tx_buf || t->rx_buf ||
+ t->len > FSL_ESPI_FIFO_SIZE)
+ return 0;
+ rxskip = t->len;
+ } else if (i == 1) {
+ if (t->tx_buf || !t->rx_buf)
+ return 0;
+ }
+ i++;
+ }
+
+ return i == 2 ? rxskip : 0;
}
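A hypothetical client message that meets the rxskip conditions above: exactly two transfers, a short write that fits the 32-byte FIFO followed by a read. Reading a flash ID is the classic shape; the opcode and helper name are illustrative only, not part of this patch.

static int example_read_flash_id(struct spi_device *spi, u8 *id, size_t id_len)
{
	u8 cmd = 0x9f;	/* assumed read-ID opcode, for illustration */
	struct spi_transfer xfers[2] = {
		{ .tx_buf = &cmd, .len = 1 },		/* transfer 0: write only */
		{ .rx_buf = id,   .len = id_len },	/* transfer 1: read only  */
	};

	/* rxskip becomes 1 and rx_len becomes id_len in fsl_espi_bufs() */
	return spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
}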
-static u32 fsl_espi_tx_buf_lsb(struct mpc8xxx_spi *mpc8xxx_spi)
+static void fsl_espi_fill_tx_fifo(struct fsl_espi *espi, u32 events)
{
- u32 data;
- u16 data_h;
- u16 data_l;
- const u32 *tx = mpc8xxx_spi->tx;
+ u32 tx_fifo_avail;
+ unsigned int tx_left;
+ const void *tx_buf;
+
+ /* if events is zero, the transfer has not started and the tx fifo is empty */
+ tx_fifo_avail = events ? SPIE_TXCNT(events) : FSL_ESPI_FIFO_SIZE;
+start:
+ tx_left = espi->tx_t->len - espi->tx_pos;
+ tx_buf = espi->tx_t->tx_buf;
+ while (tx_fifo_avail >= min(4U, tx_left) && tx_left) {
+ if (tx_left >= 4) {
+ if (!tx_buf)
+ fsl_espi_write_reg(espi, ESPI_SPITF, 0);
+ else if (espi->swab)
+ fsl_espi_write_reg(espi, ESPI_SPITF,
+ swahb32p(tx_buf + espi->tx_pos));
+ else
+ fsl_espi_write_reg(espi, ESPI_SPITF,
+ *(u32 *)(tx_buf + espi->tx_pos));
+ espi->tx_pos += 4;
+ tx_left -= 4;
+ tx_fifo_avail -= 4;
+ } else if (tx_left >= 2 && tx_buf && espi->swab) {
+ fsl_espi_write_reg16(espi, ESPI_SPITF,
+ swab16p(tx_buf + espi->tx_pos));
+ espi->tx_pos += 2;
+ tx_left -= 2;
+ tx_fifo_avail -= 2;
+ } else {
+ if (!tx_buf)
+ fsl_espi_write_reg8(espi, ESPI_SPITF, 0);
+ else
+ fsl_espi_write_reg8(espi, ESPI_SPITF,
+ *(u8 *)(tx_buf + espi->tx_pos));
+ espi->tx_pos += 1;
+ tx_left -= 1;
+ tx_fifo_avail -= 1;
+ }
+ }
- if (!tx)
- return 0;
+ if (!tx_left) {
+ /* Last transfer finished; in rxskip mode only the first (tx) transfer is written */
+ if (list_is_last(&espi->tx_t->transfer_list,
+ espi->m_transfers) || espi->rxskip) {
+ espi->tx_done = true;
+ return;
+ }
+ espi->tx_t = list_next_entry(espi->tx_t, transfer_list);
+ espi->tx_pos = 0;
+ /* continue with next transfer if tx fifo is not full */
+ if (tx_fifo_avail)
+ goto start;
+ }
+}
- data = *tx++ << mpc8xxx_spi->tx_shift;
- data_l = data & 0xffff;
- data_h = (data >> 16) & 0xffff;
- swab16s(&data_l);
- swab16s(&data_h);
- data = data_h | data_l;
+static void fsl_espi_read_rx_fifo(struct fsl_espi *espi, u32 events)
+{
+ u32 rx_fifo_avail = SPIE_RXCNT(events);
+ unsigned int rx_left;
+ void *rx_buf;
+
+start:
+ rx_left = espi->rx_t->len - espi->rx_pos;
+ rx_buf = espi->rx_t->rx_buf;
+ while (rx_fifo_avail >= min(4U, rx_left) && rx_left) {
+ if (rx_left >= 4) {
+ u32 val = fsl_espi_read_reg(espi, ESPI_SPIRF);
+
+ if (rx_buf && espi->swab)
+ *(u32 *)(rx_buf + espi->rx_pos) = swahb32(val);
+ else if (rx_buf)
+ *(u32 *)(rx_buf + espi->rx_pos) = val;
+ espi->rx_pos += 4;
+ rx_left -= 4;
+ rx_fifo_avail -= 4;
+ } else if (rx_left >= 2 && rx_buf && espi->swab) {
+ u16 val = fsl_espi_read_reg16(espi, ESPI_SPIRF);
+
+ *(u16 *)(rx_buf + espi->rx_pos) = swab16(val);
+ espi->rx_pos += 2;
+ rx_left -= 2;
+ rx_fifo_avail -= 2;
+ } else {
+ u8 val = fsl_espi_read_reg8(espi, ESPI_SPIRF);
+
+ if (rx_buf)
+ *(u8 *)(rx_buf + espi->rx_pos) = val;
+ espi->rx_pos += 1;
+ rx_left -= 1;
+ rx_fifo_avail -= 1;
+ }
+ }
- mpc8xxx_spi->tx = tx;
- return data;
+ if (!rx_left) {
+ if (list_is_last(&espi->rx_t->transfer_list,
+ espi->m_transfers)) {
+ espi->rx_done = true;
+ return;
+ }
+ espi->rx_t = list_next_entry(espi->rx_t, transfer_list);
+ espi->rx_pos = 0;
+ /* continue with next transfer if rx fifo is not empty */
+ if (rx_fifo_avail)
+ goto start;
+ }
}
static void fsl_espi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
- struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ struct fsl_espi *espi = spi_master_get_devdata(spi->master);
int bits_per_word = t ? t->bits_per_word : spi->bits_per_word;
- u32 hz = t ? t->speed_hz : spi->max_speed_hz;
- u8 pm;
- struct spi_mpc8xxx_cs *cs = spi->controller_state;
-
- cs->rx_shift = 0;
- cs->tx_shift = 0;
- cs->get_rx = mpc8xxx_spi_rx_buf_u32;
- cs->get_tx = mpc8xxx_spi_tx_buf_u32;
- if (bits_per_word <= 8) {
- cs->rx_shift = 8 - bits_per_word;
- } else {
- cs->rx_shift = 16 - bits_per_word;
- if (spi->mode & SPI_LSB_FIRST)
- cs->get_tx = fsl_espi_tx_buf_lsb;
- }
-
- mpc8xxx_spi->rx_shift = cs->rx_shift;
- mpc8xxx_spi->tx_shift = cs->tx_shift;
- mpc8xxx_spi->get_rx = cs->get_rx;
- mpc8xxx_spi->get_tx = cs->get_tx;
+ u32 pm, hz = t ? t->speed_hz : spi->max_speed_hz;
+ struct fsl_espi_cs *cs = spi_get_ctldata(spi);
+ u32 hw_mode_old = cs->hw_mode;
/* mask out bits we are going to set */
cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF));
cs->hw_mode |= CSMODE_LEN(bits_per_word - 1);
- if ((mpc8xxx_spi->spibrg / hz) > 64) {
+ pm = DIV_ROUND_UP(espi->spibrg, hz * 4) - 1;
+
+ if (pm > 15) {
cs->hw_mode |= CSMODE_DIV16;
- pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 16 * 4);
-
- WARN_ONCE(pm > 33, "%s: Requested speed is too low: %d Hz. "
- "Will use %d Hz instead.\n", dev_name(&spi->dev),
- hz, mpc8xxx_spi->spibrg / (4 * 16 * (32 + 1)));
- if (pm > 33)
- pm = 33;
- } else {
- pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 4);
+ pm = DIV_ROUND_UP(espi->spibrg, hz * 16 * 4) - 1;
}
- if (pm)
- pm--;
- if (pm < 2)
- pm = 2;
cs->hw_mode |= CSMODE_PM(pm);
- fsl_espi_change_mode(spi);
+ /* don't write the mode register if the mode doesn't change */
+ if (cs->hw_mode != hw_mode_old)
+ fsl_espi_write_reg(espi, ESPI_SPMODEx(spi->chip_select),
+ cs->hw_mode);
}
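A worked example of the new divider math (assuming a hypothetical spibrg of 200 MHz): for a requested 10 MHz, pm = DIV_ROUND_UP(200 MHz, 10 MHz * 4) - 1 = 4, which fits the 4-bit PM field and yields 200 MHz / (4 * (4 + 1)) = 10 MHz. For a requested 1 MHz, the first pass gives pm = 49 > 15, so CSMODE_DIV16 is set and pm = DIV_ROUND_UP(200 MHz, 1 MHz * 16 * 4) - 1 = 3, yielding 200 MHz / (16 * 4 * (3 + 1)) = 781.25 kHz, the closest rate that does not exceed the request.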
static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
- struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
- u32 word;
+ struct fsl_espi *espi = spi_master_get_devdata(spi->master);
+ unsigned int rx_len = t->len;
+ u32 mask, spcom;
int ret;
- mpc8xxx_spi->len = t->len;
- mpc8xxx_spi->count = roundup(t->len, 4) / 4;
-
- mpc8xxx_spi->tx = t->tx_buf;
- mpc8xxx_spi->rx = t->rx_buf;
-
- reinit_completion(&mpc8xxx_spi->done);
+ reinit_completion(&espi->done);
/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPCOM,
- (SPCOM_CS(spi->chip_select) | SPCOM_TRANLEN(t->len - 1)));
+ spcom = SPCOM_CS(spi->chip_select);
+ spcom |= SPCOM_TRANLEN(t->len - 1);
+
+ /* configure RXSKIP mode */
+ if (espi->rxskip) {
+ spcom |= SPCOM_RXSKIP(espi->rxskip);
+ rx_len = t->len - espi->rxskip;
+ if (t->rx_nbits == SPI_NBITS_DUAL)
+ spcom |= SPCOM_DO;
+ }
+
+ fsl_espi_write_reg(espi, ESPI_SPCOM, spcom);
- /* enable rx ints */
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, SPIM_RNE);
+ /* enable interrupts */
+ mask = SPIM_DON;
+ if (rx_len > FSL_ESPI_FIFO_SIZE)
+ mask |= SPIM_RXT;
+ fsl_espi_write_reg(espi, ESPI_SPIM, mask);
- /* transmit word */
- word = mpc8xxx_spi->get_tx(mpc8xxx_spi);
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPITF, word);
+ /* Prevent the FIFO fill from being interrupted */
+ spin_lock_irq(&espi->lock);
+ fsl_espi_fill_tx_fifo(espi, 0);
+ spin_unlock_irq(&espi->lock);
/* Won't hang up forever, SPI bus sometimes got lost interrupts... */
- ret = wait_for_completion_timeout(&mpc8xxx_spi->done, 2 * HZ);
+ ret = wait_for_completion_timeout(&espi->done, 2 * HZ);
if (ret == 0)
- dev_err(mpc8xxx_spi->dev,
- "Transaction hanging up (left %d bytes)\n",
- mpc8xxx_spi->count);
+ dev_err(espi->dev, "Transfer timed out!\n");
/* disable rx ints */
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, 0);
+ fsl_espi_write_reg(espi, ESPI_SPIM, 0);
- return mpc8xxx_spi->count > 0 ? -EMSGSIZE : 0;
+ return ret == 0 ? -ETIMEDOUT : 0;
}
static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
{
- struct mpc8xxx_spi *mspi = spi_master_get_devdata(m->spi->master);
+ struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
struct spi_device *spi = m->spi;
int ret;
- fsl_espi_copy_to_buf(m, mspi);
+ /* In case of LSB-first and bits_per_word > 8 byte-swap all words */
+ espi->swab = spi->mode & SPI_LSB_FIRST && trans->bits_per_word > 8;
+
+ espi->m_transfers = &m->transfers;
+ espi->tx_t = list_first_entry(&m->transfers, struct spi_transfer,
+ transfer_list);
+ espi->tx_pos = 0;
+ espi->tx_done = false;
+ espi->rx_t = list_first_entry(&m->transfers, struct spi_transfer,
+ transfer_list);
+ espi->rx_pos = 0;
+ espi->rx_done = false;
+
+ espi->rxskip = fsl_espi_check_rxskip_mode(m);
+ if (trans->rx_nbits == SPI_NBITS_DUAL && !espi->rxskip) {
+ dev_err(espi->dev, "Dual output mode requires RXSKIP mode!\n");
+ return -EINVAL;
+ }
+
+ /* In RXSKIP mode skip first transfer for reads */
+ if (espi->rxskip)
+ espi->rx_t = list_next_entry(espi->rx_t, transfer_list);
+
fsl_espi_setup_transfer(spi, trans);
ret = fsl_espi_bufs(spi, trans);
@@ -310,19 +434,13 @@ static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
if (trans->delay_usecs)
udelay(trans->delay_usecs);
- fsl_espi_setup_transfer(spi, NULL);
-
- if (!ret)
- fsl_espi_copy_from_buf(m, mspi);
-
return ret;
}
static int fsl_espi_do_one_msg(struct spi_master *master,
struct spi_message *m)
{
- struct mpc8xxx_spi *mspi = spi_master_get_devdata(m->spi->master);
- unsigned int delay_usecs = 0;
+ unsigned int delay_usecs = 0, rx_nbits = 0;
struct spi_transfer *t, trans = {};
int ret;
@@ -333,6 +451,8 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
list_for_each_entry(t, &m->transfers, transfer_list) {
if (t->delay_usecs > delay_usecs)
delay_usecs = t->delay_usecs;
+ if (t->rx_nbits > rx_nbits)
+ rx_nbits = t->rx_nbits;
}
t = list_first_entry(&m->transfers, struct spi_transfer,
@@ -342,8 +462,7 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
trans.speed_hz = t->speed_hz;
trans.bits_per_word = t->bits_per_word;
trans.delay_usecs = delay_usecs;
- trans.tx_buf = mspi->local_buf;
- trans.rx_buf = mspi->local_buf;
+ trans.rx_nbits = rx_nbits;
if (trans.len)
ret = fsl_espi_trans(m, &trans);
@@ -360,12 +479,9 @@ out:
static int fsl_espi_setup(struct spi_device *spi)
{
- struct mpc8xxx_spi *mpc8xxx_spi;
+ struct fsl_espi *espi;
u32 loop_mode;
- struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
-
- if (!spi->max_speed_hz)
- return -EINVAL;
+ struct fsl_espi_cs *cs = spi_get_ctldata(spi);
if (!cs) {
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
@@ -374,12 +490,11 @@ static int fsl_espi_setup(struct spi_device *spi)
spi_set_ctldata(spi, cs);
}
- mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ espi = spi_master_get_devdata(spi->master);
- pm_runtime_get_sync(mpc8xxx_spi->dev);
+ pm_runtime_get_sync(espi->dev);
- cs->hw_mode = fsl_espi_read_reg(mpc8xxx_spi,
- ESPI_SPMODEx(spi->chip_select));
+ cs->hw_mode = fsl_espi_read_reg(espi, ESPI_SPMODEx(spi->chip_select));
/* mask out bits we are going to set */
cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH
| CSMODE_REV);
@@ -392,115 +507,74 @@ static int fsl_espi_setup(struct spi_device *spi)
cs->hw_mode |= CSMODE_REV;
/* Handle the loop mode */
- loop_mode = fsl_espi_read_reg(mpc8xxx_spi, ESPI_SPMODE);
+ loop_mode = fsl_espi_read_reg(espi, ESPI_SPMODE);
loop_mode &= ~SPMODE_LOOP;
if (spi->mode & SPI_LOOP)
loop_mode |= SPMODE_LOOP;
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, loop_mode);
+ fsl_espi_write_reg(espi, ESPI_SPMODE, loop_mode);
fsl_espi_setup_transfer(spi, NULL);
- pm_runtime_mark_last_busy(mpc8xxx_spi->dev);
- pm_runtime_put_autosuspend(mpc8xxx_spi->dev);
+ pm_runtime_mark_last_busy(espi->dev);
+ pm_runtime_put_autosuspend(espi->dev);
return 0;
}
static void fsl_espi_cleanup(struct spi_device *spi)
{
- struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
+ struct fsl_espi_cs *cs = spi_get_ctldata(spi);
kfree(cs);
spi_set_ctldata(spi, NULL);
}
-static void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
+static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events)
{
- /* We need handle RX first */
- if (events & SPIE_RNE) {
- u32 rx_data, tmp;
- u8 rx_data_8;
- int rx_nr_bytes = 4;
- int ret;
-
- /* Spin until RX is done */
- if (SPIE_RXCNT(events) < min(4, mspi->len)) {
- ret = spin_event_timeout(
- !(SPIE_RXCNT(events =
- fsl_espi_read_reg(mspi, ESPI_SPIE)) <
- min(4, mspi->len)),
- 10000, 0); /* 10 msec */
- if (!ret)
- dev_err(mspi->dev,
- "tired waiting for SPIE_RXCNT\n");
- }
+ if (!espi->rx_done)
+ fsl_espi_read_rx_fifo(espi, events);
- if (mspi->len >= 4) {
- rx_data = fsl_espi_read_reg(mspi, ESPI_SPIRF);
- } else if (mspi->len <= 0) {
- dev_err(mspi->dev,
- "unexpected RX(SPIE_RNE) interrupt occurred,\n"
- "(local rxlen %d bytes, reg rxlen %d bytes)\n",
- min(4, mspi->len), SPIE_RXCNT(events));
- rx_nr_bytes = 0;
- } else {
- rx_nr_bytes = mspi->len;
- tmp = mspi->len;
- rx_data = 0;
- while (tmp--) {
- rx_data_8 = fsl_espi_read_reg8(mspi,
- ESPI_SPIRF);
- rx_data |= (rx_data_8 << (tmp * 8));
- }
-
- rx_data <<= (4 - mspi->len) * 8;
- }
+ if (!espi->tx_done)
+ fsl_espi_fill_tx_fifo(espi, events);
- mspi->len -= rx_nr_bytes;
+ if (!espi->tx_done || !espi->rx_done)
+ return;
- if (rx_nr_bytes && mspi->rx)
- mspi->get_rx(rx_data, mspi);
- }
+ /* we're done, but check for errors before returning */
+ events = fsl_espi_read_reg(espi, ESPI_SPIE);
- if (!(events & SPIE_TNF)) {
- int ret;
-
- /* spin until TX is done */
- ret = spin_event_timeout(((events = fsl_espi_read_reg(
- mspi, ESPI_SPIE)) & SPIE_TNF), 1000, 0);
- if (!ret) {
- dev_err(mspi->dev, "tired waiting for SPIE_TNF\n");
- complete(&mspi->done);
- return;
- }
- }
+ if (!(events & SPIE_DON))
+ dev_err(espi->dev,
+ "Transfer done but SPIE_DON isn't set!\n");
- mspi->count -= 1;
- if (mspi->count) {
- u32 word = mspi->get_tx(mspi);
+ if (SPIE_RXCNT(events) || SPIE_TXCNT(events) != FSL_ESPI_FIFO_SIZE)
+ dev_err(espi->dev, "Transfer done but rx/tx fifo's aren't empty!\n");
- fsl_espi_write_reg(mspi, ESPI_SPITF, word);
- } else {
- complete(&mspi->done);
- }
+ complete(&espi->done);
}
static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
{
- struct mpc8xxx_spi *mspi = context_data;
+ struct fsl_espi *espi = context_data;
u32 events;
+ spin_lock(&espi->lock);
+
/* Get interrupt events(tx/rx) */
- events = fsl_espi_read_reg(mspi, ESPI_SPIE);
- if (!events)
+ events = fsl_espi_read_reg(espi, ESPI_SPIE);
+ if (!events) {
+ spin_unlock(&espi->lock);
return IRQ_NONE;
+ }
- dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events);
+ dev_vdbg(espi->dev, "%s: events %x\n", __func__, events);
- fsl_espi_cpu_irq(mspi, events);
+ fsl_espi_cpu_irq(espi, events);
/* Clear the events */
- fsl_espi_write_reg(mspi, ESPI_SPIE, events);
+ fsl_espi_write_reg(espi, ESPI_SPIE, events);
+
+ spin_unlock(&espi->lock);
return IRQ_HANDLED;
}
@@ -509,12 +583,12 @@ static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
static int fsl_espi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
- struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+ struct fsl_espi *espi = spi_master_get_devdata(master);
u32 regval;
- regval = fsl_espi_read_reg(mpc8xxx_spi, ESPI_SPMODE);
+ regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
regval &= ~SPMODE_ENABLE;
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval);
+ fsl_espi_write_reg(espi, ESPI_SPMODE, regval);
return 0;
}
@@ -522,12 +596,12 @@ static int fsl_espi_runtime_suspend(struct device *dev)
static int fsl_espi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
- struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+ struct fsl_espi *espi = spi_master_get_devdata(master);
u32 regval;
- regval = fsl_espi_read_reg(mpc8xxx_spi, ESPI_SPMODE);
+ regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
regval |= SPMODE_ENABLE;
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval);
+ fsl_espi_write_reg(espi, ESPI_SPMODE, regval);
return 0;
}
@@ -538,96 +612,105 @@ static size_t fsl_espi_max_message_size(struct spi_device *spi)
return SPCOM_TRANLEN_MAX;
}
+static void fsl_espi_init_regs(struct device *dev, bool initial)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct fsl_espi *espi = spi_master_get_devdata(master);
+ struct device_node *nc;
+ u32 csmode, cs, prop;
+ int ret;
+
+ /* SPI controller initializations */
+ fsl_espi_write_reg(espi, ESPI_SPMODE, 0);
+ fsl_espi_write_reg(espi, ESPI_SPIM, 0);
+ fsl_espi_write_reg(espi, ESPI_SPCOM, 0);
+ fsl_espi_write_reg(espi, ESPI_SPIE, 0xffffffff);
+
+ /* Init eSPI CS mode register */
+ for_each_available_child_of_node(master->dev.of_node, nc) {
+ /* get chip select */
+ ret = of_property_read_u32(nc, "reg", &cs);
+ if (ret || cs >= master->num_chipselect)
+ continue;
+
+ csmode = CSMODE_INIT_VAL;
+
+ /* check if CSBEF is set in device tree */
+ ret = of_property_read_u32(nc, "fsl,csbef", &prop);
+ if (!ret) {
+ csmode &= ~(CSMODE_BEF(0xf));
+ csmode |= CSMODE_BEF(prop);
+ }
+
+ /* check if CSAFT is set in device tree */
+ ret = of_property_read_u32(nc, "fsl,csaft", &prop);
+ if (!ret) {
+ csmode &= ~(CSMODE_AFT(0xf));
+ csmode |= CSMODE_AFT(prop);
+ }
+
+ fsl_espi_write_reg(espi, ESPI_SPMODEx(cs), csmode);
+
+ if (initial)
+ dev_info(dev, "cs=%u, init_csmode=0x%x\n", cs, csmode);
+ }
+
+ /* Enable SPI interface */
+ fsl_espi_write_reg(espi, ESPI_SPMODE, SPMODE_INIT_VAL | SPMODE_ENABLE);
+}
+
static int fsl_espi_probe(struct device *dev, struct resource *mem,
- unsigned int irq)
+ unsigned int irq, unsigned int num_cs)
{
- struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_master *master;
- struct mpc8xxx_spi *mpc8xxx_spi;
- struct device_node *nc;
- const __be32 *prop;
- u32 regval, csmode;
- int i, len, ret;
+ struct fsl_espi *espi;
+ int ret;
- master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
+ master = spi_alloc_master(dev, sizeof(struct fsl_espi));
if (!master)
return -ENOMEM;
dev_set_drvdata(dev, master);
- mpc8xxx_spi_probe(dev, mem, irq);
-
+ master->mode_bits = SPI_RX_DUAL | SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
+ SPI_LSB_FIRST | SPI_LOOP;
+ master->dev.of_node = dev->of_node;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
master->setup = fsl_espi_setup;
master->cleanup = fsl_espi_cleanup;
master->transfer_one_message = fsl_espi_do_one_msg;
master->auto_runtime_pm = true;
master->max_message_size = fsl_espi_max_message_size;
+ master->num_chipselect = num_cs;
- mpc8xxx_spi = spi_master_get_devdata(master);
+ espi = spi_master_get_devdata(master);
+ spin_lock_init(&espi->lock);
- mpc8xxx_spi->local_buf =
- devm_kmalloc(dev, SPCOM_TRANLEN_MAX, GFP_KERNEL);
- if (!mpc8xxx_spi->local_buf) {
- ret = -ENOMEM;
+ espi->dev = dev;
+ espi->spibrg = fsl_get_sys_freq();
+ if (espi->spibrg == -1) {
+ dev_err(dev, "Can't get sys frequency!\n");
+ ret = -EINVAL;
goto err_probe;
}
+ /* determined by clock divider fields DIV16/PM in register SPMODEx */
+ master->min_speed_hz = DIV_ROUND_UP(espi->spibrg, 4 * 16 * 16);
+ master->max_speed_hz = DIV_ROUND_UP(espi->spibrg, 4);
- mpc8xxx_spi->reg_base = devm_ioremap_resource(dev, mem);
- if (IS_ERR(mpc8xxx_spi->reg_base)) {
- ret = PTR_ERR(mpc8xxx_spi->reg_base);
+ init_completion(&espi->done);
+
+ espi->reg_base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(espi->reg_base)) {
+ ret = PTR_ERR(espi->reg_base);
goto err_probe;
}
/* Register for SPI Interrupt */
- ret = devm_request_irq(dev, mpc8xxx_spi->irq, fsl_espi_irq,
- 0, "fsl_espi", mpc8xxx_spi);
+ ret = devm_request_irq(dev, irq, fsl_espi_irq, 0, "fsl_espi", espi);
if (ret)
goto err_probe;
- if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
- mpc8xxx_spi->rx_shift = 16;
- mpc8xxx_spi->tx_shift = 24;
- }
-
- /* SPI controller initializations */
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, 0);
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, 0);
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPCOM, 0);
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIE, 0xffffffff);
-
- /* Init eSPI CS mode register */
- for_each_available_child_of_node(master->dev.of_node, nc) {
- /* get chip select */
- prop = of_get_property(nc, "reg", &len);
- if (!prop || len < sizeof(*prop))
- continue;
- i = be32_to_cpup(prop);
- if (i < 0 || i >= pdata->max_chipselect)
- continue;
-
- csmode = CSMODE_INIT_VAL;
- /* check if CSBEF is set in device tree */
- prop = of_get_property(nc, "fsl,csbef", &len);
- if (prop && len >= sizeof(*prop)) {
- csmode &= ~(CSMODE_BEF(0xf));
- csmode |= CSMODE_BEF(be32_to_cpup(prop));
- }
- /* check if CSAFT is set in device tree */
- prop = of_get_property(nc, "fsl,csaft", &len);
- if (prop && len >= sizeof(*prop)) {
- csmode &= ~(CSMODE_AFT(0xf));
- csmode |= CSMODE_AFT(be32_to_cpup(prop));
- }
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODEx(i), csmode);
-
- dev_info(dev, "cs=%d, init_csmode=0x%x\n", i, csmode);
- }
-
- /* Enable SPI interface */
- regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
-
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval);
+ fsl_espi_init_regs(dev, true);
pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(dev);
@@ -639,8 +722,7 @@ static int fsl_espi_probe(struct device *dev, struct resource *mem,
if (ret < 0)
goto err_pm;
- dev_info(dev, "at 0x%p (irq = %d)\n", mpc8xxx_spi->reg_base,
- mpc8xxx_spi->irq);
+ dev_info(dev, "at 0x%p (irq = %u)\n", espi->reg_base, irq);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
@@ -659,20 +741,16 @@ err_probe:
static int of_fsl_espi_get_chipselects(struct device *dev)
{
struct device_node *np = dev->of_node;
- struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
- const u32 *prop;
- int len;
+ u32 num_cs;
+ int ret;
- prop = of_get_property(np, "fsl,espi-num-chipselects", &len);
- if (!prop || len < sizeof(*prop)) {
+ ret = of_property_read_u32(np, "fsl,espi-num-chipselects", &num_cs);
+ if (ret) {
dev_err(dev, "No 'fsl,espi-num-chipselects' property\n");
- return -EINVAL;
+ return 0;
}
- pdata->max_chipselect = *prop;
- pdata->cs_control = NULL;
-
- return 0;
+ return num_cs;
}
static int of_fsl_espi_probe(struct platform_device *ofdev)
@@ -680,16 +758,17 @@ static int of_fsl_espi_probe(struct platform_device *ofdev)
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
struct resource mem;
- unsigned int irq;
+ unsigned int irq, num_cs;
int ret;
- ret = of_mpc8xxx_spi_probe(ofdev);
- if (ret)
- return ret;
+ if (of_property_read_bool(np, "mode")) {
+ dev_err(dev, "mode property is not supported on ESPI!\n");
+ return -EINVAL;
+ }
- ret = of_fsl_espi_get_chipselects(dev);
- if (ret)
- return ret;
+ num_cs = of_fsl_espi_get_chipselects(dev);
+ if (!num_cs)
+ return -EINVAL;
ret = of_address_to_resource(np, 0, &mem);
if (ret)
@@ -699,7 +778,7 @@ static int of_fsl_espi_probe(struct platform_device *ofdev)
if (!irq)
return -EINVAL;
- return fsl_espi_probe(dev, &mem, irq);
+ return fsl_espi_probe(dev, &mem, irq, num_cs);
}
static int of_fsl_espi_remove(struct platform_device *dev)
@@ -721,38 +800,15 @@ static int of_fsl_espi_suspend(struct device *dev)
return ret;
}
- ret = pm_runtime_force_suspend(dev);
- if (ret < 0)
- return ret;
-
- return 0;
+ return pm_runtime_force_suspend(dev);
}
static int of_fsl_espi_resume(struct device *dev)
{
- struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_master *master = dev_get_drvdata(dev);
- struct mpc8xxx_spi *mpc8xxx_spi;
- u32 regval;
- int i, ret;
-
- mpc8xxx_spi = spi_master_get_devdata(master);
-
- /* SPI controller initializations */
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, 0);
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, 0);
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPCOM, 0);
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIE, 0xffffffff);
-
- /* Init eSPI CS mode register */
- for (i = 0; i < pdata->max_chipselect; i++)
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODEx(i),
- CSMODE_INIT_VAL);
-
- /* Enable SPI interface */
- regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
+ int ret;
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval);
+ fsl_espi_init_regs(dev, false);
ret = pm_runtime_force_resume(dev);
if (ret < 0)
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index 2925c8089fd9..f303f306b38e 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -28,10 +28,6 @@ struct mpc8xxx_spi {
/* rx & tx bufs from the spi_transfer */
const void *tx;
void *rx;
-#if IS_ENABLED(CONFIG_SPI_FSL_ESPI)
- int len;
- u8 *local_buf;
-#endif
int subblock;
struct spi_pram __iomem *pram;
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
new file mode 100644
index 000000000000..52551f6d0c7d
--- /dev/null
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -0,0 +1,525 @@
+/*
+ * Freescale i.MX7ULP LPSPI driver
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/types.h>
+
+#define DRIVER_NAME "fsl_lpspi"
+
+/* i.MX7ULP LPSPI registers */
+#define IMX7ULP_VERID 0x0
+#define IMX7ULP_PARAM 0x4
+#define IMX7ULP_CR 0x10
+#define IMX7ULP_SR 0x14
+#define IMX7ULP_IER 0x18
+#define IMX7ULP_DER 0x1c
+#define IMX7ULP_CFGR0 0x20
+#define IMX7ULP_CFGR1 0x24
+#define IMX7ULP_DMR0 0x30
+#define IMX7ULP_DMR1 0x34
+#define IMX7ULP_CCR 0x40
+#define IMX7ULP_FCR 0x58
+#define IMX7ULP_FSR 0x5c
+#define IMX7ULP_TCR 0x60
+#define IMX7ULP_TDR 0x64
+#define IMX7ULP_RSR 0x70
+#define IMX7ULP_RDR 0x74
+
+/* General control register field define */
+#define CR_RRF BIT(9)
+#define CR_RTF BIT(8)
+#define CR_RST BIT(1)
+#define CR_MEN BIT(0)
+#define SR_TCF BIT(10)
+#define SR_RDF BIT(1)
+#define SR_TDF BIT(0)
+#define IER_TCIE BIT(10)
+#define IER_RDIE BIT(1)
+#define IER_TDIE BIT(0)
+#define CFGR1_PCSCFG BIT(27)
+#define CFGR1_PCSPOL BIT(8)
+#define CFGR1_NOSTALL BIT(3)
+#define CFGR1_MASTER BIT(0)
+#define RSR_RXEMPTY BIT(1)
+#define TCR_CPOL BIT(31)
+#define TCR_CPHA BIT(30)
+#define TCR_CONT BIT(21)
+#define TCR_CONTC BIT(20)
+#define TCR_RXMSK BIT(19)
+#define TCR_TXMSK BIT(18)
+
+static int clkdivs[] = {1, 2, 4, 8, 16, 32, 64, 128};
+
+struct lpspi_config {
+ u8 bpw;
+ u8 chip_select;
+ u8 prescale;
+ u16 mode;
+ u32 speed_hz;
+};
+
+struct fsl_lpspi_data {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+
+ void *rx_buf;
+ const void *tx_buf;
+ void (*tx)(struct fsl_lpspi_data *);
+ void (*rx)(struct fsl_lpspi_data *);
+
+ u32 remain;
+ u8 txfifosize;
+ u8 rxfifosize;
+
+ struct lpspi_config config;
+ struct completion xfer_done;
+};
+
+static const struct of_device_id fsl_lpspi_dt_ids[] = {
+ { .compatible = "fsl,imx7ulp-spi", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids);
+
+#define LPSPI_BUF_RX(type) \
+static void fsl_lpspi_buf_rx_##type(struct fsl_lpspi_data *fsl_lpspi) \
+{ \
+ unsigned int val = readl(fsl_lpspi->base + IMX7ULP_RDR); \
+ \
+ if (fsl_lpspi->rx_buf) { \
+ *(type *)fsl_lpspi->rx_buf = val; \
+ fsl_lpspi->rx_buf += sizeof(type); \
+ } \
+}
+
+#define LPSPI_BUF_TX(type) \
+static void fsl_lpspi_buf_tx_##type(struct fsl_lpspi_data *fsl_lpspi) \
+{ \
+ type val = 0; \
+ \
+ if (fsl_lpspi->tx_buf) { \
+ val = *(type *)fsl_lpspi->tx_buf; \
+ fsl_lpspi->tx_buf += sizeof(type); \
+ } \
+ \
+ fsl_lpspi->remain -= sizeof(type); \
+ writel(val, fsl_lpspi->base + IMX7ULP_TDR); \
+}
+
+LPSPI_BUF_RX(u8)
+LPSPI_BUF_TX(u8)
+LPSPI_BUF_RX(u16)
+LPSPI_BUF_TX(u16)
+LPSPI_BUF_RX(u32)
+LPSPI_BUF_TX(u32)
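For reference, the u8 receive instantiation above expands to essentially the following (a mechanical expansion of LPSPI_BUF_RX, not extra code added by the patch):

static void fsl_lpspi_buf_rx_u8(struct fsl_lpspi_data *fsl_lpspi)
{
	unsigned int val = readl(fsl_lpspi->base + IMX7ULP_RDR);

	if (fsl_lpspi->rx_buf) {
		*(u8 *)fsl_lpspi->rx_buf = val;
		fsl_lpspi->rx_buf += sizeof(u8);
	}
}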
+
+static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi,
+ unsigned int enable)
+{
+ writel(enable, fsl_lpspi->base + IMX7ULP_IER);
+}
+
+static int lpspi_prepare_xfer_hardware(struct spi_master *master)
+{
+ struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+
+ return clk_prepare_enable(fsl_lpspi->clk);
+}
+
+static int lpspi_unprepare_xfer_hardware(struct spi_master *master)
+{
+ struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(fsl_lpspi->clk);
+
+ return 0;
+}
+
+static int fsl_lpspi_txfifo_empty(struct fsl_lpspi_data *fsl_lpspi)
+{
+ u32 txcnt;
+ unsigned long orig_jiffies = jiffies;
+
+ do {
+ txcnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;
+
+ if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
+ dev_dbg(fsl_lpspi->dev, "txfifo empty timeout\n");
+ return -ETIMEDOUT;
+ }
+ cond_resched();
+
+ } while (txcnt);
+
+ return 0;
+}
+
+static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
+{
+ u8 txfifo_cnt;
+
+ txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;
+
+ while (txfifo_cnt < fsl_lpspi->txfifosize) {
+ if (!fsl_lpspi->remain)
+ break;
+ fsl_lpspi->tx(fsl_lpspi);
+ txfifo_cnt++;
+ }
+
+ if (!fsl_lpspi->remain && (txfifo_cnt < fsl_lpspi->txfifosize))
+ writel(0, fsl_lpspi->base + IMX7ULP_TDR);
+ else
+ fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE);
+}
+
+static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi)
+{
+ while (!(readl(fsl_lpspi->base + IMX7ULP_RSR) & RSR_RXEMPTY))
+ fsl_lpspi->rx(fsl_lpspi);
+}
+
+static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi,
+ bool is_first_xfer)
+{
+ u32 temp = 0;
+
+ temp |= fsl_lpspi->config.bpw - 1;
+ temp |= fsl_lpspi->config.prescale << 27;
+ temp |= (fsl_lpspi->config.mode & 0x3) << 30;
+ temp |= (fsl_lpspi->config.chip_select & 0x3) << 24;
+
+ /*
+ * Setting TCR_CONT keeps SS asserted after the current transfer.
+ * For the first transfer, clear TCR_CONTC to assert SS.
+ * For subsequent transfers, set TCR_CONTC to keep SS asserted.
+ */
+ temp |= TCR_CONT;
+ if (is_first_xfer)
+ temp &= ~TCR_CONTC;
+ else
+ temp |= TCR_CONTC;
+
+ writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
+
+ dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp);
+}
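As a concrete example of the TCR word built above (illustrative values, not from the patch): an 8-bit, SPI_MODE_0 transfer on chip select 0 with prescale 0 and is_first_xfer true gives temp = (8 - 1) | TCR_CONT = 0x00200007; a later transfer in the same message adds TCR_CONTC, giving 0x00300007, so SS stays asserted between the two.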
+
+static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
+{
+ u32 temp;
+
+ temp = fsl_lpspi->txfifosize >> 1 | (fsl_lpspi->rxfifosize >> 1) << 16;
+
+ writel(temp, fsl_lpspi->base + IMX7ULP_FCR);
+
+ dev_dbg(fsl_lpspi->dev, "FCR=0x%x\n", temp);
+}
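Taking hypothetical 16-entry TX and RX FIFOs (the real depths are derived from the PARAM register in probe), both watermarks land at half depth: FCR = (16 >> 1) | ((16 >> 1) << 16) = 0x00080008.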
+
+static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
+{
+ struct lpspi_config config = fsl_lpspi->config;
+ unsigned int perclk_rate, scldiv;
+ u8 prescale;
+
+ perclk_rate = clk_get_rate(fsl_lpspi->clk);
+ for (prescale = 0; prescale < 8; prescale++) {
+ scldiv = perclk_rate /
+ (clkdivs[prescale] * config.speed_hz) - 2;
+ if (scldiv < 256) {
+ fsl_lpspi->config.prescale = prescale;
+ break;
+ }
+ }
+
+ if (prescale == 8 && scldiv >= 256)
+ return -EINVAL;
+
+ writel(scldiv, fsl_lpspi->base + IMX7ULP_CCR);
+
+ dev_dbg(fsl_lpspi->dev, "perclk=%d, speed=%d, prescale =%d, scldiv=%d\n",
+ perclk_rate, config.speed_hz, prescale, scldiv);
+
+ return 0;
+}
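Worked example (hypothetical 24 MHz perclk): for a requested 1 MHz, prescale 0 already fits since scldiv = 24 MHz / (1 * 1 MHz) - 2 = 22 < 256, and the SCK rate implied by this computation is 24 MHz / (1 * (22 + 2)) = 1 MHz. For a requested 10 kHz, prescales 0 through 3 all give scldiv >= 256; prescale 4 (divide by 16) gives scldiv = 24 MHz / (16 * 10 kHz) - 2 = 148, i.e. 24 MHz / (16 * 150) = 10 kHz.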
+
+static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
+{
+ u32 temp;
+ int ret;
+
+ temp = CR_RST;
+ writel(temp, fsl_lpspi->base + IMX7ULP_CR);
+ writel(0, fsl_lpspi->base + IMX7ULP_CR);
+
+ ret = fsl_lpspi_set_bitrate(fsl_lpspi);
+ if (ret)
+ return ret;
+
+ fsl_lpspi_set_watermark(fsl_lpspi);
+
+ temp = CFGR1_PCSCFG | CFGR1_MASTER | CFGR1_NOSTALL;
+ if (fsl_lpspi->config.mode & SPI_CS_HIGH)
+ temp |= CFGR1_PCSPOL;
+ writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);
+
+ temp = readl(fsl_lpspi->base + IMX7ULP_CR);
+ temp |= CR_RRF | CR_RTF | CR_MEN;
+ writel(temp, fsl_lpspi->base + IMX7ULP_CR);
+
+ return 0;
+}
+
+static void fsl_lpspi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(spi->master);
+
+ fsl_lpspi->config.mode = spi->mode;
+ fsl_lpspi->config.bpw = t ? t->bits_per_word : spi->bits_per_word;
+ fsl_lpspi->config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
+ fsl_lpspi->config.chip_select = spi->chip_select;
+
+ if (!fsl_lpspi->config.speed_hz)
+ fsl_lpspi->config.speed_hz = spi->max_speed_hz;
+ if (!fsl_lpspi->config.bpw)
+ fsl_lpspi->config.bpw = spi->bits_per_word;
+
+ /* Initialize the functions for transfer */
+ if (fsl_lpspi->config.bpw <= 8) {
+ fsl_lpspi->rx = fsl_lpspi_buf_rx_u8;
+ fsl_lpspi->tx = fsl_lpspi_buf_tx_u8;
+ } else if (fsl_lpspi->config.bpw <= 16) {
+ fsl_lpspi->rx = fsl_lpspi_buf_rx_u16;
+ fsl_lpspi->tx = fsl_lpspi_buf_tx_u16;
+ } else {
+ fsl_lpspi->rx = fsl_lpspi_buf_rx_u32;
+ fsl_lpspi->tx = fsl_lpspi_buf_tx_u32;
+ }
+
+ fsl_lpspi_config(fsl_lpspi);
+}
+
+static int fsl_lpspi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+ int ret;
+
+ fsl_lpspi->tx_buf = t->tx_buf;
+ fsl_lpspi->rx_buf = t->rx_buf;
+ fsl_lpspi->remain = t->len;
+
+ reinit_completion(&fsl_lpspi->xfer_done);
+ fsl_lpspi_write_tx_fifo(fsl_lpspi);
+
+ ret = wait_for_completion_timeout(&fsl_lpspi->xfer_done, HZ);
+ if (!ret) {
+ dev_dbg(fsl_lpspi->dev, "wait for completion timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = fsl_lpspi_txfifo_empty(fsl_lpspi);
+ if (ret)
+ return ret;
+
+ fsl_lpspi_read_rx_fifo(fsl_lpspi);
+
+ return 0;
+}
+
+static int fsl_lpspi_transfer_one_msg(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+ struct spi_transfer *xfer;
+ bool is_first_xfer = true;
+ u32 temp;
+ int ret;
+
+ msg->status = 0;
+ msg->actual_length = 0;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ fsl_lpspi_setup_transfer(spi, xfer);
+ fsl_lpspi_set_cmd(fsl_lpspi, is_first_xfer);
+
+ is_first_xfer = false;
+
+ ret = fsl_lpspi_transfer_one(master, spi, xfer);
+ if (ret < 0)
+ goto complete;
+
+ msg->actual_length += xfer->len;
+ }
+
+complete:
+ /* de-assert SS, then finalize current message */
+ temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
+ temp &= ~TCR_CONTC;
+ writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
+
+ msg->status = ret;
+ spi_finalize_current_message(master);
+
+ return ret;
+}
+
+static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
+{
+ struct fsl_lpspi_data *fsl_lpspi = dev_id;
+ u32 temp;
+
+ fsl_lpspi_intctrl(fsl_lpspi, 0);
+ temp = readl(fsl_lpspi->base + IMX7ULP_SR);
+
+ fsl_lpspi_read_rx_fifo(fsl_lpspi);
+
+ if (temp & SR_TDF) {
+ fsl_lpspi_write_tx_fifo(fsl_lpspi);
+
+ if (!fsl_lpspi->remain)
+ complete(&fsl_lpspi->xfer_done);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int fsl_lpspi_probe(struct platform_device *pdev)
+{
+ struct fsl_lpspi_data *fsl_lpspi;
+ struct spi_master *master;
+ struct resource *res;
+ int ret, irq;
+ u32 temp;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_lpspi_data));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ master->bus_num = pdev->id;
+
+ fsl_lpspi = spi_master_get_devdata(master);
+ fsl_lpspi->dev = &pdev->dev;
+
+ master->transfer_one_message = fsl_lpspi_transfer_one_msg;
+ master->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
+ master->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = pdev->id;
+
+ init_completion(&fsl_lpspi->xfer_done);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fsl_lpspi->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fsl_lpspi->base)) {
+ ret = PTR_ERR(fsl_lpspi->base);
+ goto out_master_put;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto out_master_put;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, 0,
+ dev_name(&pdev->dev), fsl_lpspi);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
+ goto out_master_put;
+ }
+
+ fsl_lpspi->clk = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(fsl_lpspi->clk)) {
+ ret = PTR_ERR(fsl_lpspi->clk);
+ goto out_master_put;
+ }
+
+ ret = clk_prepare_enable(fsl_lpspi->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "can't enable lpspi clock, ret=%d\n", ret);
+ goto out_master_put;
+ }
+
+ temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
+ fsl_lpspi->txfifosize = 1 << (temp & 0x0f);
+ fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f);
+
+ clk_disable_unprepare(fsl_lpspi->clk);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_master error.\n");
+ goto out_master_put;
+ }
+
+ return 0;
+
+out_master_put:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int fsl_lpspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(fsl_lpspi->clk);
+
+ return 0;
+}
+
+static struct platform_driver fsl_lpspi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = fsl_lpspi_dt_ids,
+ },
+ .probe = fsl_lpspi_probe,
+ .remove = fsl_lpspi_remove,
+};
+module_platform_driver(fsl_lpspi_driver);
+
+MODULE_DESCRIPTION("LPSPI Master Controller driver");
+MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index deb782f6556c..32ced64a5bb9 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -173,15 +173,16 @@ static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
/* MX21, MX27 */
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
- unsigned int fspi, unsigned int max)
+ unsigned int fspi, unsigned int max, unsigned int *fres)
{
int i;
for (i = 2; i < max; i++)
if (fspi * mxc_clkdivs[i] >= fin)
- return i;
+ break;
- return max;
+ *fres = fin / mxc_clkdivs[i];
+ return i;
}
/* MX1, MX31, MX35, MX51 CSPI */
@@ -442,6 +443,7 @@ static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
#define MX31_CSPICTRL_ENABLE (1 << 0)
#define MX31_CSPICTRL_MASTER (1 << 1)
#define MX31_CSPICTRL_XCH (1 << 2)
+#define MX31_CSPICTRL_SMC (1 << 3)
#define MX31_CSPICTRL_POL (1 << 4)
#define MX31_CSPICTRL_PHA (1 << 5)
#define MX31_CSPICTRL_SSCTL (1 << 6)
@@ -452,6 +454,10 @@ static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
#define MX35_CSPICTRL_CS_SHIFT 12
#define MX31_CSPICTRL_DR_SHIFT 16
+#define MX31_CSPI_DMAREG 0x10
+#define MX31_DMAREG_RH_DEN (1<<4)
+#define MX31_DMAREG_TH_DEN (1<<1)
+
#define MX31_CSPISTATUS 0x14
#define MX31_STATUS_RR (1 << 3)
@@ -511,6 +517,9 @@ static int mx31_config(struct spi_device *spi, struct spi_imx_config *config)
(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
MX31_CSPICTRL_CS_SHIFT);
+ if (spi_imx->usedma)
+ reg |= MX31_CSPICTRL_SMC;
+
writel(reg, spi_imx->base + MXC_CSPICTRL);
reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
@@ -520,6 +529,13 @@ static int mx31_config(struct spi_device *spi, struct spi_imx_config *config)
reg &= ~MX31_TEST_LBC;
writel(reg, spi_imx->base + MX31_CSPI_TESTREG);
+ if (spi_imx->usedma) {
+ /*
+ * Configure DMA requests when RXFIFO is half full and
+ * when TXFIFO is half empty.
+ */
+ writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
+ spi_imx->base + MX31_CSPI_DMAREG);
+ }
+
return 0;
}
@@ -574,9 +590,12 @@ static int mx21_config(struct spi_device *spi, struct spi_imx_config *config)
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
+ unsigned int clk;
+
+ reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max, &clk)
+ << MX21_CSPICTRL_DR_SHIFT;
+ spi_imx->spi_bus_clk = clk;
- reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max) <<
- MX21_CSPICTRL_DR_SHIFT;
reg |= config->bpw - 1;
if (spi->mode & SPI_CPHA)
@@ -1244,10 +1263,10 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
/*
- * Only validated on i.mx6 now, can remove the constrain if validated on
- * other chips.
+ * Only validated on i.mx35 and i.mx6 now, can remove the constraint
+ * if validated on other chips.
*/
- if (is_imx51_ecspi(spi_imx)) {
+ if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx)) {
ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master);
if (ret == -EPROBE_DEFER)
goto out_clk_put;
diff --git a/drivers/spi/spi-jcore.c b/drivers/spi/spi-jcore.c
index f8117b80fa22..cebfea5faa4b 100644
--- a/drivers/spi/spi-jcore.c
+++ b/drivers/spi/spi-jcore.c
@@ -214,6 +214,7 @@ static const struct of_device_id jcore_spi_of_match[] = {
{ .compatible = "jcore,spi2" },
{},
};
+MODULE_DEVICE_TABLE(of, jcore_spi_of_match);
static struct platform_driver jcore_spi_driver = {
.probe = jcore_spi_probe,
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index d5157b2222ce..79800e991ccd 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1386,20 +1386,13 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
regs_offset = pdata->regs_offset;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- status = -ENODEV;
- goto free_master;
- }
-
- r->start += regs_offset;
- r->end += regs_offset;
- mcspi->phys = r->start;
-
mcspi->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(mcspi->base)) {
status = PTR_ERR(mcspi->base);
goto free_master;
}
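+ /* apply the platform-provided register offset only after mapping the full resource */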
+ mcspi->phys = r->start + regs_offset;
+ mcspi->base += regs_offset;
mcspi->dev = &pdev->dev;
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index ded37025b445..6b001c4a5640 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -138,37 +138,62 @@ static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
tclk_hz = clk_get_rate(orion_spi->clk);
if (devdata->typ == ARMADA_SPI) {
- unsigned int clk, spr, sppr, sppr2, err;
- unsigned int best_spr, best_sppr, best_err;
-
- best_err = speed;
- best_spr = 0;
- best_sppr = 0;
-
- /* Iterate over the valid range looking for best fit */
- for (sppr = 0; sppr < 8; sppr++) {
- sppr2 = 0x1 << sppr;
-
- spr = tclk_hz / sppr2;
- spr = DIV_ROUND_UP(spr, speed);
- if ((spr == 0) || (spr > 15))
- continue;
-
- clk = tclk_hz / (spr * sppr2);
- err = speed - clk;
-
- if (err < best_err) {
- best_spr = spr;
- best_sppr = sppr;
- best_err = err;
- }
- }
+ /*
+ * Given the core_clk (tclk_hz) and the target rate (speed) we
+ * determine the best values for SPR (in [0 .. 15]) and SPPR (in
+ * [0..7]) such that
+ *
+ * core_clk / (SPR * 2 ** SPPR)
+ *
+ * is as big as possible but not bigger than speed.
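+ *
+ * Worked example (illustrative values): tclk_hz = 250 MHz and
+ * speed = 10 MHz give divider = 25, so sppr = fls(25) - 4 = 1,
+ * the divider is rounded up to 26 and spr = 13, i.e. a bus
+ * clock of 250 MHz / 26 ~= 9.6 MHz, the closest rate that does
+ * not exceed the requested speed.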
+ */
- if ((best_sppr == 0) && (best_spr == 0))
- return -EINVAL;
+ /* best integer divider: */
+ unsigned divider = DIV_ROUND_UP(tclk_hz, speed);
+ unsigned spr, sppr;
+
+ if (divider < 16) {
+ /* This is the easy case, divider is less than 16 */
+ spr = divider;
+ sppr = 0;
+
+ } else {
+ unsigned two_pow_sppr;
+ /*
+ * Find the highest bit set in divider. This and the
+ * next three bits define SPR (apart from rounding).
+ * SPPR is then the number of zero bits that must be
+ * appended:
+ */
+ sppr = fls(divider) - 4;
+
+ /*
+ * As SPR only has 4 bits, we have to round divider up
+ * to the next multiple of 2 ** sppr.
+ */
+ two_pow_sppr = 1 << sppr;
+ divider = (divider + two_pow_sppr - 1) & -two_pow_sppr;
+
+ /*
+ * recalculate sppr as rounding up divider might have
+ * increased it enough to change the position of the
+ * highest set bit. In this case the bit that now
+ * doesn't make it into SPR is 0, so there is no need to
+ * round again.
+ */
+ sppr = fls(divider) - 4;
+ spr = divider >> sppr;
+
+ /*
+ * Now do range checking. SPR is constructed to have a
+ * width of 4 bits, so that is fine for sure; we still
+ * need to check that sppr fits into 3 bits:
+ */
+ if (sppr > 7)
+ return -EINVAL;
+ }
- prescale = ((best_sppr & 0x6) << 5) |
- ((best_sppr & 0x1) << 4) | best_spr;
+ prescale = ((sppr & 0x6) << 5) | ((sppr & 0x1) << 4) | spr;
} else {
/*
* the supported rates are: 4,6,8...30
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index ce31b8199bb3..2823a00a9405 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -109,7 +109,6 @@ static inline void pxa2xx_spi_write(const struct driver_data *drv_data,
#define DONE_STATE ((void *)2)
#define ERROR_STATE ((void *)-1)
-#define IS_DMA_ALIGNED(x) IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT)
#define DMA_ALIGNMENT 8
static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index a816f07e168e..9daf50031737 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -413,7 +413,7 @@ static unsigned int qspi_set_send_trigger(struct rspi_data *rspi,
return n;
}
-static void qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
+static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
{
unsigned int n;
@@ -428,6 +428,7 @@ static void qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
qspi_update(rspi, SPBFCR_RXTRG_MASK,
SPBFCR_RXTRG_1B, QSPI_SPBFCR);
}
+ return n;
}
#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
@@ -785,6 +786,9 @@ static int qspi_transfer_out_in(struct rspi_data *rspi,
static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
{
+ const u8 *tx = xfer->tx_buf;
+ unsigned int n = xfer->len;
+ unsigned int i, len;
int ret;
if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
@@ -793,9 +797,23 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
return ret;
}
- ret = rspi_pio_transfer(rspi, xfer->tx_buf, NULL, xfer->len);
- if (ret < 0)
- return ret;
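+ /*
+ * Send QSPI_BUFFER_SIZE-sized chunks through the FIFO; a shorter
+ * remainder falls back to rspi_pio_transfer().
+ */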
+ while (n > 0) {
+ len = qspi_set_send_trigger(rspi, n);
+ if (len == QSPI_BUFFER_SIZE) {
+ ret = rspi_wait_for_tx_empty(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->master->dev, "transmit timeout\n");
+ return ret;
+ }
+ for (i = 0; i < len; i++)
+ rspi_write_data(rspi, *tx++);
+ } else {
+ ret = rspi_pio_transfer(rspi, tx, NULL, n);
+ if (ret < 0)
+ return ret;
+ }
+ n -= len;
+ }
/* Wait for the last transmission */
rspi_wait_for_tx_empty(rspi);
@@ -805,13 +823,37 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
{
+ u8 *rx = xfer->rx_buf;
+ unsigned int n = xfer->len;
+ unsigned int i, len;
+ int ret;
+
if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
if (ret != -EAGAIN)
return ret;
}
- return rspi_pio_transfer(rspi, NULL, xfer->rx_buf, xfer->len);
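+ /*
+ * Read QSPI_BUFFER_SIZE-sized chunks from the FIFO; a shorter
+ * remainder falls back to rspi_pio_transfer().
+ */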
+ while (n > 0) {
+ len = qspi_set_receive_trigger(rspi, n);
+ if (len == QSPI_BUFFER_SIZE) {
+ ret = rspi_wait_for_rx_full(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->master->dev, "receive timeout\n");
+ return ret;
+ }
+ for (i = 0; i < len; i++)
+ *rx++ = rspi_read_data(rspi);
+ } else {
+ ret = rspi_pio_transfer(rspi, NULL, rx, n);
+ if (ret < 0)
+ return ret;
+ }
+ n -= len;
+ }
+
+ return 0;
}
static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 1de3a772eb7d..0012ad02e569 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -980,6 +980,7 @@ static const struct of_device_id sh_msiof_match[] = {
{ .compatible = "renesas,msiof-r8a7792", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7793", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7794", .data = &r8a779x_data },
+ { .compatible = "renesas,msiof-r8a7796", .data = &r8a779x_data },
{},
};
MODULE_DEVICE_TABLE(of, sh_msiof_match);
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index 4969dc10684a..c5cd635c28f3 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -46,6 +46,8 @@
#define SUN4I_CTL_TP BIT(18)
#define SUN4I_INT_CTL_REG 0x0c
+#define SUN4I_INT_CTL_RF_F34 BIT(4)
+#define SUN4I_INT_CTL_TF_E34 BIT(12)
#define SUN4I_INT_CTL_TC BIT(16)
#define SUN4I_INT_STA_REG 0x10
@@ -61,11 +63,14 @@
#define SUN4I_CLK_CTL_CDR1(div) (((div) & SUN4I_CLK_CTL_CDR1_MASK) << 8)
#define SUN4I_CLK_CTL_DRS BIT(12)
+#define SUN4I_MAX_XFER_SIZE 0xffffff
+
#define SUN4I_BURST_CNT_REG 0x20
-#define SUN4I_BURST_CNT(cnt) ((cnt) & 0xffffff)
+#define SUN4I_BURST_CNT(cnt) ((cnt) & SUN4I_MAX_XFER_SIZE)
#define SUN4I_XMIT_CNT_REG 0x24
-#define SUN4I_XMIT_CNT(cnt) ((cnt) & 0xffffff)
+#define SUN4I_XMIT_CNT(cnt) ((cnt) & SUN4I_MAX_XFER_SIZE)
+
#define SUN4I_FIFO_STA_REG 0x28
#define SUN4I_FIFO_STA_RF_CNT_MASK 0x7f
@@ -96,6 +101,31 @@ static inline void sun4i_spi_write(struct sun4i_spi *sspi, u32 reg, u32 value)
writel(value, sspi->base_addr + reg);
}
+static inline u32 sun4i_spi_get_tx_fifo_count(struct sun4i_spi *sspi)
+{
+ u32 reg = sun4i_spi_read(sspi, SUN4I_FIFO_STA_REG);
+
+ reg >>= SUN4I_FIFO_STA_TF_CNT_BITS;
+
+ return reg & SUN4I_FIFO_STA_TF_CNT_MASK;
+}
+
+static inline void sun4i_spi_enable_interrupt(struct sun4i_spi *sspi, u32 mask)
+{
+ u32 reg = sun4i_spi_read(sspi, SUN4I_INT_CTL_REG);
+
+ reg |= mask;
+ sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, reg);
+}
+
+static inline void sun4i_spi_disable_interrupt(struct sun4i_spi *sspi, u32 mask)
+{
+ u32 reg = sun4i_spi_read(sspi, SUN4I_INT_CTL_REG);
+
+ reg &= ~mask;
+ sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, reg);
+}
+
static inline void sun4i_spi_drain_fifo(struct sun4i_spi *sspi, int len)
{
u32 reg, cnt;
@@ -118,10 +148,13 @@ static inline void sun4i_spi_drain_fifo(struct sun4i_spi *sspi, int len)
static inline void sun4i_spi_fill_fifo(struct sun4i_spi *sspi, int len)
{
+ u32 cnt;
u8 byte;
- if (len > sspi->len)
- len = sspi->len;
+ /* See how much data we can fit */
+ cnt = SUN4I_FIFO_DEPTH - sun4i_spi_get_tx_fifo_count(sspi);
+
+ len = min3(len, (int)cnt, sspi->len);
while (len--) {
byte = sspi->tx_buf ? *sspi->tx_buf++ : 0;
@@ -184,10 +217,10 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
u32 reg;
/* We don't support transfer larger than the FIFO */
- if (tfr->len > SUN4I_FIFO_DEPTH)
+ if (tfr->len > SUN4I_MAX_XFER_SIZE)
return -EMSGSIZE;
- if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH)
+ if (tfr->tx_buf && tfr->len >= SUN4I_MAX_XFER_SIZE)
return -EMSGSIZE;
reinit_completion(&sspi->done);
@@ -286,7 +319,11 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1);
/* Enable the interrupts */
- sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
+ sun4i_spi_enable_interrupt(sspi, SUN4I_INT_CTL_TC |
+ SUN4I_INT_CTL_RF_F34);
+ /* Only enable Tx FIFO interrupt if we really need it */
+ if (tx_len > SUN4I_FIFO_DEPTH)
+ sun4i_spi_enable_interrupt(sspi, SUN4I_INT_CTL_TF_E34);
/* Start the transfer */
reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
@@ -306,7 +343,6 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
goto out;
}
- sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH);
out:
sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, 0);
@@ -322,10 +358,33 @@ static irqreturn_t sun4i_spi_handler(int irq, void *dev_id)
/* Transfer complete */
if (status & SUN4I_INT_CTL_TC) {
sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_TC);
+ sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH);
complete(&sspi->done);
return IRQ_HANDLED;
}
+ /* Receive FIFO 3/4 full */
+ if (status & SUN4I_INT_CTL_RF_F34) {
+ sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH);
+ /* Only clear the interrupt _after_ draining the FIFO */
+ sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_RF_F34);
+ return IRQ_HANDLED;
+ }
+
+ /* Transmit FIFO 3/4 empty */
+ if (status & SUN4I_INT_CTL_TF_E34) {
+ sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
+
+ if (!sspi->len)
+ /* nothing left to transmit */
+ sun4i_spi_disable_interrupt(sspi, SUN4I_INT_CTL_TF_E34);
+
+ /* Only clear the interrupt _after_ re-seeding the FIFO */
+ sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_TF_E34);
+
+ return IRQ_HANDLED;
+ }
+
return IRQ_NONE;
}
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index 9918a57a6a6e..e3114832c485 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -24,6 +25,7 @@
#include <linux/spi/spi.h>
#define SUN6I_FIFO_DEPTH 128
+#define SUN8I_FIFO_DEPTH 64
#define SUN6I_GBL_CTL_REG 0x04
#define SUN6I_GBL_CTL_BUS_ENABLE BIT(0)
@@ -90,6 +92,7 @@ struct sun6i_spi {
const u8 *tx_buf;
u8 *rx_buf;
int len;
+ unsigned long fifo_depth;
};
static inline u32 sun6i_spi_read(struct sun6i_spi *sspi, u32 reg)
@@ -155,7 +158,9 @@ static void sun6i_spi_set_cs(struct spi_device *spi, bool enable)
static size_t sun6i_spi_max_transfer_size(struct spi_device *spi)
{
- return SUN6I_FIFO_DEPTH - 1;
+ struct sun6i_spi *sspi = spi_master_get_devdata(spi->master);
+
+ return sspi->fifo_depth - 1;
}
static int sun6i_spi_transfer_one(struct spi_master *master,
@@ -170,7 +175,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
u32 reg;
/* We don't support transfer larger than the FIFO */
- if (tfr->len > SUN6I_FIFO_DEPTH)
+ if (tfr->len > sspi->fifo_depth)
return -EINVAL;
reinit_completion(&sspi->done);
@@ -265,7 +270,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
SUN6I_BURST_CTL_CNT_STC(tx_len));
/* Fill the TX FIFO */
- sun6i_spi_fill_fifo(sspi, SUN6I_FIFO_DEPTH);
+ sun6i_spi_fill_fifo(sspi, sspi->fifo_depth);
/* Enable the interrupts */
sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, SUN6I_INT_CTL_TC);
@@ -288,7 +293,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
goto out;
}
- sun6i_spi_drain_fifo(sspi, SUN6I_FIFO_DEPTH);
+ sun6i_spi_drain_fifo(sspi, sspi->fifo_depth);
out:
sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, 0);
@@ -398,6 +403,8 @@ static int sun6i_spi_probe(struct platform_device *pdev)
}
sspi->master = master;
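+ /* FIFO depth differs per SoC; it is carried in the OF match data (see sun6i_spi_match) */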
+ sspi->fifo_depth = (unsigned long)of_device_get_match_data(&pdev->dev);
+
master->max_speed_hz = 100 * 1000 * 1000;
master->min_speed_hz = 3 * 1000;
master->set_cs = sun6i_spi_set_cs;
@@ -470,7 +477,8 @@ static int sun6i_spi_remove(struct platform_device *pdev)
}
static const struct of_device_id sun6i_spi_match[] = {
- { .compatible = "allwinner,sun6i-a31-spi", },
+ { .compatible = "allwinner,sun6i-a31-spi", .data = (void *)SUN6I_FIFO_DEPTH },
+ { .compatible = "allwinner,sun8i-h3-spi", .data = (void *)SUN8I_FIFO_DEPTH },
{}
};
MODULE_DEVICE_TABLE(of, sun6i_spi_match);
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index caeac66a3977..ec6fb09e2e17 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -411,6 +411,7 @@ static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
tx->callback = ti_qspi_dma_callback;
tx->callback_param = qspi;
cookie = tx->tx_submit(tx);
+ reinit_completion(&qspi->transfer_complete);
ret = dma_submit_error(cookie);
if (ret) {
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index c54ee6674471..fcb991034c3d 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1268,11 +1268,8 @@ static void pch_spi_free_resources(struct pch_spi_board_data *board_dat,
static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
struct pch_spi_data *data)
{
- int retval = 0;
-
dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
-
/* reset PCH SPI h/w */
pch_spi_reset(data->master);
dev_dbg(&board_dat->pdev->dev,
@@ -1280,15 +1277,7 @@ static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);
- if (retval != 0) {
- dev_err(&board_dat->pdev->dev,
- "%s FAIL:invoking pch_spi_free_resources\n", __func__);
- pch_spi_free_resources(board_dat, data);
- }
-
- dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval);
-
- return retval;
+ return 0;
}
static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
diff --git a/drivers/spi/spi-xlp.c b/drivers/spi/spi-xlp.c
index 4071a729eb2f..bea7a93a6046 100644
--- a/drivers/spi/spi-xlp.c
+++ b/drivers/spi/spi-xlp.c
@@ -451,6 +451,7 @@ static const struct of_device_id xlp_spi_dt_id[] = {
{ .compatible = "netlogic,xlp832-spi" },
{ },
};
+MODULE_DEVICE_TABLE(of, xlp_spi_dt_id);
static struct platform_driver xlp_spi_driver = {
.probe = xlp_spi_probe,
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 838783c3fed0..656dd3e3220c 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -697,10 +697,15 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
- if (gpio_is_valid(spi->cs_gpio))
+ if (gpio_is_valid(spi->cs_gpio)) {
gpio_set_value(spi->cs_gpio, !enable);
- else if (spi->master->set_cs)
+ /* Some SPI masters need both GPIO CS & slave_select */
+ if ((spi->master->flags & SPI_MASTER_GPIO_SS) &&
+ spi->master->set_cs)
+ spi->master->set_cs(spi, !enable);
+ } else if (spi->master->set_cs) {
spi->master->set_cs(spi, !enable);
+ }
}
#ifdef CONFIG_HAS_DMA
@@ -720,6 +725,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
int desc_len;
int sgs;
struct page *vm_page;
+ struct scatterlist *sg;
void *sg_buf;
size_t min;
int i, ret;
@@ -738,6 +744,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
if (ret != 0)
return ret;
+ sg = &sgt->sgl[0];
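+ /* walk the table with sg_next() so chained scatterlist tables are handled correctly */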
for (i = 0; i < sgs; i++) {
if (vmalloced_buf || kmap_buf) {
@@ -751,16 +758,17 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
sg_free_table(sgt);
return -ENOMEM;
}
- sg_set_page(&sgt->sgl[i], vm_page,
+ sg_set_page(sg, vm_page,
min, offset_in_page(buf));
} else {
min = min_t(size_t, len, desc_len);
sg_buf = buf;
- sg_set_buf(&sgt->sgl[i], sg_buf, min);
+ sg_set_buf(sg, sg_buf, min);
}
buf += min;
len -= min;
+ sg = sg_next(sg);
}
ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
@@ -1034,8 +1042,14 @@ static int spi_transfer_one_message(struct spi_master *master,
if (msg->status != -EINPROGRESS)
goto out;
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
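+ /* busy-wait only for very short delays; otherwise sleep, allowing ~10% slack */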
+ if (xfer->delay_usecs) {
+ u16 us = xfer->delay_usecs;
+
+ if (us <= 10)
+ udelay(us);
+ else
+ usleep_range(us, us + DIV_ROUND_UP(us, 10));
+ }
if (xfer->cs_change) {
if (list_is_last(&xfer->transfer_list,
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 2e05046f866b..9e2e099baf8c 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -696,6 +696,7 @@ static struct class *spidev_class;
static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "rohm,dh2228fv" },
{ .compatible = "lineartechnology,ltc2488" },
+ { .compatible = "ge,achc" },
{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 58a7b3504b82..cd005cd41413 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -24,8 +24,6 @@ menuconfig STAGING
if STAGING
-source "drivers/staging/slicoss/Kconfig"
-
source "drivers/staging/wlan-ng/Kconfig"
source "drivers/staging/comedi/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 2fa9745db614..831e2e891989 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -1,7 +1,6 @@
# Makefile for staging directory
obj-y += media/
-obj-$(CONFIG_SLICOSS) += slicoss/
obj-$(CONFIG_PRISM2_USB) += wlan-ng/
obj-$(CONFIG_COMEDI) += comedi/
obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
@@ -41,4 +40,4 @@ obj-$(CONFIG_MOST) += most/
obj-$(CONFIG_ISDN_I4L) += i4l/
obj-$(CONFIG_KS7010) += ks7010/
obj-$(CONFIG_GREYBUS) += greybus/
-obj-$(CONFIG_BCM2708_VCHIQ) += vc04_services/
+obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 64d8c8720960..8f3ac37bfe12 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -25,13 +25,5 @@ ion/
exposes existing cma regions and doesn't unnecessarily reserve memory when
booting a system which doesn't use ion.
-sync framework:
- - remove CONFIG_SW_SYNC_USER, it is used only for testing/debugging and
- should not be upstreamed.
- - port CONFIG_SW_SYNC_USER tests interfaces to use debugfs somehow
- - port libsync tests to kselftest
- - clean up and ABI check for security issues
- - move it to drivers/base/dma-buf
-
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com>
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index ca9a53c03f0f..7cbad0d45b9c 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -100,39 +100,43 @@ static DEFINE_MUTEX(ashmem_mutex);
static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;
-#define range_size(range) \
- ((range)->pgend - (range)->pgstart + 1)
+static inline unsigned long range_size(struct ashmem_range *range)
+{
+ return range->pgend - range->pgstart + 1;
+}
-#define range_on_lru(range) \
- ((range)->purged == ASHMEM_NOT_PURGED)
+static inline bool range_on_lru(struct ashmem_range *range)
+{
+ return range->purged == ASHMEM_NOT_PURGED;
+}
-static inline int page_range_subsumes_range(struct ashmem_range *range,
- size_t start, size_t end)
+static inline bool page_range_subsumes_range(struct ashmem_range *range,
+ size_t start, size_t end)
{
- return (((range)->pgstart >= (start)) && ((range)->pgend <= (end)));
+ return (range->pgstart >= start) && (range->pgend <= end);
}
-static inline int page_range_subsumed_by_range(struct ashmem_range *range,
- size_t start, size_t end)
+static inline bool page_range_subsumed_by_range(struct ashmem_range *range,
+ size_t start, size_t end)
{
- return (((range)->pgstart <= (start)) && ((range)->pgend >= (end)));
+ return (range->pgstart <= start) && (range->pgend >= end);
}
-static inline int page_in_range(struct ashmem_range *range, size_t page)
+static inline bool page_in_range(struct ashmem_range *range, size_t page)
{
- return (((range)->pgstart <= (page)) && ((range)->pgend >= (page)));
+ return (range->pgstart <= page) && (range->pgend >= page);
}
-static inline int page_range_in_range(struct ashmem_range *range,
- size_t start, size_t end)
+static inline bool page_range_in_range(struct ashmem_range *range,
+ size_t start, size_t end)
{
- return (page_in_range(range, start) || page_in_range(range, end) ||
- page_range_subsumes_range(range, start, end));
+ return page_in_range(range, start) || page_in_range(range, end) ||
+ page_range_subsumes_range(range, start, end);
}
-static inline int range_before_page(struct ashmem_range *range, size_t page)
+static inline bool range_before_page(struct ashmem_range *range, size_t page)
{
- return ((range)->pgend < (page));
+ return range->pgend < page;
}
#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 209a8f7ef02b..d5cc3070e83f 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1013,7 +1013,7 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
return 0;
}
-static struct dma_buf_ops dma_buf_ops = {
+static const struct dma_buf_ops dma_buf_ops = {
.map_dma_buf = ion_map_dma_buf,
.unmap_dma_buf = ion_unmap_dma_buf,
.mmap = ion_mmap,
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index b23f2c76c753..cf5c010d32bc 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -58,7 +58,7 @@ static struct ion_platform_heap dummy_heaps[] = {
},
};
-static struct ion_platform_data dummy_ion_pdata = {
+static const struct ion_platform_data dummy_ion_pdata = {
.nr = ARRAY_SIZE(dummy_heaps),
.heaps = dummy_heaps,
};
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 7e023d505af8..3ebbb75746e8 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -30,7 +30,7 @@
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
__GFP_NORETRY) & ~__GFP_RECLAIM;
-static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO);
+static gfp_t low_order_gfp_flags = GFP_HIGHUSER | __GFP_ZERO;
static const unsigned int orders[] = {8, 4, 0};
static int order_to_index(unsigned int order)
diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h
index ffef06f63133..480242e02f8d 100644
--- a/drivers/staging/android/uapi/ion_test.h
+++ b/drivers/staging/android/uapi/ion_test.h
@@ -66,5 +66,4 @@ struct ion_test_rw_data {
#define ION_IOC_TEST_KERNEL_MAPPING \
_IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
-
#endif /* _UAPI_LINUX_ION_H */
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
index 7b8be5293883..bf3fe7c61be5 100644
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
@@ -68,7 +68,7 @@ struct clk_wzrd {
struct clk *axi_clk;
struct clk *clks_internal[wzrd_clk_int_max];
struct clk *clkout[WZRD_NUM_OUTPUTS];
- int speed_grade;
+ unsigned int speed_grade;
bool suspended;
};
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index 08fb26b51a5f..a1c1081906c5 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -245,6 +245,22 @@ enum comedi_subdevice_type {
/* configuration instructions */
/**
+ * enum comedi_io_direction - COMEDI I/O directions
+ * @COMEDI_INPUT: Input.
+ * @COMEDI_OUTPUT: Output.
+ * @COMEDI_OPENDRAIN: Open-drain (or open-collector) output.
+ *
+ * These are used by the %INSN_CONFIG_DIO_QUERY configuration instruction to
+ * report a direction. They may also be used in other places where a direction
+ * needs to be specified.
+ */
+enum comedi_io_direction {
+ COMEDI_INPUT = 0,
+ COMEDI_OUTPUT = 1,
+ COMEDI_OPENDRAIN = 2
+};
+
+/**
* enum configuration_ids - COMEDI configuration instruction codes
* @INSN_CONFIG_DIO_INPUT: Configure digital I/O as input.
* @INSN_CONFIG_DIO_OUTPUT: Configure digital I/O as output.
@@ -296,9 +312,9 @@ enum comedi_subdevice_type {
* @INSN_CONFIG_PWM_GET_H_BRIDGE: Get PWM H bridge duty cycle and polarity.
*/
enum configuration_ids {
- INSN_CONFIG_DIO_INPUT = 0,
- INSN_CONFIG_DIO_OUTPUT = 1,
- INSN_CONFIG_DIO_OPENDRAIN = 2,
+ INSN_CONFIG_DIO_INPUT = COMEDI_INPUT,
+ INSN_CONFIG_DIO_OUTPUT = COMEDI_OUTPUT,
+ INSN_CONFIG_DIO_OPENDRAIN = COMEDI_OPENDRAIN,
INSN_CONFIG_ANALOG_TRIG = 16,
/* INSN_CONFIG_WAVEFORM = 17, */
/* INSN_CONFIG_TRIG = 18, */
@@ -397,22 +413,6 @@ enum comedi_digital_trig_op {
};
/**
- * enum comedi_io_direction - COMEDI I/O directions
- * @COMEDI_INPUT: Input.
- * @COMEDI_OUTPUT: Output.
- * @COMEDI_OPENDRAIN: Open-drain (or open-collector) output.
- *
- * These are used by the %INSN_CONFIG_DIO_QUERY configuration instruction to
- * report a direction. They may also be used in other places where a direction
- * needs to be specified.
- */
-enum comedi_io_direction {
- COMEDI_INPUT = 0,
- COMEDI_OUTPUT = 1,
- COMEDI_OPENDRAIN = 2
-};
-
-/**
* enum comedi_support_level - support level for a COMEDI feature
* @COMEDI_UNKNOWN_SUPPORT: Unspecified support for feature.
* @COMEDI_SUPPORTED: Feature is supported.
@@ -1104,18 +1104,19 @@ enum ni_gpct_other_select {
enum ni_gpct_arm_source {
NI_GPCT_ARM_IMMEDIATE = 0x0,
/*
- * Start both the counter and the adjacent pared
- * counter simultaneously
+ * Start both the counter and the adjacent paired counter simultaneously
*/
NI_GPCT_ARM_PAIRED_IMMEDIATE = 0x1,
/*
- * NI doesn't document bits for selecting hardware arm triggers.
- * If the NI_GPCT_ARM_UNKNOWN bit is set, we will pass the least
- * significant bits (3 bits for 660x or 5 bits for m-series)
- * through to the hardware. This will at least allow someone to
- * figure out what the bits do later.
+ * If the NI_GPCT_HW_ARM bit is set, we will pass the least significant
+ * bits (3 bits for 660x or 5 bits for m-series) through to the
+ * hardware. To select a hardware trigger, pass the appropriate select
+ * bit, e.g.,
+ * NI_GPCT_HW_ARM | NI_GPCT_AI_START1_GATE_SELECT or
+ * NI_GPCT_HW_ARM | NI_GPCT_PFI_GATE_SELECT(pfi_number)
*/
- NI_GPCT_ARM_UNKNOWN = 0x1000,
+ NI_GPCT_HW_ARM = 0x1000,
+ NI_GPCT_ARM_UNKNOWN = NI_GPCT_HW_ARM, /* for backward compatibility */
};
/* digital filtering options for ni 660x for use with INSN_CONFIG_FILTER. */
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index dcb637665eb7..0c7c37a8ff33 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -426,6 +426,18 @@ enum comedi_cb {
* handler will be called with the COMEDI device structure's board_ptr member
* pointing to the matched pointer to a board name within the driver's private
* array of static, read-only board type information.
+ *
+ * The @detach handler has two roles. If a COMEDI device was successfully
+ * configured by the @attach or @auto_attach handler, it is called when the
+ * device is being deconfigured (by the %COMEDI_DEVCONFIG ioctl, or due to
+ * unloading of the driver, or due to device removal). It is also called when
+ * the @attach or @auto_attach handler returns an error. Therefore, the
+ * @attach or @auto_attach handlers can defer clean-up on error until the
+ * @detach handler is called. If the @attach or @auto_attach handlers free
+ * any resources themselves, they must prevent the @detach handler from
+ * freeing the same resources. The @detach handler must not assume that all
+ * resources requested by the @attach or @auto_attach handler were
+ * successfully allocated.
*/
struct comedi_driver {
/* private: */
diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c
index ccb37d1f0f8e..987414741605 100644
--- a/drivers/staging/comedi/drivers/cb_pcidda.c
+++ b/drivers/staging/comedi/drivers/cb_pcidda.c
@@ -248,8 +248,8 @@ static void cb_pcidda_write_caldac(struct comedi_device *dev,
cb_pcidda_serial_out(dev, value, num_caldac_bits);
/*
-* latch stream into appropriate caldac deselect reference dac
-*/
+ * latch stream into appropriate caldac deselect reference dac
+ */
cal2_bits = DESELECT_REF_DAC_BIT | DUMMY_BIT;
/* deactivate caldacs (one caldac for every two channels) */
for (i = 0; i < max_num_caldacs; i++)
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index b1c0860135d0..05126ba4ba51 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -837,7 +837,7 @@ static int mite_setup(struct comedi_device *dev, struct mite *mite,
* of 0x61f and bursts worked. 6281 powered up with register value of
* 0x1f and bursts didn't work. The NI windows driver reads the
* register, then does a bitwise-or of 0x600 with it and writes it back.
- *
+ *
* The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be
* written and read back. The bits 0x1f always read as 1.
* The rest always read as zero.
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 0f97d7b611d7..b2e382888981 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -1832,11 +1832,10 @@ static int ni_ai_insn_read(struct comedi_device *dev,
unsigned int *data)
{
struct ni_private *devpriv = dev->private;
- unsigned int mask = (s->maxdata + 1) >> 1;
+ unsigned int mask = s->maxdata;
int i, n;
unsigned int signbits;
unsigned int d;
- unsigned long dl;
ni_load_channelgain_list(dev, s, 1, &insn->chanspec);
@@ -1875,7 +1874,7 @@ static int ni_ai_insn_read(struct comedi_device *dev,
return -ETIME;
}
d += signbits;
- data[n] = d;
+ data[n] = d & 0xffff;
}
} else if (devpriv->is_6143) {
for (n = 0; n < insn->n; n++) {
@@ -1887,15 +1886,15 @@ static int ni_ai_insn_read(struct comedi_device *dev,
* bit to move a single 16bit stranded sample into
* the FIFO.
*/
- dl = 0;
+ d = 0;
for (i = 0; i < NI_TIMEOUT; i++) {
if (ni_readl(dev, NI6143_AI_FIFO_STATUS_REG) &
0x01) {
/* Get stranded sample into FIFO */
ni_writel(dev, 0x01,
NI6143_AI_FIFO_CTRL_REG);
- dl = ni_readl(dev,
- NI6143_AI_FIFO_DATA_REG);
+ d = ni_readl(dev,
+ NI6143_AI_FIFO_DATA_REG);
break;
}
}
@@ -1903,7 +1902,7 @@ static int ni_ai_insn_read(struct comedi_device *dev,
dev_err(dev->class_dev, "timeout\n");
return -ETIME;
}
- data[n] = (((dl >> 16) & 0xFFFF) + signbits) & 0xFFFF;
+ data[n] = (((d >> 16) & 0xFFFF) + signbits) & 0xFFFF;
}
} else {
for (n = 0; n < insn->n; n++) {
@@ -1919,14 +1918,13 @@ static int ni_ai_insn_read(struct comedi_device *dev,
return -ETIME;
}
if (devpriv->is_m_series) {
- dl = ni_readl(dev, NI_M_AI_FIFO_DATA_REG);
- dl &= mask;
- data[n] = dl;
+ d = ni_readl(dev, NI_M_AI_FIFO_DATA_REG);
+ d &= mask;
+ data[n] = d;
} else {
d = ni_readw(dev, NI_E_AI_FIFO_DATA_REG);
- /* subtle: needs to be short addition */
d += signbits;
- data[n] = d;
+ data[n] = d & 0xffff;
}
}
}
@@ -2729,66 +2727,36 @@ static int ni_ao_insn_write(struct comedi_device *dev,
return insn->n;
}
-static int ni_ao_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- const struct ni_board_struct *board = dev->board_ptr;
- struct ni_private *devpriv = dev->private;
- unsigned int nbytes;
-
- switch (data[0]) {
- case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE:
- switch (data[1]) {
- case COMEDI_OUTPUT:
- nbytes = comedi_samples_to_bytes(s,
- board->ao_fifo_depth);
- data[2] = 1 + nbytes;
- if (devpriv->mite)
- data[2] += devpriv->mite->fifo_size;
- break;
- case COMEDI_INPUT:
- data[2] = 0;
- break;
- default:
- return -EINVAL;
- }
- return 0;
- default:
- break;
- }
-
- return -EINVAL;
-}
-
-static int ni_ao_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
+/*
+ * Arms the AO device in preparation for a trigger event.
+ * This function also allocates and prepares a DMA channel (or FIFO if DMA is
+ * not used). As a part of this preparation, this function preloads the DAC
+ * registers with the first values of the output stream. This ensures that the
+ * first clock cycle after the trigger can be used for output.
+ *
+ * Note that this function _must_ be called after a user has written data to the
+ * output buffers via either mmap or write(fileno,...).
+ */
+static int ni_ao_arm(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct ni_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
int ret;
int interrupt_b_bits;
int i;
static const int timeout = 1000;
/*
- * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT.
- * For backwards compatibility, also allow trig_num == 0 when
- * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT);
- * in that case, the internal trigger is being used as a pre-trigger
- * before the external trigger.
+ * Prevent ao from doing things like trying to allocate the ao dma
+ * channel multiple times.
*/
- if (!(trig_num == cmd->start_arg ||
- (trig_num == 0 && cmd->start_src != TRIG_INT)))
+ if (!devpriv->ao_needs_arming) {
+ dev_dbg(dev->class_dev, "%s: device does not need arming!\n",
+ __func__);
return -EINVAL;
+ }
- /*
- * Null trig at beginning prevent ao start trigger from executing more
- * than once per command (and doing things like trying to allocate the
- * ao dma channel multiple times).
- */
- s->async->inttrig = NULL;
+ devpriv->ao_needs_arming = 0;
ni_set_bits(dev, NISTC_INTB_ENA_REG,
NISTC_INTB_ENA_AO_FIFO | NISTC_INTB_ENA_AO_ERR, 0);
@@ -2840,6 +2808,75 @@ static int ni_ao_inttrig(struct comedi_device *dev,
devpriv->ao_cmd1,
NISTC_AO_CMD1_REG);
+ return 0;
+}
+
+static int ni_ao_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ const struct ni_board_struct *board = dev->board_ptr;
+ struct ni_private *devpriv = dev->private;
+ unsigned int nbytes;
+
+ switch (data[0]) {
+ case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE:
+ switch (data[1]) {
+ case COMEDI_OUTPUT:
+ nbytes = comedi_samples_to_bytes(s,
+ board->ao_fifo_depth);
+ data[2] = 1 + nbytes;
+ if (devpriv->mite)
+ data[2] += devpriv->mite->fifo_size;
+ break;
+ case COMEDI_INPUT:
+ data[2] = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+ case INSN_CONFIG_ARM:
+ return ni_ao_arm(dev, s);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int ni_ao_inttrig(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int trig_num)
+{
+ struct ni_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ int ret;
+
+ /*
+ * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT.
+ * For backwards compatibility, also allow trig_num == 0 when
+ * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT);
+ * in that case, the internal trigger is being used as a pre-trigger
+ * before the external trigger.
+ */
+ if (!(trig_num == cmd->start_arg ||
+ (trig_num == 0 && cmd->start_src != TRIG_INT)))
+ return -EINVAL;
+
+ /*
+ * Null trig at beginning prevents the ao start trigger from executing more
+ * than once per command.
+ */
+ s->async->inttrig = NULL;
+
+ if (devpriv->ao_needs_arming) {
+ /* only arm this device if it still needs arming */
+ ret = ni_ao_arm(dev, s);
+ if (ret)
+ return ret;
+ }
+
ni_stc_writew(dev, NISTC_AO_CMD2_START1_PULSE | devpriv->ao_cmd2,
NISTC_AO_CMD2_REG);
@@ -3227,10 +3264,17 @@ static int ni_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
ni_ao_cmd_set_interrupts(dev, s);
/*
- * arm(ing) and star(ting) happen in ni_ao_inttrig, which _must_ be
- * called for ao commands since 1) TRIG_NOW is not supported and 2) DMA
- * must be setup and initially written to before arm/start happen.
+ * arm(ing) must happen later so that DMA can be set up and DACs
+ * preloaded with the actual output buffer before starting.
+ *
+ * start(ing) must happen _after_ arming is completed. Starting can be
+ * done either via ni_ao_inttrig, or via an external trigger.
+ *
+ * Note: currently, ni_ao_inttrig will automatically attempt a call to
+ * ni_ao_arm if the device still needs arming at that point. This
+ * preserves backwards compatibility.
*/
+ devpriv->ao_needs_arming = 1;
return 0;
}
diff --git a/drivers/staging/comedi/drivers/ni_stc.h b/drivers/staging/comedi/drivers/ni_stc.h
index 1966519cb6e5..f27b545f83eb 100644
--- a/drivers/staging/comedi/drivers/ni_stc.h
+++ b/drivers/staging/comedi/drivers/ni_stc.h
@@ -1053,6 +1053,20 @@ struct ni_private {
unsigned int is_67xx:1;
unsigned int is_6711:1;
unsigned int is_6713:1;
+
+ /*
+ * Boolean value of whether device needs to be armed.
+ *
+ * Currently, only NI AO devices are known to need arming, since
+ * the DAC registers must be preloaded before triggering.
+ * This variable should only be set true during a command operation
+ * (e.g. ni_ao_cmd) and should then be set false by the arming
+ * function (e.g. ni_ao_arm).
+ *
+ * This variable helps to ensure that multiple DMA allocations are not
+ * possible.
+ */
+ unsigned int ao_needs_arming:1;
};
static const struct comedi_lrange range_ni_E_ao_ext;
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index 5ab49a798164..15cb4088467b 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -452,8 +452,9 @@ static void ni_tio_set_sync_mode(struct ni_gpct *counter)
unsigned int bits = 0;
unsigned int reg;
unsigned int mode;
- unsigned int clk_src;
- u64 ps;
+ unsigned int clk_src = 0;
+ u64 ps = 0;
+ int ret;
bool force_alt_sync;
/* only m series and 660x variants have counting mode registers */
@@ -483,9 +484,12 @@ static void ni_tio_set_sync_mode(struct ni_gpct *counter)
break;
}
- ni_tio_generic_clock_src_select(counter, &clk_src);
- ni_tio_clock_period_ps(counter, clk_src, &ps);
-
+ ret = ni_tio_generic_clock_src_select(counter, &clk_src);
+ if (ret)
+ return;
+ ret = ni_tio_clock_period_ps(counter, clk_src, &ps);
+ if (ret)
+ return;
/*
* It's not clear what we should do if clock_period is unknown, so we
* are not using the alt sync bit in that case.
@@ -809,7 +813,7 @@ static int ni_tio_get_clock_src(struct ni_gpct *counter,
unsigned int *clock_source,
unsigned int *period_ns)
{
- u64 temp64;
+ u64 temp64 = 0;
int ret;
ret = ni_tio_generic_clock_src_select(counter, clock_source);
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index 5aeed44dff70..5b5df0596ad9 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -771,9 +771,9 @@ static int pcl818_ai_cancel(struct comedi_device *dev,
s->async->scans_done < cmd->stop_arg)) {
if (!devpriv->ai_cmd_canceled) {
/*
- * Wait for running dma transfer to end,
- * do cleanup in interrupt.
- */
+ * Wait for running dma transfer to end,
+ * do cleanup in interrupt.
+ */
devpriv->ai_cmd_canceled = 1;
return 0;
}
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index c14a02564432..0dd5fe286855 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -75,24 +75,24 @@ struct s626_buffer_dma {
};
struct s626_private {
- uint8_t ai_cmd_running; /* ai_cmd is running */
+ u8 ai_cmd_running; /* ai_cmd is running */
unsigned int ai_sample_timer; /* time between samples in
* units of the timer */
int ai_convert_count; /* conversion counter */
unsigned int ai_convert_timer; /* time between conversion in
* units of the timer */
- uint16_t counter_int_enabs; /* counter interrupt enable mask
+ u16 counter_int_enabs; /* counter interrupt enable mask
* for MISC2 register */
- uint8_t adc_items; /* number of items in ADC poll list */
+ u8 adc_items; /* number of items in ADC poll list */
struct s626_buffer_dma rps_buf; /* DMA buffer used to hold ADC (RPS1)
* program */
struct s626_buffer_dma ana_buf; /* DMA buffer used to receive ADC data
* and hold DAC data */
- uint32_t *dac_wbuf; /* pointer to logical adrs of DMA buffer
+ u32 *dac_wbuf; /* pointer to logical adrs of DMA buffer
* used to hold DAC data */
- uint16_t dacpol; /* image of DAC polarity register */
- uint8_t trim_setpoint[12]; /* images of TrimDAC setpoints */
- uint32_t i2c_adrs; /* I2C device address for onboard EEPROM
+ u16 dacpol; /* image of DAC polarity register */
+ u8 trim_setpoint[12]; /* images of TrimDAC setpoints */
+ u32 i2c_adrs; /* I2C device address for onboard EEPROM
* (board rev dependent) */
};
@@ -179,7 +179,7 @@ static void s626_debi_transfer(struct comedi_device *dev)
/*
* Read a value from a gate array register.
*/
-static uint16_t s626_debi_read(struct comedi_device *dev, uint16_t addr)
+static u16 s626_debi_read(struct comedi_device *dev, u16 addr)
{
/* Set up DEBI control register value in shadow RAM */
writel(S626_DEBI_CMD_RDWORD | addr, dev->mmio + S626_P_DEBICMD);
@@ -193,8 +193,8 @@ static uint16_t s626_debi_read(struct comedi_device *dev, uint16_t addr)
/*
* Write a value to a gate array register.
*/
-static void s626_debi_write(struct comedi_device *dev, uint16_t addr,
- uint16_t wdata)
+static void s626_debi_write(struct comedi_device *dev, u16 addr,
+ u16 wdata)
{
/* Set up DEBI control register value in shadow RAM */
writel(S626_DEBI_CMD_WRWORD | addr, dev->mmio + S626_P_DEBICMD);
@@ -241,7 +241,7 @@ static int s626_i2c_handshake_eoc(struct comedi_device *dev,
return -EBUSY;
}
-static int s626_i2c_handshake(struct comedi_device *dev, uint32_t val)
+static int s626_i2c_handshake(struct comedi_device *dev, u32 val)
{
unsigned int ctrl;
int ret;
@@ -267,8 +267,8 @@ static int s626_i2c_handshake(struct comedi_device *dev, uint32_t val)
return ctrl & S626_I2C_ERR;
}
-/* Read uint8_t from EEPROM. */
-static uint8_t s626_i2c_read(struct comedi_device *dev, uint8_t addr)
+/* Read u8 from EEPROM. */
+static u8 s626_i2c_read(struct comedi_device *dev, u8 addr)
{
struct s626_private *devpriv = dev->private;
@@ -304,10 +304,10 @@ static uint8_t s626_i2c_read(struct comedi_device *dev, uint8_t addr)
/* *********** DAC FUNCTIONS *********** */
/* TrimDac LogicalChan-to-PhysicalChan mapping table. */
-static const uint8_t s626_trimchan[] = { 10, 9, 8, 3, 2, 7, 6, 1, 0, 5, 4 };
+static const u8 s626_trimchan[] = { 10, 9, 8, 3, 2, 7, 6, 1, 0, 5, 4 };
/* TrimDac LogicalChan-to-EepromAdrs mapping table. */
-static const uint8_t s626_trimadrs[] = {
+static const u8 s626_trimadrs[] = {
0x40, 0x41, 0x42, 0x50, 0x51, 0x52, 0x53, 0x60, 0x61, 0x62, 0x63
};
@@ -357,7 +357,7 @@ static int s626_send_dac_eoc(struct comedi_device *dev,
* channel 2. Assumes: (1) TSL2 slot records initialized, and (2)
* dacpol contains valid target image.
*/
-static int s626_send_dac(struct comedi_device *dev, uint32_t val)
+static int s626_send_dac(struct comedi_device *dev, u32 val)
{
struct s626_private *devpriv = dev->private;
int ret;
@@ -516,12 +516,12 @@ static int s626_send_dac(struct comedi_device *dev, uint32_t val)
* Private helper function: Write setpoint to an application DAC channel.
*/
static int s626_set_dac(struct comedi_device *dev,
- uint16_t chan, int16_t dacdata)
+ u16 chan, int16_t dacdata)
{
struct s626_private *devpriv = dev->private;
- uint16_t signmask;
- uint32_t ws_image;
- uint32_t val;
+ u16 signmask;
+ u32 ws_image;
+ u32 val;
/*
* Adjust DAC data polarity and set up Polarity Control Register image.
@@ -535,7 +535,7 @@ static int s626_set_dac(struct comedi_device *dev,
}
/* Limit DAC setpoint value to valid range. */
- if ((uint16_t)dacdata > 0x1FFF)
+ if ((u16)dacdata > 0x1FFF)
dacdata = 0x1FFF;
/*
@@ -575,23 +575,23 @@ static int s626_set_dac(struct comedi_device *dev,
* (write to non-existent trimdac). */
val |= 0x00004000; /* Address the two main dual-DAC devices
* (TSL's chip select enables target device). */
- val |= ((uint32_t)(chan & 1) << 15); /* Address the DAC channel
+ val |= ((u32)(chan & 1) << 15); /* Address the DAC channel
* within the device. */
- val |= (uint32_t)dacdata; /* Include DAC setpoint data. */
+ val |= (u32)dacdata; /* Include DAC setpoint data. */
return s626_send_dac(dev, val);
}
static int s626_write_trim_dac(struct comedi_device *dev,
- uint8_t logical_chan, uint8_t dac_data)
+ u8 logical_chan, u8 dac_data)
{
struct s626_private *devpriv = dev->private;
- uint32_t chan;
+ u32 chan;
/*
* Save the new setpoint in case the application needs to read it back
* later.
*/
- devpriv->trim_setpoint[logical_chan] = (uint8_t)dac_data;
+ devpriv->trim_setpoint[logical_chan] = (u8)dac_data;
/* Map logical channel number to physical channel number. */
chan = s626_trimchan[logical_chan];
@@ -633,7 +633,7 @@ static int s626_write_trim_dac(struct comedi_device *dev,
static int s626_load_trim_dacs(struct comedi_device *dev)
{
- uint8_t i;
+ u8 i;
int ret;
/* Copy TrimDac setpoint values from EEPROM to TrimDacs. */
@@ -661,7 +661,7 @@ static int s626_load_trim_dacs(struct comedi_device *dev)
* latches B.
*/
static void s626_set_latch_source(struct comedi_device *dev,
- unsigned int chan, uint16_t value)
+ unsigned int chan, u16 value)
{
s626_debi_replace(dev, S626_LP_CRB(chan),
~(S626_CRBMSK_INTCTRL | S626_CRBMSK_LATCHSRC),
@@ -672,7 +672,7 @@ static void s626_set_latch_source(struct comedi_device *dev,
* Write value into counter preload register.
*/
static void s626_preload(struct comedi_device *dev,
- unsigned int chan, uint32_t value)
+ unsigned int chan, u32 value)
{
s626_debi_write(dev, S626_LP_CNTR(chan), value);
s626_debi_write(dev, S626_LP_CNTR(chan) + 2, value >> 16);
@@ -686,7 +686,7 @@ static void s626_preload(struct comedi_device *dev,
static void s626_reset_cap_flags(struct comedi_device *dev,
unsigned int chan)
{
- uint16_t set;
+ u16 set;
set = S626_SET_CRB_INTRESETCMD(1);
if (chan < 3)
@@ -704,12 +704,12 @@ static void s626_reset_cap_flags(struct comedi_device *dev,
* ClkPol, ClkEnab, IndexSrc, IndexPol, LoadSrc.
*/
static void s626_set_mode_a(struct comedi_device *dev,
- unsigned int chan, uint16_t setup,
- uint16_t disable_int_src)
+ unsigned int chan, u16 setup,
+ u16 disable_int_src)
{
struct s626_private *devpriv = dev->private;
- uint16_t cra;
- uint16_t crb;
+ u16 cra;
+ u16 crb;
unsigned int cntsrc, clkmult, clkpol;
/* Initialize CRA and CRB images. */
@@ -782,12 +782,12 @@ static void s626_set_mode_a(struct comedi_device *dev,
}
static void s626_set_mode_b(struct comedi_device *dev,
- unsigned int chan, uint16_t setup,
- uint16_t disable_int_src)
+ unsigned int chan, u16 setup,
+ u16 disable_int_src)
{
struct s626_private *devpriv = dev->private;
- uint16_t cra;
- uint16_t crb;
+ u16 cra;
+ u16 crb;
unsigned int cntsrc, clkmult, clkpol;
/* Initialize CRA and CRB images. */
@@ -868,7 +868,7 @@ static void s626_set_mode_b(struct comedi_device *dev,
static void s626_set_mode(struct comedi_device *dev,
unsigned int chan,
- uint16_t setup, uint16_t disable_int_src)
+ u16 setup, u16 disable_int_src)
{
if (chan < 3)
s626_set_mode_a(dev, chan, setup, disable_int_src);
@@ -880,7 +880,7 @@ static void s626_set_mode(struct comedi_device *dev,
* Return/set a counter's enable. enab: 0=always enabled, 1=enabled by index.
*/
static void s626_set_enable(struct comedi_device *dev,
- unsigned int chan, uint16_t enab)
+ unsigned int chan, u16 enab)
{
unsigned int mask = S626_CRBMSK_INTCTRL;
unsigned int set;
@@ -901,11 +901,11 @@ static void s626_set_enable(struct comedi_device *dev,
* 2=OverflowA (B counters only), 3=disabled.
*/
static void s626_set_load_trig(struct comedi_device *dev,
- unsigned int chan, uint16_t trig)
+ unsigned int chan, u16 trig)
{
- uint16_t reg;
- uint16_t mask;
- uint16_t set;
+ u16 reg;
+ u16 mask;
+ u16 set;
if (chan < 3) {
reg = S626_LP_CRA(chan);
@@ -925,11 +925,11 @@ static void s626_set_load_trig(struct comedi_device *dev,
* 2=IndexOnly, 3=IndexAndOverflow.
*/
static void s626_set_int_src(struct comedi_device *dev,
- unsigned int chan, uint16_t int_source)
+ unsigned int chan, u16 int_source)
{
struct s626_private *devpriv = dev->private;
- uint16_t cra_reg = S626_LP_CRA(chan);
- uint16_t crb_reg = S626_LP_CRB(chan);
+ u16 cra_reg = S626_LP_CRA(chan);
+ u16 crb_reg = S626_LP_CRB(chan);
if (chan < 3) {
/* Reset any pending counter overflow or index captures */
@@ -941,7 +941,7 @@ static void s626_set_int_src(struct comedi_device *dev,
s626_debi_replace(dev, cra_reg, ~S626_CRAMSK_INTSRC_A,
S626_SET_CRA_INTSRC_A(int_source));
} else {
- uint16_t crb;
+ u16 crb;
/* Cache writeable CRB register image */
crb = s626_debi_read(dev, crb_reg);
@@ -985,7 +985,7 @@ static void s626_pulse_index(struct comedi_device *dev,
unsigned int chan)
{
if (chan < 3) {
- uint16_t cra;
+ u16 cra;
cra = s626_debi_read(dev, S626_LP_CRA(chan));
@@ -994,7 +994,7 @@ static void s626_pulse_index(struct comedi_device *dev,
(cra ^ S626_CRAMSK_INDXPOL_A));
s626_debi_write(dev, S626_LP_CRA(chan), cra);
} else {
- uint16_t crb;
+ u16 crb;
crb = s626_debi_read(dev, S626_LP_CRB(chan));
crb &= ~S626_CRBMSK_INTCTRL;
@@ -1062,7 +1062,7 @@ static int s626_dio_clear_irq(struct comedi_device *dev)
}
static void s626_handle_dio_interrupt(struct comedi_device *dev,
- uint16_t irqbit, uint8_t group)
+ u16 irqbit, u8 group)
{
struct s626_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
@@ -1110,8 +1110,8 @@ static void s626_handle_dio_interrupt(struct comedi_device *dev,
static void s626_check_dio_interrupts(struct comedi_device *dev)
{
- uint16_t irqbit;
- uint8_t group;
+ u16 irqbit;
+ u8 group;
for (group = 0; group < S626_DIO_BANKS; group++) {
/* read interrupt type */
@@ -1131,7 +1131,7 @@ static void s626_check_counter_interrupts(struct comedi_device *dev)
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- uint16_t irqbit;
+ u16 irqbit;
/* read interrupt type */
irqbit = s626_debi_read(dev, S626_LP_RDMISC2);
@@ -1196,7 +1196,7 @@ static bool s626_handle_eos_interrupt(struct comedi_device *dev)
* first uint16_t in the buffer because it contains junk data
* from the final ADC of the previous poll list scan.
*/
- uint32_t *readaddr = (uint32_t *)devpriv->ana_buf.logical_base + 1;
+ u32 *readaddr = (u32 *)devpriv->ana_buf.logical_base + 1;
int i;
/* get the data and hand it over to comedi */
@@ -1231,7 +1231,7 @@ static irqreturn_t s626_irq_handler(int irq, void *d)
{
struct comedi_device *dev = d;
unsigned long flags;
- uint32_t irqtype, irqstatus;
+ u32 irqtype, irqstatus;
if (!dev->attached)
return IRQ_NONE;
@@ -1272,25 +1272,25 @@ static irqreturn_t s626_irq_handler(int irq, void *d)
/*
* This function builds the RPS program for hardware driven acquisition.
*/
-static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
+static void s626_reset_adc(struct comedi_device *dev, u8 *ppl)
{
struct s626_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_cmd *cmd = &s->async->cmd;
- uint32_t *rps;
- uint32_t jmp_adrs;
- uint16_t i;
- uint16_t n;
- uint32_t local_ppl;
+ u32 *rps;
+ u32 jmp_adrs;
+ u16 i;
+ u16 n;
+ u32 local_ppl;
/* Stop RPS program in case it is currently running */
s626_mc_disable(dev, S626_MC1_ERPS1, S626_P_MC1);
/* Set starting logical address to write RPS commands. */
- rps = (uint32_t *)devpriv->rps_buf.logical_base;
+ rps = (u32 *)devpriv->rps_buf.logical_base;
/* Initialize RPS instruction pointer */
- writel((uint32_t)devpriv->rps_buf.physical_base,
+ writel((u32)devpriv->rps_buf.physical_base,
dev->mmio + S626_P_RPSADDR1);
/* Construct RPS program in rps_buf DMA buffer */
@@ -1372,8 +1372,8 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
* flushes the RPS' instruction prefetch pipeline.
*/
jmp_adrs =
- (uint32_t)devpriv->rps_buf.physical_base +
- (uint32_t)((unsigned long)rps -
+ (u32)devpriv->rps_buf.physical_base +
+ (u32)((unsigned long)rps -
(unsigned long)devpriv->
rps_buf.logical_base);
for (i = 0; i < (10 * S626_RPSCLK_PER_US / 2); i++) {
@@ -1408,7 +1408,7 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
/* Transfer ADC data from FB BUFFER 1 register to DMA buffer. */
*rps++ = S626_RPS_STREG |
(S626_BUGFIX_STREG(S626_P_FB_BUFFER1) >> 2);
- *rps++ = (uint32_t)devpriv->ana_buf.physical_base +
+ *rps++ = (u32)devpriv->ana_buf.physical_base +
(devpriv->adc_items << 2);
/*
@@ -1452,7 +1452,7 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
/* Transfer final ADC data from FB BUFFER 1 register to DMA buffer. */
*rps++ = S626_RPS_STREG | (S626_BUGFIX_STREG(S626_P_FB_BUFFER1) >> 2);
- *rps++ = (uint32_t)devpriv->ana_buf.physical_base +
+ *rps++ = (u32)devpriv->ana_buf.physical_base +
(devpriv->adc_items << 2);
/* Indicate ADC scan loop is finished. */
@@ -1465,7 +1465,7 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
/* Restart RPS program at its beginning. */
*rps++ = S626_RPS_JUMP; /* Branch to start of RPS program. */
- *rps++ = (uint32_t)devpriv->rps_buf.physical_base;
+ *rps++ = (u32)devpriv->rps_buf.physical_base;
/* End of RPS program build */
}
@@ -1488,11 +1488,11 @@ static int s626_ai_insn_read(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- uint16_t chan = CR_CHAN(insn->chanspec);
- uint16_t range = CR_RANGE(insn->chanspec);
- uint16_t adc_spec = 0;
- uint32_t gpio_image;
- uint32_t tmp;
+ u16 chan = CR_CHAN(insn->chanspec);
+ u16 range = CR_RANGE(insn->chanspec);
+ u16 adc_spec = 0;
+ u32 gpio_image;
+ u32 tmp;
int ret;
int n;
@@ -1585,7 +1585,7 @@ static int s626_ai_insn_read(struct comedi_device *dev,
return n;
}
-static int s626_ai_load_polllist(uint8_t *ppl, struct comedi_cmd *cmd)
+static int s626_ai_load_polllist(u8 *ppl, struct comedi_cmd *cmd)
{
int n;
@@ -1651,7 +1651,7 @@ static int s626_ns_to_timer(unsigned int *nanosec, unsigned int flags)
static void s626_timer_load(struct comedi_device *dev,
unsigned int chan, int tick)
{
- uint16_t setup =
+ u16 setup =
/* Preload upon index. */
S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
/* Disable hardware index. */
@@ -1664,7 +1664,7 @@ static void s626_timer_load(struct comedi_device *dev,
S626_SET_STD_CLKMULT(S626_CLKMULT_1X) |
/* Enabled by index */
S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX);
- uint16_t value_latchsrc = S626_LATCHSRC_A_INDXA;
+ u16 value_latchsrc = S626_LATCHSRC_A_INDXA;
/* uint16_t enab = S626_CLKENAB_ALWAYS; */
s626_set_mode(dev, chan, setup, false);
@@ -1693,7 +1693,7 @@ static void s626_timer_load(struct comedi_device *dev,
static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct s626_private *devpriv = dev->private;
- uint8_t ppl[16];
+ u8 ppl[16];
struct comedi_cmd *cmd = &s->async->cmd;
int tick;
@@ -1953,7 +1953,7 @@ static int s626_ao_insn_write(struct comedi_device *dev,
static void s626_dio_init(struct comedi_device *dev)
{
- uint16_t group;
+ u16 group;
/* Prepare to treat writes to WRCapSel as capture disables. */
s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_NOEDCAP);
@@ -2017,7 +2017,7 @@ static int s626_enc_insn_config(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
- uint16_t setup =
+ u16 setup =
/* Preload upon index. */
S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
/* Disable hardware index. */
@@ -2032,8 +2032,8 @@ static int s626_enc_insn_config(struct comedi_device *dev,
S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX);
/* uint16_t disable_int_src = true; */
/* uint32_t Preloadvalue; //Counter initial value */
- uint16_t value_latchsrc = S626_LATCHSRC_AB_READ;
- uint16_t enab = S626_CLKENAB_ALWAYS;
+ u16 value_latchsrc = S626_LATCHSRC_AB_READ;
+ u16 enab = S626_CLKENAB_ALWAYS;
/* (data==NULL) ? (Preloadvalue=0) : (Preloadvalue=data[0]); */
@@ -2052,7 +2052,7 @@ static int s626_enc_insn_read(struct comedi_device *dev,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
- uint16_t cntr_latch_reg = S626_LP_CNTR(chan);
+ u16 cntr_latch_reg = S626_LP_CNTR(chan);
int i;
for (i = 0; i < insn->n; i++) {
@@ -2090,7 +2090,7 @@ static int s626_enc_insn_write(struct comedi_device *dev,
return 1;
}
-static void s626_write_misc2(struct comedi_device *dev, uint16_t new_image)
+static void s626_write_misc2(struct comedi_device *dev, u16 new_image)
{
s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_WENABLE);
s626_debi_write(dev, S626_LP_WRMISC2, new_image);
@@ -2100,7 +2100,7 @@ static void s626_write_misc2(struct comedi_device *dev, uint16_t new_image)
static void s626_counters_init(struct comedi_device *dev)
{
int chan;
- uint16_t setup =
+ u16 setup =
/* Preload upon index. */
S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
/* Disable hardware index. */
@@ -2169,7 +2169,7 @@ static int s626_initialize(struct comedi_device *dev)
{
struct s626_private *devpriv = dev->private;
dma_addr_t phys_buf;
- uint16_t chan;
+ u16 chan;
int i;
int ret;
@@ -2248,7 +2248,7 @@ static int s626_initialize(struct comedi_device *dev)
*/
/* Physical start of RPS program */
- writel((uint32_t)devpriv->rps_buf.physical_base,
+ writel((u32)devpriv->rps_buf.physical_base,
dev->mmio + S626_P_RPSADDR1);
/* RPS program performs no explicit mem writes */
writel(0, dev->mmio + S626_P_RPSPAGE1);
@@ -2318,16 +2318,16 @@ static int s626_initialize(struct comedi_device *dev)
* enabled.
*/
phys_buf = devpriv->ana_buf.physical_base +
- (S626_DAC_WDMABUF_OS * sizeof(uint32_t));
- writel((uint32_t)phys_buf, dev->mmio + S626_P_BASEA2_OUT);
- writel((uint32_t)(phys_buf + sizeof(uint32_t)),
+ (S626_DAC_WDMABUF_OS * sizeof(u32));
+ writel((u32)phys_buf, dev->mmio + S626_P_BASEA2_OUT);
+ writel((u32)(phys_buf + sizeof(u32)),
dev->mmio + S626_P_PROTA2_OUT);
/*
* Cache Audio2's output DMA buffer logical address. This is
* where DAC data is buffered for A2 output DMA transfers.
*/
- devpriv->dac_wbuf = (uint32_t *)devpriv->ana_buf.logical_base +
+ devpriv->dac_wbuf = (u32 *)devpriv->ana_buf.logical_base +
S626_DAC_WDMABUF_OS;
/*
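[Editor's note] The s626 hunks above only replace the C99 uintN_t spellings with the kernel's native u8/u16/u32 types; the generated code is unchanged. The recurring idiom is truncating a dma_addr_t to 32 bits before programming a device register with writel(). A minimal sketch of that pattern follows; the struct, field, and register names are hypothetical and not taken from the s626 driver.

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical device context: a DMA buffer and a mapped register window. */
struct demo_dev {
        dma_addr_t buf_phys;    /* bus address returned by the DMA API */
        void __iomem *mmio;     /* ioremap()ed register block */
};

#define DEMO_REG_DMA_BASE 0x40  /* assumed register offset */

/* Program a 32-bit DMA base register, as the s626 RPS setup does. */
static void demo_load_dma_base(struct demo_dev *dd)
{
        /* The device only accepts a 32-bit address, hence the explicit cast. */
        writel((u32)dd->buf_phys, dd->mmio + DEMO_REG_DMA_BASE);
}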
diff --git a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
index d0a8a28edd36..55d43c076b1c 100644
--- a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
+++ b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
@@ -250,3 +250,15 @@ int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice)
return n;
}
EXPORT_SYMBOL_GPL(comedi_get_n_channels);
+
+static int __init kcomedilib_module_init(void)
+{
+ return 0;
+}
+
+static void __exit kcomedilib_module_exit(void)
+{
+}
+
+module_init(kcomedilib_module_init);
+module_exit(kcomedilib_module_exit);
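[Editor's note] The kcomedilib change only adds empty init/exit stubs so the module has explicit entry points. For reference, a self-contained module skeleton of the same shape looks roughly like the sketch below; all names here are illustrative and not part of kcomedilib.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>

static int __init demo_module_init(void)
{
        pr_info("demo: loaded\n");
        return 0;       /* a non-zero return would abort the load */
}

static void __exit demo_module_exit(void)
{
        pr_info("demo: unloaded\n");
}

module_init(demo_module_init);
module_exit(demo_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Minimal module skeleton for illustration");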
diff --git a/drivers/staging/dgnc/Makefile b/drivers/staging/dgnc/Makefile
index 995c874f40eb..40ff0d007695 100644
--- a/drivers/staging/dgnc/Makefile
+++ b/drivers/staging/dgnc/Makefile
@@ -2,5 +2,4 @@ obj-$(CONFIG_DGNC) += dgnc.o
dgnc-objs := dgnc_cls.o dgnc_driver.o\
dgnc_mgmt.o dgnc_neo.o\
- dgnc_tty.o dgnc_sysfs.o\
- dgnc_utils.o
+ dgnc_tty.o dgnc_utils.o
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
index aedca66cbe41..c20ffdd254d8 100644
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -385,9 +385,8 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
ch->ch_rxcount++;
}
- /*
- * Write new final heads to channel structure.
- */
+ /* Write new final heads to channel structure. */
+
ch->ch_r_head = head & RQUEUEMASK;
ch->ch_e_head = head & EQUEUEMASK;
@@ -666,9 +665,8 @@ static void cls_param(struct tty_struct *tty)
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
return;
- /*
- * If baud rate is zero, flush queues, and set mval to drop DTR.
- */
+ /* If baud rate is zero, flush queues, and set mval to drop DTR. */
+
if ((ch->ch_c_cflag & (CBAUD)) == 0) {
ch->ch_r_head = 0;
ch->ch_r_tail = 0;
@@ -887,9 +885,8 @@ static void cls_param(struct tty_struct *tty)
cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr));
}
-/*
- * Our board poller function.
- */
+/* Our board poller function. */
+
static void cls_tasklet(unsigned long data)
{
struct dgnc_board *bd = (struct dgnc_board *)data;
@@ -914,9 +911,8 @@ static void cls_tasklet(unsigned long data)
*/
spin_lock_irqsave(&bd->bd_intr_lock, flags);
- /*
- * If board is ready, parse deeper to see if there is anything to do.
- */
+ /* If board is ready, parse deeper to see if there is anything to do. */
+
if ((state == BOARD_READY) && (ports > 0)) {
/* Loop on each port */
for (i = 0; i < ports; i++) {
@@ -938,9 +934,8 @@ static void cls_tasklet(unsigned long data)
cls_copy_data_from_queue_to_uart(ch);
dgnc_wakeup_writes(ch);
- /*
- * Check carrier function.
- */
+ /* Check carrier function. */
+
dgnc_carrier(ch);
/*
@@ -992,9 +987,8 @@ static irqreturn_t cls_intr(int irq, void *voidbrd)
for (i = 0; i < brd->nasync; i++)
cls_parse_isr(brd, i);
- /*
- * Schedule tasklet to more in-depth servicing at a better time.
- */
+ /* Schedule tasklet to more in-depth servicing at a better time. */
+
tasklet_schedule(&brd->helper_tasklet);
spin_unlock_irqrestore(&brd->bd_intr_lock, flags);
@@ -1043,9 +1037,7 @@ static int cls_drain(struct tty_struct *tty, uint seconds)
un->un_flags |= UN_EMPTY;
spin_unlock_irqrestore(&ch->ch_lock, flags);
- /*
- * NOTE: Do something with time passed in.
- */
+ /* NOTE: Do something with time passed in. */
/* If ret is non-zero, user ctrl-c'ed us */
@@ -1112,9 +1104,8 @@ static void cls_uart_init(struct channel_t *ch)
readb(&ch->ch_cls_uart->msr);
}
-/*
- * Turns off UART.
- */
+/* Turns off UART. */
+
static void cls_uart_off(struct channel_t *ch)
{
writeb(0, &ch->ch_cls_uart->ier);
@@ -1160,9 +1151,8 @@ static void cls_send_break(struct channel_t *ch, int msecs)
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return;
- /*
- * If we receive a time of 0, this means turn off the break.
- */
+ /* If we receive a time of 0, this means turn off the break. */
+
if (msecs == 0) {
/* Turn break off, and unset some variables */
if (ch->ch_flags & CH_BREAK_SENDING) {
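[Editor's note] Most of the cls hunks above reflow comments around the same control flow: the hard IRQ handler does the minimum work and defers the rest to a tasklet. A rough sketch of that split, assuming a board structure with its own interrupt spinlock (all names hypothetical):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct demo_board {
        spinlock_t intr_lock;
        struct tasklet_struct helper_tasklet;
};

/* Deferred work: runs in softirq context some time after the IRQ. */
static void demo_tasklet(unsigned long data)
{
        struct demo_board *bd = (struct demo_board *)data;

        /* ... per-port servicing would go here ... */
        (void)bd;
}

/* Hard IRQ handler: acknowledge and defer, the same shape as cls_intr(). */
static irqreturn_t demo_intr(int irq, void *dev_id)
{
        struct demo_board *bd = dev_id;
        unsigned long flags;

        spin_lock_irqsave(&bd->intr_lock, flags);
        /* ... read and clear the interrupt cause registers here ... */
        tasklet_schedule(&bd->helper_tasklet);
        spin_unlock_irqrestore(&bd->intr_lock, flags);

        return IRQ_HANDLED;
}

At probe time such a driver would call spin_lock_init() and tasklet_init(&bd->helper_tasklet, demo_tasklet, (unsigned long)bd) before requesting the IRQ.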
diff --git a/drivers/staging/dgnc/dgnc_cls.h b/drivers/staging/dgnc/dgnc_cls.h
index 2597e36d38c4..463ad30efb3b 100644
--- a/drivers/staging/dgnc/dgnc_cls.h
+++ b/drivers/staging/dgnc/dgnc_cls.h
@@ -69,7 +69,7 @@ struct cls_uart_struct {
#define UART_EXAR654_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */
#define UART_EXAR654_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */
#define UART_EXAR654_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */
-#define UART_EXAR654_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow COntrol Enable */
+#define UART_EXAR654_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow Control Enable */
#define UART_EXAR654_IER_XOFF 0x20 /* Xoff Interrupt Enable */
#define UART_EXAR654_IER_RTSDTR 0x40 /* Output Interrupt Enable */
#define UART_EXAR654_IER_CTSDSR 0x80 /* Input Interrupt Enable */
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index fd372d3afa46..5381dbddd8bb 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -24,31 +24,14 @@
#include "dgnc_tty.h"
#include "dgnc_cls.h"
#include "dgnc_neo.h"
-#include "dgnc_sysfs.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Digi International, http://www.digi.com");
MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line");
MODULE_SUPPORTED_DEVICE("dgnc");
-/**************************************************************************
- *
- * protos for this file
- *
- */
-static int dgnc_start(void);
-static int dgnc_request_irq(struct dgnc_board *brd);
-static void dgnc_free_irq(struct dgnc_board *brd);
-static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id);
-static void dgnc_cleanup_board(struct dgnc_board *brd);
-static void dgnc_poll_handler(ulong dummy);
-static int dgnc_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-static int dgnc_do_remap(struct dgnc_board *brd);
+/* File operations permitted on Control/Management major. */
-/*
- * File operations permitted on Control/Management major.
- */
static const struct file_operations dgnc_board_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = dgnc_mgmt_ioctl,
@@ -56,9 +39,8 @@ static const struct file_operations dgnc_board_fops = {
.release = dgnc_mgmt_close
};
-/*
- * Globals
- */
+/* Globals */
+
uint dgnc_num_boards;
struct dgnc_board *dgnc_board[MAXBOARDS];
DEFINE_SPINLOCK(dgnc_global_lock);
@@ -66,14 +48,12 @@ DEFINE_SPINLOCK(dgnc_poll_lock); /* Poll scheduling lock */
uint dgnc_major;
int dgnc_poll_tick = 20; /* Poll interval - 20 ms */
-/*
- * Static vars.
- */
+/* Static vars. */
+
static struct class *dgnc_class;
-/*
- * Poller stuff
- */
+/* Poller stuff */
+
static ulong dgnc_poll_time; /* Time of next poll */
static uint dgnc_poll_stop; /* Used to tell poller to stop */
static struct timer_list dgnc_poll_timer;
@@ -93,7 +73,7 @@ struct board_id {
unsigned int is_pci_express;
};
-static struct board_id dgnc_ids[] = {
+static const struct board_id dgnc_ids[] = {
{ PCI_DEVICE_CLASSIC_4_PCI_NAME, 4, 0 },
{ PCI_DEVICE_CLASSIC_4_422_PCI_NAME, 4, 0 },
{ PCI_DEVICE_CLASSIC_8_PCI_NAME, 8, 0 },
@@ -114,274 +94,20 @@ static struct board_id dgnc_ids[] = {
{ NULL, 0, 0 }
};
-static struct pci_driver dgnc_driver = {
- .name = "dgnc",
- .probe = dgnc_init_one,
- .id_table = dgnc_pci_tbl,
-};
-
-/************************************************************************
- *
- * Driver load/unload functions
- *
- ************************************************************************/
-
-static void cleanup(bool sysfiles)
-{
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&dgnc_poll_lock, flags);
- dgnc_poll_stop = 1;
- spin_unlock_irqrestore(&dgnc_poll_lock, flags);
-
- /* Turn off poller right away. */
- del_timer_sync(&dgnc_poll_timer);
-
- if (sysfiles)
- dgnc_remove_driver_sysfiles(&dgnc_driver);
-
- device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
- class_destroy(dgnc_class);
- unregister_chrdev(dgnc_major, "dgnc");
-
- for (i = 0; i < dgnc_num_boards; ++i) {
- dgnc_remove_ports_sysfiles(dgnc_board[i]);
- dgnc_cleanup_tty(dgnc_board[i]);
- dgnc_cleanup_board(dgnc_board[i]);
- }
-
- dgnc_tty_post_uninit();
-}
-
-/*
- * dgnc_cleanup_module()
- *
- * Module unload. This is where it all ends.
- */
-static void __exit dgnc_cleanup_module(void)
-{
- cleanup(true);
- pci_unregister_driver(&dgnc_driver);
-}
-
-/*
- * init_module()
- *
- * Module load. This is where it all starts.
- */
-static int __init dgnc_init_module(void)
-{
- int rc;
-
- /*
- * Initialize global stuff
- */
- rc = dgnc_start();
-
- if (rc < 0)
- return rc;
-
- /*
- * Find and configure all the cards
- */
- rc = pci_register_driver(&dgnc_driver);
- if (rc) {
- pr_warn("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n");
- cleanup(false);
- return rc;
- }
- dgnc_create_driver_sysfiles(&dgnc_driver);
-
- return 0;
-}
-
-module_init(dgnc_init_module);
-module_exit(dgnc_cleanup_module);
+/* Remap PCI memory. */
-/*
- * Start of driver.
- */
-static int dgnc_start(void)
+static int dgnc_do_remap(struct dgnc_board *brd)
{
int rc = 0;
- unsigned long flags;
- struct device *dev;
-
- /* make sure timer is initialized before we do anything else */
- init_timer(&dgnc_poll_timer);
-
- /*
- * Register our base character device into the kernel.
- * This allows the download daemon to connect to the downld device
- * before any of the boards are init'ed.
- *
- * Register management/dpa devices
- */
- rc = register_chrdev(0, "dgnc", &dgnc_board_fops);
- if (rc < 0) {
- pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc);
- return rc;
- }
- dgnc_major = rc;
-
- dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt");
- if (IS_ERR(dgnc_class)) {
- rc = PTR_ERR(dgnc_class);
- pr_err(DRVSTR ": Can't create dgnc_mgmt class (%d)\n", rc);
- goto failed_class;
- }
-
- dev = device_create(dgnc_class, NULL,
- MKDEV(dgnc_major, 0),
- NULL, "dgnc_mgmt");
- if (IS_ERR(dev)) {
- rc = PTR_ERR(dev);
- pr_err(DRVSTR ": Can't create device (%d)\n", rc);
- goto failed_device;
- }
-
- /*
- * Init any global tty stuff.
- */
- rc = dgnc_tty_preinit();
-
- if (rc < 0) {
- pr_err(DRVSTR ": tty preinit - not enough memory (%d)\n", rc);
- goto failed_tty;
- }
-
- /* Start the poller */
- spin_lock_irqsave(&dgnc_poll_lock, flags);
- setup_timer(&dgnc_poll_timer, dgnc_poll_handler, 0);
- dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);
- dgnc_poll_timer.expires = dgnc_poll_time;
- spin_unlock_irqrestore(&dgnc_poll_lock, flags);
-
- add_timer(&dgnc_poll_timer);
-
- return 0;
-
-failed_tty:
- device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
-failed_device:
- class_destroy(dgnc_class);
-failed_class:
- unregister_chrdev(dgnc_major, "dgnc");
- return rc;
-}
-
-/* returns count (>= 0), or negative on error */
-static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int rc;
- struct dgnc_board *brd;
-
- /* wake up and enable device */
- rc = pci_enable_device(pdev);
-
- if (rc)
- return -EIO;
-
- brd = dgnc_found_board(pdev, ent->driver_data);
- if (IS_ERR(brd))
- return PTR_ERR(brd);
-
- /*
- * Do tty device initialization.
- */
-
- rc = dgnc_tty_register(brd);
- if (rc < 0) {
- pr_err(DRVSTR ": Can't register tty devices (%d)\n", rc);
- goto failed;
- }
-
- rc = dgnc_request_irq(brd);
- if (rc < 0) {
- pr_err(DRVSTR ": Can't finalize board init (%d)\n", rc);
- goto unregister_tty;
- }
-
- rc = dgnc_tty_init(brd);
- if (rc < 0) {
- pr_err(DRVSTR ": Can't init tty devices (%d)\n", rc);
- goto free_irq;
- }
-
- brd->state = BOARD_READY;
- brd->dpastatus = BD_RUNNING;
- dgnc_create_ports_sysfiles(brd);
-
- dgnc_board[dgnc_num_boards++] = brd;
-
- return 0;
-
-free_irq:
- dgnc_free_irq(brd);
-unregister_tty:
- dgnc_tty_unregister(brd);
-
-failed:
- kfree(brd);
+ brd->re_map_membase = ioremap(brd->membase, 0x1000);
+ if (!brd->re_map_membase)
+ rc = -ENOMEM;
return rc;
}
/*
- * dgnc_cleanup_board()
- *
- * Free all the memory associated with a board
- */
-static void dgnc_cleanup_board(struct dgnc_board *brd)
-{
- int i = 0;
-
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
- return;
-
- switch (brd->device) {
- case PCI_DEVICE_CLASSIC_4_DID:
- case PCI_DEVICE_CLASSIC_8_DID:
- case PCI_DEVICE_CLASSIC_4_422_DID:
- case PCI_DEVICE_CLASSIC_8_422_DID:
-
- /* Tell card not to interrupt anymore. */
- outb(0, brd->iobase + 0x4c);
- break;
-
- default:
- break;
- }
-
- if (brd->irq)
- free_irq(brd->irq, brd);
-
- tasklet_kill(&brd->helper_tasklet);
-
- if (brd->re_map_membase) {
- iounmap(brd->re_map_membase);
- brd->re_map_membase = NULL;
- }
-
- /* Free all allocated channels structs */
- for (i = 0; i < MAXPORTS ; i++) {
- if (brd->channels[i]) {
- kfree(brd->channels[i]->ch_rqueue);
- kfree(brd->channels[i]->ch_equeue);
- kfree(brd->channels[i]->ch_wqueue);
- kfree(brd->channels[i]);
- brd->channels[i] = NULL;
- }
- }
-
- dgnc_board[brd->boardnum] = NULL;
-
- kfree(brd);
-}
-
-/*
* dgnc_found_board()
*
* A board has been found, init it.
@@ -587,21 +313,6 @@ static void dgnc_free_irq(struct dgnc_board *brd)
}
/*
- * Remap PCI memory.
- */
-static int dgnc_do_remap(struct dgnc_board *brd)
-{
- int rc = 0;
-
- brd->re_map_membase = ioremap(brd->membase, 0x1000);
- if (!brd->re_map_membase)
- rc = -ENOMEM;
-
- return rc;
-}
-
-/*
- *
* Function:
*
* dgnc_poll_handler
@@ -623,7 +334,6 @@ static int dgnc_do_remap(struct dgnc_board *brd)
* As each timer expires, it determines (a) whether the "transmit"
* waiter needs to be woken up, and (b) whether the poller needs to
* be rescheduled.
- *
*/
static void dgnc_poll_handler(ulong dummy)
@@ -651,9 +361,8 @@ static void dgnc_poll_handler(ulong dummy)
spin_unlock_irqrestore(&brd->bd_lock, flags);
}
- /*
- * Schedule ourself back at the nominal wakeup interval.
- */
+ /* Schedule ourself back at the nominal wakeup interval. */
+
spin_lock_irqsave(&dgnc_poll_lock, flags);
dgnc_poll_time += dgnc_jiffies_from_ms(dgnc_poll_tick);
@@ -669,3 +378,240 @@ static void dgnc_poll_handler(ulong dummy)
if (!dgnc_poll_stop)
add_timer(&dgnc_poll_timer);
}
+
+/* returns count (>= 0), or negative on error */
+static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int rc;
+ struct dgnc_board *brd;
+
+ /* wake up and enable device */
+ rc = pci_enable_device(pdev);
+
+ if (rc)
+ return -EIO;
+
+ brd = dgnc_found_board(pdev, ent->driver_data);
+ if (IS_ERR(brd))
+ return PTR_ERR(brd);
+
+ /* Do tty device initialization. */
+
+ rc = dgnc_tty_register(brd);
+ if (rc < 0) {
+ pr_err(DRVSTR ": Can't register tty devices (%d)\n", rc);
+ goto failed;
+ }
+
+ rc = dgnc_request_irq(brd);
+ if (rc < 0) {
+ pr_err(DRVSTR ": Can't finalize board init (%d)\n", rc);
+ goto unregister_tty;
+ }
+
+ rc = dgnc_tty_init(brd);
+ if (rc < 0) {
+ pr_err(DRVSTR ": Can't init tty devices (%d)\n", rc);
+ goto free_irq;
+ }
+
+ brd->state = BOARD_READY;
+ brd->dpastatus = BD_RUNNING;
+
+ dgnc_board[dgnc_num_boards++] = brd;
+
+ return 0;
+
+free_irq:
+ dgnc_free_irq(brd);
+unregister_tty:
+ dgnc_tty_unregister(brd);
+
+failed:
+ kfree(brd);
+
+ return rc;
+}
+
+static struct pci_driver dgnc_driver = {
+ .name = "dgnc",
+ .probe = dgnc_init_one,
+ .id_table = dgnc_pci_tbl,
+};
+
+/* Start of driver. */
+
+static int dgnc_start(void)
+{
+ int rc = 0;
+ unsigned long flags;
+ struct device *dev;
+
+ /* make sure timer is initialized before we do anything else */
+ init_timer(&dgnc_poll_timer);
+
+ /*
+ * Register our base character device into the kernel.
+ * This allows the download daemon to connect to the downld device
+ * before any of the boards are init'ed.
+ *
+ * Register management/dpa devices
+ */
+ rc = register_chrdev(0, "dgnc", &dgnc_board_fops);
+ if (rc < 0) {
+ pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc);
+ return rc;
+ }
+ dgnc_major = rc;
+
+ dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt");
+ if (IS_ERR(dgnc_class)) {
+ rc = PTR_ERR(dgnc_class);
+ pr_err(DRVSTR ": Can't create dgnc_mgmt class (%d)\n", rc);
+ goto failed_class;
+ }
+
+ dev = device_create(dgnc_class, NULL,
+ MKDEV(dgnc_major, 0),
+ NULL, "dgnc_mgmt");
+ if (IS_ERR(dev)) {
+ rc = PTR_ERR(dev);
+ pr_err(DRVSTR ": Can't create device (%d)\n", rc);
+ goto failed_device;
+ }
+
+ /* Start the poller */
+ spin_lock_irqsave(&dgnc_poll_lock, flags);
+ setup_timer(&dgnc_poll_timer, dgnc_poll_handler, 0);
+ dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);
+ dgnc_poll_timer.expires = dgnc_poll_time;
+ spin_unlock_irqrestore(&dgnc_poll_lock, flags);
+
+ add_timer(&dgnc_poll_timer);
+
+ return 0;
+
+failed_device:
+ class_destroy(dgnc_class);
+failed_class:
+ unregister_chrdev(dgnc_major, "dgnc");
+ return rc;
+}
+
+/*
+ * dgnc_cleanup_board()
+ *
+ * Free all the memory associated with a board
+ */
+static void dgnc_cleanup_board(struct dgnc_board *brd)
+{
+ int i = 0;
+
+ if (!brd || brd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ switch (brd->device) {
+ case PCI_DEVICE_CLASSIC_4_DID:
+ case PCI_DEVICE_CLASSIC_8_DID:
+ case PCI_DEVICE_CLASSIC_4_422_DID:
+ case PCI_DEVICE_CLASSIC_8_422_DID:
+
+ /* Tell card not to interrupt anymore. */
+ outb(0, brd->iobase + 0x4c);
+ break;
+
+ default:
+ break;
+ }
+
+ if (brd->irq)
+ free_irq(brd->irq, brd);
+
+ tasklet_kill(&brd->helper_tasklet);
+
+ if (brd->re_map_membase) {
+ iounmap(brd->re_map_membase);
+ brd->re_map_membase = NULL;
+ }
+
+ /* Free all allocated channels structs */
+ for (i = 0; i < MAXPORTS ; i++) {
+ if (brd->channels[i]) {
+ kfree(brd->channels[i]->ch_rqueue);
+ kfree(brd->channels[i]->ch_equeue);
+ kfree(brd->channels[i]->ch_wqueue);
+ kfree(brd->channels[i]);
+ brd->channels[i] = NULL;
+ }
+ }
+
+ dgnc_board[brd->boardnum] = NULL;
+
+ kfree(brd);
+}
+
+/* Driver load/unload functions */
+
+static void cleanup(void)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dgnc_poll_lock, flags);
+ dgnc_poll_stop = 1;
+ spin_unlock_irqrestore(&dgnc_poll_lock, flags);
+
+ /* Turn off poller right away. */
+ del_timer_sync(&dgnc_poll_timer);
+
+ device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
+ class_destroy(dgnc_class);
+ unregister_chrdev(dgnc_major, "dgnc");
+
+ for (i = 0; i < dgnc_num_boards; ++i) {
+ dgnc_cleanup_tty(dgnc_board[i]);
+ dgnc_cleanup_board(dgnc_board[i]);
+ }
+}
+
+/*
+ * dgnc_cleanup_module()
+ *
+ * Module unload. This is where it all ends.
+ */
+static void __exit dgnc_cleanup_module(void)
+{
+ cleanup();
+ pci_unregister_driver(&dgnc_driver);
+}
+
+/*
+ * init_module()
+ *
+ * Module load. This is where it all starts.
+ */
+static int __init dgnc_init_module(void)
+{
+ int rc;
+
+ /* Initialize global stuff */
+
+ rc = dgnc_start();
+
+ if (rc < 0)
+ return rc;
+
+ /* Find and configure all the cards */
+
+ rc = pci_register_driver(&dgnc_driver);
+ if (rc) {
+ pr_warn("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n");
+ cleanup();
+ return rc;
+ }
+
+ return 0;
+}
+
+module_init(dgnc_init_module);
+module_exit(dgnc_cleanup_module);
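[Editor's note] The dgnc_driver.c rewrite above mostly reorders definitions bottom-up so the forward prototypes and the sysfs hooks can be dropped; the probe path itself is unchanged. A condensed sketch of that ordering for a PCI driver is shown below; the device IDs and names are placeholders, not the dgnc table.

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_pci_tbl[] = {
        { PCI_DEVICE(0x1234, 0x5678) }, /* dummy vendor/device IDs */
        { 0 }
};
MODULE_DEVICE_TABLE(pci, demo_pci_tbl);

/* Defined before the pci_driver that references it: no prototype needed. */
static int demo_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int rc = pci_enable_device(pdev);

        if (rc)
                return -EIO;
        /* ... board setup, tty registration, IRQ request ... */
        return 0;
}

static struct pci_driver demo_driver = {
        .name = "demo",
        .probe = demo_init_one,
        /* a full driver would also provide .remove */
        .id_table = demo_pci_tbl,
};

static int __init demo_init_module(void)
{
        return pci_register_driver(&demo_driver);
}

static void __exit demo_cleanup_module(void)
{
        pci_unregister_driver(&demo_driver);
}

module_init(demo_init_module);
module_exit(demo_cleanup_module);
MODULE_LICENSE("GPL");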
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index 879202663a98..c8119f2fe881 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -12,11 +12,8 @@
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
- *************************************************************************
- *
* Driver includes
- *
- *************************************************************************/
+ */
#ifndef __DGNC_DRIVER_H
#define __DGNC_DRIVER_H
@@ -26,19 +23,14 @@
#include <linux/interrupt.h>
#include "digi.h" /* Digi specific ioctl header */
-#include "dgnc_sysfs.h" /* Support for SYSFS */
-/*************************************************************************
- *
- * Driver defines
- *
- *************************************************************************/
+/* Driver defines */
-/* Driver identification and error statments */
-#define PROCSTR "dgnc" /* /proc entries */
-#define DEVSTR "/dev/dg/dgnc" /* /dev entries */
-#define DRVSTR "dgnc" /* Driver name string */
-#define DG_PART "40002369_F" /* RPM part number */
+/* Driver identification and error statements */
+#define PROCSTR "dgnc" /* /proc entries */
+#define DEVSTR "/dev/dg/dgnc" /* /dev entries */
+#define DRVSTR "dgnc" /* Driver name string */
+#define DG_PART "40002369_F" /* RPM part number */
#define TRC_TO_CONSOLE 1
@@ -61,7 +53,8 @@
#define PORT_NUM(dev) ((dev) & 0x7f)
#define IS_PRINT(dev) (((dev) & 0xff) >= 0x80)
-/* MAX number of stop characters we will send
+/*
+ * MAX number of stop characters we will send
* when our read queue is getting full
*/
#define MAX_STOPS_SENT 5
@@ -88,35 +81,28 @@
#define _POSIX_VDISABLE '\0'
#endif
-/*
- * All the possible states the driver can be while being loaded.
- */
+/* All the possible states the driver can be while being loaded. */
+
enum {
DRIVER_INITIALIZED = 0,
DRIVER_READY
};
-/*
- * All the possible states the board can be while booting up.
- */
+/* All the possible states the board can be while booting up. */
+
enum {
BOARD_FAILED = 0,
BOARD_FOUND,
BOARD_READY
};
-/*************************************************************************
- *
- * Structures and closely related defines.
- *
- *************************************************************************/
+/* Structures and closely related defines. */
struct dgnc_board;
struct channel_t;
-/************************************************************************
- * Per board operations structure *
- ************************************************************************/
+/* Per board operations structure */
+
struct board_ops {
void (*tasklet)(unsigned long data);
irqreturn_t (*intr)(int irq, void *voidbrd);
@@ -138,16 +124,14 @@ struct board_ops {
void (*send_immediate_char)(struct channel_t *ch, unsigned char);
};
-/************************************************************************
- * Device flag definitions for bd_flags.
- ************************************************************************/
+/* Device flag definitions for bd_flags. */
+
#define BD_IS_PCI_EXPRESS 0x0001 /* Is a PCI Express board */
-/*
- * Per-board information
- */
+/* Per-board information */
+
struct dgnc_board {
- int magic; /* Board Magic number. */
+ int magic; /* Board Magic number. */
int boardnum; /* Board number: 0-32 */
int type; /* Type of board */
@@ -220,62 +204,56 @@ struct dgnc_board {
};
-/************************************************************************
- * Unit flag definitions for un_flags.
- ************************************************************************/
-#define UN_ISOPEN 0x0001 /* Device is open */
-#define UN_CLOSING 0x0002 /* Line is being closed */
-#define UN_IMM 0x0004 /* Service immediately */
-#define UN_BUSY 0x0008 /* Some work this channel */
-#define UN_BREAKI 0x0010 /* Input break received */
+/* Unit flag definitions for un_flags. */
+#define UN_ISOPEN 0x0001 /* Device is open */
+#define UN_CLOSING 0x0002 /* Line is being closed */
+#define UN_IMM 0x0004 /* Service immediately */
+#define UN_BUSY 0x0008 /* Some work this channel */
+#define UN_BREAKI 0x0010 /* Input break received */
#define UN_PWAIT 0x0020 /* Printer waiting for terminal */
-#define UN_TIME 0x0040 /* Waiting on time */
-#define UN_EMPTY 0x0080 /* Waiting output queue empty */
+#define UN_TIME 0x0040 /* Waiting on time */
+#define UN_EMPTY 0x0080 /* Waiting output queue empty */
#define UN_LOW 0x0100 /* Waiting output low water mark*/
-#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */
-#define UN_WOPEN 0x0400 /* Device waiting for open */
-#define UN_WIOCTL 0x0800 /* Device waiting for open */
-#define UN_HANGUP 0x8000 /* Carrier lost */
+#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */
+#define UN_WOPEN 0x0400 /* Device waiting for open */
+#define UN_WIOCTL 0x0800 /* Device waiting for open */
+#define UN_HANGUP 0x8000 /* Carrier lost */
struct device;
-/************************************************************************
- * Structure for terminal or printer unit.
- ************************************************************************/
+/* Structure for terminal or printer unit. */
struct un_t {
- int magic; /* Unit Magic Number. */
+ int magic; /* Unit Magic Number. */
struct channel_t *un_ch;
ulong un_time;
uint un_type;
- uint un_open_count; /* Counter of opens to port */
- struct tty_struct *un_tty;/* Pointer to unit tty structure */
- uint un_flags; /* Unit flags */
+ uint un_open_count; /* Counter of opens to port */
+ struct tty_struct *un_tty; /* Pointer to unit tty structure */
+ uint un_flags; /* Unit flags */
wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */
- uint un_dev; /* Minor device number */
+ uint un_dev; /* Minor device number */
struct device *un_sysfs;
};
-/************************************************************************
- * Device flag definitions for ch_flags.
- ************************************************************************/
-#define CH_PRON 0x0001 /* Printer on string */
-#define CH_STOP 0x0002 /* Output is stopped */
-#define CH_STOPI 0x0004 /* Input is stopped */
-#define CH_CD 0x0008 /* Carrier is present */
-#define CH_FCAR 0x0010 /* Carrier forced on */
-#define CH_HANGUP 0x0020 /* Hangup received */
-
-#define CH_RECEIVER_OFF 0x0040 /* Receiver is off */
-#define CH_OPENING 0x0080 /* Port in fragile open state */
-#define CH_CLOSING 0x0100 /* Port in fragile close state */
-#define CH_FIFO_ENABLED 0x0200 /* Port has FIFOs enabled */
-#define CH_TX_FIFO_EMPTY 0x0400 /* TX Fifo is completely empty */
-#define CH_TX_FIFO_LWM 0x0800 /* TX Fifo is below Low Water */
-#define CH_BREAK_SENDING 0x1000 /* Break is being sent */
-#define CH_LOOPBACK 0x2000 /* Channel is in lookback mode */
+/* Device flag definitions for ch_flags. */
+#define CH_PRON 0x0001 /* Printer on string */
+#define CH_STOP 0x0002 /* Output is stopped */
+#define CH_STOPI 0x0004 /* Input is stopped */
+#define CH_CD 0x0008 /* Carrier is present */
+#define CH_FCAR 0x0010 /* Carrier forced on */
+#define CH_HANGUP 0x0020 /* Hangup received */
+
+#define CH_RECEIVER_OFF 0x0040 /* Receiver is off */
+#define CH_OPENING 0x0080 /* Port in fragile open state */
+#define CH_CLOSING 0x0100 /* Port in fragile close state */
+#define CH_FIFO_ENABLED 0x0200 /* Port has FIFOs enabled */
+#define CH_TX_FIFO_EMPTY 0x0400 /* TX Fifo is completely empty */
+#define CH_TX_FIFO_LWM 0x0800 /* TX Fifo is below Low Water */
+#define CH_BREAK_SENDING 0x1000 /* Break is being sent */
+#define CH_LOOPBACK 0x2000 /* Channel is in lookback mode */
#define CH_BAUD0 0x08000 /* Used for checking B0 transitions */
-#define CH_FORCED_STOP 0x20000 /* Output is forcibly stopped */
-#define CH_FORCED_STOPI 0x40000 /* Input is forcibly stopped */
+#define CH_FORCED_STOP 0x20000 /* Output is forcibly stopped */
+#define CH_FORCED_STOPI 0x40000 /* Input is forcibly stopped */
/* Our Read/Error/Write queue sizes */
#define RQUEUEMASK 0x1FFF /* 8 K - 1 */
@@ -285,43 +263,41 @@ struct un_t {
#define EQUEUESIZE RQUEUESIZE
#define WQUEUESIZE (WQUEUEMASK + 1)
-/************************************************************************
- * Channel information structure.
- ************************************************************************/
+/* Channel information structure. */
struct channel_t {
- int magic; /* Channel Magic Number */
- struct dgnc_board *ch_bd; /* Board structure pointer */
+ int magic; /* Channel Magic Number */
+ struct dgnc_board *ch_bd; /* Board structure pointer */
struct digi_t ch_digi; /* Transparent Print structure */
- struct un_t ch_tun; /* Terminal unit info */
- struct un_t ch_pun; /* Printer unit info */
+ struct un_t ch_tun; /* Terminal unit info */
+ struct un_t ch_pun; /* Printer unit info */
spinlock_t ch_lock; /* provide for serialization */
wait_queue_head_t ch_flags_wait;
- uint ch_portnum; /* Port number, 0 offset. */
- uint ch_open_count; /* open count */
- uint ch_flags; /* Channel flags */
+ uint ch_portnum; /* Port number, 0 offset. */
+ uint ch_open_count; /* open count */
+ uint ch_flags; /* Channel flags */
ulong ch_close_delay; /* How long we should
* drop RTS/DTR for
*/
- ulong ch_cpstime; /* Time for CPS calculations */
+ ulong ch_cpstime; /* Time for CPS calculations */
- tcflag_t ch_c_iflag; /* channel iflags */
- tcflag_t ch_c_cflag; /* channel cflags */
- tcflag_t ch_c_oflag; /* channel oflags */
- tcflag_t ch_c_lflag; /* channel lflags */
- unsigned char ch_stopc; /* Stop character */
- unsigned char ch_startc; /* Start character */
+ tcflag_t ch_c_iflag; /* channel iflags */
+ tcflag_t ch_c_cflag; /* channel cflags */
+ tcflag_t ch_c_oflag; /* channel oflags */
+ tcflag_t ch_c_lflag; /* channel lflags */
+ unsigned char ch_stopc; /* Stop character */
+ unsigned char ch_startc; /* Start character */
uint ch_old_baud; /* Cache of the current baud */
uint ch_custom_speed;/* Custom baud, if set */
uint ch_wopen; /* Waiting for open process cnt */
- unsigned char ch_mostat; /* FEP output modem status */
- unsigned char ch_mistat; /* FEP input modem status */
+ unsigned char ch_mostat; /* FEP output modem status */
+ unsigned char ch_mistat; /* FEP input modem status */
struct neo_uart_struct __iomem *ch_neo_uart; /* Pointer to the
* "mapped" UART struct
@@ -347,10 +323,10 @@ struct channel_t {
ulong ch_rxcount; /* total of data received so far */
ulong ch_txcount; /* total of data transmitted so far */
- unsigned char ch_r_tlevel; /* Receive Trigger level */
- unsigned char ch_t_tlevel; /* Transmit Trigger level */
+ unsigned char ch_r_tlevel; /* Receive Trigger level */
+ unsigned char ch_t_tlevel; /* Transmit Trigger level */
- unsigned char ch_r_watermark; /* Receive Watermark */
+ unsigned char ch_r_watermark; /* Receive Watermark */
ulong ch_stop_sending_break; /* Time we should STOP
* sending a break
@@ -374,16 +350,15 @@ struct channel_t {
};
-/*
- * Our Global Variables.
- */
+/* Our Global Variables. */
+
extern uint dgnc_major; /* Our driver/mgmt major */
extern int dgnc_poll_tick; /* Poll interval - 20 ms */
extern spinlock_t dgnc_global_lock; /* Driver global spinlock */
extern spinlock_t dgnc_poll_lock; /* Poll scheduling lock */
extern uint dgnc_num_boards; /* Total number of boards */
-extern struct dgnc_board *dgnc_board[MAXBOARDS]; /* Array of board
- * structs
- */
+extern struct dgnc_board *dgnc_board[MAXBOARDS];/* Array of board
+ * structs
+ */
#endif
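[Editor's note] The header changes above are cosmetic (comment style and column alignment); the flag words keep their meaning. Those ch_flags bits are read and modified under ch_lock, as in this small sketch mirroring the CH_BREAK_SENDING handling visible in the .c hunks (names and the bit value are illustrative only):

#include <linux/spinlock.h>

#define DEMO_CH_BREAK_SENDING 0x1000    /* break is being sent */

struct demo_channel {
        spinlock_t ch_lock;
        unsigned int ch_flags;
};

static void demo_stop_break(struct demo_channel *ch)
{
        unsigned long flags;

        spin_lock_irqsave(&ch->ch_lock, flags);
        if (ch->ch_flags & DEMO_CH_BREAK_SENDING) {
                /* ... clear the break condition in the UART here ... */
                ch->ch_flags &= ~DEMO_CH_BREAK_SENDING;
        }
        spin_unlock_irqrestore(&ch->ch_lock, flags);
}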
diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c
index 683c098391d9..9d9b15d6358a 100644
--- a/drivers/staging/dgnc/dgnc_mgmt.c
+++ b/drivers/staging/dgnc/dgnc_mgmt.c
@@ -13,13 +13,11 @@
* PURPOSE. See the GNU General Public License for more details.
*/
-/************************************************************************
- *
+/*
* This file implements the mgmt functionality for the
* Neo and ClassicBoard based product lines.
- *
- ************************************************************************
*/
+
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/sched.h> /* For jiffies, task states */
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
index 5becb3741b67..3eefefe53174 100644
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -107,7 +107,8 @@ static inline void neo_set_cts_flow_control(struct channel_t *ch)
/* Turn off auto Xon flow control */
efr &= ~UART_17158_EFR_IXON;
- /* Why? Because Exar's spec says we have to zero it
+ /*
+ * Why? Because Exar's spec says we have to zero it
* out before setting it
*/
writeb(0, &ch->ch_neo_uart->efr);
@@ -145,7 +146,8 @@ static inline void neo_set_rts_flow_control(struct channel_t *ch)
ier &= ~UART_17158_IER_XOFF;
efr &= ~UART_17158_EFR_IXOFF;
- /* Why? Because Exar's spec says we have to zero it
+ /*
+ * Why? Because Exar's spec says we have to zero it
* out before setting it
*/
writeb(0, &ch->ch_neo_uart->efr);
@@ -185,7 +187,8 @@ static inline void neo_set_ixon_flow_control(struct channel_t *ch)
/* Turn on auto Xon flow control */
efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXON);
- /* Why? Because Exar's spec says we have to zero it
+ /*
+ * Why? Because Exar's spec says we have to zero it
* out before setting it
*/
writeb(0, &ch->ch_neo_uart->efr);
@@ -225,7 +228,8 @@ static inline void neo_set_ixoff_flow_control(struct channel_t *ch)
ier |= UART_17158_IER_XOFF;
efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
- /* Why? Because Exar's spec says we have to zero it
+ /*
+ * Why? Because Exar's spec says we have to zero it
* out before setting it
*/
writeb(0, &ch->ch_neo_uart->efr);
@@ -268,7 +272,8 @@ static inline void neo_set_no_input_flow_control(struct channel_t *ch)
else
efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
- /* Why? Because Exar's spec says we have to zero
+ /*
+ * Why? Because Exar's spec says we have to zero
* it out before setting it
*/
writeb(0, &ch->ch_neo_uart->efr);
@@ -308,7 +313,8 @@ static inline void neo_set_no_output_flow_control(struct channel_t *ch)
else
efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXON);
- /* Why? Because Exar's spec says we have to zero it
+ /*
+ * Why? Because Exar's spec says we have to zero it
* out before setting it
*/
writeb(0, &ch->ch_neo_uart->efr);
@@ -351,9 +357,8 @@ static inline void neo_set_new_start_stop_chars(struct channel_t *ch)
neo_pci_posting_flush(ch->ch_bd);
}
-/*
- * No locks are assumed to be held when calling this function.
- */
+/* No locks are assumed to be held when calling this function. */
+
static inline void neo_clear_break(struct channel_t *ch, int force)
{
unsigned long flags;
@@ -381,9 +386,8 @@ static inline void neo_clear_break(struct channel_t *ch, int force)
spin_unlock_irqrestore(&ch->ch_lock, flags);
}
-/*
- * Parse the ISR register.
- */
+/* Parse the ISR register. */
+
static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
{
struct channel_t *ch;
@@ -412,8 +416,8 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
if (isr & (UART_17158_IIR_RDI_TIMEOUT | UART_IIR_RDI)) {
/* Read data from uart -> queue */
neo_copy_data_from_uart_to_queue(ch);
-
- /* Call our tty layer to enforce queue
+ /*
+ * Call our tty layer to enforce queue
* flow control if needed.
*/
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -438,7 +442,8 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
* one it was, so we can suspend or resume data flow.
*/
if (cause == UART_17158_XON_DETECT) {
- /* Is output stopped right now, if so,
+ /*
+ * Is output stopped right now, if so,
* resume it
*/
if (brd->channels[port]->ch_flags & CH_STOP) {
@@ -609,9 +614,8 @@ static void neo_param(struct tty_struct *tty)
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
return;
- /*
- * If baud rate is zero, flush queues, and set mval to drop DTR.
- */
+ /* If baud rate is zero, flush queues, and set mval to drop DTR. */
+
if ((ch->ch_c_cflag & (CBAUD)) == 0) {
ch->ch_r_head = 0;
ch->ch_r_tail = 0;
@@ -672,7 +676,8 @@ static void neo_param(struct tty_struct *tty)
4800, 9600, 19200, 38400 }
};
- /* Only use the TXPrint baud rate if the terminal unit
+ /*
+ * Only use the TXPrint baud rate if the terminal unit
* is NOT open
*/
if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
@@ -797,7 +802,8 @@ static void neo_param(struct tty_struct *tty)
if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
neo_set_cts_flow_control(ch);
} else if (ch->ch_c_iflag & IXON) {
- /* If start/stop is set to disable, then we should
+ /*
+ * If start/stop is set to disable, then we should
* disable flow control
*/
if ((ch->ch_startc == _POSIX_VDISABLE) ||
@@ -812,7 +818,8 @@ static void neo_param(struct tty_struct *tty)
if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
neo_set_rts_flow_control(ch);
} else if (ch->ch_c_iflag & IXOFF) {
- /* If start/stop is set to disable, then we should
+ /*
+ * If start/stop is set to disable, then we should
* disable flow control
*/
if ((ch->ch_startc == _POSIX_VDISABLE) ||
@@ -840,9 +847,8 @@ static void neo_param(struct tty_struct *tty)
neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
}
-/*
- * Our board poller function.
- */
+/* Our board poller function. */
+
static void neo_tasklet(unsigned long data)
{
struct dgnc_board *bd = (struct dgnc_board *)data;
@@ -867,9 +873,8 @@ static void neo_tasklet(unsigned long data)
*/
spin_lock_irqsave(&bd->bd_intr_lock, flags);
- /*
- * If board is ready, parse deeper to see if there is anything to do.
- */
+ /* If board is ready, parse deeper to see if there is anything to do. */
+
if ((state == BOARD_READY) && (ports > 0)) {
/* Loop on each port */
for (i = 0; i < ports; i++) {
@@ -997,9 +1002,9 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
break;
case UART_17158_RX_LINE_STATUS:
- /*
- * RXRDY and RX LINE Status (logic OR of LSR[4:1])
- */
+
+ /* RXRDY and RX LINE Status (logic OR of LSR[4:1]) */
+
neo_parse_lsr(brd, port);
break;
@@ -1022,9 +1027,9 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
break;
case UART_17158_MSR:
- /*
- * MSR or flow control was seen.
- */
+
+ /* MSR or flow control was seen. */
+
neo_parse_isr(brd, port);
break;
@@ -1041,9 +1046,8 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
port++;
}
- /*
- * Schedule tasklet to more in-depth servicing at a better time.
- */
+ /* Schedule tasklet to more in-depth servicing at a better time. */
+
tasklet_schedule(&brd->helper_tasklet);
spin_unlock_irqrestore(&brd->bd_intr_lock, flags);
@@ -1238,9 +1242,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
}
- /*
- * Discard character if we are ignoring the error mask.
- */
+ /* Discard character if we are ignoring the error mask. */
+
if (linestatus & error_mask) {
unsigned char discard;
@@ -1279,9 +1282,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
ch->ch_rxcount++;
}
- /*
- * Write new final heads to channel structure.
- */
+ /* Write new final heads to channel structure. */
+
ch->ch_r_head = head & RQUEUEMASK;
ch->ch_e_head = head & EQUEUEMASK;
@@ -1412,9 +1414,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
(ch->ch_flags & CH_BREAK_SENDING))
goto exit_unlock;
- /*
- * If FIFOs are disabled. Send data directly to txrx register
- */
+ /* If FIFOs are disabled. Send data directly to txrx register */
+
if (!(ch->ch_flags & CH_FIFO_ENABLED)) {
unsigned char lsrbits = readb(&ch->ch_neo_uart->lsr);
@@ -1458,9 +1459,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
goto exit_unlock;
}
- /*
- * We have to do it this way, because of the EXAR TXFIFO count bug.
- */
+ /* We have to do it this way, because of the EXAR TXFIFO count bug. */
+
if ((ch->ch_bd->dvid & 0xf0) < UART_XR17E158_DVID) {
if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM)))
goto exit_unlock;
@@ -1645,9 +1645,8 @@ static void neo_send_stop_character(struct channel_t *ch)
}
}
-/*
- * neo_uart_init
- */
+/* neo_uart_init */
+
static void neo_uart_init(struct channel_t *ch)
{
writeb(0, &ch->ch_neo_uart->ier);
@@ -1668,9 +1667,8 @@ static void neo_uart_init(struct channel_t *ch)
neo_pci_posting_flush(ch->ch_bd);
}
-/*
- * Make the UART completely turn off.
- */
+/* Make the UART completely turn off. */
+
static void neo_uart_off(struct channel_t *ch)
{
/* Turn off UART enhanced bits */
@@ -1705,9 +1703,8 @@ static uint neo_get_uart_bytes_left(struct channel_t *ch)
/* Channel lock MUST be held by the calling function! */
static void neo_send_break(struct channel_t *ch, int msecs)
{
- /*
- * If we receive a time of 0, this means turn off the break.
- */
+ /* If we receive a time of 0, this means turn off the break. */
+
if (msecs == 0) {
if (ch->ch_flags & CH_BREAK_SENDING) {
unsigned char temp = readb(&ch->ch_neo_uart->lcr);
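[Editor's note] The neo hunks keep repeating one hardware quirk in the reflowed comments: the Exar EFR register has to be written to zero before the new value is programmed. A hedged sketch of that read-modify-write sequence on a memory-mapped UART register follows; the macro names and bit values are examples, not the real register layout.

#include <linux/io.h>
#include <linux/types.h>

#define DEMO_EFR_ECB  0x10      /* enhanced control bit (example value) */
#define DEMO_EFR_IXON 0x02      /* auto-Xon compare (example value) */

/* efr points into an ioremap()ed UART register block. */
static void demo_enable_ixon(u8 __iomem *efr)
{
        u8 val = readb(efr);

        val |= DEMO_EFR_ECB | DEMO_EFR_IXON;

        /* Per the quirk noted in the driver: zero the register first ... */
        writeb(0, efr);
        /* ... then write the combined value in one go. */
        writeb(val, efr);
}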
diff --git a/drivers/staging/dgnc/dgnc_neo.h b/drivers/staging/dgnc/dgnc_neo.h
index abddd48353d0..77ecd9baae45 100644
--- a/drivers/staging/dgnc/dgnc_neo.h
+++ b/drivers/staging/dgnc/dgnc_neo.h
@@ -18,37 +18,38 @@
#include "dgnc_driver.h"
-/************************************************************************
- * Per channel/port NEO UART structure *
- ************************************************************************
- * Base Structure Entries Usage Meanings to Host *
- * *
- * W = read write R = read only *
- * U = Unused. *
- ************************************************************************/
+/*
+ * Per channel/port NEO UART structure
+ * Base Structure Entries Usage Meanings to Host
+ *
+ * W = read write R = read only
+ * U = Unused.
+ */
struct neo_uart_struct {
- u8 txrx; /* WR RHR/THR - Holding Reg */
+ u8 txrx; /* WR RHR/THR - Holding Reg */
u8 ier; /* WR IER - Interrupt Enable Reg */
- u8 isr_fcr; /* WR ISR/FCR - Interrupt Status Reg/Fifo Control Reg */
+ u8 isr_fcr; /* WR ISR/FCR - Interrupt Status Reg/Fifo
+ * Control Reg
+ */
u8 lcr; /* WR LCR - Line Control Reg */
u8 mcr; /* WR MCR - Modem Control Reg */
u8 lsr; /* WR LSR - Line Status Reg */
u8 msr; /* WR MSR - Modem Status Reg */
u8 spr; /* WR SPR - Scratch Pad Reg */
- u8 fctr; /* WR FCTR - Feature Control Reg */
+ u8 fctr; /* WR FCTR - Feature Control Reg */
u8 efr; /* WR EFR - Enhanced Function Reg */
- u8 tfifo; /* WR TXCNT/TXTRG - Transmit FIFO Reg */
- u8 rfifo; /* WR RXCNT/RXTRG - Receive FIFO Reg */
+ u8 tfifo; /* WR TXCNT/TXTRG - Transmit FIFO Reg */
+ u8 rfifo; /* WR RXCNT/RXTRG - Receive FIFO Reg */
u8 xoffchar1; /* WR XOFF 1 - XOff Character 1 Reg */
u8 xoffchar2; /* WR XOFF 2 - XOff Character 2 Reg */
u8 xonchar1; /* WR XON 1 - Xon Character 1 Reg */
u8 xonchar2; /* WR XON 2 - XOn Character 2 Reg */
u8 reserved1[0x2ff - 0x200]; /* U Reserved by Exar */
- u8 txrxburst[64]; /* RW 64 bytes of RX/TX FIFO Data */
+ u8 txrxburst[64]; /* RW 64 bytes of RX/TX FIFO Data */
u8 reserved2[0x37f - 0x340]; /* U Reserved by Exar */
- u8 rxburst_with_errors[64]; /* R 64 bytes of RX FIFO Data + LSR */
+ u8 rxburst_with_errors[64]; /* R 64 bytes of RX FIFO Data + LSR */
};
/* Where to read the extended interrupt register (32bits instead of 8bits) */
@@ -108,7 +109,9 @@ struct neo_uart_struct {
/* 17158 Extended IIR's */
#define UART_17158_IIR_RDI_TIMEOUT 0x0C /* Receiver data TIMEOUT */
#define UART_17158_IIR_XONXOFF 0x10 /* Received an XON/XOFF char */
-#define UART_17158_IIR_HWFLOW_STATE_CHANGE 0x20 /* CTS/DSR or RTS/DTR state change */
+#define UART_17158_IIR_HWFLOW_STATE_CHANGE 0x20 /* CTS/DSR or RTS/DTR
+ * state change
+ */
#define UART_17158_IIR_FIFO_ENABLED 0xC0 /* 16550 FIFOs are Enabled */
/*
@@ -119,8 +122,12 @@ struct neo_uart_struct {
#define UART_17158_RXRDY_TIMEOUT 0x2 /* RX Ready Timeout */
#define UART_17158_TXRDY 0x3 /* TX Ready */
#define UART_17158_MSR 0x4 /* Modem State Change */
-#define UART_17158_TX_AND_FIFO_CLR 0x40 /* Transmitter Holding Reg Empty */
-#define UART_17158_RX_FIFO_DATA_ERROR 0x80 /* UART detected an RX FIFO Data error */
+#define UART_17158_TX_AND_FIFO_CLR 0x40 /* Transmitter Holding
+ * Reg Empty
+ */
+#define UART_17158_RX_FIFO_DATA_ERROR 0x80 /* UART detected an RX FIFO
+ * Data error
+ */
/*
* These are the EXTENDED definitions for the 17C158's Interrupt
@@ -130,19 +137,22 @@ struct neo_uart_struct {
#define UART_17158_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */
#define UART_17158_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */
#define UART_17158_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */
-#define UART_17158_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow COntrol Enable */
+#define UART_17158_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow Control Enable */
-#define UART_17158_XOFF_DETECT 0x1 /* Indicates whether chip saw an incoming XOFF char */
-#define UART_17158_XON_DETECT 0x2 /* Indicates whether chip saw an incoming XON char */
+#define UART_17158_XOFF_DETECT 0x1 /* Indicates whether chip saw an
+ * incoming XOFF char
+ */
+#define UART_17158_XON_DETECT 0x2 /* Indicates whether chip saw an
+ * incoming XON char
+ */
#define UART_17158_IER_RSVD1 0x10 /* Reserved by Exar */
#define UART_17158_IER_XOFF 0x20 /* Xoff Interrupt Enable */
#define UART_17158_IER_RTSDTR 0x40 /* Output Interrupt Enable */
#define UART_17158_IER_CTSDSR 0x80 /* Input Interrupt Enable */
-/*
- * Our Global Variables
- */
+/* Our Global Variables */
+
extern struct board_ops dgnc_neo_ops;
#endif
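[Editor's note] The neo_uart_struct above maps the Exar register window byte for byte, so the driver can address individual registers as struct members through an __iomem pointer. A reduced sketch of that technique, with only a few hypothetical registers rather than the full 17C158 layout:

#include <linux/io.h>
#include <linux/types.h>

/*
 * Layout must match the device exactly: one byte per register, in order.
 * All-u8 members mean the compiler inserts no padding.
 */
struct demo_uart_regs {
        u8 txrx;        /* holding register */
        u8 ier;         /* interrupt enable */
        u8 isr_fcr;     /* interrupt status / FIFO control */
        u8 lcr;         /* line control */
        u8 mcr;         /* modem control */
        u8 lsr;         /* line status */
        u8 msr;         /* modem status */
};

static u8 demo_read_lsr(struct demo_uart_regs __iomem *uart)
{
        /* &uart->lsr is just base + offsetof(..., lsr); readb does the MMIO. */
        return readb(&uart->lsr);
}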
diff --git a/drivers/staging/dgnc/dgnc_sysfs.c b/drivers/staging/dgnc/dgnc_sysfs.c
deleted file mode 100644
index 290bf6e226ac..000000000000
--- a/drivers/staging/dgnc/dgnc_sysfs.c
+++ /dev/null
@@ -1,703 +0,0 @@
-/*
- * Copyright 2004 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/serial_reg.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-
-#include "dgnc_driver.h"
-#include "dgnc_mgmt.h"
-
-static ssize_t version_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
-}
-static DRIVER_ATTR_RO(version);
-
-static ssize_t boards_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", dgnc_num_boards);
-}
-static DRIVER_ATTR_RO(boards);
-
-static ssize_t maxboards_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
-}
-static DRIVER_ATTR_RO(maxboards);
-
-static ssize_t pollrate_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%dms\n", dgnc_poll_tick);
-}
-
-static ssize_t pollrate_store(struct device_driver *ddp,
- const char *buf, size_t count)
-{
- unsigned long flags;
- int tick;
- int ret;
-
- ret = sscanf(buf, "%d\n", &tick);
- if (ret != 1)
- return -EINVAL;
-
- spin_lock_irqsave(&dgnc_poll_lock, flags);
- dgnc_poll_tick = tick;
- spin_unlock_irqrestore(&dgnc_poll_lock, flags);
-
- return count;
-}
-static DRIVER_ATTR_RW(pollrate);
-
-void dgnc_create_driver_sysfiles(struct pci_driver *dgnc_driver)
-{
- int rc = 0;
- struct device_driver *driverfs = &dgnc_driver->driver;
-
- rc |= driver_create_file(driverfs, &driver_attr_version);
- rc |= driver_create_file(driverfs, &driver_attr_boards);
- rc |= driver_create_file(driverfs, &driver_attr_maxboards);
- rc |= driver_create_file(driverfs, &driver_attr_pollrate);
- if (rc)
- pr_err("DGNC: sysfs driver_create_file failed!\n");
-}
-
-void dgnc_remove_driver_sysfiles(struct pci_driver *dgnc_driver)
-{
- struct device_driver *driverfs = &dgnc_driver->driver;
-
- driver_remove_file(driverfs, &driver_attr_version);
- driver_remove_file(driverfs, &driver_attr_boards);
- driver_remove_file(driverfs, &driver_attr_maxboards);
- driver_remove_file(driverfs, &driver_attr_pollrate);
-}
-
-#define DGNC_VERIFY_BOARD(p, bd) \
- do { \
- if (!p) \
- return 0; \
- \
- bd = dev_get_drvdata(p); \
- if (!bd || bd->magic != DGNC_BOARD_MAGIC) \
- return 0; \
- if (bd->state != BOARD_READY) \
- return 0; \
- } while (0)
-
-static ssize_t vpd_show(struct device *p, struct device_attribute *attr,
- char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- count += sprintf(buf + count,
- "\n 0 1 2 3 4 5 6 7 8 9 A B C D E F");
- for (i = 0; i < 0x40 * 2; i++) {
- if (!(i % 16))
- count += sprintf(buf + count, "\n%04X ", i * 2);
- count += sprintf(buf + count, "%02X ", bd->vpd[i]);
- }
- count += sprintf(buf + count, "\n");
-
- return count;
-}
-static DEVICE_ATTR_RO(vpd);
-
-static ssize_t serial_number_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- if (bd->serial_num[0] == '\0')
- count += sprintf(buf + count, "<UNKNOWN>\n");
- else
- count += sprintf(buf + count, "%s\n", bd->serial_num);
-
- return count;
-}
-static DEVICE_ATTR_RO(serial_number);
-
-static ssize_t ports_state_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d %s\n", bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_open_count ? "Open" : "Closed");
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_state);
-
-static ssize_t ports_baud_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d %d\n", bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_old_baud);
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_baud);
-
-static ssize_t ports_msignals_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- struct channel_t *ch = bd->channels[i];
-
- if (ch->ch_open_count) {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d %s %s %s %s %s %s\n",
- ch->ch_portnum,
- (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
- (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
- (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
- (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
- (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
- (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
- } else {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d\n", ch->ch_portnum);
- }
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_msignals);
-
-static ssize_t ports_iflag_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_iflag);
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_iflag);
-
-static ssize_t ports_cflag_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_cflag);
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_cflag);
-
-static ssize_t ports_oflag_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_oflag);
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_oflag);
-
-static ssize_t ports_lflag_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_lflag);
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_lflag);
-
-static ssize_t ports_digi_flag_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_digi.digi_flags);
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_digi_flag);
-
-static ssize_t ports_rxcount_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_rxcount);
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_rxcount);
-
-static ssize_t ports_txcount_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_txcount);
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_txcount);
-
-/* this function creates the sys files that will export each signal status
- * to sysfs each value will be put in a separate filename
- */
-void dgnc_create_ports_sysfiles(struct dgnc_board *bd)
-{
- int rc = 0;
-
- dev_set_drvdata(&bd->pdev->dev, bd);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_state);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_baud);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_msignals);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_iflag);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_cflag);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_oflag);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_lflag);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_digi_flag);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_rxcount);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_txcount);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_vpd);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_serial_number);
- if (rc)
- dev_err(&bd->pdev->dev, "dgnc: sysfs device_create_file failed!\n");
-}
-
-/* removes all the sys files created for that port */
-void dgnc_remove_ports_sysfiles(struct dgnc_board *bd)
-{
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_state);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_baud);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_msignals);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_iflag);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_cflag);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_oflag);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_lflag);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_digi_flag);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_rxcount);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_txcount);
- device_remove_file(&bd->pdev->dev, &dev_attr_vpd);
- device_remove_file(&bd->pdev->dev, &dev_attr_serial_number);
-}
-
-static ssize_t tty_state_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%s",
- un->un_open_count ? "Open" : "Closed");
-}
-static DEVICE_ATTR_RO(tty_state);
-
-static ssize_t tty_baud_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_old_baud);
-}
-static DEVICE_ATTR_RO(tty_baud);
-
-static ssize_t tty_msignals_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- if (ch->ch_open_count) {
- return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n",
- (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
- (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
- (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
- (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
- (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
- (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
- }
- return 0;
-}
-static DEVICE_ATTR_RO(tty_msignals);
-
-static ssize_t tty_iflag_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag);
-}
-static DEVICE_ATTR_RO(tty_iflag);
-
-static ssize_t tty_cflag_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag);
-}
-static DEVICE_ATTR_RO(tty_cflag);
-
-static ssize_t tty_oflag_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag);
-}
-static DEVICE_ATTR_RO(tty_oflag);
-
-static ssize_t tty_lflag_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag);
-}
-static DEVICE_ATTR_RO(tty_lflag);
-
-static ssize_t tty_digi_flag_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags);
-}
-static DEVICE_ATTR_RO(tty_digi_flag);
-
-static ssize_t tty_rxcount_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount);
-}
-static DEVICE_ATTR_RO(tty_rxcount);
-
-static ssize_t tty_txcount_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount);
-}
-static DEVICE_ATTR_RO(tty_txcount);
-
-static ssize_t tty_custom_name_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%sn%d%c\n",
- (un->un_type == DGNC_PRINT) ? "pr" : "tty",
- bd->boardnum + 1, 'a' + ch->ch_portnum);
-}
-static DEVICE_ATTR_RO(tty_custom_name);
-
-static struct attribute *dgnc_sysfs_tty_entries[] = {
- &dev_attr_tty_state.attr,
- &dev_attr_tty_baud.attr,
- &dev_attr_tty_msignals.attr,
- &dev_attr_tty_iflag.attr,
- &dev_attr_tty_cflag.attr,
- &dev_attr_tty_oflag.attr,
- &dev_attr_tty_lflag.attr,
- &dev_attr_tty_digi_flag.attr,
- &dev_attr_tty_rxcount.attr,
- &dev_attr_tty_txcount.attr,
- &dev_attr_tty_custom_name.attr,
- NULL
-};
-
-static const struct attribute_group dgnc_tty_attribute_group = {
- .name = NULL,
- .attrs = dgnc_sysfs_tty_entries,
-};
-
-void dgnc_create_tty_sysfs(struct un_t *un, struct device *c)
-{
- int ret;
-
- ret = sysfs_create_group(&c->kobj, &dgnc_tty_attribute_group);
- if (ret) {
- dev_err(c, "dgnc: failed to create sysfs tty device attributes.\n");
- sysfs_remove_group(&c->kobj, &dgnc_tty_attribute_group);
- return;
- }
-
- dev_set_drvdata(c, un);
-}
-
-void dgnc_remove_tty_sysfs(struct device *c)
-{
- sysfs_remove_group(&c->kobj, &dgnc_tty_attribute_group);
-}
-
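All of the per-board and per-tty files deleted above follow the stock sysfs read-only attribute pattern: a *_show() callback wrapped by DEVICE_ATTR_RO() and exported either file by file (device_create_file()) or as an attribute_group. A minimal sketch of that pattern, using a hypothetical "demo" device in place of the dgnc structures (none of these names belong to the driver):

#include <linux/device.h>
#include <linux/sysfs.h>

struct demo_data {
	int open_count;
};

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct demo_data *d = dev_get_drvdata(dev);

	/* a sysfs show callback may return at most PAGE_SIZE bytes */
	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 d->open_count ? "Open" : "Closed");
}
static DEVICE_ATTR_RO(state);

static struct attribute *demo_attrs[] = {
	&dev_attr_state.attr,
	NULL
};
ATTRIBUTE_GROUPS(demo);

/* Assigning demo_groups to the device's .groups lets the driver core
 * create and remove the files itself, avoiding the manual
 * device_create_file()/device_remove_file() pairs seen in the removed
 * dgnc_create_ports_sysfiles()/dgnc_remove_ports_sysfiles(). */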
diff --git a/drivers/staging/dgnc/dgnc_sysfs.h b/drivers/staging/dgnc/dgnc_sysfs.h
deleted file mode 100644
index 7be7d55bc49e..000000000000
--- a/drivers/staging/dgnc/dgnc_sysfs.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-#ifndef __DGNC_SYSFS_H
-#define __DGNC_SYSFS_H
-
-#include <linux/device.h>
-#include "dgnc_driver.h"
-
-struct dgnc_board;
-struct channel_t;
-struct un_t;
-struct pci_driver;
-struct class_device;
-
-void dgnc_create_ports_sysfiles(struct dgnc_board *bd);
-void dgnc_remove_ports_sysfiles(struct dgnc_board *bd);
-
-void dgnc_create_driver_sysfiles(struct pci_driver *);
-void dgnc_remove_driver_sysfiles(struct pci_driver *);
-
-int dgnc_tty_class_init(void);
-int dgnc_tty_class_destroy(void);
-
-void dgnc_create_tty_sysfs(struct un_t *un, struct device *c);
-void dgnc_remove_tty_sysfs(struct device *c);
-
-#endif
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index 953d9310fa74..1e10c0fe4745 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -13,13 +13,9 @@
* PURPOSE. See the GNU General Public License for more details.
*/
-/************************************************************************
- *
+/*
* This file implements the tty driver functionality for the
* Neo and ClassicBoard PCI based product lines.
- *
- ************************************************************************
- *
*/
#include <linux/kernel.h>
@@ -39,27 +35,20 @@
#include "dgnc_tty.h"
#include "dgnc_neo.h"
#include "dgnc_cls.h"
-#include "dgnc_sysfs.h"
#include "dgnc_utils.h"
-/*
- * internal variables
- */
-static unsigned char *dgnc_TmpWriteBuf;
-
-/*
- * Default transparent print information.
- */
-static struct digi_t dgnc_digi_init = {
- .digi_flags = DIGI_COOK, /* Flags */
- .digi_maxcps = 100, /* Max CPS */
- .digi_maxchar = 50, /* Max chars in print queue */
- .digi_bufsize = 100, /* Printer buffer size */
- .digi_onlen = 4, /* size of printer on string */
- .digi_offlen = 4, /* size of printer off string */
- .digi_onstr = "\033[5i", /* ANSI printer on string ] */
- .digi_offstr = "\033[4i", /* ANSI printer off string ] */
- .digi_term = "ansi" /* default terminal type */
+/* Default transparent print information. */
+
+static const struct digi_t dgnc_digi_init = {
+ .digi_flags = DIGI_COOK, /* Flags */
+ .digi_maxcps = 100, /* Max CPS */
+ .digi_maxchar = 50, /* Max chars in print queue */
+ .digi_bufsize = 100, /* Printer buffer size */
+ .digi_onlen = 4, /* size of printer on string */
+ .digi_offlen = 4, /* size of printer off string */
+ .digi_onstr = "\033[5i", /* ANSI printer on string ] */
+ .digi_offstr = "\033[4i", /* ANSI printer off string ] */
+ .digi_term = "ansi" /* default terminal type */
};
/*
@@ -69,7 +58,7 @@ static struct digi_t dgnc_digi_init = {
* This defines a raw port at 9600 baud, 8 data bits, no parity,
* 1 stop bit.
*/
-static struct ktermios DgncDefaultTermios = {
+static struct ktermios default_termios = {
.c_iflag = (DEFAULT_IFLAGS), /* iflags */
.c_oflag = (DEFAULT_OFLAGS), /* oflags */
.c_cflag = (DEFAULT_CFLAGS), /* cflags */
@@ -113,6 +102,8 @@ static int dgnc_tty_write(struct tty_struct *tty, const unsigned char *buf,
static void dgnc_tty_set_termios(struct tty_struct *tty,
struct ktermios *old_termios);
static void dgnc_tty_send_xchar(struct tty_struct *tty, char ch);
+static void dgnc_set_signal_low(struct channel_t *ch, const unsigned char line);
+static void dgnc_wake_up_unit(struct un_t *unit);
static const struct tty_operations dgnc_tty_ops = {
.open = dgnc_tty_open,
@@ -137,36 +128,7 @@ static const struct tty_operations dgnc_tty_ops = {
.send_xchar = dgnc_tty_send_xchar
};
-/************************************************************************
- *
- * TTY Initialization/Cleanup Functions
- *
- ************************************************************************/
-
-/*
- * dgnc_tty_preinit()
- *
- * Initialize any global tty related data before we download any boards.
- */
-int dgnc_tty_preinit(void)
-{
- /*
- * Allocate a buffer for doing the copy from user space to
- * kernel space in dgnc_write(). We only use one buffer and
- * control access to it with a semaphore. If we are paging, we
- * are already in trouble so one buffer won't hurt much anyway.
- *
- * We are okay to sleep in the malloc, as this routine
- * is only called during module load, (not in interrupt context),
- * and with no locks held.
- */
- dgnc_TmpWriteBuf = kmalloc(WRITEBUFLEN, GFP_KERNEL);
-
- if (!dgnc_TmpWriteBuf)
- return -ENOMEM;
-
- return 0;
-}
+/* TTY Initialization/Cleanup Functions */
/*
* dgnc_tty_register()
@@ -194,7 +156,7 @@ int dgnc_tty_register(struct dgnc_board *brd)
brd->serial_driver->minor_start = 0;
brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
brd->serial_driver->subtype = SERIAL_TYPE_NORMAL;
- brd->serial_driver->init_termios = DgncDefaultTermios;
+ brd->serial_driver->init_termios = default_termios;
brd->serial_driver->driver_name = DRVSTR;
/*
@@ -233,7 +195,7 @@ int dgnc_tty_register(struct dgnc_board *brd)
brd->print_driver->minor_start = 0x80;
brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL;
brd->print_driver->subtype = SERIAL_TYPE_NORMAL;
- brd->print_driver->init_termios = DgncDefaultTermios;
+ brd->print_driver->init_termios = default_termios;
brd->print_driver->driver_name = DRVSTR;
/*
@@ -285,9 +247,7 @@ int dgnc_tty_init(struct dgnc_board *brd)
if (!brd)
return -ENXIO;
- /*
- * Initialize board structure elements.
- */
+ /* Initialize board structure elements. */
vaddr = brd->re_map_membase;
@@ -345,12 +305,10 @@ int dgnc_tty_init(struct dgnc_board *brd)
classp = tty_register_device(brd->serial_driver, i,
&ch->ch_bd->pdev->dev);
ch->ch_tun.un_sysfs = classp;
- dgnc_create_tty_sysfs(&ch->ch_tun, classp);
classp = tty_register_device(brd->print_driver, i,
&ch->ch_bd->pdev->dev);
ch->ch_pun.un_sysfs = classp;
- dgnc_create_tty_sysfs(&ch->ch_pun, classp);
}
}
@@ -365,17 +323,6 @@ err_free_channels:
}
/*
- * dgnc_tty_post_uninit()
- *
- * UnInitialize any global tty related data.
- */
-void dgnc_tty_post_uninit(void)
-{
- kfree(dgnc_TmpWriteBuf);
- dgnc_TmpWriteBuf = NULL;
-}
-
-/*
* dgnc_cleanup_tty()
*
* Uninitialize the TTY portion of this driver. Free all memory and
@@ -385,20 +332,14 @@ void dgnc_cleanup_tty(struct dgnc_board *brd)
{
int i = 0;
- for (i = 0; i < brd->nasync; i++) {
- if (brd->channels[i])
- dgnc_remove_tty_sysfs(brd->channels[i]->
- ch_tun.un_sysfs);
+ for (i = 0; i < brd->nasync; i++)
tty_unregister_device(brd->serial_driver, i);
- }
+
tty_unregister_driver(brd->serial_driver);
- for (i = 0; i < brd->nasync; i++) {
- if (brd->channels[i])
- dgnc_remove_tty_sysfs(brd->channels[i]->
- ch_pun.un_sysfs);
+ for (i = 0; i < brd->nasync; i++)
tty_unregister_device(brd->print_driver, i);
- }
+
tty_unregister_driver(brd->print_driver);
put_tty_driver(brd->serial_driver);
@@ -437,9 +378,7 @@ static void dgnc_wmove(struct channel_t *ch, char *buf, uint n)
}
if (n > 0) {
- /*
- * Move rest of data.
- */
+ /* Move rest of data. */
remain = n;
memcpy(ch->ch_wqueue + head, buf, remain);
head += remain;
@@ -509,9 +448,8 @@ void dgnc_input(struct channel_t *ch)
goto exit_unlock;
}
- /*
- * If we are throttled, simply don't read any data.
- */
+ /* If we are throttled, simply don't read any data. */
+
if (ch->ch_flags & CH_FORCED_STOPI)
goto exit_unlock;
@@ -624,10 +562,10 @@ exit_unlock:
tty_ldisc_deref(ld);
}
-/************************************************************************
+/*
* Determines when CARRIER changes state and takes appropriate
* action.
- ************************************************************************/
+ */
void dgnc_carrier(struct channel_t *ch)
{
int virt_carrier = 0;
@@ -645,28 +583,24 @@ void dgnc_carrier(struct channel_t *ch)
if (ch->ch_c_cflag & CLOCAL)
virt_carrier = 1;
- /*
- * Test for a VIRTUAL carrier transition to HIGH.
- */
+ /* Test for a VIRTUAL carrier transition to HIGH. */
+
if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
/*
* When carrier rises, wake any threads waiting
* for carrier in the open routine.
*/
-
if (waitqueue_active(&ch->ch_flags_wait))
wake_up_interruptible(&ch->ch_flags_wait);
}
- /*
- * Test for a PHYSICAL carrier transition to HIGH.
- */
+ /* Test for a PHYSICAL carrier transition to HIGH. */
+
if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
/*
* When carrier rises, wake any threads waiting
* for carrier in the open routine.
*/
-
if (waitqueue_active(&ch->ch_flags_wait))
wake_up_interruptible(&ch->ch_flags_wait);
}
@@ -704,9 +638,8 @@ void dgnc_carrier(struct channel_t *ch)
tty_hangup(ch->ch_pun.un_tty);
}
- /*
- * Make sure that our cached values reflect the current reality.
- */
+ /* Make sure that our cached values reflect the current reality. */
+
if (virt_carrier == 1)
ch->ch_flags |= CH_FCAR;
else
@@ -718,9 +651,8 @@ void dgnc_carrier(struct channel_t *ch)
ch->ch_flags &= ~CH_CD;
}
-/*
- * Assign the custom baud rate to the channel structure
- */
+/* Assign the custom baud rate to the channel structure */
+
static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate)
{
int testdiv;
@@ -854,6 +786,12 @@ void dgnc_check_queue_flow_control(struct channel_t *ch)
}
}
+static void dgnc_set_signal_low(struct channel_t *ch, const unsigned char sig)
+{
+ ch->ch_mostat &= ~(sig);
+ ch->ch_bd->bd_ops->assert_modem_signals(ch);
+}
+
void dgnc_wakeup_writes(struct channel_t *ch)
{
int qlen = 0;
@@ -864,9 +802,8 @@ void dgnc_wakeup_writes(struct channel_t *ch)
spin_lock_irqsave(&ch->ch_lock, flags);
- /*
- * If channel now has space, wake up anyone waiting on the condition.
- */
+ /* If channel now has space, wake up anyone waiting on the condition. */
+
qlen = ch->ch_w_head - ch->ch_w_tail;
if (qlen < 0)
qlen += WQUEUESIZE;
@@ -892,19 +829,15 @@ void dgnc_wakeup_writes(struct channel_t *ch)
* If RTS Toggle mode is on, whenever
* the queue and UART is empty, keep RTS low.
*/
- if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
- ch->ch_mostat &= ~(UART_MCR_RTS);
- ch->ch_bd->bd_ops->assert_modem_signals(ch);
- }
+ if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE)
+ dgnc_set_signal_low(ch, UART_MCR_RTS);
/*
* If DTR Toggle mode is on, whenever
* the queue and UART is empty, keep DTR low.
*/
- if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
- ch->ch_mostat &= ~(UART_MCR_DTR);
- ch->ch_bd->bd_ops->assert_modem_signals(ch);
- }
+ if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE)
+ dgnc_set_signal_low(ch, UART_MCR_DTR);
}
}
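The new dgnc_set_signal_low() helper above folds the repeated "clear a modem-control bit, then re-assert the signals" sequence into one call. The HUPCL path in dgnc_tty_close() further down still clears UART_MCR_DTR | UART_MCR_RTS inline; assuming the immediate re-assert is acceptable there, the same helper would cover it too, e.g.:

	/* sketch only: both MCR bits fit in the helper's unsigned char argument */
	if (ch->ch_c_cflag & HUPCL)
		dgnc_set_signal_low(ch, UART_MCR_DTR | UART_MCR_RTS);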
@@ -930,7 +863,7 @@ void dgnc_wakeup_writes(struct channel_t *ch)
spin_unlock_irqrestore(&ch->ch_lock, flags);
}
-struct dgnc_board *find_board_by_major(unsigned int major)
+static struct dgnc_board *find_board_by_major(unsigned int major)
{
int i;
@@ -948,16 +881,10 @@ struct dgnc_board *find_board_by_major(unsigned int major)
return NULL;
}
-/************************************************************************
- *
- * TTY Entry points and helper functions
- *
- ************************************************************************/
+/* TTY Entry points and helper functions */
+
+/* dgnc_tty_open() */
-/*
- * dgnc_tty_open()
- *
- */
static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
{
struct dgnc_board *brd;
@@ -1045,8 +972,8 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
* ch_flags_wait to wake us back up.
*/
rc = wait_event_interruptible(ch->ch_flags_wait,
- (((ch->ch_tun.un_flags | ch->ch_pun.un_flags) &
- UN_CLOSING) == 0));
+ (((ch->ch_tun.un_flags |
+ ch->ch_pun.un_flags) & UN_CLOSING) == 0));
/* If ret is non-zero, user ctrl-c'ed us */
if (rc)
@@ -1057,9 +984,8 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
/* Store our unit into driver_data, so we always have it available. */
tty->driver_data = un;
- /*
- * Initialize tty's
- */
+ /* Initialize tty's */
+
if (!(un->un_flags & UN_ISOPEN)) {
/* Store important variables. */
un->un_tty = tty;
@@ -1096,13 +1022,10 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
ch->ch_flags &= ~(CH_OPENING);
wake_up_interruptible(&ch->ch_flags_wait);
- /*
- * Initialize if neither terminal or printer is open.
- */
+ /* Initialize if neither terminal or printer is open. */
+
if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
- /*
- * Flush input queues.
- */
+ /* Flush input queues. */
ch->ch_r_head = 0;
ch->ch_r_tail = 0;
ch->ch_e_head = 0;
@@ -1138,16 +1061,13 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
brd->bd_ops->uart_init(ch);
}
- /*
- * Run param in case we changed anything
- */
+ /* Run param in case we changed anything */
+
brd->bd_ops->param(tty);
dgnc_carrier(ch);
- /*
- * follow protocol for opening port
- */
+ /* follow protocol for opening port */
spin_unlock_irqrestore(&ch->ch_lock, flags);
@@ -1248,9 +1168,8 @@ static int dgnc_block_til_ready(struct tty_struct *tty,
break;
}
- /*
- * Store the flags before we let go of channel lock
- */
+ /* Store the flags before we let go of channel lock */
+
if (sleep_on_un_flags)
old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
else
@@ -1269,12 +1188,13 @@ static int dgnc_block_til_ready(struct tty_struct *tty,
* from the current value.
*/
if (sleep_on_un_flags)
- retval = wait_event_interruptible(un->un_flags_wait,
- (old_flags != (ch->ch_tun.un_flags |
- ch->ch_pun.un_flags)));
+ retval = wait_event_interruptible
+ (un->un_flags_wait,
+ (old_flags != (ch->ch_tun.un_flags |
+ ch->ch_pun.un_flags)));
else
retval = wait_event_interruptible(ch->ch_flags_wait,
- (old_flags != ch->ch_flags));
+ (old_flags != ch->ch_flags));
/*
* We got woken up for some reason.
@@ -1304,10 +1224,8 @@ static void dgnc_tty_hangup(struct tty_struct *tty)
dgnc_tty_flush_buffer(tty);
}
-/*
- * dgnc_tty_close()
- *
- */
+/* dgnc_tty_close() */
+
static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
{
struct dgnc_board *bd;
@@ -1377,9 +1295,8 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
!(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
ch->ch_flags &= ~(CH_STOPI | CH_FORCED_STOPI);
- /*
- * turn off print device when closing print device.
- */
+ /* turn off print device when closing print device. */
+
if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
dgnc_wmove(ch, ch->ch_digi.digi_offstr,
(int)ch->ch_digi.digi_offlen);
@@ -1399,9 +1316,8 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
tty->closing = 0;
- /*
- * If we have HUPCL set, lower DTR and RTS
- */
+ /* If we have HUPCL set, lower DTR and RTS */
+
if (ch->ch_c_cflag & HUPCL) {
/* Drop RTS/DTR */
ch->ch_mostat &= ~(UART_MCR_DTR | UART_MCR_RTS);
@@ -1424,9 +1340,8 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
/* Turn off UART interrupts for this port */
ch->ch_bd->bd_ops->uart_off(ch);
} else {
- /*
- * turn off print device when closing print device.
- */
+ /* turn off print device when closing print device. */
+
if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
dgnc_wmove(ch, ch->ch_digi.digi_offstr,
(int)ch->ch_digi.digi_offlen);
@@ -1543,7 +1458,7 @@ static int dgnc_tty_write_room(struct tty_struct *tty)
int ret = 0;
unsigned long flags;
- if (!tty || !dgnc_TmpWriteBuf)
+ if (!tty)
return 0;
un = tty->driver_data;
@@ -1598,9 +1513,8 @@ static int dgnc_tty_write_room(struct tty_struct *tty)
*/
static int dgnc_tty_put_char(struct tty_struct *tty, unsigned char c)
{
- /*
- * Simply call tty_write.
- */
+ /* Simply call tty_write. */
+
dgnc_tty_write(tty, &c, 1);
return 1;
}
@@ -1623,7 +1537,7 @@ static int dgnc_tty_write(struct tty_struct *tty,
ushort tmask;
uint remain;
- if (!tty || !dgnc_TmpWriteBuf)
+ if (!tty)
return 0;
un = tty->driver_data;
@@ -1667,9 +1581,8 @@ static int dgnc_tty_write(struct tty_struct *tty,
*/
count = min(count, bufcount);
- /*
- * Bail if no space left.
- */
+ /* Bail if no space left. */
+
if (count <= 0)
goto exit_retry;
@@ -1712,9 +1625,7 @@ static int dgnc_tty_write(struct tty_struct *tty,
}
if (n > 0) {
- /*
- * Move rest of data.
- */
+ /* Move rest of data. */
remain = n;
memcpy(ch->ch_wqueue + head, buf, remain);
head += remain;
@@ -1749,9 +1660,7 @@ exit_retry:
return 0;
}
-/*
- * Return modem signals to ld.
- */
+/* Return modem signals to ld. */
static int dgnc_tty_tiocmget(struct tty_struct *tty)
{
@@ -1960,9 +1869,8 @@ static void dgnc_tty_send_xchar(struct tty_struct *tty, char c)
dev_dbg(tty->dev, "dgnc_tty_send_xchar finish\n");
}
-/*
- * Return modem signals to ld.
- */
+/* Return modem signals to ld. */
+
static inline int dgnc_get_mstat(struct channel_t *ch)
{
unsigned char mstat;
@@ -1994,9 +1902,8 @@ static inline int dgnc_get_mstat(struct channel_t *ch)
return result;
}
-/*
- * Return modem signals to ld.
- */
+/* Return modem signals to ld. */
+
static int dgnc_get_modem_info(struct channel_t *ch,
unsigned int __user *value)
{
@@ -2070,9 +1977,6 @@ static int dgnc_set_modem_info(struct channel_t *ch,
* dgnc_tty_digigeta()
*
* Ioctl to get the information for ditty.
- *
- *
- *
*/
static int dgnc_tty_digigeta(struct tty_struct *tty,
struct digi_t __user *retinfo)
@@ -2112,9 +2016,6 @@ static int dgnc_tty_digigeta(struct tty_struct *tty,
* dgnc_tty_digiseta()
*
* Ioctl to set the information for ditty.
- *
- *
- *
*/
static int dgnc_tty_digiseta(struct tty_struct *tty,
struct digi_t __user *new_info)
@@ -2145,9 +2046,8 @@ static int dgnc_tty_digiseta(struct tty_struct *tty,
spin_lock_irqsave(&ch->ch_lock, flags);
- /*
- * Handle transistions to and from RTS Toggle.
- */
+ /* Handle transitions to and from RTS Toggle. */
+
if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) &&
(new_digi.digi_flags & DIGI_RTS_TOGGLE))
ch->ch_mostat &= ~(UART_MCR_RTS);
@@ -2155,9 +2055,8 @@ static int dgnc_tty_digiseta(struct tty_struct *tty,
!(new_digi.digi_flags & DIGI_RTS_TOGGLE))
ch->ch_mostat |= (UART_MCR_RTS);
- /*
- * Handle transistions to and from DTR Toggle.
- */
+ /* Handle transitions to and from DTR Toggle. */
+
if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) &&
(new_digi.digi_flags & DIGI_DTR_TOGGLE))
ch->ch_mostat &= ~(UART_MCR_DTR);
@@ -2195,9 +2094,8 @@ static int dgnc_tty_digiseta(struct tty_struct *tty,
return 0;
}
-/*
- * dgnc_set_termios()
- */
+/* dgnc_set_termios() */
+
static void dgnc_tty_set_termios(struct tty_struct *tty,
struct ktermios *old_termios)
{
@@ -2428,11 +2326,18 @@ static void dgnc_tty_flush_buffer(struct tty_struct *tty)
spin_unlock_irqrestore(&ch->ch_lock, flags);
}
-/*****************************************************************************
- *
- * The IOCTL function and all of its helpers
+/*
+ * dgnc_wake_up_unit()
*
- *****************************************************************************/
+ * Wakes up processes waiting in the unit's (terminal/printer) wait queue
+ */
+static void dgnc_wake_up_unit(struct un_t *unit)
+{
+ unit->un_flags &= ~(UN_LOW | UN_EMPTY);
+ wake_up_interruptible(&unit->un_flags_wait);
+}
+
+/* The IOCTL function and all of its helpers */
/*
* dgnc_tty_ioctl()
@@ -2506,7 +2411,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
return 0;
case TCSBRKP:
- /* support for POSIX tcsendbreak()
+ /*
+ * support for POSIX tcsendbreak()
* According to POSIX.1 spec (7.2.2.1.2) breaks should be
* between 0.25 and 0.5 seconds so we'll ask for something
* in the middle: 0.375 seconds.
@@ -2583,9 +2489,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
spin_unlock_irqrestore(&ch->ch_lock, flags);
return dgnc_set_modem_info(ch, cmd, uarg);
- /*
- * Here are any additional ioctl's that we want to implement
- */
+ /* Here are any additional ioctls that we want to implement */
case TCFLSH:
/*
@@ -2615,17 +2519,11 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
ch->ch_w_head = ch->ch_w_tail;
ch_bd_ops->flush_uart_write(ch);
- if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY)) {
- ch->ch_tun.un_flags &=
- ~(UN_LOW | UN_EMPTY);
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
- }
-
- if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_pun.un_flags &=
- ~(UN_LOW | UN_EMPTY);
- wake_up_interruptible(&ch->ch_pun.un_flags_wait);
- }
+ if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY))
+ dgnc_wake_up_unit(&ch->ch_tun);
+
+ if (ch->ch_pun.un_flags & (UN_LOW | UN_EMPTY))
+ dgnc_wake_up_unit(&ch->ch_pun);
}
}
@@ -2705,9 +2603,10 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
case DIGI_LOOPBACK:
{
uint loopback = 0;
- /* Let go of locks when accessing user space,
+ /*
+ * Let go of locks when accessing user space,
* could sleep
- */
+ */
spin_unlock_irqrestore(&ch->ch_lock, flags);
rc = get_user(loopback, (unsigned int __user *)arg);
if (rc)
@@ -2749,7 +2648,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
* This ioctl allows insertion of a character into the front
* of any pending data to be transmitted.
*
- * This ioctl is to satify the "Send Character Immediate"
+ * This ioctl is to satisfy the "Send Character Immediate"
* call that the RealPort protocol spec requires.
*/
case DIGI_REALPORT_SENDIMMEDIATE:
@@ -2769,7 +2668,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
/*
* This ioctl returns all the current counts for the port.
*
- * This ioctl is to satify the "Line Error Counters"
+ * This ioctl is to satisfy the "Line Error Counters"
* call that the RealPort protocol spec requires.
*/
case DIGI_REALPORT_GETCOUNTERS:
@@ -2795,7 +2694,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
/*
* This ioctl returns all current events.
*
- * This ioctl is to satify the "Event Reporting"
+ * This ioctl is to satisfy the "Event Reporting"
* call that the RealPort protocol spec requires.
*/
case DIGI_REALPORT_GETEVENTS:
@@ -2831,23 +2730,23 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
spin_unlock_irqrestore(&ch->ch_lock, flags);
- /*
- * Get data from user first.
- */
+ /* Get data from user first. */
+
if (copy_from_user(&buf, uarg, sizeof(buf)))
return -EFAULT;
spin_lock_irqsave(&ch->ch_lock, flags);
- /*
- * Figure out how much data is in our RX and TX queues.
- */
+ /* Figure out how much data is in our RX and TX queues. */
+
buf.rxbuf = (ch->ch_r_head - ch->ch_r_tail) & RQUEUEMASK;
buf.txbuf = (ch->ch_w_head - ch->ch_w_tail) & WQUEUEMASK;
/*
- * Is the UART empty? Add that value to whats in our TX queue.
+ * Is the UART empty?
+ * Add that value to what's in our TX queue.
*/
+
count = buf.txbuf + ch_bd_ops->get_uart_bytes_left(ch);
/*
@@ -2867,9 +2766,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (buf.txbuf > tdist)
buf.txbuf = tdist;
- /*
- * Report whether our queue and UART TX are completely empty.
- */
+ /* Report whether our queue and UART TX are completely empty. */
+
if (count)
buf.txdone = 0;
else
diff --git a/drivers/staging/dgnc/dgnc_tty.h b/drivers/staging/dgnc/dgnc_tty.h
index 24c9a412211e..1ee0eeeb4730 100644
--- a/drivers/staging/dgnc/dgnc_tty.h
+++ b/drivers/staging/dgnc/dgnc_tty.h
@@ -21,11 +21,9 @@
int dgnc_tty_register(struct dgnc_board *brd);
void dgnc_tty_unregister(struct dgnc_board *brd);
-int dgnc_tty_preinit(void);
-int dgnc_tty_init(struct dgnc_board *);
+int dgnc_tty_init(struct dgnc_board *brd);
-void dgnc_tty_post_uninit(void);
-void dgnc_cleanup_tty(struct dgnc_board *);
+void dgnc_cleanup_tty(struct dgnc_board *brd);
void dgnc_input(struct channel_t *ch);
void dgnc_carrier(struct channel_t *ch);
diff --git a/drivers/staging/dgnc/digi.h b/drivers/staging/dgnc/digi.h
index 5b983e6f5ee2..ec2e3dda6119 100644
--- a/drivers/staging/dgnc/digi.h
+++ b/drivers/staging/dgnc/digi.h
@@ -17,16 +17,16 @@
#define __DIGI_H
#ifndef TIOCM_LE
-#define TIOCM_LE 0x01 /* line enable */
-#define TIOCM_DTR 0x02 /* data terminal ready */
-#define TIOCM_RTS 0x04 /* request to send */
-#define TIOCM_ST 0x08 /* secondary transmit */
-#define TIOCM_SR 0x10 /* secondary receive */
-#define TIOCM_CTS 0x20 /* clear to send */
-#define TIOCM_CAR 0x40 /* carrier detect */
-#define TIOCM_RNG 0x80 /* ring indicator */
-#define TIOCM_DSR 0x100 /* data set ready */
-#define TIOCM_RI TIOCM_RNG /* ring (alternate) */
+#define TIOCM_LE 0x01 /* line enable */
+#define TIOCM_DTR 0x02 /* data terminal ready */
+#define TIOCM_RTS 0x04 /* request to send */
+#define TIOCM_ST 0x08 /* secondary transmit */
+#define TIOCM_SR 0x10 /* secondary receive */
+#define TIOCM_CTS 0x20 /* clear to send */
+#define TIOCM_CAR 0x40 /* carrier detect */
+#define TIOCM_RNG 0x80 /* ring indicator */
+#define TIOCM_DSR 0x100 /* data set ready */
+#define TIOCM_RI TIOCM_RNG /* ring (alternate) */
#define TIOCM_CD TIOCM_CAR /* carrier detect (alt) */
#endif
@@ -40,72 +40,71 @@
#define TIOCMBIS (('d' << 8) | 255) /* set modem ctrl state */
#endif
-#define DIGI_GETA (('e' << 8) | 94) /* Read params */
-#define DIGI_SETA (('e' << 8) | 95) /* Set params */
-#define DIGI_SETAW (('e' << 8) | 96) /* Drain & set params */
+#define DIGI_GETA (('e' << 8) | 94) /* Read params */
+#define DIGI_SETA (('e' << 8) | 95) /* Set params */
+#define DIGI_SETAW (('e' << 8) | 96) /* Drain & set params */
#define DIGI_SETAF (('e' << 8) | 97) /* Drain, flush & set params */
-#define DIGI_GET_NI_INFO (('d' << 8) | 250) /* Non-intelligent state info */
-#define DIGI_LOOPBACK (('d' << 8) | 252) /*
- * Enable/disable UART
- * internal loopback
- */
-#define DIGI_FAST 0x0002 /* Fast baud rates */
-#define RTSPACE 0x0004 /* RTS input flow control */
-#define CTSPACE 0x0008 /* CTS output flow control */
+#define DIGI_GET_NI_INFO (('d' << 8) | 250) /* Non-intelligent state info */
+#define DIGI_LOOPBACK (('d' << 8) | 252) /*
+ * Enable/disable UART
+ * internal loopback
+ */
+#define DIGI_FAST 0x0002 /* Fast baud rates */
+#define RTSPACE 0x0004 /* RTS input flow control */
+#define CTSPACE 0x0008 /* CTS output flow control */
#define DIGI_COOK 0x0080 /* Cooked processing done in FEP */
-#define DIGI_FORCEDCD 0x0100 /* Force carrier */
-#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */
+#define DIGI_FORCEDCD 0x0100 /* Force carrier */
+#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */
#define DIGI_PRINTER 0x0800 /* Hold port open for flow cntrl*/
-#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR Toggle */
-#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */
-#define DIGI_PLEN 28 /* String length */
-#define DIGI_TSIZ 10 /* Terminal string len */
+#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR Toggle */
+#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */
+#define DIGI_PLEN 28 /* String length */
+#define DIGI_TSIZ 10 /* Terminal string len */
-/************************************************************************
+/*
* Structure used with ioctl commands for DIGI parameters.
- ************************************************************************/
+ */
struct digi_t {
- unsigned short digi_flags; /* Flags (see above) */
- unsigned short digi_maxcps; /* Max printer CPS */
+ unsigned short digi_flags; /* Flags (see above) */
+ unsigned short digi_maxcps; /* Max printer CPS */
unsigned short digi_maxchar; /* Max chars in print queue */
- unsigned short digi_bufsize; /* Buffer size */
- unsigned char digi_onlen; /* Length of ON string */
+ unsigned short digi_bufsize; /* Buffer size */
+ unsigned char digi_onlen; /* Length of ON string */
unsigned char digi_offlen; /* Length of OFF string */
- char digi_onstr[DIGI_PLEN]; /* Printer on string */
- char digi_offstr[DIGI_PLEN]; /* Printer off string */
- char digi_term[DIGI_TSIZ]; /* terminal string */
+ char digi_onstr[DIGI_PLEN]; /* Printer on string */
+ char digi_offstr[DIGI_PLEN]; /* Printer off string */
+ char digi_term[DIGI_TSIZ]; /* terminal string */
};
-/************************************************************************
- * Structure to get driver status information
- ************************************************************************/
+/* Structure to get driver status information */
+
struct digi_dinfo {
- unsigned int dinfo_nboards; /* # boards configured */
+ unsigned int dinfo_nboards; /* # boards configured */
char dinfo_reserved[12]; /* for future expansion */
- char dinfo_version[16]; /* driver version */
+ char dinfo_version[16]; /* driver version */
};
-#define DIGI_GETDD (('d' << 8) | 248) /* get driver info */
+#define DIGI_GETDD (('d' << 8) | 248) /* get driver info */
-/************************************************************************
+/*
* Structure used with ioctl commands for per-board information
*
* physsize and memsize differ when board has "windowed" memory
- ************************************************************************/
+ */
struct digi_info {
- unsigned int info_bdnum; /* Board number (0 based) */
- unsigned int info_ioport; /* io port address */
- unsigned int info_physaddr; /* memory address */
+ unsigned int info_bdnum; /* Board number (0 based) */
+ unsigned int info_ioport; /* io port address */
+ unsigned int info_physaddr; /* memory address */
unsigned int info_physsize; /* Size of host mem window */
unsigned int info_memsize; /* Amount of dual-port mem */
- /* on board */
- unsigned short info_bdtype; /* Board type */
- unsigned short info_nports; /* number of ports */
- char info_bdstate; /* board state */
- char info_reserved[7]; /* for future expansion */
+ /* on board */
+ unsigned short info_bdtype; /* Board type */
+ unsigned short info_nports; /* number of ports */
+ char info_bdstate; /* board state */
+ char info_reserved[7]; /* for future expansion */
};
-#define DIGI_GETBD (('d' << 8) | 249) /* get board info */
+#define DIGI_GETBD (('d' << 8) | 249) /* get board info */
struct digi_getbuffer /* Struct for holding buffer use counts */
{
@@ -139,7 +138,7 @@ struct digi_getcounter {
#define DIGI_REALPORT_GETEVENTS (('e' << 8) | 111)
#define EV_OPU 0x0001 /* !<Output paused by client */
-#define EV_OPS 0x0002 /* !<Output paused by reqular sw flowctrl */
+#define EV_OPS 0x0002 /* !<Output paused by regular sw flowctrl */
#define EV_IPU 0x0010 /* !<Input paused unconditionally by user */
#define EV_IPS 0x0020 /* !<Input paused by high/low water marks */
#define EV_TXB 0x0040 /* !<Transmit break pending */
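For reference, DIGI_GETA/DIGI_SETA above carry a struct digi_t between user space and dgnc_tty_digigeta()/dgnc_tty_digiseta(). A hedged user-space sketch (the device node name is illustrative, following the "ttyn<board><port>" form reported by the driver):

/* user-space sketch, not part of the kernel tree */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "digi.h"

int main(void)
{
	struct digi_t dg;
	int fd = open("/dev/ttyn1a", O_RDWR | O_NOCTTY);

	if (fd < 0 || ioctl(fd, DIGI_GETA, &dg) < 0)
		return 1;

	dg.digi_flags |= DIGI_RTS_TOGGLE;	/* example: enable RTS toggle */
	if (ioctl(fd, DIGI_SETA, &dg) < 0)
		return 1;

	close(fd);
	return 0;
}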
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index c3e298843b43..3f42fa8b0bf3 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -153,7 +153,6 @@ static void _nbu2ss_ep0_complete(struct usb_ep *_ep, struct usb_request *_req)
udc = (struct nbu2ss_udc *)_req->context;
p_ctrl = &udc->ctrl;
if ((p_ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
-
if (p_ctrl->bRequest == USB_REQ_SET_FEATURE) {
/*-------------------------------------------------*/
/* SET_FEATURE */
@@ -263,7 +262,7 @@ static int _nbu2ss_ep_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
}
_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
- _nbu2ss_endpoint_toggle_reset(udc, (ep->epnum|ep->direct));
+ _nbu2ss_endpoint_toggle_reset(udc, (ep->epnum | ep->direct));
if (ep->direct == USB_DIR_OUT) {
/*---------------------------------------------------------*/
@@ -460,7 +459,7 @@ static void _nbu2ss_ep_in_end(
if (length)
_nbu2ss_writel(&preg->EP_REGS[num].EP_WRITE, data32);
- data = ((((u32)length) << 5) & EPn_DW) | EPn_DEND;
+ data = (((length) << 5) & EPn_DW) | EPn_DEND;
_nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data);
_nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, EPn_AUTO);
@@ -753,7 +752,6 @@ static int _nbu2ss_ep0_out_transfer(
/* Receive data confirmation */
iRecvLength = _nbu2ss_readl(&udc->p_regs->EP0_LENGTH) & EP0_LDATA;
if (iRecvLength != 0) {
-
fRcvZero = 0;
iRemainSize = req->req.length - req->req.actual;
@@ -928,9 +926,8 @@ static int _nbu2ss_epn_out_pio(
req->req.actual += result;
- if ((req->req.actual == req->req.length)
- || ((req->req.actual % ep->ep.maxpacket) != 0)) {
-
+ if ((req->req.actual == req->req.length) ||
+ ((req->req.actual % ep->ep.maxpacket) != 0)) {
result = 0;
}
@@ -956,9 +953,8 @@ static int _nbu2ss_epn_out_data(
iBufSize = min((req->req.length - req->req.actual), data_size);
- if ((ep->ep_type != USB_ENDPOINT_XFER_INT)
- && (req->req.dma != 0)
- && (iBufSize >= sizeof(u32))) {
+ if ((ep->ep_type != USB_ENDPOINT_XFER_INT) && (req->req.dma != 0) &&
+ (iBufSize >= sizeof(u32))) {
nret = _nbu2ss_out_dma(udc, req, num, iBufSize);
} else {
iBufSize = min_t(u32, iBufSize, ep->ep.maxpacket);
@@ -999,9 +995,8 @@ static int _nbu2ss_epn_out_transfer(
}
}
} else {
- if ((req->req.actual == req->req.length)
- || ((req->req.actual % ep->ep.maxpacket) != 0)) {
-
+ if ((req->req.actual == req->req.length) ||
+ ((req->req.actual % ep->ep.maxpacket) != 0)) {
result = 0;
}
}
@@ -1170,9 +1165,8 @@ static int _nbu2ss_epn_in_data(
num = ep->epnum - 1;
- if ((ep->ep_type != USB_ENDPOINT_XFER_INT)
- && (req->req.dma != 0)
- && (data_size >= sizeof(u32))) {
+ if ((ep->ep_type != USB_ENDPOINT_XFER_INT) && (req->req.dma != 0) &&
+ (data_size >= sizeof(u32))) {
nret = _nbu2ss_in_dma(udc, ep, req, num, data_size);
} else {
data_size = min_t(u32, data_size, ep->ep.maxpacket);
@@ -1557,7 +1551,6 @@ static void _nbu2ss_epn_set_stall(
for (limit_cnt = 0
; limit_cnt < IN_DATA_EMPTY_COUNT
; limit_cnt++) {
-
regdata = _nbu2ss_readl(
&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
@@ -1582,11 +1575,8 @@ static int std_req_get_status(struct nbu2ss_udc *udc)
u8 ep_adrs;
int result = -EINVAL;
- if ((udc->ctrl.wValue != 0x0000)
- || (direction != USB_DIR_IN)) {
-
+ if ((udc->ctrl.wValue != 0x0000) || (direction != USB_DIR_IN))
return result;
- }
length = min_t(u16, udc->ctrl.wLength, sizeof(status_data));
@@ -1852,7 +1842,7 @@ static inline void _nbu2ss_ep0_int(struct nbu2ss_udc *udc)
status = _nbu2ss_readl(&udc->p_regs->EP0_STATUS);
intr = status & EP0_STATUS_RW_BIT;
- _nbu2ss_writel(&udc->p_regs->EP0_STATUS, ~(u32)intr);
+ _nbu2ss_writel(&udc->p_regs->EP0_STATUS, ~intr);
status &= (SETUP_INT | EP0_IN_INT | EP0_OUT_INT
| STG_END_INT | EP0_OUT_NULL_INT);
@@ -1897,9 +1887,8 @@ static inline void _nbu2ss_ep0_int(struct nbu2ss_udc *udc)
break;
case EP0_OUT_STATUS_PAHSE:
- if ((status & STG_END_INT)
- || (status & SETUP_INT)
- || (status & EP0_OUT_NULL_INT)) {
+ if ((status & STG_END_INT) || (status & SETUP_INT) ||
+ (status & EP0_OUT_NULL_INT)) {
status &= ~(STG_END_INT
| EP0_OUT_INT
| EP0_OUT_NULL_INT);
@@ -1982,7 +1971,6 @@ static inline void _nbu2ss_epn_in_int(
} else {
if (req->zero && ((req->req.actual % ep->ep.maxpacket) == 0)) {
-
status =
_nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
@@ -2127,7 +2115,7 @@ static inline void _nbu2ss_epn_int(struct nbu2ss_udc *udc, u32 epnum)
status = _nbu2ss_readl(&udc->p_regs->EP_REGS[num].EP_STATUS);
/* Interrupt Clear */
- _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_STATUS, ~(u32)status);
+ _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_STATUS, ~status);
req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
if (!req) {
@@ -2330,7 +2318,6 @@ static inline void _nbu2ss_check_vbus(struct nbu2ss_udc *udc)
/* VBUS ON Check*/
reg_dt = gpio_get_value(VBUS_VALUE);
if (reg_dt == 0) {
-
udc->linux_suspended = 0;
_nbu2ss_reset_controller(udc);
@@ -2502,7 +2489,6 @@ static irqreturn_t _nbu2ss_udc_irq(int irq, void *_udc)
int_bit = status >> 8;
for (epnum = 0; epnum < NUM_ENDPOINTS; epnum++) {
-
if (0x01 & int_bit)
_nbu2ss_ep_int(udc, epnum);
@@ -2546,9 +2532,8 @@ static int nbu2ss_ep_enable(
}
ep_type = usb_endpoint_type(desc);
- if ((ep_type == USB_ENDPOINT_XFER_CONTROL)
- || (ep_type == USB_ENDPOINT_XFER_ISOC)) {
-
+ if ((ep_type == USB_ENDPOINT_XFER_CONTROL) ||
+ (ep_type == USB_ENDPOINT_XFER_ISOC)) {
pr_err(" *** %s, bat bmAttributes\n", __func__);
return -EINVAL;
}
@@ -2557,9 +2542,7 @@ static int nbu2ss_ep_enable(
if (udc->vbus_active == 0)
return -ESHUTDOWN;
- if ((!udc->driver)
- || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
-
+ if ((!udc->driver) || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
dev_err(ep->udc->dev, " *** %s, udc !!\n", __func__);
return -ESHUTDOWN;
}
@@ -2674,10 +2657,7 @@ static int nbu2ss_ep_queue(
}
req = container_of(_req, struct nbu2ss_req, req);
- if (unlikely
- (!_req->complete || !_req->buf
- || !list_empty(&req->queue))) {
-
+ if (unlikely(!_req->complete || !_req->buf || !list_empty(&req->queue))) {
if (!_req->complete)
pr_err("udc: %s --- !_req->complete\n", __func__);
@@ -2736,7 +2716,6 @@ static int nbu2ss_ep_queue(
list_add_tail(&req->queue, &ep->queue);
if (bflag && !ep->stalled) {
-
result = _nbu2ss_start_transfer(udc, ep, req, FALSE);
if (result < 0) {
dev_err(udc->dev, " *** %s, result = %d\n", __func__,
@@ -2938,7 +2917,7 @@ static void nbu2ss_ep_fifo_flush(struct usb_ep *_ep)
}
/*-------------------------------------------------------------------------*/
-static struct usb_ep_ops nbu2ss_ep_ops = {
+static const struct usb_ep_ops nbu2ss_ep_ops = {
.enable = nbu2ss_ep_enable,
.disable = nbu2ss_ep_disable,
@@ -2979,9 +2958,7 @@ static int nbu2ss_gad_get_frame(struct usb_gadget *pgadget)
if (data == 0)
return -EINVAL;
- data = _nbu2ss_readl(&udc->p_regs->USB_ADDRESS) & FRAME;
-
- return data;
+ return _nbu2ss_readl(&udc->p_regs->USB_ADDRESS) & FRAME;
}
/*-------------------------------------------------------------------------*/
@@ -3307,8 +3284,8 @@ static int nbu2ss_drv_remove(struct platform_device *pdev)
for (i = 0; i < NUM_ENDPOINTS; i++) {
ep = &udc->ep[i];
if (ep->virt_buf)
- dma_free_coherent(NULL, PAGE_SIZE,
- (void *)ep->virt_buf, ep->phys_buf);
+ dma_free_coherent(NULL, PAGE_SIZE, (void *)ep->virt_buf,
+ ep->phys_buf);
}
/* Interrupt Handler - Release */
diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
index 7561385761e9..a6e3af74a904 100644
--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
+++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
@@ -264,6 +264,39 @@ construct_line_bitmap(struct fbtft_par *par, u8 *dest, signed short *src,
}
}
+static void iterate_diffusion_matrix(u32 xres, u32 yres, int x,
+ int y, signed short *convert_buf,
+ signed short pixel, signed short error)
+{
+ u16 i, j;
+
+ /* diffusion matrix row */
+ for (i = 0; i < DIFFUSING_MATRIX_WIDTH; ++i)
+ /* diffusion matrix column */
+ for (j = 0; j < DIFFUSING_MATRIX_HEIGHT; ++j) {
+ signed short *write_pos;
+ signed char coeff;
+
+ /* skip pixels out of zone */
+ if (x + i < 0 || x + i >= xres || y + j >= yres)
+ continue;
+ write_pos = &convert_buf[(y + j) * xres + x + i];
+ coeff = diffusing_matrix[i][j];
+ if (coeff == -1)
+ /* pixel itself */
+ *write_pos = pixel;
+ else {
+ signed short p = *write_pos + error * coeff;
+
+ if (p > WHITE)
+ p = WHITE;
+ if (p < BLACK)
+ p = BLACK;
+ *write_pos = p;
+ }
+ }
+}
+
static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
{
u16 *vmem16 = (u16 *)par->info->screen_buffer;
@@ -303,7 +336,6 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
signed short error_b = pixel - BLACK;
signed short error_w = pixel - WHITE;
signed short error;
- u16 i, j;
/* what color close? */
if (abs(error_b) >= abs(error_w)) {
@@ -318,36 +350,10 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
error /= 8;
- /* diffusion matrix row */
- for (i = 0; i < DIFFUSING_MATRIX_WIDTH; ++i)
- /* diffusion matrix column */
- for (j = 0; j < DIFFUSING_MATRIX_HEIGHT; ++j) {
- signed short *write_pos;
- signed char coeff;
-
- /* skip pixels out of zone */
- if (x + i < 0 ||
- x + i >= par->info->var.xres
- || y + j >= par->info->var.yres)
- continue;
- write_pos = &convert_buf[
- (y + j) * par->info->var.xres +
- x + i];
- coeff = diffusing_matrix[i][j];
- if (coeff == -1)
- /* pixel itself */
- *write_pos = pixel;
- else {
- signed short p = *write_pos +
- error * coeff;
-
- if (p > WHITE)
- p = WHITE;
- if (p < BLACK)
- p = BLACK;
- *write_pos = p;
- }
- }
+ iterate_diffusion_matrix(par->info->var.xres,
+ par->info->var.yres,
+ x, y, convert_buf,
+ pixel, error);
}
/* 1 string = 2 pages */
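The iterate_diffusion_matrix() helper introduced in this hunk is ordinary error-diffusion dithering: each source pixel is snapped to BLACK or WHITE, and the quantisation error (already divided by 8 by the caller) is spread over neighbouring pixels weighted by diffusing_matrix[i][j], with a coefficient of -1 marking the pixel itself. A self-contained one-dimensional sketch of the same idea (weights and buffer layout are illustrative, not the driver's matrix):

/* Standalone sketch: push each pixel's quantisation error onto its right
 * neighbour; clamp so later pixels stay inside the BLACK..WHITE range. */
#include <stdlib.h>

#define BLACK	0
#define WHITE	255

static void dither_row(int *row, int len)
{
	int x;

	for (x = 0; x < len; x++) {
		int old = row[x];
		int out = (abs(old - BLACK) >= abs(old - WHITE)) ? WHITE : BLACK;
		int err = old - out;

		row[x] = out;
		if (x + 1 < len) {
			row[x + 1] += err;
			if (row[x + 1] > WHITE)
				row[x + 1] = WHITE;
			if (row[x + 1] < BLACK)
				row[x + 1] = BLACK;
		}
	}
}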
diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
index c31e2e051d4a..19e33bab9cac 100644
--- a/drivers/staging/fbtft/fb_ili9325.c
+++ b/drivers/staging/fbtft/fb_ili9325.c
@@ -33,26 +33,23 @@
"04 16 2 7 6 3 2 1 7 7"
static unsigned int bt = 6; /* VGL=Vci*4 , VGH=Vci*4 */
-module_param(bt, uint, 0);
+module_param(bt, uint, 0000);
MODULE_PARM_DESC(bt, "Sets the factor used in the step-up circuits");
static unsigned int vc = 0x03; /* Vci1=Vci*0.80 */
-module_param(vc, uint, 0);
-MODULE_PARM_DESC(vc,
-"Sets the ratio factor of Vci to generate the reference voltages Vci1");
+module_param(vc, uint, 0000);
+MODULE_PARM_DESC(vc, "Sets the ratio factor of Vci to generate the reference voltages Vci1");
static unsigned int vrh = 0x0d; /* VREG1OUT=Vci*1.85 */
-module_param(vrh, uint, 0);
-MODULE_PARM_DESC(vrh,
-"Set the amplifying rate (1.6 ~ 1.9) of Vci applied to output the VREG1OUT");
+module_param(vrh, uint, 0000);
+MODULE_PARM_DESC(vrh, "Set the amplifying rate (1.6 ~ 1.9) of Vci applied to output the VREG1OUT");
static unsigned int vdv = 0x12; /* VCOMH amplitude=VREG1OUT*0.98 */
-module_param(vdv, uint, 0);
-MODULE_PARM_DESC(vdv,
-"Select the factor of VREG1OUT to set the amplitude of Vcom");
+module_param(vdv, uint, 0000);
+MODULE_PARM_DESC(vdv, "Select the factor of VREG1OUT to set the amplitude of Vcom");
static unsigned int vcm = 0x0a; /* VCOMH=VREG1OUT*0.735 */
-module_param(vcm, uint, 0);
+module_param(vcm, uint, 0000);
MODULE_PARM_DESC(vcm, "Set the internal VcomH voltage");
/*
diff --git a/drivers/staging/fbtft/fb_ili9481.c b/drivers/staging/fbtft/fb_ili9481.c
index 242adb3859bd..4e75f5abe2f9 100644
--- a/drivers/staging/fbtft/fb_ili9481.c
+++ b/drivers/staging/fbtft/fb_ili9481.c
@@ -27,7 +27,7 @@
#define WIDTH 320
#define HEIGHT 480
-static int default_init_sequence[] = {
+static s16 default_init_sequence[] = {
/* SLP_OUT - Sleep out */
-1, MIPI_DCS_EXIT_SLEEP_MODE,
-2, 50,
diff --git a/drivers/staging/fbtft/fb_ili9486.c b/drivers/staging/fbtft/fb_ili9486.c
index fa38d8885f0b..f4b314265f9e 100644
--- a/drivers/staging/fbtft/fb_ili9486.c
+++ b/drivers/staging/fbtft/fb_ili9486.c
@@ -26,7 +26,7 @@
#define HEIGHT 480
/* this init sequence matches PiScreen */
-static int default_init_sequence[] = {
+static s16 default_init_sequence[] = {
/* Interface Mode Control */
-1, 0xb0, 0x0,
-1, MIPI_DCS_EXIT_SLEEP_MODE,
diff --git a/drivers/staging/fbtft/fb_s6d02a1.c b/drivers/staging/fbtft/fb_s6d02a1.c
index 774b0ff69e6d..eb712aa0d692 100644
--- a/drivers/staging/fbtft/fb_s6d02a1.c
+++ b/drivers/staging/fbtft/fb_s6d02a1.c
@@ -24,7 +24,7 @@
#define DRVNAME "fb_s6d02a1"
-static int default_init_sequence[] = {
+static s16 default_init_sequence[] = {
-1, 0xf0, 0x5a, 0x5a,
diff --git a/drivers/staging/fbtft/fb_st7735r.c b/drivers/staging/fbtft/fb_st7735r.c
index 6670f2bb62ec..710b74bbba97 100644
--- a/drivers/staging/fbtft/fb_st7735r.c
+++ b/drivers/staging/fbtft/fb_st7735r.c
@@ -25,7 +25,7 @@
#define DEFAULT_GAMMA "0F 1A 0F 18 2F 28 20 22 1F 1B 23 37 00 07 02 10\n" \
"0F 1B 0F 17 33 2C 29 2E 30 30 39 3F 00 07 03 10"
-static int default_init_sequence[] = {
+static s16 default_init_sequence[] = {
-1, MIPI_DCS_SOFT_RESET,
-2, 150, /* delay */
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 587f68aa466c..bbe89c9c4fb9 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -253,7 +253,8 @@ static int fbtft_backlight_update_status(struct backlight_device *bd)
"%s: polarity=%d, power=%d, fb_blank=%d\n",
__func__, polarity, bd->props.power, bd->props.fb_blank);
- if ((bd->props.power == FB_BLANK_UNBLANK) && (bd->props.fb_blank == FB_BLANK_UNBLANK))
+ if ((bd->props.power == FB_BLANK_UNBLANK) &&
+ (bd->props.fb_blank == FB_BLANK_UNBLANK))
gpio_set_value(par->gpio.led[0], polarity);
else
gpio_set_value(par->gpio.led[0], !polarity);
@@ -299,7 +300,8 @@ void fbtft_register_backlight(struct fbtft_par *par)
bl_props.state |= BL_CORE_DRIVER1;
bd = backlight_device_register(dev_driver_string(par->info->device),
- par->info->device, par, &fbtft_bl_ops, &bl_props);
+ par->info->device, par,
+ &fbtft_bl_ops, &bl_props);
if (IS_ERR(bd)) {
dev_err(par->info->device,
"cannot register backlight device (%ld)\n",
@@ -350,9 +352,11 @@ static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line,
bool timeit = false;
int ret = 0;
- if (unlikely(par->debug & (DEBUG_TIME_FIRST_UPDATE | DEBUG_TIME_EACH_UPDATE))) {
+ if (unlikely(par->debug & (DEBUG_TIME_FIRST_UPDATE |
+ DEBUG_TIME_EACH_UPDATE))) {
if ((par->debug & DEBUG_TIME_EACH_UPDATE) ||
- ((par->debug & DEBUG_TIME_FIRST_UPDATE) && !par->first_update_done)) {
+ ((par->debug & DEBUG_TIME_FIRST_UPDATE) &&
+ !par->first_update_done)) {
ts_start = ktime_get();
timeit = true;
}
@@ -361,15 +365,17 @@ static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line,
/* Sanity checks */
if (start_line > end_line) {
dev_warn(par->info->device,
- "%s: start_line=%u is larger than end_line=%u. Shouldn't happen, will do full display update\n",
- __func__, start_line, end_line);
+ "%s: start_line=%u is larger than end_line=%u. Shouldn't happen, will do full display update\n",
+ __func__, start_line, end_line);
start_line = 0;
end_line = par->info->var.yres - 1;
}
- if (start_line > par->info->var.yres - 1 || end_line > par->info->var.yres - 1) {
+ if (start_line > par->info->var.yres - 1 ||
+ end_line > par->info->var.yres - 1) {
dev_warn(par->info->device,
"%s: start_line=%u or end_line=%u is larger than max=%d. Shouldn't happen, will do full display update\n",
- __func__, start_line, end_line, par->info->var.yres - 1);
+ __func__, start_line,
+ end_line, par->info->var.yres - 1);
start_line = 0;
end_line = par->info->var.yres - 1;
}
@@ -660,12 +666,13 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
unsigned int bpp = display->bpp;
unsigned int fps = display->fps;
int vmem_size, i;
- int *init_sequence = display->init_sequence;
+ s16 *init_sequence = display->init_sequence;
char *gamma = display->gamma;
unsigned long *gamma_curves = NULL;
/* sanity check */
- if (display->gamma_num * display->gamma_len > FBTFT_GAMMA_MAX_VALUES_TOTAL) {
+ if (display->gamma_num * display->gamma_len >
+ FBTFT_GAMMA_MAX_VALUES_TOTAL) {
dev_err(dev, "FBTFT_GAMMA_MAX_VALUES_TOTAL=%d is exceeded\n",
FBTFT_GAMMA_MAX_VALUES_TOTAL);
return NULL;
@@ -832,11 +839,13 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
#ifdef CONFIG_HAS_DMA
if (dma) {
dev->coherent_dma_mask = ~0;
- txbuf = dmam_alloc_coherent(dev, txbuflen, &par->txbuf.dma, GFP_DMA);
+ txbuf = dmam_alloc_coherent(dev, txbuflen,
+ &par->txbuf.dma, GFP_DMA);
} else
#endif
{
- txbuf = devm_kzalloc(par->info->device, txbuflen, GFP_KERNEL);
+ txbuf = devm_kzalloc(par->info->device,
+ txbuflen, GFP_KERNEL);
}
if (!txbuf)
goto alloc_fail;
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 89c4b5b76ce6..aacdde92cc2e 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -124,7 +124,7 @@ struct fbtft_display {
unsigned int bpp;
unsigned int fps;
int txbuflen;
- int *init_sequence;
+ s16 *init_sequence;
char *gamma;
int gamma_num;
int gamma_len;
@@ -229,7 +229,7 @@ struct fbtft_par {
int led[16];
int aux[16];
} gpio;
- int *init_sequence;
+ s16 *init_sequence;
struct {
struct mutex lock;
unsigned long *curves;
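
The init-sequence arrays touched throughout this series use a small in-band encoding: -1 introduces a controller command followed by its data bytes, -2 introduces a delay in milliseconds, and -3 terminates the sequence. Every payload value fits in 16 bits, which is why the storage type can shrink from int to s16 in fbtft.h and in each display driver. Purely as an illustrative sketch (run_init_sequence(), write_reg() and write_data() are hypothetical stand-ins, not the driver's real parser; mdelay() is from <linux/delay.h>), a well-formed sequence could be walked like this:

static void run_init_sequence(const s16 *seq)
{
	int i = 0;

	while (seq[i] != -3) {			/* -3 ends the sequence */
		if (seq[i] == -1) {		/* -1: command, then data bytes */
			write_reg(seq[++i]);
			for (i++; seq[i] >= 0; i++)
				write_data(seq[i]);
		} else {			/* -2: delay in milliseconds */
			mdelay(seq[++i]);
			i++;
		}
	}
}
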
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
index e9211831b6a1..de46f8d988d2 100644
--- a/drivers/staging/fbtft/fbtft_device.c
+++ b/drivers/staging/fbtft/fbtft_device.c
@@ -96,9 +96,9 @@ static unsigned int buswidth = 8;
module_param(buswidth, uint, 0);
MODULE_PARM_DESC(buswidth, "Display bus width, used with the custom argument");
-static int init[FBTFT_MAX_INIT_SEQUENCE];
+static s16 init[FBTFT_MAX_INIT_SEQUENCE];
static int init_num;
-module_param_array(init, int, &init_num, 0);
+module_param_array(init, short, &init_num, 0);
MODULE_PARM_DESC(init, "Init sequence, used with the custom argument");
static unsigned long debug;
@@ -131,7 +131,7 @@ static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
"D0 00 14 15 13 2C 42 43 4E 09 16 14 18 21\n" \
"D0 00 14 15 13 0B 43 55 53 0C 17 14 23 20"
-static int cberry28_init_sequence[] = {
+static s16 cberry28_init_sequence[] = {
/* turn off sleep mode */
-1, MIPI_DCS_EXIT_SLEEP_MODE,
-2, 120,
@@ -180,7 +180,7 @@ static int cberry28_init_sequence[] = {
-3,
};
-static int hy28b_init_sequence[] = {
+static s16 hy28b_init_sequence[] = {
-1, 0x00e7, 0x0010, -1, 0x0000, 0x0001,
-1, 0x0001, 0x0100, -1, 0x0002, 0x0700,
-1, 0x0003, 0x1030, -1, 0x0004, 0x0000,
@@ -211,7 +211,7 @@ static int hy28b_init_sequence[] = {
"04 1F 4 7 7 0 7 7 6 0\n" \
"0F 00 1 7 4 0 0 0 6 7"
-static int pitft_init_sequence[] = {
+static s16 pitft_init_sequence[] = {
-1, MIPI_DCS_SOFT_RESET,
-2, 5,
-1, MIPI_DCS_SET_DISPLAY_OFF,
@@ -242,7 +242,7 @@ static int pitft_init_sequence[] = {
-3
};
-static int waveshare32b_init_sequence[] = {
+static s16 waveshare32b_init_sequence[] = {
-1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02,
-1, 0xCF, 0x00, 0xC1, 0x30,
-1, 0xE8, 0x85, 0x00, 0x78,
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
index ce0d254148e4..ded10718712b 100644
--- a/drivers/staging/fbtft/flexfb.c
+++ b/drivers/staging/fbtft/flexfb.c
@@ -38,9 +38,9 @@ static unsigned int height;
module_param(height, uint, 0);
MODULE_PARM_DESC(height, "Display height");
-static int init[512];
+static s16 init[512];
static int init_num;
-module_param_array(init, int, &init_num, 0);
+module_param_array(init, short, &init_num, 0);
MODULE_PARM_DESC(init, "Init sequence");
static unsigned int setaddrwin;
@@ -63,68 +63,316 @@ static bool latched;
module_param(latched, bool, 0);
MODULE_PARM_DESC(latched, "Use with latched 16-bit databus");
-static int *initp;
+static s16 *initp;
static int initp_num;
/* default init sequences */
-static int st7735r_init[] = {
--1, 0x01, -2, 150, -1, 0x11, -2, 500, -1, 0xB1, 0x01, 0x2C, 0x2D, -1, 0xB2, 0x01, 0x2C, 0x2D, -1, 0xB3, 0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D,
--1, 0xB4, 0x07, -1, 0xC0, 0xA2, 0x02, 0x84, -1, 0xC1, 0xC5, -1, 0xC2, 0x0A, 0x00, -1, 0xC3, 0x8A, 0x2A, -1, 0xC4, 0x8A, 0xEE, -1, 0xC5, 0x0E,
--1, 0x20, -1, 0x36, 0xC0, -1, 0x3A, 0x05, -1, 0xE0, 0x0f, 0x1a, 0x0f, 0x18, 0x2f, 0x28, 0x20, 0x22, 0x1f, 0x1b, 0x23, 0x37, 0x00, 0x07, 0x02, 0x10,
--1, 0xE1, 0x0f, 0x1b, 0x0f, 0x17, 0x33, 0x2c, 0x29, 0x2e, 0x30, 0x30, 0x39, 0x3f, 0x00, 0x07, 0x03, 0x10, -1, 0x29, -2, 100, -1, 0x13, -2, 10, -3 };
-
-static int ssd1289_init[] = {
--1, 0x00, 0x0001, -1, 0x03, 0xA8A4, -1, 0x0C, 0x0000, -1, 0x0D, 0x080C, -1, 0x0E, 0x2B00, -1, 0x1E, 0x00B7, -1, 0x01, 0x2B3F, -1, 0x02, 0x0600,
--1, 0x10, 0x0000, -1, 0x11, 0x6070, -1, 0x05, 0x0000, -1, 0x06, 0x0000, -1, 0x16, 0xEF1C, -1, 0x17, 0x0003, -1, 0x07, 0x0233, -1, 0x0B, 0x0000,
--1, 0x0F, 0x0000, -1, 0x41, 0x0000, -1, 0x42, 0x0000, -1, 0x48, 0x0000, -1, 0x49, 0x013F, -1, 0x4A, 0x0000, -1, 0x4B, 0x0000, -1, 0x44, 0xEF00,
--1, 0x45, 0x0000, -1, 0x46, 0x013F, -1, 0x30, 0x0707, -1, 0x31, 0x0204, -1, 0x32, 0x0204, -1, 0x33, 0x0502, -1, 0x34, 0x0507, -1, 0x35, 0x0204,
--1, 0x36, 0x0204, -1, 0x37, 0x0502, -1, 0x3A, 0x0302, -1, 0x3B, 0x0302, -1, 0x23, 0x0000, -1, 0x24, 0x0000, -1, 0x25, 0x8000, -1, 0x4f, 0x0000,
--1, 0x4e, 0x0000, -1, 0x22, -3 };
-
-static int hx8340bn_init[] = {
--1, 0xC1, 0xFF, 0x83, 0x40, -1, 0x11, -2, 150, -1, 0xCA, 0x70, 0x00, 0xD9, -1, 0xB0, 0x01, 0x11,
--1, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06, -2, 20, -1, 0xC2, 0x60, 0x71, 0x01, 0x0E, 0x05, 0x02, 0x09, 0x31, 0x0A,
--1, 0xC3, 0x67, 0x30, 0x61, 0x17, 0x48, 0x07, 0x05, 0x33, -2, 10, -1, 0xB5, 0x35, 0x20, 0x45, -1, 0xB4, 0x33, 0x25, 0x4C, -2, 10,
--1, 0x3A, 0x05, -1, 0x29, -2, 10, -3 };
-
-static int ili9225_init[] = {
--1, 0x0001, 0x011C, -1, 0x0002, 0x0100, -1, 0x0003, 0x1030, -1, 0x0008, 0x0808, -1, 0x000C, 0x0000, -1, 0x000F, 0x0A01, -1, 0x0020, 0x0000,
--1, 0x0021, 0x0000, -2, 50, -1, 0x0010, 0x0A00, -1, 0x0011, 0x1038, -2, 50, -1, 0x0012, 0x1121, -1, 0x0013, 0x004E, -1, 0x0014, 0x676F,
--1, 0x0030, 0x0000, -1, 0x0031, 0x00DB, -1, 0x0032, 0x0000, -1, 0x0033, 0x0000, -1, 0x0034, 0x00DB, -1, 0x0035, 0x0000, -1, 0x0036, 0x00AF,
--1, 0x0037, 0x0000, -1, 0x0038, 0x00DB, -1, 0x0039, 0x0000, -1, 0x0050, 0x0000, -1, 0x0051, 0x060A, -1, 0x0052, 0x0D0A, -1, 0x0053, 0x0303,
--1, 0x0054, 0x0A0D, -1, 0x0055, 0x0A06, -1, 0x0056, 0x0000, -1, 0x0057, 0x0303, -1, 0x0058, 0x0000, -1, 0x0059, 0x0000, -2, 50,
--1, 0x0007, 0x1017, -2, 50, -3 };
-
-static int ili9320_init[] = {
--1, 0x00E5, 0x8000, -1, 0x0000, 0x0001, -1, 0x0001, 0x0100, -1, 0x0002, 0x0700, -1, 0x0003, 0x1030, -1, 0x0004, 0x0000, -1, 0x0008, 0x0202,
--1, 0x0009, 0x0000, -1, 0x000A, 0x0000, -1, 0x000C, 0x0000, -1, 0x000D, 0x0000, -1, 0x000F, 0x0000, -1, 0x0010, 0x0000, -1, 0x0011, 0x0007,
--1, 0x0012, 0x0000, -1, 0x0013, 0x0000, -2, 200, -1, 0x0010, 0x17B0, -1, 0x0011, 0x0031, -2, 50, -1, 0x0012, 0x0138, -2, 50, -1, 0x0013, 0x1800,
--1, 0x0029, 0x0008, -2, 50, -1, 0x0020, 0x0000, -1, 0x0021, 0x0000, -1, 0x0030, 0x0000, -1, 0x0031, 0x0505, -1, 0x0032, 0x0004,
--1, 0x0035, 0x0006, -1, 0x0036, 0x0707, -1, 0x0037, 0x0105, -1, 0x0038, 0x0002, -1, 0x0039, 0x0707, -1, 0x003C, 0x0704, -1, 0x003D, 0x0807,
--1, 0x0050, 0x0000, -1, 0x0051, 0x00EF, -1, 0x0052, 0x0000, -1, 0x0053, 0x013F, -1, 0x0060, 0x2700, -1, 0x0061, 0x0001, -1, 0x006A, 0x0000,
--1, 0x0080, 0x0000, -1, 0x0081, 0x0000, -1, 0x0082, 0x0000, -1, 0x0083, 0x0000, -1, 0x0084, 0x0000, -1, 0x0085, 0x0000, -1, 0x0090, 0x0010,
--1, 0x0092, 0x0000, -1, 0x0093, 0x0003, -1, 0x0095, 0x0110, -1, 0x0097, 0x0000, -1, 0x0098, 0x0000, -1, 0x0007, 0x0173, -3 };
-
-static int ili9325_init[] = {
--1, 0x00E3, 0x3008, -1, 0x00E7, 0x0012, -1, 0x00EF, 0x1231, -1, 0x0001, 0x0100, -1, 0x0002, 0x0700, -1, 0x0003, 0x1030, -1, 0x0004, 0x0000,
--1, 0x0008, 0x0207, -1, 0x0009, 0x0000, -1, 0x000A, 0x0000, -1, 0x000C, 0x0000, -1, 0x000D, 0x0000, -1, 0x000F, 0x0000, -1, 0x0010, 0x0000,
--1, 0x0011, 0x0007, -1, 0x0012, 0x0000, -1, 0x0013, 0x0000, -2, 200, -1, 0x0010, 0x1690, -1, 0x0011, 0x0223, -2, 50, -1, 0x0012, 0x000D, -2, 50,
--1, 0x0013, 0x1200, -1, 0x0029, 0x000A, -1, 0x002B, 0x000C, -2, 50, -1, 0x0020, 0x0000, -1, 0x0021, 0x0000, -1, 0x0030, 0x0000,
--1, 0x0031, 0x0506, -1, 0x0032, 0x0104, -1, 0x0035, 0x0207, -1, 0x0036, 0x000F, -1, 0x0037, 0x0306, -1, 0x0038, 0x0102, -1, 0x0039, 0x0707,
--1, 0x003C, 0x0702, -1, 0x003D, 0x1604, -1, 0x0050, 0x0000, -1, 0x0051, 0x00EF, -1, 0x0052, 0x0000, -1, 0x0053, 0x013F, -1, 0x0060, 0xA700,
--1, 0x0061, 0x0001, -1, 0x006A, 0x0000, -1, 0x0080, 0x0000, -1, 0x0081, 0x0000, -1, 0x0082, 0x0000, -1, 0x0083, 0x0000, -1, 0x0084, 0x0000,
--1, 0x0085, 0x0000, -1, 0x0090, 0x0010, -1, 0x0092, 0x0600, -1, 0x0007, 0x0133, -3 };
-
-static int ili9341_init[] = {
--1, 0x28, -2, 20, -1, 0xCF, 0x00, 0x83, 0x30, -1, 0xED, 0x64, 0x03, 0x12, 0x81, -1, 0xE8, 0x85, 0x01, 0x79,
--1, 0xCB, 0x39, 0x2c, 0x00, 0x34, 0x02, -1, 0xF7, 0x20, -1, 0xEA, 0x00, 0x00, -1, 0xC0, 0x26, -1, 0xC1, 0x11,
--1, 0xC5, 0x35, 0x3E, -1, 0xC7, 0xBE, -1, 0xB1, 0x00, 0x1B, -1, 0xB6, 0x0a, 0x82, 0x27, 0x00, -1, 0xB7, 0x07,
--1, 0x3A, 0x55, -1, 0x36, 0x48, -1, 0x11, -2, 120, -1, 0x29, -2, 20, -3 };
-
-static int ssd1351_init[] = { -1, 0xfd, 0x12, -1, 0xfd, 0xb1, -1, 0xae, -1, 0xb3, 0xf1, -1, 0xca, 0x7f, -1, 0xa0, 0x74,
- -1, 0x15, 0x00, 0x7f, -1, 0x75, 0x00, 0x7f, -1, 0xa1, 0x00, -1, 0xa2, 0x00, -1, 0xb5, 0x00,
- -1, 0xab, 0x01, -1, 0xb1, 0x32, -1, 0xb4, 0xa0, 0xb5, 0x55, -1, 0xbb, 0x17, -1, 0xbe, 0x05,
- -1, 0xc1, 0xc8, 0x80, 0xc8, -1, 0xc7, 0x0f, -1, 0xb6, 0x01, -1, 0xa6, -1, 0xaf, -3 };
+static s16 st7735r_init[] = {
+ -1, 0x01,
+ -2, 150,
+ -1, 0x11,
+ -2, 500,
+ -1, 0xB1, 0x01, 0x2C, 0x2D,
+ -1, 0xB2, 0x01, 0x2C, 0x2D,
+ -1, 0xB3, 0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D,
+ -1, 0xB4, 0x07,
+ -1, 0xC0, 0xA2, 0x02, 0x84,
+ -1, 0xC1, 0xC5,
+ -1, 0xC2, 0x0A, 0x00,
+ -1, 0xC3, 0x8A, 0x2A,
+ -1, 0xC4, 0x8A, 0xEE,
+ -1, 0xC5, 0x0E,
+ -1, 0x20,
+ -1, 0x36, 0xC0,
+ -1, 0x3A, 0x05,
+ -1, 0xE0, 0x0f, 0x1a, 0x0f, 0x18, 0x2f, 0x28, 0x20, 0x22,
+ 0x1f, 0x1b, 0x23, 0x37, 0x00, 0x07, 0x02, 0x10,
+ -1, 0xE1, 0x0f, 0x1b, 0x0f, 0x17, 0x33, 0x2c, 0x29, 0x2e,
+ 0x30, 0x30, 0x39, 0x3f, 0x00, 0x07, 0x03, 0x10,
+ -1, 0x29,
+ -2, 100,
+ -1, 0x13,
+ -2, 10,
+ -3
+};
+
+static s16 ssd1289_init[] = {
+ -1, 0x00, 0x0001,
+ -1, 0x03, 0xA8A4,
+ -1, 0x0C, 0x0000,
+ -1, 0x0D, 0x080C,
+ -1, 0x0E, 0x2B00,
+ -1, 0x1E, 0x00B7,
+ -1, 0x01, 0x2B3F,
+ -1, 0x02, 0x0600,
+ -1, 0x10, 0x0000,
+ -1, 0x11, 0x6070,
+ -1, 0x05, 0x0000,
+ -1, 0x06, 0x0000,
+ -1, 0x16, 0xEF1C,
+ -1, 0x17, 0x0003,
+ -1, 0x07, 0x0233,
+ -1, 0x0B, 0x0000,
+ -1, 0x0F, 0x0000,
+ -1, 0x41, 0x0000,
+ -1, 0x42, 0x0000,
+ -1, 0x48, 0x0000,
+ -1, 0x49, 0x013F,
+ -1, 0x4A, 0x0000,
+ -1, 0x4B, 0x0000,
+ -1, 0x44, 0xEF00,
+ -1, 0x45, 0x0000,
+ -1, 0x46, 0x013F,
+ -1, 0x30, 0x0707,
+ -1, 0x31, 0x0204,
+ -1, 0x32, 0x0204,
+ -1, 0x33, 0x0502,
+ -1, 0x34, 0x0507,
+ -1, 0x35, 0x0204,
+ -1, 0x36, 0x0204,
+ -1, 0x37, 0x0502,
+ -1, 0x3A, 0x0302,
+ -1, 0x3B, 0x0302,
+ -1, 0x23, 0x0000,
+ -1, 0x24, 0x0000,
+ -1, 0x25, 0x8000,
+ -1, 0x4f, 0x0000,
+ -1, 0x4e, 0x0000,
+ -1, 0x22,
+ -3
+};
+
+static s16 hx8340bn_init[] = {
+ -1, 0xC1, 0xFF, 0x83, 0x40,
+ -1, 0x11,
+ -2, 150,
+ -1, 0xCA, 0x70, 0x00, 0xD9,
+ -1, 0xB0, 0x01, 0x11,
+ -1, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06,
+ -2, 20,
+ -1, 0xC2, 0x60, 0x71, 0x01, 0x0E, 0x05, 0x02, 0x09, 0x31, 0x0A,
+ -1, 0xC3, 0x67, 0x30, 0x61, 0x17, 0x48, 0x07, 0x05, 0x33,
+ -2, 10,
+ -1, 0xB5, 0x35, 0x20, 0x45,
+ -1, 0xB4, 0x33, 0x25, 0x4C,
+ -2, 10,
+ -1, 0x3A, 0x05,
+ -1, 0x29,
+ -2, 10,
+ -3
+};
+
+static s16 ili9225_init[] = {
+ -1, 0x0001, 0x011C,
+ -1, 0x0002, 0x0100,
+ -1, 0x0003, 0x1030,
+ -1, 0x0008, 0x0808,
+ -1, 0x000C, 0x0000,
+ -1, 0x000F, 0x0A01,
+ -1, 0x0020, 0x0000,
+ -1, 0x0021, 0x0000,
+ -2, 50,
+ -1, 0x0010, 0x0A00,
+ -1, 0x0011, 0x1038,
+ -2, 50,
+ -1, 0x0012, 0x1121,
+ -1, 0x0013, 0x004E,
+ -1, 0x0014, 0x676F,
+ -1, 0x0030, 0x0000,
+ -1, 0x0031, 0x00DB,
+ -1, 0x0032, 0x0000,
+ -1, 0x0033, 0x0000,
+ -1, 0x0034, 0x00DB,
+ -1, 0x0035, 0x0000,
+ -1, 0x0036, 0x00AF,
+ -1, 0x0037, 0x0000,
+ -1, 0x0038, 0x00DB,
+ -1, 0x0039, 0x0000,
+ -1, 0x0050, 0x0000,
+ -1, 0x0051, 0x060A,
+ -1, 0x0052, 0x0D0A,
+ -1, 0x0053, 0x0303,
+ -1, 0x0054, 0x0A0D,
+ -1, 0x0055, 0x0A06,
+ -1, 0x0056, 0x0000,
+ -1, 0x0057, 0x0303,
+ -1, 0x0058, 0x0000,
+ -1, 0x0059, 0x0000,
+ -2, 50,
+ -1, 0x0007, 0x1017,
+ -2, 50,
+ -3
+};
+
+static s16 ili9320_init[] = {
+ -1, 0x00E5, 0x8000,
+ -1, 0x0000, 0x0001,
+ -1, 0x0001, 0x0100,
+ -1, 0x0002, 0x0700,
+ -1, 0x0003, 0x1030,
+ -1, 0x0004, 0x0000,
+ -1, 0x0008, 0x0202,
+ -1, 0x0009, 0x0000,
+ -1, 0x000A, 0x0000,
+ -1, 0x000C, 0x0000,
+ -1, 0x000D, 0x0000,
+ -1, 0x000F, 0x0000,
+ -1, 0x0010, 0x0000,
+ -1, 0x0011, 0x0007,
+ -1, 0x0012, 0x0000,
+ -1, 0x0013, 0x0000,
+ -2, 200,
+ -1, 0x0010, 0x17B0,
+ -1, 0x0011, 0x0031,
+ -2, 50,
+ -1, 0x0012, 0x0138,
+ -2, 50,
+ -1, 0x0013, 0x1800,
+ -1, 0x0029, 0x0008,
+ -2, 50,
+ -1, 0x0020, 0x0000,
+ -1, 0x0021, 0x0000,
+ -1, 0x0030, 0x0000,
+ -1, 0x0031, 0x0505,
+ -1, 0x0032, 0x0004,
+ -1, 0x0035, 0x0006,
+ -1, 0x0036, 0x0707,
+ -1, 0x0037, 0x0105,
+ -1, 0x0038, 0x0002,
+ -1, 0x0039, 0x0707,
+ -1, 0x003C, 0x0704,
+ -1, 0x003D, 0x0807,
+ -1, 0x0050, 0x0000,
+ -1, 0x0051, 0x00EF,
+ -1, 0x0052, 0x0000,
+ -1, 0x0053, 0x013F,
+ -1, 0x0060, 0x2700,
+ -1, 0x0061, 0x0001,
+ -1, 0x006A, 0x0000,
+ -1, 0x0080, 0x0000,
+ -1, 0x0081, 0x0000,
+ -1, 0x0082, 0x0000,
+ -1, 0x0083, 0x0000,
+ -1, 0x0084, 0x0000,
+ -1, 0x0085, 0x0000,
+ -1, 0x0090, 0x0010,
+ -1, 0x0092, 0x0000,
+ -1, 0x0093, 0x0003,
+ -1, 0x0095, 0x0110,
+ -1, 0x0097, 0x0000,
+ -1, 0x0098, 0x0000,
+ -1, 0x0007, 0x0173,
+ -3
+};
+
+static s16 ili9325_init[] = {
+ -1, 0x00E3, 0x3008,
+ -1, 0x00E7, 0x0012,
+ -1, 0x00EF, 0x1231,
+ -1, 0x0001, 0x0100,
+ -1, 0x0002, 0x0700,
+ -1, 0x0003, 0x1030,
+ -1, 0x0004, 0x0000,
+ -1, 0x0008, 0x0207,
+ -1, 0x0009, 0x0000,
+ -1, 0x000A, 0x0000,
+ -1, 0x000C, 0x0000,
+ -1, 0x000D, 0x0000,
+ -1, 0x000F, 0x0000,
+ -1, 0x0010, 0x0000,
+ -1, 0x0011, 0x0007,
+ -1, 0x0012, 0x0000,
+ -1, 0x0013, 0x0000,
+ -2, 200,
+ -1, 0x0010, 0x1690,
+ -1, 0x0011, 0x0223,
+ -2, 50,
+ -1, 0x0012, 0x000D,
+ -2, 50,
+ -1, 0x0013, 0x1200,
+ -1, 0x0029, 0x000A,
+ -1, 0x002B, 0x000C,
+ -2, 50,
+ -1, 0x0020, 0x0000,
+ -1, 0x0021, 0x0000,
+ -1, 0x0030, 0x0000,
+ -1, 0x0031, 0x0506,
+ -1, 0x0032, 0x0104,
+ -1, 0x0035, 0x0207,
+ -1, 0x0036, 0x000F,
+ -1, 0x0037, 0x0306,
+ -1, 0x0038, 0x0102,
+ -1, 0x0039, 0x0707,
+ -1, 0x003C, 0x0702,
+ -1, 0x003D, 0x1604,
+ -1, 0x0050, 0x0000,
+ -1, 0x0051, 0x00EF,
+ -1, 0x0052, 0x0000,
+ -1, 0x0053, 0x013F,
+ -1, 0x0060, 0xA700,
+ -1, 0x0061, 0x0001,
+ -1, 0x006A, 0x0000,
+ -1, 0x0080, 0x0000,
+ -1, 0x0081, 0x0000,
+ -1, 0x0082, 0x0000,
+ -1, 0x0083, 0x0000,
+ -1, 0x0084, 0x0000,
+ -1, 0x0085, 0x0000,
+ -1, 0x0090, 0x0010,
+ -1, 0x0092, 0x0600,
+ -1, 0x0007, 0x0133,
+ -3
+};
+
+static s16 ili9341_init[] = {
+ -1, 0x28,
+ -2, 20,
+ -1, 0xCF, 0x00, 0x83, 0x30,
+ -1, 0xED, 0x64, 0x03, 0x12, 0x81,
+ -1, 0xE8, 0x85, 0x01, 0x79,
+ -1, 0xCB, 0x39, 0x2c, 0x00, 0x34, 0x02,
+ -1, 0xF7, 0x20,
+ -1, 0xEA, 0x00, 0x00,
+ -1, 0xC0, 0x26,
+ -1, 0xC1, 0x11,
+ -1, 0xC5, 0x35, 0x3E,
+ -1, 0xC7, 0xBE,
+ -1, 0xB1, 0x00, 0x1B,
+ -1, 0xB6, 0x0a, 0x82, 0x27, 0x00,
+ -1, 0xB7, 0x07,
+ -1, 0x3A, 0x55,
+ -1, 0x36, 0x48,
+ -1, 0x11,
+ -2, 120,
+ -1, 0x29,
+ -2, 20,
+ -3
+};
+
+static s16 ssd1351_init[] = {
+ -1, 0xfd, 0x12,
+ -1, 0xfd, 0xb1,
+ -1, 0xae,
+ -1, 0xb3, 0xf1,
+ -1, 0xca, 0x7f,
+ -1, 0xa0, 0x74,
+ -1, 0x15, 0x00, 0x7f,
+ -1, 0x75, 0x00, 0x7f,
+ -1, 0xa1, 0x00,
+ -1, 0xa2, 0x00,
+ -1, 0xb5, 0x00,
+ -1, 0xab, 0x01,
+ -1, 0xb1, 0x32,
+ -1, 0xb4, 0xa0, 0xb5, 0x55,
+ -1, 0xbb, 0x17,
+ -1, 0xbe, 0x05,
+ -1, 0xc1, 0xc8, 0x80, 0xc8,
+ -1, 0xc7, 0x0f,
+ -1, 0xb6, 0x01,
+ -1, 0xa6,
+ -1, 0xaf,
+ -3
+};
/**
* struct flexfb_lcd_controller - Describes the LCD controller properties
@@ -142,7 +390,7 @@ struct flexfb_lcd_controller {
unsigned int height;
unsigned int setaddrwin;
unsigned int regwidth;
- int *init_seq;
+ s16 *init_seq;
int init_seq_sz;
};
@@ -582,6 +830,7 @@ static const struct platform_device_id flexfb_platform_ids[] = {
{ "flexpfb", 0 },
{ },
};
+MODULE_DEVICE_TABLE(platform, flexfb_platform_ids);
static struct platform_driver flexfb_platform_driver = {
.driver = {
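
The added MODULE_DEVICE_TABLE(platform, flexfb_platform_ids) exports the ID table into the module's alias information, so udev/modprobe can autoload flexfb when a matching platform device ("flexfb", "flexpfb", ...) is registered instead of requiring a manual modprobe. The general pattern looks like this (a generic sketch with made-up names, not a quote of the driver):

#include <linux/module.h>
#include <linux/platform_device.h>

static const struct platform_device_id example_ids[] = {
	{ "example-dev", 0 },
	{ }					/* sentinel entry */
};
MODULE_DEVICE_TABLE(platform, example_ids);	/* emits MODULE_ALIAS("platform:example-dev") */
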
diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig
index 1f959339c671..5c009ab48f00 100644
--- a/drivers/staging/fsl-mc/bus/Kconfig
+++ b/drivers/staging/fsl-mc/bus/Kconfig
@@ -1,25 +1,17 @@
#
-# Freescale Management Complex (MC) bus drivers
+# DPAA2 fsl-mc bus
#
-# Copyright (C) 2014 Freescale Semiconductor, Inc.
+# Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
#
# This file is released under the GPLv2
#
config FSL_MC_BUS
- bool "Freescale Management Complex (MC) bus driver"
- depends on OF && ARM64
+ bool "QorIQ DPAA2 fsl-mc bus driver"
+ depends on OF && ARCH_LAYERSCAPE
select GENERIC_MSI_IRQ_DOMAIN
help
- Driver to enable the bus infrastructure for the Freescale
- QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware
- module of the QorIQ LS2 SoCs, that does resource management
- for hardware building-blocks in the SoC that can be used
- to dynamically create networking hardware objects such as
- network interfaces (NICs), crypto accelerator instances,
- or L2 switches.
-
- Only enable this option when building the kernel for
- Freescale QorQIQ LS2xxxx SoCs.
-
-
+ Driver to enable the bus infrastructure for the QorIQ DPAA2
+ architecture. The fsl-mc bus driver handles discovery of
+ DPAA2 objects (which are represented as Linux devices) and
+ binding objects to drivers.
diff --git a/drivers/staging/fsl-mc/include/dpbp-cmd.h b/drivers/staging/fsl-mc/bus/dpbp-cmd.h
index 2860411ddb51..7d86539b5414 100644
--- a/drivers/staging/fsl-mc/include/dpbp-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpbp-cmd.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -33,37 +33,48 @@
#define _FSL_DPBP_CMD_H
/* DPBP Version */
-#define DPBP_VER_MAJOR 2
+#define DPBP_VER_MAJOR 3
#define DPBP_VER_MINOR 2
+/* Command versioning */
+#define DPBP_CMD_BASE_VERSION 1
+#define DPBP_CMD_ID_OFFSET 4
+
+#define DPBP_CMD(id) ((id << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
+
/* Command IDs */
-#define DPBP_CMDID_CLOSE 0x800
-#define DPBP_CMDID_OPEN 0x804
-#define DPBP_CMDID_CREATE 0x904
-#define DPBP_CMDID_DESTROY 0x900
-
-#define DPBP_CMDID_ENABLE 0x002
-#define DPBP_CMDID_DISABLE 0x003
-#define DPBP_CMDID_GET_ATTR 0x004
-#define DPBP_CMDID_RESET 0x005
-#define DPBP_CMDID_IS_ENABLED 0x006
-
-#define DPBP_CMDID_SET_IRQ 0x010
-#define DPBP_CMDID_GET_IRQ 0x011
-#define DPBP_CMDID_SET_IRQ_ENABLE 0x012
-#define DPBP_CMDID_GET_IRQ_ENABLE 0x013
-#define DPBP_CMDID_SET_IRQ_MASK 0x014
-#define DPBP_CMDID_GET_IRQ_MASK 0x015
-#define DPBP_CMDID_GET_IRQ_STATUS 0x016
-#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017
-
-#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0
-#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1
+#define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
+#define DPBP_CMDID_OPEN DPBP_CMD(0x804)
+#define DPBP_CMDID_CREATE DPBP_CMD(0x904)
+#define DPBP_CMDID_DESTROY DPBP_CMD(0x984)
+#define DPBP_CMDID_GET_API_VERSION DPBP_CMD(0xa04)
+
+#define DPBP_CMDID_ENABLE DPBP_CMD(0x002)
+#define DPBP_CMDID_DISABLE DPBP_CMD(0x003)
+#define DPBP_CMDID_GET_ATTR DPBP_CMD(0x004)
+#define DPBP_CMDID_RESET DPBP_CMD(0x005)
+#define DPBP_CMDID_IS_ENABLED DPBP_CMD(0x006)
+
+#define DPBP_CMDID_SET_IRQ DPBP_CMD(0x010)
+#define DPBP_CMDID_GET_IRQ DPBP_CMD(0x011)
+#define DPBP_CMDID_SET_IRQ_ENABLE DPBP_CMD(0x012)
+#define DPBP_CMDID_GET_IRQ_ENABLE DPBP_CMD(0x013)
+#define DPBP_CMDID_SET_IRQ_MASK DPBP_CMD(0x014)
+#define DPBP_CMDID_GET_IRQ_MASK DPBP_CMD(0x015)
+#define DPBP_CMDID_GET_IRQ_STATUS DPBP_CMD(0x016)
+#define DPBP_CMDID_CLEAR_IRQ_STATUS DPBP_CMD(0x017)
+
+#define DPBP_CMDID_SET_NOTIFICATIONS DPBP_CMD(0x01b0)
+#define DPBP_CMDID_GET_NOTIFICATIONS DPBP_CMD(0x01b1)
struct dpbp_cmd_open {
__le32 dpbp_id;
};
+struct dpbp_cmd_destroy {
+ __le32 object_id;
+};
+
#define DPBP_ENABLE 0x1
struct dpbp_rsp_is_enabled {
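
Every DPBP command ID is now wrapped in DPBP_CMD(), which packs the command-interface base version into the low bits of the ID word, so bumping DPBP_CMD_BASE_VERSION re-versions every command in one place and the MC firmware can tell which revision of the command format a caller is using. A worked example, using the values defined above:

/*
 * DPBP_CMDID_OPEN = DPBP_CMD(0x804)
 *                 = (0x804 << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION
 *                 = 0x8040 | 0x1
 *                 = 0x8041
 */
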
diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c
index 5d4cd812a400..cf4782f6a049 100644
--- a/drivers/staging/fsl-mc/bus/dpbp.c
+++ b/drivers/staging/fsl-mc/bus/dpbp.c
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -32,7 +32,8 @@
#include "../include/mc-sys.h"
#include "../include/mc-cmd.h"
#include "../include/dpbp.h"
-#include "../include/dpbp-cmd.h"
+
+#include "dpbp-cmd.h"
/**
* dpbp_open() - Open a control session for the specified object.
@@ -107,28 +108,26 @@ EXPORT_SYMBOL(dpbp_close);
/**
* dpbp_create() - Create the DPBP object.
* @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @cfg: Configuration structure
- * @token: Returned token; use in subsequent API calls
+ * @obj_id: Returned object id; use in subsequent API calls
*
* Create the DPBP object, allocate required resources and
* perform required initialization.
*
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent calls to
- * this specific object. For objects that are created using the
- * DPL file, call dpbp_open function to get an authentication
- * token first.
+ * This function accepts an authentication token of a parent
+ * container that this object should be assigned to and returns
+ * an object id. This object_id will be used in all subsequent calls to
+ * this specific object.
*
* Return: '0' on Success; Error code otherwise.
*/
int dpbp_create(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
u32 cmd_flags,
const struct dpbp_cfg *cfg,
- u16 *token)
+ u32 *obj_id)
{
struct mc_command cmd = { 0 };
int err;
@@ -137,7 +136,7 @@ int dpbp_create(struct fsl_mc_io *mc_io,
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE,
- cmd_flags, 0);
+ cmd_flags, dprc_token);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -145,7 +144,7 @@ int dpbp_create(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *token = mc_cmd_hdr_read_token(&cmd);
+ *obj_id = mc_cmd_read_object_id(&cmd);
return 0;
}
@@ -153,20 +152,25 @@ int dpbp_create(struct fsl_mc_io *mc_io,
/**
* dpbp_destroy() - Destroy the DPBP object and release all its resources.
* @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
+ * @obj_id: ID of DPBP object
*
* Return: '0' on Success; error code otherwise.
*/
int dpbp_destroy(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
u32 cmd_flags,
- u16 token)
+ u32 obj_id)
{
+ struct dpbp_cmd_destroy *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY,
- cmd_flags, token);
+ cmd_flags, dprc_token);
+ cmd_params = (struct dpbp_cmd_destroy *)cmd.params;
+ cmd_params->object_id = cpu_to_le32(obj_id);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -609,8 +613,6 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
attr->bpid = le16_to_cpu(rsp_params->bpid);
attr->id = le32_to_cpu(rsp_params->id);
- attr->version.major = le16_to_cpu(rsp_params->version_major);
- attr->version.minor = le16_to_cpu(rsp_params->version_minor);
return 0;
}
@@ -689,3 +691,35 @@ int dpbp_get_notifications(struct fsl_mc_io *mc_io,
return 0;
}
+
+/**
+ * dpbp_get_api_version - Get Data Path Buffer Pool API version
+ * @mc_io: Pointer to Mc portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of Buffer Pool API
+ * @minor_ver: Minor version of Buffer Pool API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+ return 0;
+}
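
With these changes, object creation is container-scoped and decoupled from the runtime session: dpbp_create() takes the parent DPRC token and hands back a plain object id, while runtime calls still go through a token obtained via open. A minimal calling sketch (error handling elided; it assumes dpbp_open()/dpbp_close() keep their existing id/token signatures and that a zero-filled struct dpbp_cfg selects the defaults):

u32 obj_id;
u16 token;
struct dpbp_cfg cfg = { 0 };

dpbp_create(mc_io, dprc_token, 0, &cfg, &obj_id);	/* returns an object id */
dpbp_open(mc_io, 0, obj_id, &token);			/* token for runtime calls */
/* ... configure and use the buffer pool through 'token' ... */
dpbp_close(mc_io, 0, token);
dpbp_destroy(mc_io, dprc_token, 0, obj_id);		/* takes the id, not a token */

The same create/destroy/get_api_version rework is applied to DPMCP and DPRC in the hunks that follow.
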
diff --git a/drivers/staging/fsl-mc/include/dpcon-cmd.h b/drivers/staging/fsl-mc/bus/dpcon-cmd.h
index 536b2ef13507..d0a5e194c5e1 100644
--- a/drivers/staging/fsl-mc/include/dpcon-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpcon-cmd.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
index d098a6d8f6bc..7cb514963c26 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -33,25 +33,32 @@
#define _FSL_DPMCP_CMD_H
/* Minimal supported DPMCP Version */
-#define DPMCP_MIN_VER_MAJOR 3
-#define DPMCP_MIN_VER_MINOR 0
+#define DPMCP_MIN_VER_MAJOR 3
+#define DPMCP_MIN_VER_MINOR 0
+
+/* Command versioning */
+#define DPMCP_CMD_BASE_VERSION 1
+#define DPMCP_CMD_ID_OFFSET 4
+
+#define DPMCP_CMD(id) ((id << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION)
/* Command IDs */
-#define DPMCP_CMDID_CLOSE 0x800
-#define DPMCP_CMDID_OPEN 0x80b
-#define DPMCP_CMDID_CREATE 0x90b
-#define DPMCP_CMDID_DESTROY 0x900
-
-#define DPMCP_CMDID_GET_ATTR 0x004
-#define DPMCP_CMDID_RESET 0x005
-
-#define DPMCP_CMDID_SET_IRQ 0x010
-#define DPMCP_CMDID_GET_IRQ 0x011
-#define DPMCP_CMDID_SET_IRQ_ENABLE 0x012
-#define DPMCP_CMDID_GET_IRQ_ENABLE 0x013
-#define DPMCP_CMDID_SET_IRQ_MASK 0x014
-#define DPMCP_CMDID_GET_IRQ_MASK 0x015
-#define DPMCP_CMDID_GET_IRQ_STATUS 0x016
+#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800)
+#define DPMCP_CMDID_OPEN DPMCP_CMD(0x80b)
+#define DPMCP_CMDID_CREATE DPMCP_CMD(0x90b)
+#define DPMCP_CMDID_DESTROY DPMCP_CMD(0x98b)
+#define DPMCP_CMDID_GET_API_VERSION DPMCP_CMD(0xa0b)
+
+#define DPMCP_CMDID_GET_ATTR DPMCP_CMD(0x004)
+#define DPMCP_CMDID_RESET DPMCP_CMD(0x005)
+
+#define DPMCP_CMDID_SET_IRQ DPMCP_CMD(0x010)
+#define DPMCP_CMDID_GET_IRQ DPMCP_CMD(0x011)
+#define DPMCP_CMDID_SET_IRQ_ENABLE DPMCP_CMD(0x012)
+#define DPMCP_CMDID_GET_IRQ_ENABLE DPMCP_CMD(0x013)
+#define DPMCP_CMDID_SET_IRQ_MASK DPMCP_CMD(0x014)
+#define DPMCP_CMDID_GET_IRQ_MASK DPMCP_CMD(0x015)
+#define DPMCP_CMDID_GET_IRQ_STATUS DPMCP_CMD(0x016)
struct dpmcp_cmd_open {
__le32 dpmcp_id;
@@ -61,6 +68,10 @@ struct dpmcp_cmd_create {
__le32 portal_id;
};
+struct dpmcp_cmd_destroy {
+ __le32 object_id;
+};
+
struct dpmcp_cmd_set_irq {
/* cmd word 0 */
u8 irq_index;
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c
index 55766f78a528..e4d16519bcb4 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.c
+++ b/drivers/staging/fsl-mc/bus/dpmcp.c
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -106,28 +106,29 @@ int dpmcp_close(struct fsl_mc_io *mc_io,
/**
* dpmcp_create() - Create the DPMCP object.
* @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @cfg: Configuration structure
- * @token: Returned token; use in subsequent API calls
+ * @obj_id: Returned object id; use in subsequent API calls
*
* Create the DPMCP object, allocate required resources and
* perform required initialization.
*
* The object can be created either by declaring it in the
* DPL file, or by calling this function.
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent calls to
- * this specific object. For objects that are created using the
- * DPL file, call dpmcp_open function to get an authentication
- * token first.
+
+ * This function accepts an authentication token of a parent
+ * container that this object should be assigned to and returns
+ * an object id. This object_id will be used in all subsequent calls to
+ * this specific object.
*
* Return: '0' on Success; Error code otherwise.
*/
int dpmcp_create(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
u32 cmd_flags,
const struct dpmcp_cfg *cfg,
- u16 *token)
+ u32 *obj_id)
{
struct mc_command cmd = { 0 };
struct dpmcp_cmd_create *cmd_params;
@@ -136,7 +137,7 @@ int dpmcp_create(struct fsl_mc_io *mc_io,
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE,
- cmd_flags, 0);
+ cmd_flags, dprc_token);
cmd_params = (struct dpmcp_cmd_create *)cmd.params;
cmd_params->portal_id = cpu_to_le32(cfg->portal_id);
@@ -146,7 +147,7 @@ int dpmcp_create(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *token = mc_cmd_hdr_read_token(&cmd);
+ *obj_id = mc_cmd_read_object_id(&cmd);
return 0;
}
@@ -154,20 +155,25 @@ int dpmcp_create(struct fsl_mc_io *mc_io,
/**
* dpmcp_destroy() - Destroy the DPMCP object and release all its resources.
* @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
+ * @obj_id: ID of DPMCP object
*
* Return: '0' on Success; error code otherwise.
*/
int dpmcp_destroy(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
u32 cmd_flags,
- u16 token)
+ u32 obj_id)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_destroy *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY,
- cmd_flags, token);
+ cmd_flags, dprc_token);
+ cmd_params = (struct dpmcp_cmd_destroy *)cmd.params;
+ cmd_params->object_id = cpu_to_le32(obj_id);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -497,8 +503,38 @@ int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
rsp_params = (struct dpmcp_rsp_get_attributes *)cmd.params;
attr->id = le32_to_cpu(rsp_params->id);
- attr->version.major = le16_to_cpu(rsp_params->version_major);
- attr->version.minor = le16_to_cpu(rsp_params->version_minor);
+
+ return 0;
+}
+
+/**
+ * dpmcp_get_api_version - Get Data Path Management Command Portal API version
+ * @mc_io: Pointer to Mc portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of Data Path Management Command Portal API
+ * @minor_ver: Minor version of Data Path Management Command Portal API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmcp_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
return 0;
}
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.h b/drivers/staging/fsl-mc/bus/dpmcp.h
index fe79d4d9293d..98a100d543f6 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.h
+++ b/drivers/staging/fsl-mc/bus/dpmcp.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -32,23 +32,24 @@
#ifndef __FSL_DPMCP_H
#define __FSL_DPMCP_H
-/* Data Path Management Command Portal API
+/*
+ * Data Path Management Command Portal API
* Contains initialization APIs and runtime control APIs for DPMCP
*/
struct fsl_mc_io;
int dpmcp_open(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
+ u32 cmd_flags,
int dpmcp_id,
- uint16_t *token);
+ u16 *token);
/* Get portal ID from pool */
#define DPMCP_GET_PORTAL_ID_FROM_POOL (-1)
int dpmcp_close(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+ u32 cmd_flags,
+ u16 token);
/**
* struct dpmcp_cfg - Structure representing DPMCP configuration
@@ -59,18 +60,20 @@ struct dpmcp_cfg {
int portal_id;
};
-int dpmcp_create(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- const struct dpmcp_cfg *cfg,
- uint16_t *token);
+int dpmcp_create(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
+ u32 cmd_flags,
+ const struct dpmcp_cfg *cfg,
+ u32 *obj_id);
int dpmcp_destroy(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+ u16 dprc_token,
+ u32 cmd_flags,
+ u32 obj_id);
int dpmcp_reset(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+ u32 cmd_flags,
+ u16 token);
/* IRQ */
/* IRQ Index */
@@ -85,75 +88,65 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
* @irq_num: A user defined number associated with this IRQ
*/
struct dpmcp_irq_cfg {
- uint64_t paddr;
- uint32_t val;
- int irq_num;
+ u64 paddr;
+ u32 val;
+ int irq_num;
};
-int dpmcp_set_irq(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- struct dpmcp_irq_cfg *irq_cfg);
-
-int dpmcp_get_irq(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- int *type,
- struct dpmcp_irq_cfg *irq_cfg);
-
-int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint8_t en);
-
-int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint8_t *en);
-
-int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t mask);
-
-int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t *mask);
-
-int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t *status);
+int dpmcp_set_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ struct dpmcp_irq_cfg *irq_cfg);
+
+int dpmcp_get_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ int *type,
+ struct dpmcp_irq_cfg *irq_cfg);
+
+int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en);
+
+int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask);
+
+int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
/**
* struct dpmcp_attr - Structure representing DPMCP attributes
* @id: DPMCP object ID
- * @version: DPMCP version
*/
struct dpmcp_attr {
int id;
- /**
- * struct version - Structure representing DPMCP version
- * @major: DPMCP major version
- * @minor: DPMCP minor version
- */
- struct {
- uint16_t major;
- uint16_t minor;
- } version;
};
-int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpmcp_attr *attr);
+int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmcp_attr *attr);
#endif /* __FSL_DPMCP_H */
diff --git a/drivers/staging/fsl-mc/bus/dpmng-cmd.h b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
index a7b77d58c8cd..cdddfb80eecc 100644
--- a/drivers/staging/fsl-mc/bus/dpmng-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
@@ -12,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -41,13 +40,14 @@
#ifndef __FSL_DPMNG_CMD_H
#define __FSL_DPMNG_CMD_H
-/* Command IDs */
-#define DPMNG_CMDID_GET_CONT_ID 0x830
-#define DPMNG_CMDID_GET_VERSION 0x831
+/* Command versioning */
+#define DPMNG_CMD_BASE_VERSION 1
+#define DPMNG_CMD_ID_OFFSET 4
-struct dpmng_rsp_get_container_id {
- __le32 container_id;
-};
+#define DPMNG_CMD(id) ((id << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831)
struct dpmng_rsp_get_version {
__le32 revision;
diff --git a/drivers/staging/fsl-mc/bus/dpmng.c b/drivers/staging/fsl-mc/bus/dpmng.c
index 96b1d67756fa..ad5d5bbec529 100644
--- a/drivers/staging/fsl-mc/bus/dpmng.c
+++ b/drivers/staging/fsl-mc/bus/dpmng.c
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -72,36 +72,3 @@ int mc_get_version(struct fsl_mc_io *mc_io,
}
EXPORT_SYMBOL(mc_get_version);
-/**
- * dpmng_get_container_id() - Get container ID associated with a given portal.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @container_id: Requested container ID
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmng_get_container_id(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- int *container_id)
-{
- struct mc_command cmd = { 0 };
- struct dpmng_rsp_get_container_id *rsp_params;
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_CONT_ID,
- cmd_flags,
- 0);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- rsp_params = (struct dpmng_rsp_get_container_id *)cmd.params;
- *container_id = le32_to_cpu(rsp_params->container_id);
-
- return 0;
-}
-
diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h
index 009d65673155..588b8cafdbc7 100644
--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h
@@ -12,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -42,48 +41,56 @@
#define _FSL_DPRC_CMD_H
/* Minimal supported DPRC Version */
-#define DPRC_MIN_VER_MAJOR 5
+#define DPRC_MIN_VER_MAJOR 6
#define DPRC_MIN_VER_MINOR 0
+/* Command versioning */
+#define DPRC_CMD_BASE_VERSION 1
+#define DPRC_CMD_ID_OFFSET 4
+
+#define DPRC_CMD(id) ((id << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
+
/* Command IDs */
-#define DPRC_CMDID_CLOSE 0x800
-#define DPRC_CMDID_OPEN 0x805
-#define DPRC_CMDID_CREATE 0x905
-
-#define DPRC_CMDID_GET_ATTR 0x004
-#define DPRC_CMDID_RESET_CONT 0x005
-
-#define DPRC_CMDID_SET_IRQ 0x010
-#define DPRC_CMDID_GET_IRQ 0x011
-#define DPRC_CMDID_SET_IRQ_ENABLE 0x012
-#define DPRC_CMDID_GET_IRQ_ENABLE 0x013
-#define DPRC_CMDID_SET_IRQ_MASK 0x014
-#define DPRC_CMDID_GET_IRQ_MASK 0x015
-#define DPRC_CMDID_GET_IRQ_STATUS 0x016
-#define DPRC_CMDID_CLEAR_IRQ_STATUS 0x017
-
-#define DPRC_CMDID_CREATE_CONT 0x151
-#define DPRC_CMDID_DESTROY_CONT 0x152
-#define DPRC_CMDID_SET_RES_QUOTA 0x155
-#define DPRC_CMDID_GET_RES_QUOTA 0x156
-#define DPRC_CMDID_ASSIGN 0x157
-#define DPRC_CMDID_UNASSIGN 0x158
-#define DPRC_CMDID_GET_OBJ_COUNT 0x159
-#define DPRC_CMDID_GET_OBJ 0x15A
-#define DPRC_CMDID_GET_RES_COUNT 0x15B
-#define DPRC_CMDID_GET_RES_IDS 0x15C
-#define DPRC_CMDID_GET_OBJ_REG 0x15E
-#define DPRC_CMDID_SET_OBJ_IRQ 0x15F
-#define DPRC_CMDID_GET_OBJ_IRQ 0x160
-#define DPRC_CMDID_SET_OBJ_LABEL 0x161
-#define DPRC_CMDID_GET_OBJ_DESC 0x162
-
-#define DPRC_CMDID_CONNECT 0x167
-#define DPRC_CMDID_DISCONNECT 0x168
-#define DPRC_CMDID_GET_POOL 0x169
-#define DPRC_CMDID_GET_POOL_COUNT 0x16A
-
-#define DPRC_CMDID_GET_CONNECTION 0x16C
+#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
+#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
+#define DPRC_CMDID_CREATE DPRC_CMD(0x905)
+#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
+
+#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
+#define DPRC_CMDID_RESET_CONT DPRC_CMD(0x005)
+
+#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010)
+#define DPRC_CMDID_GET_IRQ DPRC_CMD(0x011)
+#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012)
+#define DPRC_CMDID_GET_IRQ_ENABLE DPRC_CMD(0x013)
+#define DPRC_CMDID_SET_IRQ_MASK DPRC_CMD(0x014)
+#define DPRC_CMDID_GET_IRQ_MASK DPRC_CMD(0x015)
+#define DPRC_CMDID_GET_IRQ_STATUS DPRC_CMD(0x016)
+#define DPRC_CMDID_CLEAR_IRQ_STATUS DPRC_CMD(0x017)
+
+#define DPRC_CMDID_CREATE_CONT DPRC_CMD(0x151)
+#define DPRC_CMDID_DESTROY_CONT DPRC_CMD(0x152)
+#define DPRC_CMDID_GET_CONT_ID DPRC_CMD(0x830)
+#define DPRC_CMDID_SET_RES_QUOTA DPRC_CMD(0x155)
+#define DPRC_CMDID_GET_RES_QUOTA DPRC_CMD(0x156)
+#define DPRC_CMDID_ASSIGN DPRC_CMD(0x157)
+#define DPRC_CMDID_UNASSIGN DPRC_CMD(0x158)
+#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
+#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
+#define DPRC_CMDID_GET_RES_COUNT DPRC_CMD(0x15B)
+#define DPRC_CMDID_GET_RES_IDS DPRC_CMD(0x15C)
+#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
+#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
+#define DPRC_CMDID_GET_OBJ_IRQ DPRC_CMD(0x160)
+#define DPRC_CMDID_SET_OBJ_LABEL DPRC_CMD(0x161)
+#define DPRC_CMDID_GET_OBJ_DESC DPRC_CMD(0x162)
+
+#define DPRC_CMDID_CONNECT DPRC_CMD(0x167)
+#define DPRC_CMDID_DISCONNECT DPRC_CMD(0x168)
+#define DPRC_CMDID_GET_POOL DPRC_CMD(0x169)
+#define DPRC_CMDID_GET_POOL_COUNT DPRC_CMD(0x16A)
+
+#define DPRC_CMDID_GET_CONNECTION DPRC_CMD(0x16C)
struct dprc_cmd_open {
__le32 container_id;
@@ -199,9 +206,6 @@ struct dprc_rsp_get_attributes {
/* response word 1 */
__le32 options;
__le32 portal_id;
- /* response word 2 */
- __le16 version_major;
- __le16 version_minor;
};
struct dprc_cmd_set_res_quota {
diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c
index c5ee4639682b..4e416d89b736 100644
--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
@@ -1,7 +1,7 @@
/*
* Freescale data path resource container (DPRC) driver
*
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
* Author: German Rivera <German.Rivera@freescale.com>
*
* This file is licensed under the terms of the GNU General Public
@@ -505,7 +505,7 @@ static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
dprc_irq0_handler,
dprc_irq0_handler_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
- "FSL MC DPRC irq0",
+ dev_name(&mc_dev->dev),
&mc_dev->dev);
if (error < 0) {
dev_err(&mc_dev->dev,
@@ -597,6 +597,7 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
bool mc_io_created = false;
bool msi_domain_set = false;
+ u16 major_ver, minor_ver;
if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
return -EINVAL;
@@ -669,13 +670,21 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
goto error_cleanup_open;
}
- if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR ||
- (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR &&
- mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) {
+ error = dprc_get_api_version(mc_dev->mc_io, 0,
+ &major_ver,
+ &minor_ver);
+ if (error < 0) {
+ dev_err(&mc_dev->dev, "dprc_get_api_version() failed: %d\n",
+ error);
+ goto error_cleanup_open;
+ }
+
+ if (major_ver < DPRC_MIN_VER_MAJOR ||
+ (major_ver == DPRC_MIN_VER_MAJOR &&
+ minor_ver < DPRC_MIN_VER_MINOR)) {
dev_err(&mc_dev->dev,
"ERROR: DPRC version %d.%d not supported\n",
- mc_bus->dprc_attr.version.major,
- mc_bus->dprc_attr.version.minor);
+ major_ver, minor_ver);
error = -ENOTSUPP;
goto error_cleanup_open;
}
diff --git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c
index 9fea3def6041..572edd4c066e 100644
--- a/drivers/staging/fsl-mc/bus/dprc.c
+++ b/drivers/staging/fsl-mc/bus/dprc.c
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -565,8 +565,6 @@ int dprc_get_attributes(struct fsl_mc_io *mc_io,
attr->icid = le16_to_cpu(rsp_params->icid);
attr->options = le32_to_cpu(rsp_params->options);
attr->portal_id = le32_to_cpu(rsp_params->portal_id);
- attr->version.major = le16_to_cpu(rsp_params->version_major);
- attr->version.minor = le16_to_cpu(rsp_params->version_minor);
return 0;
}
@@ -1386,3 +1384,66 @@ int dprc_get_connection(struct fsl_mc_io *mc_io,
return 0;
}
+
+/**
+ * dprc_get_api_version - Get Data Path Resource Container API version
+ * @mc_io: Pointer to Mc portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of Data Path Resource Container API
+ * @minor_ver: Minor version of Data Path Resource Container API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+ return 0;
+}
+
+/**
+ * dprc_get_container_id - Get container ID associated with a given portal.
+ * @mc_io: Pointer to Mc portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @container_id: Requested container id
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_container_id(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int *container_id)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID,
+ cmd_flags,
+ 0);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *container_id = (int)mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
index e93ab53bae67..ce07096c3b1f 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
@@ -1,7 +1,7 @@
/*
- * Freescale MC object device allocator driver
+ * fsl-mc object allocator driver
*
- * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -12,9 +12,9 @@
#include <linux/msi.h>
#include "../include/mc-bus.h"
#include "../include/mc-sys.h"
-#include "../include/dpbp-cmd.h"
-#include "../include/dpcon-cmd.h"
+#include "dpbp-cmd.h"
+#include "dpcon-cmd.h"
#include "fsl-mc-private.h"
#define FSL_MC_IS_ALLOCATABLE(_obj_type) \
@@ -23,15 +23,12 @@
strcmp(_obj_type, "dpcon") == 0)
/**
- * fsl_mc_resource_pool_add_device - add allocatable device to a resource
- * pool of a given MC bus
+ * fsl_mc_resource_pool_add_device - add allocatable object to a resource
+ * pool of a given fsl-mc bus
*
- * @mc_bus: pointer to the MC bus
- * @pool_type: MC bus pool type
- * @mc_dev: Pointer to allocatable MC object device
- *
- * It adds an allocatable MC object device to a container's resource pool of
- * the given resource type
+ * @mc_bus: pointer to the fsl-mc bus
+ * @pool_type: pool type
+ * @mc_dev: pointer to allocatable fsl-mc device
*/
static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
*mc_bus,
@@ -95,10 +92,10 @@ out:
* fsl_mc_resource_pool_remove_device - remove an allocatable device from a
* resource pool
*
- * @mc_dev: Pointer to allocatable MC object device
+ * @mc_dev: pointer to allocatable fsl-mc device
*
- * It permanently removes an allocatable MC object device from the resource
- * pool, the device is currently in, as long as it is in the pool's free list.
+ * It permanently removes an allocatable fsl-mc device from the resource
+ * pool. It's an error if the device is in use.
*/
static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
*mc_dev)
@@ -255,17 +252,18 @@ out_unlock:
EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
/**
- * fsl_mc_object_allocate - Allocates a MC object device of the given
- * pool type from a given MC bus
+ * fsl_mc_object_allocate - Allocates an fsl-mc object of the given
+ * pool type from a given fsl-mc bus instance
*
- * @mc_dev: MC device for which the MC object device is to be allocated
- * @pool_type: MC bus resource pool type
- * @new_mc_dev: Pointer to area where the pointer to the allocated
- * MC object device is to be returned
+ * @mc_dev: fsl-mc device which is used in conjunction with the
+ * allocated object
+ * @pool_type: pool type
+ * @new_mc_dev: pointer to area where the pointer to the allocated device
+ * is to be returned
*
- * This function allocates a MC object device from the device's parent DPRC,
- * from the corresponding MC bus' pool of allocatable MC object devices of
- * the given resource type. mc_dev cannot be a DPRC itself.
+ * Allocatable objects are always used in conjunction with some functional
+ * device. This function allocates an object of the specified type from
+ * the DPRC containing the functional device.
*
* NOTE: pool_type must be different from FSL_MC_POOL_MCP, since MC
* portals are allocated using fsl_mc_portal_allocate(), instead of
@@ -312,10 +310,9 @@ error:
EXPORT_SYMBOL_GPL(fsl_mc_object_allocate);
/**
- * fsl_mc_object_free - Returns an allocatable MC object device to the
- * corresponding resource pool of a given MC bus.
- *
- * @mc_adev: Pointer to the MC object device
+ * fsl_mc_object_free - Returns an fsl-mc object to the resource
+ * pool where it came from.
+ * @mc_adev: Pointer to the fsl-mc device
*/
void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
{
@@ -332,8 +329,14 @@ void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
EXPORT_SYMBOL_GPL(fsl_mc_object_free);
/*
- * Initialize the interrupt pool associated with a MC bus.
- * It allocates a block of IRQs from the GIC-ITS
+ * A DPRC and the devices in the DPRC all share the same GIC-ITS device
+ * ID. A block of IRQs is pre-allocated and maintained in a pool
+ * from which devices can allocate them when needed.
+ */
+
+/*
+ * Initialize the interrupt pool associated with an fsl-mc bus.
+ * It allocates a block of IRQs from the GIC-ITS.
*/
int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
unsigned int irq_count)
@@ -395,7 +398,7 @@ cleanup_msi_irqs:
EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
/**
- * Teardown the interrupt pool associated with an MC bus.
+ * Teardown the interrupt pool associated with an fsl-mc bus.
* It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
*/
void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
@@ -422,11 +425,7 @@ void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
/**
- * It allocates the IRQs required by a given MC object device. The
- * IRQs are allocated from the interrupt pool associated with the
- * MC bus that contains the device, if the device is not a DPRC device.
- * Otherwise, the IRQs are allocated from the interrupt pool associated
- * with the MC bus that represents the DPRC device itself.
+ * Allocate the IRQs required by a given fsl-mc device.
*/
int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
{
@@ -495,8 +494,7 @@ error_resource_alloc:
EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs);
/*
- * It frees the IRQs that were allocated for a MC object device, by
- * returning them to the corresponding interrupt pool.
+ * Frees the IRQs that were allocated for an fsl-mc device.
*/
void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
{
@@ -605,7 +603,7 @@ static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
return error;
dev_dbg(&mc_dev->dev,
- "Allocatable MC object device bound to fsl_mc_allocator driver");
+ "Allocatable fsl-mc device bound to fsl_mc_allocator driver");
return 0;
}
@@ -627,7 +625,7 @@ static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
}
dev_dbg(&mc_dev->dev,
- "Allocatable MC object device unbound from fsl_mc_allocator driver");
+ "Allocatable fsl-mc device unbound from fsl_mc_allocator driver");
return 0;
}
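For context on the allocator API documented above, a DPAA2 function driver consumes it roughly as in the following sketch. This is only an illustration, not part of the patch: the exact fsl_mc_object_allocate() prototype and the FSL_MC_POOL_DPBP pool type are assumed from the surrounding headers, the probe function name is hypothetical, and real drivers do more setup between the calls.

/*
 * Hypothetical probe fragment: take a DPBP object and this device's IRQs
 * from the parent DPRC's pools, releasing them again on the error path.
 */
static int example_fsl_mc_probe(struct fsl_mc_device *mc_dev)
{
	struct fsl_mc_device *dpbp_dev;
	int error;

	/* allocate a DPBP from the object pool of the parent DPRC */
	error = fsl_mc_object_allocate(mc_dev, FSL_MC_POOL_DPBP, &dpbp_dev);
	if (error)
		return error;

	/* IRQs come from the interrupt pool shared by the whole DPRC */
	error = fsl_mc_allocate_irqs(mc_dev);
	if (error)
		goto err_free_dpbp;

	return 0;

err_free_dpbp:
	fsl_mc_object_free(dpbp_dev);
	return error;
}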
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
index 44f64b6f0fc9..5ac373c0c716 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
@@ -1,7 +1,7 @@
/*
* Freescale Management Complex (MC) bus driver
*
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
* Author: German Rivera <German.Rivera@freescale.com>
*
* This file is licensed under the terms of the GNU General Public
@@ -9,6 +9,8 @@
* warranty of any kind, whether express or implied.
*/
+#define pr_fmt(fmt) "fsl-mc: " fmt
+
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
@@ -34,7 +36,7 @@ static struct kmem_cache *mc_dev_cache;
/**
* struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
- * @root_mc_bus_dev: MC object device representing the root DPRC
+ * @root_mc_bus_dev: fsl-mc device representing the root DPRC
* @num_translation_ranges: number of entries in addr_translation_ranges
* @translation_ranges: array of bus to system address translation ranges
*/
@@ -62,8 +64,8 @@ struct fsl_mc_addr_translation_range {
/**
* fsl_mc_bus_match - device to driver matching callback
- * @dev: the MC object device structure to match against
- * @drv: the device driver to search for matching MC object device id
+ * @dev: the fsl-mc device to match against
+ * @drv: the device driver to search for matching fsl-mc object type
* structures
*
* Returns 1 on success, 0 otherwise.
@@ -91,7 +93,7 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
/*
* Traverse the match_id table of the given driver, trying to find
- * a matching for the given MC object device.
+ * a match for the given device.
*/
for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
if (id->vendor == mc_dev->obj_desc.vendor &&
@@ -164,8 +166,7 @@ static int fsl_mc_driver_probe(struct device *dev)
error = mc_drv->probe(mc_dev);
if (error < 0) {
- dev_err(dev, "MC object device probe callback failed: %d\n",
- error);
+ dev_err(dev, "%s failed: %d\n", __func__, error);
return error;
}
@@ -183,9 +184,7 @@ static int fsl_mc_driver_remove(struct device *dev)
error = mc_drv->remove(mc_dev);
if (error < 0) {
- dev_err(dev,
- "MC object device remove callback failed: %d\n",
- error);
+ dev_err(dev, "%s failed: %d\n", __func__, error);
return error;
}
@@ -232,8 +231,6 @@ int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver,
return error;
}
- pr_info("MC object device driver %s registered\n",
- mc_driver->driver.name);
return 0;
}
EXPORT_SYMBOL_GPL(__fsl_mc_driver_register);
@@ -315,21 +312,6 @@ static int get_dprc_icid(struct fsl_mc_io *mc_io,
return error;
}
-static int get_dprc_version(struct fsl_mc_io *mc_io,
- int container_id, u16 *major, u16 *minor)
-{
- struct dprc_attributes attr;
- int error;
-
- error = get_dprc_attr(mc_io, container_id, &attr);
- if (error == 0) {
- *major = attr.version.major;
- *minor = attr.version.minor;
- }
-
- return error;
-}
-
static int translate_mc_addr(struct fsl_mc_device *mc_dev,
enum dprc_region_type mc_region_type,
u64 mc_offset, phys_addr_t *phys_addr)
@@ -452,7 +434,7 @@ bool fsl_mc_is_root_dprc(struct device *dev)
}
/**
- * Add a newly discovered MC object device to be visible in Linux
+ * Add a newly discovered fsl-mc device to be visible in Linux
*/
int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
struct fsl_mc_io *mc_io,
@@ -533,8 +515,8 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
goto error_cleanup_dev;
} else {
/*
- * A non-DPRC MC object device has to be a child of another
- * MC object (specifically a DPRC object)
+ * A non-DPRC object has to be a child of a DPRC; use the
+ * parent's ICID and interrupt domain.
*/
mc_dev->icid = parent_mc_dev->icid;
mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
@@ -572,8 +554,7 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
}
(void)get_device(&mc_dev->dev);
- dev_dbg(parent_dev, "Added MC object device %s\n",
- dev_name(&mc_dev->dev));
+ dev_dbg(parent_dev, "added %s\n", dev_name(&mc_dev->dev));
*new_mc_dev = mc_dev;
return 0;
@@ -590,10 +571,10 @@ error_cleanup_dev:
EXPORT_SYMBOL_GPL(fsl_mc_device_add);
/**
- * fsl_mc_device_remove - Remove a MC object device from being visible to
+ * fsl_mc_device_remove - Remove an fsl-mc device from being visible to
* Linux
*
- * @mc_dev: Pointer to a MC object device object
+ * @mc_dev: Pointer to an fsl-mc device
*/
void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
{
@@ -749,8 +730,6 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
struct mc_version mc_version;
struct resource res;
- dev_info(&pdev->dev, "Root MC bus device probed");
-
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;
@@ -783,8 +762,7 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
goto error_cleanup_mc_io;
}
- dev_info(&pdev->dev,
- "Freescale Management Complex Firmware version: %u.%u.%u\n",
+ dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n",
mc_version.major, mc_version.minor, mc_version.revision);
error = get_mc_addr_translation_ranges(&pdev->dev,
@@ -793,7 +771,7 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
if (error < 0)
goto error_cleanup_mc_io;
- error = dpmng_get_container_id(mc_io, 0, &container_id);
+ error = dprc_get_container_id(mc_io, 0, &container_id);
if (error < 0) {
dev_err(&pdev->dev,
"dpmng_get_container_id() failed: %d\n", error);
@@ -801,8 +779,9 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
}
memset(&obj_desc, 0, sizeof(struct dprc_obj_desc));
- error = get_dprc_version(mc_io, container_id,
- &obj_desc.ver_major, &obj_desc.ver_minor);
+ error = dprc_get_api_version(mc_io, 0,
+ &obj_desc.ver_major,
+ &obj_desc.ver_minor);
if (error < 0)
goto error_cleanup_mc_io;
@@ -840,7 +819,6 @@ static int fsl_mc_bus_remove(struct platform_device *pdev)
fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
mc->root_mc_bus_dev->mc_io = NULL;
- dev_info(&pdev->dev, "Root MC bus device removed");
return 0;
}
@@ -875,12 +853,10 @@ static int __init fsl_mc_bus_driver_init(void)
error = bus_register(&fsl_mc_bus_type);
if (error < 0) {
- pr_err("fsl-mc bus type registration failed: %d\n", error);
+ pr_err("bus type registration failed: %d\n", error);
goto error_cleanup_cache;
}
- pr_info("fsl-mc bus type registered\n");
-
error = platform_driver_register(&fsl_mc_bus_driver);
if (error < 0) {
pr_err("platform_driver_register() failed: %d\n", error);
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
index 3d46b1b1fa18..7975c6e6fee3 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
@@ -1,7 +1,7 @@
/*
* Freescale Management Complex (MC) bus driver MSI support
*
- * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
* Author: German Rivera <German.Rivera@freescale.com>
*
* This file is licensed under the terms of the GNU General Public
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-private.h b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
index d459c2673f39..5c49c9d2df6a 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-private.h
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
@@ -10,6 +10,9 @@
#ifndef _FSL_MC_PRIVATE_H_
#define _FSL_MC_PRIVATE_H_
+#include "../include/mc.h"
+#include "../include/mc-bus.h"
+
int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
struct fsl_mc_io *mc_io,
struct device *parent_dev,
diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
index 7a6ac640752f..6b1cd574644f 100644
--- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
@@ -1,7 +1,7 @@
/*
* Freescale Management Complex (MC) bus driver MSI support
*
- * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
* Author: German Rivera <German.Rivera@freescale.com>
*
* This file is licensed under the terms of the GNU General Public
@@ -19,7 +19,7 @@
#include "../include/mc-bus.h"
static struct irq_chip its_msi_irq_chip = {
- .name = "fsl-mc-bus-msi",
+ .name = "ITS-fMSI",
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_eoi = irq_chip_eoi_parent,
diff --git a/drivers/staging/fsl-mc/bus/mc-io.c b/drivers/staging/fsl-mc/bus/mc-io.c
index 798c965fe203..d66b87f0903b 100644
--- a/drivers/staging/fsl-mc/bus/mc-io.c
+++ b/drivers/staging/fsl-mc/bus/mc-io.c
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c
index 285917c7c8e4..4d82802b384d 100644
--- a/drivers/staging/fsl-mc/bus/mc-sys.c
+++ b/drivers/staging/fsl-mc/bus/mc-sys.c
@@ -1,4 +1,5 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* I/O services to send MC commands to the MC hardware
*
@@ -13,7 +14,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -67,7 +67,7 @@ static u16 mc_cmd_hdr_read_cmdid(struct mc_command *cmd)
struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
u16 cmd_id = le16_to_cpu(hdr->cmd_id);
- return (cmd_id & MC_CMD_HDR_CMDID_MASK) >> MC_CMD_HDR_CMDID_SHIFT;
+ return cmd_id;
}
static int mc_status_to_error(enum mc_cmd_status status)
@@ -200,7 +200,7 @@ static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
if (time_after_eq(jiffies, jiffies_until_timeout)) {
dev_dbg(mc_io->dev,
- "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
+ "MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n",
mc_io->portal_phys_addr,
(unsigned int)mc_cmd_hdr_read_token(cmd),
(unsigned int)mc_cmd_hdr_read_cmdid(cmd));
@@ -240,7 +240,7 @@ static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
if (timeout_usecs == 0) {
dev_dbg(mc_io->dev,
- "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
+ "MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n",
mc_io->portal_phys_addr,
(unsigned int)mc_cmd_hdr_read_token(cmd),
(unsigned int)mc_cmd_hdr_read_cmdid(cmd));
@@ -294,7 +294,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
if (status != MC_CMD_STATUS_OK) {
dev_dbg(mc_io->dev,
- "MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n",
+ "MC command failed: portal: %#llx, dprc handle: %#x, command: %#x, status: %s (%#x)\n",
mc_io->portal_phys_addr,
(unsigned int)mc_cmd_hdr_read_token(cmd),
(unsigned int)mc_cmd_hdr_read_cmdid(cmd),
diff --git a/drivers/staging/fsl-mc/include/dpbp.h b/drivers/staging/fsl-mc/include/dpbp.h
index e14e85a5d6df..bf34b1e0e730 100644
--- a/drivers/staging/fsl-mc/include/dpbp.h
+++ b/drivers/staging/fsl-mc/include/dpbp.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,7 +33,8 @@
#ifndef __FSL_DPBP_H
#define __FSL_DPBP_H
-/* Data Path Buffer Pool API
+/*
+ * Data Path Buffer Pool API
* Contains initialization APIs and runtime control APIs for DPBP
*/
@@ -44,8 +46,8 @@ int dpbp_open(struct fsl_mc_io *mc_io,
u16 *token);
int dpbp_close(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
+ u32 cmd_flags,
+ u16 token);
/**
* struct dpbp_cfg - Structure representing DPBP configuration
@@ -55,14 +57,16 @@ struct dpbp_cfg {
u32 options;
};
-int dpbp_create(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- const struct dpbp_cfg *cfg,
- u16 *token);
+int dpbp_create(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
+ u32 cmd_flags,
+ const struct dpbp_cfg *cfg,
+ u32 *obj_id);
int dpbp_destroy(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
u32 cmd_flags,
- u16 token);
+ u32 obj_id);
int dpbp_enable(struct fsl_mc_io *mc_io,
u32 cmd_flags,
@@ -88,85 +92,75 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
* @irq_num: A user defined number associated with this IRQ
*/
struct dpbp_irq_cfg {
- u64 addr;
- u32 val;
- int irq_num;
+ u64 addr;
+ u32 val;
+ int irq_num;
};
-int dpbp_set_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- struct dpbp_irq_cfg *irq_cfg);
-
-int dpbp_get_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- int *type,
- struct dpbp_irq_cfg *irq_cfg);
-
-int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 en);
-
-int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 *en);
+int dpbp_set_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ struct dpbp_irq_cfg *irq_cfg);
+
+int dpbp_get_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ int *type,
+ struct dpbp_irq_cfg *irq_cfg);
+
+int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en);
int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 mask);
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *mask);
-
-int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *status);
-
-int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 status);
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask);
+
+int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
/**
* struct dpbp_attr - Structure representing DPBP attributes
* @id: DPBP object ID
- * @version: DPBP version
* @bpid: Hardware buffer pool ID; should be used as an argument in
* acquire/release operations on buffers
*/
struct dpbp_attr {
int id;
- /**
- * struct version - Structure representing DPBP version
- * @major: DPBP major version
- * @minor: DPBP minor version
- */
- struct {
- u16 major;
- u16 minor;
- } version;
u16 bpid;
};
-int dpbp_get_attributes(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dpbp_attr *attr);
+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_attr *attr);
/**
* DPBP notifications options
@@ -196,24 +190,29 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
* @options: Mask of available options; use 'DPBP_NOTIF_OPT_<X>' values
*/
struct dpbp_notification_cfg {
- u32 depletion_entry;
- u32 depletion_exit;
- u32 surplus_entry;
- u32 surplus_exit;
- u64 message_iova;
- u64 message_ctx;
- u16 options;
+ u32 depletion_entry;
+ u32 depletion_exit;
+ u32 surplus_entry;
+ u32 surplus_exit;
+ u64 message_iova;
+ u64 message_ctx;
+ u16 options;
};
-int dpbp_set_notifications(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dpbp_notification_cfg *cfg);
+int dpbp_set_notifications(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_notification_cfg *cfg);
+
+int dpbp_get_notifications(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_notification_cfg *cfg);
-int dpbp_get_notifications(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dpbp_notification_cfg *cfg);
+int dpbp_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
/** @} */
diff --git a/drivers/staging/fsl-mc/include/dpmng.h b/drivers/staging/fsl-mc/include/dpmng.h
index e5cfd017f9a5..7d8e255da578 100644
--- a/drivers/staging/fsl-mc/include/dpmng.h
+++ b/drivers/staging/fsl-mc/include/dpmng.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,7 +33,8 @@
#ifndef __FSL_DPMNG_H
#define __FSL_DPMNG_H
-/* Management Complex General API
+/*
+ * Management Complex General API
* Contains general API for the Management Complex firmware
*/
@@ -58,12 +60,12 @@ struct mc_version {
u32 revision;
};
-int mc_get_version(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- struct mc_version *mc_ver_info);
+int mc_get_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ struct mc_version *mc_ver_info);
-int dpmng_get_container_id(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- int *container_id);
+int dpmng_get_container_id(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int *container_id);
#endif /* __FSL_DPMNG_H */
diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/include/dprc.h
index 593b2bbe7f71..f9ea769ccfab 100644
--- a/drivers/staging/fsl-mc/include/dprc.h
+++ b/drivers/staging/fsl-mc/include/dprc.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -34,7 +35,8 @@
#include "mc-cmd.h"
-/* Data Path Resource Container API
+/*
+ * Data Path Resource Container API
* Contains DPRC API for managing and querying DPAA resources
*/
@@ -70,12 +72,14 @@ int dprc_close(struct fsl_mc_io *mc_io,
* and can be retrieved using dprc_get_attributes()
*/
-/* Spawn Policy Option allowed - Indicates that the new container is allowed
+/*
+ * Spawn Policy Option allowed - Indicates that the new container is allowed
* to spawn and have its own child containers.
*/
#define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001
-/* General Container allocation policy - Indicates that the new container is
+/*
+ * General Container allocation policy - Indicates that the new container is
* allowed to allocate requested resources from its parent container; if not
* set, the container is only allowed to use resources in its own pools; Note
* that this is a container's global policy, but the parent container may
@@ -83,12 +87,14 @@ int dprc_close(struct fsl_mc_io *mc_io,
*/
#define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002
-/* Object initialization allowed - software context associated with this
+/*
+ * Object initialization allowed - software context associated with this
* container is allowed to invoke object initialization operations.
*/
#define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004
-/* Topology change allowed - software context associated with this
+/*
+ * Topology change allowed - software context associated with this
* container is allowed to invoke topology operations, such as attach/detach
* of network objects.
*/
@@ -116,17 +122,17 @@ struct dprc_cfg {
char label[16];
};
-int dprc_create_container(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dprc_cfg *cfg,
- int *child_container_id,
- u64 *child_portal_offset);
+int dprc_create_container(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dprc_cfg *cfg,
+ int *child_container_id,
+ u64 *child_portal_offset);
-int dprc_destroy_container(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int child_container_id);
+int dprc_destroy_container(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int child_container_id);
int dprc_reset_container(struct fsl_mc_io *mc_io,
u32 cmd_flags,
@@ -139,7 +145,7 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
#define DPRC_IRQ_INDEX 0
/* Number of dprc's IRQs */
-#define DPRC_NUM_OF_IRQS 1
+#define DPRC_NUM_OF_IRQS 1
/* DPRC IRQ events */
@@ -151,12 +157,14 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
#define DPRC_IRQ_EVENT_RES_ADDED 0x00000004
/* IRQ event - Indicates that resources removed from the container */
#define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008
-/* IRQ event - Indicates that one of the descendant containers that opened by
+/*
+ * IRQ event - Indicates that one of the descendant containers opened by
* this container is destroyed
*/
#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
-/* IRQ event - Indicates that on one of the container's opened object is
+/*
+ * IRQ event - Indicates that one of the container's opened objects is
* destroyed
*/
#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
@@ -171,59 +179,59 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
* @irq_num: A user defined number associated with this IRQ
*/
struct dprc_irq_cfg {
- phys_addr_t paddr;
- u32 val;
- int irq_num;
+ phys_addr_t paddr;
+ u32 val;
+ int irq_num;
};
-int dprc_set_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- struct dprc_irq_cfg *irq_cfg);
-
-int dprc_get_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- int *type,
- struct dprc_irq_cfg *irq_cfg);
-
-int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 en);
-
-int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 *en);
-
-int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 mask);
-
-int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *mask);
-
-int dprc_get_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *status);
-
-int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 status);
+int dprc_set_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg);
+
+int dprc_get_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ int *type,
+ struct dprc_irq_cfg *irq_cfg);
+
+int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en);
+
+int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask);
+
+int dprc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
/**
* struct dprc_attributes - Container attributes
@@ -231,63 +239,56 @@ int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
* @icid: Container's ICID
* @portal_id: Container's portal ID
* @options: Container's options as set at container's creation
- * @version: DPRC version
*/
struct dprc_attributes {
int container_id;
u16 icid;
int portal_id;
u64 options;
- /**
- * struct version - DPRC version
- * @major: DPRC major version
- * @minor: DPRC minor version
- */
- struct {
- u16 major;
- u16 minor;
- } version;
};
-int dprc_get_attributes(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dprc_attributes *attributes);
+int dprc_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dprc_attributes *attributes);
int dprc_set_res_quota(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int child_container_id,
- char *type,
- u16 quota);
+ u32 cmd_flags,
+ u16 token,
+ int child_container_id,
+ char *type,
+ u16 quota);
int dprc_get_res_quota(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int child_container_id,
- char *type,
- u16 *quota);
+ u32 cmd_flags,
+ u16 token,
+ int child_container_id,
+ char *type,
+ u16 *quota);
/* Resource request options */
-/* Explicit resource ID request - The requested objects/resources
+/*
+ * Explicit resource ID request - The requested objects/resources
* are explicit and sequential (in case of resources).
* The base ID is given at res_req at base_align field
*/
-#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001
+#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001
-/* Aligned resources request - Relevant only for resources
+/*
+ * Aligned resources request - Relevant only for resources
* request (and not objects). Indicates that resources base ID should be
* sequential and aligned to the value given at dprc_res_req base_align field
*/
-#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002
+#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002
-/* Plugged Flag - Relevant only for object assignment request.
+/*
+ * Plugged Flag - Relevant only for object assignment request.
* Indicates that after all objects assigned. An interrupt will be invoked at
* the relevant GPP. The assigned object will be marked as plugged.
* plugged objects can't be assigned from their container
*/
-#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004
+#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004
/**
* struct dprc_res_req - Resource request descriptor, to be used in assignment
@@ -312,33 +313,33 @@ struct dprc_res_req {
int id_base_align;
};
-int dprc_assign(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int container_id,
- struct dprc_res_req *res_req);
-
-int dprc_unassign(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int child_container_id,
- struct dprc_res_req *res_req);
-
-int dprc_get_pool_count(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int *pool_count);
-
-int dprc_get_pool(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int pool_index,
- char *type);
+int dprc_assign(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int container_id,
+ struct dprc_res_req *res_req);
+
+int dprc_unassign(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int child_container_id,
+ struct dprc_res_req *res_req);
+
+int dprc_get_pool_count(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *pool_count);
+
+int dprc_get_pool(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int pool_index,
+ char *type);
int dprc_get_obj_count(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int *obj_count);
+ u32 cmd_flags,
+ u16 token,
+ int *obj_count);
/* Objects Attributes Flags */
@@ -353,7 +354,7 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
* masters;
* user is responsible for proper memory handling through IOMMU configuration.
*/
-#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
+#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
/**
* struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj()
@@ -381,41 +382,41 @@ struct dprc_obj_desc {
u16 flags;
};
-int dprc_get_obj(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int obj_index,
- struct dprc_obj_desc *obj_desc);
-
-int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- struct dprc_obj_desc *obj_desc);
-
-int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- u8 irq_index,
- struct dprc_irq_cfg *irq_cfg);
-
-int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- u8 irq_index,
- int *type,
- struct dprc_irq_cfg *irq_cfg);
-
-int dprc_get_res_count(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *type,
- int *res_count);
+int dprc_get_obj(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int obj_index,
+ struct dprc_obj_desc *obj_desc);
+
+int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ struct dprc_obj_desc *obj_desc);
+
+int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg);
+
+int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 irq_index,
+ int *type,
+ struct dprc_irq_cfg *irq_cfg);
+
+int dprc_get_res_count(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *type,
+ int *res_count);
/**
* enum dprc_iter_status - Iteration status
@@ -444,11 +445,11 @@ struct dprc_res_ids_range_desc {
enum dprc_iter_status iter_status;
};
-int dprc_get_res_ids(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *type,
- struct dprc_res_ids_range_desc *range_desc);
+int dprc_get_res_ids(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *type,
+ struct dprc_res_ids_range_desc *range_desc);
/* Region flags */
/* Cacheable - Indicates that region should be mapped as cacheable */
@@ -481,20 +482,20 @@ struct dprc_region_desc {
enum dprc_region_type type;
};
-int dprc_get_obj_region(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- u8 region_index,
- struct dprc_region_desc *region_desc);
-
-int dprc_set_obj_label(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- char *label);
+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 region_index,
+ struct dprc_region_desc *region_desc);
+
+int dprc_set_obj_label(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ char *label);
/**
* struct dprc_endpoint - Endpoint description for link connect/disconnect
@@ -521,24 +522,33 @@ struct dprc_connection_cfg {
u32 max_rate;
};
-int dprc_connect(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- const struct dprc_endpoint *endpoint1,
- const struct dprc_endpoint *endpoint2,
+int dprc_connect(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dprc_endpoint *endpoint1,
+ const struct dprc_endpoint *endpoint2,
const struct dprc_connection_cfg *cfg);
-int dprc_disconnect(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- const struct dprc_endpoint *endpoint);
-
-int dprc_get_connection(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- const struct dprc_endpoint *endpoint1,
- struct dprc_endpoint *endpoint2,
- int *state);
+int dprc_disconnect(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dprc_endpoint *endpoint);
+
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state);
+
+int dprc_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
+int dprc_get_container_id(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int *container_id);
#endif /* _FSL_DPRC_H */
diff --git a/drivers/staging/fsl-mc/include/mc-bus.h b/drivers/staging/fsl-mc/include/mc-bus.h
index 170684a57ca2..42700de94d59 100644
--- a/drivers/staging/fsl-mc/include/mc-bus.h
+++ b/drivers/staging/fsl-mc/include/mc-bus.h
@@ -1,7 +1,7 @@
/*
* Freescale Management Complex (MC) bus declarations
*
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
* Author: German Rivera <German.Rivera@freescale.com>
*
* This file is licensed under the terms of the GNU General Public
@@ -42,8 +42,8 @@ struct msi_domain_info;
*/
struct fsl_mc_resource_pool {
enum fsl_mc_pool_type type;
- int16_t max_count;
- int16_t free_count;
+ int max_count;
+ int free_count;
struct mutex mutex; /* serializes access to free_list */
struct list_head free_list;
struct fsl_mc_bus *mc_bus;
diff --git a/drivers/staging/fsl-mc/include/mc-cmd.h b/drivers/staging/fsl-mc/include/mc-cmd.h
index 5decb9890c31..2e08aa31b084 100644
--- a/drivers/staging/fsl-mc/include/mc-cmd.h
+++ b/drivers/staging/fsl-mc/include/mc-cmd.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -48,6 +49,15 @@ struct mc_command {
u64 params[MC_CMD_NUM_OF_PARAMS];
};
+struct mc_rsp_create {
+ __le32 object_id;
+};
+
+struct mc_rsp_api_ver {
+ __le16 major_ver;
+ __le16 minor_ver;
+};
+
enum mc_cmd_status {
MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
@@ -72,11 +82,6 @@ enum mc_cmd_status {
/* Command completion flag */
#define MC_CMD_FLAG_INTR_DIS 0x01
-#define MC_CMD_HDR_CMDID_MASK 0xFFF0
-#define MC_CMD_HDR_CMDID_SHIFT 4
-#define MC_CMD_HDR_TOKEN_MASK 0xFFC0
-#define MC_CMD_HDR_TOKEN_SHIFT 6
-
static inline u64 mc_encode_cmd_header(u16 cmd_id,
u32 cmd_flags,
u16 token)
@@ -84,10 +89,8 @@ static inline u64 mc_encode_cmd_header(u16 cmd_id,
u64 header = 0;
struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
- hdr->cmd_id = cpu_to_le16((cmd_id << MC_CMD_HDR_CMDID_SHIFT) &
- MC_CMD_HDR_CMDID_MASK);
- hdr->token = cpu_to_le16((token << MC_CMD_HDR_TOKEN_SHIFT) &
- MC_CMD_HDR_TOKEN_MASK);
+ hdr->cmd_id = cpu_to_le16(cmd_id);
+ hdr->token = cpu_to_le16(token);
hdr->status = MC_CMD_STATUS_READY;
if (cmd_flags & MC_CMD_FLAG_PRI)
hdr->flags_hw = MC_CMD_FLAG_PRI;
@@ -102,7 +105,26 @@ static inline u16 mc_cmd_hdr_read_token(struct mc_command *cmd)
struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
u16 token = le16_to_cpu(hdr->token);
- return (token & MC_CMD_HDR_TOKEN_MASK) >> MC_CMD_HDR_TOKEN_SHIFT;
+ return token;
+}
+
+static inline u32 mc_cmd_read_object_id(struct mc_command *cmd)
+{
+ struct mc_rsp_create *rsp_params;
+
+ rsp_params = (struct mc_rsp_create *)cmd->params;
+ return le32_to_cpu(rsp_params->object_id);
+}
+
+static inline void mc_cmd_read_api_version(struct mc_command *cmd,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct mc_rsp_api_ver *rsp_params;
+
+ rsp_params = (struct mc_rsp_api_ver *)cmd->params;
+ *major_ver = le16_to_cpu(rsp_params->major_ver);
+ *minor_ver = le16_to_cpu(rsp_params->minor_ver);
}
#endif /* __FSL_MC_CMD_H */
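With the shift/mask macros gone, command wrappers pass the command ID and token through unchanged and decode responses with the inline readers added above. A sketch of what a get_api_version wrapper built on these helpers might look like follows; the DPXX_CMDID_GET_API_VERSION value and the dpxx_ prefix are invented for illustration, while mc_send_command() is the I/O routine from mc-sys.h.

#define DPXX_CMDID_GET_API_VERSION	0xa011	/* made-up command ID */

int dpxx_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
			 u16 *major_ver, u16 *minor_ver)
{
	struct mc_command cmd = { 0 };
	int error;

	/* cmd_id and token are now written as raw 16-bit values */
	cmd.header = mc_encode_cmd_header(DPXX_CMDID_GET_API_VERSION,
					  cmd_flags, 0);

	error = mc_send_command(mc_io, &cmd);
	if (error)
		return error;

	/* the response payload follows struct mc_rsp_api_ver */
	mc_cmd_read_api_version(&cmd, major_ver, minor_ver);

	return 0;
}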
diff --git a/drivers/staging/fsl-mc/include/mc-sys.h b/drivers/staging/fsl-mc/include/mc-sys.h
index 89ad0cf54702..dca7f9084e05 100644
--- a/drivers/staging/fsl-mc/include/mc-sys.h
+++ b/drivers/staging/fsl-mc/include/mc-sys.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Interface of the I/O services to send MC commands to the MC hardware
*
diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h
index f6e720e84460..1c46c0c2a895 100644
--- a/drivers/staging/fsl-mc/include/mc.h
+++ b/drivers/staging/fsl-mc/include/mc.h
@@ -1,7 +1,7 @@
/*
* Freescale Management Complex (MC) bus public interface
*
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
* Author: German Rivera <German.Rivera@freescale.com>
*
* This file is licensed under the terms of the GNU General Public
@@ -81,7 +81,7 @@ enum fsl_mc_pool_type {
*/
struct fsl_mc_resource {
enum fsl_mc_pool_type type;
- int32_t id;
+ s32 id;
void *data;
struct fsl_mc_resource_pool *parent_pool;
struct list_head node;
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 49c718b91e55..41a49c8194e5 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -1667,12 +1667,6 @@ static inline void fill_plug_rsp_nack(struct fwserial_mgmt_pkt *pkt)
pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
}
-static inline void fill_unplug_req(struct fwserial_mgmt_pkt *pkt)
-{
- pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG);
- pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
-}
-
static inline void fill_unplug_rsp_nack(struct fwserial_mgmt_pkt *pkt)
{
pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG_RSP | FWSC_RSP_NACK);
diff --git a/drivers/staging/gdm724x/gdm_lte.h b/drivers/staging/gdm724x/gdm_lte.h
index 88414e5a70cc..7ddeabc0e50a 100644
--- a/drivers/staging/gdm724x/gdm_lte.h
+++ b/drivers/staging/gdm724x/gdm_lte.h
@@ -47,15 +47,15 @@ struct phy_dev {
void *priv_dev;
struct net_device *dev[MAX_NIC_TYPE];
int (*send_hci_func)(void *priv_dev, void *data, int len,
- void (*cb)(void *cb_data), void *cb_data);
+ void (*cb)(void *cb_data), void *cb_data);
int (*send_sdu_func)(void *priv_dev, void *data, int len,
- unsigned int dftEpsId, unsigned int epsId,
- void (*cb)(void *cb_data), void *cb_data,
- int dev_idx, int nic_type);
+ unsigned int dftEpsId, unsigned int epsId,
+ void (*cb)(void *cb_data), void *cb_data,
+ int dev_idx, int nic_type);
int (*rcv_func)(void *priv_dev,
- int (*cb)(void *cb_data, void *data, int len,
- int context),
- void *cb_data, int context);
+ int (*cb)(void *cb_data, void *data, int len,
+ int context),
+ void *cb_data, int context);
struct gdm_endian * (*get_endian)(void *priv_dev);
};
diff --git a/drivers/staging/gdm724x/gdm_tty.h b/drivers/staging/gdm724x/gdm_tty.h
index 297438b4ddcb..195c5902989f 100644
--- a/drivers/staging/gdm724x/gdm_tty.h
+++ b/drivers/staging/gdm724x/gdm_tty.h
@@ -17,7 +17,6 @@
#include <linux/types.h>
#include <linux/tty.h>
-
#define TTY_MAX_COUNT 2
#define MAX_ISSUE_NUM 3
diff --git a/drivers/staging/gdm724x/netlink_k.h b/drivers/staging/gdm724x/netlink_k.h
index 7cf979b3f826..5ebd73157f5a 100644
--- a/drivers/staging/gdm724x/netlink_k.h
+++ b/drivers/staging/gdm724x/netlink_k.h
@@ -18,7 +18,8 @@
#include <net/sock.h>
struct sock *netlink_init(int unit,
- void (*cb)(struct net_device *dev, u16 type, void *msg, int len));
+ void (*cb)(struct net_device *dev,
+ u16 type, void *msg, int len));
int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len);
#endif /* _NETLINK_K_H_ */
diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c
index 70323aa11f24..3fda0cd6bb42 100644
--- a/drivers/staging/greybus/arche-apb-ctrl.c
+++ b/drivers/staging/greybus/arche-apb-ctrl.c
@@ -183,7 +183,7 @@ static int standby_boot_seq(struct platform_device *pdev)
* Pasted from WDM spec,
* - A falling edge on POWEROFF_L is detected (a)
* - WDM enters standby mode, but no output signals are changed
- * */
+ */
/* TODO: POWEROFF_L is input to WDM module */
apb->state = ARCHE_PLATFORM_STATE_STANDBY;
@@ -285,8 +285,10 @@ static ssize_t state_store(struct device *dev,
if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
return count;
- /* First we want to make sure we power off everything
- * and then enter FW flashing state */
+ /*
+ * First we want to make sure we power off everything
+ * and then enter FW flashing state
+ */
poweroff_seq(pdev);
ret = fw_flashing_seq(pdev);
} else {
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index d33d6fe078ad..338c2d3ee842 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -457,7 +457,8 @@ retry:
goto exit;
/* First we want to make sure we power off everything
- * and then activate back again */
+ * and then activate back again
+ */
device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
arche_platform_poweroff_seq(arche_pdata);
diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
index 8a0744b58a32..f8862c6d7102 100644
--- a/drivers/staging/greybus/audio_codec.c
+++ b/drivers/staging/greybus/audio_codec.c
@@ -405,7 +405,6 @@ static void gbcodec_shutdown(struct snd_pcm_substream *substream,
params->state = GBAUDIO_CODEC_SHUTDOWN;
mutex_unlock(&codec->lock);
pm_relax(dai->dev);
- return;
}
static int gbcodec_hw_params(struct snd_pcm_substream *substream,
@@ -655,8 +654,10 @@ static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
ret = gb_audio_apbridgea_shutdown_rx(data->connection,
0);
params->state = GBAUDIO_CODEC_STOP;
- } else
+ } else {
ret = -EINVAL;
+ }
+
if (ret)
dev_err_ratelimited(dai->dev,
"%s:Error during %s %s stream:%d\n",
diff --git a/drivers/staging/greybus/audio_codec.h b/drivers/staging/greybus/audio_codec.h
index ca027bd99ad7..62fd93939a1f 100644
--- a/drivers/staging/greybus/audio_codec.h
+++ b/drivers/staging/greybus/audio_codec.h
@@ -158,7 +158,6 @@ struct gbaudio_module_info {
int dev_id; /* check if it should be bundle_id/hd_cport_id */
int vid;
int pid;
- int slot;
int type;
int set_uevent;
char vstr[NAME_SIZE];
diff --git a/drivers/staging/greybus/audio_manager.h b/drivers/staging/greybus/audio_manager.h
index c4ca09754a6a..5ab8f5e0ed3f 100644
--- a/drivers/staging/greybus/audio_manager.h
+++ b/drivers/staging/greybus/audio_manager.h
@@ -18,10 +18,9 @@
struct gb_audio_manager_module_descriptor {
char name[GB_AUDIO_MANAGER_MODULE_NAME_LEN];
- int slot;
int vid;
int pid;
- int cport;
+ int intf_id;
unsigned int ip_devices;
unsigned int op_devices;
};
diff --git a/drivers/staging/greybus/audio_manager_module.c b/drivers/staging/greybus/audio_manager_module.c
index a10e96ad79c1..adc16977452d 100644
--- a/drivers/staging/greybus/audio_manager_module.c
+++ b/drivers/staging/greybus/audio_manager_module.c
@@ -81,16 +81,6 @@ static ssize_t gb_audio_module_name_show(
static struct gb_audio_manager_module_attribute gb_audio_module_name_attribute =
__ATTR(name, 0664, gb_audio_module_name_show, NULL);
-static ssize_t gb_audio_module_slot_show(
- struct gb_audio_manager_module *module,
- struct gb_audio_manager_module_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d", module->desc.slot);
-}
-
-static struct gb_audio_manager_module_attribute gb_audio_module_slot_attribute =
- __ATTR(slot, 0664, gb_audio_module_slot_show, NULL);
-
static ssize_t gb_audio_module_vid_show(
struct gb_audio_manager_module *module,
struct gb_audio_manager_module_attribute *attr, char *buf)
@@ -111,16 +101,16 @@ static ssize_t gb_audio_module_pid_show(
static struct gb_audio_manager_module_attribute gb_audio_module_pid_attribute =
__ATTR(pid, 0664, gb_audio_module_pid_show, NULL);
-static ssize_t gb_audio_module_cport_show(
+static ssize_t gb_audio_module_intf_id_show(
struct gb_audio_manager_module *module,
struct gb_audio_manager_module_attribute *attr, char *buf)
{
- return sprintf(buf, "%d", module->desc.cport);
+ return sprintf(buf, "%d", module->desc.intf_id);
}
static struct gb_audio_manager_module_attribute
- gb_audio_module_cport_attribute =
- __ATTR(cport, 0664, gb_audio_module_cport_show, NULL);
+ gb_audio_module_intf_id_attribute =
+ __ATTR(intf_id, 0664, gb_audio_module_intf_id_show, NULL);
static ssize_t gb_audio_module_ip_devices_show(
struct gb_audio_manager_module *module,
@@ -146,10 +136,9 @@ static struct gb_audio_manager_module_attribute
static struct attribute *gb_audio_module_default_attrs[] = {
&gb_audio_module_name_attribute.attr,
- &gb_audio_module_slot_attribute.attr,
&gb_audio_module_vid_attribute.attr,
&gb_audio_module_pid_attribute.attr,
- &gb_audio_module_cport_attribute.attr,
+ &gb_audio_module_intf_id_attribute.attr,
&gb_audio_module_ip_devices_attribute.attr,
&gb_audio_module_op_devices_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
@@ -164,29 +153,26 @@ static struct kobj_type gb_audio_module_type = {
static void send_add_uevent(struct gb_audio_manager_module *module)
{
char name_string[128];
- char slot_string[64];
char vid_string[64];
char pid_string[64];
- char cport_string[64];
+ char intf_id_string[64];
char ip_devices_string[64];
char op_devices_string[64];
char *envp[] = {
name_string,
- slot_string,
vid_string,
pid_string,
- cport_string,
+ intf_id_string,
ip_devices_string,
op_devices_string,
NULL
};
snprintf(name_string, 128, "NAME=%s", module->desc.name);
- snprintf(slot_string, 64, "SLOT=%d", module->desc.slot);
snprintf(vid_string, 64, "VID=%d", module->desc.vid);
snprintf(pid_string, 64, "PID=%d", module->desc.pid);
- snprintf(cport_string, 64, "CPORT=%d", module->desc.cport);
+ snprintf(intf_id_string, 64, "INTF_ID=%d", module->desc.intf_id);
snprintf(ip_devices_string, 64, "I/P DEVICES=0x%X",
module->desc.ip_devices);
snprintf(op_devices_string, 64, "O/P DEVICES=0x%X",
@@ -246,13 +232,12 @@ int gb_audio_manager_module_create(
void gb_audio_manager_module_dump(struct gb_audio_manager_module *module)
{
- pr_info("audio module #%d name=%s slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X o/p devices=0x%X\n",
+ pr_info("audio module #%d name=%s vid=%d pid=%d intf_id=%d i/p devices=0x%X o/p devices=0x%X\n",
module->id,
module->desc.name,
- module->desc.slot,
module->desc.vid,
module->desc.pid,
- module->desc.cport,
+ module->desc.intf_id,
module->desc.ip_devices,
module->desc.op_devices);
}
diff --git a/drivers/staging/greybus/audio_manager_sysfs.c b/drivers/staging/greybus/audio_manager_sysfs.c
index d8bf8591ff9e..34ebd147052f 100644
--- a/drivers/staging/greybus/audio_manager_sysfs.c
+++ b/drivers/staging/greybus/audio_manager_sysfs.c
@@ -20,10 +20,9 @@ static ssize_t manager_sysfs_add_store(
int num = sscanf(buf,
"name=%" GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF "s "
- "slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X"
- "o/p devices=0x%X",
- desc.name, &desc.slot, &desc.vid, &desc.pid,
- &desc.cport, &desc.ip_devices, &desc.op_devices);
+ "vid=%d pid=%d intf_id=%d i/p devices=0x%X o/p devices=0x%X",
+ desc.name, &desc.vid, &desc.pid, &desc.intf_id,
+ &desc.ip_devices, &desc.op_devices);
-	if (num != 7)
+	if (num != 6)
return -EINVAL;
@@ -44,7 +43,7 @@ static ssize_t manager_sysfs_remove_store(
{
int id;
- int num = sscanf(buf, "%d", &id);
+ int num = kstrtoint(buf, 10, &id);
-	if (num != 1)
+	if (num)
return -EINVAL;
@@ -65,16 +64,17 @@ static ssize_t manager_sysfs_dump_store(
{
int id;
- int num = sscanf(buf, "%d", &id);
+ int num = kstrtoint(buf, 10, &id);
-	if (num == 1) {
+	if (!num) {
num = gb_audio_manager_dump_module(id);
if (num)
return num;
- } else if (!strncmp("all", buf, 3))
+ } else if (!strncmp("all", buf, 3)) {
gb_audio_manager_dump_all();
- else
+ } else {
return -EINVAL;
+ }
return count;
}
diff --git a/drivers/staging/greybus/audio_module.c b/drivers/staging/greybus/audio_module.c
index ae1c0fa85752..17a9948b1ba1 100644
--- a/drivers/staging/greybus/audio_module.c
+++ b/drivers/staging/greybus/audio_module.c
@@ -207,10 +207,8 @@ static int gb_audio_add_data_connection(struct gbaudio_module_info *gbmodule,
struct gbaudio_data_connection *dai;
dai = devm_kzalloc(gbmodule->dev, sizeof(*dai), GFP_KERNEL);
- if (!dai) {
- dev_err(gbmodule->dev, "DAI Malloc failure\n");
+ if (!dai)
return -ENOMEM;
- }
connection = gb_connection_create_offloaded(bundle,
le16_to_cpu(cport_desc->id),
@@ -345,10 +343,9 @@ static int gb_audio_probe(struct gb_bundle *bundle,
dev_dbg(dev, "Inform set_event:%d to above layer\n", 1);
/* prepare for the audio manager */
strlcpy(desc.name, gbmodule->name, GB_AUDIO_MANAGER_MODULE_NAME_LEN);
- desc.slot = 1; /* todo */
desc.vid = 2; /* todo */
desc.pid = 3; /* todo */
- desc.cport = gbmodule->dev_id;
+ desc.intf_id = gbmodule->dev_id;
desc.op_devices = gbmodule->op_devices;
desc.ip_devices = gbmodule->ip_devices;
gbmodule->manager_id = gb_audio_manager_add(&desc);
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
index b6251691a33d..8b216ca99cf9 100644
--- a/drivers/staging/greybus/audio_topology.c
+++ b/drivers/staging/greybus/audio_topology.c
@@ -114,6 +114,7 @@ static int gbaudio_map_widgetname(struct gbaudio_module_info *module,
const char *name)
{
struct gbaudio_widget *widget;
+
list_for_each_entry(widget, &module->widget_list, list) {
if (!strncmp(widget->name, name, NAME_SIZE))
return widget->id;
@@ -1044,8 +1045,10 @@ static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module,
control->texts = (const char * const *)
gb_generate_enum_strings(module, gbenum);
control->items = gbenum->items;
- } else
+ } else {
csize = sizeof(struct gb_audio_control);
+ }
+
*w_size += csize;
curr = (void *)curr + csize;
list_add(&control->list, &module->widget_ctl_list);
@@ -1190,8 +1193,9 @@ static int gbaudio_tplg_process_kcontrols(struct gbaudio_module_info *module,
control->texts = (const char * const *)
gb_generate_enum_strings(module, gbenum);
control->items = gbenum->items;
- } else
+ } else {
csize = sizeof(struct gb_audio_control);
+ }
list_add(&control->list, &module->ctl_list);
dev_dbg(module->dev, "%d:%s created of type %d\n", curr->id,
diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c
index 491bdd720c0c..1c5b41ae6774 100644
--- a/drivers/staging/greybus/camera.c
+++ b/drivers/staging/greybus/camera.c
@@ -289,6 +289,7 @@ static const int gb_camera_configure_streams_validate_response(
for (i = 0; i < resp->num_streams; i++) {
struct gb_camera_stream_config_response *cfg = &resp->config[i];
+
if (cfg->padding) {
gcam_err(gcam, "stream #%u padding != 0\n", i);
return -EIO;
@@ -796,7 +797,7 @@ static int gb_camera_op_configure_streams(void *priv, unsigned int *nstreams,
if (gb_nstreams > GB_CAMERA_MAX_STREAMS)
return -EINVAL;
- gb_streams = kzalloc(gb_nstreams * sizeof(*gb_streams), GFP_KERNEL);
+ gb_streams = kcalloc(gb_nstreams, sizeof(*gb_streams), GFP_KERNEL);
if (!gb_streams)
return -ENOMEM;
@@ -937,7 +938,7 @@ static ssize_t gb_camera_debugfs_configure_streams(struct gb_camera *gcam,
return ret;
/* For each stream to configure parse width, height and format */
- streams = kzalloc(nstreams * sizeof(*streams), GFP_KERNEL);
+ streams = kcalloc(nstreams, sizeof(*streams), GFP_KERNEL);
if (!streams)
return -ENOMEM;
@@ -1118,7 +1119,7 @@ static ssize_t gb_camera_debugfs_write(struct file *file,
char *kbuf;
if (len > 1024)
- return -EINVAL;
+ return -EINVAL;
kbuf = kmalloc(len + 1, GFP_KERNEL);
if (!kbuf)
diff --git a/drivers/staging/greybus/es2.c b/drivers/staging/greybus/es2.c
index baab460eeaa3..f1d256df06d5 100644
--- a/drivers/staging/greybus/es2.c
+++ b/drivers/staging/greybus/es2.c
@@ -175,10 +175,9 @@ static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
u8 *data;
int retval;
- data = kmalloc(size, GFP_KERNEL);
+ data = kmemdup(req, size, GFP_KERNEL);
if (!data)
return -ENOMEM;
- memcpy(data, req, size);
retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
cmd,
@@ -1034,7 +1033,7 @@ static struct arpc *arpc_alloc(void *payload, u16 size, u8 type)
goto err_free_req;
rpc->req->type = type;
- rpc->req->size = cpu_to_le16(sizeof(rpc->req) + size);
+ rpc->req->size = cpu_to_le16(sizeof(*rpc->req) + size);
memcpy(rpc->req->data, payload, size);
init_completion(&rpc->response_received);
diff --git a/drivers/staging/greybus/log.c b/drivers/staging/greybus/log.c
index 70dd9e5a1cf2..1a18ab1ff8aa 100644
--- a/drivers/staging/greybus/log.c
+++ b/drivers/staging/greybus/log.c
@@ -55,8 +55,10 @@ static int gb_log_request_handler(struct gb_operation *op)
/* Ensure the buffer is 0 terminated */
receive->msg[len - 1] = '\0';
- /* Print with dev_dbg() so that it can be easily turned off using
- * dynamic debugging (and prevent any DoS) */
+ /*
+ * Print with dev_dbg() so that it can be easily turned off using
+ * dynamic debugging (and prevent any DoS)
+ */
dev_dbg(dev, "%s", receive->msg);
return 0;
diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c
index 5649ef1e379d..66b37ea29ef0 100644
--- a/drivers/staging/greybus/sdio.c
+++ b/drivers/staging/greybus/sdio.c
@@ -191,9 +191,8 @@ static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
state_changed = 1;
}
- if (event & GB_SDIO_WP) {
+ if (event & GB_SDIO_WP)
host->read_only = true;
- }
if (state_changed) {
dev_info(mmc_dev(host->mmc), "card %s now event\n",
diff --git a/drivers/staging/greybus/timesync.c b/drivers/staging/greybus/timesync.c
index 2e68af7dea6d..e586627f4bbc 100644
--- a/drivers/staging/greybus/timesync.c
+++ b/drivers/staging/greybus/timesync.c
@@ -807,11 +807,11 @@ static int gb_timesync_schedule(struct gb_timesync_svc *timesync_svc, int state)
return -EINVAL;
mutex_lock(&timesync_svc->mutex);
- if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) {
+ if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID)
gb_timesync_set_state_atomic(timesync_svc, state);
- } else {
+ else
ret = -ENODEV;
- }
+
mutex_unlock(&timesync_svc->mutex);
return ret;
}
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index 2633d2bfb1b4..6d39f4a04754 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -623,9 +623,6 @@ static int get_serial_info(struct gb_tty *gb_tty,
{
struct serial_struct tmp;
- if (!info)
- return -EINVAL;
-
memset(&tmp, 0, sizeof(tmp));
tmp.flags = ASYNC_LOW_LATENCY | ASYNC_SKIP_TEST;
tmp.type = PORT_16550A;
@@ -711,25 +708,20 @@ static int wait_serial_change(struct gb_tty *gb_tty, unsigned long arg)
return retval;
}
-static int get_serial_usage(struct gb_tty *gb_tty,
- struct serial_icounter_struct __user *count)
+static int gb_tty_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount)
{
- struct serial_icounter_struct icount;
- int retval = 0;
-
- memset(&icount, 0, sizeof(icount));
- icount.dsr = gb_tty->iocount.dsr;
- icount.rng = gb_tty->iocount.rng;
- icount.dcd = gb_tty->iocount.dcd;
- icount.frame = gb_tty->iocount.frame;
- icount.overrun = gb_tty->iocount.overrun;
- icount.parity = gb_tty->iocount.parity;
- icount.brk = gb_tty->iocount.brk;
+ struct gb_tty *gb_tty = tty->driver_data;
- if (copy_to_user(count, &icount, sizeof(icount)) > 0)
- retval = -EFAULT;
+ icount->dsr = gb_tty->iocount.dsr;
+ icount->rng = gb_tty->iocount.rng;
+ icount->dcd = gb_tty->iocount.dcd;
+ icount->frame = gb_tty->iocount.frame;
+ icount->overrun = gb_tty->iocount.overrun;
+ icount->parity = gb_tty->iocount.parity;
+ icount->brk = gb_tty->iocount.brk;
- return retval;
+ return 0;
}
static int gb_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
@@ -746,9 +738,6 @@ static int gb_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
(struct serial_struct __user *)arg);
case TIOCMIWAIT:
return wait_serial_change(gb_tty, arg);
- case TIOCGICOUNT:
- return get_serial_usage(gb_tty,
- (struct serial_icounter_struct __user *)arg);
}
return -ENOIOCTLCMD;
@@ -830,9 +819,10 @@ static const struct tty_operations gb_ops = {
.set_termios = gb_tty_set_termios,
.tiocmget = gb_tty_tiocmget,
.tiocmset = gb_tty_tiocmset,
+ .get_icount = gb_tty_get_icount,
};
-static struct tty_port_operations gb_port_ops = {
+static const struct tty_port_operations gb_port_ops = {
.dtr_rts = gb_tty_dtr_rts,
.activate = gb_tty_port_activate,
.shutdown = gb_tty_port_shutdown,
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
index 8ed4d395be58..19b550fff04b 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
@@ -38,7 +38,7 @@ static u8 bits_magic[] = {
static struct platform_device *firmware_pdev;
static char *file = "xlinx_fpga_firmware.bit";
-module_param(file, charp, S_IRUGO);
+module_param(file, charp, 0444);
MODULE_PARM_DESC(file, "Xilinx FPGA firmware file.");
static void read_bitstream(char *bitdata, char *buf, int *offset, int rdsize)
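/*
 * The S_IRUGO -> 0444 change above follows the checkpatch preference for
 * octal permission literals; the two are equivalent (mode r--r--r--).
 * A minimal sketch of the same idiom -- the parameter name below is
 * hypothetical and only for illustration:
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

static char *example_fw = "example_firmware.bit";
/* 0444 == S_IRUGO: readable via /sys/module/<module>/parameters/, never writable */
module_param(example_fw, charp, 0444);
MODULE_PARM_DESC(example_fw, "Illustrative firmware file parameter.");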
diff --git a/drivers/staging/i4l/act2000/act2000_isa.c b/drivers/staging/i4l/act2000/act2000_isa.c
index ad7a0391369f..76ff5de65781 100644
--- a/drivers/staging/i4l/act2000/act2000_isa.c
+++ b/drivers/staging/i4l/act2000/act2000_isa.c
@@ -259,6 +259,7 @@ act2000_isa_receive(act2000_card *card)
"act2000_isa_receive: Invalid CAPI msg\n");
{
int i; __u8 *p; __u8 *t; __u8 tmp[30];
+
for (i = 0, p = (__u8 *)&card->idat.isa.rcvhdr, t = tmp; i < 8; i++)
t += sprintf(t, "%02x ", *(p++));
printk(KERN_WARNING "act2000_isa_receive: %s\n", tmp);
diff --git a/drivers/staging/i4l/act2000/capi.c b/drivers/staging/i4l/act2000/capi.c
index 62f56294853c..61386a78fb91 100644
--- a/drivers/staging/i4l/act2000/capi.c
+++ b/drivers/staging/i4l/act2000/capi.c
@@ -99,7 +99,7 @@ actcapi_chkhdr(act2000_card *card, actcapi_msghdr *hdr)
for (i = 0; i < num_valid_imsg; i++)
if ((hdr->cmd.cmd == valid_msg[i].cmd.cmd) &&
(hdr->cmd.subcmd == valid_msg[i].cmd.subcmd)) {
- return (i ? 1 : 2);
+ return i ? 1 : 2;
}
return 0;
}
@@ -506,6 +506,7 @@ static int
new_plci(act2000_card *card, __u16 plci)
{
int i;
+
for (i = 0; i < ACT2000_BCH; i++)
if (card->bch[i].plci == 0x8000) {
card->bch[i].plci = plci;
@@ -518,6 +519,7 @@ static int
find_plci(act2000_card *card, __u16 plci)
{
int i;
+
for (i = 0; i < ACT2000_BCH; i++)
if (card->bch[i].plci == plci)
return i;
@@ -528,6 +530,7 @@ static int
find_ncci(act2000_card *card, __u16 ncci)
{
int i;
+
for (i = 0; i < ACT2000_BCH; i++)
if (card->bch[i].ncci == ncci)
return i;
@@ -538,6 +541,7 @@ static int
find_dialing(act2000_card *card, __u16 callref)
{
int i;
+
for (i = 0; i < ACT2000_BCH; i++)
if ((card->bch[i].callref == callref) &&
(card->bch[i].fsm_state == ACT2000_STATE_OCALL))
@@ -1088,6 +1092,7 @@ actcapi_debug_msg(struct sk_buff *skb, int direction)
int l = msg->hdr.len - 12;
int j;
char *p = tmp;
+
for (j = 0; j < l; j++)
p += sprintf(p, "%02x ", msg->msg.info_ind.el.display[j]);
printk(KERN_DEBUG " D = '%s'\n", tmp);
diff --git a/drivers/staging/i4l/act2000/module.c b/drivers/staging/i4l/act2000/module.c
index 99c9c0a1c63e..6aa120319e52 100644
--- a/drivers/staging/i4l/act2000/module.c
+++ b/drivers/staging/i4l/act2000/module.c
@@ -19,8 +19,7 @@
#include <linux/slab.h>
#include <linux/init.h>
-static unsigned short act2000_isa_ports[] =
-{
+static unsigned short act2000_isa_ports[] = {
0x0200, 0x0240, 0x0280, 0x02c0, 0x0300, 0x0340, 0x0380,
0xcfe0, 0xcfa0, 0xcf60, 0xcf20, 0xcee0, 0xcea0, 0xce60,
};
@@ -95,7 +94,7 @@ act2000_find_msn(act2000_card *card, char *msn, int ia5)
p = p->next;
}
if (!ia5)
- return (1 << (eaz - '0'));
+ return 1 << (eaz - '0');
else
return eaz;
}
@@ -111,10 +110,10 @@ act2000_find_eaz(act2000_card *card, char eaz)
while (p) {
if (p->eaz == eaz)
- return (p->msn);
+ return p->msn;
p = p->next;
}
- return ("\0");
+ return "\0";
}
/*
@@ -293,7 +292,7 @@ act2000_command(act2000_card *card, isdn_ctrl *c)
if (ret)
return ret;
if (card->flags & ACT2000_FLAGS_RUNNING)
- return (actcapi_manufacturer_req_msn(card));
+ return actcapi_manufacturer_req_msn(card);
return 0;
case ACT2000_IOCTL_ADDCARD:
if (copy_from_user(&cdef, arg,
@@ -377,6 +376,7 @@ act2000_command(act2000_card *card, isdn_ctrl *c)
}
if (card->ptype == ISDN_PTYPE_1TR6) {
int i;
+
chan->eazmask = 0;
for (i = 0; i < strlen(c->parm.num); i++)
if (isdigit(c->parm.num[i]))
@@ -512,7 +512,7 @@ if_command(isdn_ctrl *c)
act2000_card *card = act2000_findcard(c->driver);
if (card)
- return (act2000_command(card, c));
+ return act2000_command(card, c);
printk(KERN_ERR
"act2000: if_command %d called with invalid driverId %d!\n",
c->command, c->driver);
@@ -527,7 +527,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel)
if (card) {
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
- return (len);
+ return len;
}
printk(KERN_ERR
"act2000: if_writecmd called with invalid driverId!\n");
@@ -542,7 +542,7 @@ if_readstatus(u_char __user *buf, int len, int id, int channel)
if (card) {
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
- return (act2000_readstatus(buf, len, card));
+ return act2000_readstatus(buf, len, card);
}
printk(KERN_ERR
"act2000: if_readstatus called with invalid driverId!\n");
@@ -557,7 +557,7 @@ if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
if (card) {
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
- return (act2000_sendbuf(card, channel, ack, skb));
+ return act2000_sendbuf(card, channel, ack, skb);
}
printk(KERN_ERR
"act2000: if_sendbuf called with invalid driverId!\n");
@@ -574,6 +574,7 @@ act2000_alloccard(int bus, int port, int irq, char *id)
{
int i;
act2000_card *card;
+
if (!(card = kzalloc(sizeof(act2000_card), GFP_KERNEL))) {
printk(KERN_WARNING
"act2000: (%s) Could not allocate card-struct.\n", id);
@@ -776,7 +777,7 @@ act2000_addcard(int bus, int port, int irq, char *id)
failed++;
}
}
- return (added - failed);
+ return added - failed;
}
#define DRIVERNAME "IBM Active 2000 ISDN driver"
@@ -795,6 +796,7 @@ static void __exit act2000_exit(void)
{
act2000_card *card = cards;
act2000_card *last;
+
while (card) {
unregister_card(card);
del_timer_sync(&card->ptimer);
diff --git a/drivers/staging/i4l/icn/icn.c b/drivers/staging/i4l/icn/icn.c
index 514bfc2c5b53..3750ba38adc5 100644
--- a/drivers/staging/i4l/icn/icn.c
+++ b/drivers/staging/i4l/icn/icn.c
@@ -411,8 +411,7 @@ typedef struct icn_stat {
int action;
} icn_stat;
/* *INDENT-OFF* */
-static icn_stat icn_stat_table[] =
-{
+static icn_stat icn_stat_table[] = {
{"BCON_", ISDN_STAT_BCONN, 1}, /* B-Channel connected */
{"BDIS_", ISDN_STAT_BHUP, 2}, /* B-Channel disconnected */
/*
diff --git a/drivers/staging/i4l/icn/icn.h b/drivers/staging/i4l/icn/icn.h
index f8f2e76d34bf..07e2e0196527 100644
--- a/drivers/staging/i4l/icn/icn.h
+++ b/drivers/staging/i4l/icn/icn.h
@@ -54,7 +54,7 @@ typedef struct icn_cdef {
/* some useful macros for debugging */
#ifdef ICN_DEBUG_PORT
-#define OUTB_P(v, p) {printk(KERN_DEBUG "icn: outb_p(0x%02x,0x%03x)\n", v, p); outb_p(v, p);}
+#define OUTB_P(v, p) {pr_debug("icn: outb_p(0x%02x,0x%03x)\n", v, p); outb_p(v, p);}
#else
#define OUTB_P outb
#endif
@@ -186,8 +186,7 @@ typedef icn_dev *icn_devptr;
#ifdef __KERNEL__
static icn_card *cards = (icn_card *) 0;
-static u_char chan2bank[] =
-{0, 4, 8, 12}; /* for icn_map_channel() */
+static u_char chan2bank[] = {0, 4, 8, 12}; /* for icn_map_channel() */
static icn_dev dev;
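/*
 * The OUTB_P hunk above swaps printk(KERN_DEBUG ...) for pr_debug(), which
 * is a slight behaviour change: pr_debug() only prints when DEBUG is
 * defined for the file or the call site is enabled through dynamic debug.
 * A simplified view of how <linux/printk.h> resolves it (for orientation
 * only, not the literal header text):
 */
#if defined(CONFIG_DYNAMIC_DEBUG)
#define pr_debug(fmt, ...) dynamic_pr_debug(fmt, ##__VA_ARGS__)
#elif defined(DEBUG)
#define pr_debug(fmt, ...) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif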
diff --git a/drivers/staging/i4l/pcbit/callbacks.c b/drivers/staging/i4l/pcbit/callbacks.c
index efb6d6a3639a..212ab0b229d4 100644
--- a/drivers/staging/i4l/pcbit/callbacks.c
+++ b/drivers/staging/i4l/pcbit/callbacks.c
@@ -22,7 +22,7 @@
#include <linux/mm.h>
#include <linux/skbuff.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/isdnif.h>
diff --git a/drivers/staging/i4l/pcbit/capi.c b/drivers/staging/i4l/pcbit/capi.c
index 373f90feda5a..a6c4e00dc726 100644
--- a/drivers/staging/i4l/pcbit/capi.c
+++ b/drivers/staging/i4l/pcbit/capi.c
@@ -27,7 +27,6 @@
* encode our number in CallerPN and ConnectedPN
*/
-#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -36,8 +35,8 @@
#include <linux/skbuff.h>
-#include <asm/io.h>
-#include <asm/string.h>
+#include <linux/io.h>
+#include <linux/string.h>
#include <linux/isdnif.h>
diff --git a/drivers/staging/i4l/pcbit/drv.c b/drivers/staging/i4l/pcbit/drv.c
index d417df5efb5f..89b0b5b94ce5 100644
--- a/drivers/staging/i4l/pcbit/drv.c
+++ b/drivers/staging/i4l/pcbit/drv.c
@@ -27,12 +27,11 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
-#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/isdnif.h>
-#include <asm/string.h>
-#include <asm/io.h>
+#include <linux/string.h>
+#include <linux/io.h>
#include <linux/ioport.h>
#include "pcbit.h"
diff --git a/drivers/staging/i4l/pcbit/edss1.c b/drivers/staging/i4l/pcbit/edss1.c
index 6d291d548423..5980d1b5da95 100644
--- a/drivers/staging/i4l/pcbit/edss1.c
+++ b/drivers/staging/i4l/pcbit/edss1.c
@@ -23,7 +23,7 @@
#include <linux/skbuff.h>
#include <linux/timer.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/isdnif.h>
diff --git a/drivers/staging/i4l/pcbit/layer2.c b/drivers/staging/i4l/pcbit/layer2.c
index a136c72547e5..0592bf6ee9c9 100644
--- a/drivers/staging/i4l/pcbit/layer2.c
+++ b/drivers/staging/i4l/pcbit/layer2.c
@@ -36,7 +36,7 @@
#include <linux/isdnif.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include "pcbit.h"
diff --git a/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583 b/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583
deleted file mode 100644
index 470f7ad9c073..000000000000
--- a/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583
+++ /dev/null
@@ -1,6 +0,0 @@
-What: /sys/bus/iio/devices/device[n]/in_illuminance0_calibrate
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- This property causes an internal calibration of the als gain trim
- value which is later used in calculating illuminance in lux.
diff --git a/drivers/staging/iio/TODO b/drivers/staging/iio/TODO
index 93a896883e37..4922402e2e98 100644
--- a/drivers/staging/iio/TODO
+++ b/drivers/staging/iio/TODO
@@ -1,76 +1,8 @@
-2009 8/18
-
-Core:
-1) Get reviews
-2) Additional testing
-3) Ensure all desirable features present by adding more devices.
- Major changes not expected except in response to comments
-
-Max1363 core:
-1) Possibly add sysfs exports of constant useful to userspace.
-Would be nice
-2) Support hardware generated interrupts
-3) Expand device set. Lots of other maxim adc's have very
- similar interfaces.
-
-MXS LRADC driver:
-This is a classic MFD device as it combines the following subdevices
- - touchscreen controller (input subsystem related device)
- - general purpose ADC channels
- - battery voltage monitor (power subsystem related device)
- - die temperature monitor (thermal management)
-
-At least the battery voltage and die temperature feature is required in-kernel
-by a driver of the SoC's battery charging unit to avoid any damage to the
-silicon and the battery.
-
-TSL2561
-Would be nice
-1) Open question of userspace vs kernel space balance when
-converting to useful light measurements from device ones.
-2) Add sysfs elements necessary to allow device agnostic
-unit conversion.
-
-LIS3L02DQ core
-
-LIS3L02DQ ring
-
-KXSD9
-Currently minimal driver, would be nice to add:
-1) Support for all chip generated interrupts (events),
-basically get support up to level of lis3l02dq driver.
-
-Ring buffer core
-
-SCA3000
-Would be nice
-1) Testing on devices other than sca3000-e05
-
-Trigger core support
-1) Discussion of approach. Is it general enough?
-
-Ring Buffer:
-1) Discussion of approach.
-There are probably better ways of doing this. The
-intention is to allow for more than one software ring
-buffer implementation as different users will have
-different requirements. This one suits mid range
-frequencies (100Hz - 4kHz).
-2) Lots of testing
-
-GPIO trigger
-1) Add control over the type of interrupt etc. This will
-necessitate a header that is also visible from arch board
-files. (avoided at the moment to keep the driver set
-contained in staging).
+2016 10/09
ADI Drivers:
CC the device-drivers-devel@blackfin.uclinux.org mailing list when
e-mailing the normal IIO list (see below).
-Documentation
-1) Lots of cleanup and expansion.
-2) Some device require individual docs.
-
Contact: Jonathan Cameron <jic23@kernel.org>.
Mailing list: linux-iio@vger.kernel.org
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index 1c994b57c7d2..c6b0f5eae7ab 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -51,14 +51,4 @@ config ADIS16240
To compile this driver as a module, say M here: the module will be
called adis16240.
-config SCA3000
- depends on IIO_BUFFER
- depends on SPI
- tristate "VTI SCA3000 series accelerometers"
- help
- Say Y here to build support for the VTI SCA3000 series of SPI
- accelerometers. These devices use a hardware ring buffer.
-
- To compile this driver as a module, say M here: the module will be
- called sca3000.
endmenu
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index 1810a434a755..febb137b60c4 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -13,6 +13,3 @@ obj-$(CONFIG_ADIS16209) += adis16209.o
adis16240-y := adis16240_core.o
obj-$(CONFIG_ADIS16240) += adis16240.o
-
-sca3000-y := sca3000_core.o sca3000_ring.o
-obj-$(CONFIG_SCA3000) += sca3000.o
diff --git a/drivers/staging/iio/accel/sca3000.h b/drivers/staging/iio/accel/sca3000.h
deleted file mode 100644
index 4dcc8575cbe3..000000000000
--- a/drivers/staging/iio/accel/sca3000.h
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * sca3000.c -- support VTI sca3000 series accelerometers
- * via SPI
- *
- * Copyright (c) 2007 Jonathan Cameron <jic23@kernel.org>
- *
- * Partly based upon tle62x0.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Initial mode is direct measurement.
- *
- * Untested things
- *
- * Temperature reading (the e05 I'm testing with doesn't have a sensor)
- *
- * Free fall detection mode - supported but untested as I'm not dropping my
- * dubious wire rig far enough to test it.
- *
- * Unsupported as yet
- *
- * Time stamping of data from ring. Various ideas on how to do this but none
- * are remotely simple. Suggestions welcome.
- *
- * Individual enabling disabling of channels going into ring buffer
- *
- * Overflow handling (this is signaled for all but 8 bit ring buffer mode.)
- *
- * Motion detector using AND combinations of signals.
- *
- * Note: Be very careful about not touching any register bytes marked
- * as reserved on the data sheet. They really mean it, as changing contents of
- * some will cause the device to lock up.
- *
- * Known issues - on rare occasions the interrupts lock up. Not sure why as yet.
- * Can probably alleviate this by reading the interrupt register on start, but
- * that is really just brushing the problem under the carpet.
- */
-#ifndef _SCA3000
-#define _SCA3000
-
-#define SCA3000_WRITE_REG(a) (((a) << 2) | 0x02)
-#define SCA3000_READ_REG(a) ((a) << 2)
-
-#define SCA3000_REG_ADDR_REVID 0x00
-#define SCA3000_REVID_MAJOR_MASK 0xf0
-#define SCA3000_REVID_MINOR_MASK 0x0f
-
-#define SCA3000_REG_ADDR_STATUS 0x02
-#define SCA3000_LOCKED 0x20
-#define SCA3000_EEPROM_CS_ERROR 0x02
-#define SCA3000_SPI_FRAME_ERROR 0x01
-
-/* All reads done using register decrement so no need to directly access LSBs */
-#define SCA3000_REG_ADDR_X_MSB 0x05
-#define SCA3000_REG_ADDR_Y_MSB 0x07
-#define SCA3000_REG_ADDR_Z_MSB 0x09
-
-#define SCA3000_REG_ADDR_RING_OUT 0x0f
-
-/* Temp read untested - the e05 doesn't have the sensor */
-#define SCA3000_REG_ADDR_TEMP_MSB 0x13
-
-#define SCA3000_REG_ADDR_MODE 0x14
-#define SCA3000_MODE_PROT_MASK 0x28
-
-#define SCA3000_RING_BUF_ENABLE 0x80
-#define SCA3000_RING_BUF_8BIT 0x40
-/*
- * Free fall detection triggers an interrupt if the acceleration
- * is below a threshold for equivalent of 25cm drop
- */
-#define SCA3000_FREE_FALL_DETECT 0x10
-#define SCA3000_MEAS_MODE_NORMAL 0x00
-#define SCA3000_MEAS_MODE_OP_1 0x01
-#define SCA3000_MEAS_MODE_OP_2 0x02
-
-/*
- * In motion detection mode the accelerations are band pass filtered
- * (approx 1 - 25Hz) and then a programmable threshold used to trigger
- * an interrupt.
- */
-#define SCA3000_MEAS_MODE_MOT_DET 0x03
-
-#define SCA3000_REG_ADDR_BUF_COUNT 0x15
-
-#define SCA3000_REG_ADDR_INT_STATUS 0x16
-
-#define SCA3000_INT_STATUS_THREE_QUARTERS 0x80
-#define SCA3000_INT_STATUS_HALF 0x40
-
-#define SCA3000_INT_STATUS_FREE_FALL 0x08
-#define SCA3000_INT_STATUS_Y_TRIGGER 0x04
-#define SCA3000_INT_STATUS_X_TRIGGER 0x02
-#define SCA3000_INT_STATUS_Z_TRIGGER 0x01
-
-/* Used to allow access to multiplexed registers */
-#define SCA3000_REG_ADDR_CTRL_SEL 0x18
-/* Only available for SCA3000-D03 and SCA3000-D01 */
-#define SCA3000_REG_CTRL_SEL_I2C_DISABLE 0x01
-#define SCA3000_REG_CTRL_SEL_MD_CTRL 0x02
-#define SCA3000_REG_CTRL_SEL_MD_Y_TH 0x03
-#define SCA3000_REG_CTRL_SEL_MD_X_TH 0x04
-#define SCA3000_REG_CTRL_SEL_MD_Z_TH 0x05
-/*
- * BE VERY CAREFUL WITH THIS, IF 3 BITS ARE NOT SET the device
- * will not function
- */
-#define SCA3000_REG_CTRL_SEL_OUT_CTRL 0x0B
-#define SCA3000_OUT_CTRL_PROT_MASK 0xE0
-#define SCA3000_OUT_CTRL_BUF_X_EN 0x10
-#define SCA3000_OUT_CTRL_BUF_Y_EN 0x08
-#define SCA3000_OUT_CTRL_BUF_Z_EN 0x04
-#define SCA3000_OUT_CTRL_BUF_DIV_MASK 0x03
-#define SCA3000_OUT_CTRL_BUF_DIV_4 0x02
-#define SCA3000_OUT_CTRL_BUF_DIV_2 0x01
-
-/*
- * Control which motion detector interrupts are on.
- * For now only OR combinations are supported.
- */
-#define SCA3000_MD_CTRL_PROT_MASK 0xC0
-#define SCA3000_MD_CTRL_OR_Y 0x01
-#define SCA3000_MD_CTRL_OR_X 0x02
-#define SCA3000_MD_CTRL_OR_Z 0x04
-/* Currently unsupported */
-#define SCA3000_MD_CTRL_AND_Y 0x08
-#define SCA3000_MD_CTRL_AND_X 0x10
-#define SAC3000_MD_CTRL_AND_Z 0x20
-
-/*
- * Some control registers of complex access methods requiring this register to
- * be used to remove a lock.
- */
-#define SCA3000_REG_ADDR_UNLOCK 0x1e
-
-#define SCA3000_REG_ADDR_INT_MASK 0x21
-#define SCA3000_INT_MASK_PROT_MASK 0x1C
-
-#define SCA3000_INT_MASK_RING_THREE_QUARTER 0x80
-#define SCA3000_INT_MASK_RING_HALF 0x40
-
-#define SCA3000_INT_MASK_ALL_INTS 0x02
-#define SCA3000_INT_MASK_ACTIVE_HIGH 0x01
-#define SCA3000_INT_MASK_ACTIVE_LOW 0x00
-
-/* Values of multiplexed registers (write to ctrl_data after select) */
-#define SCA3000_REG_ADDR_CTRL_DATA 0x22
-
-/*
- * Measurement modes available on some sca3000 series chips. Code assumes others
- * may become available in the future.
- *
- * Bypass - Bypass the low-pass filter in the signal channel so as to increase
- * signal bandwidth.
- *
- * Narrow - Narrow low-pass filtering of the signal channel and half output
- * data rate by decimation.
- *
- * Wide - Widen low-pass filtering of signal channel to increase bandwidth
- */
-#define SCA3000_OP_MODE_BYPASS 0x01
-#define SCA3000_OP_MODE_NARROW 0x02
-#define SCA3000_OP_MODE_WIDE 0x04
-#define SCA3000_MAX_TX 6
-#define SCA3000_MAX_RX 2
-
-/**
- * struct sca3000_state - device instance state information
- * @us: the associated spi device
- * @info: chip variant information
- * @interrupt_handler_ws: event interrupt handler for all events
- * @last_timestamp: the timestamp of the last event
- * @mo_det_use_count: reference counter for the motion detection unit
- * @lock: lock used to protect elements of sca3000_state
- * and the underlying device state.
- * @bpse: number of bits per scan element
- * @tx: dma-able transmit buffer
- * @rx: dma-able receive buffer
- **/
-struct sca3000_state {
- struct spi_device *us;
- const struct sca3000_chip_info *info;
- struct work_struct interrupt_handler_ws;
- s64 last_timestamp;
- int mo_det_use_count;
- struct mutex lock;
- int bpse;
- /* Can these share a cacheline ? */
- u8 rx[2] ____cacheline_aligned;
- u8 tx[6] ____cacheline_aligned;
-};
-
-/**
- * struct sca3000_chip_info - model dependent parameters
- * @scale: scale * 10^-6
- * @temp_output: some devices have temperature sensors.
- * @measurement_mode_freq: normal mode sampling frequency
- * @option_mode_1: first optional mode. Not all models have one
- * @option_mode_1_freq: option mode 1 sampling frequency
- * @option_mode_2: second optional mode. Not all chips have one
- * @option_mode_2_freq: option mode 2 sampling frequency
- *
- * This structure is used to hold information about the functionality of a given
- * sca3000 variant.
- **/
-struct sca3000_chip_info {
- unsigned int scale;
- bool temp_output;
- int measurement_mode_freq;
- int option_mode_1;
- int option_mode_1_freq;
- int option_mode_2;
- int option_mode_2_freq;
- int mot_det_mult_xz[6];
- int mot_det_mult_y[7];
-};
-
-int sca3000_read_data_short(struct sca3000_state *st,
- u8 reg_address_high,
- int len);
-
-/**
- * sca3000_write_reg() write a single register
- * @address: address of register on chip
- * @val: value to be written to register
- *
- * The main lock must be held.
- **/
-int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val);
-
-#ifdef CONFIG_IIO_BUFFER
-/**
- * sca3000_register_ring_funcs() setup the ring state change functions
- **/
-void sca3000_register_ring_funcs(struct iio_dev *indio_dev);
-
-/**
- * sca3000_configure_ring() - allocate and configure ring buffer
- * @indio_dev: iio-core device whose ring is to be configured
- *
- * The hardware ring buffer needs far fewer ring buffer functions than
- * a software one as a lot of things are handled automatically.
- * This function also tells the iio core that our device supports a
- * hardware ring buffer mode.
- **/
-int sca3000_configure_ring(struct iio_dev *indio_dev);
-
-/**
- * sca3000_unconfigure_ring() - deallocate the ring buffer
- * @indio_dev: iio-core device whose ring we are freeing
- **/
-void sca3000_unconfigure_ring(struct iio_dev *indio_dev);
-
-/**
- * sca3000_ring_int_process() handles ring related event pushing and escalation
- * @val: the event code
- **/
-void sca3000_ring_int_process(u8 val, struct iio_buffer *ring);
-
-#else
-static inline void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
-{
-}
-
-static inline
-int sca3000_register_ring_access_and_init(struct iio_dev *indio_dev)
-{
- return 0;
-}
-
-static inline void sca3000_ring_int_process(u8 val, void *ring)
-{
-}
-
-#endif
-#endif /* _SCA3000 */
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
deleted file mode 100644
index 564b36d4f648..000000000000
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ /dev/null
@@ -1,1210 +0,0 @@
-/*
- * sca3000_core.c -- support VTI sca3000 series accelerometers via SPI
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
- *
- * See industrialio/accels/sca3000.h for comments.
- */
-
-#include <linux/interrupt.h>
-#include <linux/fs.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/events.h>
-#include <linux/iio/buffer.h>
-
-#include "sca3000.h"
-
-enum sca3000_variant {
- d01,
- e02,
- e04,
- e05,
-};
-
-/*
- * Note where option modes are not defined, the chip simply does not
- * support any.
- * Other chips in the sca3000 series use i2c and are not included here.
- *
- * Some of these devices are only listed in the family data sheet and
- * do not actually appear to be available.
- */
-static const struct sca3000_chip_info sca3000_spi_chip_info_tbl[] = {
- [d01] = {
- .scale = 7357,
- .temp_output = true,
- .measurement_mode_freq = 250,
- .option_mode_1 = SCA3000_OP_MODE_BYPASS,
- .option_mode_1_freq = 250,
- .mot_det_mult_xz = {50, 100, 200, 350, 650, 1300},
- .mot_det_mult_y = {50, 100, 150, 250, 450, 850, 1750},
- },
- [e02] = {
- .scale = 9810,
- .measurement_mode_freq = 125,
- .option_mode_1 = SCA3000_OP_MODE_NARROW,
- .option_mode_1_freq = 63,
- .mot_det_mult_xz = {100, 150, 300, 550, 1050, 2050},
- .mot_det_mult_y = {50, 100, 200, 350, 700, 1350, 2700},
- },
- [e04] = {
- .scale = 19620,
- .measurement_mode_freq = 100,
- .option_mode_1 = SCA3000_OP_MODE_NARROW,
- .option_mode_1_freq = 50,
- .option_mode_2 = SCA3000_OP_MODE_WIDE,
- .option_mode_2_freq = 400,
- .mot_det_mult_xz = {200, 300, 600, 1100, 2100, 4100},
- .mot_det_mult_y = {100, 200, 400, 7000, 1400, 2700, 54000},
- },
- [e05] = {
- .scale = 61313,
- .measurement_mode_freq = 200,
- .option_mode_1 = SCA3000_OP_MODE_NARROW,
- .option_mode_1_freq = 50,
- .option_mode_2 = SCA3000_OP_MODE_WIDE,
- .option_mode_2_freq = 400,
- .mot_det_mult_xz = {600, 900, 1700, 3200, 6100, 11900},
- .mot_det_mult_y = {300, 600, 1200, 2000, 4100, 7800, 15600},
- },
-};
-
-int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val)
-{
- st->tx[0] = SCA3000_WRITE_REG(address);
- st->tx[1] = val;
- return spi_write(st->us, st->tx, 2);
-}
-
-int sca3000_read_data_short(struct sca3000_state *st,
- u8 reg_address_high,
- int len)
-{
- struct spi_transfer xfer[2] = {
- {
- .len = 1,
- .tx_buf = st->tx,
- }, {
- .len = len,
- .rx_buf = st->rx,
- }
- };
- st->tx[0] = SCA3000_READ_REG(reg_address_high);
-
- return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
-}
-
-/**
- * sca3000_reg_lock_on() test if the ctrl register lock is on
- *
- * Lock must be held.
- **/
-static int sca3000_reg_lock_on(struct sca3000_state *st)
-{
- int ret;
-
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_STATUS, 1);
- if (ret < 0)
- return ret;
-
- return !(st->rx[0] & SCA3000_LOCKED);
-}
-
-/**
- * __sca3000_unlock_reg_lock() unlock the control registers
- *
- * Note the device does not appear to support doing this in a single transfer.
- * This should only ever be used as part of ctrl reg read.
- * Lock must be held before calling this
- **/
-static int __sca3000_unlock_reg_lock(struct sca3000_state *st)
-{
- struct spi_transfer xfer[3] = {
- {
- .len = 2,
- .cs_change = 1,
- .tx_buf = st->tx,
- }, {
- .len = 2,
- .cs_change = 1,
- .tx_buf = st->tx + 2,
- }, {
- .len = 2,
- .tx_buf = st->tx + 4,
- },
- };
- st->tx[0] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK);
- st->tx[1] = 0x00;
- st->tx[2] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK);
- st->tx[3] = 0x50;
- st->tx[4] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK);
- st->tx[5] = 0xA0;
-
- return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
-}
-
-/**
- * sca3000_write_ctrl_reg() write to a lock protect ctrl register
- * @sel: selects which registers we wish to write to
- * @val: the value to be written
- *
- * Certain control registers are protected against overwriting by the lock
- * register and use a shared write address. This function allows writing of
- * these registers.
- * Lock must be held.
- **/
-static int sca3000_write_ctrl_reg(struct sca3000_state *st,
- u8 sel,
- uint8_t val)
-{
- int ret;
-
- ret = sca3000_reg_lock_on(st);
- if (ret < 0)
- goto error_ret;
- if (ret) {
- ret = __sca3000_unlock_reg_lock(st);
- if (ret)
- goto error_ret;
- }
-
- /* Set the control select register */
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_SEL, sel);
- if (ret)
- goto error_ret;
-
- /* Write the actual value into the register */
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_DATA, val);
-
-error_ret:
- return ret;
-}
-
-/**
- * sca3000_read_ctrl_reg() read from lock protected control register.
- *
- * Lock must be held.
- **/
-static int sca3000_read_ctrl_reg(struct sca3000_state *st,
- u8 ctrl_reg)
-{
- int ret;
-
- ret = sca3000_reg_lock_on(st);
- if (ret < 0)
- goto error_ret;
- if (ret) {
- ret = __sca3000_unlock_reg_lock(st);
- if (ret)
- goto error_ret;
- }
- /* Set the control select register */
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_SEL, ctrl_reg);
- if (ret)
- goto error_ret;
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_CTRL_DATA, 1);
- if (ret)
- goto error_ret;
- return st->rx[0];
-error_ret:
- return ret;
-}
-
-/**
- * sca3000_show_rev() - sysfs interface to read the chip revision number
- **/
-static ssize_t sca3000_show_rev(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int len = 0, ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_REVID, 1);
- if (ret < 0)
- goto error_ret;
- len += sprintf(buf + len,
- "major=%d, minor=%d\n",
- st->rx[0] & SCA3000_REVID_MAJOR_MASK,
- st->rx[0] & SCA3000_REVID_MINOR_MASK);
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
-/**
- * sca3000_show_available_measurement_modes() display available modes
- *
- * This is all read from chip specific data in the driver. Not all
- * of the sca3000 series support modes other than normal.
- **/
-static ssize_t
-sca3000_show_available_measurement_modes(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int len = 0;
-
- len += sprintf(buf + len, "0 - normal mode");
- switch (st->info->option_mode_1) {
- case SCA3000_OP_MODE_NARROW:
- len += sprintf(buf + len, ", 1 - narrow mode");
- break;
- case SCA3000_OP_MODE_BYPASS:
- len += sprintf(buf + len, ", 1 - bypass mode");
- break;
- }
- switch (st->info->option_mode_2) {
- case SCA3000_OP_MODE_WIDE:
- len += sprintf(buf + len, ", 2 - wide mode");
- break;
- }
- /* always supported */
- len += sprintf(buf + len, " 3 - motion detection\n");
-
- return len;
-}
-
-/**
- * sca3000_show_measurement_mode() sysfs read of current mode
- **/
-static ssize_t
-sca3000_show_measurement_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int len = 0, ret;
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
- /* mask bottom 2 bits - only ones that are relevant */
- st->rx[0] &= 0x03;
- switch (st->rx[0]) {
- case SCA3000_MEAS_MODE_NORMAL:
- len += sprintf(buf + len, "0 - normal mode\n");
- break;
- case SCA3000_MEAS_MODE_MOT_DET:
- len += sprintf(buf + len, "3 - motion detection\n");
- break;
- case SCA3000_MEAS_MODE_OP_1:
- switch (st->info->option_mode_1) {
- case SCA3000_OP_MODE_NARROW:
- len += sprintf(buf + len, "1 - narrow mode\n");
- break;
- case SCA3000_OP_MODE_BYPASS:
- len += sprintf(buf + len, "1 - bypass mode\n");
- break;
- }
- break;
- case SCA3000_MEAS_MODE_OP_2:
- switch (st->info->option_mode_2) {
- case SCA3000_OP_MODE_WIDE:
- len += sprintf(buf + len, "2 - wide mode\n");
- break;
- }
- break;
- }
-
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
-/**
- * sca3000_store_measurement_mode() set the current mode
- **/
-static ssize_t
-sca3000_store_measurement_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret;
- u8 mask = 0x03;
- u8 val;
-
- mutex_lock(&st->lock);
- ret = kstrtou8(buf, 10, &val);
- if (ret)
- goto error_ret;
- if (val > 3) {
- ret = -EINVAL;
- goto error_ret;
- }
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
- st->rx[0] &= ~mask;
- st->rx[0] |= (val & mask);
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE, st->rx[0]);
- if (ret)
- goto error_ret;
- mutex_unlock(&st->lock);
-
- return len;
-
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret;
-}
-
-/*
- * Not even vaguely standard attributes so defined here rather than
- * in the relevant IIO core headers
- */
-static IIO_DEVICE_ATTR(measurement_mode_available, S_IRUGO,
- sca3000_show_available_measurement_modes,
- NULL, 0);
-
-static IIO_DEVICE_ATTR(measurement_mode, S_IRUGO | S_IWUSR,
- sca3000_show_measurement_mode,
- sca3000_store_measurement_mode,
- 0);
-
-/* More standard attributes */
-
-static IIO_DEVICE_ATTR(revision, S_IRUGO, sca3000_show_rev, NULL, 0);
-
-static const struct iio_event_spec sca3000_event = {
- .type = IIO_EV_TYPE_MAG,
- .dir = IIO_EV_DIR_RISING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
-};
-
-#define SCA3000_CHAN(index, mod) \
- { \
- .type = IIO_ACCEL, \
- .modified = 1, \
- .channel2 = mod, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
- .address = index, \
- .scan_index = index, \
- .scan_type = { \
- .sign = 's', \
- .realbits = 11, \
- .storagebits = 16, \
- .shift = 5, \
- }, \
- .event_spec = &sca3000_event, \
- .num_event_specs = 1, \
- }
-
-static const struct iio_chan_spec sca3000_channels[] = {
- SCA3000_CHAN(0, IIO_MOD_X),
- SCA3000_CHAN(1, IIO_MOD_Y),
- SCA3000_CHAN(2, IIO_MOD_Z),
-};
-
-static const struct iio_chan_spec sca3000_channels_with_temp[] = {
- SCA3000_CHAN(0, IIO_MOD_X),
- SCA3000_CHAN(1, IIO_MOD_Y),
- SCA3000_CHAN(2, IIO_MOD_Z),
- {
- .type = IIO_TEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_OFFSET),
- /* No buffer support */
- .scan_index = -1,
- },
-};
-
-static u8 sca3000_addresses[3][3] = {
- [0] = {SCA3000_REG_ADDR_X_MSB, SCA3000_REG_CTRL_SEL_MD_X_TH,
- SCA3000_MD_CTRL_OR_X},
- [1] = {SCA3000_REG_ADDR_Y_MSB, SCA3000_REG_CTRL_SEL_MD_Y_TH,
- SCA3000_MD_CTRL_OR_Y},
- [2] = {SCA3000_REG_ADDR_Z_MSB, SCA3000_REG_CTRL_SEL_MD_Z_TH,
- SCA3000_MD_CTRL_OR_Z},
-};
-
-/**
- * __sca3000_get_base_freq() obtain mode specific base frequency
- *
- * lock must be held
- **/
-static inline int __sca3000_get_base_freq(struct sca3000_state *st,
- const struct sca3000_chip_info *info,
- int *base_freq)
-{
- int ret;
-
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
- switch (0x03 & st->rx[0]) {
- case SCA3000_MEAS_MODE_NORMAL:
- *base_freq = info->measurement_mode_freq;
- break;
- case SCA3000_MEAS_MODE_OP_1:
- *base_freq = info->option_mode_1_freq;
- break;
- case SCA3000_MEAS_MODE_OP_2:
- *base_freq = info->option_mode_2_freq;
- break;
- default:
- ret = -EINVAL;
- }
-error_ret:
- return ret;
-}
-
-/**
- * read_raw handler for IIO_CHAN_INFO_SAMP_FREQ
- *
- * lock must be held
- **/
-static int read_raw_samp_freq(struct sca3000_state *st, int *val)
-{
- int ret;
-
- ret = __sca3000_get_base_freq(st, st->info, val);
- if (ret)
- return ret;
-
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
- if (ret < 0)
- return ret;
-
- if (*val > 0) {
- ret &= SCA3000_OUT_CTRL_BUF_DIV_MASK;
- switch (ret) {
- case SCA3000_OUT_CTRL_BUF_DIV_2:
- *val /= 2;
- break;
- case SCA3000_OUT_CTRL_BUF_DIV_4:
- *val /= 4;
- break;
- }
- }
-
- return 0;
-}
-
-/**
- * write_raw handler for IIO_CHAN_INFO_SAMP_FREQ
- *
- * lock must be held
- **/
-static int write_raw_samp_freq(struct sca3000_state *st, int val)
-{
- int ret, base_freq, ctrlval;
-
- ret = __sca3000_get_base_freq(st, st->info, &base_freq);
- if (ret)
- return ret;
-
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
- if (ret < 0)
- return ret;
-
- ctrlval = ret & ~SCA3000_OUT_CTRL_BUF_DIV_MASK;
-
- if (val == base_freq / 2)
- ctrlval |= SCA3000_OUT_CTRL_BUF_DIV_2;
- if (val == base_freq / 4)
- ctrlval |= SCA3000_OUT_CTRL_BUF_DIV_4;
- else if (val != base_freq)
- return -EINVAL;
-
- return sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
- ctrlval);
-}
-
-static int sca3000_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long mask)
-{
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret;
- u8 address;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- mutex_lock(&st->lock);
- if (chan->type == IIO_ACCEL) {
- if (st->mo_det_use_count) {
- mutex_unlock(&st->lock);
- return -EBUSY;
- }
- address = sca3000_addresses[chan->address][0];
- ret = sca3000_read_data_short(st, address, 2);
- if (ret < 0) {
- mutex_unlock(&st->lock);
- return ret;
- }
- *val = (be16_to_cpup((__be16 *)st->rx) >> 3) & 0x1FFF;
- *val = ((*val) << (sizeof(*val) * 8 - 13)) >>
- (sizeof(*val) * 8 - 13);
- } else {
- /* get the temperature when available */
- ret = sca3000_read_data_short(st,
- SCA3000_REG_ADDR_TEMP_MSB,
- 2);
- if (ret < 0) {
- mutex_unlock(&st->lock);
- return ret;
- }
- *val = ((st->rx[0] & 0x3F) << 3) |
- ((st->rx[1] & 0xE0) >> 5);
- }
- mutex_unlock(&st->lock);
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_SCALE:
- *val = 0;
- if (chan->type == IIO_ACCEL)
- *val2 = st->info->scale;
- else /* temperature */
- *val2 = 555556;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_CHAN_INFO_OFFSET:
- *val = -214;
- *val2 = 600000;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_CHAN_INFO_SAMP_FREQ:
- mutex_lock(&st->lock);
- ret = read_raw_samp_freq(st, val);
- mutex_unlock(&st->lock);
- return ret ? ret : IIO_VAL_INT;
- default:
- return -EINVAL;
- }
-}
-
-static int sca3000_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
-{
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret;
-
- switch (mask) {
- case IIO_CHAN_INFO_SAMP_FREQ:
- if (val2)
- return -EINVAL;
- mutex_lock(&st->lock);
- ret = write_raw_samp_freq(st, val);
- mutex_unlock(&st->lock);
- return ret;
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-/**
- * sca3000_read_av_freq() sysfs function to get available frequencies
- *
- * The later modes are only relevant to the ring buffer - and depend on current
- * mode. Note that data sheet gives rather wide tolerances for these so integer
- * division will give good enough answer and not all chips have them specified
- * at all.
- **/
-static ssize_t sca3000_read_av_freq(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int len = 0, ret, val;
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- val = st->rx[0];
- mutex_unlock(&st->lock);
- if (ret)
- goto error_ret;
-
- switch (val & 0x03) {
- case SCA3000_MEAS_MODE_NORMAL:
- len += sprintf(buf + len, "%d %d %d\n",
- st->info->measurement_mode_freq,
- st->info->measurement_mode_freq / 2,
- st->info->measurement_mode_freq / 4);
- break;
- case SCA3000_MEAS_MODE_OP_1:
- len += sprintf(buf + len, "%d %d %d\n",
- st->info->option_mode_1_freq,
- st->info->option_mode_1_freq / 2,
- st->info->option_mode_1_freq / 4);
- break;
- case SCA3000_MEAS_MODE_OP_2:
- len += sprintf(buf + len, "%d %d %d\n",
- st->info->option_mode_2_freq,
- st->info->option_mode_2_freq / 2,
- st->info->option_mode_2_freq / 4);
- break;
- }
- return len;
-error_ret:
- return ret;
-}
-
-/*
- * Should only really be registered if ring buffer support is compiled in.
- * Does no harm however and doing it right would add a fair bit of complexity
- */
-static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(sca3000_read_av_freq);
-
-/**
- * sca3000_read_thresh() - query of a threshold
- **/
-static int sca3000_read_thresh(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int *val, int *val2)
-{
- int ret, i;
- struct sca3000_state *st = iio_priv(indio_dev);
- int num = chan->channel2;
-
- mutex_lock(&st->lock);
- ret = sca3000_read_ctrl_reg(st, sca3000_addresses[num][1]);
- mutex_unlock(&st->lock);
- if (ret < 0)
- return ret;
- *val = 0;
- if (num == 1)
- for_each_set_bit(i, (unsigned long *)&ret,
- ARRAY_SIZE(st->info->mot_det_mult_y))
- *val += st->info->mot_det_mult_y[i];
- else
- for_each_set_bit(i, (unsigned long *)&ret,
- ARRAY_SIZE(st->info->mot_det_mult_xz))
- *val += st->info->mot_det_mult_xz[i];
-
- return IIO_VAL_INT;
-}
-
-/**
- * sca3000_write_thresh() control of threshold
- **/
-static int sca3000_write_thresh(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int val, int val2)
-{
- struct sca3000_state *st = iio_priv(indio_dev);
- int num = chan->channel2;
- int ret;
- int i;
- u8 nonlinear = 0;
-
- if (num == 1) {
- i = ARRAY_SIZE(st->info->mot_det_mult_y);
- while (i > 0)
- if (val >= st->info->mot_det_mult_y[--i]) {
- nonlinear |= (1 << i);
- val -= st->info->mot_det_mult_y[i];
- }
- } else {
- i = ARRAY_SIZE(st->info->mot_det_mult_xz);
- while (i > 0)
- if (val >= st->info->mot_det_mult_xz[--i]) {
- nonlinear |= (1 << i);
- val -= st->info->mot_det_mult_xz[i];
- }
- }
-
- mutex_lock(&st->lock);
- ret = sca3000_write_ctrl_reg(st, sca3000_addresses[num][1], nonlinear);
- mutex_unlock(&st->lock);
-
- return ret;
-}
-
-static struct attribute *sca3000_attributes[] = {
- &iio_dev_attr_revision.dev_attr.attr,
- &iio_dev_attr_measurement_mode_available.dev_attr.attr,
- &iio_dev_attr_measurement_mode.dev_attr.attr,
- &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group sca3000_attribute_group = {
- .attrs = sca3000_attributes,
-};
-
-/**
- * sca3000_event_handler() - handling ring and non ring events
- *
- * Ring related interrupt handler. Depending on event, push to
- * the ring buffer event chrdev or the event one.
- *
- * This function is complicated by the fact that the devices can signify ring
- * and non ring events via the same interrupt line and they can only
- * be distinguished via a read of the relevant status register.
- **/
-static irqreturn_t sca3000_event_handler(int irq, void *private)
-{
- struct iio_dev *indio_dev = private;
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret, val;
- s64 last_timestamp = iio_get_time_ns(indio_dev);
-
- /*
- * Could lead if badly timed to an extra read of status reg,
- * but ensures no interrupt is missed.
- */
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_STATUS, 1);
- val = st->rx[0];
- mutex_unlock(&st->lock);
- if (ret)
- goto done;
-
- sca3000_ring_int_process(val, indio_dev->buffer);
-
- if (val & SCA3000_INT_STATUS_FREE_FALL)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_X_AND_Y_AND_Z,
- IIO_EV_TYPE_MAG,
- IIO_EV_DIR_FALLING),
- last_timestamp);
-
- if (val & SCA3000_INT_STATUS_Y_TRIGGER)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Y,
- IIO_EV_TYPE_MAG,
- IIO_EV_DIR_RISING),
- last_timestamp);
-
- if (val & SCA3000_INT_STATUS_X_TRIGGER)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_X,
- IIO_EV_TYPE_MAG,
- IIO_EV_DIR_RISING),
- last_timestamp);
-
- if (val & SCA3000_INT_STATUS_Z_TRIGGER)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Z,
- IIO_EV_TYPE_MAG,
- IIO_EV_DIR_RISING),
- last_timestamp);
-
-done:
- return IRQ_HANDLED;
-}
-
-/**
- * sca3000_read_event_config() what events are enabled
- **/
-static int sca3000_read_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir)
-{
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret;
- u8 protect_mask = 0x03;
- int num = chan->channel2;
-
- /* read current value of mode register */
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
-
- if ((st->rx[0] & protect_mask) != SCA3000_MEAS_MODE_MOT_DET) {
- ret = 0;
- } else {
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
- if (ret < 0)
- goto error_ret;
- /* only supporting logical or's for now */
- ret = !!(ret & sca3000_addresses[num][2]);
- }
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret;
-}
-
-/**
- * sca3000_query_free_fall_mode() is free fall mode enabled
- **/
-static ssize_t sca3000_query_free_fall_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int val;
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- val = st->rx[0];
- mutex_unlock(&st->lock);
- if (ret < 0)
- return ret;
- return sprintf(buf, "%d\n", !!(val & SCA3000_FREE_FALL_DETECT));
-}
-
-/**
- * sca3000_set_free_fall_mode() simple on off control for free fall int
- *
- * In these chips the free fall detector should send an interrupt if
- * the device falls more than 25cm. This has not been tested due
- * to fragile wiring.
- **/
-static ssize_t sca3000_set_free_fall_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- u8 val;
- int ret;
- u8 protect_mask = SCA3000_FREE_FALL_DETECT;
-
- mutex_lock(&st->lock);
- ret = kstrtou8(buf, 10, &val);
- if (ret)
- goto error_ret;
-
- /* read current value of mode register */
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
-
- /* if off and should be on */
- if (val && !(st->rx[0] & protect_mask))
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
- (st->rx[0] | SCA3000_FREE_FALL_DETECT));
- /* if on and should be off */
- else if (!val && (st->rx[0] & protect_mask))
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
- (st->rx[0] & ~protect_mask));
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
-/**
- * sca3000_write_event_config() simple on off control for motion detector
- *
- * This is a per axis control, but enabling any will result in the
- * motion detector unit being enabled.
- * N.B. enabling motion detector stops normal data acquisition.
- * There is a complexity in knowing which mode to return to when
- * this mode is disabled. Currently normal mode is assumed.
- **/
-static int sca3000_write_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- int state)
-{
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret, ctrlval;
- u8 protect_mask = 0x03;
- int num = chan->channel2;
-
- mutex_lock(&st->lock);
- /*
- * First read the motion detector config to find out if
- * this axis is on
- */
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
- if (ret < 0)
- goto exit_point;
- ctrlval = ret;
- /* if off and should be on */
- if (state && !(ctrlval & sca3000_addresses[num][2])) {
- ret = sca3000_write_ctrl_reg(st,
- SCA3000_REG_CTRL_SEL_MD_CTRL,
- ctrlval |
- sca3000_addresses[num][2]);
- if (ret)
- goto exit_point;
- st->mo_det_use_count++;
- } else if (!state && (ctrlval & sca3000_addresses[num][2])) {
- ret = sca3000_write_ctrl_reg(st,
- SCA3000_REG_CTRL_SEL_MD_CTRL,
- ctrlval &
- ~(sca3000_addresses[num][2]));
- if (ret)
- goto exit_point;
- st->mo_det_use_count--;
- }
-
- /* read current value of mode register */
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto exit_point;
- /* if off and should be on */
- if ((st->mo_det_use_count) &&
- ((st->rx[0] & protect_mask) != SCA3000_MEAS_MODE_MOT_DET))
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
- (st->rx[0] & ~protect_mask)
- | SCA3000_MEAS_MODE_MOT_DET);
- /* if on and should be off */
- else if (!(st->mo_det_use_count) &&
- ((st->rx[0] & protect_mask) == SCA3000_MEAS_MODE_MOT_DET))
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
- (st->rx[0] & ~protect_mask));
-exit_point:
- mutex_unlock(&st->lock);
-
- return ret;
-}
-
-/* Free fall detector related event attribute */
-static IIO_DEVICE_ATTR_NAMED(accel_xayaz_mag_falling_en,
- in_accel_x & y & z_mag_falling_en,
- S_IRUGO | S_IWUSR,
- sca3000_query_free_fall_mode,
- sca3000_set_free_fall_mode,
- 0);
-
-static IIO_CONST_ATTR_NAMED(accel_xayaz_mag_falling_period,
- in_accel_x & y & z_mag_falling_period,
- "0.226");
-
-static struct attribute *sca3000_event_attributes[] = {
- &iio_dev_attr_accel_xayaz_mag_falling_en.dev_attr.attr,
- &iio_const_attr_accel_xayaz_mag_falling_period.dev_attr.attr,
- NULL,
-};
-
-static struct attribute_group sca3000_event_attribute_group = {
- .attrs = sca3000_event_attributes,
- .name = "events",
-};
-
-/**
- * sca3000_clean_setup() get the device into a predictable state
- *
- * Devices use flash memory to store many of the register values
- * and hence can come up in somewhat unpredictable states.
- * Hence reset everything on driver load.
- **/
-static int sca3000_clean_setup(struct sca3000_state *st)
-{
- int ret;
-
- mutex_lock(&st->lock);
- /* Ensure all interrupts have been acknowledged */
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_STATUS, 1);
- if (ret)
- goto error_ret;
-
- /* Turn off all motion detection channels */
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
- if (ret < 0)
- goto error_ret;
- ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL,
- ret & SCA3000_MD_CTRL_PROT_MASK);
- if (ret)
- goto error_ret;
-
- /* Disable ring buffer */
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
- if (ret < 0)
- goto error_ret;
- ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
- (ret & SCA3000_OUT_CTRL_PROT_MASK)
- | SCA3000_OUT_CTRL_BUF_X_EN
- | SCA3000_OUT_CTRL_BUF_Y_EN
- | SCA3000_OUT_CTRL_BUF_Z_EN
- | SCA3000_OUT_CTRL_BUF_DIV_4);
- if (ret)
- goto error_ret;
- /* Enable interrupts, relevant to mode and set up as active low */
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
- if (ret)
- goto error_ret;
- ret = sca3000_write_reg(st,
- SCA3000_REG_ADDR_INT_MASK,
- (ret & SCA3000_INT_MASK_PROT_MASK)
- | SCA3000_INT_MASK_ACTIVE_LOW);
- if (ret)
- goto error_ret;
- /*
- * Select normal measurement mode, free fall off, ring off
- * Ring in 12 bit mode - it is fine to overwrite reserved bits 3,5
- * as that occurs in one of the examples on the datasheet
- */
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
- (st->rx[0] & SCA3000_MODE_PROT_MASK));
- st->bpse = 11;
-
-error_ret:
- mutex_unlock(&st->lock);
- return ret;
-}
-
-static const struct iio_info sca3000_info = {
- .attrs = &sca3000_attribute_group,
- .read_raw = &sca3000_read_raw,
- .write_raw = &sca3000_write_raw,
- .event_attrs = &sca3000_event_attribute_group,
- .read_event_value = &sca3000_read_thresh,
- .write_event_value = &sca3000_write_thresh,
- .read_event_config = &sca3000_read_event_config,
- .write_event_config = &sca3000_write_event_config,
- .driver_module = THIS_MODULE,
-};
-
-static int sca3000_probe(struct spi_device *spi)
-{
- int ret;
- struct sca3000_state *st;
- struct iio_dev *indio_dev;
-
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
-
- st = iio_priv(indio_dev);
- spi_set_drvdata(spi, indio_dev);
- st->us = spi;
- mutex_init(&st->lock);
- st->info = &sca3000_spi_chip_info_tbl[spi_get_device_id(spi)
- ->driver_data];
-
- indio_dev->dev.parent = &spi->dev;
- indio_dev->name = spi_get_device_id(spi)->name;
- indio_dev->info = &sca3000_info;
- if (st->info->temp_output) {
- indio_dev->channels = sca3000_channels_with_temp;
- indio_dev->num_channels =
- ARRAY_SIZE(sca3000_channels_with_temp);
- } else {
- indio_dev->channels = sca3000_channels;
- indio_dev->num_channels = ARRAY_SIZE(sca3000_channels);
- }
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- sca3000_configure_ring(indio_dev);
- ret = iio_device_register(indio_dev);
- if (ret < 0)
- return ret;
-
- if (spi->irq) {
- ret = request_threaded_irq(spi->irq,
- NULL,
- &sca3000_event_handler,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "sca3000",
- indio_dev);
- if (ret)
- goto error_unregister_dev;
- }
- sca3000_register_ring_funcs(indio_dev);
- ret = sca3000_clean_setup(st);
- if (ret)
- goto error_free_irq;
- return 0;
-
-error_free_irq:
- if (spi->irq)
- free_irq(spi->irq, indio_dev);
-error_unregister_dev:
- iio_device_unregister(indio_dev);
- return ret;
-}
-
-static int sca3000_stop_all_interrupts(struct sca3000_state *st)
-{
- int ret;
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
- if (ret)
- goto error_ret;
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_INT_MASK,
- (st->rx[0] &
- ~(SCA3000_INT_MASK_RING_THREE_QUARTER |
- SCA3000_INT_MASK_RING_HALF |
- SCA3000_INT_MASK_ALL_INTS)));
-error_ret:
- mutex_unlock(&st->lock);
- return ret;
-}
-
-static int sca3000_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct sca3000_state *st = iio_priv(indio_dev);
-
- /* Must ensure no interrupts can be generated after this! */
- sca3000_stop_all_interrupts(st);
- if (spi->irq)
- free_irq(spi->irq, indio_dev);
- iio_device_unregister(indio_dev);
- sca3000_unconfigure_ring(indio_dev);
-
- return 0;
-}
-
-static const struct spi_device_id sca3000_id[] = {
- {"sca3000_d01", d01},
- {"sca3000_e02", e02},
- {"sca3000_e04", e04},
- {"sca3000_e05", e05},
- {}
-};
-MODULE_DEVICE_TABLE(spi, sca3000_id);
-
-static struct spi_driver sca3000_driver = {
- .driver = {
- .name = "sca3000",
- },
- .probe = sca3000_probe,
- .remove = sca3000_remove,
- .id_table = sca3000_id,
-};
-module_spi_driver(sca3000_driver);
-
-MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
-MODULE_DESCRIPTION("VTI SCA3000 Series Accelerometers SPI driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
deleted file mode 100644
index d1cb9b9cf22b..000000000000
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * sca3000_ring.c -- support VTI sca3000 series accelerometers via SPI
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
- *
- */
-
-#include <linux/interrupt.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/sysfs.h>
-#include <linux/sched.h>
-#include <linux/poll.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include "../ring_hw.h"
-#include "sca3000.h"
-
-/* RFC / future work
- *
- * The internal ring buffer doesn't actually change what it holds depending
- * on which signals are enabled etc, merely whether you can read them.
- * As such the scan mode selection is somewhat different than for a software
- * ring buffer and changing it actually covers any data already in the buffer.
- * Currently scan elements aren't configured so it doesn't matter.
- */
-
-static int sca3000_read_data(struct sca3000_state *st,
- u8 reg_address_high,
- u8 **rx_p,
- int len)
-{
- int ret;
- struct spi_transfer xfer[2] = {
- {
- .len = 1,
- .tx_buf = st->tx,
- }, {
- .len = len,
- }
- };
- *rx_p = kmalloc(len, GFP_KERNEL);
- if (!*rx_p) {
- ret = -ENOMEM;
- goto error_ret;
- }
- xfer[1].rx_buf = *rx_p;
- st->tx[0] = SCA3000_READ_REG(reg_address_high);
- ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
- if (ret) {
- dev_err(get_device(&st->us->dev), "problem reading register");
- goto error_free_rx;
- }
-
- return 0;
-error_free_rx:
- kfree(*rx_p);
-error_ret:
- return ret;
-}
-
-/**
- * sca3000_read_first_n_hw_rb() - main ring access, pulls data from ring
- * @r: the ring
- * @count: number of samples to try and pull
- * @data: output the actual samples pulled from the hw ring
- *
- * Currently does not provide timestamps. As the hardware doesn't add them they
- * can only be inferred approximately from ring buffer events such as 50% full
- * and knowledge of when buffer was last emptied. This is left to userspace.
- **/
-static int sca3000_read_first_n_hw_rb(struct iio_buffer *r,
- size_t count, char __user *buf)
-{
- struct iio_hw_buffer *hw_ring = iio_to_hw_buf(r);
- struct iio_dev *indio_dev = hw_ring->private;
- struct sca3000_state *st = iio_priv(indio_dev);
- u8 *rx;
- int ret, i, num_available, num_read = 0;
- int bytes_per_sample = 1;
-
- if (st->bpse == 11)
- bytes_per_sample = 2;
-
- mutex_lock(&st->lock);
- if (count % bytes_per_sample) {
- ret = -EINVAL;
- goto error_ret;
- }
-
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_BUF_COUNT, 1);
- if (ret)
- goto error_ret;
- num_available = st->rx[0];
- /*
- * num_available is the total number of samples available
- * i.e. number of time points * number of channels.
- */
- if (count > num_available * bytes_per_sample)
- num_read = num_available * bytes_per_sample;
- else
- num_read = count;
-
- ret = sca3000_read_data(st,
- SCA3000_REG_ADDR_RING_OUT,
- &rx, num_read);
- if (ret)
- goto error_ret;
-
- for (i = 0; i < num_read / sizeof(u16); i++)
- *(((u16 *)rx) + i) = be16_to_cpup((__be16 *)rx + i);
-
- if (copy_to_user(buf, rx, num_read))
- ret = -EFAULT;
- kfree(rx);
- r->stufftoread = 0;
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret ? ret : num_read;
-}
-
-static size_t sca3000_ring_buf_data_available(struct iio_buffer *r)
-{
- return r->stufftoread ? r->watermark : 0;
-}
-
-/**
- * sca3000_query_ring_int() is the hardware ring status interrupt enabled
- **/
-static ssize_t sca3000_query_ring_int(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret, val;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
- val = st->rx[0];
- mutex_unlock(&st->lock);
- if (ret)
- return ret;
-
- return sprintf(buf, "%d\n", !!(val & this_attr->address));
-}
-
-/**
- * sca3000_set_ring_int() set state of ring status interrupt
- **/
-static ssize_t sca3000_set_ring_int(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- u8 val;
- int ret;
-
- mutex_lock(&st->lock);
- ret = kstrtou8(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
- if (ret)
- goto error_ret;
- if (val)
- ret = sca3000_write_reg(st,
- SCA3000_REG_ADDR_INT_MASK,
- st->rx[0] | this_attr->address);
- else
- ret = sca3000_write_reg(st,
- SCA3000_REG_ADDR_INT_MASK,
- st->rx[0] & ~this_attr->address);
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
-static IIO_DEVICE_ATTR(50_percent, S_IRUGO | S_IWUSR,
- sca3000_query_ring_int,
- sca3000_set_ring_int,
- SCA3000_INT_MASK_RING_HALF);
-
-static IIO_DEVICE_ATTR(75_percent, S_IRUGO | S_IWUSR,
- sca3000_query_ring_int,
- sca3000_set_ring_int,
- SCA3000_INT_MASK_RING_THREE_QUARTER);
-
-static ssize_t sca3000_show_buffer_scale(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
-
- return sprintf(buf, "0.%06d\n", 4 * st->info->scale);
-}
-
-static IIO_DEVICE_ATTR(in_accel_scale,
- S_IRUGO,
- sca3000_show_buffer_scale,
- NULL,
- 0);
-
-/*
- * Ring buffer attributes
- * This device is a bit unusual in that the sampling frequency and bpse
- * only apply to the ring buffer. At all times full rate and accuracy
- * is available via direct reading from registers.
- */
-static const struct attribute *sca3000_ring_attributes[] = {
- &iio_dev_attr_50_percent.dev_attr.attr,
- &iio_dev_attr_75_percent.dev_attr.attr,
- &iio_dev_attr_in_accel_scale.dev_attr.attr,
- NULL,
-};
-
-static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
-{
- struct iio_buffer *buf;
- struct iio_hw_buffer *ring;
-
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring)
- return NULL;
-
- ring->private = indio_dev;
- buf = &ring->buf;
- buf->stufftoread = 0;
- buf->length = 64;
- buf->attrs = sca3000_ring_attributes;
- iio_buffer_init(buf);
-
- return buf;
-}
-
-static void sca3000_ring_release(struct iio_buffer *r)
-{
- kfree(iio_to_hw_buf(r));
-}
-
-static const struct iio_buffer_access_funcs sca3000_ring_access_funcs = {
- .read_first_n = &sca3000_read_first_n_hw_rb,
- .data_available = sca3000_ring_buf_data_available,
- .release = sca3000_ring_release,
-
- .modes = INDIO_BUFFER_HARDWARE,
-};
-
-int sca3000_configure_ring(struct iio_dev *indio_dev)
-{
- struct iio_buffer *buffer;
-
- buffer = sca3000_rb_allocate(indio_dev);
- if (!buffer)
- return -ENOMEM;
- indio_dev->modes |= INDIO_BUFFER_HARDWARE;
-
- indio_dev->buffer->access = &sca3000_ring_access_funcs;
-
- iio_device_attach_buffer(indio_dev, buffer);
-
- return 0;
-}
-
-void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
-{
- iio_buffer_put(indio_dev->buffer);
-}
-
-static inline
-int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
-{
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret;
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
- if (state) {
- dev_info(&indio_dev->dev, "supposedly enabling ring buffer\n");
- ret = sca3000_write_reg(st,
- SCA3000_REG_ADDR_MODE,
- (st->rx[0] | SCA3000_RING_BUF_ENABLE));
- } else
- ret = sca3000_write_reg(st,
- SCA3000_REG_ADDR_MODE,
- (st->rx[0] & ~SCA3000_RING_BUF_ENABLE));
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret;
-}
-
-/**
- * sca3000_hw_ring_preenable() hw ring buffer preenable function
- *
- * Very simple enable function as the chip allows normal reads
- * during ring buffer operation, so as long as it is indeed running
- * before we notify the core, the precise ordering does not matter.
- **/
-static int sca3000_hw_ring_preenable(struct iio_dev *indio_dev)
-{
- return __sca3000_hw_ring_state_set(indio_dev, 1);
-}
-
-static int sca3000_hw_ring_postdisable(struct iio_dev *indio_dev)
-{
- return __sca3000_hw_ring_state_set(indio_dev, 0);
-}
-
-static const struct iio_buffer_setup_ops sca3000_ring_setup_ops = {
- .preenable = &sca3000_hw_ring_preenable,
- .postdisable = &sca3000_hw_ring_postdisable,
-};
-
-void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
-{
- indio_dev->setup_ops = &sca3000_ring_setup_ops;
-}
-
-/**
- * sca3000_ring_int_process() ring specific interrupt handling.
- *
- * This is only split from the main interrupt handler so as to
- * reduce the amount of code if the ring buffer is not enabled.
- **/
-void sca3000_ring_int_process(u8 val, struct iio_buffer *ring)
-{
- if (val & (SCA3000_INT_STATUS_THREE_QUARTERS |
- SCA3000_INT_STATUS_HALF)) {
- ring->stufftoread = true;
- wake_up_interruptible(&ring->pollq);
- }
-}
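The hardware-ring read above clamps the byte count requested by userspace to what the BUF_COUNT register reports (samples across all enabled channels), with 11-bit mode packing each sample into two bytes. A standalone sketch of that clamping, with made-up numbers standing in for the register value and the read(2) request:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	unsigned int bpse = 11;			/* bits per scan element */
	size_t bytes_per_sample = (bpse == 11) ? 2 : 1;
	size_t num_available = 5;		/* samples reported by BUF_COUNT (made up) */
	size_t count = 64;			/* bytes requested by userspace (made up) */
	size_t num_read;

	if (count % bytes_per_sample) {
		fprintf(stderr, "request must be a whole number of samples\n");
		return 1;
	}

	/* Never hand back more data than the hardware ring actually holds. */
	num_read = count;
	if (count > num_available * bytes_per_sample)
		num_read = num_available * bytes_per_sample;

	printf("returning %zu bytes (%zu samples)\n",
	       num_read, num_read / bytes_per_sample);
	return 0;
}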
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index 3cdd83ccec8e..ac09485923b6 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -2,7 +2,6 @@
# Makefile for industrial I/O ADC drivers
#
-ad7606-y := ad7606_core.o ad7606_ring.o
obj-$(CONFIG_AD7606_IFACE_PARALLEL) += ad7606_par.o
obj-$(CONFIG_AD7606_IFACE_SPI) += ad7606_spi.o
obj-$(CONFIG_AD7606) += ad7606.o
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 1cf6b79801a9..1fb68c01abd5 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -152,7 +152,8 @@
*/
struct ad7192_state {
- struct regulator *reg;
+ struct regulator *avdd;
+ struct regulator *dvdd;
u16 int_vref_mv;
u32 mclk;
u32 f_order;
@@ -322,57 +323,6 @@ out:
return ret;
}
-static ssize_t ad7192_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7192_state *st = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", st->mclk /
- (st->f_order * 1024 * AD7192_MODE_RATE(st->mode)));
-}
-
-static ssize_t ad7192_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7192_state *st = iio_priv(indio_dev);
- unsigned long lval;
- int div, ret;
-
- ret = kstrtoul(buf, 10, &lval);
- if (ret)
- return ret;
- if (lval == 0)
- return -EINVAL;
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- div = st->mclk / (lval * st->f_order * 1024);
- if (div < 1 || div > 1023) {
- ret = -EINVAL;
- goto out;
- }
-
- st->mode &= ~AD7192_MODE_RATE(-1);
- st->mode |= AD7192_MODE_RATE(div);
- ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
-
-out:
- iio_device_release_direct_mode(indio_dev);
-
- return ret ? ret : len;
-}
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
- ad7192_read_frequency,
- ad7192_write_frequency);
-
static ssize_t
ad7192_show_scale_available(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -471,7 +421,6 @@ static IIO_DEVICE_ATTR(ac_excitation_en, S_IRUGO | S_IWUSR,
AD7192_REG_MODE);
static struct attribute *ad7192_attributes[] = {
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
&iio_dev_attr_bridge_switch_en.dev_attr.attr,
@@ -484,7 +433,6 @@ static const struct attribute_group ad7192_attribute_group = {
};
static struct attribute *ad7195_attributes[] = {
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
&iio_dev_attr_bridge_switch_en.dev_attr.attr,
@@ -536,6 +484,10 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
if (chan->type == IIO_TEMP)
*val -= 273 * ad7192_get_temp_scale(unipolar);
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->mclk /
+ (st->f_order * 1024 * AD7192_MODE_RATE(st->mode));
+ return IIO_VAL_INT;
}
return -EINVAL;
@@ -548,7 +500,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
long mask)
{
struct ad7192_state *st = iio_priv(indio_dev);
- int ret, i;
+ int ret, i, div;
unsigned int tmp;
ret = iio_device_claim_direct_mode(indio_dev);
@@ -572,6 +524,22 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
break;
}
break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (!val) {
+ ret = -EINVAL;
+ break;
+ }
+
+ div = st->mclk / (val * st->f_order * 1024);
+ if (div < 1 || div > 1023) {
+ ret = -EINVAL;
+ break;
+ }
+
+ st->mode &= ~AD7192_MODE_RATE(-1);
+ st->mode |= AD7192_MODE_RATE(div);
+ ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
+ break;
default:
ret = -EINVAL;
}
@@ -585,7 +553,14 @@ static int ad7192_write_raw_get_fmt(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
long mask)
{
- return IIO_VAL_INT_PLUS_NANO;
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
}
static const struct iio_info ad7192_info = {
@@ -659,15 +634,30 @@ static int ad7192_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
- st->reg = devm_regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret)
- return ret;
+ st->avdd = devm_regulator_get(&spi->dev, "avdd");
+ if (IS_ERR(st->avdd))
+ return PTR_ERR(st->avdd);
- voltage_uv = regulator_get_voltage(st->reg);
+ ret = regulator_enable(st->avdd);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable specified AVdd supply\n");
+ return ret;
+ }
+
+ st->dvdd = devm_regulator_get(&spi->dev, "dvdd");
+ if (IS_ERR(st->dvdd)) {
+ ret = PTR_ERR(st->dvdd);
+ goto error_disable_avdd;
}
+ ret = regulator_enable(st->dvdd);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable specified DVdd supply\n");
+ goto error_disable_avdd;
+ }
+
+ voltage_uv = regulator_get_voltage(st->avdd);
+
if (pdata->vref_mv)
st->int_vref_mv = pdata->vref_mv;
else if (voltage_uv)
@@ -701,7 +691,7 @@ static int ad7192_probe(struct spi_device *spi)
ret = ad_sd_setup_buffer_and_trigger(indio_dev);
if (ret)
- goto error_disable_reg;
+ goto error_disable_dvdd;
ret = ad7192_setup(st, pdata);
if (ret)
@@ -714,9 +704,10 @@ static int ad7192_probe(struct spi_device *spi)
error_remove_trigger:
ad_sd_cleanup_buffer_and_trigger(indio_dev);
-error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+error_disable_dvdd:
+ regulator_disable(st->dvdd);
+error_disable_avdd:
+ regulator_disable(st->avdd);
return ret;
}
@@ -729,8 +720,8 @@ static int ad7192_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
ad_sd_cleanup_buffer_and_trigger(indio_dev);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ regulator_disable(st->dvdd);
+ regulator_disable(st->avdd);
return 0;
}
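The sampling-frequency support moved into read_raw/write_raw above keeps the old attribute's arithmetic: the output rate is mclk / (f_order * 1024 * rate_div), and a written frequency is turned back into a divider clamped to the 10-bit MODE field. A standalone sketch of that round trip; the 4.9152 MHz mclk and f_order = 1 are illustrative assumptions, not values taken from the driver:

#include <stdio.h>

/* Illustrative values only: master clock rate and filter order factor. */
#define MCLK_HZ		4915200
#define F_ORDER		1

/* Frequency -> MODE register divider, clamped to the 10-bit field. */
static int freq_to_div(unsigned int freq_hz)
{
	int div = MCLK_HZ / (freq_hz * F_ORDER * 1024);

	return (div < 1 || div > 1023) ? -1 : div;
}

/* Divider -> output data rate, the read_raw direction. */
static unsigned int div_to_freq(int div)
{
	return MCLK_HZ / (F_ORDER * 1024 * div);
}

int main(void)
{
	int div = freq_to_div(50);	/* request roughly 50 Hz */

	if (div > 0)
		printf("div=%d -> %u Hz\n", div, div_to_freq(div));
	return 0;
}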
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index b460dda7eb65..ee679ac0368f 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -777,7 +777,7 @@ static struct attribute *ad7280_event_attributes[] = {
NULL,
};
-static struct attribute_group ad7280_event_attrs_group = {
+static const struct attribute_group ad7280_event_attrs_group = {
.attrs = ad7280_event_attributes,
};
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606.c
index f79ee61851f6..453190864b2f 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606.c
@@ -13,7 +13,7 @@
#include <linux/sysfs.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/module.h>
@@ -21,58 +21,109 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#include "ad7606.h"
-int ad7606_reset(struct ad7606_state *st)
+static int ad7606_reset(struct ad7606_state *st)
{
- if (gpio_is_valid(st->pdata->gpio_reset)) {
- gpio_set_value(st->pdata->gpio_reset, 1);
+ if (st->gpio_reset) {
+ gpiod_set_value(st->gpio_reset, 1);
ndelay(100); /* t_reset >= 100ns */
- gpio_set_value(st->pdata->gpio_reset, 0);
+ gpiod_set_value(st->gpio_reset, 0);
return 0;
}
return -ENODEV;
}
+static int ad7606_read_samples(struct ad7606_state *st)
+{
+ unsigned int num = st->chip_info->num_channels;
+ u16 *data = st->data;
+ int ret;
+
+ /*
+ * The frstdata signal is high while, and immediately after, the sample of
+ * the first channel is read, and low for all other channels. This can be
+ * used to check that the incoming data is correctly aligned. During normal
+ * operation the data should never become unaligned, but a glitch or an
+ * electrostatic discharge might cause an extra read or clock cycle.
+ * Monitoring the frstdata signal allows recovery from such failure
+ * situations.
+ */
+
+ if (st->gpio_frstdata) {
+ ret = st->bops->read_block(st->dev, 1, data);
+ if (ret)
+ return ret;
+
+ if (!gpiod_get_value(st->gpio_frstdata)) {
+ ad7606_reset(st);
+ return -EIO;
+ }
+
+ data++;
+ num--;
+ }
+
+ return st->bops->read_block(st->dev, num, data);
+}
+
+static irqreturn_t ad7606_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct ad7606_state *st = iio_priv(pf->indio_dev);
+
+ gpiod_set_value(st->gpio_convst, 1);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ad7606_poll_bh_to_ring() - bottom half of trigger-launched polling to the ring buffer
+ * @work_s: the work struct through which this was scheduled
+ *
+ * Currently there is no option in this driver to disable the saving of
+ * timestamps within the ring.
+ * Running only one copy of this work at a time was most likely intended to
+ * avoid problems if the trigger rate was set far too high and the reads then
+ * locked up the machine.
+ **/
+static void ad7606_poll_bh_to_ring(struct work_struct *work_s)
+{
+ struct ad7606_state *st = container_of(work_s, struct ad7606_state,
+ poll_work);
+ struct iio_dev *indio_dev = iio_priv_to_dev(st);
+ int ret;
+
+ ret = ad7606_read_samples(st);
+ if (ret == 0)
+ iio_push_to_buffers_with_timestamp(indio_dev, st->data,
+ iio_get_time_ns(indio_dev));
+
+ gpiod_set_value(st->gpio_convst, 0);
+ iio_trigger_notify_done(indio_dev->trig);
+}
+
static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
{
struct ad7606_state *st = iio_priv(indio_dev);
int ret;
st->done = false;
- gpio_set_value(st->pdata->gpio_convst, 1);
+ gpiod_set_value(st->gpio_convst, 1);
ret = wait_event_interruptible(st->wq_data_avail, st->done);
if (ret)
goto error_ret;
- if (gpio_is_valid(st->pdata->gpio_frstdata)) {
- ret = st->bops->read_block(st->dev, 1, st->data);
- if (ret)
- goto error_ret;
- if (!gpio_get_value(st->pdata->gpio_frstdata)) {
- /* This should never happen */
- ad7606_reset(st);
- ret = -EIO;
- goto error_ret;
- }
- ret = st->bops->read_block(st->dev,
- st->chip_info->num_channels - 1, &st->data[1]);
- if (ret)
- goto error_ret;
- } else {
- ret = st->bops->read_block(st->dev,
- st->chip_info->num_channels, st->data);
- if (ret)
- goto error_ret;
- }
-
- ret = st->data[ch];
+ ret = ad7606_read_samples(st);
+ if (ret == 0)
+ ret = st->data[ch];
error_ret:
- gpio_set_value(st->pdata->gpio_convst, 0);
+ gpiod_set_value(st->gpio_convst, 0);
return ret;
}
@@ -103,6 +154,9 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
*val = st->range * 2;
*val2 = st->chip_info->channels[0].scan_type.realbits;
return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = st->oversampling;
+ return IIO_VAL_INT;
}
return -EINVAL;
}
@@ -129,12 +183,11 @@ static ssize_t ad7606_store_range(struct device *dev,
if (ret)
return ret;
- if (!(lval == 5000 || lval == 10000)) {
- dev_err(dev, "range is not supported\n");
+ if (!(lval == 5000 || lval == 10000))
return -EINVAL;
- }
+
mutex_lock(&indio_dev->mlock);
- gpio_set_value(st->pdata->gpio_range, lval == 10000);
+ gpiod_set_value(st->gpio_range, lval == 10000);
st->range = lval;
mutex_unlock(&indio_dev->mlock);
@@ -145,19 +198,9 @@ static IIO_DEVICE_ATTR(in_voltage_range, S_IRUGO | S_IWUSR,
ad7606_show_range, ad7606_store_range, 0);
static IIO_CONST_ATTR(in_voltage_range_available, "5000 10000");
-static ssize_t ad7606_show_oversampling_ratio(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7606_state *st = iio_priv(indio_dev);
-
- return sprintf(buf, "%u\n", st->oversampling);
-}
-
static int ad7606_oversampling_get_index(unsigned int val)
{
- unsigned char supported[] = {0, 2, 4, 8, 16, 32, 64};
+ unsigned char supported[] = {1, 2, 4, 8, 16, 32, 64};
int i;
for (i = 0; i < ARRAY_SIZE(supported); i++)
@@ -167,44 +210,45 @@ static int ad7606_oversampling_get_index(unsigned int val)
return -EINVAL;
}
-static ssize_t ad7606_store_oversampling_ratio(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int ad7606_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7606_state *st = iio_priv(indio_dev);
- unsigned long lval;
+ int values[3];
int ret;
- ret = kstrtoul(buf, 10, &lval);
- if (ret)
- return ret;
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ if (val2)
+ return -EINVAL;
+ ret = ad7606_oversampling_get_index(val);
+ if (ret < 0)
+ return ret;
- ret = ad7606_oversampling_get_index(lval);
- if (ret < 0) {
- dev_err(dev, "oversampling %lu is not supported\n", lval);
- return ret;
- }
+ values[0] = (ret >> 0) & 1;
+ values[1] = (ret >> 1) & 1;
+ values[2] = (ret >> 2) & 1;
- mutex_lock(&indio_dev->mlock);
- gpio_set_value(st->pdata->gpio_os0, (ret >> 0) & 1);
- gpio_set_value(st->pdata->gpio_os1, (ret >> 1) & 1);
- gpio_set_value(st->pdata->gpio_os1, (ret >> 2) & 1);
- st->oversampling = lval;
- mutex_unlock(&indio_dev->mlock);
+ mutex_lock(&indio_dev->mlock);
+ gpiod_set_array_value(ARRAY_SIZE(values), st->gpio_os->desc,
+ values);
+ st->oversampling = val;
+ mutex_unlock(&indio_dev->mlock);
- return count;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
-static IIO_DEVICE_ATTR(oversampling_ratio, S_IRUGO | S_IWUSR,
- ad7606_show_oversampling_ratio,
- ad7606_store_oversampling_ratio, 0);
-static IIO_CONST_ATTR(oversampling_ratio_available, "0 2 4 8 16 32 64");
+static IIO_CONST_ATTR(oversampling_ratio_available, "1 2 4 8 16 32 64");
static struct attribute *ad7606_attributes_os_and_range[] = {
&iio_dev_attr_in_voltage_range.dev_attr.attr,
&iio_const_attr_in_voltage_range_available.dev_attr.attr,
- &iio_dev_attr_oversampling_ratio.dev_attr.attr,
&iio_const_attr_oversampling_ratio_available.dev_attr.attr,
NULL,
};
@@ -214,7 +258,6 @@ static const struct attribute_group ad7606_attribute_group_os_and_range = {
};
static struct attribute *ad7606_attributes_os[] = {
- &iio_dev_attr_oversampling_ratio.dev_attr.attr,
&iio_const_attr_oversampling_ratio_available.dev_attr.attr,
NULL,
};
@@ -241,6 +284,8 @@ static const struct attribute_group ad7606_attribute_group_range = {
.address = num, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
+ .info_mask_shared_by_all = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.scan_index = num, \
.scan_type = { \
.sign = 's', \
@@ -267,20 +312,14 @@ static const struct ad7606_chip_info ad7606_chip_info_tbl[] = {
* More devices added in future
*/
[ID_AD7606_8] = {
- .name = "ad7606",
- .int_vref_mv = 2500,
.channels = ad7606_channels,
.num_channels = 9,
},
[ID_AD7606_6] = {
- .name = "ad7606-6",
- .int_vref_mv = 2500,
.channels = ad7606_channels,
.num_channels = 7,
},
[ID_AD7606_4] = {
- .name = "ad7606-4",
- .int_vref_mv = 2500,
.channels = ad7606_channels,
.num_channels = 5,
},
@@ -288,119 +327,34 @@ static const struct ad7606_chip_info ad7606_chip_info_tbl[] = {
static int ad7606_request_gpios(struct ad7606_state *st)
{
- struct gpio gpio_array[3] = {
- [0] = {
- .gpio = st->pdata->gpio_os0,
- .flags = GPIOF_DIR_OUT | ((st->oversampling & 1) ?
- GPIOF_INIT_HIGH : GPIOF_INIT_LOW),
- .label = "AD7606_OS0",
- },
- [1] = {
- .gpio = st->pdata->gpio_os1,
- .flags = GPIOF_DIR_OUT | ((st->oversampling & 2) ?
- GPIOF_INIT_HIGH : GPIOF_INIT_LOW),
- .label = "AD7606_OS1",
- },
- [2] = {
- .gpio = st->pdata->gpio_os2,
- .flags = GPIOF_DIR_OUT | ((st->oversampling & 4) ?
- GPIOF_INIT_HIGH : GPIOF_INIT_LOW),
- .label = "AD7606_OS2",
- },
- };
- int ret;
-
- if (gpio_is_valid(st->pdata->gpio_convst)) {
- ret = gpio_request_one(st->pdata->gpio_convst,
- GPIOF_OUT_INIT_LOW,
- "AD7606_CONVST");
- if (ret) {
- dev_err(st->dev, "failed to request GPIO CONVST\n");
- goto error_ret;
- }
- } else {
- ret = -EIO;
- goto error_ret;
- }
-
- if (gpio_is_valid(st->pdata->gpio_os0) &&
- gpio_is_valid(st->pdata->gpio_os1) &&
- gpio_is_valid(st->pdata->gpio_os2)) {
- ret = gpio_request_array(gpio_array, ARRAY_SIZE(gpio_array));
- if (ret < 0)
- goto error_free_convst;
- }
-
- if (gpio_is_valid(st->pdata->gpio_reset)) {
- ret = gpio_request_one(st->pdata->gpio_reset,
- GPIOF_OUT_INIT_LOW,
- "AD7606_RESET");
- if (ret < 0)
- goto error_free_os;
- }
-
- if (gpio_is_valid(st->pdata->gpio_range)) {
- ret = gpio_request_one(st->pdata->gpio_range, GPIOF_DIR_OUT |
- ((st->range == 10000) ? GPIOF_INIT_HIGH :
- GPIOF_INIT_LOW), "AD7606_RANGE");
- if (ret < 0)
- goto error_free_reset;
- }
- if (gpio_is_valid(st->pdata->gpio_stby)) {
- ret = gpio_request_one(st->pdata->gpio_stby,
- GPIOF_OUT_INIT_HIGH,
- "AD7606_STBY");
- if (ret < 0)
- goto error_free_range;
- }
-
- if (gpio_is_valid(st->pdata->gpio_frstdata)) {
- ret = gpio_request_one(st->pdata->gpio_frstdata, GPIOF_IN,
- "AD7606_FRSTDATA");
- if (ret < 0)
- goto error_free_stby;
- }
-
- return 0;
-
-error_free_stby:
- if (gpio_is_valid(st->pdata->gpio_stby))
- gpio_free(st->pdata->gpio_stby);
-error_free_range:
- if (gpio_is_valid(st->pdata->gpio_range))
- gpio_free(st->pdata->gpio_range);
-error_free_reset:
- if (gpio_is_valid(st->pdata->gpio_reset))
- gpio_free(st->pdata->gpio_reset);
-error_free_os:
- if (gpio_is_valid(st->pdata->gpio_os0) &&
- gpio_is_valid(st->pdata->gpio_os1) &&
- gpio_is_valid(st->pdata->gpio_os2))
- gpio_free_array(gpio_array, ARRAY_SIZE(gpio_array));
-error_free_convst:
- gpio_free(st->pdata->gpio_convst);
-error_ret:
- return ret;
-}
-
-static void ad7606_free_gpios(struct ad7606_state *st)
-{
- if (gpio_is_valid(st->pdata->gpio_frstdata))
- gpio_free(st->pdata->gpio_frstdata);
- if (gpio_is_valid(st->pdata->gpio_stby))
- gpio_free(st->pdata->gpio_stby);
- if (gpio_is_valid(st->pdata->gpio_range))
- gpio_free(st->pdata->gpio_range);
- if (gpio_is_valid(st->pdata->gpio_reset))
- gpio_free(st->pdata->gpio_reset);
- if (gpio_is_valid(st->pdata->gpio_os0) &&
- gpio_is_valid(st->pdata->gpio_os1) &&
- gpio_is_valid(st->pdata->gpio_os2)) {
- gpio_free(st->pdata->gpio_os2);
- gpio_free(st->pdata->gpio_os1);
- gpio_free(st->pdata->gpio_os0);
- }
- gpio_free(st->pdata->gpio_convst);
+ struct device *dev = st->dev;
+
+ st->gpio_convst = devm_gpiod_get(dev, "conversion-start",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->gpio_convst))
+ return PTR_ERR(st->gpio_convst);
+
+ st->gpio_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(st->gpio_reset))
+ return PTR_ERR(st->gpio_reset);
+
+ st->gpio_range = devm_gpiod_get_optional(dev, "range", GPIOD_OUT_LOW);
+ if (IS_ERR(st->gpio_range))
+ return PTR_ERR(st->gpio_range);
+
+ st->gpio_standby = devm_gpiod_get_optional(dev, "standby",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(st->gpio_standby))
+ return PTR_ERR(st->gpio_standby);
+
+ st->gpio_frstdata = devm_gpiod_get_optional(dev, "first-data",
+ GPIOD_IN);
+ if (IS_ERR(st->gpio_frstdata))
+ return PTR_ERR(st->gpio_frstdata);
+
+ st->gpio_os = devm_gpiod_get_array_optional(dev, "oversampling-ratio",
+ GPIOD_OUT_LOW);
+ return PTR_ERR_OR_ZERO(st->gpio_os);
}
/**
@@ -429,12 +383,14 @@ static const struct iio_info ad7606_info_no_os_or_range = {
static const struct iio_info ad7606_info_os_and_range = {
.driver_module = THIS_MODULE,
.read_raw = &ad7606_read_raw,
+ .write_raw = &ad7606_write_raw,
.attrs = &ad7606_attribute_group_os_and_range,
};
static const struct iio_info ad7606_info_os = {
.driver_module = THIS_MODULE,
.read_raw = &ad7606_read_raw,
+ .write_raw = &ad7606_write_raw,
.attrs = &ad7606_attribute_group_os,
};
@@ -444,81 +400,73 @@ static const struct iio_info ad7606_info_range = {
.attrs = &ad7606_attribute_group_range,
};
-struct iio_dev *ad7606_probe(struct device *dev, int irq,
- void __iomem *base_address,
- unsigned int id,
- const struct ad7606_bus_ops *bops)
+int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
+ const char *name, unsigned int id,
+ const struct ad7606_bus_ops *bops)
{
- struct ad7606_platform_data *pdata = dev->platform_data;
struct ad7606_state *st;
int ret;
struct iio_dev *indio_dev;
indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (!indio_dev)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
st = iio_priv(indio_dev);
st->dev = dev;
st->bops = bops;
st->base_address = base_address;
- st->range = pdata->default_range == 10000 ? 10000 : 5000;
+ st->range = 5000;
+ st->oversampling = 1;
+ INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring);
- ret = ad7606_oversampling_get_index(pdata->default_os);
- if (ret < 0) {
- dev_warn(dev, "oversampling %d is not supported\n",
- pdata->default_os);
- st->oversampling = 0;
- } else {
- st->oversampling = pdata->default_os;
- }
+ st->reg = devm_regulator_get(dev, "avcc");
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
- st->reg = devm_regulator_get(dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret)
- return ERR_PTR(ret);
+ ret = regulator_enable(st->reg);
+ if (ret) {
+ dev_err(dev, "Failed to enable specified AVcc supply\n");
+ return ret;
}
- st->pdata = pdata;
+ ret = ad7606_request_gpios(st);
+ if (ret)
+ goto error_disable_reg;
+
st->chip_info = &ad7606_chip_info_tbl[id];
indio_dev->dev.parent = dev;
- if (gpio_is_valid(st->pdata->gpio_os0) &&
- gpio_is_valid(st->pdata->gpio_os1) &&
- gpio_is_valid(st->pdata->gpio_os2)) {
- if (gpio_is_valid(st->pdata->gpio_range))
+ if (st->gpio_os) {
+ if (st->gpio_range)
indio_dev->info = &ad7606_info_os_and_range;
else
indio_dev->info = &ad7606_info_os;
} else {
- if (gpio_is_valid(st->pdata->gpio_range))
+ if (st->gpio_range)
indio_dev->info = &ad7606_info_range;
else
indio_dev->info = &ad7606_info_no_os_or_range;
}
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->name = st->chip_info->name;
+ indio_dev->name = name;
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
init_waitqueue_head(&st->wq_data_avail);
- ret = ad7606_request_gpios(st);
- if (ret)
- goto error_disable_reg;
-
ret = ad7606_reset(st);
if (ret)
dev_warn(st->dev, "failed to RESET: no RESET GPIO specified\n");
- ret = request_irq(irq, ad7606_interrupt,
- IRQF_TRIGGER_FALLING, st->chip_info->name, indio_dev);
+ ret = request_irq(irq, ad7606_interrupt, IRQF_TRIGGER_FALLING, name,
+ indio_dev);
if (ret)
- goto error_free_gpios;
+ goto error_disable_reg;
- ret = ad7606_register_ring_funcs_and_init(indio_dev);
+ ret = iio_triggered_buffer_setup(indio_dev, &ad7606_trigger_handler,
+ NULL, NULL);
if (ret)
goto error_free_irq;
@@ -526,35 +474,31 @@ struct iio_dev *ad7606_probe(struct device *dev, int irq,
if (ret)
goto error_unregister_ring;
- return indio_dev;
+ dev_set_drvdata(dev, indio_dev);
+
+ return 0;
error_unregister_ring:
- ad7606_ring_cleanup(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
error_free_irq:
free_irq(irq, indio_dev);
-error_free_gpios:
- ad7606_free_gpios(st);
-
error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
- return ERR_PTR(ret);
+ regulator_disable(st->reg);
+ return ret;
}
EXPORT_SYMBOL_GPL(ad7606_probe);
-int ad7606_remove(struct iio_dev *indio_dev, int irq)
+int ad7606_remove(struct device *dev, int irq)
{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7606_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- ad7606_ring_cleanup(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
free_irq(irq, indio_dev);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
-
- ad7606_free_gpios(st);
+ regulator_disable(st->reg);
return 0;
}
@@ -567,10 +511,9 @@ static int ad7606_suspend(struct device *dev)
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7606_state *st = iio_priv(indio_dev);
- if (gpio_is_valid(st->pdata->gpio_stby)) {
- if (gpio_is_valid(st->pdata->gpio_range))
- gpio_set_value(st->pdata->gpio_range, 1);
- gpio_set_value(st->pdata->gpio_stby, 0);
+ if (st->gpio_standby) {
+ gpiod_set_value(st->gpio_range, 1);
+ gpiod_set_value(st->gpio_standby, 0);
}
return 0;
@@ -581,12 +524,9 @@ static int ad7606_resume(struct device *dev)
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7606_state *st = iio_priv(indio_dev);
- if (gpio_is_valid(st->pdata->gpio_stby)) {
- if (gpio_is_valid(st->pdata->gpio_range))
- gpio_set_value(st->pdata->gpio_range,
- st->range == 10000);
-
- gpio_set_value(st->pdata->gpio_stby, 1);
+ if (st->gpio_standby) {
+ gpiod_set_value(st->gpio_range, st->range == 10000);
+ gpiod_set_value(st->gpio_standby, 1);
ad7606_reset(st);
}
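The new write_raw oversampling path maps the requested ratio onto an index into {1, 2, 4, 8, 16, 32, 64} and drives the three OS GPIOs with the bits of that index. A standalone sketch of the decomposition, with a printout standing in for gpiod_set_array_value():

#include <stdio.h>

static const unsigned char supported[] = {1, 2, 4, 8, 16, 32, 64};

/* Return the table index for a supported ratio, or -1 if unsupported. */
static int os_index(unsigned int val)
{
	unsigned int i;

	for (i = 0; i < sizeof(supported) / sizeof(supported[0]); i++)
		if (val == supported[i])
			return (int)i;
	return -1;
}

int main(void)
{
	int idx = os_index(16);		/* oversampling ratio 16 -> index 4 */

	if (idx >= 0)
		printf("OS0=%d OS1=%d OS2=%d\n",
		       (idx >> 0) & 1, (idx >> 1) & 1, (idx >> 2) & 1);
	return 0;
}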
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
index 39f50440d915..746f9553d2ba 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/staging/iio/adc/ad7606.h
@@ -9,48 +9,14 @@
#ifndef IIO_ADC_AD7606_H_
#define IIO_ADC_AD7606_H_
-/*
- * TODO: struct ad7606_platform_data needs to go into include/linux/iio
- */
-
-/**
- * struct ad7606_platform_data - platform/board specific information
- * @default_os: default oversampling value {0, 2, 4, 8, 16, 32, 64}
- * @default_range: default range +/-{5000, 10000} mVolt
- * @gpio_convst: number of gpio connected to the CONVST pin
- * @gpio_reset: gpio connected to the RESET pin, if not used set to -1
- * @gpio_range: gpio connected to the RANGE pin, if not used set to -1
- * @gpio_os0: gpio connected to the OS0 pin, if not used set to -1
- * @gpio_os1: gpio connected to the OS1 pin, if not used set to -1
- * @gpio_os2: gpio connected to the OS2 pin, if not used set to -1
- * @gpio_frstdata: gpio connected to the FRSTDAT pin, if not used set to -1
- * @gpio_stby: gpio connected to the STBY pin, if not used set to -1
- */
-
-struct ad7606_platform_data {
- unsigned int default_os;
- unsigned int default_range;
- unsigned int gpio_convst;
- unsigned int gpio_reset;
- unsigned int gpio_range;
- unsigned int gpio_os0;
- unsigned int gpio_os1;
- unsigned int gpio_os2;
- unsigned int gpio_frstdata;
- unsigned int gpio_stby;
-};
-
/**
* struct ad7606_chip_info - chip specific information
* @name: identification string for chip
- * @int_vref_mv: the internal reference voltage
* @channels: channel specification
* @num_channels: number of channels
*/
struct ad7606_chip_info {
- const char *name;
- u16 int_vref_mv;
const struct iio_chan_spec *channels;
unsigned int num_channels;
};
@@ -62,7 +28,6 @@ struct ad7606_chip_info {
struct ad7606_state {
struct device *dev;
const struct ad7606_chip_info *chip_info;
- struct ad7606_platform_data *pdata;
struct regulator *reg;
struct work_struct poll_work;
wait_queue_head_t wq_data_avail;
@@ -72,12 +37,19 @@ struct ad7606_state {
bool done;
void __iomem *base_address;
+ struct gpio_desc *gpio_convst;
+ struct gpio_desc *gpio_reset;
+ struct gpio_desc *gpio_range;
+ struct gpio_desc *gpio_standby;
+ struct gpio_desc *gpio_frstdata;
+ struct gpio_descs *gpio_os;
+
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
+ * 8 * 16-bit samples + 64-bit timestamp
*/
-
- unsigned short data[8] ____cacheline_aligned;
+ unsigned short data[12] ____cacheline_aligned;
};
struct ad7606_bus_ops {
@@ -85,11 +57,10 @@ struct ad7606_bus_ops {
int (*read_block)(struct device *, int, void *);
};
-struct iio_dev *ad7606_probe(struct device *dev, int irq,
- void __iomem *base_address, unsigned int id,
- const struct ad7606_bus_ops *bops);
-int ad7606_remove(struct iio_dev *indio_dev, int irq);
-int ad7606_reset(struct ad7606_state *st);
+int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
+ const char *name, unsigned int id,
+ const struct ad7606_bus_ops *bops);
+int ad7606_remove(struct device *dev, int irq);
enum ad7606_supported_device_ids {
ID_AD7606_8,
@@ -97,9 +68,6 @@ enum ad7606_supported_device_ids {
ID_AD7606_4
};
-int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev);
-void ad7606_ring_cleanup(struct iio_dev *indio_dev);
-
#ifdef CONFIG_PM_SLEEP
extern const struct dev_pm_ops ad7606_pm_ops;
#define AD7606_PM_OPS (&ad7606_pm_ops)
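The enlarged data[] above is sized for iio_push_to_buffers_with_timestamp(): eight 16-bit samples (16 bytes) followed by a 64-bit timestamp that must start on an 8-byte boundary, i.e. 24 bytes or twelve u16 entries. A quick standalone check of that arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int samples = 8;
	unsigned int sample_bytes = samples * sizeof(uint16_t);
	/* The timestamp must start on an 8-byte boundary. */
	unsigned int ts_offset = (sample_bytes + 7) & ~7u;
	unsigned int total = ts_offset + sizeof(int64_t);

	printf("%u bytes -> unsigned short data[%u]\n",
	       total, total / (unsigned int)sizeof(uint16_t));
	return 0;
}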
diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/staging/iio/adc/ad7606_par.c
index 84d23930fdde..cd6c410c0484 100644
--- a/drivers/staging/iio/adc/ad7606_par.c
+++ b/drivers/staging/iio/adc/ad7606_par.c
@@ -49,8 +49,8 @@ static const struct ad7606_bus_ops ad7606_par8_bops = {
static int ad7606_par_probe(struct platform_device *pdev)
{
+ const struct platform_device_id *id = platform_get_device_id(pdev);
struct resource *res;
- struct iio_dev *indio_dev;
void __iomem *addr;
resource_size_t remap_size;
int irq;
@@ -68,26 +68,15 @@ static int ad7606_par_probe(struct platform_device *pdev)
remap_size = resource_size(res);
- indio_dev = ad7606_probe(&pdev->dev, irq, addr,
- platform_get_device_id(pdev)->driver_data,
- remap_size > 1 ? &ad7606_par16_bops :
- &ad7606_par8_bops);
-
- if (IS_ERR(indio_dev))
- return PTR_ERR(indio_dev);
-
- platform_set_drvdata(pdev, indio_dev);
-
- return 0;
+ return ad7606_probe(&pdev->dev, irq, addr,
+ id->name, id->driver_data,
+ remap_size > 1 ? &ad7606_par16_bops :
+ &ad7606_par8_bops);
}
static int ad7606_par_remove(struct platform_device *pdev)
{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
-
- ad7606_remove(indio_dev, platform_get_irq(pdev, 0));
-
- return 0;
+ return ad7606_remove(&pdev->dev, platform_get_irq(pdev, 0));
}
static const struct platform_device_id ad7606_driver_ids[] = {
diff --git a/drivers/staging/iio/adc/ad7606_ring.c b/drivers/staging/iio/adc/ad7606_ring.c
deleted file mode 100644
index 0572df9aad85..000000000000
--- a/drivers/staging/iio/adc/ad7606_ring.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright 2011-2012 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- *
- */
-
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
-
-#include "ad7606.h"
-
-/**
- * ad7606_trigger_handler_th() th/bh of trigger launched polling to ring buffer
- *
- **/
-static irqreturn_t ad7606_trigger_handler_th_bh(int irq, void *p)
-{
- struct iio_poll_func *pf = p;
- struct ad7606_state *st = iio_priv(pf->indio_dev);
-
- gpio_set_value(st->pdata->gpio_convst, 1);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ad7606_poll_bh_to_ring() bh of trigger launched polling to ring buffer
- * @work_s: the work struct through which this was scheduled
- *
- * Currently there is no option in this driver to disable the saving of
- * timestamps within the ring.
- * I think the one copy of this at a time was to avoid problems if the
- * trigger was set far too high and the reads then locked up the computer.
- **/
-static void ad7606_poll_bh_to_ring(struct work_struct *work_s)
-{
- struct ad7606_state *st = container_of(work_s, struct ad7606_state,
- poll_work);
- struct iio_dev *indio_dev = iio_priv_to_dev(st);
- __u8 *buf;
- int ret;
-
- buf = kzalloc(indio_dev->scan_bytes, GFP_KERNEL);
- if (!buf)
- return;
-
- if (gpio_is_valid(st->pdata->gpio_frstdata)) {
- ret = st->bops->read_block(st->dev, 1, buf);
- if (ret)
- goto done;
- if (!gpio_get_value(st->pdata->gpio_frstdata)) {
- /* This should never happen. However,
- * a signal glitch caused by bad PCB design or
- * electrostatic discharge could cause an extra read
- * or clock. This allows recovery.
- */
- ad7606_reset(st);
- goto done;
- }
- ret = st->bops->read_block(st->dev,
- st->chip_info->num_channels - 1, buf + 2);
- if (ret)
- goto done;
- } else {
- ret = st->bops->read_block(st->dev,
- st->chip_info->num_channels, buf);
- if (ret)
- goto done;
- }
-
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
- iio_get_time_ns(indio_dev));
-done:
- gpio_set_value(st->pdata->gpio_convst, 0);
- iio_trigger_notify_done(indio_dev->trig);
- kfree(buf);
-}
-
-int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
-
- INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring);
-
- return iio_triggered_buffer_setup(indio_dev,
- &ad7606_trigger_handler_th_bh, &ad7606_trigger_handler_th_bh,
- NULL);
-}
-
-void ad7606_ring_cleanup(struct iio_dev *indio_dev)
-{
- iio_triggered_buffer_cleanup(indio_dev);
-}
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index 9587fa86dc69..c9b1f26685f4 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -42,25 +42,16 @@ static const struct ad7606_bus_ops ad7606_spi_bops = {
static int ad7606_spi_probe(struct spi_device *spi)
{
- struct iio_dev *indio_dev;
+ const struct spi_device_id *id = spi_get_device_id(spi);
- indio_dev = ad7606_probe(&spi->dev, spi->irq, NULL,
- spi_get_device_id(spi)->driver_data,
- &ad7606_spi_bops);
-
- if (IS_ERR(indio_dev))
- return PTR_ERR(indio_dev);
-
- spi_set_drvdata(spi, indio_dev);
-
- return 0;
+ return ad7606_probe(&spi->dev, spi->irq, NULL,
+ id->name, id->driver_data,
+ &ad7606_spi_bops);
}
static int ad7606_spi_remove(struct spi_device *spi)
{
- struct iio_dev *indio_dev = dev_get_drvdata(&spi->dev);
-
- return ad7606_remove(indio_dev, spi->irq);
+ return ad7606_remove(&spi->dev, spi->irq);
}
static const struct spi_device_id ad7606_id[] = {
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index c9a0c2aa602f..e14960038d3e 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -173,14 +173,16 @@ static int ad7780_probe(struct spi_device *spi)
ad_sd_init(&st->sd, indio_dev, spi, &ad7780_sigma_delta_info);
- st->reg = devm_regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret)
- return ret;
-
- voltage_uv = regulator_get_voltage(st->reg);
+ st->reg = devm_regulator_get(&spi->dev, "avdd");
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
+
+ ret = regulator_enable(st->reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable specified AVdd supply\n");
+ return ret;
}
+ voltage_uv = regulator_get_voltage(st->reg);
st->chip_info =
&ad7780_chip_info_tbl[spi_get_device_id(spi)->driver_data];
@@ -222,8 +224,7 @@ static int ad7780_probe(struct spi_device *spi)
error_cleanup_buffer_and_trigger:
ad_sd_cleanup_buffer_and_trigger(indio_dev);
error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ regulator_disable(st->reg);
return ret;
}
@@ -236,8 +237,7 @@ static int ad7780_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
ad_sd_cleanup_buffer_and_trigger(indio_dev);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ regulator_disable(st->reg);
return 0;
}
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 5e8115b01011..72551f827382 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -327,7 +327,7 @@ static struct attribute *ad7816_event_attributes[] = {
NULL,
};
-static struct attribute_group ad7816_event_attribute_group = {
+static const struct attribute_group ad7816_event_attribute_group = {
.attrs = ad7816_event_attributes,
.name = "events",
};
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 3faffe59c933..a7d90c8bac5e 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -2039,7 +2039,7 @@ static struct attribute *adt7316_event_attributes[] = {
NULL,
};
-static struct attribute_group adt7316_event_attribute_group = {
+static const struct attribute_group adt7316_event_attribute_group = {
.attrs = adt7316_event_attributes,
.name = "events",
};
@@ -2060,7 +2060,7 @@ static struct attribute *adt7516_event_attributes[] = {
NULL,
};
-static struct attribute_group adt7516_event_attribute_group = {
+static const struct attribute_group adt7516_event_attribute_group = {
.attrs = adt7516_event_attributes,
.name = "events",
};
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index 5578a077fcfb..6998c3ddfb6a 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -562,7 +562,7 @@ static struct attribute *ad7150_event_attributes[] = {
NULL,
};
-static struct attribute_group ad7150_event_attribute_group = {
+static const struct attribute_group ad7150_event_attribute_group = {
.attrs = ad7150_event_attributes,
.name = "events",
};
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
index 485d0a5af53c..b91b50f345bd 100644
--- a/drivers/staging/iio/cdc/ad7152.c
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -89,6 +89,7 @@ struct ad7152_chip_info {
*/
u8 filter_rate_setup;
u8 setup[2];
+ struct mutex state_lock; /* protect hardware state */
};
static inline ssize_t ad7152_start_calib(struct device *dev,
@@ -115,10 +116,10 @@ static inline ssize_t ad7152_start_calib(struct device *dev,
else
regval |= AD7152_CONF_CH2EN;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&chip->state_lock);
ret = i2c_smbus_write_byte_data(chip->client, AD7152_REG_CFG, regval);
if (ret < 0) {
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&chip->state_lock);
return ret;
}
@@ -126,14 +127,15 @@ static inline ssize_t ad7152_start_calib(struct device *dev,
mdelay(20);
ret = i2c_smbus_read_byte_data(chip->client, AD7152_REG_CFG);
if (ret < 0) {
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&chip->state_lock);
return ret;
}
} while ((ret == regval) && timeout--);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&chip->state_lock);
return len;
}
+
static ssize_t ad7152_start_offset_calib(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -142,6 +144,7 @@ static ssize_t ad7152_start_offset_calib(struct device *dev,
return ad7152_start_calib(dev, attr, buf, len,
AD7152_CONF_MODE_OFFS_CAL);
}
+
static ssize_t ad7152_start_gain_calib(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -165,63 +168,12 @@ static const unsigned char ad7152_filter_rate_table[][2] = {
{200, 5 + 1}, {50, 20 + 1}, {20, 50 + 1}, {17, 60 + 1},
};
-static ssize_t ad7152_show_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7152_chip_info *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n",
- ad7152_filter_rate_table[chip->filter_rate_setup][0]);
-}
-
-static ssize_t ad7152_store_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7152_chip_info *chip = iio_priv(indio_dev);
- u8 data;
- int ret, i;
-
- ret = kstrtou8(buf, 10, &data);
- if (ret < 0)
- return ret;
-
- for (i = 0; i < ARRAY_SIZE(ad7152_filter_rate_table); i++)
- if (data >= ad7152_filter_rate_table[i][0])
- break;
-
- if (i >= ARRAY_SIZE(ad7152_filter_rate_table))
- i = ARRAY_SIZE(ad7152_filter_rate_table) - 1;
-
- mutex_lock(&indio_dev->mlock);
- ret = i2c_smbus_write_byte_data(chip->client,
- AD7152_REG_CFG2, AD7152_CFG2_OSR(i));
- if (ret < 0) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
-
- chip->filter_rate_setup = i;
- mutex_unlock(&indio_dev->mlock);
-
- return len;
-}
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR,
- ad7152_show_filter_rate_setup,
- ad7152_store_filter_rate_setup);
-
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("200 50 20 17");
static IIO_CONST_ATTR(in_capacitance_scale_available,
"0.000061050 0.000030525 0.000015263 0.000007631");
static struct attribute *ad7152_attributes[] = {
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr,
&iio_dev_attr_in_capacitance1_calibbias_calibration.dev_attr.attr,
&iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr,
@@ -247,6 +199,51 @@ static const int ad7152_scale_table[] = {
30525, 7631, 15263, 61050
};
+/**
+ * ad7152_read_raw_samp_freq() - read_raw handler for IIO_CHAN_INFO_SAMP_FREQ
+ *
+ * Caller must hold chip->state_lock.
+ **/
+static int ad7152_read_raw_samp_freq(struct device *dev, int *val)
+{
+ struct ad7152_chip_info *chip = iio_priv(dev_to_iio_dev(dev));
+
+ *val = ad7152_filter_rate_table[chip->filter_rate_setup][0];
+
+ return 0;
+}
+
+/**
+ * ad7152_write_raw_samp_freq() - write_raw handler for IIO_CHAN_INFO_SAMP_FREQ
+ *
+ * Caller must hold chip->state_lock.
+ **/
+static int ad7152_write_raw_samp_freq(struct device *dev, int val)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ad7152_chip_info *chip = iio_priv(indio_dev);
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(ad7152_filter_rate_table); i++)
+ if (val >= ad7152_filter_rate_table[i][0])
+ break;
+
+ if (i >= ARRAY_SIZE(ad7152_filter_rate_table))
+ i = ARRAY_SIZE(ad7152_filter_rate_table) - 1;
+
+ ret = i2c_smbus_write_byte_data(chip->client,
+ AD7152_REG_CFG2, AD7152_CFG2_OSR(i));
+ if (ret < 0)
+ return ret;
+
+ chip->filter_rate_setup = i;
+
+ return ret;
+}
static int ad7152_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val,
@@ -256,7 +253,7 @@ static int ad7152_write_raw(struct iio_dev *indio_dev,
struct ad7152_chip_info *chip = iio_priv(indio_dev);
int ret, i;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&chip->state_lock);
switch (mask) {
case IIO_CHAN_INFO_CALIBSCALE:
@@ -309,14 +306,26 @@ static int ad7152_write_raw(struct iio_dev *indio_dev,
ret = 0;
break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val2) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = ad7152_write_raw_samp_freq(&indio_dev->dev, val);
+ if (ret < 0)
+ goto out;
+
+ ret = 0;
+ break;
default:
ret = -EINVAL;
}
out:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&chip->state_lock);
return ret;
}
+
static int ad7152_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2,
@@ -326,7 +335,7 @@ static int ad7152_read_raw(struct iio_dev *indio_dev,
int ret;
u8 regval = 0;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&chip->state_lock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -403,11 +412,18 @@ static int ad7152_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT_PLUS_NANO;
break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = ad7152_read_raw_samp_freq(&indio_dev->dev, val);
+ if (ret < 0)
+ goto out;
+
+ ret = IIO_VAL_INT;
+ break;
default:
ret = -EINVAL;
}
out:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&chip->state_lock);
return ret;
}
@@ -440,6 +456,7 @@ static const struct iio_chan_spec ad7152_channels[] = {
BIT(IIO_CHAN_INFO_CALIBSCALE) |
BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
}, {
.type = IIO_CAPACITANCE,
.differential = 1,
@@ -450,6 +467,7 @@ static const struct iio_chan_spec ad7152_channels[] = {
BIT(IIO_CHAN_INFO_CALIBSCALE) |
BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
}, {
.type = IIO_CAPACITANCE,
.indexed = 1,
@@ -458,6 +476,7 @@ static const struct iio_chan_spec ad7152_channels[] = {
BIT(IIO_CHAN_INFO_CALIBSCALE) |
BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
}, {
.type = IIO_CAPACITANCE,
.differential = 1,
@@ -468,8 +487,10 @@ static const struct iio_chan_spec ad7152_channels[] = {
BIT(IIO_CHAN_INFO_CALIBSCALE) |
BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
}
};
+
/*
* device probe and remove
*/
@@ -489,6 +510,7 @@ static int ad7152_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
chip->client = client;
+ mutex_init(&chip->state_lock);
/* Establish that the iio_dev is a child of the i2c device */
indio_dev->name = id->name;
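The sampling-frequency write path above performs a nearest-match lookup: walk the descending rate table and take the first entry whose rate does not exceed the request, falling back to the slowest entry. A standalone sketch of the selection, reusing the table values shown in the context above (the second column is carried over unchanged from the driver table):

#include <stdio.h>

static const unsigned char rate_table[][2] = {
	{200, 5 + 1}, {50, 20 + 1}, {20, 50 + 1}, {17, 60 + 1},
};

/* First entry whose rate does not exceed the request; else the slowest. */
static unsigned int pick_rate_index(unsigned int hz)
{
	unsigned int i;
	unsigned int n = sizeof(rate_table) / sizeof(rate_table[0]);

	for (i = 0; i < n; i++)
		if (hz >= rate_table[i][0])
			break;
	return i < n ? i : n - 1;
}

int main(void)
{
	unsigned int idx = pick_rate_index(30);	/* ask for 30 Hz */

	printf("index %u -> %u Hz (value for the OSR field)\n",
	       idx, rate_table[idx][0]);
	return 0;
}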
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 5771d4ee8ef1..81f8b9ee1120 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -70,8 +70,10 @@
#define AD7746_EXCSETUP_EXCLVL(x) (((x) & 0x3) << 0)
/* Config Register Bit Designations (AD7746_REG_CFG) */
-#define AD7746_CONF_VTFS(x) ((x) << 6)
-#define AD7746_CONF_CAPFS(x) ((x) << 3)
+#define AD7746_CONF_VTFS_SHIFT 6
+#define AD7746_CONF_CAPFS_SHIFT 3
+#define AD7746_CONF_VTFS_MASK GENMASK(7, 6)
+#define AD7746_CONF_CAPFS_MASK GENMASK(5, 3)
#define AD7746_CONF_MODE_IDLE (0 << 0)
#define AD7746_CONF_MODE_CONT_CONV (1 << 0)
#define AD7746_CONF_MODE_SINGLE_CONV (2 << 0)
@@ -122,7 +124,8 @@ static const struct iio_chan_spec ad7746_channels[] = {
.indexed = 1,
.channel = 0,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7746_REG_VT_DATA_HIGH << 8 |
AD7746_VTSETUP_VTMD_EXT_VIN,
},
@@ -132,7 +135,8 @@ static const struct iio_chan_spec ad7746_channels[] = {
.channel = 1,
.extend_name = "supply",
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7746_REG_VT_DATA_HIGH << 8 |
AD7746_VTSETUP_VTMD_VDD_MON,
},
@@ -159,7 +163,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7746_REG_CAP_DATA_HIGH << 8,
},
[CIN1_DIFF] = {
@@ -171,7 +175,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7746_REG_CAP_DATA_HIGH << 8 |
AD7746_CAPSETUP_CAPDIFF
},
@@ -182,7 +186,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7746_REG_CAP_DATA_HIGH << 8 |
AD7746_CAPSETUP_CIN2,
},
@@ -195,7 +199,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7746_REG_CAP_DATA_HIGH << 8 |
AD7746_CAPSETUP_CAPDIFF | AD7746_CAPSETUP_CIN2,
}
@@ -215,15 +219,16 @@ static int ad7746_select_channel(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
struct ad7746_chip_info *chip = iio_priv(indio_dev);
- int ret, delay;
+ int ret, delay, idx;
u8 vt_setup, cap_setup;
switch (chan->type) {
case IIO_CAPACITANCE:
cap_setup = (chan->address & 0xFF) | AD7746_CAPSETUP_CAPEN;
vt_setup = chip->vt_setup & ~AD7746_VTSETUP_VTEN;
- delay = ad7746_cap_filter_rate_table[(chip->config >> 3) &
- 0x7][1];
+ idx = (chip->config & AD7746_CONF_CAPFS_MASK) >>
+ AD7746_CONF_CAPFS_SHIFT;
+ delay = ad7746_cap_filter_rate_table[idx][1];
if (chip->capdac_set != chan->channel) {
ret = i2c_smbus_write_byte_data(chip->client,
@@ -244,8 +249,9 @@ static int ad7746_select_channel(struct iio_dev *indio_dev,
case IIO_TEMP:
vt_setup = (chan->address & 0xFF) | AD7746_VTSETUP_VTEN;
cap_setup = chip->cap_setup & ~AD7746_CAPSETUP_CAPEN;
- delay = ad7746_cap_filter_rate_table[(chip->config >> 6) &
- 0x3][1];
+ idx = (chip->config & AD7746_CONF_VTFS_MASK) >>
+ AD7746_CONF_VTFS_SHIFT;
+ delay = ad7746_cap_filter_rate_table[idx][1];
break;
default:
return -EINVAL;
@@ -355,101 +361,47 @@ static IIO_DEVICE_ATTR(in_capacitance1_calibscale_calibration,
static IIO_DEVICE_ATTR(in_voltage0_calibscale_calibration,
S_IWUSR, NULL, ad7746_start_gain_calib, VIN);
-static ssize_t ad7746_show_cap_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int ad7746_store_cap_filter_rate_setup(struct ad7746_chip_info *chip,
+ int val)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7746_chip_info *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", ad7746_cap_filter_rate_table[
- (chip->config >> 3) & 0x7][0]);
-}
-
-static ssize_t ad7746_store_cap_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7746_chip_info *chip = iio_priv(indio_dev);
- u8 data;
- int ret, i;
-
- ret = kstrtou8(buf, 10, &data);
- if (ret < 0)
- return ret;
+ int i;
for (i = 0; i < ARRAY_SIZE(ad7746_cap_filter_rate_table); i++)
- if (data >= ad7746_cap_filter_rate_table[i][0])
+ if (val >= ad7746_cap_filter_rate_table[i][0])
break;
if (i >= ARRAY_SIZE(ad7746_cap_filter_rate_table))
i = ARRAY_SIZE(ad7746_cap_filter_rate_table) - 1;
- mutex_lock(&indio_dev->mlock);
- chip->config &= ~AD7746_CONF_CAPFS(0x7);
- chip->config |= AD7746_CONF_CAPFS(i);
- mutex_unlock(&indio_dev->mlock);
+ chip->config &= ~AD7746_CONF_CAPFS_MASK;
+ chip->config |= i << AD7746_CONF_CAPFS_SHIFT;
- return len;
+ return 0;
}
-static ssize_t ad7746_show_vt_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int ad7746_store_vt_filter_rate_setup(struct ad7746_chip_info *chip,
+ int val)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7746_chip_info *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", ad7746_vt_filter_rate_table[
- (chip->config >> 6) & 0x3][0]);
-}
-
-static ssize_t ad7746_store_vt_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7746_chip_info *chip = iio_priv(indio_dev);
- u8 data;
- int ret, i;
-
- ret = kstrtou8(buf, 10, &data);
- if (ret < 0)
- return ret;
+ int i;
for (i = 0; i < ARRAY_SIZE(ad7746_vt_filter_rate_table); i++)
- if (data >= ad7746_vt_filter_rate_table[i][0])
+ if (val >= ad7746_vt_filter_rate_table[i][0])
break;
if (i >= ARRAY_SIZE(ad7746_vt_filter_rate_table))
i = ARRAY_SIZE(ad7746_vt_filter_rate_table) - 1;
- mutex_lock(&indio_dev->mlock);
- chip->config &= ~AD7746_CONF_VTFS(0x3);
- chip->config |= AD7746_CONF_VTFS(i);
- mutex_unlock(&indio_dev->mlock);
+ chip->config &= ~AD7746_CONF_VTFS_MASK;
+ chip->config |= i << AD7746_CONF_VTFS_SHIFT;
- return len;
+ return 0;
}
-static IIO_DEVICE_ATTR(in_capacitance_sampling_frequency,
- S_IRUGO | S_IWUSR, ad7746_show_cap_filter_rate_setup,
- ad7746_store_cap_filter_rate_setup, 0);
-
-static IIO_DEVICE_ATTR(in_voltage_sampling_frequency,
- S_IRUGO | S_IWUSR, ad7746_show_vt_filter_rate_setup,
- ad7746_store_vt_filter_rate_setup, 0);
-
static IIO_CONST_ATTR(in_voltage_sampling_frequency_available, "50 31 16 8");
static IIO_CONST_ATTR(in_capacitance_sampling_frequency_available,
"91 84 50 26 16 13 11 9");
static struct attribute *ad7746_attributes[] = {
- &iio_dev_attr_in_capacitance_sampling_frequency.dev_attr.attr,
- &iio_dev_attr_in_voltage_sampling_frequency.dev_attr.attr,
&iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr,
&iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr,
&iio_dev_attr_in_capacitance1_calibscale_calibration.dev_attr.attr,
@@ -547,6 +499,23 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
ret = 0;
break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val2) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (chan->type) {
+ case IIO_CAPACITANCE:
+ ret = ad7746_store_cap_filter_rate_setup(chip, val);
+ break;
+ case IIO_VOLTAGE:
+ ret = ad7746_store_vt_filter_rate_setup(chip, val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
default:
ret = -EINVAL;
}
@@ -562,7 +531,7 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
long mask)
{
struct ad7746_chip_info *chip = iio_priv(indio_dev);
- int ret, delay;
+ int ret, delay, idx;
u8 regval, reg;
mutex_lock(&indio_dev->mlock);
@@ -667,6 +636,24 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
}
break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ switch (chan->type) {
+ case IIO_CAPACITANCE:
+ idx = (chip->config & AD7746_CONF_CAPFS_MASK) >>
+ AD7746_CONF_CAPFS_SHIFT;
+ *val = ad7746_cap_filter_rate_table[idx][0];
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_VOLTAGE:
+ idx = (chip->config & AD7746_CONF_VTFS_MASK) >>
+ AD7746_CONF_VTFS_SHIFT;
+ *val = ad7746_vt_filter_rate_table[idx][0];
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
default:
ret = -EINVAL;
}
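
The ad7746 hunks above fold the driver's private sampling-frequency sysfs attributes into the IIO core's read_raw()/write_raw() callbacks, keyed on IIO_CHAN_INFO_SAMP_FREQ. A minimal sketch of that callback shape, using a hypothetical "foo" driver and rate table rather than the ad7746 code itself:

/*
 * Minimal sketch of the read_raw()/write_raw() shape used above, for a
 * hypothetical "foo" driver with a descending rate table.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/iio/iio.h>

static const int foo_samp_freq[] = { 50, 31, 16, 8 };

struct foo_state {
	unsigned int freq_idx;		/* index into foo_samp_freq[] */
};

static int foo_read_raw(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val, int *val2, long mask)
{
	struct foo_state *st = iio_priv(indio_dev);

	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		*val = foo_samp_freq[st->freq_idx];
		return IIO_VAL_INT;	/* whole number of Hz */
	default:
		return -EINVAL;
	}
}

static int foo_write_raw(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 int val, int val2, long mask)
{
	struct foo_state *st = iio_priv(indio_dev);
	int i;

	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		if (val2)
			return -EINVAL;	/* fractional rates not supported */
		/* pick the first table rate not above the request, clamped */
		for (i = 0; i < ARRAY_SIZE(foo_samp_freq); i++)
			if (val >= foo_samp_freq[i])
				break;
		if (i >= ARRAY_SIZE(foo_samp_freq))
			i = ARRAY_SIZE(foo_samp_freq) - 1;
		st->freq_idx = i;
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct iio_info foo_info = {
	.read_raw = foo_read_raw,
	.write_raw = foo_write_raw,
	.driver_module = THIS_MODULE,
};

With this shape the sysfs string parsing that the removed kstrtou8() calls did by hand is performed by the IIO core before write_raw() is invoked.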
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index 358400b22d33..a5b2f068168d 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -204,7 +204,6 @@ static int ad9832_probe(struct spi_device *spi)
struct ad9832_platform_data *pdata = dev_get_platdata(&spi->dev);
struct iio_dev *indio_dev;
struct ad9832_state *st;
- struct regulator *reg;
int ret;
if (!pdata) {
@@ -212,21 +211,35 @@ static int ad9832_probe(struct spi_device *spi)
return -ENODEV;
}
- reg = devm_regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(reg)) {
- ret = regulator_enable(reg);
- if (ret)
- return ret;
- }
-
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev) {
- ret = -ENOMEM;
- goto error_disable_reg;
- }
+ if (!indio_dev)
+ return -ENOMEM;
+
spi_set_drvdata(spi, indio_dev);
st = iio_priv(indio_dev);
- st->reg = reg;
+
+ st->avdd = devm_regulator_get(&spi->dev, "avdd");
+ if (IS_ERR(st->avdd))
+ return PTR_ERR(st->avdd);
+
+ ret = regulator_enable(st->avdd);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable specified AVDD supply\n");
+ return ret;
+ }
+
+ st->dvdd = devm_regulator_get(&spi->dev, "dvdd");
+ if (IS_ERR(st->dvdd)) {
+ ret = PTR_ERR(st->dvdd);
+ goto error_disable_avdd;
+ }
+
+ ret = regulator_enable(st->dvdd);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable specified DVDD supply\n");
+ goto error_disable_avdd;
+ }
+
st->mclk = pdata->mclk;
st->spi = spi;
@@ -277,42 +290,43 @@ static int ad9832_probe(struct spi_device *spi)
ret = spi_sync(st->spi, &st->msg);
if (ret) {
dev_err(&spi->dev, "device init failed\n");
- goto error_disable_reg;
+ goto error_disable_dvdd;
}
ret = ad9832_write_frequency(st, AD9832_FREQ0HM, pdata->freq0);
if (ret)
- goto error_disable_reg;
+ goto error_disable_dvdd;
ret = ad9832_write_frequency(st, AD9832_FREQ1HM, pdata->freq1);
if (ret)
- goto error_disable_reg;
+ goto error_disable_dvdd;
ret = ad9832_write_phase(st, AD9832_PHASE0H, pdata->phase0);
if (ret)
- goto error_disable_reg;
+ goto error_disable_dvdd;
ret = ad9832_write_phase(st, AD9832_PHASE1H, pdata->phase1);
if (ret)
- goto error_disable_reg;
+ goto error_disable_dvdd;
ret = ad9832_write_phase(st, AD9832_PHASE2H, pdata->phase2);
if (ret)
- goto error_disable_reg;
+ goto error_disable_dvdd;
ret = ad9832_write_phase(st, AD9832_PHASE3H, pdata->phase3);
if (ret)
- goto error_disable_reg;
+ goto error_disable_dvdd;
ret = iio_device_register(indio_dev);
if (ret)
- goto error_disable_reg;
+ goto error_disable_dvdd;
return 0;
-error_disable_reg:
- if (!IS_ERR(reg))
- regulator_disable(reg);
+error_disable_dvdd:
+ regulator_disable(st->dvdd);
+error_disable_avdd:
+ regulator_disable(st->avdd);
return ret;
}
@@ -323,8 +337,8 @@ static int ad9832_remove(struct spi_device *spi)
struct ad9832_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ regulator_disable(st->dvdd);
+ regulator_disable(st->avdd);
return 0;
}
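
In the ad9832 probe above, the single optional "vcc" supply becomes two mandatory supplies, AVDD and DVDD, each enabled in turn and disabled again in reverse order if a later step fails. A hedged sketch of that bring-up pattern with hypothetical "foo" names (error messages and the rest of probe omitted):

/*
 * Two-supply bring-up: request and enable each regulator in order,
 * disable in reverse order when a later step fails.
 */
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>

struct foo_state {
	struct regulator *avdd;
	struct regulator *dvdd;
};

static int foo_power_up(struct spi_device *spi, struct foo_state *st)
{
	int ret;

	st->avdd = devm_regulator_get(&spi->dev, "avdd");
	if (IS_ERR(st->avdd))
		return PTR_ERR(st->avdd);	/* propagates -EPROBE_DEFER too */

	ret = regulator_enable(st->avdd);
	if (ret)
		return ret;

	st->dvdd = devm_regulator_get(&spi->dev, "dvdd");
	if (IS_ERR(st->dvdd)) {
		ret = PTR_ERR(st->dvdd);
		goto err_disable_avdd;
	}

	ret = regulator_enable(st->dvdd);
	if (ret)
		goto err_disable_avdd;

	return 0;

err_disable_avdd:
	regulator_disable(st->avdd);		/* undo in reverse order */
	return ret;
}

The remove() path then mirrors this by disabling DVDD and AVDD unconditionally, which is safe because a failed regulator lookup now fails probe instead of being silently skipped.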
diff --git a/drivers/staging/iio/frequency/ad9832.h b/drivers/staging/iio/frequency/ad9832.h
index d32323b46be6..1b08b04482a4 100644
--- a/drivers/staging/iio/frequency/ad9832.h
+++ b/drivers/staging/iio/frequency/ad9832.h
@@ -58,7 +58,8 @@
/**
* struct ad9832_state - driver instance specific data
* @spi: spi_device
- * @reg: supply regulator
+ * @avdd: supply regulator for the analog section
+ * @dvdd: supply regulator for the digital section
* @mclk: external master clock
* @ctrl_fp: cached frequency/phase control word
* @ctrl_ss: cached sync/selsrc control word
@@ -76,7 +77,8 @@
struct ad9832_state {
struct spi_device *spi;
- struct regulator *reg;
+ struct regulator *avdd;
+ struct regulator *dvdd;
unsigned long mclk;
unsigned short ctrl_fp;
unsigned short ctrl_ss;
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 6366216e4f37..19216af1dfc9 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -329,11 +329,14 @@ static int ad9834_probe(struct spi_device *spi)
return -ENODEV;
}
- reg = devm_regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(reg)) {
- ret = regulator_enable(reg);
- if (ret)
- return ret;
+ reg = devm_regulator_get(&spi->dev, "avdd");
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ ret = regulator_enable(reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable specified AVDD supply\n");
+ return ret;
}
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
@@ -416,8 +419,7 @@ static int ad9834_probe(struct spi_device *spi)
return 0;
error_disable_reg:
- if (!IS_ERR(reg))
- regulator_disable(reg);
+ regulator_disable(reg);
return ret;
}
@@ -428,8 +430,7 @@ static int ad9834_remove(struct spi_device *spi)
struct ad9834_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ regulator_disable(st->reg);
return 0;
}
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 3892a7470410..944789843938 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -726,13 +726,16 @@ static int ad5933_probe(struct i2c_client *client,
if (!pdata)
pdata = &ad5933_default_pdata;
- st->reg = devm_regulator_get(&client->dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret)
- return ret;
- voltage_uv = regulator_get_voltage(st->reg);
+ st->reg = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
+
+ ret = regulator_enable(st->reg);
+ if (ret) {
+ dev_err(&client->dev, "Failed to enable specified VDD supply\n");
+ return ret;
}
+ voltage_uv = regulator_get_voltage(st->reg);
if (voltage_uv)
st->vref_mv = voltage_uv / 1000;
@@ -775,8 +778,7 @@ static int ad5933_probe(struct i2c_client *client,
error_unreg_ring:
iio_kfifo_free(indio_dev->buffer);
error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ regulator_disable(st->reg);
return ret;
}
@@ -788,8 +790,7 @@ static int ad5933_remove(struct i2c_client *client)
iio_device_unregister(indio_dev);
iio_kfifo_free(indio_dev->buffer);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ regulator_disable(st->reg);
return 0;
}
diff --git a/drivers/staging/iio/light/Kconfig b/drivers/staging/iio/light/Kconfig
index ca8d6e66c899..4fbf6298c0f3 100644
--- a/drivers/staging/iio/light/Kconfig
+++ b/drivers/staging/iio/light/Kconfig
@@ -3,18 +3,6 @@
#
menu "Light sensors"
-config SENSORS_ISL29018
- tristate "ISL 29018 light and proximity sensor"
- depends on I2C
- select REGMAP_I2C
- default n
- help
- If you say yes here you get support for ambient light sensing and
- proximity infrared sensing from Intersil ISL29018.
- This driver will provide the measurements of ambient light intensity
- in lux, proximity infrared sensing and normal infrared sensing.
- Data from sensor is accessible via sysfs.
-
config SENSORS_ISL29028
tristate "Intersil ISL29028 Concurrent Light and Proximity Sensor"
depends on I2C
@@ -25,13 +13,6 @@ config SENSORS_ISL29028
Proximity value via iio. The ISL29028 provides the concurrent sensing
of ambient light and proximity.
-config TSL2583
- tristate "TAOS TSL2580, TSL2581 and TSL2583 light-to-digital converters"
- depends on I2C
- help
- Provides support for the TAOS tsl2580, tsl2581 and tsl2583 devices.
- Access ALS data via iio, sysfs.
-
config TSL2x7x
tristate "TAOS TSL/TMD2x71 and TSL/TMD2x72 Family of light and proximity sensors"
depends on I2C
diff --git a/drivers/staging/iio/light/Makefile b/drivers/staging/iio/light/Makefile
index 9960fdf7c15b..f8693e9fdc94 100644
--- a/drivers/staging/iio/light/Makefile
+++ b/drivers/staging/iio/light/Makefile
@@ -2,7 +2,5 @@
# Makefile for industrial I/O Light sensors
#
-obj-$(CONFIG_SENSORS_ISL29018) += isl29018.o
obj-$(CONFIG_SENSORS_ISL29028) += isl29028.o
-obj-$(CONFIG_TSL2583) += tsl2583.o
obj-$(CONFIG_TSL2x7x) += tsl2x7x_core.o
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
deleted file mode 100644
index 08f1583ee34e..000000000000
--- a/drivers/staging/iio/light/tsl2583.c
+++ /dev/null
@@ -1,963 +0,0 @@
-/*
- * Device driver for monitoring ambient light intensity (lux)
- * within the TAOS tsl258x family of devices (tsl2580, tsl2581).
- *
- * Copyright (c) 2011, TAOS Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/i2c.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/mutex.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/iio/iio.h>
-
-#define TSL258X_MAX_DEVICE_REGS 32
-
-/* Triton register offsets */
-#define TSL258X_REG_MAX 8
-
-/* Device Registers and Masks */
-#define TSL258X_CNTRL 0x00
-#define TSL258X_ALS_TIME 0X01
-#define TSL258X_INTERRUPT 0x02
-#define TSL258X_GAIN 0x07
-#define TSL258X_REVID 0x11
-#define TSL258X_CHIPID 0x12
-#define TSL258X_ALS_CHAN0LO 0x14
-#define TSL258X_ALS_CHAN0HI 0x15
-#define TSL258X_ALS_CHAN1LO 0x16
-#define TSL258X_ALS_CHAN1HI 0x17
-#define TSL258X_TMR_LO 0x18
-#define TSL258X_TMR_HI 0x19
-
-/* tsl2583 cmd reg masks */
-#define TSL258X_CMD_REG 0x80
-#define TSL258X_CMD_SPL_FN 0x60
-#define TSL258X_CMD_ALS_INT_CLR 0X01
-
-/* tsl2583 cntrl reg masks */
-#define TSL258X_CNTL_ADC_ENBL 0x02
-#define TSL258X_CNTL_PWR_ON 0x01
-
-/* tsl2583 status reg masks */
-#define TSL258X_STA_ADC_VALID 0x01
-#define TSL258X_STA_ADC_INTR 0x10
-
-/* Lux calculation constants */
-#define TSL258X_LUX_CALC_OVER_FLOW 65535
-
-enum {
- TSL258X_CHIP_UNKNOWN = 0,
- TSL258X_CHIP_WORKING = 1,
- TSL258X_CHIP_SUSPENDED = 2
-};
-
-/* Per-device data */
-struct taos_als_info {
- u16 als_ch0;
- u16 als_ch1;
- u16 lux;
-};
-
-struct taos_settings {
- int als_time;
- int als_gain;
- int als_gain_trim;
- int als_cal_target;
-};
-
-struct tsl2583_chip {
- struct mutex als_mutex;
- struct i2c_client *client;
- struct taos_als_info als_cur_info;
- struct taos_settings taos_settings;
- int als_time_scale;
- int als_saturation;
- int taos_chip_status;
- u8 taos_config[8];
-};
-
-/*
- * Initial values for device - this values can/will be changed by driver.
- * and applications as needed.
- * These values are dynamic.
- */
-static const u8 taos_config[8] = {
- 0x00, 0xee, 0x00, 0x03, 0x00, 0xFF, 0xFF, 0x00
-}; /* cntrl atime intC Athl0 Athl1 Athh0 Athh1 gain */
-
-struct taos_lux {
- unsigned int ratio;
- unsigned int ch0;
- unsigned int ch1;
-};
-
-/* This structure is intentionally large to accommodate updates via sysfs. */
-/* Sized to 11 = max 10 segments + 1 termination segment */
-/* Assumption is one and only one type of glass used */
-static struct taos_lux taos_device_lux[11] = {
- { 9830, 8520, 15729 },
- { 12452, 10807, 23344 },
- { 14746, 6383, 11705 },
- { 17695, 4063, 6554 },
-};
-
-struct gainadj {
- s16 ch0;
- s16 ch1;
-};
-
-/* Index = (0 - 3) Used to validate the gain selection index */
-static const struct gainadj gainadj[] = {
- { 1, 1 },
- { 8, 8 },
- { 16, 16 },
- { 107, 115 }
-};
-
-/*
- * Provides initial operational parameter defaults.
- * These defaults may be changed through the device's sysfs files.
- */
-static void taos_defaults(struct tsl2583_chip *chip)
-{
- /* Operational parameters */
- chip->taos_settings.als_time = 100;
- /* must be a multiple of 50mS */
- chip->taos_settings.als_gain = 0;
- /* this is actually an index into the gain table */
- /* assume clear glass as default */
- chip->taos_settings.als_gain_trim = 1000;
- /* default gain trim to account for aperture effects */
- chip->taos_settings.als_cal_target = 130;
- /* Known external ALS reading used for calibration */
-}
-
-/*
- * Read a number of bytes starting at register (reg) location.
- * Return 0, or i2c_smbus_write_byte ERROR code.
- */
-static int
-taos_i2c_read(struct i2c_client *client, u8 reg, u8 *val, unsigned int len)
-{
- int i, ret;
-
- for (i = 0; i < len; i++) {
- /* select register to write */
- ret = i2c_smbus_write_byte(client, (TSL258X_CMD_REG | reg));
- if (ret < 0) {
- dev_err(&client->dev,
- "taos_i2c_read failed to write register %x\n",
- reg);
- return ret;
- }
- /* read the data */
- *val = i2c_smbus_read_byte(client);
- val++;
- reg++;
- }
- return 0;
-}
-
-/*
- * Reads and calculates current lux value.
- * The raw ch0 and ch1 values of the ambient light sensed in the last
- * integration cycle are read from the device.
- * Time scale factor array values are adjusted based on the integration time.
- * The raw values are multiplied by a scale factor, and device gain is obtained
- * using gain index. Limit checks are done next, then the ratio of a multiple
- * of ch1 value, to the ch0 value, is calculated. The array taos_device_lux[]
- * declared above is then scanned to find the first ratio value that is just
- * above the ratio we just calculated. The ch0 and ch1 multiplier constants in
- * the array are then used along with the time scale factor array values, to
- * calculate the lux.
- */
-static int taos_get_lux(struct iio_dev *indio_dev)
-{
- u16 ch0, ch1; /* separated ch0/ch1 data from device */
- u32 lux; /* raw lux calculated from device data */
- u64 lux64;
- u32 ratio;
- u8 buf[5];
- struct taos_lux *p;
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int i, ret;
- u32 ch0lux = 0;
- u32 ch1lux = 0;
-
- if (mutex_trylock(&chip->als_mutex) == 0) {
- dev_info(&chip->client->dev, "taos_get_lux device is busy\n");
- return chip->als_cur_info.lux; /* busy, so return LAST VALUE */
- }
-
- if (chip->taos_chip_status != TSL258X_CHIP_WORKING) {
- /* device is not enabled */
- dev_err(&chip->client->dev, "taos_get_lux device is not enabled\n");
- ret = -EBUSY;
- goto out_unlock;
- }
-
- ret = taos_i2c_read(chip->client, (TSL258X_CMD_REG), &buf[0], 1);
- if (ret < 0) {
- dev_err(&chip->client->dev, "taos_get_lux failed to read CMD_REG\n");
- goto out_unlock;
- }
- /* is data new & valid */
- if (!(buf[0] & TSL258X_STA_ADC_INTR)) {
- dev_err(&chip->client->dev, "taos_get_lux data not valid\n");
- ret = chip->als_cur_info.lux; /* return LAST VALUE */
- goto out_unlock;
- }
-
- for (i = 0; i < 4; i++) {
- int reg = TSL258X_CMD_REG | (TSL258X_ALS_CHAN0LO + i);
-
- ret = taos_i2c_read(chip->client, reg, &buf[i], 1);
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "taos_get_lux failed to read register %x\n",
- reg);
- goto out_unlock;
- }
- }
-
- /*
- * clear status, really interrupt status (interrupts are off), but
- * we use the bit anyway - don't forget 0x80 - this is a command
- */
- ret = i2c_smbus_write_byte(chip->client,
- (TSL258X_CMD_REG | TSL258X_CMD_SPL_FN |
- TSL258X_CMD_ALS_INT_CLR));
-
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "taos_i2c_write_command failed in taos_get_lux, err = %d\n",
- ret);
- goto out_unlock; /* have no data, so return failure */
- }
-
- /* extract ALS/lux data */
- ch0 = le16_to_cpup((const __le16 *)&buf[0]);
- ch1 = le16_to_cpup((const __le16 *)&buf[2]);
-
- chip->als_cur_info.als_ch0 = ch0;
- chip->als_cur_info.als_ch1 = ch1;
-
- if ((ch0 >= chip->als_saturation) || (ch1 >= chip->als_saturation))
- goto return_max;
-
- if (!ch0) {
- /* have no data, so return LAST VALUE */
- ret = 0;
- chip->als_cur_info.lux = 0;
- goto out_unlock;
- }
- /* calculate ratio */
- ratio = (ch1 << 15) / ch0;
- /* convert to unscaled lux using the pointer to the table */
- for (p = (struct taos_lux *)taos_device_lux;
- p->ratio != 0 && p->ratio < ratio; p++)
- ;
-
- if (p->ratio == 0) {
- lux = 0;
- } else {
- ch0lux = ((ch0 * p->ch0) +
- (gainadj[chip->taos_settings.als_gain].ch0 >> 1))
- / gainadj[chip->taos_settings.als_gain].ch0;
- ch1lux = ((ch1 * p->ch1) +
- (gainadj[chip->taos_settings.als_gain].ch1 >> 1))
- / gainadj[chip->taos_settings.als_gain].ch1;
- lux = ch0lux - ch1lux;
- }
-
- /* note: lux is 31 bit max at this point */
- if (ch1lux > ch0lux) {
- dev_dbg(&chip->client->dev, "No Data - Return last value\n");
- ret = 0;
- chip->als_cur_info.lux = 0;
- goto out_unlock;
- }
-
- /* adjust for active time scale */
- if (chip->als_time_scale == 0)
- lux = 0;
- else
- lux = (lux + (chip->als_time_scale >> 1)) /
- chip->als_time_scale;
-
- /* Adjust for active gain scale.
- * The taos_device_lux tables above have a factor of 8192 built in,
- * so we need to shift right.
- * User-specified gain provides a multiplier.
- * Apply user-specified gain before shifting right to retain precision.
- * Use 64 bits to avoid overflow on multiplication.
- * Then go back to 32 bits before division to avoid using div_u64().
- */
- lux64 = lux;
- lux64 = lux64 * chip->taos_settings.als_gain_trim;
- lux64 >>= 13;
- lux = lux64;
- lux = (lux + 500) / 1000;
- if (lux > TSL258X_LUX_CALC_OVER_FLOW) { /* check for overflow */
-return_max:
- lux = TSL258X_LUX_CALC_OVER_FLOW;
- }
-
- /* Update the structure with the latest VALID lux. */
- chip->als_cur_info.lux = lux;
- ret = lux;
-
-out_unlock:
- mutex_unlock(&chip->als_mutex);
- return ret;
-}
-
-/*
- * Obtain single reading and calculate the als_gain_trim (later used
- * to derive actual lux).
- * Return updated gain_trim value.
- */
-static int taos_als_calibrate(struct iio_dev *indio_dev)
-{
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- u8 reg_val;
- unsigned int gain_trim_val;
- int ret;
- int lux_val;
-
- ret = i2c_smbus_write_byte(chip->client,
- (TSL258X_CMD_REG | TSL258X_CNTRL));
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "taos_als_calibrate failed to reach the CNTRL register, ret=%d\n",
- ret);
- return ret;
- }
-
- reg_val = i2c_smbus_read_byte(chip->client);
- if ((reg_val & (TSL258X_CNTL_ADC_ENBL | TSL258X_CNTL_PWR_ON))
- != (TSL258X_CNTL_ADC_ENBL | TSL258X_CNTL_PWR_ON)) {
- dev_err(&chip->client->dev,
- "taos_als_calibrate failed: device not powered on with ADC enabled\n");
- return -1;
- }
-
- ret = i2c_smbus_write_byte(chip->client,
- (TSL258X_CMD_REG | TSL258X_CNTRL));
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "taos_als_calibrate failed to reach the STATUS register, ret=%d\n",
- ret);
- return ret;
- }
- reg_val = i2c_smbus_read_byte(chip->client);
-
- if ((reg_val & TSL258X_STA_ADC_VALID) != TSL258X_STA_ADC_VALID) {
- dev_err(&chip->client->dev,
- "taos_als_calibrate failed: STATUS - ADC not valid.\n");
- return -ENODATA;
- }
- lux_val = taos_get_lux(indio_dev);
- if (lux_val < 0) {
- dev_err(&chip->client->dev, "taos_als_calibrate failed to get lux\n");
- return lux_val;
- }
- gain_trim_val = (unsigned int)(((chip->taos_settings.als_cal_target)
- * chip->taos_settings.als_gain_trim) / lux_val);
-
- if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
- dev_err(&chip->client->dev,
- "taos_als_calibrate failed: trim_val of %d is out of range\n",
- gain_trim_val);
- return -ENODATA;
- }
- chip->taos_settings.als_gain_trim = (int)gain_trim_val;
-
- return (int)gain_trim_val;
-}
-
-/*
- * Turn the device on.
- * Configuration must be set before calling this function.
- */
-static int taos_chip_on(struct iio_dev *indio_dev)
-{
- int i;
- int ret;
- u8 *uP;
- u8 utmp;
- int als_count;
- int als_time;
- struct tsl2583_chip *chip = iio_priv(indio_dev);
-
- /* and make sure we're not already on */
- if (chip->taos_chip_status == TSL258X_CHIP_WORKING) {
- /* if forcing a register update - turn off, then on */
- dev_info(&chip->client->dev, "device is already enabled\n");
- return -EINVAL;
- }
-
- /* determine als integration register */
- als_count = (chip->taos_settings.als_time * 100 + 135) / 270;
- if (!als_count)
- als_count = 1; /* ensure at least one cycle */
-
- /* convert back to time (encompasses overrides) */
- als_time = (als_count * 27 + 5) / 10;
- chip->taos_config[TSL258X_ALS_TIME] = 256 - als_count;
-
- /* Set the gain based on taos_settings struct */
- chip->taos_config[TSL258X_GAIN] = chip->taos_settings.als_gain;
-
- /* set chip struct re scaling and saturation */
- chip->als_saturation = als_count * 922; /* 90% of full scale */
- chip->als_time_scale = (als_time + 25) / 50;
-
- /*
- * TSL258x Specific power-on / adc enable sequence
- * Power on the device 1st.
- */
- utmp = TSL258X_CNTL_PWR_ON;
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL258X_CMD_REG | TSL258X_CNTRL, utmp);
- if (ret < 0) {
- dev_err(&chip->client->dev, "taos_chip_on failed on CNTRL reg.\n");
- return ret;
- }
-
- /*
- * Use the following shadow copy for our delay before enabling ADC.
- * Write all the registers.
- */
- for (i = 0, uP = chip->taos_config; i < TSL258X_REG_MAX; i++) {
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL258X_CMD_REG + i,
- *uP++);
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "taos_chip_on failed on reg %d.\n", i);
- return ret;
- }
- }
-
- usleep_range(3000, 3500);
- /*
- * NOW enable the ADC
- * initialize the desired mode of operation
- */
- utmp = TSL258X_CNTL_PWR_ON | TSL258X_CNTL_ADC_ENBL;
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL258X_CMD_REG | TSL258X_CNTRL,
- utmp);
- if (ret < 0) {
- dev_err(&chip->client->dev, "taos_chip_on failed on 2nd CTRL reg.\n");
- return ret;
- }
- chip->taos_chip_status = TSL258X_CHIP_WORKING;
-
- return ret;
-}
-
-static int taos_chip_off(struct iio_dev *indio_dev)
-{
- struct tsl2583_chip *chip = iio_priv(indio_dev);
-
- /* turn device off */
- chip->taos_chip_status = TSL258X_CHIP_SUSPENDED;
- return i2c_smbus_write_byte_data(chip->client,
- TSL258X_CMD_REG | TSL258X_CNTRL,
- 0x00);
-}
-
-/* Sysfs Interface Functions */
-
-static ssize_t taos_power_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", chip->taos_chip_status);
-}
-
-static ssize_t taos_power_state_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- int value;
-
- if (kstrtoint(buf, 0, &value))
- return -EINVAL;
-
- if (!value)
- taos_chip_off(indio_dev);
- else
- taos_chip_on(indio_dev);
-
- return len;
-}
-
-static ssize_t taos_gain_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- char gain[4] = {0};
-
- switch (chip->taos_settings.als_gain) {
- case 0:
- strcpy(gain, "001");
- break;
- case 1:
- strcpy(gain, "008");
- break;
- case 2:
- strcpy(gain, "016");
- break;
- case 3:
- strcpy(gain, "111");
- break;
- }
-
- return sprintf(buf, "%s\n", gain);
-}
-
-static ssize_t taos_gain_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int value;
-
- if (kstrtoint(buf, 0, &value))
- return -EINVAL;
-
- switch (value) {
- case 1:
- chip->taos_settings.als_gain = 0;
- break;
- case 8:
- chip->taos_settings.als_gain = 1;
- break;
- case 16:
- chip->taos_settings.als_gain = 2;
- break;
- case 111:
- chip->taos_settings.als_gain = 3;
- break;
- default:
- dev_err(dev, "Invalid Gain Index (must be 1,8,16,111)\n");
- return -1;
- }
-
- return len;
-}
-
-static ssize_t taos_gain_available_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%s\n", "1 8 16 111");
-}
-
-static ssize_t taos_als_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", chip->taos_settings.als_time);
-}
-
-static ssize_t taos_als_time_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int value;
-
- if (kstrtoint(buf, 0, &value))
- return -EINVAL;
-
- if ((value < 50) || (value > 650))
- return -EINVAL;
-
- if (value % 50)
- return -EINVAL;
-
- chip->taos_settings.als_time = value;
-
- return len;
-}
-
-static ssize_t taos_als_time_available_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%s\n",
- "50 100 150 200 250 300 350 400 450 500 550 600 650");
-}
-
-static ssize_t taos_als_trim_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", chip->taos_settings.als_gain_trim);
-}
-
-static ssize_t taos_als_trim_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int value;
-
- if (kstrtoint(buf, 0, &value))
- return -EINVAL;
-
- if (value)
- chip->taos_settings.als_gain_trim = value;
-
- return len;
-}
-
-static ssize_t taos_als_cal_target_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", chip->taos_settings.als_cal_target);
-}
-
-static ssize_t taos_als_cal_target_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int value;
-
- if (kstrtoint(buf, 0, &value))
- return -EINVAL;
-
- if (value)
- chip->taos_settings.als_cal_target = value;
-
- return len;
-}
-
-static ssize_t taos_lux_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- int ret;
-
- ret = taos_get_lux(dev_to_iio_dev(dev));
- if (ret < 0)
- return ret;
-
- return sprintf(buf, "%d\n", ret);
-}
-
-static ssize_t taos_do_calibrate(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- int value;
-
- if (kstrtoint(buf, 0, &value))
- return -EINVAL;
-
- if (value == 1)
- taos_als_calibrate(indio_dev);
-
- return len;
-}
-
-static ssize_t taos_luxtable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int i;
- int offset = 0;
-
- for (i = 0; i < ARRAY_SIZE(taos_device_lux); i++) {
- offset += sprintf(buf + offset, "%u,%u,%u,",
- taos_device_lux[i].ratio,
- taos_device_lux[i].ch0,
- taos_device_lux[i].ch1);
- if (taos_device_lux[i].ratio == 0) {
- /*
- * We just printed the first "0" entry.
- * Now get rid of the extra "," and break.
- */
- offset--;
- break;
- }
- }
-
- offset += sprintf(buf + offset, "\n");
- return offset;
-}
-
-static ssize_t taos_luxtable_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int value[ARRAY_SIZE(taos_device_lux) * 3 + 1];
- int n;
-
- get_options(buf, ARRAY_SIZE(value), value);
-
- /* We now have an array of ints starting at value[1], and
- * enumerated by value[0].
- * We expect each group of three ints is one table entry,
- * and the last table entry is all 0.
- */
- n = value[0];
- if ((n % 3) || n < 6 || n > ((ARRAY_SIZE(taos_device_lux) - 1) * 3)) {
- dev_info(dev, "LUX TABLE INPUT ERROR 1 Value[0]=%d\n", n);
- return -EINVAL;
- }
- if ((value[(n - 2)] | value[(n - 1)] | value[n]) != 0) {
- dev_info(dev, "LUX TABLE INPUT ERROR 2 Value[0]=%d\n", n);
- return -EINVAL;
- }
-
- if (chip->taos_chip_status == TSL258X_CHIP_WORKING)
- taos_chip_off(indio_dev);
-
- /* Zero out the table */
- memset(taos_device_lux, 0, sizeof(taos_device_lux));
- memcpy(taos_device_lux, &value[1], (value[0] * 4));
-
- taos_chip_on(indio_dev);
-
- return len;
-}
-
-static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
- taos_power_state_show, taos_power_state_store);
-
-static DEVICE_ATTR(illuminance0_calibscale, S_IRUGO | S_IWUSR,
- taos_gain_show, taos_gain_store);
-static DEVICE_ATTR(illuminance0_calibscale_available, S_IRUGO,
- taos_gain_available_show, NULL);
-
-static DEVICE_ATTR(illuminance0_integration_time, S_IRUGO | S_IWUSR,
- taos_als_time_show, taos_als_time_store);
-static DEVICE_ATTR(illuminance0_integration_time_available, S_IRUGO,
- taos_als_time_available_show, NULL);
-
-static DEVICE_ATTR(illuminance0_calibbias, S_IRUGO | S_IWUSR,
- taos_als_trim_show, taos_als_trim_store);
-
-static DEVICE_ATTR(illuminance0_input_target, S_IRUGO | S_IWUSR,
- taos_als_cal_target_show, taos_als_cal_target_store);
-
-static DEVICE_ATTR(illuminance0_input, S_IRUGO, taos_lux_show, NULL);
-static DEVICE_ATTR(illuminance0_calibrate, S_IWUSR, NULL, taos_do_calibrate);
-static DEVICE_ATTR(illuminance0_lux_table, S_IRUGO | S_IWUSR,
- taos_luxtable_show, taos_luxtable_store);
-
-static struct attribute *sysfs_attrs_ctrl[] = {
- &dev_attr_power_state.attr,
- &dev_attr_illuminance0_calibscale.attr, /* Gain */
- &dev_attr_illuminance0_calibscale_available.attr,
- &dev_attr_illuminance0_integration_time.attr, /* I time*/
- &dev_attr_illuminance0_integration_time_available.attr,
- &dev_attr_illuminance0_calibbias.attr, /* trim */
- &dev_attr_illuminance0_input_target.attr,
- &dev_attr_illuminance0_input.attr,
- &dev_attr_illuminance0_calibrate.attr,
- &dev_attr_illuminance0_lux_table.attr,
- NULL
-};
-
-static const struct attribute_group tsl2583_attribute_group = {
- .attrs = sysfs_attrs_ctrl,
-};
-
-/* Use the default register values to identify the Taos device */
-static int taos_tsl258x_device(unsigned char *bufp)
-{
- return ((bufp[TSL258X_CHIPID] & 0xf0) == 0x90);
-}
-
-static const struct iio_info tsl2583_info = {
- .attrs = &tsl2583_attribute_group,
- .driver_module = THIS_MODULE,
-};
-
-/*
- * Client probe function - When a valid device is found, the driver's device
- * data structure is updated, and initialization completes successfully.
- */
-static int taos_probe(struct i2c_client *clientp,
- const struct i2c_device_id *idp)
-{
- int i, ret;
- unsigned char buf[TSL258X_MAX_DEVICE_REGS];
- struct tsl2583_chip *chip;
- struct iio_dev *indio_dev;
-
- if (!i2c_check_functionality(clientp->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA)) {
- dev_err(&clientp->dev, "taos_probe() - i2c smbus byte data func unsupported\n");
- return -EOPNOTSUPP;
- }
-
- indio_dev = devm_iio_device_alloc(&clientp->dev, sizeof(*chip));
- if (!indio_dev)
- return -ENOMEM;
- chip = iio_priv(indio_dev);
- chip->client = clientp;
- i2c_set_clientdata(clientp, indio_dev);
-
- mutex_init(&chip->als_mutex);
- chip->taos_chip_status = TSL258X_CHIP_UNKNOWN;
- memcpy(chip->taos_config, taos_config, sizeof(chip->taos_config));
-
- for (i = 0; i < TSL258X_MAX_DEVICE_REGS; i++) {
- ret = i2c_smbus_write_byte(clientp,
- (TSL258X_CMD_REG | (TSL258X_CNTRL + i)));
- if (ret < 0) {
- dev_err(&clientp->dev,
- "i2c_smbus_write_byte to cmd reg failed in taos_probe(), err = %d\n",
- ret);
- return ret;
- }
- ret = i2c_smbus_read_byte(clientp);
- if (ret < 0) {
- dev_err(&clientp->dev,
- "i2c_smbus_read_byte from reg failed in taos_probe(), err = %d\n",
- ret);
- return ret;
- }
- buf[i] = ret;
- }
-
- if (!taos_tsl258x_device(buf)) {
- dev_info(&clientp->dev,
- "i2c device found but does not match expected id in taos_probe()\n");
- return -EINVAL;
- }
-
- ret = i2c_smbus_write_byte(clientp, (TSL258X_CMD_REG | TSL258X_CNTRL));
- if (ret < 0) {
- dev_err(&clientp->dev,
- "i2c_smbus_write_byte() to cmd reg failed in taos_probe(), err = %d\n",
- ret);
- return ret;
- }
-
- indio_dev->info = &tsl2583_info;
- indio_dev->dev.parent = &clientp->dev;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->name = chip->client->name;
- ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
- if (ret) {
- dev_err(&clientp->dev, "iio registration failed\n");
- return ret;
- }
-
- /* Load up the V2 defaults (these are hard coded defaults for now) */
- taos_defaults(chip);
-
- /* Make sure the chip is on */
- taos_chip_on(indio_dev);
-
- dev_info(&clientp->dev, "Light sensor found.\n");
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int taos_suspend(struct device *dev)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int ret = 0;
-
- mutex_lock(&chip->als_mutex);
-
- if (chip->taos_chip_status == TSL258X_CHIP_WORKING) {
- ret = taos_chip_off(indio_dev);
- chip->taos_chip_status = TSL258X_CHIP_SUSPENDED;
- }
-
- mutex_unlock(&chip->als_mutex);
- return ret;
-}
-
-static int taos_resume(struct device *dev)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int ret = 0;
-
- mutex_lock(&chip->als_mutex);
-
- if (chip->taos_chip_status == TSL258X_CHIP_SUSPENDED)
- ret = taos_chip_on(indio_dev);
-
- mutex_unlock(&chip->als_mutex);
- return ret;
-}
-
-static SIMPLE_DEV_PM_OPS(taos_pm_ops, taos_suspend, taos_resume);
-#define TAOS_PM_OPS (&taos_pm_ops)
-#else
-#define TAOS_PM_OPS NULL
-#endif
-
-static struct i2c_device_id taos_idtable[] = {
- { "tsl2580", 0 },
- { "tsl2581", 1 },
- { "tsl2583", 2 },
- {}
-};
-MODULE_DEVICE_TABLE(i2c, taos_idtable);
-
-/* Driver definition */
-static struct i2c_driver taos_driver = {
- .driver = {
- .name = "tsl2583",
- .pm = TAOS_PM_OPS,
- },
- .id_table = taos_idtable,
- .probe = taos_probe,
-};
-module_i2c_driver(taos_driver);
-
-MODULE_AUTHOR("J. August Brenner<jbrenner@taosinc.com>");
-MODULE_DESCRIPTION("TAOS tsl2583 ambient light sensor driver");
-MODULE_LICENSE("GPL");
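
For reference, since the file is dropped from staging here, the lux pipeline that the deleted taos_get_lux() implemented can be condensed as below. This is a user-space style restatement of the same arithmetic (same table constants, no I2C access, locking, saturation or last-value handling), not the driver itself:

/* Condensed restatement of the tsl258x lux math shown above. */
#include <stdint.h>

struct taos_lux { unsigned int ratio, ch0, ch1; };
struct gainadj { int ch0, ch1; };

static const struct taos_lux lux_table[] = {
	{  9830,  8520, 15729 },
	{ 12452, 10807, 23344 },
	{ 14746,  6383, 11705 },
	{ 17695,  4063,  6554 },
	{ 0, 0, 0 },				/* terminator */
};

static const struct gainadj gain_table[] = {
	{ 1, 1 }, { 8, 8 }, { 16, 16 }, { 107, 115 },
};

/* gain: 0..3, time_scale: integration time / 50 ms, trim: nominally 1000 */
static unsigned int tsl258x_lux(uint16_t ch0, uint16_t ch1, int gain,
				int time_scale, int trim)
{
	const struct taos_lux *p = lux_table;
	uint32_t ratio, ch0lux, ch1lux;
	uint64_t lux;

	if (!ch0 || !time_scale)
		return 0;

	ratio = ((uint32_t)ch1 << 15) / ch0;
	while (p->ratio && p->ratio < ratio)
		p++;				/* first segment covering the ratio */
	if (!p->ratio)
		return 0;

	/* apply segment coefficients, rounded division by the gain factor */
	ch0lux = (ch0 * p->ch0 + gain_table[gain].ch0 / 2) / gain_table[gain].ch0;
	ch1lux = (ch1 * p->ch1 + gain_table[gain].ch1 / 2) / gain_table[gain].ch1;
	if (ch1lux > ch0lux)
		return 0;			/* non-physical reading */

	lux = (ch0lux - ch1lux + time_scale / 2) / time_scale;
	lux = (lux * trim) >> 13;		/* table carries an 8192 factor */
	lux = (lux + 500) / 1000;
	return lux > 65535 ? 65535 : (unsigned int)lux;
}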
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index ebb8a1993303..3af8f77b8e41 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -465,38 +465,26 @@ err_ret:
return ret;
}
-static ssize_t ade7758_read_frequency(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int ade7758_read_samp_freq(struct device *dev, int *val)
{
int ret;
u8 t;
- int sps;
ret = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &t);
if (ret)
return ret;
t = (t >> 5) & 0x3;
- sps = 26040 / (1 << t);
+ *val = 26040 / (1 << t);
- return sprintf(buf, "%d SPS\n", sps);
+ return 0;
}
-static ssize_t ade7758_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static int ade7758_write_samp_freq(struct device *dev, int val)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- u16 val;
int ret;
u8 reg, t;
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- return ret;
-
- mutex_lock(&indio_dev->mlock);
-
switch (val) {
case 26040:
t = 0;
@@ -525,9 +513,49 @@ static ssize_t ade7758_write_frequency(struct device *dev,
ret = ade7758_spi_write_reg_8(dev, ADE7758_WAVMODE, reg);
out:
- mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
- return ret ? ret : len;
+static int ade7758_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ mutex_lock(&indio_dev->mlock);
+ ret = ade7758_read_samp_freq(&indio_dev->dev, val);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ade7758_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val2)
+ return -EINVAL;
+ mutex_lock(&indio_dev->mlock);
+ ret = ade7758_write_samp_freq(&indio_dev->dev, val);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
}
static IIO_DEV_ATTR_TEMP_RAW(ade7758_read_8bit);
@@ -553,17 +581,12 @@ static IIO_DEV_ATTR_BVAHR(ade7758_read_16bit,
static IIO_DEV_ATTR_CVAHR(ade7758_read_16bit,
ADE7758_CVAHR);
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
- ade7758_read_frequency,
- ade7758_write_frequency);
-
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26040 13020 6510 3255");
static struct attribute *ade7758_attributes[] = {
&iio_dev_attr_in_temp_raw.dev_attr.attr,
&iio_const_attr_in_temp_offset.dev_attr.attr,
&iio_const_attr_in_temp_scale.dev_attr.attr,
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_awatthr.dev_attr.attr,
&iio_dev_attr_bwatthr.dev_attr.attr,
@@ -611,6 +634,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 0,
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE),
.scan_index = 0,
.scan_type = {
@@ -622,6 +646,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.type = IIO_CURRENT,
.indexed = 1,
.channel = 0,
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT),
.scan_index = 1,
.scan_type = {
@@ -634,6 +659,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "apparent",
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR),
.scan_index = 2,
.scan_type = {
@@ -646,6 +672,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "active",
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR),
.scan_index = 3,
.scan_type = {
@@ -658,6 +685,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "reactive",
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR),
.scan_index = 4,
.scan_type = {
@@ -669,6 +697,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE),
.scan_index = 5,
.scan_type = {
@@ -680,6 +709,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.type = IIO_CURRENT,
.indexed = 1,
.channel = 1,
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT),
.scan_index = 6,
.scan_type = {
@@ -692,6 +722,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.indexed = 1,
.channel = 1,
.extend_name = "apparent",
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR),
.scan_index = 7,
.scan_type = {
@@ -704,6 +735,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.indexed = 1,
.channel = 1,
.extend_name = "active",
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR),
.scan_index = 8,
.scan_type = {
@@ -716,6 +748,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.indexed = 1,
.channel = 1,
.extend_name = "reactive",
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR),
.scan_index = 9,
.scan_type = {
@@ -727,6 +760,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 2,
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE),
.scan_index = 10,
.scan_type = {
@@ -738,6 +772,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.type = IIO_CURRENT,
.indexed = 1,
.channel = 2,
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT),
.scan_index = 11,
.scan_type = {
@@ -750,6 +785,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.indexed = 1,
.channel = 2,
.extend_name = "apparent",
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR),
.scan_index = 12,
.scan_type = {
@@ -762,6 +798,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.indexed = 1,
.channel = 2,
.extend_name = "active",
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR),
.scan_index = 13,
.scan_type = {
@@ -774,6 +811,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.indexed = 1,
.channel = 2,
.extend_name = "reactive",
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR),
.scan_index = 14,
.scan_type = {
@@ -787,6 +825,8 @@ static const struct iio_chan_spec ade7758_channels[] = {
static const struct iio_info ade7758_info = {
.attrs = &ade7758_attribute_group,
+ .read_raw = &ade7758_read_raw,
+ .write_raw = &ade7758_write_raw,
.driver_module = THIS_MODULE,
};
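
Besides adding the read_raw()/write_raw() callbacks, the ade7758 conversion tags every channel with info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), so the IIO core exposes a single shared sampling_frequency attribute and routes it to those callbacks. A minimal channel-spec sketch for a hypothetical "foo" driver:

/* Channel-spec side of the same conversion, illustrative channels only. */
#include <linux/bitops.h>
#include <linux/iio/iio.h>

static const struct iio_chan_spec foo_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
	},
	{
		.type = IIO_CURRENT,
		.indexed = 1,
		.channel = 0,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
	},
};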
diff --git a/drivers/staging/iio/ring_hw.h b/drivers/staging/iio/ring_hw.h
deleted file mode 100644
index 75bf47bfee78..000000000000
--- a/drivers/staging/iio/ring_hw.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * ring_hw.h - common functionality for iio hardware ring buffers
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
- *
- */
-
-#ifndef _RING_HW_H_
-#define _RING_HW_H_
-
-/**
- * struct iio_hw_ring_buffer- hardware ring buffer
- * @buf: generic ring buffer elements
- * @private: device specific data
- */
-struct iio_hw_buffer {
- struct iio_buffer buf;
- void *private;
-};
-
-#define iio_to_hw_buf(r) container_of(r, struct iio_hw_buffer, buf)
-
-#endif /* _RING_HW_H_ */
diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
index 81c46f4d0935..a604c83c957e 100644
--- a/drivers/staging/ks7010/ks7010_sdio.c
+++ b/drivers/staging/ks7010/ks7010_sdio.c
@@ -35,18 +35,18 @@ MODULE_DEVICE_TABLE(sdio, ks7010_sdio_ids);
/* macro */
#define inc_txqhead(priv) \
- (priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE)
+ (priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE)
#define inc_txqtail(priv) \
- (priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE)
+ (priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE)
#define cnt_txqbody(priv) \
- (((priv->tx_dev.qtail + TX_DEVICE_BUFF_SIZE) - (priv->tx_dev.qhead)) % TX_DEVICE_BUFF_SIZE)
+ (((priv->tx_dev.qtail + TX_DEVICE_BUFF_SIZE) - (priv->tx_dev.qhead)) % TX_DEVICE_BUFF_SIZE)
#define inc_rxqhead(priv) \
- (priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE)
+ (priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE)
#define inc_rxqtail(priv) \
- (priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE)
+ (priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE)
#define cnt_rxqbody(priv) \
- (((priv->rx_dev.qtail + RX_DEVICE_BUFF_SIZE) - (priv->rx_dev.qhead)) % RX_DEVICE_BUFF_SIZE)
+ (((priv->rx_dev.qtail + RX_DEVICE_BUFF_SIZE) - (priv->rx_dev.qhead)) % RX_DEVICE_BUFF_SIZE)
static int ks7010_sdio_read(struct ks_wlan_private *priv, unsigned int address,
unsigned char *buffer, int length)
@@ -76,10 +76,9 @@ static int ks7010_sdio_write(struct ks_wlan_private *priv, unsigned int address,
card = priv->ks_wlan_hw.sdio_card;
if (length == 1) /* CMD52 */
- sdio_writeb(card->func, *buffer, (unsigned int)address, &rc);
+ sdio_writeb(card->func, *buffer, address, &rc);
else /* CMD53 */
- rc = sdio_memcpy_toio(card->func, (unsigned int)address, buffer,
- length);
+ rc = sdio_memcpy_toio(card->func, address, buffer, length);
if (rc != 0)
DPRINTK(1, "sdio error=%d size=%d\n", rc, length);
@@ -255,7 +254,7 @@ int ks_wlan_hw_power_save(struct ks_wlan_private *priv)
static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p,
unsigned long size,
- void (*complete_handler) (void *arg1, void *arg2),
+ void (*complete_handler)(void *arg1, void *arg2),
void *arg1, void *arg2)
{
struct tx_device_buffer *sp;
@@ -294,6 +293,7 @@ static int write_to_device(struct ks_wlan_private *priv, unsigned char *buffer,
int retval;
unsigned char rw_data;
struct hostif_hdr *hdr;
+
hdr = (struct hostif_hdr *)buffer;
DPRINTK(4, "size=%d\n", hdr->size);
@@ -353,11 +353,12 @@ static void tx_device_task(void *dev)
}
int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
- void (*complete_handler) (void *arg1, void *arg2),
+ void (*complete_handler)(void *arg1, void *arg2),
void *arg1, void *arg2)
{
int result = 0;
struct hostif_hdr *hdr;
+
hdr = (struct hostif_hdr *)p;
if (hdr->event < HIF_DATA_REQ || HIF_REQ_MAX < hdr->event) {
@@ -412,7 +413,7 @@ static void ks_wlan_hw_rx(void *dev, uint16_t size)
/* receive data */
if (cnt_rxqbody(priv) >= (RX_DEVICE_BUFF_SIZE - 1)) {
/* in case of buffer overflow */
- DPRINTK(1, "rx buffer overflow \n");
+ DPRINTK(1, "rx buffer overflow\n");
goto error_out;
}
rx_buffer = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qtail];
@@ -658,10 +659,12 @@ static void ks_sdio_interrupt(struct sdio_func *func)
static int trx_device_init(struct ks_wlan_private *priv)
{
/* initialize values (tx) */
- priv->tx_dev.qtail = priv->tx_dev.qhead = 0;
+ priv->tx_dev.qhead = 0;
+ priv->tx_dev.qtail = 0;
/* initialize values (rx) */
- priv->rx_dev.qtail = priv->rx_dev.qhead = 0;
+ priv->rx_dev.qhead = 0;
+ priv->rx_dev.qtail = 0;
/* initialize spinLock (tx,rx) */
spin_lock_init(&priv->tx_dev.tx_dev_lock);
@@ -718,7 +721,7 @@ static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index)
return rc;
}
-#define ROM_BUFF_SIZE (64*1024)
+#define ROM_BUFF_SIZE (64 * 1024)
static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address,
unsigned char *data, unsigned int size)
{
@@ -955,7 +958,7 @@ static int ks7010_sdio_probe(struct sdio_func *func,
priv = NULL;
netdev = NULL;
- /* initilize ks_sdio_card */
+ /* initialize ks_sdio_card */
card = kzalloc(sizeof(*card), GFP_KERNEL);
if (!card)
return -ENOMEM;
@@ -1117,6 +1120,7 @@ static void ks7010_sdio_remove(struct sdio_func *func)
int ret;
struct ks_sdio_card *card;
struct ks_wlan_private *priv;
+
DPRINTK(1, "ks7010_sdio_remove()\n");
card = sdio_get_drvdata(func);
@@ -1142,6 +1146,7 @@ static void ks7010_sdio_remove(struct sdio_func *func)
/* send stop request to MAC */
{
struct hostif_stop_request_t *pp;
+
pp = kzalloc(hif_align_size(sizeof(*pp)), GFP_KERNEL);
if (!pp) {
DPRINTK(3, "allocate memory failed..\n");
diff --git a/drivers/staging/ks7010/ks7010_sdio.h b/drivers/staging/ks7010/ks7010_sdio.h
index c72064b48bd8..0f5fd848e23d 100644
--- a/drivers/staging/ks7010/ks7010_sdio.h
+++ b/drivers/staging/ks7010/ks7010_sdio.h
@@ -1,5 +1,5 @@
/*
- * Driver for KeyStream, KS7010 based SDIO cards.
+ * Driver for KeyStream, KS7010 based SDIO cards.
*
* Copyright (C) 2006-2008 KeyStream Corp.
* Copyright (C) 2009 Renesas Technology Corp.
@@ -41,7 +41,7 @@
/* Write Index Register */
#define WRITE_INDEX 0x000010
-/* Write Status/Read Data Size Register
+/* Write Status/Read Data Size Register
* for network packet (less than 2048 bytes data)
*/
#define WSTATUS_RSIZE 0x000014
@@ -53,14 +53,14 @@
/* ARM to SD interrupt Pending */
#define INT_PENDING 0x000024
-#define INT_GCR_B (1<<7)
-#define INT_GCR_A (1<<6)
-#define INT_WRITE_STATUS (1<<5)
-#define INT_WRITE_INDEX (1<<4)
-#define INT_WRITE_SIZE (1<<3)
-#define INT_READ_STATUS (1<<2)
-#define INT_READ_INDEX (1<<1)
-#define INT_READ_SIZE (1<<0)
+#define INT_GCR_B BIT(7)
+#define INT_GCR_A BIT(6)
+#define INT_WRITE_STATUS BIT(5)
+#define INT_WRITE_INDEX BIT(4)
+#define INT_WRITE_SIZE BIT(3)
+#define INT_READ_STATUS BIT(2)
+#define INT_READ_INDEX BIT(1)
+#define INT_READ_SIZE BIT(0)
/* General Communication Register A */
#define GCR_A 0x000028
@@ -100,7 +100,7 @@ struct hw_info_t {
struct ks_sdio_packet {
struct ks_sdio_packet *next;
u16 nb;
- u8 buffer[0] __attribute__ ((aligned(4)));
+ u8 buffer[0] __aligned(4);
};
struct ks_sdio_card {
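
The header cleanup above swaps open-coded (1 << n) masks for BIT() and the raw __attribute__ spellings for the kernel's __aligned()/__packed shorthands. A small illustration with made-up names:

/* Header idioms adopted above, shown on illustrative definitions. */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/types.h>

#define FOO_INT_READ_SIZE	BIT(0)	/* expands to (1UL << 0) */
#define FOO_INT_READ_INDEX	BIT(1)

struct foo_packet {
	u16 nb;
	u8 buffer[0] __aligned(4);	/* was __attribute__ ((aligned(4))) */
};

struct foo_suite {
	u16 size;
	u8 suite[4][4];
} __packed;				/* was __attribute__ ((packed)) */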
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index c57ca581550a..1fbd495e5e63 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -23,11 +23,11 @@
/* macro */
#define inc_smeqhead(priv) \
- ( priv->sme_i.qhead = (priv->sme_i.qhead + 1) % SME_EVENT_BUFF_SIZE )
+ (priv->sme_i.qhead = (priv->sme_i.qhead + 1) % SME_EVENT_BUFF_SIZE)
#define inc_smeqtail(priv) \
- ( priv->sme_i.qtail = (priv->sme_i.qtail + 1) % SME_EVENT_BUFF_SIZE )
+ (priv->sme_i.qtail = (priv->sme_i.qtail + 1) % SME_EVENT_BUFF_SIZE)
#define cnt_smeqbody(priv) \
- (((priv->sme_i.qtail + SME_EVENT_BUFF_SIZE) - (priv->sme_i.qhead)) % SME_EVENT_BUFF_SIZE )
+ (((priv->sme_i.qtail + SME_EVENT_BUFF_SIZE) - (priv->sme_i.qhead)) % SME_EVENT_BUFF_SIZE)
#define KS_WLAN_MEM_FLAG (GFP_ATOMIC)
@@ -97,11 +97,10 @@ int ks_wlan_do_power_save(struct ks_wlan_private *priv)
{
DPRINTK(4, "psstatus.status=%d\n", atomic_read(&priv->psstatus.status));
- if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS)
hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
- } else {
+ else
priv->dev_state = DEVICE_STATE_READY;
- }
return 0;
}
@@ -187,13 +186,7 @@ int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info_t *ap_info)
memcpy(wrqu.ap_addr.sa_data,
&(priv->current_ap.bssid[0]), ETH_ALEN);
DPRINTK(3,
- "IWEVENT: connect bssid=%02x:%02x:%02x:%02x:%02x:%02x\n",
- (unsigned char)wrqu.ap_addr.sa_data[0],
- (unsigned char)wrqu.ap_addr.sa_data[1],
- (unsigned char)wrqu.ap_addr.sa_data[2],
- (unsigned char)wrqu.ap_addr.sa_data[3],
- (unsigned char)wrqu.ap_addr.sa_data[4],
- (unsigned char)wrqu.ap_addr.sa_data[5]);
+ "IWEVENT: connect bssid=%pM\n", wrqu.ap_addr.sa_data);
wireless_send_event(netdev, SIOCGIWAP, &wrqu, NULL);
}
DPRINTK(4, "\n Link AP\n");
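
This hunk and several that follow replace strings of "%02x" conversions with the kernel's %pM printk extension, which formats a 6-byte MAC address straight from a pointer. A hedged sketch with a hypothetical helper:

/* %pM prints the six address bytes with colon separators. */
#include <linux/netdevice.h>

static void foo_report_bssid(struct net_device *dev, const u8 *bssid)
{
	/* before: "%02x:%02x:...:%02x", bssid[0], bssid[1], ..., bssid[5] */
	netdev_info(dev, "connect bssid=%pM\n", bssid);
}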
@@ -420,16 +413,11 @@ void hostif_data_indication(struct ks_wlan_private *priv)
/* needed parameters: count, keyid, key type, TSC */
sprintf(buf,
"MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr="
- "%02x:%02x:%02x:%02x:%02x:%02x)",
+ "%pM)",
auth_type - 1,
eth_hdr->
h_dest[0] & 0x01 ? "broad" :
- "uni", eth_hdr->h_source[0],
- eth_hdr->h_source[1],
- eth_hdr->h_source[2],
- eth_hdr->h_source[3],
- eth_hdr->h_source[4],
- eth_hdr->h_source[5]);
+ "uni", eth_hdr->h_source);
memset(&wrqu, 0, sizeof(wrqu));
wrqu.data.length = strlen(buf);
DPRINTK(4,
@@ -476,8 +464,6 @@ void hostif_data_indication(struct ks_wlan_private *priv)
skb->dev->last_rx = jiffies;
netif_rx(skb);
} else {
- printk(KERN_WARNING
- "ks_wlan: Memory squeeze, dropping packet.\n");
priv->nstats.rx_dropped++;
}
break;
@@ -511,8 +497,6 @@ void hostif_data_indication(struct ks_wlan_private *priv)
skb->dev->last_rx = jiffies;
netif_rx(skb);
} else {
- printk(KERN_WARNING
- "ks_wlan: Memory squeeze, dropping packet.\n");
priv->nstats.rx_dropped++;
}
break;
@@ -560,10 +544,7 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
dev->dev_addr[5] = priv->eth_addr[5];
dev->dev_addr[6] = 0x00;
dev->dev_addr[7] = 0x00;
- printk(KERN_INFO
- "ks_wlan: MAC ADDRESS = %02x:%02x:%02x:%02x:%02x:%02x\n",
- priv->eth_addr[0], priv->eth_addr[1], priv->eth_addr[2],
- priv->eth_addr[3], priv->eth_addr[4], priv->eth_addr[5]);
+ netdev_info(dev, "MAC ADDRESS = %pM\n", priv->eth_addr);
break;
case DOT11_PRODUCT_VERSION:
/* firmware version */
@@ -571,8 +552,8 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
priv->version_size = priv->rx_size;
memcpy(priv->firmware_version, priv->rxp, priv->rx_size);
priv->firmware_version[priv->rx_size] = '\0';
- printk(KERN_INFO "ks_wlan: firmware ver. = %s\n",
- priv->firmware_version);
+ netdev_info(dev, "firmware ver. = %s\n",
+ priv->firmware_version);
hostif_sme_enqueue(priv, SME_GET_PRODUCT_VERSION);
/* wake_up_interruptible_all(&priv->confirm_wait); */
complete(&priv->confirm_wait);
@@ -592,12 +573,12 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
} else if (priv->eeprom_sum.type == 1) {
if (priv->eeprom_sum.result == 0) {
priv->eeprom_checksum = EEPROM_NG;
- printk("LOCAL_EEPROM_SUM NG\n");
+ netdev_info(dev, "LOCAL_EEPROM_SUM NG\n");
} else if (priv->eeprom_sum.result == 1) {
priv->eeprom_checksum = EEPROM_OK;
}
} else {
- printk("LOCAL_EEPROM_SUM error!\n");
+ netdev_err(dev, "LOCAL_EEPROM_SUM error!\n");
}
break;
default:
@@ -705,15 +686,13 @@ void hostif_mib_set_confirm(struct ks_wlan_private *priv)
break;
case DOT11_GMK1_TSC:
DPRINTK(2, "DOT11_GMK1_TSC:mib_status=%d\n", (int)mib_status);
- if (atomic_read(&priv->psstatus.snooze_guard)) {
+ if (atomic_read(&priv->psstatus.snooze_guard))
atomic_set(&priv->psstatus.snooze_guard, 0);
- }
break;
case DOT11_GMK2_TSC:
DPRINTK(2, "DOT11_GMK2_TSC:mib_status=%d\n", (int)mib_status);
- if (atomic_read(&priv->psstatus.snooze_guard)) {
+ if (atomic_read(&priv->psstatus.snooze_guard))
atomic_set(&priv->psstatus.snooze_guard, 0);
- }
break;
case LOCAL_PMK:
DPRINTK(2, "LOCAL_PMK:mib_status=%d\n", (int)mib_status);
@@ -766,8 +745,9 @@ void hostif_sleep_confirm(struct ks_wlan_private *priv)
static
void hostif_start_confirm(struct ks_wlan_private *priv)
{
-#ifdef WPS
+#ifdef WPS
union iwreq_data wrqu;
+
wrqu.data.length = 0;
wrqu.data.flags = 0;
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
@@ -789,6 +769,7 @@ void hostif_connect_indication(struct ks_wlan_private *priv)
unsigned int old_status = priv->connect_status;
struct net_device *netdev = priv->net_dev;
union iwreq_data wrqu0;
+
connect_code = get_WORD(priv);
switch (connect_code) {
@@ -894,7 +875,7 @@ void hostif_stop_confirm(struct ks_wlan_private *priv)
netif_carrier_off(netdev);
tmp = FORCE_DISCONNECT & priv->connect_status;
priv->connect_status = tmp | DISCONNECT_STATUS;
- printk("IWEVENT: disconnect\n");
+ netdev_info(netdev, "IWEVENT: disconnect\n");
wrqu0.data.length = 0;
wrqu0.data.flags = 0;
@@ -904,7 +885,7 @@ void hostif_stop_confirm(struct ks_wlan_private *priv)
&& (old_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
eth_zero_addr(wrqu0.ap_addr.sa_data);
DPRINTK(3, "IWEVENT: disconnect\n");
- printk("IWEVENT: disconnect\n");
+ netdev_info(netdev, "IWEVENT: disconnect\n");
DPRINTK(3, "disconnect :: scan_ind_count=%d\n",
priv->scan_ind_count);
wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL);
@@ -928,6 +909,7 @@ static
void hostif_infrastructure_set_confirm(struct ks_wlan_private *priv)
{
uint16_t result_code;
+
DPRINTK(3, "\n");
result_code = get_WORD(priv);
DPRINTK(3, "result code = %d\n", result_code);
@@ -993,6 +975,7 @@ void hostif_bss_scan_confirm(struct ks_wlan_private *priv)
unsigned int result_code;
struct net_device *dev = priv->net_dev;
union iwreq_data wrqu;
+
result_code = get_DWORD(priv);
DPRINTK(2, "result=%d :: scan_ind_count=%d\n", result_code,
priv->scan_ind_count);
@@ -1110,7 +1093,7 @@ void hostif_event_check(struct ks_wlan_private *priv)
case HIF_AP_SET_CONF:
default:
//DPRINTK(1, "undefined event[%04X]\n", event);
- printk("undefined event[%04X]\n", event);
+ netdev_err(priv->net_dev, "undefined event[%04X]\n", event);
/* wake_up_all(&priv->confirm_wait); */
complete(&priv->confirm_wait);
break;
@@ -1184,9 +1167,7 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
eth = (struct ethhdr *)packet->data;
if (memcmp(&priv->eth_addr[0], eth->h_source, ETH_ALEN)) {
DPRINTK(1, "invalid mac address !!\n");
- DPRINTK(1, "ethernet->h_source=%02X:%02X:%02X:%02X:%02X:%02X\n",
- eth->h_source[0], eth->h_source[1], eth->h_source[2],
- eth->h_source[3], eth->h_source[4], eth->h_source[5]);
+ DPRINTK(1, "ethernet->h_source=%pM\n", eth->h_source);
dev_kfree_skb(packet);
kfree(pp);
return -3;
@@ -1244,7 +1225,7 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
pp->auth_type = cpu_to_le16((uint16_t) TYPE_AUTH); /* no encryption */
} else {
if (priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) {
- MichaelMICFunction(&michel_mic, (uint8_t *) priv->wpa.key[0].tx_mic_key, (uint8_t *) & pp->data[0], (int)packet_len, (uint8_t) 0, /* priority */
+ MichaelMICFunction(&michel_mic, (uint8_t *) priv->wpa.key[0].tx_mic_key, (uint8_t *) &pp->data[0], (int)packet_len, (uint8_t) 0, /* priority */
(uint8_t *) michel_mic.
Result);
memcpy(p, michel_mic.Result, 8);
@@ -1294,10 +1275,11 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
return result;
}
-#define ps_confirm_wait_inc(priv) do{if(atomic_read(&priv->psstatus.status) > PS_ACTIVE_SET){ \
- atomic_inc(&priv->psstatus.confirm_wait); \
- /* atomic_set(&priv->psstatus.status, PS_CONF_WAIT);*/ \
- } }while(0)
+#define ps_confirm_wait_inc(priv) do { \
+ if (atomic_read(&priv->psstatus.status) > PS_ACTIVE_SET) { \
+ atomic_inc(&priv->psstatus.confirm_wait); \
+ /* atomic_set(&priv->psstatus.status, PS_CONF_WAIT);*/ \
+ } } while (0)
static
void hostif_mib_get_request(struct ks_wlan_private *priv,
@@ -1891,6 +1873,7 @@ static
void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
{
uint32_t val;
+
switch (type) {
case SME_WEP_INDEX_REQUEST:
val = cpu_to_le32((uint32_t) (priv->reg.wep_index));
@@ -1936,18 +1919,17 @@ void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
break;
}
- return;
}
struct wpa_suite_t {
unsigned short size;
unsigned char suite[4][CIPHER_ID_LEN];
-} __attribute__ ((packed));
+} __packed;
struct rsn_mode_t {
uint32_t rsn_mode;
uint16_t rsn_capability;
-} __attribute__ ((packed));
+} __packed;
static
void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
@@ -2125,7 +2107,6 @@ void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
break;
}
- return;
}
static
@@ -2216,10 +2197,7 @@ void hostif_sme_mode_setup(struct ks_wlan_private *priv)
} else {
hostif_infrastructure_set2_request(priv);
DPRINTK(2,
- "Infra bssid = %02x:%02x:%02x:%02x:%02x:%02x\n",
- priv->reg.bssid[0], priv->reg.bssid[1],
- priv->reg.bssid[2], priv->reg.bssid[3],
- priv->reg.bssid[4], priv->reg.bssid[5]);
+ "Infra bssid = %pM\n", priv->reg.bssid);
}
break;
case MODE_ADHOC:
@@ -2229,17 +2207,13 @@ void hostif_sme_mode_setup(struct ks_wlan_private *priv)
} else {
hostif_adhoc_set2_request(priv);
DPRINTK(2,
- "Adhoc bssid = %02x:%02x:%02x:%02x:%02x:%02x\n",
- priv->reg.bssid[0], priv->reg.bssid[1],
- priv->reg.bssid[2], priv->reg.bssid[3],
- priv->reg.bssid[4], priv->reg.bssid[5]);
+ "Adhoc bssid = %pM\n", priv->reg.bssid);
}
break;
default:
break;
}
- return;
}
static
@@ -2340,7 +2314,6 @@ void hostif_sme_powermgt_set(struct ks_wlan_private *priv)
}
hostif_power_mngmt_request(priv, mode, wake_up, receiveDTIMs);
- return;
}
static
@@ -2358,13 +2331,13 @@ void hostif_sme_sleep_set(struct ks_wlan_private *priv)
break;
}
- return;
}
static
void hostif_sme_set_key(struct ks_wlan_private *priv, int type)
{
uint32_t val;
+
switch (type) {
case SME_SET_FLAG:
val = cpu_to_le32((uint32_t) (priv->reg.privacy_invoked));
@@ -2416,7 +2389,6 @@ void hostif_sme_set_key(struct ks_wlan_private *priv, int type)
&priv->wpa.key[2].rx_seq[0]);
break;
}
- return;
}
static
@@ -2427,16 +2399,14 @@ void hostif_sme_set_pmksa(struct ks_wlan_private *priv)
struct {
uint8_t bssid[ETH_ALEN];
uint8_t pmkid[IW_PMKID_LEN];
- } __attribute__ ((packed)) list[PMK_LIST_MAX];
- } __attribute__ ((packed)) pmkcache;
+ } __packed list[PMK_LIST_MAX];
+ } __packed pmkcache;
struct pmk_t *pmk;
- struct list_head *ptr;
int i;
DPRINTK(4, "pmklist.size=%d\n", priv->pmklist.size);
i = 0;
- list_for_each(ptr, &priv->pmklist.head) {
- pmk = list_entry(ptr, struct pmk_t, list);
+ list_for_each_entry(pmk, &priv->pmklist.head, list) {
if (i < PMK_LIST_MAX) {
memcpy(pmkcache.list[i].bssid, pmk->bssid, ETH_ALEN);
memcpy(pmkcache.list[i].pmkid, pmk->pmkid,
@@ -2461,9 +2431,8 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
DPRINTK(3, "event=%d\n", event);
switch (event) {
case SME_START:
- if (priv->dev_state == DEVICE_STATE_BOOT) {
+ if (priv->dev_state == DEVICE_STATE_BOOT)
hostif_mib_get_request(priv, DOT11_MAC_ADDRESS);
- }
break;
case SME_MULTICAST_REQUEST:
hostif_sme_multicast_set(priv);
@@ -2508,14 +2477,12 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
}
break;
case SME_GET_MAC_ADDRESS:
- if (priv->dev_state == DEVICE_STATE_BOOT) {
+ if (priv->dev_state == DEVICE_STATE_BOOT)
hostif_mib_get_request(priv, DOT11_PRODUCT_VERSION);
- }
break;
case SME_GET_PRODUCT_VERSION:
- if (priv->dev_state == DEVICE_STATE_BOOT) {
+ if (priv->dev_state == DEVICE_STATE_BOOT)
priv->dev_state = DEVICE_STATE_PREINIT;
- }
break;
case SME_STOP_REQUEST:
hostif_stop_request(priv);
@@ -2594,9 +2561,8 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
/* for power save */
atomic_set(&priv->psstatus.snooze_guard, 0);
atomic_set(&priv->psstatus.confirm_wait, 0);
- if (priv->dev_state == DEVICE_STATE_PREINIT) {
+ if (priv->dev_state == DEVICE_STATE_PREINIT)
priv->dev_state = DEVICE_STATE_INIT;
- }
/* wake_up_interruptible_all(&priv->confirm_wait); */
complete(&priv->confirm_wait);
break;
@@ -2652,7 +2618,6 @@ void hostif_sme_task(unsigned long dev)
tasklet_schedule(&priv->sme_task);
}
}
- return;
}
/* send to Station Management Entity module */
@@ -2672,7 +2637,7 @@ void hostif_sme_enqueue(struct ks_wlan_private *priv, unsigned short event)
} else {
/* in case of buffer overflow */
//DPRINTK(2,"sme queue buffer overflow\n");
- printk("sme queue buffer overflow\n");
+ netdev_err(priv->net_dev, "sme queue buffer overflow\n");
}
tasklet_schedule(&priv->sme_task);
@@ -2736,5 +2701,4 @@ int hostif_init(struct ks_wlan_private *priv)
void hostif_exit(struct ks_wlan_private *priv)
{
tasklet_kill(&priv->sme_task);
- return;
}
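
The ps_confirm_wait_inc() hunk above only reformats the macro, but it keeps the do { ... } while (0) wrapper, which is what lets a multi-statement macro sit safely under an if with no braces. A minimal userspace sketch of that idiom (plain int fields stand in for the driver's atomic_t counters, and the PS_ACTIVE_SET value is made up for illustration):

#include <stdio.h>

/* Hypothetical stand-in for the driver's power-save state. */
struct ps_status {
	int status;
	int confirm_wait;
};
#define PS_ACTIVE_SET 1

/* Same shape as the cleaned-up macro: do/while(0) keeps the whole body
 * bound to the enclosing if/else below. */
#define ps_confirm_wait_inc(ps) do { \
	if ((ps)->status > PS_ACTIVE_SET) { \
		(ps)->confirm_wait++; \
	} } while (0)

int main(void)
{
	struct ps_status ps = { .status = 2, .confirm_wait = 0 };

	if (ps.status)	/* no braces: only legal because of the do/while(0) */
		ps_confirm_wait_inc(&ps);
	else
		ps.confirm_wait = -1;

	printf("confirm_wait=%d\n", ps.confirm_wait);	/* prints 1 */
	return 0;
}
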
diff --git a/drivers/staging/ks7010/ks_wlan.h b/drivers/staging/ks7010/ks_wlan.h
index c2cc288ae899..279e9b06fc4b 100644
--- a/drivers/staging/ks7010/ks_wlan.h
+++ b/drivers/staging/ks7010/ks_wlan.h
@@ -14,7 +14,6 @@
#define WPS
-#include <linux/version.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -25,13 +24,13 @@
#include <linux/netdevice.h> /* struct net_device_stats, struct sk_buff */
#include <linux/etherdevice.h>
#include <linux/wireless.h>
-#include <asm/atomic.h> /* struct atmic_t */
+#include <linux/atomic.h> /* struct atomic_t */
#include <linux/timer.h> /* struct timer_list */
#include <linux/string.h>
#include <linux/completion.h> /* struct completion */
#include <linux/workqueue.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include "ks7010_sdio.h"
@@ -43,36 +42,36 @@
#endif
struct ks_wlan_parameter {
- uint8_t operation_mode; /* Operation Mode */
- uint8_t channel; /* Channel */
- uint8_t tx_rate; /* Transmit Rate */
+ u8 operation_mode; /* Operation Mode */
+ u8 channel; /* Channel */
+ u8 tx_rate; /* Transmit Rate */
struct {
- uint8_t size;
- uint8_t body[16];
+ u8 size;
+ u8 body[16];
} rate_set;
- uint8_t bssid[ETH_ALEN]; /* BSSID */
+ u8 bssid[ETH_ALEN]; /* BSSID */
struct {
- uint8_t size;
- uint8_t body[32 + 1];
+ u8 size;
+ u8 body[32 + 1];
} ssid; /* SSID */
- uint8_t preamble; /* Preamble */
- uint8_t powermgt; /* PowerManagementMode */
- uint32_t scan_type; /* AP List Scan Type */
+ u8 preamble; /* Preamble */
+ u8 powermgt; /* PowerManagementMode */
+ u32 scan_type; /* AP List Scan Type */
#define BEACON_LOST_COUNT_MIN 0
#define BEACON_LOST_COUNT_MAX 65535
- uint32_t beacon_lost_count; /* Beacon Lost Count */
- uint32_t rts; /* RTS Threashold */
- uint32_t fragment; /* Fragmentation Threashold */
- uint32_t privacy_invoked;
- uint32_t wep_index;
+ u32 beacon_lost_count; /* Beacon Lost Count */
+	u32 rts;	/* RTS Threshold */
+	u32 fragment;	/* Fragmentation Threshold */
+ u32 privacy_invoked;
+ u32 wep_index;
struct {
- uint8_t size;
- uint8_t val[13 * 2 + 1];
+ u8 size;
+ u8 val[13 * 2 + 1];
} wep_key[4];
- uint16_t authenticate_type;
- uint16_t phy_type; /* 11b/11g/11bg mode type */
- uint16_t cts_mode; /* for 11g/11bg mode cts mode */
- uint16_t phy_info_timer; /* phy information timer */
+ u16 authenticate_type;
+ u16 phy_type; /* 11b/11g/11bg mode type */
+ u16 cts_mode; /* for 11g/11bg mode cts mode */
+ u16 phy_info_timer; /* phy information timer */
};
enum {
@@ -216,37 +215,37 @@ struct hostt_t {
#define RSN_IE_BODY_MAX 64
struct rsn_ie_t {
- uint8_t id; /* 0xdd = WPA or 0x30 = RSN */
- uint8_t size; /* max ? 255 ? */
- uint8_t body[RSN_IE_BODY_MAX];
+ u8 id; /* 0xdd = WPA or 0x30 = RSN */
+ u8 size; /* max ? 255 ? */
+ u8 body[RSN_IE_BODY_MAX];
} __packed;
#ifdef WPS
#define WPS_IE_BODY_MAX 255
struct wps_ie_t {
- uint8_t id; /* 221 'dd <len> 00 50 F2 04' */
- uint8_t size; /* max ? 255 ? */
- uint8_t body[WPS_IE_BODY_MAX];
+ u8 id; /* 221 'dd <len> 00 50 F2 04' */
+ u8 size; /* max ? 255 ? */
+ u8 body[WPS_IE_BODY_MAX];
} __packed;
#endif /* WPS */
struct local_ap_t {
- uint8_t bssid[6];
- uint8_t rssi;
- uint8_t sq;
+ u8 bssid[6];
+ u8 rssi;
+ u8 sq;
struct {
- uint8_t size;
- uint8_t body[32];
- uint8_t ssid_pad;
+ u8 size;
+ u8 body[32];
+ u8 ssid_pad;
} ssid;
struct {
- uint8_t size;
- uint8_t body[16];
- uint8_t rate_pad;
+ u8 size;
+ u8 body[16];
+ u8 rate_pad;
} rate_set;
- uint16_t capability;
- uint8_t channel;
- uint8_t noise;
+ u16 capability;
+ u8 channel;
+ u8 noise;
struct rsn_ie_t wpa_ie;
struct rsn_ie_t rsn_ie;
#ifdef WPS
@@ -262,15 +261,15 @@ struct local_aplist_t {
};
struct local_gain_t {
- uint8_t TxMode;
- uint8_t RxMode;
- uint8_t TxGain;
- uint8_t RxGain;
+ u8 TxMode;
+ u8 RxMode;
+ u8 TxGain;
+ u8 RxGain;
};
struct local_eeprom_sum_t {
- uint8_t type;
- uint8_t result;
+ u8 type;
+ u8 result;
};
enum {
@@ -352,25 +351,25 @@ enum {
#define MIC_KEY_SIZE 8
struct wpa_key_t {
- uint32_t ext_flags; /* IW_ENCODE_EXT_xxx */
- uint8_t tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
- uint8_t rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
+ u32 ext_flags; /* IW_ENCODE_EXT_xxx */
+ u8 tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
+ u8 rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
struct sockaddr addr; /* ff:ff:ff:ff:ff:ff for broadcast/multicast
* (group) keys or unicast address for
* individual keys */
- uint16_t alg;
- uint16_t key_len; /* WEP: 5 or 13, TKIP: 32, CCMP: 16 */
- uint8_t key_val[IW_ENCODING_TOKEN_MAX];
- uint8_t tx_mic_key[MIC_KEY_SIZE];
- uint8_t rx_mic_key[MIC_KEY_SIZE];
+ u16 alg;
+ u16 key_len; /* WEP: 5 or 13, TKIP: 32, CCMP: 16 */
+ u8 key_val[IW_ENCODING_TOKEN_MAX];
+ u8 tx_mic_key[MIC_KEY_SIZE];
+ u8 rx_mic_key[MIC_KEY_SIZE];
};
#define WPA_KEY_INDEX_MAX 4
#define WPA_RX_SEQ_LEN 6
struct mic_failure_t {
- uint16_t failure; /* MIC Failure counter 0 or 1 or 2 */
- uint16_t counter; /* 1sec counter 0-60 */
- uint32_t last_failure_time;
+ u16 failure; /* MIC Failure counter 0 or 1 or 2 */
+ u16 counter; /* 1sec counter 0-60 */
+ u32 last_failure_time;
int stop; /* stop flag */
};
@@ -391,12 +390,12 @@ struct wpa_status_t {
#include <linux/list.h>
#define PMK_LIST_MAX 8
struct pmk_list_t {
- uint16_t size;
+ u16 size;
struct list_head head;
struct pmk_t {
struct list_head list;
- uint8_t bssid[ETH_ALEN];
- uint8_t pmkid[IW_PMKID_LEN];
+ u8 bssid[ETH_ALEN];
+ u8 pmkid[IW_PMKID_LEN];
} pmk[PMK_LIST_MAX];
};
@@ -404,7 +403,7 @@ struct pmk_list_t {
struct wps_status_t {
int wps_enabled;
int ielen;
- uint8_t ie[255];
+ u8 ie[255];
};
#endif /* WPS */
@@ -439,7 +438,7 @@ struct ks_wlan_private {
struct pmk_list_t pmklist;
/* wireless parameter */
struct ks_wlan_parameter reg;
- uint8_t current_rate;
+ u8 current_rate;
char nick[IW_ESSID_MAX_SIZE + 1];
@@ -472,24 +471,24 @@ struct ks_wlan_private {
/* spinlock_t lock; */
#define FORCE_DISCONNECT 0x80000000
#define CONNECT_STATUS_MASK 0x7FFFFFFF
- uint32_t connect_status; /* connect status */
+ u32 connect_status; /* connect status */
int infra_status; /* Infractructure status */
- uint8_t data_buff[0x1000];
+ u8 data_buff[0x1000];
- uint8_t scan_ssid_len;
- uint8_t scan_ssid[IW_ESSID_MAX_SIZE + 1];
+ u8 scan_ssid_len;
+ u8 scan_ssid[IW_ESSID_MAX_SIZE + 1];
struct local_gain_t gain;
#ifdef WPS
struct net_device *l2_dev;
int l2_fd;
struct wps_status_t wps;
#endif /* WPS */
- uint8_t sleep_mode;
+ u8 sleep_mode;
- uint8_t region;
+ u8 region;
struct local_eeprom_sum_t eeprom_sum;
- uint8_t eeprom_checksum;
+ u8 eeprom_checksum;
struct hostt_t hostt;
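
The ks_wlan.h changes above are type-spelling conversions: uintN_t becomes the kernel's u8/u16/u32, and (in the hostif.c hunks earlier) __attribute__ ((packed)) becomes the __packed shorthand. Neither changes the layout; the packed attribute is what pins it. A small userspace sketch using the same field widths as the driver's rsn_mode_t (my_packed is a stand-in name for the kernel's __packed):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's __packed macro. */
#define my_packed __attribute__((packed))

/* Same field widths as rsn_mode_t in hostif.c. */
struct rsn_mode_packed {
	uint32_t rsn_mode;
	uint16_t rsn_capability;
} my_packed;

struct rsn_mode_natural {
	uint32_t rsn_mode;
	uint16_t rsn_capability;
};

int main(void)
{
	/* Typically prints "packed=6 natural=8": packing drops the trailing
	 * padding the ABI would add, which matters for firmware/wire layouts. */
	printf("packed=%zu natural=%zu\n",
	       sizeof(struct rsn_mode_packed),
	       sizeof(struct rsn_mode_natural));
	return 0;
}
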
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index b2b4fa4c3834..e5d04adaeb1a 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -24,9 +24,9 @@
#include <linux/pci.h>
#include <linux/ctype.h>
#include <linux/timer.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static int wep_on_off;
#define WEP_OFF 0
@@ -50,10 +50,10 @@ static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
/* A few details needed for WEP (Wireless Equivalent Privacy) */
#define MAX_KEY_SIZE 13 /* 128 (?) bits */
#define MIN_KEY_SIZE 5 /* 40 bits RC4 - WEP */
-typedef struct wep_key_t {
+struct wep_key {
u16 len;
u8 key[16]; /* 40-bit and 104-bit keys */
-} wep_key_t;
+};
/* Backward compatibility */
#ifndef IW_ENCODE_NOKEY
@@ -88,9 +88,9 @@ int ks_wlan_update_phy_information(struct ks_wlan_private *priv)
DPRINTK(4, "in_interrupt = %ld\n", in_interrupt());
- if (priv->dev_state < DEVICE_STATE_READY) {
+ if (priv->dev_state < DEVICE_STATE_READY)
return -1; /* not finished initialize */
- }
+
if (atomic_read(&update_phyinfo))
return 1;
@@ -182,19 +182,18 @@ static int ks_wlan_get_name(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
- if (priv->dev_state < DEVICE_STATE_READY) {
+ if (priv->dev_state < DEVICE_STATE_READY)
strcpy(cwrq, "NOT READY!");
- } else if (priv->reg.phy_type == D_11B_ONLY_MODE) {
+ else if (priv->reg.phy_type == D_11B_ONLY_MODE)
strcpy(cwrq, "IEEE 802.11b");
- } else if (priv->reg.phy_type == D_11G_ONLY_MODE) {
+ else if (priv->reg.phy_type == D_11G_ONLY_MODE)
strcpy(cwrq, "IEEE 802.11g");
- } else {
+ else
strcpy(cwrq, "IEEE 802.11b/g");
- }
return 0;
}
@@ -209,9 +208,8 @@ static int ks_wlan_set_freq(struct net_device *dev,
(struct ks_wlan_private *)netdev_priv(dev);
int rc = -EINPROGRESS; /* Call commit handler */
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
/* If setting by frequency, convert to a channel */
@@ -219,6 +217,7 @@ static int ks_wlan_set_freq(struct net_device *dev,
(fwrq->m >= (int)2.412e8) && (fwrq->m <= (int)2.487e8)) {
int f = fwrq->m / 100000;
int c = 0;
+
while ((c < 14) && (f != frequency_list[c]))
c++;
/* Hack to fall through... */
@@ -257,13 +256,13 @@ static int ks_wlan_get_freq(struct net_device *dev,
(struct ks_wlan_private *)netdev_priv(dev);
int f;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
- if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS)
f = (int)priv->current_ap.channel;
- } else
+ else
f = (int)priv->reg.channel;
fwrq->m = frequency_list[f - 1] * 100000;
fwrq->e = 1;
@@ -283,9 +282,9 @@ static int ks_wlan_set_essid(struct net_device *dev,
DPRINTK(2, " %d\n", dwrq->flags);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
/* Check if we asked for `any' */
@@ -301,14 +300,14 @@ static int ks_wlan_set_essid(struct net_device *dev,
len--;
/* Check the size of the string */
- if (len > IW_ESSID_MAX_SIZE) {
+ if (len > IW_ESSID_MAX_SIZE)
return -EINVAL;
- }
+
#else
/* Check the size of the string */
- if (dwrq->length > IW_ESSID_MAX_SIZE + 1) {
+ if (dwrq->length > IW_ESSID_MAX_SIZE + 1)
return -E2BIG;
- }
+
#endif
/* Set the SSID */
@@ -340,9 +339,9 @@ static int ks_wlan_get_essid(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
/* Note : if dwrq->flags != 0, we should
@@ -385,25 +384,23 @@ static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info,
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
if (priv->reg.operation_mode == MODE_ADHOC ||
priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
- memcpy(priv->reg.bssid, (u8 *) & ap_addr->sa_data, ETH_ALEN);
+ memcpy(priv->reg.bssid, &ap_addr->sa_data, ETH_ALEN);
- if (is_valid_ether_addr((u8 *) priv->reg.bssid)) {
+ if (is_valid_ether_addr((u8 *)priv->reg.bssid))
priv->need_commit |= SME_MODE_SET;
- }
+
} else {
eth_zero_addr(priv->reg.bssid);
return -EOPNOTSUPP;
}
- DPRINTK(2, "bssid = %02x:%02x:%02x:%02x:%02x:%02x\n",
- priv->reg.bssid[0], priv->reg.bssid[1], priv->reg.bssid[2],
- priv->reg.bssid[3], priv->reg.bssid[4], priv->reg.bssid[5]);
+ DPRINTK(2, "bssid = %pM\n", priv->reg.bssid);
/* Write it to the card */
if (priv->need_commit) {
@@ -421,15 +418,14 @@ static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
- if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS)
memcpy(awrq->sa_data, &(priv->current_ap.bssid[0]), ETH_ALEN);
- } else {
+ else
eth_zero_addr(awrq->sa_data);
- }
awrq->sa_family = ARPHRD_ETHER;
@@ -445,15 +441,14 @@ static int ks_wlan_set_nick(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
/* Check the size of the string */
- if (dwrq->length > 16 + 1) {
+ if (dwrq->length > 16 + 1)
return -E2BIG;
- }
+
memset(priv->nick, 0, sizeof(priv->nick));
memcpy(priv->nick, extra, dwrq->length);
@@ -469,9 +464,9 @@ static int ks_wlan_get_nick(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
strncpy(extra, priv->nick, 16);
extra[16] = '\0';
@@ -490,9 +485,9 @@ static int ks_wlan_set_rate(struct net_device *dev,
(struct ks_wlan_private *)netdev_priv(dev);
int i = 0;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
if (priv->reg.phy_type == D_11B_ONLY_MODE) {
if (vwrq->fixed == 1) {
@@ -727,13 +722,13 @@ static int ks_wlan_get_rate(struct net_device *dev,
DPRINTK(2, "in_interrupt = %ld update_phyinfo = %d\n",
in_interrupt(), atomic_read(&update_phyinfo));
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
- if (!atomic_read(&update_phyinfo)) {
+ if (!atomic_read(&update_phyinfo))
ks_wlan_update_phy_information(priv);
- }
+
vwrq->value = ((priv->current_rate) & RATE_MASK) * 500000;
if (priv->reg.tx_rate == TX_RATE_FIXED)
vwrq->fixed = 1;
@@ -752,15 +747,15 @@ static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info,
(struct ks_wlan_private *)netdev_priv(dev);
int rthr = vwrq->value;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
if (vwrq->disabled)
rthr = 2347;
- if ((rthr < 0) || (rthr > 2347)) {
+ if ((rthr < 0) || (rthr > 2347))
return -EINVAL;
- }
+
priv->reg.rts = rthr;
priv->need_commit |= SME_RTS;
@@ -775,9 +770,9 @@ static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
vwrq->value = priv->reg.rts;
vwrq->disabled = (vwrq->value >= 2347);
@@ -796,15 +791,15 @@ static int ks_wlan_set_frag(struct net_device *dev,
(struct ks_wlan_private *)netdev_priv(dev);
int fthr = vwrq->value;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
if (vwrq->disabled)
fthr = 2346;
- if ((fthr < 256) || (fthr > 2346)) {
+ if ((fthr < 256) || (fthr > 2346))
return -EINVAL;
- }
+
fthr &= ~0x1; /* Get an even value - is it really needed ??? */
priv->reg.fragment = fthr;
priv->need_commit |= SME_FRAG;
@@ -821,9 +816,9 @@ static int ks_wlan_get_frag(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
vwrq->value = priv->reg.fragment;
vwrq->disabled = (vwrq->value >= 2346);
@@ -835,7 +830,7 @@ static int ks_wlan_get_frag(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Wireless Handler : set Mode of Operation */
static int ks_wlan_set_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
@@ -843,9 +838,9 @@ static int ks_wlan_set_mode(struct net_device *dev,
DPRINTK(2, "mode=%d\n", *uwrq);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
switch (*uwrq) {
case IW_MODE_ADHOC:
@@ -871,15 +866,14 @@ static int ks_wlan_set_mode(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Wireless Handler : get Mode of Operation */
static int ks_wlan_get_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
/* If not managed, assume it's ad-hoc */
@@ -906,16 +900,15 @@ static int ks_wlan_set_encode(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- wep_key_t key;
+ struct wep_key key;
int index = (dwrq->flags & IW_ENCODE_INDEX);
int current_index = priv->reg.wep_index;
int i;
DPRINTK(2, "flags=%04X\n", dwrq->flags);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
/* index check */
@@ -959,9 +952,9 @@ static int ks_wlan_set_encode(struct net_device *dev,
}
/* Send the key to the card */
priv->reg.wep_key[index].size = key.len;
- for (i = 0; i < (priv->reg.wep_key[index].size); i++) {
+ for (i = 0; i < (priv->reg.wep_key[index].size); i++)
priv->reg.wep_key[index].val[i] = key.key[i];
- }
+
priv->need_commit |= (SME_WEP_VAL1 << index);
priv->reg.wep_index = index;
priv->need_commit |= SME_WEP_INDEX;
@@ -973,9 +966,9 @@ static int ks_wlan_set_encode(struct net_device *dev,
priv->reg.wep_key[2].size = 0;
priv->reg.wep_key[3].size = 0;
priv->reg.privacy_invoked = 0x00;
- if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) {
+ if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY)
priv->need_commit |= SME_MODE_SET;
- }
+
priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
wep_on_off = WEP_OFF;
priv->need_commit |= SME_WEP_FLAG;
@@ -997,14 +990,14 @@ static int ks_wlan_set_encode(struct net_device *dev,
priv->need_commit |= SME_WEP_FLAG;
if (dwrq->flags & IW_ENCODE_OPEN) {
- if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) {
+ if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY)
priv->need_commit |= SME_MODE_SET;
- }
+
priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
} else if (dwrq->flags & IW_ENCODE_RESTRICTED) {
- if (priv->reg.authenticate_type == AUTH_TYPE_OPEN_SYSTEM) {
+ if (priv->reg.authenticate_type == AUTH_TYPE_OPEN_SYSTEM)
priv->need_commit |= SME_MODE_SET;
- }
+
priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY;
}
// return -EINPROGRESS; /* Call commit handler */
@@ -1026,9 +1019,9 @@ static int ks_wlan_get_encode(struct net_device *dev,
char zeros[16];
int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
dwrq->flags = IW_ENCODE_DISABLED;
@@ -1056,9 +1049,8 @@ static int ks_wlan_get_encode(struct net_device *dev,
/* Copy the key to the user buffer */
if ((index >= 0) && (index < 4))
dwrq->length = priv->reg.wep_key[index].size;
- if (dwrq->length > 16) {
+ if (dwrq->length > 16)
dwrq->length = 0;
- }
#if 1 /* IW_ENCODE_NOKEY; */
if (dwrq->length) {
if ((index >= 0) && (index < 4))
@@ -1086,9 +1078,8 @@ static int ks_wlan_get_txpow(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
/* Not Support */
@@ -1113,9 +1104,8 @@ static int ks_wlan_get_retry(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
/* Not Support */
@@ -1139,9 +1129,9 @@ static int ks_wlan_get_range(struct net_device *dev,
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
dwrq->length = sizeof(struct iw_range);
memset(range, 0, sizeof(*range));
@@ -1267,9 +1257,9 @@ static int ks_wlan_set_power(struct net_device *dev,
(struct ks_wlan_private *)netdev_priv(dev);
short enabled;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
enabled = vwrq->disabled ? 0 : 1;
if (enabled == 0) { /* 0 */
@@ -1301,9 +1291,8 @@ static int ks_wlan_get_power(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (priv->reg.powermgt > 0)
vwrq->disabled = 0;
@@ -1322,9 +1311,8 @@ static int ks_wlan_get_iwstats(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
vwrq->qual = 0; /* not supported */
vwrq->level = priv->wstats.qual.level;
@@ -1372,9 +1360,8 @@ static int ks_wlan_get_aplist(struct net_device *dev,
int i;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
for (i = 0; i < priv->aplist.size; i++) {
memcpy(address[i].sa_data, &(priv->aplist.ap[i].bssid[0]),
@@ -1404,11 +1391,11 @@ static int ks_wlan_set_scan(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
struct iw_scan_req *req = NULL;
+
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
/* specified SSID SCAN */
@@ -1598,11 +1585,11 @@ static int ks_wlan_get_scan(struct net_device *dev,
(struct ks_wlan_private *)netdev_priv(dev);
int i;
char *current_ev = extra;
+
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (priv->sme_i.sme_flag & SME_AP_SCAN) {
DPRINTK(2, "flag AP_SCAN\n");
@@ -1675,9 +1662,8 @@ static int ks_wlan_set_genie(struct net_device *dev,
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
return 0;
// return -EOPNOTSUPP;
@@ -1696,26 +1682,23 @@ static int ks_wlan_set_auth_mode(struct net_device *dev,
DPRINTK(2, "index=%d:value=%08X\n", index, value);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
switch (index) {
case IW_AUTH_WPA_VERSION: /* 0 */
switch (value) {
case IW_AUTH_WPA_VERSION_DISABLED:
priv->wpa.version = value;
- if (priv->wpa.rsn_enabled) {
+ if (priv->wpa.rsn_enabled)
priv->wpa.rsn_enabled = 0;
- }
priv->need_commit |= SME_RSN;
break;
case IW_AUTH_WPA_VERSION_WPA:
case IW_AUTH_WPA_VERSION_WPA2:
priv->wpa.version = value;
- if (!(priv->wpa.rsn_enabled)) {
+ if (!(priv->wpa.rsn_enabled))
priv->wpa.rsn_enabled = 1;
- }
priv->need_commit |= SME_RSN;
break;
default:
@@ -1832,11 +1815,11 @@ static int ks_wlan_get_auth_mode(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
int index = (vwrq->flags & IW_AUTH_INDEX);
+
DPRINTK(2, "index=%d\n", index);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
/* WPA (not used ?? wpa_supplicant) */
@@ -1886,18 +1869,17 @@ static int ks_wlan_set_encode_ext(struct net_device *dev,
DPRINTK(2, "flags=%04X:: ext_flags=%08X\n", dwrq->flags,
enc->ext_flags);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
if (index < 1 || index > 4)
return -EINVAL;
else
index--;
- if (dwrq->flags & IW_ENCODE_DISABLED) {
+ if (dwrq->flags & IW_ENCODE_DISABLED)
priv->wpa.key[index].key_len = 0;
- }
if (enc) {
priv->wpa.key[index].ext_flags = enc->ext_flags;
@@ -1986,9 +1968,8 @@ static int ks_wlan_get_encode_ext(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
/* WPA (not used ?? wpa_supplicant)
@@ -2015,13 +1996,13 @@ static int ks_wlan_set_pmksa(struct net_device *dev,
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
- if (!extra) {
+ if (!extra)
return -EINVAL;
- }
+
pmksa = (struct iw_pmksa *)extra;
DPRINTK(2, "cmd=%d\n", pmksa->cmd);
@@ -2141,16 +2122,16 @@ static struct iw_statistics *ks_get_wireless_stats(struct net_device *dev)
/*------------------------------------------------------------------*/
/* Private handler : set stop request */
static int ks_wlan_set_stop_request(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
if (!(*uwrq))
return -EINVAL;
@@ -2173,15 +2154,14 @@ static int ks_wlan_set_mlme(struct net_device *dev,
DPRINTK(2, ":%d :%d\n", mlme->cmd, mlme->reason_code);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
- if (mlme->reason_code == WLAN_REASON_MIC_FAILURE) {
+ if (mlme->reason_code == WLAN_REASON_MIC_FAILURE)
return 0;
- }
case IW_MLME_DISASSOC:
mode = 1;
return ks_wlan_set_stop_request(dev, NULL, &mode, NULL);
@@ -2207,14 +2187,14 @@ static int ks_wlan_get_firmware_version(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set force disconnect status */
static int ks_wlan_set_detach(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
if (*uwrq == CONNECT_STATUS) { /* 0 */
priv->connect_status &= ~FORCE_DISCONNECT;
@@ -2232,14 +2212,14 @@ static int ks_wlan_set_detach(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get force disconnect status */
static int ks_wlan_get_detach(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
*uwrq = ((priv->connect_status & FORCE_DISCONNECT) ? 1 : 0);
return 0;
@@ -2248,14 +2228,14 @@ static int ks_wlan_get_detach(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get connect status */
static int ks_wlan_get_connect(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
*uwrq = (priv->connect_status & CONNECT_STATUS_MASK);
return 0;
@@ -2265,15 +2245,15 @@ static int ks_wlan_get_connect(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set preamble */
static int ks_wlan_set_preamble(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
if (*uwrq == LONG_PREAMBLE) { /* 0 */
priv->reg.preamble = LONG_PREAMBLE;
@@ -2290,15 +2270,15 @@ static int ks_wlan_set_preamble(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get preamble */
static int ks_wlan_get_preamble(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
*uwrq = priv->reg.preamble;
return 0;
@@ -2307,15 +2287,15 @@ static int ks_wlan_get_preamble(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set power save mode */
static int ks_wlan_set_powermgt(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
if (*uwrq == POWMGT_ACTIVE_MODE) { /* 0 */
priv->reg.powermgt = POWMGT_ACTIVE_MODE;
@@ -2340,15 +2320,15 @@ static int ks_wlan_set_powermgt(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get power save made */
static int ks_wlan_get_powermgt(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
*uwrq = priv->reg.powermgt;
return 0;
@@ -2357,15 +2337,14 @@ static int ks_wlan_get_powermgt(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set scan type */
static int ks_wlan_set_scan_type(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (*uwrq == ACTIVE_SCAN) { /* 0 */
priv->reg.scan_type = ACTIVE_SCAN;
@@ -2380,15 +2359,14 @@ static int ks_wlan_set_scan_type(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get scan type */
static int ks_wlan_get_scan_type(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
*uwrq = priv->reg.scan_type;
return 0;
@@ -2404,9 +2382,8 @@ static int ks_wlan_data_write(struct net_device *dev,
struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
unsigned char *wbuff = NULL;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
wbuff = (unsigned char *)kmalloc(dwrq->length, GFP_ATOMIC);
if (!wbuff)
@@ -2428,9 +2405,8 @@ static int ks_wlan_data_read(struct net_device *dev,
struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
unsigned short read_length;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (!atomic_read(&priv->event_count)) {
if (priv->dev_state < DEVICE_STATE_BOOT) { /* Remove device */
@@ -2488,9 +2464,8 @@ static int ks_wlan_get_wep_ascii(struct net_device *dev,
int i, j, len = 0;
char tmp[WEP_ASCII_BUFF_SIZE];
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
strcpy(tmp, " WEP keys ASCII \n");
len += strlen(" WEP keys ASCII \n");
@@ -2531,19 +2506,18 @@ static int ks_wlan_get_wep_ascii(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set beacon lost count */
static int ks_wlan_set_beacon_lost(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
- if (*uwrq >= BEACON_LOST_COUNT_MIN && *uwrq <= BEACON_LOST_COUNT_MAX) {
+ if (*uwrq >= BEACON_LOST_COUNT_MIN && *uwrq <= BEACON_LOST_COUNT_MAX)
priv->reg.beacon_lost_count = *uwrq;
- } else
+ else
return -EINVAL;
if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
@@ -2556,15 +2530,14 @@ static int ks_wlan_set_beacon_lost(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get beacon lost count */
static int ks_wlan_get_beacon_lost(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
*uwrq = priv->reg.beacon_lost_count;
return 0;
@@ -2573,15 +2546,14 @@ static int ks_wlan_get_beacon_lost(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set phy type */
static int ks_wlan_set_phy_type(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (*uwrq == D_11B_ONLY_MODE) { /* 0 */
priv->reg.phy_type = D_11B_ONLY_MODE;
@@ -2599,15 +2571,14 @@ static int ks_wlan_set_phy_type(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get phy type */
static int ks_wlan_get_phy_type(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
*uwrq = priv->reg.phy_type;
return 0;
@@ -2616,15 +2587,14 @@ static int ks_wlan_get_phy_type(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set cts mode */
static int ks_wlan_set_cts_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (*uwrq == CTS_MODE_FALSE) { /* 0 */
priv->reg.cts_mode = CTS_MODE_FALSE;
@@ -2644,15 +2614,14 @@ static int ks_wlan_set_cts_mode(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get cts mode */
static int ks_wlan_get_cts_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
*uwrq = priv->reg.cts_mode;
return 0;
@@ -2662,7 +2631,7 @@ static int ks_wlan_get_cts_mode(struct net_device *dev,
/* Private handler : set sleep mode */
static int ks_wlan_set_sleep_mode(struct net_device *dev,
struct iw_request_info *info,
- __u32 * uwrq, char *extra)
+ __u32 *uwrq, char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
@@ -2692,7 +2661,7 @@ static int ks_wlan_set_sleep_mode(struct net_device *dev,
/* Private handler : get sleep mode */
static int ks_wlan_get_sleep_mode(struct net_device *dev,
struct iw_request_info *info,
- __u32 * uwrq, char *extra)
+ __u32 *uwrq, char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
@@ -2708,16 +2677,15 @@ static int ks_wlan_get_sleep_mode(struct net_device *dev,
/* Private handler : set phy information timer */
static int ks_wlan_set_phy_information_timer(struct net_device *dev,
struct iw_request_info *info,
- __u32 * uwrq, char *extra)
+ __u32 *uwrq, char *extra)
{
struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (*uwrq >= 0 && *uwrq <= 0xFFFF) /* 0-65535 */
- priv->reg.phy_info_timer = (uint16_t) * uwrq;
+ priv->reg.phy_info_timer = (uint16_t)*uwrq;
else
return -EINVAL;
@@ -2730,13 +2698,12 @@ static int ks_wlan_set_phy_information_timer(struct net_device *dev,
/* Private handler : get phy information timer */
static int ks_wlan_get_phy_information_timer(struct net_device *dev,
struct iw_request_info *info,
- __u32 * uwrq, char *extra)
+ __u32 *uwrq, char *extra)
{
struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
*uwrq = priv->reg.phy_info_timer;
return 0;
@@ -2747,16 +2714,15 @@ static int ks_wlan_get_phy_information_timer(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set WPS enable */
static int ks_wlan_set_wps_enable(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (*uwrq == 0 || *uwrq == 1)
priv->wps.wps_enabled = *uwrq;
@@ -2771,16 +2737,15 @@ static int ks_wlan_set_wps_enable(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get WPS enable */
static int ks_wlan_get_wps_enable(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
*uwrq = priv->wps.wps_enabled;
netdev_info(dev, "return=%d\n", *uwrq);
@@ -2801,16 +2766,14 @@ static int ks_wlan_set_wps_probe_req(struct net_device *dev,
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
DPRINTK(2, "dwrq->length=%d\n", dwrq->length);
/* length check */
- if (p[1] + 2 != dwrq->length || dwrq->length > 256) {
+ if (p[1] + 2 != dwrq->length || dwrq->length > 256)
return -EINVAL;
- }
priv->wps.ielen = p[1] + 2 + 1; /* IE header + IE + sizeof(len) */
len = p[1] + 2; /* IE header + IE */
@@ -2833,14 +2796,14 @@ static int ks_wlan_set_wps_probe_req(struct net_device *dev,
/* Private handler : get WPS probe req */
static int ks_wlan_get_wps_probe_req(struct net_device *dev,
struct iw_request_info *info,
- __u32 * uwrq, char *extra)
+ __u32 *uwrq, char *extra)
{
struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
return 0;
}
@@ -2850,18 +2813,17 @@ static int ks_wlan_get_wps_probe_req(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set tx gain control value */
static int ks_wlan_set_tx_gain(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (*uwrq >= 0 && *uwrq <= 0xFF) /* 0-255 */
- priv->gain.TxGain = (uint8_t) * uwrq;
+ priv->gain.TxGain = (uint8_t)*uwrq;
else
return -EINVAL;
@@ -2877,15 +2839,14 @@ static int ks_wlan_set_tx_gain(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get tx gain control value */
static int ks_wlan_get_tx_gain(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
*uwrq = priv->gain.TxGain;
hostif_sme_enqueue(priv, SME_GET_GAIN);
@@ -2895,18 +2856,17 @@ static int ks_wlan_get_tx_gain(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set rx gain control value */
static int ks_wlan_set_rx_gain(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (*uwrq >= 0 && *uwrq <= 0xFF) /* 0-255 */
- priv->gain.RxGain = (uint8_t) * uwrq;
+ priv->gain.RxGain = (uint8_t)*uwrq;
else
return -EINVAL;
@@ -2922,15 +2882,14 @@ static int ks_wlan_set_rx_gain(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get rx gain control value */
static int ks_wlan_get_rx_gain(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
*uwrq = priv->gain.RxGain;
hostif_sme_enqueue(priv, SME_GET_GAIN);
@@ -2941,17 +2900,16 @@ static int ks_wlan_get_rx_gain(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set region value */
static int ks_wlan_set_region(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (*uwrq >= 0x9 && *uwrq <= 0xF) /* 0x9-0xf */
- priv->region = (uint8_t) * uwrq;
+ priv->region = (uint8_t)*uwrq;
else
return -EINVAL;
@@ -2963,7 +2921,7 @@ static int ks_wlan_set_region(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get eeprom checksum result */
static int ks_wlan_get_eeprom_cksum(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
@@ -3090,7 +3048,7 @@ static void print_hif_event(struct net_device *dev, int event)
/*------------------------------------------------------------------*/
/* Private handler : get host command history */
static int ks_wlan_hostt(struct net_device *dev, struct iw_request_info *info,
- __u32 * uwrq, char *extra)
+ __u32 *uwrq, char *extra)
{
int i, event;
struct ks_wlan_private *priv =
@@ -3293,6 +3251,7 @@ static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
{
int rc = 0;
struct iwreq *wrq = (struct iwreq *)rq;
+
switch (cmd) {
case SIOCIWFIRSTPRIV + 20: /* KS_WLAN_SET_STOP_REQ */
rc = ks_wlan_set_stop_request(dev, NULL, &(wrq->u.mode), NULL);
@@ -3311,9 +3270,8 @@ struct net_device_stats *ks_wlan_get_stats(struct net_device *dev)
{
struct ks_wlan_private *priv = netdev_priv(dev);
- if (priv->dev_state < DEVICE_STATE_READY) {
+ if (priv->dev_state < DEVICE_STATE_READY)
return NULL; /* not finished initialize */
- }
return &priv->nstats;
}
@@ -3323,6 +3281,7 @@ int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
{
struct ks_wlan_private *priv = netdev_priv(dev);
struct sockaddr *mac_addr = (struct sockaddr *)addr;
+
if (netif_running(dev))
return -EBUSY;
memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
@@ -3330,10 +3289,7 @@ int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
priv->mac_address_valid = 0;
hostif_sme_enqueue(priv, SME_MACADDRESS_SET_REQUEST);
- netdev_info(dev,
- "ks_wlan: MAC ADDRESS = %02x:%02x:%02x:%02x:%02x:%02x\n",
- priv->eth_addr[0], priv->eth_addr[1], priv->eth_addr[2],
- priv->eth_addr[3], priv->eth_addr[4], priv->eth_addr[5]);
+ netdev_info(dev, "ks_wlan: MAC ADDRESS = %pM\n", priv->eth_addr);
return 0;
}
@@ -3344,9 +3300,8 @@ void ks_wlan_tx_timeout(struct net_device *dev)
DPRINTK(1, "head(%d) tail(%d)!!\n", priv->tx_dev.qhead,
priv->tx_dev.qtail);
- if (!netif_queue_stopped(dev)) {
+ if (!netif_queue_stopped(dev))
netif_stop_queue(dev);
- }
priv->nstats.tx_errors++;
netif_wake_queue(dev);
}
@@ -3375,9 +3330,8 @@ int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_trans_update(dev);
DPRINTK(4, "rc=%d\n", rc);
- if (rc) {
+ if (rc)
rc = 0;
- }
return rc;
}
@@ -3410,9 +3364,8 @@ void ks_wlan_set_multicast_list(struct net_device *dev)
struct ks_wlan_private *priv = netdev_priv(dev);
DPRINTK(4, "\n");
- if (priv->dev_state < DEVICE_STATE_READY) {
+ if (priv->dev_state < DEVICE_STATE_READY)
return; /* not finished initialize */
- }
hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST);
}
@@ -3426,8 +3379,8 @@ int ks_wlan_open(struct net_device *dev)
if (!priv->mac_address_valid) {
netdev_err(dev, "ks_wlan : %s Not READY !!\n", dev->name);
return -EBUSY;
- } else
- netif_start_queue(dev);
+ }
+ netif_start_queue(dev);
return 0;
}
@@ -3474,9 +3427,8 @@ int ks_wlan_net_start(struct net_device *dev)
/* phy information update timer */
atomic_set(&update_phyinfo, 0);
- init_timer(&update_phyinfo_timer);
- update_phyinfo_timer.function = ks_wlan_update_phyinfo_timeout;
- update_phyinfo_timer.data = (unsigned long)priv;
+ setup_timer(&update_phyinfo_timer, ks_wlan_update_phyinfo_timeout,
+ (unsigned long)priv);
/* dummy address set */
memcpy(priv->eth_addr, dummy_addr, ETH_ALEN);
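
ks_wlan_set_freq() above maps an iw_freq value onto a channel by scanning frequency_list; the hunk in this diff shows only the first entries of that table, so the remaining values below are the standard 2.4 GHz channel frequencies and are an assumption here. A standalone sketch of the lookup, taking MHz directly rather than the iw_freq mantissa the driver divides down to:

#include <stdio.h>

/* First entries as in ks_wlan_net.c; the rest assume the usual 2.4 GHz map. */
static const long frequency_list[] = {
	2412, 2417, 2422, 2427, 2432, 2437, 2442,
	2447, 2452, 2457, 2462, 2467, 2472, 2484
};

/* Mirrors the while loop in ks_wlan_set_freq(): returns channel 1..14,
 * or -1 if the frequency is not in the table. */
static int freq_to_channel(int mhz)
{
	int c = 0;

	while (c < 14 && mhz != frequency_list[c])
		c++;
	return (c < 14) ? c + 1 : -1;
}

int main(void)
{
	printf("2437 MHz -> channel %d\n", freq_to_channel(2437));	/* 6 */
	printf("2484 MHz -> channel %d\n", freq_to_channel(2484));	/* 14 */
	return 0;
}
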
diff --git a/drivers/staging/ks7010/michael_mic.c b/drivers/staging/ks7010/michael_mic.c
index 78ae2b8fb7f3..2f535c08e172 100644
--- a/drivers/staging/ks7010/michael_mic.c
+++ b/drivers/staging/ks7010/michael_mic.c
@@ -14,10 +14,11 @@
#include "michael_mic.h"
// Rotation functions on 32 bit values
-#define ROL32( A, n ) ( ((A) << (n)) | ( ((A)>>(32-(n))) & ( (1UL << (n)) - 1 ) ) )
-#define ROR32( A, n ) ROL32( (A), 32-(n) )
+#define ROL32(A, n) (((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1)))
+#define ROR32(A, n) ROL32((A), 32-(n))
// Convert from Byte[] to UInt32 in a portable way
-#define getUInt32( A, B ) (uint32_t)(A[B+0] << 0) + (A[B+1] << 8) + (A[B+2] << 16) + (A[B+3] << 24)
+#define getUInt32(A, B) ((uint32_t)(A[B+0] << 0) \
+ + (A[B+1] << 8) + (A[B+2] << 16) + (A[B+3] << 24))
// Convert from UInt32 to Byte[] in a portable way
#define putUInt32(A, B, C) \
@@ -48,21 +49,22 @@ void MichaelInitializeFunction(struct michel_mic_t *Mic, uint8_t *key)
}
#define MichaelBlockFunction(L, R) \
-do{ \
- R ^= ROL32( L, 17 ); \
+do { \
+ R ^= ROL32(L, 17); \
L += R; \
R ^= ((L & 0xff00ff00) >> 8) | ((L & 0x00ff00ff) << 8); \
L += R; \
- R ^= ROL32( L, 3 ); \
+ R ^= ROL32(L, 3); \
L += R; \
- R ^= ROR32( L, 2 ); \
+ R ^= ROR32(L, 2); \
L += R; \
-}while(0)
+} while (0)
static
void MichaelAppend(struct michel_mic_t *Mic, uint8_t *src, int nBytes)
{
int addlen;
+
if (Mic->nBytesInM) {
addlen = 4 - Mic->nBytesInM;
if (addlen > nBytes)
@@ -96,7 +98,8 @@ void MichaelAppend(struct michel_mic_t *Mic, uint8_t *src, int nBytes)
static
void MichaelGetMIC(struct michel_mic_t *Mic, uint8_t *dst)
{
- uint8_t *data = Mic->M;
+ u8 *data = Mic->M;
+
switch (Mic->nBytesInM) {
case 0:
Mic->L ^= 0x5a;
@@ -122,11 +125,11 @@ void MichaelGetMIC(struct michel_mic_t *Mic, uint8_t *dst)
MichaelClear(Mic);
}
-void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t *Key,
- uint8_t *Data, int Len, uint8_t priority,
- uint8_t *Result)
+void MichaelMICFunction(struct michel_mic_t *Mic, u8 *Key,
+ u8 *Data, int Len, u8 priority,
+ u8 *Result)
{
- uint8_t pad_data[4] = { priority, 0, 0, 0 };
+ u8 pad_data[4] = { priority, 0, 0, 0 };
// Compute the MIC value
/*
* IEEE802.11i page 47
diff --git a/drivers/staging/ks7010/michael_mic.h b/drivers/staging/ks7010/michael_mic.h
index efaa21788fc7..248f849fc4a5 100644
--- a/drivers/staging/ks7010/michael_mic.h
+++ b/drivers/staging/ks7010/michael_mic.h
@@ -11,15 +11,15 @@
/* MichelMIC routine define */
struct michel_mic_t {
- uint32_t K0; // Key
- uint32_t K1; // Key
- uint32_t L; // Current state
- uint32_t R; // Current state
- uint8_t M[4]; // Message accumulator (single word)
- int nBytesInM; // # bytes in M
- uint8_t Result[8];
+ u32 K0; // Key
+ u32 K1; // Key
+ u32 L; // Current state
+ u32 R; // Current state
+ u8 M[4]; // Message accumulator (single word)
+ int nBytesInM; // # bytes in M
+ u8 Result[8];
};
-void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t *Key,
- uint8_t *Data, int Len, uint8_t priority,
- uint8_t *Result);
+void MichaelMICFunction(struct michel_mic_t *Mic, u8 *Key,
+ u8 *Data, int Len, u8 priority,
+ u8 *Result);
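
The michael_mic changes above are again whitespace and type-spelling cleanups; the rotation and byte-load macros compute exactly what they did before. A quick userspace check (macro bodies copied from the patched michael_mic.c, test values picked here for illustration):

#include <stdint.h>
#include <stdio.h>

/* Copied from the cleaned-up michael_mic.c. */
#define ROL32(A, n) (((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1)))
#define ROR32(A, n) ROL32((A), 32-(n))
#define getUInt32(A, B) ((uint32_t)(A[B+0] << 0) \
	+ (A[B+1] << 8) + (A[B+2] << 16) + (A[B+3] << 24))

int main(void)
{
	uint32_t x = 0x80000001u;
	uint8_t bytes[4] = { 0x01, 0x02, 0x03, 0x04 };

	/* Rotate left/right by one bit. */
	printf("ROL32 -> %08x\n", (uint32_t)ROL32(x, 1));	/* 00000003 */
	printf("ROR32 -> %08x\n", (uint32_t)ROR32(x, 1));	/* c0000000 */
	/* Little-endian load, as used on the MIC key and message words. */
	printf("getUInt32 -> %08x\n", (uint32_t)getUInt32(bytes, 0));	/* 04030201 */
	return 0;
}
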
diff --git a/drivers/staging/lustre/include/linux/libcfs/curproc.h b/drivers/staging/lustre/include/linux/libcfs/curproc.h
index be0675d8ff5e..1ea27c9e3708 100644
--- a/drivers/staging/lustre/include/linux/libcfs/curproc.h
+++ b/drivers/staging/lustre/include/linux/libcfs/curproc.h
@@ -53,7 +53,7 @@
#define current_pid() (current->pid)
#define current_comm() (current->comm)
-typedef __u32 cfs_cap_t;
+typedef u32 cfs_cap_t;
#define CFS_CAP_CHOWN 0
#define CFS_CAP_DAC_OVERRIDE 1
@@ -65,15 +65,15 @@ typedef __u32 cfs_cap_t;
#define CFS_CAP_SYS_BOOT 23
#define CFS_CAP_SYS_RESOURCE 24
-#define CFS_CAP_FS_MASK ((1 << CFS_CAP_CHOWN) | \
- (1 << CFS_CAP_DAC_OVERRIDE) | \
- (1 << CFS_CAP_DAC_READ_SEARCH) | \
- (1 << CFS_CAP_FOWNER) | \
- (1 << CFS_CAP_FSETID) | \
- (1 << CFS_CAP_LINUX_IMMUTABLE) | \
- (1 << CFS_CAP_SYS_ADMIN) | \
- (1 << CFS_CAP_SYS_BOOT) | \
- (1 << CFS_CAP_SYS_RESOURCE))
+#define CFS_CAP_FS_MASK (BIT(CFS_CAP_CHOWN) | \
+ BIT(CFS_CAP_DAC_OVERRIDE) | \
+ BIT(CFS_CAP_DAC_READ_SEARCH) | \
+ BIT(CFS_CAP_FOWNER) | \
+ BIT(CFS_CAP_FSETID) | \
+ BIT(CFS_CAP_LINUX_IMMUTABLE) | \
+ BIT(CFS_CAP_SYS_ADMIN) | \
+ BIT(CFS_CAP_SYS_BOOT) | \
+ BIT(CFS_CAP_SYS_RESOURCE))
void cfs_cap_raise(cfs_cap_t cap);
void cfs_cap_lower(cfs_cap_t cap);
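Aside on the CFS_CAP_FS_MASK hunk above: BIT(n) is the kernel's (1UL << (n)), so for these small bit numbers the conversion is behaviour-preserving. A standalone sketch with a local stand-in for BIT() (illustration only; the constants are the two shown in the header):

	#include <assert.h>

	#define BIT(nr)			(1UL << (nr))	/* local stand-in for the kernel macro */
	#define CFS_CAP_CHOWN		0
	#define CFS_CAP_SYS_RESOURCE	24

	int main(void)
	{
		unsigned long old_style = (1 << CFS_CAP_CHOWN) | (1 << CFS_CAP_SYS_RESOURCE);
		unsigned long new_style = BIT(CFS_CAP_CHOWN) | BIT(CFS_CAP_SYS_RESOURCE);

		/* Same bits either way; only the intermediate type (int vs unsigned long) differs. */
		assert(old_style == new_style);
		return 0;
	}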
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 3b92d38d37e2..cc2c0e97bb7e 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -61,7 +61,7 @@
sigset_t cfs_block_allsigs(void);
sigset_t cfs_block_sigs(unsigned long sigs);
sigset_t cfs_block_sigsinv(unsigned long sigs);
-void cfs_restore_sigs(sigset_t);
+void cfs_restore_sigs(sigset_t sigset);
void cfs_clear_sigpending(void);
/*
@@ -71,7 +71,7 @@ void cfs_clear_sigpending(void);
/* returns a random 32-bit integer */
unsigned int cfs_rand(void);
/* seed the generator */
-void cfs_srand(unsigned int, unsigned int);
+void cfs_srand(unsigned int seed1, unsigned int seed2);
void cfs_get_random_bytes(void *buf, int size);
#include "libcfs_debug.h"
@@ -125,7 +125,6 @@ extern struct miscdevice libcfs_dev;
/**
* The path of debug log dump upcall script.
*/
-extern char lnet_upcall[1024];
extern char lnet_debug_log_upcall[1024];
extern struct cfs_wi_sched *cfs_sched_rehash;
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
index 81d8079e3b5e..6d8752a368fa 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
@@ -92,7 +92,7 @@ struct cfs_cpt_table {
/* node mask */
nodemask_t ctb_nodemask;
/* version */
- __u64 ctb_version;
+ u64 ctb_version;
};
static inline cpumask_t *
@@ -211,7 +211,7 @@ int cfs_cpu_ht_nsiblings(int cpu);
*/
void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
/*
- * destory per-cpu-partition variable
+ * destroy per-cpu-partition variable
*/
void cfs_percpt_free(void *vars);
int cfs_percpt_number(void *vars);
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
index 02be7d7608a5..8f34c5ddc63e 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
@@ -29,10 +29,12 @@
#define _LIBCFS_CRYPTO_H
struct cfs_crypto_hash_type {
- char *cht_name; /**< hash algorithm name, equal to
- * format name for crypto api */
- unsigned int cht_key; /**< init key by default (valid for
- * 4 bytes context like crc32, adler */
+ char *cht_name; /*< hash algorithm name, equal to
+ * format name for crypto api
+ */
+ unsigned int cht_key; /*< init key by default (valid for
+ * 4 bytes context like crc32, adler
+ */
unsigned int cht_size; /**< hash digest size */
};
@@ -135,7 +137,7 @@ static inline unsigned char cfs_crypto_hash_alg(const char *algname)
enum cfs_crypto_hash_alg hash_alg;
for (hash_alg = 0; hash_alg < CFS_HASH_ALG_MAX; hash_alg++)
- if (strcmp(hash_types[hash_alg].cht_name, algname) == 0)
+ if (!strcmp(hash_types[hash_alg].cht_name, algname))
return hash_alg;
return CFS_HASH_ALG_UNKNOWN;
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
index bdbbe934584c..fedb46dff696 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
@@ -39,8 +39,8 @@ extern int cfs_fail_err;
extern wait_queue_head_t cfs_race_waitq;
extern int cfs_race_state;
-int __cfs_fail_check_set(__u32 id, __u32 value, int set);
-int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set);
+int __cfs_fail_check_set(u32 id, u32 value, int set);
+int __cfs_fail_timeout_set(u32 id, u32 value, int ms, int set);
enum {
CFS_FAIL_LOC_NOSET = 0,
@@ -55,11 +55,11 @@ enum {
#define CFS_FAILED_BIT 30
/* CFS_FAILED is 0x40000000 */
-#define CFS_FAILED (1 << CFS_FAILED_BIT)
+#define CFS_FAILED BIT(CFS_FAILED_BIT)
#define CFS_FAIL_ONCE_BIT 31
/* CFS_FAIL_ONCE is 0x80000000 */
-#define CFS_FAIL_ONCE (1 << CFS_FAIL_ONCE_BIT)
+#define CFS_FAIL_ONCE BIT(CFS_FAIL_ONCE_BIT)
/* The following flags aren't made to be combined */
#define CFS_FAIL_SKIP 0x20000000 /* skip N times then fail */
@@ -69,14 +69,14 @@ enum {
#define CFS_FAULT 0x02000000 /* match any CFS_FAULT_CHECK */
-static inline bool CFS_FAIL_PRECHECK(__u32 id)
+static inline bool CFS_FAIL_PRECHECK(u32 id)
{
- return cfs_fail_loc != 0 &&
+ return cfs_fail_loc &&
((cfs_fail_loc & CFS_FAIL_MASK_LOC) == (id & CFS_FAIL_MASK_LOC) ||
- (cfs_fail_loc & id & CFS_FAULT));
+ (cfs_fail_loc & id & CFS_FAULT));
}
-static inline int cfs_fail_check_set(__u32 id, __u32 value,
+static inline int cfs_fail_check_set(u32 id, u32 value,
int set, int quiet)
{
int ret = 0;
@@ -103,28 +103,34 @@ static inline int cfs_fail_check_set(__u32 id, __u32 value,
#define CFS_FAIL_CHECK_QUIET(id) \
cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET, 1)
-/* If id hit cfs_fail_loc and cfs_fail_val == (-1 or value) return 1,
- * otherwise return 0 */
+/*
+ * If id hit cfs_fail_loc and cfs_fail_val == (-1 or value) return 1,
+ * otherwise return 0
+ */
#define CFS_FAIL_CHECK_VALUE(id, value) \
cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 0)
#define CFS_FAIL_CHECK_VALUE_QUIET(id, value) \
cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 1)
-/* If id hit cfs_fail_loc, cfs_fail_loc |= value and return 1,
- * otherwise return 0 */
+/*
+ * If id hit cfs_fail_loc, cfs_fail_loc |= value and return 1,
+ * otherwise return 0
+ */
#define CFS_FAIL_CHECK_ORSET(id, value) \
cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 0)
#define CFS_FAIL_CHECK_ORSET_QUIET(id, value) \
cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 1)
-/* If id hit cfs_fail_loc, cfs_fail_loc = value and return 1,
- * otherwise return 0 */
+/*
+ * If id hit cfs_fail_loc, cfs_fail_loc = value and return 1,
+ * otherwise return 0
+ */
#define CFS_FAIL_CHECK_RESET(id, value) \
cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 0)
#define CFS_FAIL_CHECK_RESET_QUIET(id, value) \
cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 1)
-static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
+static inline int cfs_fail_timeout_set(u32 id, u32 value, int ms, int set)
{
if (unlikely(CFS_FAIL_PRECHECK(id)))
return __cfs_fail_timeout_set(id, value, ms, set);
@@ -138,8 +144,10 @@ static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
#define CFS_FAIL_TIMEOUT_MS(id, ms) \
cfs_fail_timeout_set(id, 0, ms, CFS_FAIL_LOC_NOSET)
-/* If id hit cfs_fail_loc, cfs_fail_loc |= value and
- * sleep seconds or milliseconds */
+/*
+ * If id hit cfs_fail_loc, cfs_fail_loc |= value and
+ * sleep seconds or milliseconds
+ */
#define CFS_FAIL_TIMEOUT_ORSET(id, value, secs) \
cfs_fail_timeout_set(id, value, secs * 1000, CFS_FAIL_LOC_ORSET)
@@ -152,13 +160,14 @@ static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
#define CFS_FAULT_CHECK(id) \
CFS_FAIL_CHECK(CFS_FAULT | (id))
-/* The idea here is to synchronise two threads to force a race. The
+/*
+ * The idea here is to synchronise two threads to force a race. The
* first thread that calls this with a matching fail_loc is put to
* sleep. The next thread that calls with the same fail_loc wakes up
- * the first and continues. */
-static inline void cfs_race(__u32 id)
+ * the first and continues.
+ */
+static inline void cfs_race(u32 id)
{
-
if (CFS_FAIL_PRECHECK(id)) {
if (unlikely(__cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET))) {
int rc;
@@ -166,7 +175,7 @@ static inline void cfs_race(__u32 id)
cfs_race_state = 0;
CERROR("cfs_race id %x sleeping\n", id);
rc = wait_event_interruptible(cfs_race_waitq,
- cfs_race_state != 0);
+ !!cfs_race_state);
CERROR("cfs_fail_race id %x awake, rc=%d\n", id, rc);
} else {
CERROR("cfs_fail_race id %x waking\n", id);
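Aside on the cfs_race() comment above: the helper parks the first caller with a matching fail_loc until a second caller arrives and releases it. Outside the kernel the same hand-off can be sketched with a condition variable; this userspace analogy is an assumption for illustration only (the real code uses cfs_race_waitq, cfs_race_state and wait_event_interruptible()):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
	static int have_waiter;		/* a first thread is parked */
	static int released;		/* the parked thread may continue */

	static void race_point(const char *who)
	{
		pthread_mutex_lock(&lock);
		if (!have_waiter) {
			have_waiter = 1;
			printf("%s sleeping\n", who);	/* first arrival parks */
			while (!released)
				pthread_cond_wait(&cond, &lock);
			printf("%s awake\n", who);
		} else {
			printf("%s waking peer\n", who);	/* second arrival releases it */
			released = 1;
			pthread_cond_signal(&cond);
		}
		pthread_mutex_unlock(&lock);
	}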
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
index 6949a1846635..0cc2fc465c1a 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -57,8 +57,10 @@
/** disable debug */
#define CFS_HASH_DEBUG_NONE 0
-/** record hash depth and output to console when it's too deep,
- * computing overhead is low but consume more memory */
+/*
+ * record hash depth and output to console when it's too deep,
+ * computing overhead is low but consume more memory
+ */
#define CFS_HASH_DEBUG_1 1
/** expensive, check key validation */
#define CFS_HASH_DEBUG_2 2
@@ -87,8 +89,8 @@ union cfs_hash_lock {
*/
struct cfs_hash_bucket {
union cfs_hash_lock hsb_lock; /**< bucket lock */
- __u32 hsb_count; /**< current entries */
- __u32 hsb_version; /**< change version */
+ u32 hsb_count; /**< current entries */
+ u32 hsb_version; /**< change version */
unsigned int hsb_index; /**< index of bucket */
int hsb_depmax; /**< max depth on bucket */
long hsb_head[0]; /**< hash-head array */
@@ -123,38 +125,40 @@ enum cfs_hash_tag {
* . Some functions will be disabled with this flag, i.e:
* cfs_hash_for_each_empty, cfs_hash_rehash
*/
- CFS_HASH_NO_LOCK = 1 << 0,
+ CFS_HASH_NO_LOCK = BIT(0),
/** no bucket lock, use one spinlock to protect the whole hash */
- CFS_HASH_NO_BKTLOCK = 1 << 1,
+ CFS_HASH_NO_BKTLOCK = BIT(1),
/** rwlock to protect bucket */
- CFS_HASH_RW_BKTLOCK = 1 << 2,
+ CFS_HASH_RW_BKTLOCK = BIT(2),
/** spinlock to protect bucket */
- CFS_HASH_SPIN_BKTLOCK = 1 << 3,
+ CFS_HASH_SPIN_BKTLOCK = BIT(3),
/** always add new item to tail */
- CFS_HASH_ADD_TAIL = 1 << 4,
+ CFS_HASH_ADD_TAIL = BIT(4),
/** hash-table doesn't have refcount on item */
- CFS_HASH_NO_ITEMREF = 1 << 5,
+ CFS_HASH_NO_ITEMREF = BIT(5),
/** big name for param-tree */
- CFS_HASH_BIGNAME = 1 << 6,
+ CFS_HASH_BIGNAME = BIT(6),
/** track global count */
- CFS_HASH_COUNTER = 1 << 7,
+ CFS_HASH_COUNTER = BIT(7),
/** rehash item by new key */
- CFS_HASH_REHASH_KEY = 1 << 8,
+ CFS_HASH_REHASH_KEY = BIT(8),
/** Enable dynamic hash resizing */
- CFS_HASH_REHASH = 1 << 9,
+ CFS_HASH_REHASH = BIT(9),
/** can shrink hash-size */
- CFS_HASH_SHRINK = 1 << 10,
+ CFS_HASH_SHRINK = BIT(10),
/** assert hash is empty on exit */
- CFS_HASH_ASSERT_EMPTY = 1 << 11,
+ CFS_HASH_ASSERT_EMPTY = BIT(11),
/** record hlist depth */
- CFS_HASH_DEPTH = 1 << 12,
+ CFS_HASH_DEPTH = BIT(12),
/**
* rehash is always scheduled in a different thread, so current
* change on hash table is non-blocking
*/
- CFS_HASH_NBLK_CHANGE = 1 << 13,
- /** NB, we typed hs_flags as __u16, please change it
- * if you need to extend >=16 flags */
+ CFS_HASH_NBLK_CHANGE = BIT(13),
+ /**
+ * NB, we typed hs_flags as u16, please change it
+ * if you need to extend >=16 flags
+ */
};
/** most used attributes */
@@ -201,8 +205,10 @@ enum cfs_hash_tag {
*/
struct cfs_hash {
- /** serialize with rehash, or serialize all operations if
- * the hash-table has CFS_HASH_NO_BKTLOCK */
+ /**
+ * serialize with rehash, or serialize all operations if
+ * the hash-table has CFS_HASH_NO_BKTLOCK
+ */
union cfs_hash_lock hs_lock;
/** hash operations */
struct cfs_hash_ops *hs_ops;
@@ -215,31 +221,31 @@ struct cfs_hash {
/** total number of items on this hash-table */
atomic_t hs_count;
/** hash flags, see cfs_hash_tag for detail */
- __u16 hs_flags;
+ u16 hs_flags;
/** # of extra-bytes for bucket, for user saving extended attributes */
- __u16 hs_extra_bytes;
+ u16 hs_extra_bytes;
/** wants to iterate */
- __u8 hs_iterating;
+ u8 hs_iterating;
/** hash-table is dying */
- __u8 hs_exiting;
+ u8 hs_exiting;
/** current hash bits */
- __u8 hs_cur_bits;
+ u8 hs_cur_bits;
/** min hash bits */
- __u8 hs_min_bits;
+ u8 hs_min_bits;
/** max hash bits */
- __u8 hs_max_bits;
+ u8 hs_max_bits;
/** bits for rehash */
- __u8 hs_rehash_bits;
+ u8 hs_rehash_bits;
/** bits for each bucket */
- __u8 hs_bkt_bits;
+ u8 hs_bkt_bits;
/** resize min threshold */
- __u16 hs_min_theta;
+ u16 hs_min_theta;
/** resize max threshold */
- __u16 hs_max_theta;
+ u16 hs_max_theta;
/** resize count */
- __u32 hs_rehash_count;
+ u32 hs_rehash_count;
/** # of iterators (caller of cfs_hash_for_each_*) */
- __u32 hs_iterators;
+ u32 hs_iterators;
/** rehash workitem */
struct cfs_workitem hs_rehash_wi;
/** refcount on this hash table */
@@ -291,8 +297,8 @@ struct cfs_hash_hlist_ops {
struct cfs_hash_ops {
/** return hashed value from @key */
- unsigned (*hs_hash)(struct cfs_hash *hs, const void *key,
- unsigned mask);
+ unsigned int (*hs_hash)(struct cfs_hash *hs, const void *key,
+ unsigned int mask);
/** return key address of @hnode */
void * (*hs_key)(struct hlist_node *hnode);
/** copy key from @hnode to @key */
@@ -317,110 +323,112 @@ struct cfs_hash_ops {
/** total number of buckets in @hs */
#define CFS_HASH_NBKT(hs) \
- (1U << ((hs)->hs_cur_bits - (hs)->hs_bkt_bits))
+ BIT((hs)->hs_cur_bits - (hs)->hs_bkt_bits)
/** total number of buckets in @hs while rehashing */
#define CFS_HASH_RH_NBKT(hs) \
- (1U << ((hs)->hs_rehash_bits - (hs)->hs_bkt_bits))
+ BIT((hs)->hs_rehash_bits - (hs)->hs_bkt_bits)
/** number of hlist for in bucket */
-#define CFS_HASH_BKT_NHLIST(hs) (1U << (hs)->hs_bkt_bits)
+#define CFS_HASH_BKT_NHLIST(hs) BIT((hs)->hs_bkt_bits)
/** total number of hlist in @hs */
-#define CFS_HASH_NHLIST(hs) (1U << (hs)->hs_cur_bits)
+#define CFS_HASH_NHLIST(hs) BIT((hs)->hs_cur_bits)
/** total number of hlist in @hs while rehashing */
-#define CFS_HASH_RH_NHLIST(hs) (1U << (hs)->hs_rehash_bits)
+#define CFS_HASH_RH_NHLIST(hs) BIT((hs)->hs_rehash_bits)
static inline int
cfs_hash_with_no_lock(struct cfs_hash *hs)
{
/* caller will serialize all operations for this hash-table */
- return (hs->hs_flags & CFS_HASH_NO_LOCK) != 0;
+ return hs->hs_flags & CFS_HASH_NO_LOCK;
}
static inline int
cfs_hash_with_no_bktlock(struct cfs_hash *hs)
{
/* no bucket lock, one single lock to protect the hash-table */
- return (hs->hs_flags & CFS_HASH_NO_BKTLOCK) != 0;
+ return hs->hs_flags & CFS_HASH_NO_BKTLOCK;
}
static inline int
cfs_hash_with_rw_bktlock(struct cfs_hash *hs)
{
/* rwlock to protect hash bucket */
- return (hs->hs_flags & CFS_HASH_RW_BKTLOCK) != 0;
+ return hs->hs_flags & CFS_HASH_RW_BKTLOCK;
}
static inline int
cfs_hash_with_spin_bktlock(struct cfs_hash *hs)
{
/* spinlock to protect hash bucket */
- return (hs->hs_flags & CFS_HASH_SPIN_BKTLOCK) != 0;
+ return hs->hs_flags & CFS_HASH_SPIN_BKTLOCK;
}
static inline int
cfs_hash_with_add_tail(struct cfs_hash *hs)
{
- return (hs->hs_flags & CFS_HASH_ADD_TAIL) != 0;
+ return hs->hs_flags & CFS_HASH_ADD_TAIL;
}
static inline int
cfs_hash_with_no_itemref(struct cfs_hash *hs)
{
- /* hash-table doesn't keep refcount on item,
+ /*
+ * hash-table doesn't keep refcount on item,
* item can't be removed from hash unless it's
- * ZERO refcount */
- return (hs->hs_flags & CFS_HASH_NO_ITEMREF) != 0;
+ * ZERO refcount
+ */
+ return hs->hs_flags & CFS_HASH_NO_ITEMREF;
}
static inline int
cfs_hash_with_bigname(struct cfs_hash *hs)
{
- return (hs->hs_flags & CFS_HASH_BIGNAME) != 0;
+ return hs->hs_flags & CFS_HASH_BIGNAME;
}
static inline int
cfs_hash_with_counter(struct cfs_hash *hs)
{
- return (hs->hs_flags & CFS_HASH_COUNTER) != 0;
+ return hs->hs_flags & CFS_HASH_COUNTER;
}
static inline int
cfs_hash_with_rehash(struct cfs_hash *hs)
{
- return (hs->hs_flags & CFS_HASH_REHASH) != 0;
+ return hs->hs_flags & CFS_HASH_REHASH;
}
static inline int
cfs_hash_with_rehash_key(struct cfs_hash *hs)
{
- return (hs->hs_flags & CFS_HASH_REHASH_KEY) != 0;
+ return hs->hs_flags & CFS_HASH_REHASH_KEY;
}
static inline int
cfs_hash_with_shrink(struct cfs_hash *hs)
{
- return (hs->hs_flags & CFS_HASH_SHRINK) != 0;
+ return hs->hs_flags & CFS_HASH_SHRINK;
}
static inline int
cfs_hash_with_assert_empty(struct cfs_hash *hs)
{
- return (hs->hs_flags & CFS_HASH_ASSERT_EMPTY) != 0;
+ return hs->hs_flags & CFS_HASH_ASSERT_EMPTY;
}
static inline int
cfs_hash_with_depth(struct cfs_hash *hs)
{
- return (hs->hs_flags & CFS_HASH_DEPTH) != 0;
+ return hs->hs_flags & CFS_HASH_DEPTH;
}
static inline int
cfs_hash_with_nblk_change(struct cfs_hash *hs)
{
- return (hs->hs_flags & CFS_HASH_NBLK_CHANGE) != 0;
+ return hs->hs_flags & CFS_HASH_NBLK_CHANGE;
}
static inline int
@@ -434,14 +442,14 @@ static inline int
cfs_hash_is_rehashing(struct cfs_hash *hs)
{
/* rehash is launched */
- return hs->hs_rehash_bits != 0;
+ return !!hs->hs_rehash_bits;
}
static inline int
cfs_hash_is_iterating(struct cfs_hash *hs)
{
/* someone is calling cfs_hash_for_each_* */
- return hs->hs_iterating || hs->hs_iterators != 0;
+ return hs->hs_iterating || hs->hs_iterators;
}
static inline int
@@ -453,7 +461,7 @@ cfs_hash_bkt_size(struct cfs_hash *hs)
}
static inline unsigned
-cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned mask)
+cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned int mask)
{
return hs->hs_ops->hs_hash(hs, key, mask);
}
@@ -562,7 +570,7 @@ cfs_hash_bd_index_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
}
static inline void
-cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned index,
+cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned int index,
struct cfs_hash_bd *bd)
{
bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits];
@@ -576,14 +584,14 @@ cfs_hash_bd_extra_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
cfs_hash_bkt_size(hs) - hs->hs_extra_bytes;
}
-static inline __u32
+static inline u32
cfs_hash_bd_version_get(struct cfs_hash_bd *bd)
{
/* need hold cfs_hash_bd_lock */
return bd->bd_bucket->hsb_version;
}
-static inline __u32
+static inline u32
cfs_hash_bd_count_get(struct cfs_hash_bd *bd)
{
/* need hold cfs_hash_bd_lock */
@@ -669,10 +677,10 @@ cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
/* Hash init/cleanup functions */
struct cfs_hash *
-cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
- unsigned bkt_bits, unsigned extra_bytes,
- unsigned min_theta, unsigned max_theta,
- struct cfs_hash_ops *ops, unsigned flags);
+cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits,
+ unsigned int bkt_bits, unsigned int extra_bytes,
+ unsigned int min_theta, unsigned int max_theta,
+ struct cfs_hash_ops *ops, unsigned int flags);
struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs);
void cfs_hash_putref(struct cfs_hash *hs);
@@ -700,27 +708,28 @@ typedef int (*cfs_hash_for_each_cb_t)(struct cfs_hash *hs,
void *
cfs_hash_lookup(struct cfs_hash *hs, const void *key);
void
-cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
+cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb, void *data);
void
-cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
+cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb,
+ void *data);
int
-cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t,
- void *data);
+cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb,
+ void *data, int start);
int
-cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t,
+cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb,
void *data);
void
cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
- cfs_hash_for_each_cb_t, void *data);
+ cfs_hash_for_each_cb_t cb, void *data);
typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data);
void
-cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t, void *data);
+cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t cb, void *data);
void
-cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
- cfs_hash_for_each_cb_t, void *data);
+cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned int hindex,
+ cfs_hash_for_each_cb_t cb, void *data);
int cfs_hash_is_empty(struct cfs_hash *hs);
-__u64 cfs_hash_size_get(struct cfs_hash *hs);
+u64 cfs_hash_size_get(struct cfs_hash *hs);
/*
* Rehash - Theta is calculated to be the average chained
@@ -766,8 +775,8 @@ cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
#endif /* CFS_HASH_DEBUG_LEVEL */
#define CFS_HASH_THETA_BITS 10
-#define CFS_HASH_MIN_THETA (1U << (CFS_HASH_THETA_BITS - 1))
-#define CFS_HASH_MAX_THETA (1U << (CFS_HASH_THETA_BITS + 1))
+#define CFS_HASH_MIN_THETA BIT(CFS_HASH_THETA_BITS - 1)
+#define CFS_HASH_MAX_THETA BIT(CFS_HASH_THETA_BITS + 1)
/* Return integer component of theta */
static inline int __cfs_hash_theta_int(int theta)
@@ -792,8 +801,8 @@ static inline void
__cfs_hash_set_theta(struct cfs_hash *hs, int min, int max)
{
LASSERT(min < max);
- hs->hs_min_theta = (__u16)min;
- hs->hs_max_theta = (__u16)max;
+ hs->hs_min_theta = (u16)min;
+ hs->hs_max_theta = (u16)max;
}
/* Generic debug formatting routines mainly for proc handler */
@@ -805,11 +814,11 @@ void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m);
* Generic djb2 hash algorithm for character arrays.
*/
static inline unsigned
-cfs_hash_djb2_hash(const void *key, size_t size, unsigned mask)
+cfs_hash_djb2_hash(const void *key, size_t size, unsigned int mask)
{
- unsigned i, hash = 5381;
+ unsigned int i, hash = 5381;
- LASSERT(key != NULL);
+ LASSERT(key);
for (i = 0; i < size; i++)
hash = hash * 33 + ((char *)key)[i];
@@ -821,7 +830,7 @@ cfs_hash_djb2_hash(const void *key, size_t size, unsigned mask)
* Generic u32 hash algorithm.
*/
static inline unsigned
-cfs_hash_u32_hash(const __u32 key, unsigned mask)
+cfs_hash_u32_hash(const u32 key, unsigned int mask)
{
return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask);
}
@@ -830,9 +839,9 @@ cfs_hash_u32_hash(const __u32 key, unsigned mask)
* Generic u64 hash algorithm.
*/
static inline unsigned
-cfs_hash_u64_hash(const __u64 key, unsigned mask)
+cfs_hash_u64_hash(const u64 key, unsigned int mask)
{
- return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
+ return ((unsigned int)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
}
/** iterate over all buckets in @bds (array of struct cfs_hash_bd) */
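Aside on cfs_hash_djb2_hash() above: it is the classic djb2 string hash (hash = hash * 33 + byte) folded into a bucket index with a mask. The mask is expected to be a power of two minus one, which holds here because the bucket counts come from BIT(). Standalone sketch of the same loop:

	#include <stddef.h>

	static unsigned int djb2_hash(const void *key, size_t size, unsigned int mask)
	{
		const char *bytes = key;
		unsigned int hash = 5381;	/* djb2 initial value */
		size_t i;

		for (i = 0; i < size; i++)
			hash = hash * 33 + bytes[i];

		return hash & mask;		/* mask = nbuckets - 1, nbuckets a power of two */
	}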
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index e0e1a5d0949d..aab15d8112a4 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -75,7 +75,7 @@ do { \
#define KLASSERT(e) LASSERT(e)
-void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *);
+void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msg);
#define LBUG() \
do { \
@@ -96,7 +96,7 @@ do { \
#define LIBCFS_ALLOC_POST(ptr, size) \
do { \
- if (unlikely((ptr) == NULL)) { \
+ if (unlikely(!(ptr))) { \
CERROR("LNET: out of memory at %s:%d (tried to alloc '" \
#ptr "' = %d)\n", __FILE__, __LINE__, (int)(size)); \
} else { \
@@ -147,7 +147,7 @@ do { \
#define LIBCFS_FREE(ptr, size) \
do { \
- if (unlikely((ptr) == NULL)) { \
+ if (unlikely(!(ptr))) { \
CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at " \
"%s:%d\n", (int)(size), __FILE__, __LINE__); \
break; \
@@ -169,8 +169,6 @@ do { \
#define ntohs(x) ___ntohs(x)
#endif
-void libcfs_run_upcall(char **argv);
-void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *);
void libcfs_debug_dumplog(void);
int libcfs_debug_init(unsigned long bufsize);
int libcfs_debug_cleanup(void);
@@ -280,7 +278,7 @@ do { \
#define CFS_FREE_PTR(ptr) LIBCFS_FREE(ptr, sizeof(*(ptr)))
/** Compile-time assertion.
-
+ *
* Check an invariant described by a constant expression at compile time by
* forcing a compiler error if it does not hold. \a cond must be a constant
* expression as defined by the ISO C Standard:
@@ -306,7 +304,8 @@ do { \
/* --------------------------------------------------------------------
* Light-weight trace
* Support for temporary event tracing with minimal Heisenberg effect.
- * -------------------------------------------------------------------- */
+ * --------------------------------------------------------------------
+ */
#define MKSTR(ptr) ((ptr)) ? (ptr) : ""
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
index 0ee60ff336f2..41795d9b3b9b 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
@@ -62,9 +62,9 @@ struct cfs_range_expr {
* Link to cfs_expr_list::el_exprs.
*/
struct list_head re_link;
- __u32 re_lo;
- __u32 re_hi;
- __u32 re_stride;
+ u32 re_lo;
+ u32 re_hi;
+ u32 re_stride;
};
struct cfs_expr_list {
@@ -74,24 +74,26 @@ struct cfs_expr_list {
char *cfs_trimwhite(char *str);
int cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res);
-int cfs_str2num_check(char *str, int nob, unsigned *num,
- unsigned min, unsigned max);
-int cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list);
+int cfs_str2num_check(char *str, int nob, unsigned int *num,
+ unsigned int min, unsigned int max);
+int cfs_expr_list_match(u32 value, struct cfs_expr_list *expr_list);
int cfs_expr_list_print(char *buffer, int count,
struct cfs_expr_list *expr_list);
int cfs_expr_list_values(struct cfs_expr_list *expr_list,
- int max, __u32 **values);
+ int max, u32 **values);
static inline void
-cfs_expr_list_values_free(__u32 *values, int num)
+cfs_expr_list_values_free(u32 *values, int num)
{
- /* This array is allocated by LIBCFS_ALLOC(), so it shouldn't be freed
+ /*
+ * This array is allocated by LIBCFS_ALLOC(), so it shouldn't be freed
* by OBD_FREE() if it's called by module other than libcfs & LNet,
- * otherwise we will see fake memory leak */
+ * otherwise we will see fake memory leak
+ */
LIBCFS_FREE(values, num * sizeof(values[0]));
}
void cfs_expr_list_free(struct cfs_expr_list *expr_list);
-int cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
+int cfs_expr_list_parse(char *str, int len, unsigned int min, unsigned int max,
struct cfs_expr_list **elpp);
void cfs_expr_list_free_list(struct list_head *list);
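Aside on struct cfs_range_expr above: re_lo, re_hi and re_stride describe a numeric range with a step. The matching rule behind cfs_expr_list_match() is presumably "value lies in [lo, hi] and sits on the stride grid"; the sketch below encodes that assumption (the actual implementation lives in libcfs_string.c, which is not part of this hunk):

	#include <stdbool.h>

	struct range_expr {			/* mirrors cfs_range_expr's three value fields */
		unsigned int lo, hi, stride;
	};

	/* Assumed semantics: lo <= value <= hi and (value - lo) is a multiple of stride. */
	static bool range_expr_match(unsigned int value, const struct range_expr *re)
	{
		return value >= re->lo && value <= re->hi &&
		       (value - re->lo) % re->stride == 0;
	}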
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
index a7e1340e69a1..2accd9a85472 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
@@ -62,9 +62,9 @@
struct cfs_wi_sched;
-void cfs_wi_sched_destroy(struct cfs_wi_sched *);
+void cfs_wi_sched_destroy(struct cfs_wi_sched *sched);
int cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, int cpt,
- int nthrs, struct cfs_wi_sched **);
+ int nthrs, struct cfs_wi_sched **sched_pp);
struct cfs_workitem;
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
index f63cb47bc309..dd0cd0442b86 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
@@ -52,17 +52,17 @@ struct cfs_cpu_partition {
/* nodes mask for this partition */
nodemask_t *cpt_nodemask;
/* spread rotor for NUMA allocator */
- unsigned cpt_spread_rotor;
+ unsigned int cpt_spread_rotor;
};
/** descriptor for CPU partitions */
struct cfs_cpt_table {
/* version, reserved for hotplug */
- unsigned ctb_version;
+ unsigned int ctb_version;
/* spread rotor for NUMA allocator */
- unsigned ctb_spread_rotor;
+ unsigned int ctb_spread_rotor;
/* # of CPU partitions */
- unsigned ctb_nparts;
+ unsigned int ctb_nparts;
/* partitions tables */
struct cfs_cpu_partition *ctb_parts;
/* shadow HW CPU to CPU partition ID */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
index b646acd1f7e7..709e1ce98d8d 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
@@ -76,23 +76,23 @@ static inline long cfs_duration_sec(long d)
#define cfs_time_current_64 get_jiffies_64
-static inline __u64 cfs_time_add_64(__u64 t, __u64 d)
+static inline u64 cfs_time_add_64(u64 t, u64 d)
{
return t + d;
}
-static inline __u64 cfs_time_shift_64(int seconds)
+static inline u64 cfs_time_shift_64(int seconds)
{
return cfs_time_add_64(cfs_time_current_64(),
cfs_time_seconds(seconds));
}
-static inline int cfs_time_before_64(__u64 t1, __u64 t2)
+static inline int cfs_time_before_64(u64 t1, u64 t2)
{
return (__s64)t2 - (__s64)t1 > 0;
}
-static inline int cfs_time_beforeq_64(__u64 t1, __u64 t2)
+static inline int cfs_time_beforeq_64(u64 t1, u64 t2)
{
return (__s64)t2 - (__s64)t1 >= 0;
}
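Aside on cfs_time_before_64()/cfs_time_beforeq_64() above: comparing through a signed difference keeps the test correct even if the 64-bit tick counter wraps, the same trick as the kernel's time_before()/time_after() macros. Minimal sketch (illustration only; the header casts each operand before subtracting, while the subtract-then-cast form shown here is the more common idiom and expresses the same comparison):

	#include <stdint.h>
	#include <stdbool.h>

	/* t1 is "before" t2 when the signed distance from t1 to t2 is positive;
	 * doing the subtraction unsigned and casting afterwards keeps wraparound
	 * well defined. */
	static bool time_before_64(uint64_t t1, uint64_t t2)
	{
		return (int64_t)(t2 - t1) > 0;
	}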
diff --git a/drivers/staging/lustre/include/linux/lnet/lnetst.h b/drivers/staging/lustre/include/linux/lnet/lnetst.h
index 417044552d3f..8a84888635ff 100644
--- a/drivers/staging/lustre/include/linux/lnet/lnetst.h
+++ b/drivers/staging/lustre/include/linux/lnet/lnetst.h
@@ -244,7 +244,7 @@ typedef struct {
int lstio_ses_timeout; /* IN: session timeout */
int lstio_ses_force; /* IN: force create ? */
/** IN: session features */
- unsigned lstio_ses_feats;
+ unsigned int lstio_ses_feats;
lst_sid_t __user *lstio_ses_idp; /* OUT: session id */
int lstio_ses_nmlen; /* IN: name length */
char __user *lstio_ses_namep; /* IN: session name */
@@ -255,7 +255,7 @@ typedef struct {
lst_sid_t __user *lstio_ses_idp; /* OUT: session id */
int __user *lstio_ses_keyp; /* OUT: local key */
/** OUT: session features */
- unsigned __user *lstio_ses_featp;
+ unsigned int __user *lstio_ses_featp;
lstcon_ndlist_ent_t __user *lstio_ses_ndinfo; /* OUT: */
int lstio_ses_nmlen; /* IN: name length */
char __user *lstio_ses_namep; /* OUT: session name */
@@ -328,7 +328,7 @@ typedef struct {
char __user *lstio_grp_namep; /* IN: group name */
int lstio_grp_count; /* IN: # of nodes */
/** OUT: session features */
- unsigned __user *lstio_grp_featp;
+ unsigned int __user *lstio_grp_featp;
lnet_process_id_t __user *lstio_grp_idsp; /* IN: nodes */
struct list_head __user *lstio_grp_resultp; /* OUT: list head of
result buffer */
@@ -490,6 +490,8 @@ typedef struct {
int blk_size; /* size (bytes) */
int blk_time; /* time of running the test*/
int blk_flags; /* reserved flags */
+ int blk_cli_off; /* bulk offset on client */
+ int blk_srv_off; /* reserved: bulk offset on server */
} lst_test_bulk_param_t;
typedef struct {
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 9e8802181452..7f761b327166 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -1489,7 +1489,7 @@ out_fpo:
static void kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps,
struct list_head *zombies)
{
- if (!fps->fps_net) /* intialized? */
+ if (!fps->fps_net) /* initialized? */
return;
spin_lock(&fps->fps_lock);
@@ -1637,7 +1637,7 @@ int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
{
__u64 *pages = tx->tx_pages;
bool is_rx = (rd != tx->tx_rd);
- bool tx_pages_mapped = 0;
+ bool tx_pages_mapped = false;
struct kib_fmr_pool *fpo;
int npages = 0;
__u64 version;
@@ -1812,7 +1812,7 @@ static void kiblnd_destroy_pool_list(struct list_head *head)
static void kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies)
{
- if (!ps->ps_net) /* intialized? */
+ if (!ps->ps_net) /* initialized? */
return;
spin_lock(&ps->ps_lock);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index b27de8888149..c7917abf9944 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1912,12 +1912,12 @@ kiblnd_close_conn_locked(struct kib_conn *conn, int error)
libcfs_nid2str(peer->ibp_nid));
} else {
CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
- libcfs_nid2str(peer->ibp_nid), error,
- list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
- list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
- list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
- list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
- list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
+ libcfs_nid2str(peer->ibp_nid), error,
+ list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
+ list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
+ list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
+ list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
+ list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
}
dev = ((struct kib_net *)peer->ibp_ni->ni_data)->ibn_dev;
@@ -2643,7 +2643,7 @@ kiblnd_check_reconnect(struct kib_conn *conn, int version,
if (incarnation)
peer->ibp_incarnation = incarnation;
out:
- write_unlock_irqrestore(glock, flags);
+ write_unlock_irqrestore(glock, flags);
CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n",
libcfs_nid2str(peer->ibp_nid),
@@ -2651,7 +2651,7 @@ out:
reason, IBLND_MSG_VERSION, version, msg_size,
conn->ibc_queue_depth, queue_dep,
conn->ibc_max_frags, frag_num);
- /**
+ /**
* if conn::ibc_reconnect is TRUE, connd will reconnect to the peer
* while destroying the zombie
*/
@@ -2976,7 +2976,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
case RDMA_CM_EVENT_ADDR_ERROR:
peer = (struct kib_peer *)cmid->context;
CNETERR("%s: ADDR ERROR %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
+ libcfs_nid2str(peer->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
kiblnd_peer_decref(peer);
return -EHOSTUNREACH; /* rc destroys cmid */
@@ -3021,7 +3021,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
return kiblnd_active_connect(cmid);
CNETERR("Can't resolve route for %s: %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
+ libcfs_nid2str(peer->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer, 1, event->status);
kiblnd_peer_decref(peer);
return event->status; /* rc destroys cmid */
@@ -3031,7 +3031,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
CNETERR("%s: UNREACHABLE %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
kiblnd_connreq_done(conn, -ENETDOWN);
kiblnd_conn_decref(conn);
return 0;
@@ -3269,14 +3269,14 @@ kiblnd_disconnect_conn(struct kib_conn *conn)
#define KIB_RECONN_HIGH_RACE 10
/**
* Allow connd to take a break and handle other things after consecutive
- * reconnection attemps.
+ * reconnection attempts.
*/
#define KIB_RECONN_BREAK 100
int
kiblnd_connd(void *arg)
{
- spinlock_t *lock= &kiblnd_data.kib_connd_lock;
+ spinlock_t *lock = &kiblnd_data.kib_connd_lock;
wait_queue_t wait;
unsigned long flags;
struct kib_conn *conn;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index cbc9a9c5385f..b74cf635faee 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -96,7 +96,8 @@ ksocknal_destroy_route(struct ksock_route *route)
}
static int
-ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni, lnet_process_id_t id)
+ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni,
+ lnet_process_id_t id)
{
int cpt = lnet_cpt_of_nid(id.nid);
struct ksock_net *net = ni->ni_data;
@@ -319,7 +320,8 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
}
static void
-ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
+ksocknal_associate_route_conn_locked(struct ksock_route *route,
+ struct ksock_conn *conn)
{
struct ksock_peer *peer = route->ksnr_peer;
int type = conn->ksnc_type;
@@ -821,7 +823,8 @@ ksocknal_select_ips(struct ksock_peer *peer, __u32 *peerips, int n_peerips)
if (k < peer->ksnp_n_passive_ips) /* using it already */
continue;
- k = ksocknal_match_peerip(iface, peerips, n_peerips);
+ k = ksocknal_match_peerip(iface, peerips,
+ n_peerips);
xor = ip ^ peerips[k];
this_netmatch = !(xor & iface->ksni_netmask) ? 1 : 0;
@@ -1302,8 +1305,11 @@ ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,
/* Take packets blocking for this connection. */
list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
- if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
- continue;
+ int match = conn->ksnc_proto->pro_match_tx(conn, tx,
+ tx->tx_nonblk);
+
+ if (match == SOCKNAL_MATCH_NO)
+ continue;
list_del(&tx->tx_list);
ksocknal_queue_tx_locked(tx, conn);
@@ -1493,8 +1499,8 @@ ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
}
- peer->ksnp_proto = NULL; /* renegotiate protocol version */
- peer->ksnp_error = error; /* stash last conn close reason */
+ peer->ksnp_proto = NULL; /* renegotiate protocol version */
+ peer->ksnp_error = error; /* stash last conn close reason */
if (list_empty(&peer->ksnp_routes)) {
/*
@@ -1786,7 +1792,8 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
(id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
continue;
- count += ksocknal_close_peer_conns_locked(peer, ipaddr, 0);
+ count += ksocknal_close_peer_conns_locked(peer, ipaddr,
+ 0);
}
}
@@ -2026,7 +2033,10 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
}
rc = 0;
- /* NB only new connections will pay attention to the new interface! */
+ /*
+ * NB only new connections will pay attention to the
+ * new interface!
+ */
}
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
@@ -2200,8 +2210,9 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
int txmem;
int rxmem;
int nagle;
- struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
+ struct ksock_conn *conn;
+ conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
if (!conn)
return -ENOENT;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index e6ca0cf52691..842c45393b38 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -84,7 +84,8 @@ struct ksock_sched { /* per scheduler state */
struct list_head kss_zombie_noop_txs; /* zombie noop tx list */
wait_queue_head_t kss_waitq; /* where scheduler sleeps */
int kss_nconns; /* # connections assigned to
- * this scheduler */
+ * this scheduler
+ */
struct ksock_sched_info *kss_info; /* owner of it */
};
@@ -110,15 +111,19 @@ struct ksock_interface { /* in-use interface */
struct ksock_tunables {
int *ksnd_timeout; /* "stuck" socket timeout
- * (seconds) */
+ * (seconds)
+ */
int *ksnd_nscheds; /* # scheduler threads in each
- * pool while starting */
+ * pool while starting
+ */
int *ksnd_nconnds; /* # connection daemons */
int *ksnd_nconnds_max; /* max # connection daemons */
int *ksnd_min_reconnectms; /* first connection retry after
- * (ms)... */
+ * (ms)...
+ */
int *ksnd_max_reconnectms; /* ...exponentially increasing to
- * this */
+ * this
+ */
int *ksnd_eager_ack; /* make TCP ack eagerly? */
int *ksnd_typed_conns; /* drive sockets by type? */
int *ksnd_min_bulk; /* smallest "large" message */
@@ -126,9 +131,11 @@ struct ksock_tunables {
int *ksnd_rx_buffer_size; /* socket rx buffer size */
int *ksnd_nagle; /* enable NAGLE? */
int *ksnd_round_robin; /* round robin for multiple
- * interfaces */
+ * interfaces
+ */
int *ksnd_keepalive; /* # secs for sending keepalive
- * NOOP */
+ * NOOP
+ */
int *ksnd_keepalive_idle; /* # idle secs before 1st probe
*/
int *ksnd_keepalive_count; /* # probes */
@@ -137,20 +144,26 @@ struct ksock_tunables {
int *ksnd_peertxcredits; /* # concurrent sends to 1 peer
*/
int *ksnd_peerrtrcredits; /* # per-peer router buffer
- * credits */
+ * credits
+ */
int *ksnd_peertimeout; /* seconds to consider peer dead
*/
int *ksnd_enable_csum; /* enable check sum */
int *ksnd_inject_csum_error; /* set non-zero to inject
- * checksum error */
+ * checksum error
+ */
int *ksnd_nonblk_zcack; /* always send zc-ack on
- * non-blocking connection */
+ * non-blocking connection
+ */
unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload
- * size */
+ * size
+ */
int *ksnd_zc_recv; /* enable ZC receive (for
- * Chelsio TOE) */
+ * Chelsio TOE)
+ */
int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to
- * enable ZC receive */
+ * enable ZC receive
+ */
};
struct ksock_net {
@@ -174,9 +187,11 @@ struct ksock_nal_data {
int ksnd_nnets; /* # networks set up */
struct list_head ksnd_nets; /* list of nets */
rwlock_t ksnd_global_lock; /* stabilize peer/conn
- * ops */
+ * ops
+ */
struct list_head *ksnd_peers; /* hash table of all my
- * known peers */
+ * known peers
+ */
int ksnd_peer_hash_size; /* size of ksnd_peers */
int ksnd_nthreads; /* # live threads */
@@ -187,11 +202,14 @@ struct ksock_nal_data {
atomic_t ksnd_nactive_txs; /* #active txs */
struct list_head ksnd_deathrow_conns; /* conns to close:
- * reaper_lock*/
+ * reaper_lock
+ */
struct list_head ksnd_zombie_conns; /* conns to free:
- * reaper_lock */
+ * reaper_lock
+ */
struct list_head ksnd_enomem_conns; /* conns to retry:
- * reaper_lock*/
+ * reaper_lock
+ */
wait_queue_head_t ksnd_reaper_waitq; /* reaper sleeps here */
unsigned long ksnd_reaper_waketime; /* when reaper will wake
*/
@@ -201,30 +219,34 @@ struct ksock_nal_data {
int ksnd_stall_tx; /* test sluggish sender
*/
int ksnd_stall_rx; /* test sluggish
- * receiver */
-
+ * receiver
+ */
struct list_head ksnd_connd_connreqs; /* incoming connection
- * requests */
+ * requests
+ */
struct list_head ksnd_connd_routes; /* routes waiting to be
- * connected */
+ * connected
+ */
wait_queue_head_t ksnd_connd_waitq; /* connds sleep here */
int ksnd_connd_connecting; /* # connds connecting
*/
time64_t ksnd_connd_failed_stamp;/* time stamp of the
* last failed
- * connecting attempt */
+ * connecting attempt
+ */
time64_t ksnd_connd_starting_stamp;/* time stamp of the
* last starting connd
*/
- unsigned ksnd_connd_starting; /* # starting connd */
- unsigned ksnd_connd_running; /* # running connd */
+ unsigned int ksnd_connd_starting; /* # starting connd */
+ unsigned int ksnd_connd_running; /* # running connd */
spinlock_t ksnd_connd_lock; /* serialise */
struct list_head ksnd_idle_noop_txs; /* list head for freed
- * noop tx */
+ * noop tx
+ */
spinlock_t ksnd_tx_lock; /* serialise, g_lock
- * unsafe */
-
+ * unsafe
+ */
};
#define SOCKNAL_INIT_NOTHING 0
@@ -304,18 +326,21 @@ struct ksock_conn {
struct list_head ksnc_list; /* stash on peer's conn list */
struct socket *ksnc_sock; /* actual socket */
void *ksnc_saved_data_ready; /* socket's original
- * data_ready() callback */
+ * data_ready() callback
+ */
void *ksnc_saved_write_space; /* socket's original
- * write_space() callback */
+ * write_space() callback
+ */
atomic_t ksnc_conn_refcount;/* conn refcount */
atomic_t ksnc_sock_refcount;/* sock refcount */
struct ksock_sched *ksnc_scheduler; /* who schedules this connection
- */
+ */
__u32 ksnc_myipaddr; /* my IP */
__u32 ksnc_ipaddr; /* peer's IP */
int ksnc_port; /* peer's port */
signed int ksnc_type:3; /* type of connection, should be
- * signed value */
+ * signed value
+ */
unsigned int ksnc_closing:1; /* being shut down */
unsigned int ksnc_flip:1; /* flip or not, only for V2.x */
unsigned int ksnc_zc_capable:1; /* enable to ZC */
@@ -323,9 +348,11 @@ struct ksock_conn {
/* reader */
struct list_head ksnc_rx_list; /* where I enq waiting input or a
- * forwarding descriptor */
+ * forwarding descriptor
+ */
unsigned long ksnc_rx_deadline; /* when (in jiffies) receive times
- * out */
+ * out
+ */
__u8 ksnc_rx_started; /* started receiving a message */
__u8 ksnc_rx_ready; /* data ready to read */
__u8 ksnc_rx_scheduled; /* being progressed */
@@ -338,7 +365,8 @@ struct ksock_conn {
lnet_kiov_t *ksnc_rx_kiov; /* the page frags */
union ksock_rxiovspace ksnc_rx_iov_space; /* space for frag descriptors */
__u32 ksnc_rx_csum; /* partial checksum for incoming
- * data */
+ * data
+ */
void *ksnc_cookie; /* rx lnet_finalize passthru arg
*/
ksock_msg_t ksnc_msg; /* incoming message buffer:
@@ -346,14 +374,16 @@ struct ksock_conn {
* whole struct
* V1.x message is a bare
* lnet_hdr_t, it's stored in
- * ksnc_msg.ksm_u.lnetmsg */
-
+ * ksnc_msg.ksm_u.lnetmsg
+ */
/* WRITER */
struct list_head ksnc_tx_list; /* where I enq waiting for output
- * space */
+ * space
+ */
struct list_head ksnc_tx_queue; /* packets waiting to be sent */
- struct ksock_tx *ksnc_tx_carrier; /* next TX that can carry a LNet
- * message or ZC-ACK */
+ struct ksock_tx *ksnc_tx_carrier; /* next TX that can carry a LNet
+ * message or ZC-ACK
+ */
unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out
*/
int ksnc_tx_bufnob; /* send buffer marker */
@@ -361,7 +391,8 @@ struct ksock_conn {
int ksnc_tx_ready; /* write space */
int ksnc_tx_scheduled; /* being progressed */
unsigned long ksnc_tx_last_post; /* time stamp of the last posted
- * TX */
+ * TX
+ */
};
struct ksock_route {
@@ -370,20 +401,24 @@ struct ksock_route {
struct ksock_peer *ksnr_peer; /* owning peer */
atomic_t ksnr_refcount; /* # users */
unsigned long ksnr_timeout; /* when (in jiffies) reconnection
- * can happen next */
+ * can happen next
+ */
long ksnr_retry_interval; /* how long between retries */
__u32 ksnr_myipaddr; /* my IP */
__u32 ksnr_ipaddr; /* IP address to connect to */
int ksnr_port; /* port to connect to */
unsigned int ksnr_scheduled:1; /* scheduled for attention */
unsigned int ksnr_connecting:1; /* connection establishment in
- * progress */
+ * progress
+ */
unsigned int ksnr_connected:4; /* connections established by
- * type */
+ * type
+ */
unsigned int ksnr_deleted:1; /* been removed from peer? */
unsigned int ksnr_share_count; /* created explicitly? */
int ksnr_conn_count; /* # conns established by this
- * route */
+ * route
+ */
};
#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */
@@ -391,7 +426,8 @@ struct ksock_route {
struct ksock_peer {
struct list_head ksnp_list; /* stash on global peer list */
unsigned long ksnp_last_alive; /* when (in jiffies) I was last
- * alive */
+ * alive
+ */
lnet_process_id_t ksnp_id; /* who's on the other end(s) */
atomic_t ksnp_refcount; /* # users */
int ksnp_sharecount; /* lconf usage counter */
@@ -408,7 +444,8 @@ struct ksock_peer {
struct list_head ksnp_tx_queue; /* waiting packets */
spinlock_t ksnp_lock; /* serialize, g_lock unsafe */
struct list_head ksnp_zc_req_list; /* zero copy requests wait for
- * ACK */
+ * ACK
+ */
unsigned long ksnp_send_keepalive; /* time to send keepalive */
lnet_ni_t *ksnp_ni; /* which network */
int ksnp_n_passive_ips; /* # of... */
@@ -429,7 +466,8 @@ extern struct ksock_tunables ksocknal_tunables;
#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */
#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */
#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not
- * preferred */
+ * preferred
+ */
struct ksock_proto {
/* version number of protocol */
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index c1c6f604e6ad..972f6094be75 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -620,7 +620,8 @@ ksocknal_launch_all_connections_locked(struct ksock_peer *peer)
}
struct ksock_conn *
-ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonblk)
+ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx,
+ int nonblk)
{
struct list_head *tmp;
struct ksock_conn *conn;
@@ -630,10 +631,12 @@ ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonb
int fnob = 0;
list_for_each(tmp, &peer->ksnp_conns) {
- struct ksock_conn *c = list_entry(tmp, struct ksock_conn, ksnc_list);
- int nob = atomic_read(&c->ksnc_tx_nob) +
- c->ksnc_sock->sk->sk_wmem_queued;
- int rc;
+ struct ksock_conn *c;
+ int nob, rc;
+
+ c = list_entry(tmp, struct ksock_conn, ksnc_list);
+ nob = atomic_read(&c->ksnc_tx_nob) +
+ c->ksnc_sock->sk->sk_wmem_queued;
LASSERT(!c->ksnc_closing);
LASSERT(c->ksnc_proto &&
@@ -752,9 +755,9 @@ ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
LASSERT(msg->ksm_zc_cookies[1]);
LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);
+ /* ZC ACK piggybacked on ztx release tx later */
if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
- ztx = tx; /* ZC ACK piggybacked on ztx release tx later */
-
+ ztx = tx;
} else {
/*
* It's a normal packet - can it piggback a noop zc-ack that
@@ -796,7 +799,8 @@ ksocknal_find_connectable_route_locked(struct ksock_peer *peer)
LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
- if (route->ksnr_scheduled) /* connections being established */
+ /* connections being established */
+ if (route->ksnr_scheduled)
continue;
/* all route types connected ? */
@@ -1514,7 +1518,10 @@ int ksocknal_scheduler(void *arg)
rc = ksocknal_process_transmit(conn, tx);
if (rc == -ENOMEM || rc == -EAGAIN) {
- /* Incomplete send: replace tx on HEAD of tx_queue */
+ /*
+ * Incomplete send: replace tx on HEAD of
+ * tx_queue
+ */
spin_lock_bh(&sched->kss_lock);
list_add(&tx->tx_list, &conn->ksnc_tx_queue);
} else {
@@ -1724,7 +1731,8 @@ ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
timeout = active ? *ksocknal_tunables.ksnd_timeout :
lnet_acceptor_timeout();
- rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof(hello->kshm_magic), timeout);
+ rc = lnet_sock_read(sock, &hello->kshm_magic,
+ sizeof(hello->kshm_magic), timeout);
if (rc) {
CERROR("Error %d reading HELLO from %pI4h\n",
rc, &conn->ksnc_ipaddr);
@@ -1798,7 +1806,8 @@ ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
/* Userspace NAL assigns peer process ID from socket */
recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
- recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
+ recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
+ conn->ksnc_ipaddr);
} else {
recv_id.nid = hello->kshm_src_nid;
recv_id.pid = hello->kshm_src_pid;
@@ -1882,7 +1891,8 @@ ksocknal_connect(struct ksock_route *route)
if (peer->ksnp_accepting > 0) {
CDEBUG(D_NET,
"peer %s(%d) already connecting to me, retry later.\n",
- libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting);
+ libcfs_nid2str(peer->ksnp_id.nid),
+ peer->ksnp_accepting);
retry_later = 1;
}
@@ -2241,7 +2251,8 @@ ksocknal_connd(void *arg)
/* Nothing to do for 'timeout' */
set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
+ add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq,
+ &wait);
spin_unlock_bh(connd_lock);
nloops = 0;
@@ -2371,7 +2382,8 @@ ksocknal_send_keepalive_locked(struct ksock_peer *peer)
struct ksock_conn *conn;
struct ksock_tx *tx;
- if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
+ /* last_alive will be updated by create_conn */
+ if (list_empty(&peer->ksnp_conns))
return 0;
if (peer->ksnp_proto != &ksocknal_protocol_v3x)
@@ -2473,8 +2485,8 @@ ksocknal_check_peer_timeouts(int idx)
* holding only shared lock
*/
if (!list_empty(&peer->ksnp_tx_queue)) {
- struct ksock_tx *tx = list_entry(peer->ksnp_tx_queue.next,
- struct ksock_tx, tx_list);
+ tx = list_entry(peer->ksnp_tx_queue.next,
+ struct ksock_tx, tx_list);
if (cfs_time_aftereq(cfs_time_current(),
tx->tx_deadline)) {
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index 6c95e989ca12..4bcab4bcc2de 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -202,7 +202,8 @@ ksocknal_lib_recv_iov(struct ksock_conn *conn)
fragnob = sum;
conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
- iov[i].iov_base, fragnob);
+ iov[i].iov_base,
+ fragnob);
}
conn->ksnc_msg.ksm_csum = saved_csum;
}
@@ -291,7 +292,8 @@ ksocknal_lib_csum_tx(struct ksock_tx *tx)
}
int
-ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem, int *rxmem, int *nagle)
+ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
+ int *rxmem, int *nagle)
{
struct socket *sock = conn->ksnc_sock;
int len;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index 82e174f6d9fe..8f0ff6ca1f39 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -194,7 +194,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
}
if (!tx->tx_msg.ksm_zc_cookies[0]) {
- /* NOOP tx has only one ZC-ACK cookie, can carry at least one more */
+ /*
+ * NOOP tx has only one ZC-ACK cookie,
+ * can carry at least one more
+ */
if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
tx->tx_msg.ksm_zc_cookies[1] = cookie;
@@ -203,7 +206,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
}
if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) {
- /* not likely to carry more ACKs, skip it to simplify logic */
+ /*
+ * not likely to carry more ACKs, skip it
+ * to simplify logic
+ */
ksocknal_next_tx_carrier(conn);
}
@@ -237,7 +243,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
}
} else {
- /* ksm_zc_cookies[0] < ksm_zc_cookies[1], it is range of cookies */
+ /*
+ * ksm_zc_cookies[0] < ksm_zc_cookies[1],
+ * it is range of cookies
+ */
if (cookie >= tx->tx_msg.ksm_zc_cookies[0] &&
cookie <= tx->tx_msg.ksm_zc_cookies[1]) {
CWARN("%s: duplicated ZC cookie: %llu\n",
@@ -425,7 +434,8 @@ ksocknal_handle_zcack(struct ksock_conn *conn, __u64 cookie1, __u64 cookie2)
tx_zc_list) {
__u64 c = tx->tx_msg.ksm_zc_cookies[0];
- if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
+ if (c == cookie1 || c == cookie2 ||
+ (cookie1 < c && c < cookie2)) {
tx->tx_msg.ksm_zc_cookies[0] = 0;
list_del(&tx->tx_zc_list);
list_add(&tx->tx_zc_list, &zlist);
@@ -639,7 +649,8 @@ out:
}
static int
-ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello, int timeout)
+ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello,
+ int timeout)
{
struct socket *sock = conn->ksnc_sock;
int rc;
@@ -737,7 +748,10 @@ ksocknal_pack_msg_v2(struct ksock_tx *tx)
tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
tx->tx_resid = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
}
- /* Don't checksum before start sending, because packet can be piggybacked with ACK */
+ /*
+ * Don't checksum before start sending, because packet can be
+ * piggybacked with ACK
+ */
}
static void
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index 23b36b890964..a38db2322225 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -57,7 +57,7 @@ static int libcfs_param_debug_mb_set(const char *val,
const struct kernel_param *kp)
{
int rc;
- unsigned num;
+ unsigned int num;
rc = kstrtouint(val, 0, &num);
if (rc < 0)
@@ -228,7 +228,8 @@ int libcfs_panic_in_progress;
static const char *
libcfs_debug_subsys2str(int subsys)
{
- static const char *libcfs_debug_subsystems[] = LIBCFS_DEBUG_SUBSYS_NAMES;
+ static const char * const libcfs_debug_subsystems[] =
+ LIBCFS_DEBUG_SUBSYS_NAMES;
if (subsys >= ARRAY_SIZE(libcfs_debug_subsystems))
return NULL;
@@ -240,7 +241,8 @@ libcfs_debug_subsys2str(int subsys)
static const char *
libcfs_debug_dbg2str(int debug)
{
- static const char *libcfs_debug_masks[] = LIBCFS_DEBUG_MASKS_NAMES;
+ static const char * const libcfs_debug_masks[] =
+ LIBCFS_DEBUG_MASKS_NAMES;
if (debug >= ARRAY_SIZE(libcfs_debug_masks))
return NULL;
@@ -253,17 +255,17 @@ libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys)
{
const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str :
libcfs_debug_dbg2str;
- int len = 0;
- const char *token;
- int i;
+ int len = 0;
+ const char *token;
+ int i;
- if (mask == 0) { /* "0" */
+ if (!mask) { /* "0" */
if (size > 0)
str[0] = '0';
len = 1;
} else { /* space-separated tokens */
for (i = 0; i < 32; i++) {
- if ((mask & (1 << i)) == 0)
+ if (!(mask & (1 << i)))
continue;
token = fn(i);
@@ -276,7 +278,7 @@ libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys)
len++;
}
- while (*token != 0) {
+ while (*token) {
if (len < size)
str[len] = *token;
token++;
@@ -299,10 +301,10 @@ libcfs_debug_str2mask(int *mask, const char *str, int is_subsys)
{
const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str :
libcfs_debug_dbg2str;
- int m = 0;
- int matched;
- int n;
- int t;
+ int m = 0;
+ int matched;
+ int n;
+ int t;
/* Allow a number for backwards compatibility */
@@ -313,7 +315,7 @@ libcfs_debug_str2mask(int *mask, const char *str, int is_subsys)
t = sscanf(str, "%i%n", &m, &matched);
if (t >= 1 && matched == n) {
/* don't print warning for lctl set_param debug=0 or -1 */
- if (m != 0 && m != -1)
+ if (m && m != -1)
CWARN("You are trying to use a numerical value for the mask - this will be deprecated in a future release.\n");
*mask = m;
return 0;
@@ -387,8 +389,8 @@ EXPORT_SYMBOL(libcfs_debug_dumplog);
int libcfs_debug_init(unsigned long bufsize)
{
- int rc = 0;
unsigned int max = libcfs_debug_mb;
+ int rc = 0;
init_waitqueue_head(&debug_ctlwq);
@@ -414,9 +416,9 @@ int libcfs_debug_init(unsigned long bufsize)
max = max / num_possible_cpus();
max <<= (20 - PAGE_SHIFT);
}
- rc = cfs_tracefile_init(max);
- if (rc == 0) {
+ rc = cfs_tracefile_init(max);
+ if (!rc) {
libcfs_register_panic_notifier();
libcfs_debug_mb = cfs_trace_get_debug_mb();
}
diff --git a/drivers/staging/lustre/lnet/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c
index e4b1a0a86eae..12dd50ad4efb 100644
--- a/drivers/staging/lustre/lnet/libcfs/fail.c
+++ b/drivers/staging/lustre/lnet/libcfs/fail.c
@@ -46,7 +46,7 @@ EXPORT_SYMBOL(cfs_race_waitq);
int cfs_race_state;
EXPORT_SYMBOL(cfs_race_state);
-int __cfs_fail_check_set(__u32 id, __u32 value, int set)
+int __cfs_fail_check_set(u32 id, u32 value, int set)
{
static atomic_t cfs_fail_count = ATOMIC_INIT(0);
@@ -113,6 +113,7 @@ int __cfs_fail_check_set(__u32 id, __u32 value, int set)
break;
case CFS_FAIL_LOC_RESET:
cfs_fail_loc = value;
+ atomic_set(&cfs_fail_count, 0);
break;
default:
LASSERTF(0, "called with bad set %u\n", set);
@@ -123,7 +124,7 @@ int __cfs_fail_check_set(__u32 id, __u32 value, int set)
}
EXPORT_SYMBOL(__cfs_fail_check_set);
-int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
+int __cfs_fail_timeout_set(u32 id, u32 value, int ms, int set)
{
int ret;
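Aside: the one-line behavioral change in fail.c above (zeroing cfs_fail_count on CFS_FAIL_LOC_RESET) is easy to model in userspace. The sketch below is illustrative only; the arm/check helpers and all names are invented, not the libcfs fail-injection API.

#include <stdio.h>

static unsigned int fail_loc;
static int fail_count;

static void fail_loc_reset(unsigned int new_loc)
{
	fail_loc = new_loc;
	fail_count = 0;            /* the fix: drop any stale hit counter */
}

static void fail_arm_count(int n)
{
	fail_count = n;            /* fire only on the n-th matching check */
}

static int fail_check(unsigned int id)
{
	if (id != fail_loc)
		return 0;
	if (fail_count > 0)
		return --fail_count == 0;
	return 1;
}

int main(void)
{
	fail_loc_reset(0x1234);
	fail_arm_count(2);
	printf("%d %d\n", fail_check(0x1234), fail_check(0x1234));  /* 0 1 */

	fail_loc_reset(0x5678);    /* counter is cleared along with the loc */
	printf("%d\n", fail_check(0x5678));                         /* 1 */
	return 0;
}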
diff --git a/drivers/staging/lustre/lnet/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c
index 23283b6e09ab..c93c59d8fe6c 100644
--- a/drivers/staging/lustre/lnet/libcfs/hash.c
+++ b/drivers/staging/lustre/lnet/libcfs/hash.c
@@ -289,7 +289,7 @@ cfs_hash_hd_hhead_size(struct cfs_hash *hs)
static struct hlist_head *
cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
- struct cfs_hash_head_dep *head;
+ struct cfs_hash_head_dep *head;
head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
return &head[bd->bd_offset].hd_head;
@@ -492,7 +492,7 @@ cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
cfs_hash_bd_from_key(hs, hs->hs_buckets,
hs->hs_cur_bits, key, bd);
} else {
- LASSERT(hs->hs_rehash_bits != 0);
+ LASSERT(hs->hs_rehash_bits);
cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
hs->hs_rehash_bits, key, bd);
}
@@ -507,14 +507,14 @@ cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
bd->bd_bucket->hsb_depmax = dep_cur;
# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
- if (likely(warn_on_depth == 0 ||
+ if (likely(!warn_on_depth ||
max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
return;
spin_lock(&hs->hs_dep_lock);
- hs->hs_dep_max = dep_cur;
- hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
- hs->hs_dep_off = bd->bd_offset;
+ hs->hs_dep_max = dep_cur;
+ hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
+ hs->hs_dep_off = bd->bd_offset;
hs->hs_dep_bits = hs->hs_cur_bits;
spin_unlock(&hs->hs_dep_lock);
@@ -531,7 +531,7 @@ cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
cfs_hash_bd_dep_record(hs, bd, rc);
bd->bd_bucket->hsb_version++;
- if (unlikely(bd->bd_bucket->hsb_version == 0))
+ if (unlikely(!bd->bd_bucket->hsb_version))
bd->bd_bucket->hsb_version++;
bd->bd_bucket->hsb_count++;
@@ -551,7 +551,7 @@ cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
LASSERT(bd->bd_bucket->hsb_count > 0);
bd->bd_bucket->hsb_count--;
bd->bd_bucket->hsb_version++;
- if (unlikely(bd->bd_bucket->hsb_version == 0))
+ if (unlikely(!bd->bd_bucket->hsb_version))
bd->bd_bucket->hsb_version++;
if (cfs_hash_with_counter(hs)) {
@@ -571,7 +571,7 @@ cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
int rc;
- if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
+ if (!cfs_hash_bd_compare(bd_old, bd_new))
return;
/* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
@@ -584,11 +584,11 @@ cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
LASSERT(obkt->hsb_count > 0);
obkt->hsb_count--;
obkt->hsb_version++;
- if (unlikely(obkt->hsb_version == 0))
+ if (unlikely(!obkt->hsb_version))
obkt->hsb_version++;
nbkt->hsb_count++;
nbkt->hsb_version++;
- if (unlikely(nbkt->hsb_version == 0))
+ if (unlikely(!nbkt->hsb_version))
nbkt->hsb_version++;
}
@@ -629,7 +629,7 @@ cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd);
struct hlist_node *ehnode;
struct hlist_node *match;
- int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
+ int intent_add = intent & CFS_HS_LOOKUP_MASK_ADD;
/* with this function, we can avoid a lot of useless refcount ops,
* which are expensive atomic operations most time.
@@ -643,13 +643,13 @@ cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
continue;
/* match and ... */
- if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
+ if (intent & CFS_HS_LOOKUP_MASK_DEL) {
cfs_hash_bd_del_locked(hs, bd, ehnode);
return ehnode;
}
/* caller wants refcount? */
- if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
+ if (intent & CFS_HS_LOOKUP_MASK_REF)
cfs_hash_get(hs, ehnode);
return ehnode;
}
@@ -682,7 +682,7 @@ EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
static void
cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- unsigned n, int excl)
+ unsigned int n, int excl)
{
struct cfs_hash_bucket *prev = NULL;
int i;
@@ -704,7 +704,7 @@ cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
static void
cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- unsigned n, int excl)
+ unsigned int n, int excl)
{
struct cfs_hash_bucket *prev = NULL;
int i;
@@ -719,10 +719,10 @@ cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
static struct hlist_node *
cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- unsigned n, const void *key)
+ unsigned int n, const void *key)
{
struct hlist_node *ehnode;
- unsigned i;
+ unsigned int i;
cfs_hash_for_each_bd(bds, n, i) {
ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
@@ -735,12 +735,12 @@ cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
static struct hlist_node *
cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- unsigned n, const void *key,
+ unsigned int n, const void *key,
struct hlist_node *hnode, int noref)
{
struct hlist_node *ehnode;
int intent;
- unsigned i;
+ unsigned int i;
LASSERT(hnode);
intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK;
@@ -766,7 +766,7 @@ cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
static struct hlist_node *
cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- unsigned n, const void *key,
+ unsigned int n, const void *key,
struct hlist_node *hnode)
{
struct hlist_node *ehnode;
@@ -815,7 +815,7 @@ cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
return;
}
- LASSERT(hs->hs_rehash_bits != 0);
+ LASSERT(hs->hs_rehash_bits);
cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
hs->hs_rehash_bits, key, &bds[1]);
@@ -883,7 +883,7 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
struct cfs_hash_bucket **new_bkts;
int i;
- LASSERT(old_size == 0 || old_bkts);
+ LASSERT(!old_size || old_bkts);
if (old_bkts && old_size == new_size)
return old_bkts;
@@ -908,9 +908,9 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
return NULL;
}
- new_bkts[i]->hsb_index = i;
- new_bkts[i]->hsb_version = 1; /* shouldn't be zero */
- new_bkts[i]->hsb_depmax = -1; /* unknown */
+ new_bkts[i]->hsb_index = i;
+ new_bkts[i]->hsb_version = 1; /* shouldn't be zero */
+ new_bkts[i]->hsb_depmax = -1; /* unknown */
bd.bd_bucket = new_bkts[i];
cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
INIT_HLIST_HEAD(hhead);
@@ -950,9 +950,9 @@ static int cfs_hash_dep_print(struct cfs_workitem *wi)
int bits;
spin_lock(&hs->hs_dep_lock);
- dep = hs->hs_dep_max;
- bkt = hs->hs_dep_bkt;
- off = hs->hs_dep_off;
+ dep = hs->hs_dep_max;
+ bkt = hs->hs_dep_bkt;
+ off = hs->hs_dep_off;
bits = hs->hs_dep_bits;
spin_unlock(&hs->hs_dep_lock);
@@ -976,7 +976,7 @@ static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
return;
spin_lock(&hs->hs_dep_lock);
- while (hs->hs_dep_bits != 0) {
+ while (hs->hs_dep_bits) {
spin_unlock(&hs->hs_dep_lock);
cond_resched();
spin_lock(&hs->hs_dep_lock);
@@ -992,10 +992,10 @@ static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
struct cfs_hash *
-cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
- unsigned bkt_bits, unsigned extra_bytes,
- unsigned min_theta, unsigned max_theta,
- struct cfs_hash_ops *ops, unsigned flags)
+cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits,
+ unsigned int bkt_bits, unsigned int extra_bytes,
+ unsigned int min_theta, unsigned int max_theta,
+ struct cfs_hash_ops *ops, unsigned int flags)
{
struct cfs_hash *hs;
int len;
@@ -1010,18 +1010,17 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
LASSERT(ops->hs_get);
LASSERT(ops->hs_put_locked);
- if ((flags & CFS_HASH_REHASH) != 0)
+ if (flags & CFS_HASH_REHASH)
flags |= CFS_HASH_COUNTER; /* must have counter */
LASSERT(cur_bits > 0);
LASSERT(cur_bits >= bkt_bits);
LASSERT(max_bits >= cur_bits && max_bits < 31);
- LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
- LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
- (flags & CFS_HASH_NO_LOCK) == 0));
- LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0, ops->hs_keycpy));
+ LASSERT(ergo(!(flags & CFS_HASH_REHASH), cur_bits == max_bits));
+ LASSERT(ergo(flags & CFS_HASH_REHASH, !(flags & CFS_HASH_NO_LOCK)));
+ LASSERT(ergo(flags & CFS_HASH_REHASH_KEY, ops->hs_keycpy));
- len = (flags & CFS_HASH_BIGNAME) == 0 ?
+ len = !(flags & CFS_HASH_BIGNAME) ?
CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
if (!hs)
@@ -1036,12 +1035,12 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
cfs_hash_lock_setup(hs);
cfs_hash_hlist_setup(hs);
- hs->hs_cur_bits = (__u8)cur_bits;
- hs->hs_min_bits = (__u8)cur_bits;
- hs->hs_max_bits = (__u8)max_bits;
- hs->hs_bkt_bits = (__u8)bkt_bits;
+ hs->hs_cur_bits = (u8)cur_bits;
+ hs->hs_min_bits = (u8)cur_bits;
+ hs->hs_max_bits = (u8)max_bits;
+ hs->hs_bkt_bits = (u8)bkt_bits;
- hs->hs_ops = ops;
+ hs->hs_ops = ops;
hs->hs_extra_bytes = extra_bytes;
hs->hs_rehash_bits = 0;
cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
@@ -1107,12 +1106,12 @@ cfs_hash_destroy(struct cfs_hash *hs)
cfs_hash_exit(hs, hnode);
}
}
- LASSERT(bd.bd_bucket->hsb_count == 0);
+ LASSERT(!bd.bd_bucket->hsb_count);
cfs_hash_bd_unlock(hs, &bd, 1);
cond_resched();
}
- LASSERT(atomic_read(&hs->hs_count) == 0);
+ LASSERT(!atomic_read(&hs->hs_count));
cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
0, CFS_HASH_NBKT(hs));
@@ -1216,7 +1215,7 @@ cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
struct cfs_hash_bd bds[2];
int bits = 0;
- LASSERT(hlist_unhashed(hnode));
+ LASSERTF(hlist_unhashed(hnode), "hnode = %p\n", hnode);
cfs_hash_lock(hs, 0);
cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
@@ -1293,7 +1292,7 @@ cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
}
if (hnode) {
- obj = cfs_hash_object(hs, hnode);
+ obj = cfs_hash_object(hs, hnode);
bits = cfs_hash_rehash_bits(hs);
}
@@ -1388,7 +1387,7 @@ cfs_hash_for_each_exit(struct cfs_hash *hs)
bits = cfs_hash_rehash_bits(hs);
cfs_hash_unlock(hs, 1);
/* NB: it's a race on cfs_hash::hs_iterating, see above */
- if (remained == 0)
+ if (!remained)
hs->hs_iterating = 0;
if (bits > 0) {
cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
@@ -1406,14 +1405,14 @@ cfs_hash_for_each_exit(struct cfs_hash *hs)
* . if @removal_safe is true, user can remove the current item by
* cfs_hash_bd_del_locked
*/
-static __u64
+static u64
cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
void *data, int remove_safe)
{
struct hlist_node *hnode;
struct hlist_node *pos;
struct cfs_hash_bd bd;
- __u64 count = 0;
+ u64 count = 0;
int excl = !!remove_safe;
int loop = 0;
int i;
@@ -1526,7 +1525,7 @@ cfs_hash_is_empty(struct cfs_hash *hs)
}
EXPORT_SYMBOL(cfs_hash_is_empty);
-__u64
+u64
cfs_hash_size_get(struct cfs_hash *hs)
{
return cfs_hash_with_counter(hs) ?
@@ -1552,26 +1551,33 @@ EXPORT_SYMBOL(cfs_hash_size_get);
*/
static int
cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
- void *data)
+ void *data, int start)
{
struct hlist_node *hnode;
struct hlist_node *tmp;
struct cfs_hash_bd bd;
- __u32 version;
+ u32 version;
int count = 0;
int stop_on_change;
- int rc;
+ int end = -1;
+ int rc = 0;
int i;
stop_on_change = cfs_hash_with_rehash_key(hs) ||
!cfs_hash_with_no_itemref(hs) ||
!hs->hs_ops->hs_put_locked;
cfs_hash_lock(hs, 0);
+again:
LASSERT(!cfs_hash_is_rehashing(hs));
cfs_hash_for_each_bucket(hs, &bd, i) {
struct hlist_head *hhead;
+ if (i < start)
+ continue;
+ else if (end > 0 && i >= end)
+ break;
+
cfs_hash_bd_lock(hs, &bd, 0);
version = cfs_hash_bd_version_get(&bd);
@@ -1611,14 +1617,19 @@ cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
if (rc) /* callback wants to break iteration */
break;
}
- cfs_hash_unlock(hs, 0);
+ if (start > 0 && !rc) {
+ end = start;
+ start = 0;
+ goto again;
+ }
+ cfs_hash_unlock(hs, 0);
return count;
}
int
cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
- void *data)
+ void *data, int start)
{
if (cfs_hash_with_no_lock(hs) ||
cfs_hash_with_rehash_key(hs) ||
@@ -1630,7 +1641,7 @@ cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
return -EOPNOTSUPP;
cfs_hash_for_each_enter(hs);
- cfs_hash_for_each_relax(hs, func, data);
+ cfs_hash_for_each_relax(hs, func, data, start);
cfs_hash_for_each_exit(hs);
return 0;
@@ -1652,7 +1663,7 @@ int
cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
void *data)
{
- unsigned i = 0;
+ unsigned int i = 0;
if (cfs_hash_with_no_lock(hs))
return -EOPNOTSUPP;
@@ -1662,7 +1673,7 @@ cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
return -EOPNOTSUPP;
cfs_hash_for_each_enter(hs);
- while (cfs_hash_for_each_relax(hs, func, data)) {
+ while (cfs_hash_for_each_relax(hs, func, data, 0)) {
CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
hs->hs_name, i++);
}
@@ -1672,7 +1683,7 @@ cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
EXPORT_SYMBOL(cfs_hash_for_each_empty);
void
-cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
+cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned int hindex,
cfs_hash_for_each_cb_t func, void *data)
{
struct hlist_head *hhead;
@@ -1704,7 +1715,7 @@ EXPORT_SYMBOL(cfs_hash_hlist_for_each);
* the passed callback @func and pass to it as an argument each hash
* item and the private @data. During the callback the bucket lock
* is held so the callback must never sleep.
- */
+ */
void
cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
cfs_hash_for_each_cb_t func, void *data)
@@ -1936,7 +1947,7 @@ out:
/* can't refer to @hs anymore because it could be destroyed */
if (bkts)
cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
- if (rc != 0)
+ if (rc)
CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
/* return 1 only if cfs_wi_exit is called */
return rc == -ESRCH;
@@ -2005,7 +2016,7 @@ cfs_hash_full_bkts(struct cfs_hash *hs)
if (!hs->hs_rehash_buckets)
return hs->hs_buckets;
- LASSERT(hs->hs_rehash_bits != 0);
+ LASSERT(hs->hs_rehash_bits);
return hs->hs_rehash_bits > hs->hs_cur_bits ?
hs->hs_rehash_buckets : hs->hs_buckets;
}
@@ -2017,7 +2028,7 @@ cfs_hash_full_nbkt(struct cfs_hash *hs)
if (!hs->hs_rehash_buckets)
return CFS_HASH_NBKT(hs);
- LASSERT(hs->hs_rehash_bits != 0);
+ LASSERT(hs->hs_rehash_bits);
return hs->hs_rehash_bits > hs->hs_cur_bits ?
CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
}
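Aside: the most substantive change in hash.c above is that cfs_hash_for_each_relax() and cfs_hash_for_each_nolock() now take a start bucket and wrap around, so a caller can resume an interrupted walk and still cover every bucket once. A minimal userspace sketch of that iteration pattern follows; the bucket array and all function names are stand-ins, not the libcfs hash implementation.

#include <stdio.h>

typedef int (*visit_fn)(int bucket, void *data);

/* Walk buckets [start, nbuckets), then wrap and finish [0, start)
 * unless the callback asked to stop.  Returns the number visited. */
static int for_each_from(int nbuckets, int start, visit_fn fn, void *data)
{
	int end = -1;        /* -1: no wrap limit yet */
	int count = 0;
	int rc = 0;
	int i;

again:
	for (i = 0; i < nbuckets; i++) {
		if (i < start)
			continue;
		else if (end > 0 && i >= end)
			break;
		count++;
		rc = fn(i, data);
		if (rc)              /* callback wants to break iteration */
			break;
	}
	if (start > 0 && !rc) {      /* wrap: now cover the skipped prefix */
		end = start;
		start = 0;
		goto again;
	}
	return count;
}

static int print_bucket(int bucket, void *data)
{
	(void)data;
	printf("bucket %d\n", bucket);
	return 0;
}

int main(void)
{
	/* visits 5..7 first, then wraps to 0..4 */
	return for_each_from(8, 5, print_bucket, NULL) == 8 ? 0 : 1;
}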
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
index 33352af6c27f..55caa19def51 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(cfs_cpt_table_free);
int
cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
{
- int rc;
+ int rc;
rc = snprintf(buf, len, "%d\t: %d\n", 0, 0);
len -= rc;
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
index 83543f928279..1967b97c4afc 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
@@ -52,9 +52,9 @@ struct cfs_percpt_lock *
cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
struct lock_class_key *keys)
{
- struct cfs_percpt_lock *pcl;
- spinlock_t *lock;
- int i;
+ struct cfs_percpt_lock *pcl;
+ spinlock_t *lock;
+ int i;
/* NB: cptab can be NULL, pcl will be for HW CPUs on that case */
LIBCFS_ALLOC(pcl, sizeof(*pcl));
@@ -73,7 +73,7 @@ cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
cfs_percpt_for_each(lock, i, pcl->pcl_locks) {
spin_lock_init(lock);
- if (keys != NULL)
+ if (keys)
lockdep_set_class(lock, &keys[i]);
}
@@ -94,8 +94,8 @@ void
cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
__acquires(pcl->pcl_locks)
{
- int ncpt = cfs_cpt_number(pcl->pcl_cptab);
- int i;
+ int ncpt = cfs_cpt_number(pcl->pcl_cptab);
+ int i;
LASSERT(index >= CFS_PERCPT_LOCK_EX && index < ncpt);
@@ -114,7 +114,7 @@ cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
/* exclusive lock request */
for (i = 0; i < ncpt; i++) {
spin_lock(pcl->pcl_locks[i]);
- if (i == 0) {
+ if (!i) {
LASSERT(!pcl->pcl_locked);
/* nobody should take private lock after this
* so I wouldn't starve for too long time
@@ -130,8 +130,8 @@ void
cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
__releases(pcl->pcl_locks)
{
- int ncpt = cfs_cpt_number(pcl->pcl_cptab);
- int i;
+ int ncpt = cfs_cpt_number(pcl->pcl_cptab);
+ int i;
index = ncpt == 1 ? 0 : index;
@@ -141,7 +141,7 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
}
for (i = ncpt - 1; i >= 0; i--) {
- if (i == 0) {
+ if (!i) {
LASSERT(pcl->pcl_locked);
pcl->pcl_locked = 0;
}
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
index d0e81bb41cdc..ef085ba23194 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
@@ -43,8 +43,8 @@ struct cfs_var_array {
void
cfs_percpt_free(void *vars)
{
- struct cfs_var_array *arr;
- int i;
+ struct cfs_var_array *arr;
+ int i;
arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
@@ -72,9 +72,9 @@ EXPORT_SYMBOL(cfs_percpt_free);
void *
cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size)
{
- struct cfs_var_array *arr;
- int count;
- int i;
+ struct cfs_var_array *arr;
+ int count;
+ int i;
count = cfs_cpt_number(cptab);
@@ -120,8 +120,8 @@ EXPORT_SYMBOL(cfs_percpt_number);
void
cfs_array_free(void *vars)
{
- struct cfs_var_array *arr;
- int i;
+ struct cfs_var_array *arr;
+ int i;
arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
@@ -144,15 +144,15 @@ EXPORT_SYMBOL(cfs_array_free);
void *
cfs_array_alloc(int count, unsigned int size)
{
- struct cfs_var_array *arr;
- int i;
+ struct cfs_var_array *arr;
+ int i;
LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count]));
if (!arr)
return NULL;
- arr->va_count = count;
- arr->va_size = size;
+ arr->va_count = count;
+ arr->va_size = size;
for (i = 0; i < count; i++) {
LIBCFS_ALLOC(arr->va_ptrs[i], size);
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
index 56a614d7713b..02de1ee720fd 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
@@ -79,7 +79,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
for (i = 0; i < 32; i++) {
debugstr = bit2str(i);
if (debugstr && strlen(debugstr) == len &&
- strncasecmp(str, debugstr, len) == 0) {
+ !strncasecmp(str, debugstr, len)) {
if (op == '-')
newmask &= ~(1 << i);
else
@@ -89,7 +89,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
}
}
if (!found && len == 3 &&
- (strncasecmp(str, "ALL", len) == 0)) {
+ !strncasecmp(str, "ALL", len)) {
if (op == '-')
newmask = minmask;
else
@@ -112,7 +112,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
char *cfs_firststr(char *str, size_t size)
{
size_t i = 0;
- char *end;
+ char *end;
/* trim leading spaces */
while (i < size && *str && isspace(*str)) {
@@ -182,7 +182,7 @@ cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res)
next->ls_len--;
}
- if (next->ls_len == 0) /* whitespaces only */
+ if (!next->ls_len) /* whitespaces only */
return 0;
if (*next->ls_str == delim) {
@@ -222,8 +222,8 @@ EXPORT_SYMBOL(cfs_gettok);
* \retval 0 otherwise
*/
int
-cfs_str2num_check(char *str, int nob, unsigned *num,
- unsigned min, unsigned max)
+cfs_str2num_check(char *str, int nob, unsigned int *num,
+ unsigned int min, unsigned int max)
{
bool all_numbers = true;
char *endp, cache;
@@ -273,11 +273,11 @@ EXPORT_SYMBOL(cfs_str2num_check);
* -ENOMEM will be returned.
*/
static int
-cfs_range_expr_parse(struct cfs_lstr *src, unsigned min, unsigned max,
+cfs_range_expr_parse(struct cfs_lstr *src, unsigned int min, unsigned int max,
int bracketed, struct cfs_range_expr **expr)
{
- struct cfs_range_expr *re;
- struct cfs_lstr tok;
+ struct cfs_range_expr *re;
+ struct cfs_lstr tok;
LIBCFS_ALLOC(re, sizeof(*re));
if (!re)
@@ -391,7 +391,7 @@ cfs_expr_list_print(char *buffer, int count, struct cfs_expr_list *expr_list)
i += scnprintf(buffer + i, count - i, "[");
list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
- if (j++ != 0)
+ if (j++)
i += scnprintf(buffer + i, count - i, ",");
i += cfs_range_expr_print(buffer + i, count - i, expr,
numexprs > 1);
@@ -411,13 +411,13 @@ EXPORT_SYMBOL(cfs_expr_list_print);
* \retval 0 otherwise
*/
int
-cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list)
+cfs_expr_list_match(u32 value, struct cfs_expr_list *expr_list)
{
- struct cfs_range_expr *expr;
+ struct cfs_range_expr *expr;
list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
if (value >= expr->re_lo && value <= expr->re_hi &&
- ((value - expr->re_lo) % expr->re_stride) == 0)
+ !((value - expr->re_lo) % expr->re_stride))
return 1;
}
@@ -433,21 +433,21 @@ EXPORT_SYMBOL(cfs_expr_list_match);
* \retval < 0 for failure
*/
int
-cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp)
+cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, u32 **valpp)
{
- struct cfs_range_expr *expr;
- __u32 *val;
- int count = 0;
- int i;
+ struct cfs_range_expr *expr;
+ u32 *val;
+ int count = 0;
+ int i;
list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
for (i = expr->re_lo; i <= expr->re_hi; i++) {
- if (((i - expr->re_lo) % expr->re_stride) == 0)
+ if (!((i - expr->re_lo) % expr->re_stride))
count++;
}
}
- if (count == 0) /* empty expression list */
+ if (!count) /* empty expression list */
return 0;
if (count > max) {
@@ -463,7 +463,7 @@ cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp)
count = 0;
list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
for (i = expr->re_lo; i <= expr->re_hi; i++) {
- if (((i - expr->re_lo) % expr->re_stride) == 0)
+ if (!((i - expr->re_lo) % expr->re_stride))
val[count++] = i;
}
}
@@ -501,13 +501,13 @@ EXPORT_SYMBOL(cfs_expr_list_free);
* \retval -errno otherwise
*/
int
-cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
+cfs_expr_list_parse(char *str, int len, unsigned int min, unsigned int max,
struct cfs_expr_list **elpp)
{
- struct cfs_expr_list *expr_list;
- struct cfs_range_expr *expr;
- struct cfs_lstr src;
- int rc;
+ struct cfs_expr_list *expr_list;
+ struct cfs_range_expr *expr;
+ struct cfs_lstr src;
+ int rc;
LIBCFS_ALLOC(expr_list, sizeof(*expr_list));
if (!expr_list)
@@ -533,18 +533,18 @@ cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
}
rc = cfs_range_expr_parse(&tok, min, max, 1, &expr);
- if (rc != 0)
+ if (rc)
break;
list_add_tail(&expr->re_link, &expr_list->el_exprs);
}
} else {
rc = cfs_range_expr_parse(&src, min, max, 0, &expr);
- if (rc == 0)
+ if (!rc)
list_add_tail(&expr->re_link, &expr_list->el_exprs);
}
- if (rc != 0)
+ if (rc)
cfs_expr_list_free(expr_list);
else
*elpp = expr_list;
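Aside: several of the libcfs_string.c hunks above touch the <lo-hi/stride> range expressions. The short userspace sketch below shows the matching rule that cfs_expr_list_match() applies: a value matches when it lies in [lo, hi] and is aligned to the stride. struct range_expr and expr_match() are invented names for the example.

#include <stdbool.h>
#include <stdio.h>

struct range_expr {
	unsigned int lo;
	unsigned int hi;
	unsigned int stride;
};

static bool expr_match(unsigned int value, const struct range_expr *e)
{
	return value >= e->lo && value <= e->hi &&
	       !((value - e->lo) % e->stride);
}

int main(void)
{
	struct range_expr e = { .lo = 2, .hi = 10, .stride = 2 };

	/* prints 1 0 0: 6 matches, 7 is off-stride, 12 is out of range */
	printf("%d %d %d\n", expr_match(6, &e), expr_match(7, &e),
	       expr_match(12, &e));
	return 0;
}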
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
index e8b1a61420de..6b9cf06e8df2 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
@@ -55,6 +55,8 @@ MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions");
* i.e: "N 0[0,1] 1[2,3]" the first character 'N' means numbers in bracket
* are NUMA node ID, number before bracket is CPU partition ID.
*
+ * i.e: "N", shortcut expression to create CPT from NUMA & CPU topology
+ *
* NB: If user specified cpu_pattern, cpu_npartitions will be ignored
*/
static char *cpu_pattern = "";
@@ -88,7 +90,7 @@ cfs_node_to_cpumask(int node, cpumask_t *mask)
void
cfs_cpt_table_free(struct cfs_cpt_table *cptab)
{
- int i;
+ int i;
if (cptab->ctb_cpu2cpt) {
LIBCFS_FREE(cptab->ctb_cpu2cpt,
@@ -126,7 +128,7 @@ struct cfs_cpt_table *
cfs_cpt_table_alloc(unsigned int ncpt)
{
struct cfs_cpt_table *cptab;
- int i;
+ int i;
LIBCFS_ALLOC(cptab, sizeof(*cptab));
if (!cptab)
@@ -177,10 +179,10 @@ EXPORT_SYMBOL(cfs_cpt_table_alloc);
int
cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
{
- char *tmp = buf;
- int rc = 0;
- int i;
- int j;
+ char *tmp = buf;
+ int rc = 0;
+ int i;
+ int j;
for (i = 0; i < cptab->ctb_nparts; i++) {
if (len > 0) {
@@ -271,7 +273,7 @@ EXPORT_SYMBOL(cfs_cpt_nodemask);
int
cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
- int node;
+ int node;
LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts);
@@ -311,8 +313,8 @@ EXPORT_SYMBOL(cfs_cpt_set_cpu);
void
cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
- int node;
- int i;
+ int node;
+ int i;
LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
@@ -371,9 +373,9 @@ EXPORT_SYMBOL(cfs_cpt_unset_cpu);
int
cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
{
- int i;
+ int i;
- if (cpumask_weight(mask) == 0 ||
+ if (!cpumask_weight(mask) ||
cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) {
CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n",
cpt);
@@ -392,7 +394,7 @@ EXPORT_SYMBOL(cfs_cpt_set_cpumask);
void
cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
{
- int i;
+ int i;
for_each_cpu(i, mask)
cfs_cpt_unset_cpu(cptab, cpt, i);
@@ -402,8 +404,8 @@ EXPORT_SYMBOL(cfs_cpt_unset_cpumask);
int
cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
{
- cpumask_t *mask;
- int rc;
+ cpumask_t *mask;
+ int rc;
if (node < 0 || node >= MAX_NUMNODES) {
CDEBUG(D_INFO,
@@ -449,7 +451,7 @@ EXPORT_SYMBOL(cfs_cpt_unset_node);
int
cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
{
- int i;
+ int i;
for_each_node_mask(i, *mask) {
if (!cfs_cpt_set_node(cptab, cpt, i))
@@ -463,7 +465,7 @@ EXPORT_SYMBOL(cfs_cpt_set_nodemask);
void
cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
{
- int i;
+ int i;
for_each_node_mask(i, *mask)
cfs_cpt_unset_node(cptab, cpt, i);
@@ -473,8 +475,8 @@ EXPORT_SYMBOL(cfs_cpt_unset_nodemask);
void
cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt)
{
- int last;
- int i;
+ int last;
+ int i;
if (cpt == CFS_CPT_ANY) {
last = cptab->ctb_nparts - 1;
@@ -493,10 +495,10 @@ EXPORT_SYMBOL(cfs_cpt_clear);
int
cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
{
- nodemask_t *mask;
- int weight;
- int rotor;
- int node;
+ nodemask_t *mask;
+ int weight;
+ int rotor;
+ int node;
/* convert CPU partition ID to HW node id */
@@ -514,7 +516,7 @@ cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
rotor %= weight;
for_each_node_mask(node, *mask) {
- if (rotor-- == 0)
+ if (!rotor--)
return node;
}
@@ -526,8 +528,8 @@ EXPORT_SYMBOL(cfs_cpt_spread_node);
int
cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
{
- int cpu = smp_processor_id();
- int cpt = cptab->ctb_cpu2cpt[cpu];
+ int cpu = smp_processor_id();
+ int cpt = cptab->ctb_cpu2cpt[cpu];
if (cpt < 0) {
if (!remap)
@@ -555,10 +557,10 @@ EXPORT_SYMBOL(cfs_cpt_of_cpu);
int
cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
{
- cpumask_t *cpumask;
- nodemask_t *nodemask;
- int rc;
- int i;
+ cpumask_t *cpumask;
+ nodemask_t *nodemask;
+ int rc;
+ int i;
LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
@@ -582,7 +584,7 @@ cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
rc = set_cpus_allowed_ptr(current, cpumask);
set_mems_allowed(*nodemask);
- if (rc == 0)
+ if (!rc)
schedule(); /* switch to allowed CPU */
return rc;
@@ -601,10 +603,10 @@ static int
cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
cpumask_t *node, int number)
{
- cpumask_t *socket = NULL;
- cpumask_t *core = NULL;
- int rc = 0;
- int cpu;
+ cpumask_t *socket = NULL;
+ cpumask_t *core = NULL;
+ int rc = 0;
+ int cpu;
LASSERT(number > 0);
@@ -638,7 +640,7 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
LASSERT(!cpumask_empty(socket));
while (!cpumask_empty(socket)) {
- int i;
+ int i;
/* get cpumask for hts in the same core */
cpumask_copy(core, topology_sibling_cpumask(cpu));
@@ -656,14 +658,14 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
goto out;
}
- if (--number == 0)
+ if (!--number)
goto out;
}
cpu = cpumask_first(socket);
}
}
- out:
+out:
if (socket)
LIBCFS_FREE(socket, cpumask_size());
if (core)
@@ -676,9 +678,9 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
static unsigned int
cfs_cpt_num_estimate(void)
{
- unsigned nnode = num_online_nodes();
- unsigned ncpu = num_online_cpus();
- unsigned ncpt;
+ unsigned int nnode = num_online_nodes();
+ unsigned int ncpu = num_online_cpus();
+ unsigned int ncpt;
if (ncpu <= CPT_WEIGHT_MIN) {
ncpt = 1;
@@ -703,14 +705,14 @@ cfs_cpt_num_estimate(void)
ncpt = nnode;
- out:
+out:
#if (BITS_PER_LONG == 32)
/* config many CPU partitions on 32-bit system could consume
* too much memory
*/
ncpt = min(2U, ncpt);
#endif
- while (ncpu % ncpt != 0)
+ while (ncpu % ncpt)
ncpt--; /* worst case is 1 */
return ncpt;
@@ -720,11 +722,11 @@ static struct cfs_cpt_table *
cfs_cpt_table_create(int ncpt)
{
struct cfs_cpt_table *cptab = NULL;
- cpumask_t *mask = NULL;
- int cpt = 0;
- int num;
- int rc;
- int i;
+ cpumask_t *mask = NULL;
+ int cpt = 0;
+ int num;
+ int rc;
+ int i;
rc = cfs_cpt_num_estimate();
if (ncpt <= 0)
@@ -735,7 +737,7 @@ cfs_cpt_table_create(int ncpt)
ncpt, rc);
}
- if (num_online_cpus() % ncpt != 0) {
+ if (num_online_cpus() % ncpt) {
CERROR("CPU number %d is not multiple of cpu_npartition %d, please try different cpu_npartitions value or set pattern string by cpu_pattern=STRING\n",
(int)num_online_cpus(), ncpt);
goto failed;
@@ -748,7 +750,7 @@ cfs_cpt_table_create(int ncpt)
}
num = num_online_cpus() / ncpt;
- if (num == 0) {
+ if (!num) {
CERROR("CPU changed while setting CPU partition\n");
goto failed;
}
@@ -764,7 +766,7 @@ cfs_cpt_table_create(int ncpt)
while (!cpumask_empty(mask)) {
struct cfs_cpu_partition *part;
- int n;
+ int n;
/*
* Each emulated NUMA node has all allowed CPUs in
@@ -817,27 +819,36 @@ cfs_cpt_table_create(int ncpt)
static struct cfs_cpt_table *
cfs_cpt_table_create_pattern(char *pattern)
{
- struct cfs_cpt_table *cptab;
- char *str = pattern;
- int node = 0;
- int high;
- int ncpt;
- int c;
-
- for (ncpt = 0;; ncpt++) { /* quick scan bracket */
- str = strchr(str, '[');
- if (!str)
- break;
- str++;
- }
+ struct cfs_cpt_table *cptab;
+ char *str;
+ int node = 0;
+ int high;
+ int ncpt = 0;
+ int cpt;
+ int rc;
+ int c;
+ int i;
str = cfs_trimwhite(pattern);
if (*str == 'n' || *str == 'N') {
pattern = str + 1;
- node = 1;
+ if (*pattern != '\0') {
+ node = 1;
+ } else { /* shortcut to create CPT from NUMA & CPU topology */
+ node = -1;
+ ncpt = num_online_nodes();
+ }
+ }
+
+ if (!ncpt) { /* scan for brackets, each of which marks a partition */
+ for (str = pattern;; str++, ncpt++) {
+ str = strchr(str, '[');
+ if (!str)
+ break;
+ }
}
- if (ncpt == 0 ||
+ if (!ncpt ||
(node && ncpt > num_online_nodes()) ||
(!node && ncpt > num_online_cpus())) {
CERROR("Invalid pattern %s, or too many partitions %d\n",
@@ -845,25 +856,39 @@ cfs_cpt_table_create_pattern(char *pattern)
return NULL;
}
- high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1;
-
cptab = cfs_cpt_table_alloc(ncpt);
if (!cptab) {
CERROR("Failed to allocate cpu partition table\n");
return NULL;
}
+ if (node < 0) { /* shortcut to create CPT from NUMA & CPU topology */
+ cpt = 0;
+
+ for_each_online_node(i) {
+ if (cpt >= ncpt) {
+ CERROR("CPU changed while setting CPU partition table, %d/%d\n",
+ cpt, ncpt);
+ goto failed;
+ }
+
+ rc = cfs_cpt_set_node(cptab, cpt++, i);
+ if (!rc)
+ goto failed;
+ }
+ return cptab;
+ }
+
+ high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1;
+
for (str = cfs_trimwhite(pattern), c = 0;; c++) {
- struct cfs_range_expr *range;
- struct cfs_expr_list *el;
- char *bracket = strchr(str, '[');
- int cpt;
- int rc;
- int i;
- int n;
+ struct cfs_range_expr *range;
+ struct cfs_expr_list *el;
+ char *bracket = strchr(str, '[');
+ int n;
if (!bracket) {
- if (*str != 0) {
+ if (*str) {
CERROR("Invalid pattern %s\n", str);
goto failed;
}
@@ -886,7 +911,7 @@ cfs_cpt_table_create_pattern(char *pattern)
goto failed;
}
- if (cfs_cpt_weight(cptab, cpt) != 0) {
+ if (cfs_cpt_weight(cptab, cpt)) {
CERROR("Partition %d has already been set.\n", cpt);
goto failed;
}
@@ -905,14 +930,14 @@ cfs_cpt_table_create_pattern(char *pattern)
}
if (cfs_expr_list_parse(str, (bracket - str) + 1,
- 0, high, &el) != 0) {
+ 0, high, &el)) {
CERROR("Can't parse number range: %s\n", str);
goto failed;
}
list_for_each_entry(range, &el->el_exprs, re_link) {
for (i = range->re_lo; i <= range->re_hi; i++) {
- if ((i - range->re_lo) % range->re_stride != 0)
+ if ((i - range->re_lo) % range->re_stride)
continue;
rc = node ? cfs_cpt_set_node(cptab, cpt, i) :
@@ -945,8 +970,8 @@ cfs_cpt_table_create_pattern(char *pattern)
static int
cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
- unsigned int cpu = (unsigned long)hcpu;
- bool warn;
+ unsigned int cpu = (unsigned long)hcpu;
+ bool warn;
switch (action) {
case CPU_DEAD:
@@ -1019,7 +1044,7 @@ cfs_cpu_init(void)
register_hotcpu_notifier(&cfs_cpu_notifier);
#endif
- if (*cpu_pattern != 0) {
+ if (*cpu_pattern) {
cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern);
if (!cfs_cpt_table) {
CERROR("Failed to create cptab from pattern %s\n",
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
index 7f56d2c9dd00..68e34b4a76c9 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
@@ -64,7 +64,7 @@ static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
unsigned int key_len)
{
struct crypto_ahash *tfm;
- int err = 0;
+ int err = 0;
*type = cfs_crypto_hash_type(hash_alg);
@@ -93,12 +93,12 @@ static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
if (key)
err = crypto_ahash_setkey(tfm, key, key_len);
- else if ((*type)->cht_key != 0)
+ else if ((*type)->cht_key)
err = crypto_ahash_setkey(tfm,
(unsigned char *)&((*type)->cht_key),
(*type)->cht_size);
- if (err != 0) {
+ if (err) {
ahash_request_free(*req);
crypto_free_ahash(tfm);
return err;
@@ -147,16 +147,16 @@ int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
unsigned char *key, unsigned int key_len,
unsigned char *hash, unsigned int *hash_len)
{
- struct scatterlist sl;
+ struct scatterlist sl;
struct ahash_request *req;
- int err;
- const struct cfs_crypto_hash_type *type;
+ int err;
+ const struct cfs_crypto_hash_type *type;
- if (!buf || buf_len == 0 || !hash_len)
+ if (!buf || !buf_len || !hash_len)
return -EINVAL;
err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
- if (err != 0)
+ if (err)
return err;
if (!hash || *hash_len < type->cht_size) {
@@ -177,7 +177,7 @@ int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
EXPORT_SYMBOL(cfs_crypto_hash_digest);
/**
- * Allocate and initialize desriptor for hash algorithm.
+ * Allocate and initialize descriptor for hash algorithm.
*
* This should be used to initialize a hash descriptor for multiple calls
* to a single hash function when computing the hash across multiple
@@ -198,8 +198,8 @@ cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg,
unsigned char *key, unsigned int key_len)
{
struct ahash_request *req;
- int err;
- const struct cfs_crypto_hash_type *type;
+ int err;
+ const struct cfs_crypto_hash_type *type;
err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
@@ -273,7 +273,7 @@ EXPORT_SYMBOL(cfs_crypto_hash_update);
int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
unsigned char *hash, unsigned int *hash_len)
{
- int err;
+ int err;
struct ahash_request *req = (void *)hdesc;
int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
@@ -312,8 +312,8 @@ static void cfs_crypto_performance_test(enum cfs_crypto_hash_alg hash_alg)
{
int buf_len = max(PAGE_SIZE, 1048576UL);
void *buf;
- unsigned long start, end;
- int bcount, err = 0;
+ unsigned long start, end;
+ int bcount, err = 0;
struct page *page;
unsigned char hash[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
unsigned int hash_len = sizeof(hash);
@@ -358,7 +358,7 @@ out_err:
CDEBUG(D_INFO, "Crypto hash algorithm %s test error: rc = %d\n",
cfs_crypto_hash_name(hash_alg), err);
} else {
- unsigned long tmp;
+ unsigned long tmp;
tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) *
1000) / (1024 * 1024);
@@ -440,6 +440,6 @@ int cfs_crypto_register(void)
*/
void cfs_crypto_unregister(void)
{
- if (adler32 == 0)
+ if (!adler32)
cfs_crypto_adler32_unregister();
}
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
index 18e8cd4d8758..d0b3aa80cfa6 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
@@ -1,4 +1,4 @@
- /*
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
index 435b784c52f8..39a72e3f0c18 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
@@ -57,7 +57,6 @@
#include <linux/kallsyms.h>
-char lnet_upcall[1024] = "/usr/lib/lustre/lnet_upcall";
char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall";
/**
@@ -68,11 +67,12 @@ char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall";
void libcfs_run_debug_log_upcall(char *file)
{
char *argv[3];
- int rc;
- char *envp[] = {
+ int rc;
+ static const char * const envp[] = {
"HOME=/",
"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
- NULL};
+ NULL
+ };
argv[0] = lnet_debug_log_upcall;
@@ -81,7 +81,7 @@ void libcfs_run_debug_log_upcall(char *file)
argv[2] = NULL;
- rc = call_usermodehelper(argv[0], argv, envp, 1);
+ rc = call_usermodehelper(argv[0], argv, (char **)envp, 1);
if (rc < 0 && rc != -ENOENT) {
CERROR("Error %d invoking LNET debug log upcall %s %s; check /sys/kernel/debug/lnet/debug_log_upcall\n",
rc, argv[0], argv[1]);
@@ -91,57 +91,6 @@ void libcfs_run_debug_log_upcall(char *file)
}
}
-void libcfs_run_upcall(char **argv)
-{
- int rc;
- int argc;
- char *envp[] = {
- "HOME=/",
- "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
- NULL};
-
- argv[0] = lnet_upcall;
- argc = 1;
- while (argv[argc])
- argc++;
-
- LASSERT(argc >= 2);
-
- rc = call_usermodehelper(argv[0], argv, envp, 1);
- if (rc < 0 && rc != -ENOENT) {
- CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; check /sys/kernel/debug/lnet/upcall\n",
- rc, argv[0], argv[1],
- argc < 3 ? "" : ",", argc < 3 ? "" : argv[2],
- argc < 4 ? "" : ",", argc < 4 ? "" : argv[3],
- argc < 5 ? "" : ",", argc < 5 ? "" : argv[4],
- argc < 6 ? "" : ",...");
- } else {
- CDEBUG(D_HA, "Invoked LNET upcall %s %s%s%s%s%s%s%s%s\n",
- argv[0], argv[1],
- argc < 3 ? "" : ",", argc < 3 ? "" : argv[2],
- argc < 4 ? "" : ",", argc < 4 ? "" : argv[3],
- argc < 5 ? "" : ",", argc < 5 ? "" : argv[4],
- argc < 6 ? "" : ",...");
- }
-}
-
-void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *msgdata)
-{
- char *argv[6];
- char buf[32];
-
- snprintf(buf, sizeof(buf), "%d", msgdata->msg_line);
-
- argv[1] = "LBUG";
- argv[2] = (char *)msgdata->msg_file;
- argv[3] = (char *)msgdata->msg_fn;
- argv[4] = buf;
- argv[5] = NULL;
-
- libcfs_run_upcall(argv);
-}
-EXPORT_SYMBOL(libcfs_run_lbug_upcall);
-
/* coverity[+kill] */
void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
{
@@ -156,7 +105,6 @@ void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
dump_stack();
if (!libcfs_panic_on_lbug)
libcfs_debug_dumplog();
- libcfs_run_lbug_upcall(msgdata);
if (libcfs_panic_on_lbug)
panic("LBUG");
set_task_state(current, TASK_UNINTERRUPTIBLE);
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
index 38308f8b6aae..3f5d58babc2f 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
@@ -83,7 +83,7 @@ static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n");
return true;
}
- if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) {
+ if ((u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) {
CERROR("LIBCFS ioctl: packlen != ioc_len\n");
return true;
}
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
index 291d286eab48..cf902154f0aa 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
@@ -45,8 +45,8 @@
sigset_t
cfs_block_allsigs(void)
{
- unsigned long flags;
- sigset_t old;
+ unsigned long flags;
+ sigset_t old;
spin_lock_irqsave(&current->sighand->siglock, flags);
old = current->blocked;
@@ -60,8 +60,8 @@ EXPORT_SYMBOL(cfs_block_allsigs);
sigset_t cfs_block_sigs(unsigned long sigs)
{
- unsigned long flags;
- sigset_t old;
+ unsigned long flags;
+ sigset_t old;
spin_lock_irqsave(&current->sighand->siglock, flags);
old = current->blocked;
@@ -91,7 +91,7 @@ EXPORT_SYMBOL(cfs_block_sigsinv);
void
cfs_restore_sigs(sigset_t old)
{
- unsigned long flags;
+ unsigned long flags;
spin_lock_irqsave(&current->sighand->siglock, flags);
current->blocked = old;
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
index 8b551d2708ba..75eb84e7f0f8 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
@@ -49,8 +49,8 @@ static DECLARE_RWSEM(cfs_tracefile_sem);
int cfs_tracefile_init_arch(void)
{
- int i;
- int j;
+ int i;
+ int j;
struct cfs_trace_cpu_data *tcd;
/* initialize trace_data */
@@ -85,14 +85,14 @@ int cfs_tracefile_init_arch(void)
out:
cfs_tracefile_fini_arch();
- printk(KERN_ERR "lnet: Not enough memory\n");
+ pr_err("lnet: Not enough memory\n");
return -ENOMEM;
}
void cfs_tracefile_fini_arch(void)
{
- int i;
- int j;
+ int i;
+ int j;
for (i = 0; i < num_possible_cpus(); i++)
for (j = 0; j < 3; j++) {
@@ -224,26 +224,26 @@ void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
{
char *prefix = "Lustre", *ptype = NULL;
- if ((mask & D_EMERG) != 0) {
+ if (mask & D_EMERG) {
prefix = dbghdr_to_err_string(hdr);
ptype = KERN_EMERG;
- } else if ((mask & D_ERROR) != 0) {
+ } else if (mask & D_ERROR) {
prefix = dbghdr_to_err_string(hdr);
ptype = KERN_ERR;
- } else if ((mask & D_WARNING) != 0) {
+ } else if (mask & D_WARNING) {
prefix = dbghdr_to_info_string(hdr);
ptype = KERN_WARNING;
- } else if ((mask & (D_CONSOLE | libcfs_printk)) != 0) {
+ } else if (mask & (D_CONSOLE | libcfs_printk)) {
prefix = dbghdr_to_info_string(hdr);
ptype = KERN_INFO;
}
- if ((mask & D_CONSOLE) != 0) {
- printk("%s%s: %.*s", ptype, prefix, len, buf);
+ if (mask & D_CONSOLE) {
+ pr_info("%s%s: %.*s", ptype, prefix, len, buf);
} else {
- printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
- hdr->ph_pid, hdr->ph_extern_pid, file, hdr->ph_line_num,
- fn, len, buf);
+ pr_info("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
+ hdr->ph_pid, hdr->ph_extern_pid, file,
+ hdr->ph_line_num, fn, len, buf);
}
}
diff --git a/drivers/staging/lustre/lnet/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c
index 86b4d25cad46..161e04226521 100644
--- a/drivers/staging/lustre/lnet/libcfs/module.c
+++ b/drivers/staging/lustre/lnet/libcfs/module.c
@@ -183,12 +183,12 @@ EXPORT_SYMBOL(lprocfs_call_handler);
static int __proc_dobitmasks(void *data, int write,
loff_t pos, void __user *buffer, int nob)
{
- const int tmpstrlen = 512;
- char *tmpstr;
- int rc;
+ const int tmpstrlen = 512;
+ char *tmpstr;
+ int rc;
unsigned int *mask = data;
- int is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0;
- int is_printk = (mask == &libcfs_printk) ? 1 : 0;
+ int is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0;
+ int is_printk = (mask == &libcfs_printk) ? 1 : 0;
rc = cfs_trace_allocate_string_buffer(&tmpstr, tmpstrlen);
if (rc < 0)
@@ -293,8 +293,8 @@ static int __proc_cpt_table(void *data, int write,
loff_t pos, void __user *buffer, int nob)
{
char *buf = NULL;
- int len = 4096;
- int rc = 0;
+ int len = 4096;
+ int rc = 0;
if (write)
return -EPERM;
@@ -365,14 +365,6 @@ static struct ctl_table lnet_table[] = {
.mode = 0444,
.proc_handler = &proc_cpt_table,
},
-
- {
- .procname = "upcall",
- .data = lnet_upcall,
- .maxlen = sizeof(lnet_upcall),
- .mode = 0644,
- .proc_handler = &proc_dostring,
- },
{
.procname = "debug_log_upcall",
.data = lnet_debug_log_upcall,
@@ -547,7 +539,7 @@ static int libcfs_init(void)
}
rc = cfs_cpu_init();
- if (rc != 0)
+ if (rc)
goto cleanup_debug;
rc = misc_register(&libcfs_dev);
@@ -566,7 +558,7 @@ static int libcfs_init(void)
rc = min(cfs_cpt_weight(cfs_cpt_table, CFS_CPT_ANY), 4);
rc = cfs_wi_sched_create("cfs_rh", cfs_cpt_table, CFS_CPT_ANY,
rc, &cfs_sched_rehash);
- if (rc != 0) {
+ if (rc) {
CERROR("Startup workitem scheduler: error: %d\n", rc);
goto cleanup_deregister;
}
diff --git a/drivers/staging/lustre/lnet/libcfs/prng.c b/drivers/staging/lustre/lnet/libcfs/prng.c
index a9bdb284fd15..21d5a3912c5f 100644
--- a/drivers/staging/lustre/lnet/libcfs/prng.c
+++ b/drivers/staging/lustre/lnet/libcfs/prng.c
@@ -33,7 +33,7 @@
* x(n)=a*x(n-1)+carry mod 2^16 and y(n)=b*y(n-1)+carry mod 2^16,
* number and carry packed within the same 32 bit integer.
* algorithm recommended by Marsaglia
-*/
+ */
#include "../../include/linux/libcfs/libcfs.h"
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index 1c7efdfaffcf..d7b29f8997c0 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -59,13 +59,13 @@ struct page_collection {
* ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
* only ->tcd_pages are spilled.
*/
- int pc_want_daemon_pages;
+ int pc_want_daemon_pages;
};
struct tracefiled_ctl {
struct completion tctl_start;
struct completion tctl_stop;
- wait_queue_head_t tctl_waitq;
+ wait_queue_head_t tctl_waitq;
pid_t tctl_pid;
atomic_t tctl_shutdown;
};
@@ -77,24 +77,24 @@ struct cfs_trace_page {
/*
* page itself
*/
- struct page *page;
+ struct page *page;
/*
* linkage into one of the lists in trace_data_union or
* page_collection
*/
- struct list_head linkage;
+ struct list_head linkage;
/*
* number of bytes used within this page
*/
- unsigned int used;
+ unsigned int used;
/*
* cpu that owns this page
*/
- unsigned short cpu;
+ unsigned short cpu;
/*
* type(context) of this page
*/
- unsigned short type;
+ unsigned short type;
};
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
@@ -108,7 +108,7 @@ cfs_tage_from_list(struct list_head *list)
static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
{
- struct page *page;
+ struct page *page;
struct cfs_trace_page *tage;
/* My caller is trying to free memory */
@@ -236,7 +236,7 @@ static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
INIT_LIST_HEAD(&pc.pc_pages);
list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
- if (pgcount-- == 0)
+ if (!pgcount--)
break;
list_move_tail(&tage->linkage, &pc.pc_pages);
@@ -278,7 +278,7 @@ int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
const char *format, ...)
{
va_list args;
- int rc;
+ int rc;
va_start(args, format);
rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
@@ -293,21 +293,21 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
const char *format2, ...)
{
struct cfs_trace_cpu_data *tcd = NULL;
- struct ptldebug_header header = {0};
- struct cfs_trace_page *tage;
+ struct ptldebug_header header = { 0 };
+ struct cfs_trace_page *tage;
/* string_buf is used only if tcd != NULL, and is always set then */
- char *string_buf = NULL;
- char *debug_buf;
- int known_size;
- int needed = 85; /* average message length */
- int max_nob;
- va_list ap;
- int depth;
- int i;
- int remain;
- int mask = msgdata->msg_mask;
- const char *file = kbasename(msgdata->msg_file);
- struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;
+ char *string_buf = NULL;
+ char *debug_buf;
+ int known_size;
+ int needed = 85; /* average message length */
+ int max_nob;
+ va_list ap;
+ int depth;
+ int i;
+ int remain;
+ int mask = msgdata->msg_mask;
+ const char *file = kbasename(msgdata->msg_file);
+ struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;
tcd = cfs_trace_get_tcd();
@@ -320,7 +320,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
if (!tcd) /* arch may not log in IRQ context */
goto console;
- if (tcd->tcd_cur_pages == 0)
+ if (!tcd->tcd_cur_pages)
header.ph_flags |= PH_FLAG_FIRST_RECORD;
if (tcd->tcd_shutting_down) {
@@ -423,7 +423,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
__LASSERT(tage->used <= PAGE_SIZE);
console:
- if ((mask & libcfs_printk) == 0) {
+ if (!(mask & libcfs_printk)) {
/* no console output requested */
if (tcd)
cfs_trace_put_tcd(tcd);
@@ -432,7 +432,7 @@ console:
if (cdls) {
if (libcfs_console_ratelimit &&
- cdls->cdls_next != 0 && /* not first time ever */
+ cdls->cdls_next && /* not first time ever */
!cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
/* skipping a console message */
cdls->cdls_count++;
@@ -489,7 +489,7 @@ console:
put_cpu();
}
- if (cdls && cdls->cdls_count != 0) {
+ if (cdls && cdls->cdls_count) {
string_buf = cfs_trace_get_console_buffer();
needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
@@ -535,9 +535,9 @@ panic_collect_pages(struct page_collection *pc)
* CPUs have been stopped during a panic. If this isn't true for some
* arch, this will have to be implemented separately in each arch.
*/
- int i;
- int j;
struct cfs_trace_cpu_data *tcd;
+ int i;
+ int j;
INIT_LIST_HEAD(&pc->pc_pages);
@@ -698,11 +698,11 @@ void cfs_trace_debug_print(void)
int cfs_tracefile_dump_all_pages(char *filename)
{
- struct page_collection pc;
- struct file *filp;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
- char *buf;
+ struct page_collection pc;
+ struct file *filp;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
+ char *buf;
mm_segment_t __oldfs;
int rc;
@@ -778,7 +778,7 @@ void cfs_trace_flush_pages(void)
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
const char __user *usr_buffer, int usr_buffer_nob)
{
- int nob;
+ int nob;
if (usr_buffer_nob > knl_buffer_nob)
return -EOVERFLOW;
@@ -810,7 +810,7 @@ int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
* NB if 'append' != NULL, it's a single character to append to the
* copied out string - usually "\n" or "" (i.e. a terminating zero byte)
*/
- int nob = strlen(knl_buffer);
+ int nob = strlen(knl_buffer);
if (nob > usr_buffer_nob)
nob = usr_buffer_nob;
@@ -843,16 +843,16 @@ int cfs_trace_allocate_string_buffer(char **str, int nob)
int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
{
- char *str;
- int rc;
+ char *str;
+ int rc;
rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
- if (rc != 0)
+ if (rc)
return rc;
rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
usr_str, usr_str_nob);
- if (rc != 0)
+ if (rc)
goto out;
if (str[0] != '/') {
@@ -867,17 +867,17 @@ out:
int cfs_trace_daemon_command(char *str)
{
- int rc = 0;
+ int rc = 0;
cfs_tracefile_write_lock();
- if (strcmp(str, "stop") == 0) {
+ if (!strcmp(str, "stop")) {
cfs_tracefile_write_unlock();
cfs_trace_stop_thread();
cfs_tracefile_write_lock();
memset(cfs_tracefile, 0, sizeof(cfs_tracefile));
- } else if (strncmp(str, "size=", 5) == 0) {
+ } else if (!strncmp(str, "size=", 5)) {
unsigned long tmp;
rc = kstrtoul(str + 5, 10, &tmp);
@@ -909,15 +909,15 @@ int cfs_trace_daemon_command(char *str)
int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
{
char *str;
- int rc;
+ int rc;
rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
- if (rc != 0)
+ if (rc)
return rc;
rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
usr_str, usr_str_nob);
- if (rc == 0)
+ if (!rc)
rc = cfs_trace_daemon_command(str);
kfree(str);
@@ -1003,7 +1003,7 @@ static int tracefiled(void *arg)
filp = NULL;
cfs_tracefile_read_lock();
- if (cfs_tracefile[0] != 0) {
+ if (cfs_tracefile[0]) {
filp = filp_open(cfs_tracefile,
O_CREAT | O_RDWR | O_LARGEFILE,
0600);
@@ -1072,7 +1072,7 @@ static int tracefiled(void *arg)
__LASSERT(list_empty(&pc.pc_pages));
end_loop:
if (atomic_read(&tctl->tctl_shutdown)) {
- if (last_loop == 0) {
+ if (!last_loop) {
last_loop = 1;
continue;
} else {
@@ -1135,13 +1135,13 @@ void cfs_trace_stop_thread(void)
int cfs_tracefile_init(int max_pages)
{
struct cfs_trace_cpu_data *tcd;
- int i;
- int j;
- int rc;
- int factor;
+ int i;
+ int j;
+ int rc;
+ int factor;
rc = cfs_tracefile_init_arch();
- if (rc != 0)
+ if (rc)
return rc;
cfs_tcd_for_each(tcd, i, j) {
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h
index d878676bc375..f644cbc5a277 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.h
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h
@@ -45,7 +45,7 @@ enum cfs_trace_buf_type {
/* trace file lock routines */
#define TRACEFILE_NAME_SIZE 1024
-extern char cfs_tracefile[TRACEFILE_NAME_SIZE];
+extern char cfs_tracefile[TRACEFILE_NAME_SIZE];
extern long long cfs_tracefile_size;
void libcfs_run_debug_log_upcall(char *file);
@@ -80,7 +80,7 @@ int cfs_trace_get_debug_mb(void);
void libcfs_debug_dumplog_internal(void *arg);
void libcfs_register_panic_notifier(void);
void libcfs_unregister_panic_notifier(void);
-extern int libcfs_panic_in_progress;
+extern int libcfs_panic_in_progress;
int cfs_trace_max_debug_mb(void);
#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
@@ -113,14 +113,14 @@ union cfs_trace_data_union {
* tcd_for_each_type_lock
*/
spinlock_t tcd_lock;
- unsigned long tcd_lock_flags;
+ unsigned long tcd_lock_flags;
/*
* pages with trace records not yet processed by tracefiled.
*/
- struct list_head tcd_pages;
+ struct list_head tcd_pages;
/* number of pages on ->tcd_pages */
- unsigned long tcd_cur_pages;
+ unsigned long tcd_cur_pages;
/*
* pages with trace records already processed by
@@ -132,9 +132,9 @@ union cfs_trace_data_union {
* (put_pages_on_daemon_list()). LRU pages from this list are
* discarded when list grows too large.
*/
- struct list_head tcd_daemon_pages;
+ struct list_head tcd_daemon_pages;
/* number of pages on ->tcd_daemon_pages */
- unsigned long tcd_cur_daemon_pages;
+ unsigned long tcd_cur_daemon_pages;
/*
* Maximal number of pages allowed on ->tcd_pages and
@@ -142,7 +142,7 @@ union cfs_trace_data_union {
* Always TCD_MAX_PAGES * tcd_pages_factor / 100 in current
* implementation.
*/
- unsigned long tcd_max_pages;
+ unsigned long tcd_max_pages;
/*
* preallocated pages to write trace records into. Pages from
@@ -166,15 +166,15 @@ union cfs_trace_data_union {
* TCD_STOCK_PAGES pagesful are consumed by trace records all
* emitted in non-blocking contexts. Which is quite unlikely.
*/
- struct list_head tcd_stock_pages;
+ struct list_head tcd_stock_pages;
/* number of pages on ->tcd_stock_pages */
- unsigned long tcd_cur_stock_pages;
+ unsigned long tcd_cur_stock_pages;
- unsigned short tcd_shutting_down;
- unsigned short tcd_cpu;
- unsigned short tcd_type;
+ unsigned short tcd_shutting_down;
+ unsigned short tcd_cpu;
+ unsigned short tcd_type;
/* The factors to share debug memory. */
- unsigned short tcd_pages_factor;
+ unsigned short tcd_pages_factor;
} tcd;
char __pad[L1_CACHE_ALIGN(sizeof(struct cfs_trace_cpu_data))];
};
diff --git a/drivers/staging/lustre/lnet/libcfs/workitem.c b/drivers/staging/lustre/lnet/libcfs/workitem.c
index e98c818a14fb..d0512da6bcde 100644
--- a/drivers/staging/lustre/lnet/libcfs/workitem.c
+++ b/drivers/staging/lustre/lnet/libcfs/workitem.c
@@ -45,7 +45,7 @@ struct cfs_wi_sched {
/* chain on global list */
struct list_head ws_list;
/** serialised workitems */
- spinlock_t ws_lock;
+ spinlock_t ws_lock;
/** where schedulers sleep */
wait_queue_head_t ws_waitq;
/** concurrent workitems */
@@ -59,26 +59,26 @@ struct cfs_wi_sched {
*/
struct list_head ws_rerunq;
/** CPT-table for this scheduler */
- struct cfs_cpt_table *ws_cptab;
+ struct cfs_cpt_table *ws_cptab;
/** CPT id for affinity */
- int ws_cpt;
+ int ws_cpt;
/** number of scheduled workitems */
- int ws_nscheduled;
+ int ws_nscheduled;
/** started scheduler thread, protected by cfs_wi_data::wi_glock */
- unsigned int ws_nthreads:30;
+ unsigned int ws_nthreads:30;
/** shutting down, protected by cfs_wi_data::wi_glock */
- unsigned int ws_stopping:1;
+ unsigned int ws_stopping:1;
/** serialize starting thread, protected by cfs_wi_data::wi_glock */
- unsigned int ws_starting:1;
+ unsigned int ws_starting:1;
/** scheduler name */
- char ws_name[CFS_WS_NAME_LEN];
+ char ws_name[CFS_WS_NAME_LEN];
};
static struct cfs_workitem_data {
/** serialize */
spinlock_t wi_glock;
/** list of all schedulers */
- struct list_head wi_scheds;
+ struct list_head wi_scheds;
/** WI module is initialized */
int wi_init;
/** shutting down the whole WI module */
@@ -136,7 +136,7 @@ EXPORT_SYMBOL(cfs_wi_exit);
int
cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
{
- int rc;
+ int rc;
LASSERT(!in_interrupt()); /* because we use plain spinlock */
LASSERT(!sched->ws_stopping);
@@ -202,13 +202,13 @@ EXPORT_SYMBOL(cfs_wi_schedule);
static int cfs_wi_scheduler(void *arg)
{
- struct cfs_wi_sched *sched = (struct cfs_wi_sched *)arg;
+ struct cfs_wi_sched *sched = (struct cfs_wi_sched *)arg;
cfs_block_allsigs();
/* CPT affinity scheduler? */
if (sched->ws_cptab)
- if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt) != 0)
+ if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt))
CWARN("Failed to bind %s on CPT %d\n",
sched->ws_name, sched->ws_cpt);
@@ -223,8 +223,8 @@ static int cfs_wi_scheduler(void *arg)
spin_lock(&sched->ws_lock);
while (!sched->ws_stopping) {
- int nloops = 0;
- int rc;
+ int nloops = 0;
+ int rc;
struct cfs_workitem *wi;
while (!list_empty(&sched->ws_runq) &&
@@ -238,16 +238,16 @@ static int cfs_wi_scheduler(void *arg)
LASSERT(sched->ws_nscheduled > 0);
sched->ws_nscheduled--;
- wi->wi_running = 1;
+ wi->wi_running = 1;
wi->wi_scheduled = 0;
spin_unlock(&sched->ws_lock);
nloops++;
- rc = (*wi->wi_action) (wi);
+ rc = (*wi->wi_action)(wi);
spin_lock(&sched->ws_lock);
- if (rc != 0) /* WI should be dead, even be freed! */
+ if (rc) /* WI should be dead, even be freed! */
continue;
wi->wi_running = 0;
@@ -273,7 +273,7 @@ static int cfs_wi_scheduler(void *arg)
spin_unlock(&sched->ws_lock);
rc = wait_event_interruptible_exclusive(sched->ws_waitq,
- !cfs_wi_sched_cansleep(sched));
+ !cfs_wi_sched_cansleep(sched));
spin_lock(&sched->ws_lock);
}
@@ -289,7 +289,7 @@ static int cfs_wi_scheduler(void *arg)
void
cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
{
- int i;
+ int i;
LASSERT(cfs_wi_data.wi_init);
LASSERT(!cfs_wi_data.wi_stopping);
@@ -325,7 +325,7 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
list_del(&sched->ws_list);
spin_unlock(&cfs_wi_data.wi_glock);
- LASSERT(sched->ws_nscheduled == 0);
+ LASSERT(!sched->ws_nscheduled);
LIBCFS_FREE(sched, sizeof(*sched));
}
@@ -335,8 +335,8 @@ int
cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
int cpt, int nthrs, struct cfs_wi_sched **sched_pp)
{
- struct cfs_wi_sched *sched;
- int rc;
+ struct cfs_wi_sched *sched;
+ int rc;
LASSERT(cfs_wi_data.wi_init);
LASSERT(!cfs_wi_data.wi_stopping);
@@ -364,7 +364,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
rc = 0;
while (nthrs > 0) {
- char name[16];
+ char name[16];
struct task_struct *task;
spin_lock(&cfs_wi_data.wi_glock);
@@ -431,7 +431,7 @@ cfs_wi_startup(void)
void
cfs_wi_shutdown(void)
{
- struct cfs_wi_sched *sched;
+ struct cfs_wi_sched *sched;
struct cfs_wi_sched *temp;
spin_lock(&cfs_wi_data.wi_glock);
@@ -447,7 +447,7 @@ cfs_wi_shutdown(void)
list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
spin_lock(&cfs_wi_data.wi_glock);
- while (sched->ws_nthreads != 0) {
+ while (sched->ws_nthreads) {
spin_unlock(&cfs_wi_data.wi_glock);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(1) / 20);
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 4daf828198c3..b2ba10d59f84 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -1551,16 +1551,16 @@ LNetNIInit(lnet_pid_t requested_pid)
rc = lnet_check_routes();
if (rc)
- goto err_destory_routes;
+ goto err_destroy_routes;
rc = lnet_rtrpools_alloc(im_a_router);
if (rc)
- goto err_destory_routes;
+ goto err_destroy_routes;
}
rc = lnet_acceptor_start();
if (rc)
- goto err_destory_routes;
+ goto err_destroy_routes;
the_lnet.ln_refcount = 1;
/* Now I may use my own API functions... */
@@ -1587,7 +1587,7 @@ err_stop_ping:
err_acceptor_stop:
the_lnet.ln_refcount = 0;
lnet_acceptor_stop();
-err_destory_routes:
+err_destroy_routes:
if (!the_lnet.ln_nis_from_mod_params)
lnet_destroy_routes();
err_shutdown_lndnis:
diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c
index b430046dc294..eb796a86e6ab 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-me.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-me.c
@@ -271,21 +271,3 @@ lnet_me_unlink(lnet_me_t *me)
lnet_res_lh_invalidate(&me->me_lh);
lnet_me_free(me);
}
-
-#if 0
-static void
-lib_me_dump(lnet_me_t *me)
-{
- CWARN("Match Entry %p (%#llx)\n", me,
- me->me_lh.lh_cookie);
-
- CWARN("\tMatch/Ignore\t= %016lx / %016lx\n",
- me->me_match_bits, me->me_ignore_bits);
-
- CWARN("\tMD\t= %p\n", me->md);
- CWARN("\tprev\t= %p\n",
- list_entry(me->me_list.prev, lnet_me_t, me_list));
- CWARN("\tnext\t= %p\n",
- list_entry(me->me_list.next, lnet_me_t, me_list));
-}
-#endif
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index 48e6f8f2392f..f3dd6e42f4d4 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -192,6 +192,7 @@ lnet_copy_iov2iter(struct iov_iter *to,
left = siov->iov_len - soffset;
do {
size_t n, copy = left;
+
LASSERT(nsiov > 0);
if (copy > nob)
diff --git a/drivers/staging/lustre/lnet/lnet/nidstrings.c b/drivers/staging/lustre/lnet/lnet/nidstrings.c
index a6d7a6159b8f..a9fe3e69daae 100644
--- a/drivers/staging/lustre/lnet/lnet/nidstrings.c
+++ b/drivers/staging/lustre/lnet/lnet/nidstrings.c
@@ -193,7 +193,7 @@ add_nidrange(const struct cfs_lstr *src,
struct netstrfns *nf;
struct nidrange *nr;
int endlen;
- unsigned netnum;
+ unsigned int netnum;
if (src->ls_len >= LNET_NIDSTR_SIZE)
return NULL;
@@ -247,10 +247,8 @@ parse_nidrange(struct cfs_lstr *src, struct list_head *nidlist)
{
struct cfs_lstr addrrange;
struct cfs_lstr net;
- struct cfs_lstr tmp;
struct nidrange *nr;
- tmp = *src;
if (!cfs_gettok(src, '@', &addrrange))
goto failed;
@@ -1156,7 +1154,7 @@ EXPORT_SYMBOL(libcfs_nid2str_r);
static struct netstrfns *
libcfs_str2net_internal(const char *str, __u32 *net)
{
- struct netstrfns *uninitialized_var(nf);
+ struct netstrfns *nf = NULL;
int nob;
unsigned int netnum;
int i;
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index 063ad55ec950..8afa0abf15cd 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -903,6 +903,7 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway)
{
lnet_rc_data_t *rcd = NULL;
lnet_ping_info_t *pi;
+ lnet_md_t md;
int rc;
int i;
@@ -925,15 +926,15 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway)
}
rcd->rcd_pinginfo = pi;
+ md.start = pi;
+ md.user_ptr = rcd;
+ md.length = LNET_PINGINFO_SIZE;
+ md.threshold = LNET_MD_THRESH_INF;
+ md.options = LNET_MD_TRUNCATE;
+ md.eq_handle = the_lnet.ln_rc_eqh;
+
LASSERT(!LNetHandleIsInvalid(the_lnet.ln_rc_eqh));
- rc = LNetMDBind((lnet_md_t){.start = pi,
- .user_ptr = rcd,
- .length = LNET_PINGINFO_SIZE,
- .threshold = LNET_MD_THRESH_INF,
- .options = LNET_MD_TRUNCATE,
- .eq_handle = the_lnet.ln_rc_eqh},
- LNET_UNLINK,
- &rcd->rcd_mdh);
+ rc = LNetMDBind(md, LNET_UNLINK, &rcd->rcd_mdh);
if (rc < 0) {
CERROR("Can't bind MD: %d\n", rc);
goto out;
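
The router.c hunk above swaps an anonymous compound-literal argument to LNetMDBind() for a named lnet_md_t that is filled in field by field before the call. For reference, the same before/after shape can be compiled in isolation; struct md_desc and bind_md() below are hypothetical stand-ins for lnet_md_t and LNetMDBind(), used only so the sketch builds on its own.

/*
 * Standalone sketch of the refactoring above: a large compound-literal
 * argument is replaced by a named local filled in field by field.
 * struct md_desc and bind_md() are hypothetical stand-ins, not LNet API.
 */
#include <stddef.h>
#include <stdio.h>

struct md_desc {
	void *start;
	void *user_ptr;
	unsigned int length;
	int threshold;
	unsigned int options;
};

static int bind_md(struct md_desc md)
{
	printf("bind %u bytes at %p (options %#x)\n",
	       md.length, md.start, md.options);
	return 0;
}

int main(void)
{
	static char pinginfo[128];
	struct md_desc md;

	/* old style: everything packed into one compound literal at the call */
	bind_md((struct md_desc){ .start = pinginfo, .user_ptr = NULL,
				  .length = sizeof(pinginfo),
				  .threshold = -1, .options = 0 });

	/* new style: explicit assignments to a named local, then a short call */
	md.start = pinginfo;
	md.user_ptr = NULL;
	md.length = sizeof(pinginfo);
	md.threshold = -1;
	md.options = 0;

	return bind_md(md);
}
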
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index b20c5d394e3b..67b460f41d6e 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -44,6 +44,10 @@ static int brw_inject_errors;
module_param(brw_inject_errors, int, 0644);
MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default");
+#define BRW_POISON 0xbeefbeefbeefbeefULL
+#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL
+#define BRW_MSIZE sizeof(u64)
+
static void
brw_client_fini(struct sfw_test_instance *tsi)
{
@@ -67,6 +71,7 @@ brw_client_init(struct sfw_test_instance *tsi)
{
struct sfw_session *sn = tsi->tsi_batch->bat_session;
int flags;
+ int off;
int npg;
int len;
int opc;
@@ -87,6 +92,7 @@ brw_client_init(struct sfw_test_instance *tsi)
* but we have to keep it for compatibility
*/
len = npg * PAGE_SIZE;
+ off = 0;
} else {
struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
@@ -99,9 +105,13 @@ brw_client_init(struct sfw_test_instance *tsi)
opc = breq->blk_opc;
flags = breq->blk_flags;
len = breq->blk_len;
- npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ off = breq->blk_offset & ~PAGE_MASK;
+ npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
+ if (off % BRW_MSIZE)
+ return -EINVAL;
+
if (npg > LNET_MAX_IOV || npg <= 0)
return -EINVAL;
@@ -114,7 +124,7 @@ brw_client_init(struct sfw_test_instance *tsi)
list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
bulk = srpc_alloc_bulk(lnet_cpt_of_nid(tsu->tsu_dest.nid),
- npg, len, opc == LST_BRW_READ);
+ off, npg, len, opc == LST_BRW_READ);
if (!bulk) {
brw_client_fini(tsi);
return -ENOMEM;
@@ -126,12 +136,7 @@ brw_client_init(struct sfw_test_instance *tsi)
return 0;
}
-#define BRW_POISON 0xbeefbeefbeefbeefULL
-#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL
-#define BRW_MSIZE sizeof(__u64)
-
-static int
-brw_inject_one_error(void)
+int brw_inject_one_error(void)
{
struct timespec64 ts;
@@ -147,12 +152,13 @@ brw_inject_one_error(void)
}
static void
-brw_fill_page(struct page *pg, int pattern, __u64 magic)
+brw_fill_page(struct page *pg, int off, int len, int pattern, __u64 magic)
{
- char *addr = page_address(pg);
+ char *addr = page_address(pg) + off;
int i;
LASSERT(addr);
+ LASSERT(!(off % BRW_MSIZE) && !(len % BRW_MSIZE));
if (pattern == LST_BRW_CHECK_NONE)
return;
@@ -162,14 +168,16 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
if (pattern == LST_BRW_CHECK_SIMPLE) {
memcpy(addr, &magic, BRW_MSIZE);
- addr += PAGE_SIZE - BRW_MSIZE;
- memcpy(addr, &magic, BRW_MSIZE);
+ if (len > BRW_MSIZE) {
+ addr += PAGE_SIZE - BRW_MSIZE;
+ memcpy(addr, &magic, BRW_MSIZE);
+ }
return;
}
if (pattern == LST_BRW_CHECK_FULL) {
- for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++)
- memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
+ for (i = 0; i < len; i += BRW_MSIZE)
+ memcpy(addr + i, &magic, BRW_MSIZE);
return;
}
@@ -177,13 +185,14 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
}
static int
-brw_check_page(struct page *pg, int pattern, __u64 magic)
+brw_check_page(struct page *pg, int off, int len, int pattern, __u64 magic)
{
- char *addr = page_address(pg);
+ char *addr = page_address(pg) + off;
__u64 data = 0; /* make compiler happy */
int i;
LASSERT(addr);
+ LASSERT(!(off % BRW_MSIZE) && !(len % BRW_MSIZE));
if (pattern == LST_BRW_CHECK_NONE)
return 0;
@@ -193,21 +202,21 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
if (data != magic)
goto bad_data;
- addr += PAGE_SIZE - BRW_MSIZE;
- data = *((__u64 *)addr);
- if (data != magic)
- goto bad_data;
-
+ if (len > BRW_MSIZE) {
+ addr += PAGE_SIZE - BRW_MSIZE;
+ data = *((__u64 *)addr);
+ if (data != magic)
+ goto bad_data;
+ }
return 0;
}
if (pattern == LST_BRW_CHECK_FULL) {
- for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) {
- data = *(((__u64 *)addr) + i);
+ for (i = 0; i < len; i += BRW_MSIZE) {
+ data = *(u64 *)(addr + i);
if (data != magic)
goto bad_data;
}
-
return 0;
}
@@ -226,8 +235,12 @@ brw_fill_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
struct page *pg;
for (i = 0; i < bk->bk_niov; i++) {
+ int off, len;
+
pg = bk->bk_iovs[i].bv_page;
- brw_fill_page(pg, pattern, magic);
+ off = bk->bk_iovs[i].bv_offset;
+ len = bk->bk_iovs[i].bv_len;
+ brw_fill_page(pg, off, len, pattern, magic);
}
}
@@ -238,8 +251,12 @@ brw_check_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
struct page *pg;
for (i = 0; i < bk->bk_niov; i++) {
+ int off, len;
+
pg = bk->bk_iovs[i].bv_page;
- if (brw_check_page(pg, pattern, magic)) {
+ off = bk->bk_iovs[i].bv_offset;
+ len = bk->bk_iovs[i].bv_len;
+ if (brw_check_page(pg, off, len, pattern, magic)) {
CERROR("Bulk page %p (%d/%d) is corrupted!\n",
pg, i, bk->bk_niov);
return 1;
@@ -276,6 +293,7 @@ brw_client_prep_rpc(struct sfw_test_unit *tsu,
len = npg * PAGE_SIZE;
} else {
struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
+ int off;
/*
* I should never get this step if it's unknown feature
@@ -286,7 +304,8 @@ brw_client_prep_rpc(struct sfw_test_unit *tsu,
opc = breq->blk_opc;
flags = breq->blk_flags;
len = breq->blk_len;
- npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ off = breq->blk_offset;
+ npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
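
In the brw_test.c hunks above, brw_client_init() now masks the intra-page offset out of blk_offset and folds it into the page count, npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT, so a transfer that starts partway into a page still reserves enough pages. A minimal sketch of that arithmetic, assuming a 4 KiB page purely for illustration (the kernel uses its architecture-defined PAGE_SIZE/PAGE_SHIFT):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long blk_offset = 6144;	/* example: 1.5 pages into the buffer */
	unsigned long len = 10000;		/* example bulk length in bytes */
	unsigned long off = blk_offset & ~PAGE_MASK;	/* offset within the first page */
	unsigned long npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* off = 2048, so 2048 + 10000 = 12048 bytes span 3 pages, not 3 from len alone */
	printf("off=%lu npg=%lu\n", off, npg);
	return 0;
}
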
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index b786f8b4a73d..94383023c1be 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -315,7 +315,7 @@ lst_group_update_ioctl(lstio_group_update_args_t *args)
static int
lst_nodes_add_ioctl(lstio_group_nodes_args_t *args)
{
- unsigned feats;
+ unsigned int feats;
int rc;
char *name;
@@ -742,6 +742,10 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
PAGE_SIZE - sizeof(struct lstcon_test)))
return -EINVAL;
+ /* Enforce zero parameter length if there's no parameter */
+ if (!args->lstio_tes_param && args->lstio_tes_param_len)
+ return -EINVAL;
+
LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
if (!batch_name)
return rc;
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 55afb53b0743..994422c62487 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -86,8 +86,9 @@ lstcon_rpc_done(struct srpc_client_rpc *rpc)
}
static int
-lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned feats,
- int bulk_npg, int bulk_len, int embedded, struct lstcon_rpc *crpc)
+lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned int feats,
+ int bulk_npg, int bulk_len, int embedded,
+ struct lstcon_rpc *crpc)
{
crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service,
feats, bulk_npg, bulk_len,
@@ -111,7 +112,7 @@ lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned feats,
}
static int
-lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned feats,
+lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned int feats,
int bulk_npg, int bulk_len, struct lstcon_rpc **crpcpp)
{
struct lstcon_rpc *crpc = NULL;
@@ -292,8 +293,8 @@ lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error)
spin_lock(&rpc->crpc_lock);
- if (!crpc->crp_posted || /* not posted */
- crpc->crp_stamp) { /* rpc done or aborted already */
+ if (!crpc->crp_posted || /* not posted */
+ crpc->crp_stamp) { /* rpc done or aborted already */
if (!crpc->crp_stamp) {
crpc->crp_stamp = cfs_time_current();
crpc->crp_status = -EINTR;
@@ -589,7 +590,7 @@ lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans)
int
lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
- unsigned feats, struct lstcon_rpc **crpc)
+ unsigned int feats, struct lstcon_rpc **crpc)
{
struct srpc_mksn_reqst *msrq;
struct srpc_rmsn_reqst *rsrq;
@@ -627,7 +628,8 @@ lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
}
int
-lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc)
+lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned int feats,
+ struct lstcon_rpc **crpc)
{
struct srpc_debug_reqst *drq;
int rc;
@@ -645,7 +647,7 @@ lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **c
}
int
-lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
+lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned int feats,
struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc)
{
struct lstcon_batch *batch;
@@ -678,7 +680,8 @@ lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
}
int
-lstcon_statrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc)
+lstcon_statrpc_prep(struct lstcon_node *nd, unsigned int feats,
+ struct lstcon_rpc **crpc)
{
struct srpc_stat_reqst *srq;
int rc;
@@ -776,7 +779,8 @@ lstcon_pingrpc_prep(lst_test_ping_param_t *param, struct srpc_test_reqst *req)
}
static int
-lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req)
+lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param,
+ struct srpc_test_reqst *req)
{
struct test_bulk_req *brq = &req->tsr_u.bulk_v0;
@@ -789,20 +793,21 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req
}
static int
-lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req)
+lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, bool is_client,
+ struct srpc_test_reqst *req)
{
struct test_bulk_req_v1 *brq = &req->tsr_u.bulk_v1;
brq->blk_opc = param->blk_opc;
brq->blk_flags = param->blk_flags;
brq->blk_len = param->blk_size;
- brq->blk_offset = 0; /* reserved */
+ brq->blk_offset = is_client ? param->blk_cli_off : param->blk_srv_off;
return 0;
}
int
-lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
+lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned int feats,
struct lstcon_test *test, struct lstcon_rpc **crpc)
{
struct lstcon_group *sgrp = test->tes_src_grp;
@@ -897,7 +902,8 @@ lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
&test->tes_param[0], trq);
} else {
rc = lstcon_bulkrpc_v1_prep((lst_test_bulk_param_t *)
- &test->tes_param[0], trq);
+ &test->tes_param[0],
+ trq->tsr_is_client, trq);
}
break;
@@ -1084,7 +1090,7 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
struct lstcon_ndlink *ndl;
struct lstcon_node *nd;
struct lstcon_rpc *rpc;
- unsigned feats;
+ unsigned int feats;
int rc;
/* Creating session RPG for list of nodes */
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index 7ec6fc96959e..e629e87c461c 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -78,8 +78,8 @@ struct lstcon_rpc_trans {
struct list_head tas_olink; /* link chain on owner list */
struct list_head tas_link; /* link chain on global list */
int tas_opc; /* operation code of transaction */
- unsigned tas_feats_updated; /* features mask is uptodate */
- unsigned tas_features; /* test features mask */
+ unsigned int tas_feats_updated; /* features mask is uptodate */
+ unsigned int tas_features; /* test features mask */
wait_queue_head_t tas_waitq; /* wait queue head */
atomic_t tas_remaining; /* # of un-scheduled rpcs */
struct list_head tas_rpcs_list; /* queued requests */
@@ -106,14 +106,16 @@ typedef int (*lstcon_rpc_readent_func_t)(int, struct srpc_msg *,
lstcon_rpc_ent_t __user *);
int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
- unsigned version, struct lstcon_rpc **crpc);
+ unsigned int version, struct lstcon_rpc **crpc);
int lstcon_dbgrpc_prep(struct lstcon_node *nd,
- unsigned version, struct lstcon_rpc **crpc);
-int lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
- struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc);
-int lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
- struct lstcon_test *test, struct lstcon_rpc **crpc);
-int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned version,
+ unsigned int version, struct lstcon_rpc **crpc);
+int lstcon_batrpc_prep(struct lstcon_node *nd, int transop,
+ unsigned int version, struct lstcon_tsb_hdr *tsb,
+ struct lstcon_rpc **crpc);
+int lstcon_testrpc_prep(struct lstcon_node *nd, int transop,
+ unsigned int version, struct lstcon_test *test,
+ struct lstcon_rpc **crpc);
+int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned int version,
struct lstcon_rpc **crpc);
void lstcon_rpc_put(struct lstcon_rpc *crpc);
int lstcon_rpc_trans_prep(struct list_head *translist,
@@ -129,7 +131,8 @@ int lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans,
lstcon_rpc_readent_func_t readent);
void lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error);
void lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans);
-void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *req);
+void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans,
+ struct lstcon_rpc *req);
int lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout);
int lstcon_rpc_pinger_start(void);
void lstcon_rpc_pinger_stop(void);
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index a0fcbf3bcc95..1456d2395cc9 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -86,7 +86,7 @@ lstcon_node_find(lnet_process_id_t id, struct lstcon_node **ndpp, int create)
if (!create)
return -ENOENT;
- LIBCFS_ALLOC(*ndpp, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink));
+ LIBCFS_ALLOC(*ndpp, sizeof(**ndpp) + sizeof(*ndl));
if (!*ndpp)
return -ENOMEM;
@@ -131,12 +131,12 @@ lstcon_node_put(struct lstcon_node *nd)
list_del(&ndl->ndl_link);
list_del(&ndl->ndl_hlink);
- LIBCFS_FREE(nd, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink));
+ LIBCFS_FREE(nd, sizeof(*nd) + sizeof(*ndl));
}
static int
-lstcon_ndlink_find(struct list_head *hash,
- lnet_process_id_t id, struct lstcon_ndlink **ndlpp, int create)
+lstcon_ndlink_find(struct list_head *hash, lnet_process_id_t id,
+ struct lstcon_ndlink **ndlpp, int create)
{
unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
struct lstcon_ndlink *ndl;
@@ -230,7 +230,8 @@ lstcon_group_addref(struct lstcon_group *grp)
grp->grp_ref++;
}
-static void lstcon_group_ndlink_release(struct lstcon_group *, struct lstcon_ndlink *);
+static void lstcon_group_ndlink_release(struct lstcon_group *,
+ struct lstcon_ndlink *);
static void
lstcon_group_drain(struct lstcon_group *grp, int keep)
@@ -397,7 +398,8 @@ lstcon_sesrpc_readent(int transop, struct srpc_msg *msg,
static int
lstcon_group_nodes_add(struct lstcon_group *grp,
int count, lnet_process_id_t __user *ids_up,
- unsigned *featp, struct list_head __user *result_up)
+ unsigned int *featp,
+ struct list_head __user *result_up)
{
struct lstcon_rpc_trans *trans;
struct lstcon_ndlink *ndl;
@@ -542,7 +544,8 @@ lstcon_group_add(char *name)
int
lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up,
- unsigned *featp, struct list_head __user *result_up)
+ unsigned int *featp,
+ struct list_head __user *result_up)
{
struct lstcon_group *grp;
int rc;
@@ -820,7 +823,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p,
lstcon_group_decref(grp);
- return 0;
+ return rc;
}
static int
@@ -1181,7 +1184,8 @@ lstcon_testrpc_condition(int transop, struct lstcon_node *nd, void *arg)
}
static int
-lstcon_test_nodes_add(struct lstcon_test *test, struct list_head __user *result_up)
+lstcon_test_nodes_add(struct lstcon_test *test,
+ struct list_head __user *result_up)
{
struct lstcon_rpc_trans *trans;
struct lstcon_group *grp;
@@ -1364,7 +1368,8 @@ out:
}
static int
-lstcon_test_find(struct lstcon_batch *batch, int idx, struct lstcon_test **testpp)
+lstcon_test_find(struct lstcon_batch *batch, int idx,
+ struct lstcon_test **testpp)
{
struct lstcon_test *test;
@@ -1702,7 +1707,7 @@ lstcon_new_session_id(lst_sid_t *sid)
}
int
-lstcon_session_new(char *name, int key, unsigned feats,
+lstcon_session_new(char *name, int key, unsigned int feats,
int timeout, int force, lst_sid_t __user *sid_up)
{
int rc = 0;
@@ -1868,7 +1873,7 @@ lstcon_session_end(void)
}
int
-lstcon_session_feats_check(unsigned feats)
+lstcon_session_feats_check(unsigned int feats)
{
int rc = 0;
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index 78388a611c22..5dc1de48a10e 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -92,14 +92,16 @@ struct lstcon_batch {
int bat_ntest; /* # of test */
int bat_state; /* state of the batch */
int bat_arg; /* parameter for run|stop, timeout
- * for run, force for stop */
+ * for run, force for stop
+ */
char bat_name[LST_NAME_SIZE];/* name of batch */
struct list_head bat_test_list; /* list head of tests (struct lstcon_test)
*/
struct list_head bat_trans_list; /* list head of transaction */
struct list_head bat_cli_list; /* list head of client nodes
- * (struct lstcon_node) */
+ * (struct lstcon_node)
+ */
struct list_head *bat_cli_hash; /* hash table of client nodes */
struct list_head bat_srv_list; /* list head of server nodes */
struct list_head *bat_srv_hash; /* hash table of server nodes */
@@ -144,13 +146,14 @@ struct lstcon_session {
int ses_timeout; /* timeout in seconds */
time64_t ses_laststamp; /* last operation stamp (seconds)
*/
- unsigned ses_features; /* tests features of the session
+ unsigned int ses_features; /* tests features of the session
*/
- unsigned ses_feats_updated:1; /* features are synced with
- * remote test nodes */
- unsigned ses_force:1; /* force creating */
- unsigned ses_shutdown:1; /* session is shutting down */
- unsigned ses_expired:1; /* console is timedout */
+ unsigned int ses_feats_updated:1; /* features are synced with
+ * remote test nodes
+ */
+ unsigned int ses_force:1; /* force creating */
+ unsigned int ses_shutdown:1; /* session is shutting down */
+ unsigned int ses_expired:1; /* console is timedout */
__u64 ses_id_cookie; /* batch id cookie */
char ses_name[LST_NAME_SIZE];/* session name */
struct lstcon_rpc_trans *ses_ping; /* session pinger */
@@ -188,14 +191,14 @@ int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
int lstcon_console_init(void);
int lstcon_console_fini(void);
int lstcon_session_match(lst_sid_t sid);
-int lstcon_session_new(char *name, int key, unsigned version,
+int lstcon_session_new(char *name, int key, unsigned int version,
int timeout, int flags, lst_sid_t __user *sid_up);
int lstcon_session_info(lst_sid_t __user *sid_up, int __user *key,
unsigned __user *verp, lstcon_ndlist_ent_t __user *entp,
char __user *name_up, int len);
int lstcon_session_end(void);
int lstcon_session_debug(int timeout, struct list_head __user *result_up);
-int lstcon_session_feats_check(unsigned feats);
+int lstcon_session_feats_check(unsigned int feats);
int lstcon_batch_debug(int timeout, char *name,
int client, struct list_head __user *result_up);
int lstcon_group_debug(int timeout, char *name,
@@ -207,7 +210,7 @@ int lstcon_group_del(char *name);
int lstcon_group_clean(char *name, int args);
int lstcon_group_refresh(char *name, struct list_head __user *result_up);
int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t __user *nds_up,
- unsigned *featp, struct list_head __user *result_up);
+ unsigned int *featp, struct list_head __user *result_up);
int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t __user *nds_up,
struct list_head __user *result_up);
int lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gent_up,
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index abbd6287b4bd..48dcc330dc9b 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -131,7 +131,8 @@ sfw_find_test_case(int id)
}
static int
-sfw_register_test(struct srpc_service *service, struct sfw_test_client_ops *cliops)
+sfw_register_test(struct srpc_service *service,
+ struct sfw_test_client_ops *cliops)
{
struct sfw_test_case *tsc;
@@ -254,7 +255,7 @@ sfw_session_expired(void *data)
static inline void
sfw_init_session(struct sfw_session *sn, lst_sid_t sid,
- unsigned features, const char *name)
+ unsigned int features, const char *name)
{
struct stt_timer *timer = &sn->sn_timer;
@@ -469,7 +470,8 @@ sfw_make_session(struct srpc_mksn_reqst *request, struct srpc_mksn_reply *reply)
}
static int
-sfw_remove_session(struct srpc_rmsn_reqst *request, struct srpc_rmsn_reply *reply)
+sfw_remove_session(struct srpc_rmsn_reqst *request,
+ struct srpc_rmsn_reply *reply)
{
struct sfw_session *sn = sfw_data.fw_session;
@@ -501,7 +503,8 @@ sfw_remove_session(struct srpc_rmsn_reqst *request, struct srpc_rmsn_reply *repl
}
static int
-sfw_debug_session(struct srpc_debug_reqst *request, struct srpc_debug_reply *reply)
+sfw_debug_session(struct srpc_debug_reqst *request,
+ struct srpc_debug_reply *reply)
{
struct sfw_session *sn = sfw_data.fw_session;
@@ -897,7 +900,7 @@ sfw_test_rpc_done(struct srpc_client_rpc *rpc)
int
sfw_create_test_rpc(struct sfw_test_unit *tsu, lnet_process_id_t peer,
- unsigned features, int nblk, int blklen,
+ unsigned int features, int nblk, int blklen,
struct srpc_client_rpc **rpcpp)
{
struct srpc_client_rpc *rpc = NULL;
@@ -1064,7 +1067,8 @@ sfw_stop_batch(struct sfw_batch *tsb, int force)
}
static int
-sfw_query_batch(struct sfw_batch *tsb, int testidx, struct srpc_batch_reply *reply)
+sfw_query_batch(struct sfw_batch *tsb, int testidx,
+ struct srpc_batch_reply *reply)
{
struct sfw_test_instance *tsi;
@@ -1101,7 +1105,7 @@ sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
LASSERT(!rpc->srpc_bulk);
LASSERT(npages > 0 && npages <= LNET_MAX_IOV);
- rpc->srpc_bulk = srpc_alloc_bulk(cpt, npages, len, sink);
+ rpc->srpc_bulk = srpc_alloc_bulk(cpt, 0, npages, len, sink);
if (!rpc->srpc_bulk)
return -ENOMEM;
@@ -1179,7 +1183,8 @@ sfw_add_test(struct srpc_server_rpc *rpc)
}
static int
-sfw_control_batch(struct srpc_batch_reqst *request, struct srpc_batch_reply *reply)
+sfw_control_batch(struct srpc_batch_reqst *request,
+ struct srpc_batch_reply *reply)
{
struct sfw_session *sn = sfw_data.fw_session;
int rc = 0;
@@ -1225,7 +1230,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
struct srpc_msg *reply = &rpc->srpc_replymsg;
struct srpc_msg *request = &rpc->srpc_reqstbuf->buf_msg;
- unsigned features = LST_FEATS_MASK;
+ unsigned int features = LST_FEATS_MASK;
int rc = 0;
LASSERT(!sfw_data.fw_active_srpc);
@@ -1375,7 +1380,7 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
struct srpc_client_rpc *
sfw_create_rpc(lnet_process_id_t peer, int service,
- unsigned features, int nbulkiov, int bulklen,
+ unsigned int features, int nbulkiov, int bulklen,
void (*done)(struct srpc_client_rpc *), void *priv)
{
struct srpc_client_rpc *rpc = NULL;
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index 9331ca4e3606..b9601b00a273 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -159,8 +159,8 @@ ping_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
ktime_get_real_ts64(&ts);
CDEBUG(D_NET, "%d reply in %u usec\n", reply->pnr_seq,
- (unsigned)((ts.tv_sec - reqst->pnr_time_sec) * 1000000 +
- (ts.tv_nsec / NSEC_PER_USEC - reqst->pnr_time_usec)));
+ (unsigned int)((ts.tv_sec - reqst->pnr_time_sec) * 1000000 +
+ (ts.tv_nsec / NSEC_PER_USEC - reqst->pnr_time_usec)));
}
static int
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index f5619d8744ef..ce9de8c9be57 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -84,14 +84,13 @@ void srpc_set_counters(const srpc_counters_t *cnt)
}
static int
-srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int nob)
+srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int off,
+ int nob)
{
- nob = min_t(int, nob, PAGE_SIZE);
+ LASSERT(off < PAGE_SIZE);
+ LASSERT(nob > 0 && nob <= PAGE_SIZE);
- LASSERT(nob > 0);
- LASSERT(i >= 0 && i < bk->bk_niov);
-
- bk->bk_iovs[i].bv_offset = 0;
+ bk->bk_iovs[i].bv_offset = off;
bk->bk_iovs[i].bv_page = pg;
bk->bk_iovs[i].bv_len = nob;
return nob;
@@ -117,7 +116,8 @@ srpc_free_bulk(struct srpc_bulk *bk)
}
struct srpc_bulk *
-srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
+srpc_alloc_bulk(int cpt, unsigned int bulk_off, unsigned int bulk_npg,
+ unsigned int bulk_len, int sink)
{
struct srpc_bulk *bk;
int i;
@@ -148,8 +148,11 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
return NULL;
}
- nob = srpc_add_bulk_page(bk, pg, i, bulk_len);
+ nob = min_t(unsigned int, bulk_off + bulk_len, PAGE_SIZE) -
+ bulk_off;
+ srpc_add_bulk_page(bk, pg, i, bulk_off, nob);
bulk_len -= nob;
+ bulk_off = 0;
}
return bk;
@@ -693,7 +696,8 @@ srpc_finish_service(struct srpc_service *sv)
/* called with sv->sv_lock held */
static void
-srpc_service_recycle_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
+srpc_service_recycle_buffer(struct srpc_service_cd *scd,
+ struct srpc_buffer *buf)
__must_hold(&scd->scd_lock)
{
if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
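
The srpc_alloc_bulk()/srpc_add_bulk_page() change above makes the first bulk fragment shorter by the initial offset and resets the offset to zero for every later page. The loop below reproduces that slicing on its own, again assuming a 4 KiB page only for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int bulk_off = 2048, bulk_len = 10000;	/* example request */
	int i = 0;

	while (bulk_len) {
		/* nob = min(bulk_off + bulk_len, PAGE_SIZE) - bulk_off */
		unsigned int nob = (bulk_off + bulk_len < PAGE_SIZE ?
				    bulk_off + bulk_len : PAGE_SIZE) - bulk_off;

		printf("iov[%d]: offset=%u len=%u\n", i++, bulk_off, nob);
		bulk_len -= nob;
		bulk_off = 0;	/* only the first page carries an offset */
	}
	return 0;
}
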
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h
index 4ab2ee264004..f353a634cc8e 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.h
+++ b/drivers/staging/lustre/lnet/selftest/rpc.h
@@ -113,7 +113,8 @@ struct srpc_join_reply {
__u32 join_status; /* returned status */
lst_sid_t join_sid; /* session id */
__u32 join_timeout; /* # seconds' inactivity to
- * expire */
+ * expire
+ */
char join_session[LST_NAME_SIZE]; /* session name */
} WIRE_ATTR;
@@ -175,7 +176,7 @@ struct test_bulk_req_v1 {
__u16 blk_opc; /* bulk operation code */
__u16 blk_flags; /* data check flags */
__u32 blk_len; /* data length */
- __u32 blk_offset; /* reserved: offset */
+ __u32 blk_offset; /* offset */
} WIRE_ATTR;
struct test_ping_req {
@@ -190,7 +191,8 @@ struct srpc_test_reqst {
lst_bid_t tsr_bid; /* batch id */
__u32 tsr_service; /* test type: bulk|ping|... */
__u32 tsr_loop; /* test client loop count or
- * # server buffers needed */
+ * # server buffers needed
+ */
__u32 tsr_concur; /* concurrency of test */
__u8 tsr_is_client; /* is test client or not */
__u8 tsr_stop_onerr; /* stop on error */
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index d033ac03d953..c8833a016b6d 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -131,7 +131,8 @@ srpc_service2reply(int service)
enum srpc_event_type {
SRPC_BULK_REQ_RCVD = 1, /* passive bulk request(PUT sink/GET source)
- * received */
+ * received
+ */
SRPC_BULK_PUT_SENT = 2, /* active bulk PUT sent (source) */
SRPC_BULK_GET_RPLD = 3, /* active bulk GET replied (sink) */
SRPC_REPLY_RCVD = 4, /* incoming reply received */
@@ -295,7 +296,8 @@ struct srpc_service_cd {
#define SFW_TEST_WI_MIN 256
#define SFW_TEST_WI_MAX 2048
/* extra buffers for tolerating buggy peers, or unbalanced number
- * of peers between partitions */
+ * of peers between partitions
+ */
#define SFW_TEST_WI_EXTRA 64
/* number of server workitems (mini-thread) for framework service */
@@ -347,9 +349,11 @@ struct sfw_batch {
struct sfw_test_client_ops {
int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test
- * client */
+ * client
+ */
void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test
- * client */
+ * client
+ */
int (*tso_prep_rpc)(struct sfw_test_unit *tsu,
lnet_process_id_t dest,
struct srpc_client_rpc **rpc); /* prep a tests rpc */
@@ -374,7 +378,8 @@ struct sfw_test_instance {
spinlock_t tsi_lock; /* serialize */
unsigned int tsi_stopping:1; /* test is stopping */
atomic_t tsi_nactive; /* # of active test
- * unit */
+ * unit
+ */
struct list_head tsi_units; /* test units */
struct list_head tsi_free_rpcs; /* free rpcs */
struct list_head tsi_active_rpcs; /* active rpcs */
@@ -386,8 +391,10 @@ struct sfw_test_instance {
} tsi_u;
};
-/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
- * pages are not used */
+/*
+ * XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
+ * pages are not used
+ */
#define SFW_MAX_CONCUR LST_MAX_CONCUR
#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t))
#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
@@ -410,10 +417,10 @@ struct sfw_test_case {
struct srpc_client_rpc *
sfw_create_rpc(lnet_process_id_t peer, int service,
- unsigned features, int nbulkiov, int bulklen,
+ unsigned int features, int nbulkiov, int bulklen,
void (*done)(struct srpc_client_rpc *), void *priv);
int sfw_create_test_rpc(struct sfw_test_unit *tsu,
- lnet_process_id_t peer, unsigned features,
+ lnet_process_id_t peer, unsigned int features,
int nblk, int blklen, struct srpc_client_rpc **rpc);
void sfw_abort_rpc(struct srpc_client_rpc *rpc);
void sfw_post_rpc(struct srpc_client_rpc *rpc);
@@ -434,8 +441,9 @@ srpc_create_client_rpc(lnet_process_id_t peer, int service,
void srpc_post_rpc(struct srpc_client_rpc *rpc);
void srpc_abort_rpc(struct srpc_client_rpc *rpc, int why);
void srpc_free_bulk(struct srpc_bulk *bk);
-struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned bulk_npg,
- unsigned bulk_len, int sink);
+struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned int off,
+ unsigned int bulk_npg, unsigned int bulk_len,
+ int sink);
int srpc_send_rpc(struct swi_workitem *wi);
int srpc_send_reply(struct srpc_server_rpc *rpc);
int srpc_add_service(struct srpc_service *sv);
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index dcd22580b1f0..2fe692df19d0 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -46,16 +46,17 @@
* to cover a time period of 1024 seconds into the future before wrapping.
*/
#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */
-#define STTIMER_SLOTTIME (1 << STTIMER_MINPOLL)
+#define STTIMER_SLOTTIME BIT(STTIMER_MINPOLL)
#define STTIMER_SLOTTIMEMASK (~(STTIMER_SLOTTIME - 1))
-#define STTIMER_NSLOTS (1 << 7)
+#define STTIMER_NSLOTS BIT(7)
#define STTIMER_SLOT(t) (&stt_data.stt_hash[(((t) >> STTIMER_MINPOLL) & \
(STTIMER_NSLOTS - 1))])
static struct st_timer_data {
spinlock_t stt_lock;
unsigned long stt_prev_slot; /* start time of the slot processed
- * previously */
+ * previously
+ */
struct list_head stt_hash[STTIMER_NSLOTS];
int stt_shuttingdown;
wait_queue_head_t stt_waitq;
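
The timer.c context above describes a small timer wheel: BIT(STTIMER_MINPOLL) gives 8-second slots, BIT(7) gives 128 slots, so the wheel covers 8 * 128 = 1024 seconds before wrapping. The sketch below recomputes only the slot index; the real STTIMER_SLOT() macro returns a pointer into stt_data.stt_hash[], which is omitted here to keep the example self-contained.

#include <stdio.h>

#define STTIMER_MINPOLL  3
#define STTIMER_SLOTTIME (1UL << STTIMER_MINPOLL)
#define STTIMER_NSLOTS   (1UL << 7)
#define STTIMER_SLOT_IDX(t) (((t) >> STTIMER_MINPOLL) & (STTIMER_NSLOTS - 1))

int main(void)
{
	unsigned long expires = 2000;	/* example expiry time in seconds */

	printf("slot width %lu s, %lu slots, horizon %lu s, slot(%lu)=%lu\n",
	       STTIMER_SLOTTIME, STTIMER_NSLOTS,
	       STTIMER_SLOTTIME * STTIMER_NSLOTS,
	       expires, STTIMER_SLOT_IDX(expires));
	return 0;
}
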
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index edd72b926f81..999f250ceed0 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -74,7 +74,7 @@ static int seq_client_rpc(struct lu_client_seq *seq,
/* Zero out input range, this is not recovery yet. */
in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE);
- range_init(in);
+ lu_seq_range_init(in);
ptlrpc_request_set_replen(req);
@@ -112,25 +112,21 @@ static int seq_client_rpc(struct lu_client_seq *seq,
ptlrpc_at_set_req_timeout(req);
- if (opc != SEQ_ALLOC_SUPER && seq->lcs_type == LUSTRE_SEQ_METADATA)
- mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
rc = ptlrpc_queue_wait(req);
- if (opc != SEQ_ALLOC_SUPER && seq->lcs_type == LUSTRE_SEQ_METADATA)
- mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
if (rc)
goto out_req;
out = req_capsule_server_get(&req->rq_pill, &RMF_SEQ_RANGE);
*output = *out;
- if (!range_is_sane(output)) {
+ if (!lu_seq_range_is_sane(output)) {
CERROR("%s: Invalid range received from server: "
DRANGE "\n", seq->lcs_name, PRANGE(output));
rc = -EINVAL;
goto out_req;
}
- if (range_is_exhausted(output)) {
+ if (lu_seq_range_is_exhausted(output)) {
CERROR("%s: Range received from server is exhausted: "
DRANGE "]\n", seq->lcs_name, PRANGE(output));
rc = -EINVAL;
@@ -170,9 +166,9 @@ static int seq_client_alloc_seq(const struct lu_env *env,
{
int rc;
- LASSERT(range_is_sane(&seq->lcs_space));
+ LASSERT(lu_seq_range_is_sane(&seq->lcs_space));
- if (range_is_exhausted(&seq->lcs_space)) {
+ if (lu_seq_range_is_exhausted(&seq->lcs_space)) {
rc = seq_client_alloc_meta(env, seq);
if (rc) {
CERROR("%s: Can't allocate new meta-sequence, rc %d\n",
@@ -185,7 +181,7 @@ static int seq_client_alloc_seq(const struct lu_env *env,
rc = 0;
}
- LASSERT(!range_is_exhausted(&seq->lcs_space));
+ LASSERT(!lu_seq_range_is_exhausted(&seq->lcs_space));
*seqnr = seq->lcs_space.lsr_start;
seq->lcs_space.lsr_start += 1;
@@ -320,7 +316,7 @@ void seq_client_flush(struct lu_client_seq *seq)
seq->lcs_space.lsr_index = -1;
- range_init(&seq->lcs_space);
+ lu_seq_range_init(&seq->lcs_space);
mutex_unlock(&seq->lcs_mutex);
}
EXPORT_SYMBOL(seq_client_flush);
diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c
index 3ed32d77f38b..97d4849c7199 100644
--- a/drivers/staging/lustre/lustre/fid/lproc_fid.c
+++ b/drivers/staging/lustre/lustre/fid/lproc_fid.c
@@ -83,7 +83,7 @@ ldebugfs_fid_write_common(const char __user *buffer, size_t count,
(unsigned long long *)&tmp.lsr_end);
if (rc != 2)
return -EINVAL;
- if (!range_is_sane(&tmp) || range_is_zero(&tmp) ||
+ if (!lu_seq_range_is_sane(&tmp) || lu_seq_range_is_zero(&tmp) ||
tmp.lsr_start < range->lsr_start || tmp.lsr_end > range->lsr_end)
return -EINVAL;
*range = tmp;
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 0100a935f4ff..11f697496180 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -143,7 +143,7 @@ restart_fixup:
c_range = &f_curr->fce_range;
n_range = &f_next->fce_range;
- LASSERT(range_is_sane(c_range));
+ LASSERT(lu_seq_range_is_sane(c_range));
if (&f_next->fce_list == head)
break;
@@ -358,7 +358,7 @@ struct fld_cache_entry
{
struct fld_cache_entry *f_new;
- LASSERT(range_is_sane(range));
+ LASSERT(lu_seq_range_is_sane(range));
f_new = kzalloc(sizeof(*f_new), GFP_NOFS);
if (!f_new)
@@ -503,7 +503,7 @@ int fld_cache_lookup(struct fld_cache *cache,
}
prev = flde;
- if (range_within(&flde->fce_range, seq)) {
+ if (lu_seq_range_within(&flde->fce_range, seq)) {
*range = flde->fce_range;
cache->fci_stat.fst_cache++;
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index 08eaec735d6f..4a7f0b71c48d 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -62,11 +62,6 @@
#include "../include/lustre_req_layout.h"
#include "../include/lustre_fld.h"
-enum {
- LUSTRE_FLD_INIT = 1 << 0,
- LUSTRE_FLD_RUN = 1 << 1
-};
-
struct fld_stats {
__u64 fst_count;
__u64 fst_cache;
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index 0de72b717ce5..4cade7a16800 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -159,11 +159,6 @@ int fld_client_add_target(struct lu_client_fld *fld,
LASSERT(name);
LASSERT(tar->ft_srv || tar->ft_exp);
- if (fld->lcf_flags != LUSTRE_FLD_INIT) {
- CERROR("%s: Attempt to add target %s (idx %llu) on fly - skip it\n",
- fld->lcf_name, name, tar->ft_idx);
- return 0;
- }
CDEBUG(D_INFO, "%s: Adding target %s (idx %llu)\n",
fld->lcf_name, name, tar->ft_idx);
@@ -282,7 +277,6 @@ int fld_client_init(struct lu_client_fld *fld,
fld->lcf_count = 0;
spin_lock_init(&fld->lcf_lock);
fld->lcf_hash = &fld_hash[hash];
- fld->lcf_flags = LUSTRE_FLD_INIT;
INIT_LIST_HEAD(&fld->lcf_targets);
cache_size = FLD_CLIENT_CACHE_SIZE /
@@ -421,8 +415,6 @@ int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds,
struct lu_fld_target *target;
int rc;
- fld->lcf_flags |= LUSTRE_FLD_RUN;
-
rc = fld_cache_lookup(fld->lcf_cache, seq, &res);
if (rc == 0) {
*mds = res.lsr_index;
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index 89292c93dcd5..dc685610c4c4 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -59,10 +59,6 @@
* read/write system call it is associated with the single user
* thread, that issued the system call).
*
- * - cl_req represents a collection of pages for a transfer. cl_req is
- * constructed by req-forming engine that tries to saturate
- * transport with large and continuous transfers.
- *
* Terminology
*
* - to avoid confusion high-level I/O operation like read or write system
@@ -103,11 +99,8 @@
struct inode;
struct cl_device;
-struct cl_device_operations;
struct cl_object;
-struct cl_object_page_operations;
-struct cl_object_lock_operations;
struct cl_page;
struct cl_page_slice;
@@ -120,27 +113,7 @@ struct cl_page_operations;
struct cl_io;
struct cl_io_slice;
-struct cl_req;
-struct cl_req_slice;
-
-/**
- * Operations for each data device in the client stack.
- *
- * \see vvp_cl_ops, lov_cl_ops, lovsub_cl_ops, osc_cl_ops
- */
-struct cl_device_operations {
- /**
- * Initialize cl_req. This method is called top-to-bottom on all
- * devices in the stack to get them a chance to allocate layer-private
- * data, and to attach them to the cl_req by calling
- * cl_req_slice_add().
- *
- * \see osc_req_init(), lov_req_init(), lovsub_req_init()
- * \see vvp_req_init()
- */
- int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req);
-};
+struct cl_req_attr;
/**
* Device in the client stack.
@@ -150,8 +123,6 @@ struct cl_device_operations {
struct cl_device {
/** Super-class. */
struct lu_device cd_lu_dev;
- /** Per-layer operation vector. */
- const struct cl_device_operations *cd_ops;
};
/** \addtogroup cl_object cl_object
@@ -267,7 +238,7 @@ struct cl_object_conf {
/**
* Object layout. This is consumed by lov.
*/
- struct lustre_md *coc_md;
+ struct lu_buf coc_layout;
/**
* Description of particular stripe location in the
* cluster. This is consumed by osc.
@@ -301,6 +272,26 @@ enum {
OBJECT_CONF_WAIT = 2
};
+enum {
+ CL_LAYOUT_GEN_NONE = (u32)-2, /* layout lock was cancelled */
+ CL_LAYOUT_GEN_EMPTY = (u32)-1, /* for empty layout */
+};
+
+struct cl_layout {
+ /** the buffer to return the layout in lov_mds_md format. */
+ struct lu_buf cl_buf;
+ /** size of layout in lov_mds_md format. */
+ size_t cl_size;
+ /** Layout generation. */
+ u32 cl_layout_gen;
+ /**
+ * True if this is a released file.
+ * Temporarily added for released file truncate in ll_setattr_raw().
+ * It will be removed later. -Jinshan
+ */
+ bool cl_is_released;
+};
+
/**
* Operations implemented for each cl object layer.
*
@@ -400,6 +391,27 @@ struct cl_object_operations {
*/
int (*coo_getstripe)(const struct lu_env *env, struct cl_object *obj,
struct lov_user_md __user *lum);
+ /**
+ * Get FIEMAP mapping from the object.
+ */
+ int (*coo_fiemap)(const struct lu_env *env, struct cl_object *obj,
+ struct ll_fiemap_info_key *fmkey,
+ struct fiemap *fiemap, size_t *buflen);
+ /**
+ * Get layout and generation of the object.
+ */
+ int (*coo_layout_get)(const struct lu_env *env, struct cl_object *obj,
+ struct cl_layout *layout);
+ /**
+ * Get maximum size of the object.
+ */
+ loff_t (*coo_maxbytes)(struct cl_object *obj);
+ /**
+ * Set request attributes.
+ */
+ void (*coo_req_attr_set)(const struct lu_env *env,
+ struct cl_object *obj,
+ struct cl_req_attr *attr);
};
/**
@@ -591,7 +603,7 @@ enum cl_page_state {
*
* - [cl_page_state::CPS_PAGEOUT] page is dirty, the
* req-formation engine decides that it wants to include this page
- * into an cl_req being constructed, and yanks it from the cache;
+ * into an RPC being constructed, and yanks it from the cache;
*
* - [cl_page_state::CPS_FREEING] VM callback is executed to
* evict the page form the memory;
@@ -660,7 +672,7 @@ enum cl_page_state {
* Page is being read in, as a part of a transfer. This is quite
* similar to the cl_page_state::CPS_PAGEOUT state, except that
* read-in is always "immediate"---there is no such thing a sudden
- * construction of read cl_req from cached, presumably not up to date,
+ * construction of read request from cached, presumably not up to date,
* pages.
*
* Underlying VM page is locked for the duration of transfer.
@@ -714,8 +726,6 @@ struct cl_page {
struct list_head cp_batch;
/** List of slices. Immutable after creation. */
struct list_head cp_layers;
- /** Linkage of pages within cl_req. */
- struct list_head cp_flight;
/**
* Page state. This field is const to avoid accidental update, it is
* modified only internally within cl_page.c. Protected by a VM lock.
@@ -732,12 +742,6 @@ struct cl_page {
* by sub-io. Protected by a VM lock.
*/
struct cl_io *cp_owner;
- /**
- * Owning IO request in cl_page_state::CPS_PAGEOUT and
- * cl_page_state::CPS_PAGEIN states. This field is maintained only in
- * the top-level pages. Protected by a VM lock.
- */
- struct cl_req *cp_req;
/** List of references to this page, for debugging. */
struct lu_ref cp_reference;
/** Link to an object, for debugging. */
@@ -779,7 +783,6 @@ enum cl_lock_mode {
/**
* Requested transfer type.
- * \ingroup cl_req
*/
enum cl_req_type {
CRT_READ,
@@ -884,26 +887,6 @@ struct cl_page_operations {
/** Destructor. Frees resources and slice itself. */
void (*cpo_fini)(const struct lu_env *env,
struct cl_page_slice *slice);
-
- /**
- * Checks whether the page is protected by a cl_lock. This is a
- * per-layer method, because certain layers have ways to check for the
- * lock much more efficiently than through the generic locks scan, or
- * implement locking mechanisms separate from cl_lock, e.g.,
- * LL_FILE_GROUP_LOCKED in vvp. If \a pending is true, check for locks
- * being canceled, or scheduled for cancellation as soon as the last
- * user goes away, too.
- *
- * \retval -EBUSY: page is protected by a lock of a given mode;
- * \retval -ENODATA: page is not protected by a lock;
- * \retval 0: this layer cannot decide.
- *
- * \see cl_page_is_under_lock()
- */
- int (*cpo_is_under_lock)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io, pgoff_t *max);
-
/**
* Optional debugging helper. Prints given page slice.
*
@@ -915,8 +898,7 @@ struct cl_page_operations {
/**
* \name transfer
*
- * Transfer methods. See comment on cl_req for a description of
- * transfer formation and life-cycle.
+ * Transfer methods.
*
* @{
*/
@@ -962,7 +944,7 @@ struct cl_page_operations {
int ioret);
/**
* Called when cached page is about to be added to the
- * cl_req as a part of req formation.
+ * ptlrpc request as a part of req formation.
*
* \return 0 : proceed with this page;
* \return -EAGAIN : skip this page;
@@ -1365,7 +1347,6 @@ struct cl_2queue {
* (3) sort all locks to avoid dead-locks, and acquire them
*
* (4) process the chunk: call per-page methods
- * (cl_io_operations::cio_read_page() for read,
* cl_io_operations::cio_prepare_write(),
* cl_io_operations::cio_commit_write() for write)
*
@@ -1388,6 +1369,8 @@ enum cl_io_type {
CIT_WRITE,
/** truncate, utime system calls */
CIT_SETATTR,
+ /** get data version */
+ CIT_DATA_VERSION,
/**
* page fault handling
*/
@@ -1467,6 +1450,31 @@ struct cl_io_slice {
typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
struct cl_page *);
+
+struct cl_read_ahead {
+ /*
+ * Maximum page index the readahead window will end.
+ * This is determined DLM lock coverage, RPC and stripe boundary.
+ * cra_end is included.
+ */
+ pgoff_t cra_end;
+ /*
+ * Release routine. If readahead holds resources underneath, this
+ * function should be called to release it.
+ */
+ void (*cra_release)(const struct lu_env *env, void *cbdata);
+ /* Callback data for cra_release routine */
+ void *cra_cbdata;
+};
+
+static inline void cl_read_ahead_release(const struct lu_env *env,
+ struct cl_read_ahead *ra)
+{
+ if (ra->cra_release)
+ ra->cra_release(env, ra->cra_cbdata);
+ memset(ra, 0, sizeof(*ra));
+}
+
/**
* Per-layer io operations.
* \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
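
The hunk above adds struct cl_read_ahead together with cl_read_ahead_release(): a layer that pins resources while deciding the read-ahead window records a cleanup hook in cra_release/cra_cbdata, and the caller drops everything with one helper call. A reduced standalone sketch of that idiom follows; the struct is trimmed, the env parameter is dropped, and drop_lock() plus its string argument are hypothetical, not CLIO API.

#include <stdio.h>
#include <string.h>

struct read_ahead {
	unsigned long cra_end;			/* last page index covered */
	void (*cra_release)(void *cbdata);	/* optional cleanup hook */
	void *cra_cbdata;			/* data for the hook */
};

static void read_ahead_release(struct read_ahead *ra)
{
	if (ra->cra_release)
		ra->cra_release(ra->cra_cbdata);
	memset(ra, 0, sizeof(*ra));		/* safe to call release twice */
}

static void drop_lock(void *cbdata)
{
	printf("releasing %s\n", (const char *)cbdata);
}

int main(void)
{
	struct read_ahead ra = {
		.cra_end = 255,
		.cra_release = drop_lock,
		.cra_cbdata = "example-extent-lock",
	};

	printf("read ahead allowed up to page %lu\n", ra.cra_end);
	read_ahead_release(&ra);	/* runs drop_lock(), then zeroes ra */
	return 0;
}
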
@@ -1573,16 +1581,13 @@ struct cl_io_operations {
struct cl_page_list *queue, int from, int to,
cl_commit_cbt cb);
/**
- * Read missing page.
- *
- * Called by a top-level cl_io_operations::op[CIT_READ]::cio_start()
- * method, when it hits not-up-to-date page in the range. Optional.
+ * Decide maximum read ahead extent
*
* \pre io->ci_type == CIT_READ
*/
- int (*cio_read_page)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- const struct cl_page_slice *page);
+ int (*cio_read_ahead)(const struct lu_env *env,
+ const struct cl_io_slice *slice,
+ pgoff_t start, struct cl_read_ahead *ra);
/**
* Optional debugging helper. Print given io slice.
*/
@@ -1765,10 +1770,15 @@ struct cl_io {
struct cl_io_rw_common ci_rw;
struct cl_setattr_io {
struct ost_lvb sa_attr;
+ unsigned int sa_attr_flags;
unsigned int sa_valid;
int sa_stripe_index;
- struct lu_fid *sa_parent_fid;
+ const struct lu_fid *sa_parent_fid;
} ci_setattr;
+ struct cl_data_version_io {
+ u64 dv_data_version;
+ int dv_flags;
+ } ci_data_version;
struct cl_fault_io {
/** page index within file. */
pgoff_t ft_index;
@@ -1836,179 +1846,20 @@ struct cl_io {
/** @} cl_io */
-/** \addtogroup cl_req cl_req
- * @{
- */
-/** \struct cl_req
- * Transfer.
- *
- * There are two possible modes of transfer initiation on the client:
- *
- * - immediate transfer: this is started when a high level io wants a page
- * or a collection of pages to be transferred right away. Examples:
- * read-ahead, synchronous read in the case of non-page aligned write,
- * page write-out as a part of extent lock cancellation, page write-out
- * as a part of memory cleansing. Immediate transfer can be both
- * cl_req_type::CRT_READ and cl_req_type::CRT_WRITE;
- *
- * - opportunistic transfer (cl_req_type::CRT_WRITE only), that happens
- * when io wants to transfer a page to the server some time later, when
- * it can be done efficiently. Example: pages dirtied by the write(2)
- * path.
- *
- * In any case, transfer takes place in the form of a cl_req, which is a
- * representation for a network RPC.
- *
- * Pages queued for an opportunistic transfer are cached until it is decided
- * that efficient RPC can be composed of them. This decision is made by "a
- * req-formation engine", currently implemented as a part of osc
- * layer. Req-formation depends on many factors: the size of the resulting
- * RPC, whether or not multi-object RPCs are supported by the server,
- * max-rpc-in-flight limitations, size of the dirty cache, etc.
- *
- * For the immediate transfer io submits a cl_page_list, that req-formation
- * engine slices into cl_req's, possibly adding cached pages to some of
- * the resulting req's.
- *
- * Whenever a page from cl_page_list is added to a newly constructed req, its
- * cl_page_operations::cpo_prep() layer methods are called. At that moment,
- * page state is atomically changed from cl_page_state::CPS_OWNED to
- * cl_page_state::CPS_PAGEOUT or cl_page_state::CPS_PAGEIN, cl_page::cp_owner
- * is zeroed, and cl_page::cp_req is set to the
- * req. cl_page_operations::cpo_prep() method at the particular layer might
- * return -EALREADY to indicate that it does not need to submit this page
- * at all. This is possible, for example, if page, submitted for read,
- * became up-to-date in the meantime; and for write, the page don't have
- * dirty bit marked. \see cl_io_submit_rw()
- *
- * Whenever a cached page is added to a newly constructed req, its
- * cl_page_operations::cpo_make_ready() layer methods are called. At that
- * moment, page state is atomically changed from cl_page_state::CPS_CACHED to
- * cl_page_state::CPS_PAGEOUT, and cl_page::cp_req is set to
- * req. cl_page_operations::cpo_make_ready() method at the particular layer
- * might return -EAGAIN to indicate that this page is not eligible for the
- * transfer right now.
- *
- * FUTURE
- *
- * Plan is to divide transfers into "priority bands" (indicated when
- * submitting cl_page_list, and queuing a page for the opportunistic transfer)
- * and allow glueing of cached pages to immediate transfers only within single
- * band. This would make high priority transfers (like lock cancellation or
- * memory pressure induced write-out) really high priority.
- *
- */
-
/**
* Per-transfer attributes.
*/
struct cl_req_attr {
+ enum cl_req_type cra_type;
+ u64 cra_flags;
+ struct cl_page *cra_page;
+
/** Generic attributes for the server consumption. */
struct obdo *cra_oa;
/** Jobid */
char cra_jobid[LUSTRE_JOBID_SIZE];
};
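
A hedged sketch of how the slimmed-down cl_req_attr is used: the transfer engine describes the RPC it is building and asks the object layers to fill in the obdo via cl_req_attr_set() (prototype further below). The helper name and the exact flag set are illustrative, not taken from this patch.

	/* Illustrative only: the flag values are examples, not a required set. */
	static void example_fill_write_attrs(const struct lu_env *env,
					     struct cl_object *obj,
					     struct cl_page *page, struct obdo *oa)
	{
		struct cl_req_attr attr = {
			.cra_type  = CRT_WRITE,
			.cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME,
			.cra_page  = page,
			.cra_oa    = oa,
		};

		/* layers fill *oa (and the jobid) for the RPC being formed */
		cl_req_attr_set(env, obj, &attr);
	}
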
-/**
- * Transfer request operations definable at every layer.
- *
- * Concurrency: transfer formation engine synchronizes calls to all transfer
- * methods.
- */
-struct cl_req_operations {
- /**
- * Invoked top-to-bottom by cl_req_prep() when transfer formation is
- * complete (all pages are added).
- *
- * \see osc_req_prep()
- */
- int (*cro_prep)(const struct lu_env *env,
- const struct cl_req_slice *slice);
- /**
- * Called top-to-bottom to fill in \a oa fields. This is called twice
- * with different flags, see bug 10150 and osc_build_req().
- *
- * \param obj an object from cl_req which attributes are to be set in
- * \a oa.
- *
- * \param oa struct obdo where attributes are placed
- *
- * \param flags \a oa fields to be filled.
- */
- void (*cro_attr_set)(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, u64 flags);
- /**
- * Called top-to-bottom from cl_req_completion() to notify layers that
- * transfer completed. Has to free all state allocated by
- * cl_device_operations::cdo_req_init().
- */
- void (*cro_completion)(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret);
-};
-
-/**
- * A per-object state that (potentially multi-object) transfer request keeps.
- */
-struct cl_req_obj {
- /** object itself */
- struct cl_object *ro_obj;
- /** reference to cl_req_obj::ro_obj. For debugging. */
- struct lu_ref_link ro_obj_ref;
- /* something else? Number of pages for a given object? */
-};
-
-/**
- * Transfer request.
- *
- * Transfer requests are not reference counted, because IO sub-system owns
- * them exclusively and knows when to free them.
- *
- * Life cycle.
- *
- * cl_req is created by cl_req_alloc() that calls
- * cl_device_operations::cdo_req_init() device methods to allocate per-req
- * state in every layer.
- *
- * Then pages are added (cl_req_page_add()), req keeps track of all objects it
- * contains pages for.
- *
- * Once all pages were collected, cl_page_operations::cpo_prep() method is
- * called top-to-bottom. At that point layers can modify req, let it pass, or
- * deny it completely. This is to support things like SNS that have transfer
- * ordering requirements invisible to the individual req-formation engine.
- *
- * On transfer completion (or transfer timeout, or failure to initiate the
- * transfer of an allocated req), cl_req_operations::cro_completion() method
- * is called, after execution of cl_page_operations::cpo_completion() of all
- * req's pages.
- */
-struct cl_req {
- enum cl_req_type crq_type;
- /** A list of pages being transferred */
- struct list_head crq_pages;
- /** Number of pages in cl_req::crq_pages */
- unsigned crq_nrpages;
- /** An array of objects which pages are in ->crq_pages */
- struct cl_req_obj *crq_o;
- /** Number of elements in cl_req::crq_objs[] */
- unsigned crq_nrobjs;
- struct list_head crq_layers;
-};
-
-/**
- * Per-layer state for request.
- */
-struct cl_req_slice {
- struct cl_req *crs_req;
- struct cl_device *crs_dev;
- struct list_head crs_linkage;
- const struct cl_req_operations *crs_ops;
-};
-
-/* @} cl_req */
-
enum cache_stats_item {
/** how many cache lookups were performed */
CS_lookup = 0,
@@ -2153,9 +2004,6 @@ void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
const struct cl_lock_operations *ops);
void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
struct cl_object *obj, const struct cl_io_operations *ops);
-void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
- struct cl_device *dev,
- const struct cl_req_operations *ops);
/** @} helpers */
/** \defgroup cl_object cl_object
@@ -2183,6 +2031,12 @@ int cl_object_prune(const struct lu_env *env, struct cl_object *obj);
void cl_object_kill(const struct lu_env *env, struct cl_object *obj);
int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
struct lov_user_md __user *lum);
+int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj,
+ struct ll_fiemap_info_key *fmkey, struct fiemap *fiemap,
+ size_t *buflen);
+int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj,
+ struct cl_layout *cl);
+loff_t cl_object_maxbytes(struct cl_object *obj);
/**
* Returns true, iff \a o0 and \a o1 are slices of the same object.
@@ -2302,8 +2156,6 @@ void cl_page_discard(const struct lu_env *env, struct cl_io *io,
void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg);
void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate);
-int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, pgoff_t *max_index);
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
size_t cl_page_size(const struct cl_object *obj);
@@ -2414,8 +2266,6 @@ int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
struct cl_io_lock_link *link);
int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
struct cl_lock_descr *descr);
-int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page);
int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
enum cl_req_type iot, struct cl_2queue *queue);
int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
@@ -2424,6 +2274,8 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
struct cl_page_list *queue, int from, int to,
cl_commit_cbt cb);
+int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
+ pgoff_t start, struct cl_read_ahead *ra);
int cl_io_is_going(const struct lu_env *env);
/**
@@ -2520,19 +2372,8 @@ void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page);
/** @} cl_page_list */
-/** \defgroup cl_req cl_req
- * @{
- */
-struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
- enum cl_req_type crt, int nr_objects);
-
-void cl_req_page_add(const struct lu_env *env, struct cl_req *req,
- struct cl_page *page);
-void cl_req_page_done(const struct lu_env *env, struct cl_page *page);
-int cl_req_prep(const struct lu_env *env, struct cl_req *req);
-void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
- struct cl_req_attr *attr, u64 flags);
-void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret);
+void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
+ struct cl_req_attr *attr);
/** \defgroup cl_sync_io cl_sync_io
* @{
@@ -2568,8 +2409,6 @@ void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
/** @} cl_sync_io */
-/** @} cl_req */
-
/** \defgroup cl_env cl_env
*
* lu_env handling for a client.
@@ -2593,35 +2432,13 @@ void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
* - allocation and destruction of environment is amortized by caching no
* longer used environments instead of destroying them;
*
- * - there is a notion of "current" environment, attached to the kernel
- * data structure representing current thread Top-level lustre code
- * allocates an environment and makes it current, then calls into
- * non-lustre code, that in turn calls lustre back. Low-level lustre
- * code thus called can fetch environment created by the top-level code
- * and reuse it, avoiding additional environment allocation.
- * Right now, three interfaces can attach the cl_env to running thread:
- * - cl_env_get
- * - cl_env_implant
- * - cl_env_reexit(cl_env_reenter had to be called priorly)
- *
* \see lu_env, lu_context, lu_context_key
* @{
*/
-struct cl_env_nest {
- int cen_refcheck;
- void *cen_cookie;
-};
-
struct lu_env *cl_env_get(int *refcheck);
struct lu_env *cl_env_alloc(int *refcheck, __u32 tags);
-struct lu_env *cl_env_nested_get(struct cl_env_nest *nest);
void cl_env_put(struct lu_env *env, int *refcheck);
-void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env);
-void *cl_env_reenter(void);
-void cl_env_reexit(void *cookie);
-void cl_env_implant(struct lu_env *env, int *refcheck);
-void cl_env_unplant(struct lu_env *env, int *refcheck);
unsigned int cl_env_cache_purge(unsigned int nr);
struct lu_env *cl_env_percpu_get(void);
void cl_env_percpu_put(struct lu_env *env);
diff --git a/drivers/staging/lustre/lustre/include/llog_swab.h b/drivers/staging/lustre/lustre/include/llog_swab.h
new file mode 100644
index 000000000000..fd7ffb154ad1
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/llog_swab.h
@@ -0,0 +1,65 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2014, Intel Corporation.
+ *
+ * Copyright 2015 Cray Inc, all rights reserved.
+ * Author: Ben Evans.
+ *
+ * We assume all nodes are either little-endian or big-endian, and we
+ * always send messages in the sender's native format. The receiver
+ * detects the message format by checking the 'magic' field of the message
+ * (see lustre_msg_swabbed() below).
+ * (see lustre_msg_swabbed()).
+ * Each type has corresponding 'lustre_swab_xxxtypexxx()' routines
+ * Each type has corresponding 'lustre_swab_xxxtypexxx()' routines, which
+ * are implemented in ptlrpc/pack_generic.c. These 'swabbers' convert the
+ *
+ * A swabber takes a single pointer argument. The caller must already have
+ * verified that the length of the message buffer >= sizeof (type).
+ *
+ * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
+ * may be defined that swabs just the variable part, after the caller has
+ * verified that the message buffer is large enough.
+ */
+
+#ifndef _LLOG_SWAB_H_
+#define _LLOG_SWAB_H_
+
+#include "lustre/lustre_idl.h"
+struct lustre_cfg;
+
+void lustre_swab_lu_fid(struct lu_fid *fid);
+void lustre_swab_ost_id(struct ost_id *oid);
+void lustre_swab_llogd_body(struct llogd_body *d);
+void lustre_swab_llog_hdr(struct llog_log_hdr *h);
+void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
+void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
+void lustre_swab_lu_seq_range(struct lu_seq_range *range);
+void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
+void lustre_swab_cfg_marker(struct cfg_marker *marker,
+ int swab, int size);
+
+#endif
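
As a hedged illustration of the endianness convention described in the header comment above: the receiver swabs in place only when the record magic arrives byte-swapped. The wrapper name is hypothetical; the magic check mirrors the usual pattern in the llog code.

	/* Illustrative only: swab a log header that arrived from a peer of the
	 * opposite endianness.  A native-endian header is left untouched.
	 */
	static void example_fixup_llog_hdr(struct llog_log_hdr *llh)
	{
		if (llh->llh_hdr.lrh_type == __swab32(LLOG_HDR_MAGIC))
			lustre_swab_llog_hdr(llh);	/* converts in place */
	}
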
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index cc0713ef8ae5..62753dae0bfa 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -43,6 +43,8 @@
#include <linux/spinlock.h>
#include <linux/types.h>
+#include "../../include/linux/libcfs/libcfs.h"
+#include "lustre_cfg.h"
#include "lustre/lustre_idl.h"
struct lprocfs_vars {
@@ -540,7 +542,8 @@ lprocfs_alloc_stats(unsigned int num, enum lprocfs_stats_flags flags);
void lprocfs_clear_stats(struct lprocfs_stats *stats);
void lprocfs_free_stats(struct lprocfs_stats **stats);
void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
- unsigned conf, const char *name, const char *units);
+ unsigned int conf, const char *name,
+ const char *units);
struct obd_export;
int lprocfs_exp_cleanup(struct obd_export *exp);
struct dentry *ldebugfs_add_simple(struct dentry *root,
@@ -701,9 +704,9 @@ static struct lustre_attr lustre_attr_##name = __ATTR(name, mode, show, store)
extern const struct sysfs_ops lustre_sysfs_ops;
struct root_squash_info;
-int lprocfs_wr_root_squash(const char *buffer, unsigned long count,
+int lprocfs_wr_root_squash(const char __user *buffer, unsigned long count,
struct root_squash_info *squash, char *name);
-int lprocfs_wr_nosquash_nids(const char *buffer, unsigned long count,
+int lprocfs_wr_nosquash_nids(const char __user *buffer, unsigned long count,
struct root_squash_info *squash, char *name);
/* all quota proc functions */
diff --git a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
index c2340d643e84..b8ad5559a3b9 100644
--- a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
+++ b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
@@ -41,79 +41,24 @@
#ifndef _LUSTRE_FIEMAP_H
#define _LUSTRE_FIEMAP_H
-struct ll_fiemap_extent {
- __u64 fe_logical; /* logical offset in bytes for the start of
- * the extent from the beginning of the file
- */
- __u64 fe_physical; /* physical offset in bytes for the start
- * of the extent from the beginning of the disk
- */
- __u64 fe_length; /* length in bytes for this extent */
- __u64 fe_reserved64[2];
- __u32 fe_flags; /* FIEMAP_EXTENT_* flags for this extent */
- __u32 fe_device; /* device number for this extent */
- __u32 fe_reserved[2];
-};
-
-struct ll_user_fiemap {
- __u64 fm_start; /* logical offset (inclusive) at
- * which to start mapping (in)
- */
- __u64 fm_length; /* logical length of mapping which
- * userspace wants (in)
- */
- __u32 fm_flags; /* FIEMAP_FLAG_* flags for request (in/out) */
- __u32 fm_mapped_extents;/* number of extents that were mapped (out) */
- __u32 fm_extent_count; /* size of fm_extents array (in) */
- __u32 fm_reserved;
- struct ll_fiemap_extent fm_extents[0]; /* array of mapped extents (out) */
-};
-
-#define FIEMAP_MAX_OFFSET (~0ULL)
+#ifndef __KERNEL__
+#include <stddef.h>
+#include <fiemap.h>
+#endif
-#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before
- * map
- */
-#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute
- * tree
- */
-#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */
-#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */
-#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending.
- * Sets EXTENT_UNKNOWN.
- */
-#define FIEMAP_EXTENT_ENCODED 0x00000008 /* Data can not be read
- * while fs is unmounted
- */
-#define FIEMAP_EXTENT_DATA_ENCRYPTED 0x00000080 /* Data is encrypted by fs.
- * Sets EXTENT_NO_DIRECT.
- */
-#define FIEMAP_EXTENT_NOT_ALIGNED 0x00000100 /* Extent offsets may not be
- * block aligned.
- */
-#define FIEMAP_EXTENT_DATA_INLINE 0x00000200 /* Data mixed with metadata.
- * Sets EXTENT_NOT_ALIGNED.*/
-#define FIEMAP_EXTENT_DATA_TAIL 0x00000400 /* Multiple files in block.
- * Sets EXTENT_NOT_ALIGNED.
- */
-#define FIEMAP_EXTENT_UNWRITTEN 0x00000800 /* Space allocated, but
- * no data (i.e. zero).
- */
-#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively
- * support extents. Result
- * merged for efficiency.
- */
+/* XXX: We use fiemap_extent::fe_reserved[0] */
+#define fe_device fe_reserved[0]
static inline size_t fiemap_count_to_size(size_t extent_count)
{
- return (sizeof(struct ll_user_fiemap) + extent_count *
- sizeof(struct ll_fiemap_extent));
+ return sizeof(struct fiemap) + extent_count *
+ sizeof(struct fiemap_extent);
}
static inline unsigned fiemap_size_to_count(size_t array_size)
{
- return ((array_size - sizeof(struct ll_user_fiemap)) /
- sizeof(struct ll_fiemap_extent));
+ return (array_size - sizeof(struct fiemap)) /
+ sizeof(struct fiemap_extent);
}
#define FIEMAP_FLAG_DEVICE_ORDER 0x40000000 /* return device ordered mapping */
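
To make the size arithmetic of the two helpers concrete, a small hedged sketch; the 32- and 56-byte figures assume the usual <linux/fiemap.h> layout, and kzalloc()/GFP_KERNEL come from <linux/slab.h>.

	/* Illustrative only: allocate a fiemap buffer with room for 'count'
	 * extents, e.g. count == 4 gives 32 + 4 * 56 = 256 bytes.
	 */
	static struct fiemap *example_alloc_fiemap(unsigned int count)
	{
		size_t len = fiemap_count_to_size(count);
		struct fiemap *fm = kzalloc(len, GFP_KERNEL);

		if (fm)
			fm->fm_extent_count = fiemap_size_to_count(len); /* == count */
		return fm;
	}
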
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 72eaee95c6b8..65ce503ad595 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -48,8 +48,7 @@
* that the Lustre wire protocol is not influenced by external dependencies.
*
* The only other acceptable items in this file are VERY SIMPLE accessor
- * functions to avoid callers grubbing inside the structures, and the
- * prototypes of the swabber functions for each struct. Nothing that
+ * functions to avoid callers grubbing inside the structures. Nothing that
* depends on external functions or definitions should be in here.
*
* Structs must be properly aligned to put 64-bit values on an 8-byte
@@ -64,23 +63,6 @@
* in the code to ensure that new/old clients that see this larger struct
* do not fail, otherwise you need to implement protocol compatibility).
*
- * We assume all nodes are either little-endian or big-endian, and we
- * always send messages in the sender's native format. The receiver
- * detects the message format by checking the 'magic' field of the message
- * (see lustre_msg_swabbed() below).
- *
- * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines,
- * implemented either here, inline (trivial implementations) or in
- * ptlrpc/pack_generic.c. These 'swabbers' convert the type from "other"
- * endian, in-place in the message buffer.
- *
- * A swabber takes a single pointer argument. The caller must already have
- * verified that the length of the message buffer >= sizeof (type).
- *
- * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
- * may be defined that swabs just the variable part, after the caller has
- * verified that the message buffer is large enough.
- *
* @{
*/
@@ -192,113 +174,6 @@ struct lu_seq_range_array {
#define LU_SEQ_RANGE_MASK 0x3
-static inline unsigned fld_range_type(const struct lu_seq_range *range)
-{
- return range->lsr_flags & LU_SEQ_RANGE_MASK;
-}
-
-static inline bool fld_range_is_ost(const struct lu_seq_range *range)
-{
- return fld_range_type(range) == LU_SEQ_RANGE_OST;
-}
-
-static inline bool fld_range_is_mdt(const struct lu_seq_range *range)
-{
- return fld_range_type(range) == LU_SEQ_RANGE_MDT;
-}
-
-/**
- * This all range is only being used when fld client sends fld query request,
- * but it does not know whether the seq is MDT or OST, so it will send req
- * with ALL type, which means either seq type gotten from lookup can be
- * expected.
- */
-static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
-{
- return fld_range_type(range) == LU_SEQ_RANGE_ANY;
-}
-
-static inline void fld_range_set_type(struct lu_seq_range *range,
- unsigned flags)
-{
- range->lsr_flags |= flags;
-}
-
-static inline void fld_range_set_mdt(struct lu_seq_range *range)
-{
- fld_range_set_type(range, LU_SEQ_RANGE_MDT);
-}
-
-static inline void fld_range_set_ost(struct lu_seq_range *range)
-{
- fld_range_set_type(range, LU_SEQ_RANGE_OST);
-}
-
-static inline void fld_range_set_any(struct lu_seq_range *range)
-{
- fld_range_set_type(range, LU_SEQ_RANGE_ANY);
-}
-
-/**
- * returns width of given range \a r
- */
-
-static inline __u64 range_space(const struct lu_seq_range *range)
-{
- return range->lsr_end - range->lsr_start;
-}
-
-/**
- * initialize range to zero
- */
-
-static inline void range_init(struct lu_seq_range *range)
-{
- memset(range, 0, sizeof(*range));
-}
-
-/**
- * check if given seq id \a s is within given range \a r
- */
-
-static inline bool range_within(const struct lu_seq_range *range,
- __u64 s)
-{
- return s >= range->lsr_start && s < range->lsr_end;
-}
-
-static inline bool range_is_sane(const struct lu_seq_range *range)
-{
- return (range->lsr_end >= range->lsr_start);
-}
-
-static inline bool range_is_zero(const struct lu_seq_range *range)
-{
- return (range->lsr_start == 0 && range->lsr_end == 0);
-}
-
-static inline bool range_is_exhausted(const struct lu_seq_range *range)
-
-{
- return range_space(range) == 0;
-}
-
-/* return 0 if two range have the same location */
-static inline int range_compare_loc(const struct lu_seq_range *r1,
- const struct lu_seq_range *r2)
-{
- return r1->lsr_index != r2->lsr_index ||
- r1->lsr_flags != r2->lsr_flags;
-}
-
-#define DRANGE "[%#16.16Lx-%#16.16Lx):%x:%s"
-
-#define PRANGE(range) \
- (range)->lsr_start, \
- (range)->lsr_end, \
- (range)->lsr_index, \
- fld_range_is_mdt(range) ? "mdt" : "ost"
-
/** \defgroup lu_fid lu_fid
* @{
*/
@@ -310,7 +185,7 @@ static inline int range_compare_loc(const struct lu_seq_range *r1,
*/
enum lma_compat {
LMAC_HSM = 0x00000001,
- LMAC_SOM = 0x00000002,
+/* LMAC_SOM = 0x00000002, obsolete since 2.8.0 */
LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */
LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
* under /O/<seq>/d<x>.
@@ -644,13 +519,14 @@ static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
{
if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
if (oid >= IDIF_MAX_OID) {
- CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
+ CERROR("Too large OID %#llx to set MDT0 " DOSTID "\n",
+ oid, POSTID(oi));
return;
}
oi->oi.oi_id = oid;
} else if (fid_is_idif(&oi->oi_fid)) {
if (oid >= IDIF_MAX_OID) {
- CERROR("Bad %llu to set "DOSTID"\n",
+ CERROR("Too large OID %#llx to set IDIF " DOSTID "\n",
oid, POSTID(oi));
return;
}
@@ -676,7 +552,7 @@ static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
if (fid_is_idif(fid)) {
if (oid >= IDIF_MAX_OID) {
- CERROR("Too large OID %#llx to set IDIF "DFID"\n",
+ CERROR("Too large OID %#llx to set IDIF " DFID "\n",
(unsigned long long)oid, PFID(fid));
return -EBADF;
}
@@ -685,7 +561,7 @@ static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
fid->f_ver = oid >> 48;
} else {
if (oid >= OBIF_MAX_OID) {
- CERROR("Too large OID %#llx to set REG "DFID"\n",
+ CERROR("Too large OID %#llx to set REG " DFID "\n",
(unsigned long long)oid, PFID(fid));
return -EBADF;
}
@@ -785,8 +661,6 @@ static inline ino_t lu_igif_ino(const struct lu_fid *fid)
return fid_seq(fid);
}
-void lustre_swab_ost_id(struct ost_id *oid);
-
/**
* Get inode generation from a igif.
* \param fid a igif to get inode generation from.
@@ -847,9 +721,6 @@ static inline bool fid_is_sane(const struct lu_fid *fid)
fid_seq_is_rsvd(fid_seq(fid)));
}
-void lustre_swab_lu_fid(struct lu_fid *fid);
-void lustre_swab_lu_seq_range(struct lu_seq_range *range);
-
static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
{
return memcmp(f0, f1, sizeof(*f0)) == 0;
@@ -1099,8 +970,10 @@ struct ptlrpc_body_v3 {
__u32 pb_version;
__u32 pb_opc;
__u32 pb_status;
- __u64 pb_last_xid;
- __u64 pb_last_seen;
+ __u64 pb_last_xid; /* highest replied XID without lower unreplied XID */
+ __u16 pb_tag; /* virtual slot idx for multiple modifying RPCs */
+ __u16 pb_padding0;
+ __u32 pb_padding1;
__u64 pb_last_committed;
__u64 pb_transno;
__u32 pb_flags;
@@ -1112,8 +985,11 @@ struct ptlrpc_body_v3 {
__u64 pb_slv;
/* VBR: pre-versions */
__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
+ __u64 pb_mbits; /**< match bits for bulk request */
/* padding for future needs */
- __u64 pb_padding[4];
+ __u64 pb_padding64_0;
+ __u64 pb_padding64_1;
+ __u64 pb_padding64_2;
char pb_jobid[LUSTRE_JOBID_SIZE];
};
@@ -1125,8 +1001,10 @@ struct ptlrpc_body_v2 {
__u32 pb_version;
__u32 pb_opc;
__u32 pb_status;
- __u64 pb_last_xid;
- __u64 pb_last_seen;
+ __u64 pb_last_xid; /* highest replied XID without lower unreplied XID */
+ __u16 pb_tag; /* virtual slot idx for multiple modifying RPCs */
+ __u16 pb_padding0;
+ __u32 pb_padding1;
__u64 pb_last_committed;
__u64 pb_transno;
__u32 pb_flags;
@@ -1140,12 +1018,13 @@ struct ptlrpc_body_v2 {
__u64 pb_slv;
/* VBR: pre-versions */
__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
+ __u64 pb_mbits; /**< unused in V2 */
/* padding for future needs */
- __u64 pb_padding[4];
+ __u64 pb_padding64_0;
+ __u64 pb_padding64_1;
+ __u64 pb_padding64_2;
};
-void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
-
/* message body offset for lustre_msg_v2 */
/* ptlrpc body offset in all request/reply messages */
#define MSG_PTLRPC_BODY_OFF 0
@@ -1282,7 +1161,16 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
*/
#define OBD_CONNECT_LFSCK 0x40000000000000ULL/* support online LFSCK */
#define OBD_CONNECT_UNLINK_CLOSE 0x100000000000000ULL/* close file in unlink */
+#define OBD_CONNECT_MULTIMODRPCS 0x200000000000000ULL /* support multiple modify
+ * RPCs in parallel
+ */
#define OBD_CONNECT_DIR_STRIPE 0x400000000000000ULL/* striped DNE dir */
+#define OBD_CONNECT_SUBTREE 0x800000000000000ULL /* fileset mount */
+#define OBD_CONNECT_LOCK_AHEAD 0x1000000000000000ULL /* lock ahead */
+/** bulk matchbits is sent within ptlrpc_body */
+#define OBD_CONNECT_BULK_MBITS 0x2000000000000000ULL
+#define OBD_CONNECT_OBDOPACK 0x4000000000000000ULL /* compact OUT obdo */
+#define OBD_CONNECT_FLAGS2 0x8000000000000000ULL /* second flags word */
/* XXX README XXX:
* Please DO NOT add flag values here before first ensuring that this same
@@ -1313,25 +1201,6 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
* If we eventually have separate connect data for different types, which we
* almost certainly will, then perhaps we stick a union in here.
*/
-struct obd_connect_data_v1 {
- __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
- __u32 ocd_version; /* lustre release version number */
- __u32 ocd_grant; /* initial cache grant amount (bytes) */
- __u32 ocd_index; /* LOV index to connect to */
- __u32 ocd_brw_size; /* Maximum BRW size in bytes, must be 2^n */
- __u64 ocd_ibits_known; /* inode bits this client understands */
- __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */
- __u8 ocd_inodespace; /* log2 of the per-inode space consumption */
- __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */
- __u32 ocd_unused; /* also fix lustre_swab_connect */
- __u64 ocd_transno; /* first transno from client to be replayed */
- __u32 ocd_group; /* MDS group on OST */
- __u32 ocd_cksum_types; /* supported checksum algorithms */
- __u32 ocd_max_easize; /* How big LOV EA can be on MDS */
- __u32 ocd_instance; /* also fix lustre_swab_connect */
- __u64 ocd_maxbytes; /* Maximum stripe size in bytes */
-};
-
struct obd_connect_data {
__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
__u32 ocd_version; /* lustre release version number */
@@ -1354,8 +1223,10 @@ struct obd_connect_data {
* any field after ocd_maxbytes on the receiver without a valid flag
* may result in out-of-bound memory access and kernel oops.
*/
- __u64 padding1; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 padding2; /* added 2.1.0. also fix lustre_swab_connect */
+ __u16 ocd_maxmodrpcs; /* Maximum modify RPCs in parallel */
+ __u16 padding0; /* added 2.1.0. also fix lustre_swab_connect */
+ __u32 padding1; /* added 2.1.0. also fix lustre_swab_connect */
+ __u64 ocd_connect_flags2;
__u64 padding3; /* added 2.1.0. also fix lustre_swab_connect */
__u64 padding4; /* added 2.1.0. also fix lustre_swab_connect */
__u64 padding5; /* added 2.1.0. also fix lustre_swab_connect */
@@ -1380,8 +1251,6 @@ struct obd_connect_data {
* reserve the flag for future use.
*/
-void lustre_swab_connect(struct obd_connect_data *ocd);
-
/*
* Supported checksum algorithms. Up to 32 checksum types are supported.
* (32-bit mask stored in obd_connect_data::ocd_cksum_types)
@@ -1416,7 +1285,7 @@ enum ost_cmd {
OST_STATFS = 13,
OST_SYNC = 16,
OST_SET_INFO = 17,
- OST_QUOTACHECK = 18,
+ OST_QUOTACHECK = 18, /* not used since 2.4 */
OST_QUOTACTL = 19,
OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
OST_LAST_OPC
@@ -1580,8 +1449,6 @@ static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
}
-/* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */
-
#define MAX_MD_SIZE \
(sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
#define MIN_MD_SIZE \
@@ -1674,7 +1541,7 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
#define OBD_MD_FLCKSUM (0x00100000ULL) /* bulk data checksum */
#define OBD_MD_FLQOS (0x00200000ULL) /* quality of service stats */
/*#define OBD_MD_FLOSCOPQ (0x00400000ULL) osc opaque data, never used */
-#define OBD_MD_FLCOOKIE (0x00800000ULL) /* log cancellation cookie */
+/* OBD_MD_FLCOOKIE (0x00800000ULL) obsolete in 2.8 */
#define OBD_MD_FLGROUP (0x01000000ULL) /* group */
#define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */
#define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */
@@ -1713,7 +1580,9 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
/* OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) lfs rgetfacl, obsolete */
#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
-#define OBD_MD_FLRELEASED (0x0020000000000000ULL) /* file released */
+#define OBD_MD_CLOSE_INTENT_EXECED (0x0020000000000000ULL) /* close intent
+ * executed
+ */
#define OBD_MD_DEFAULT_MEA (0x0040000000000000ULL) /* default MEA */
@@ -1742,11 +1611,6 @@ struct hsm_state_set {
__u64 hss_clearmask;
};
-void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
-
-void lustre_swab_obd_statfs(struct obd_statfs *os);
-
/* ost_body.data values for OST_BRW */
#define OBD_BRW_READ 0x01
@@ -1786,14 +1650,16 @@ struct obd_ioobj {
__u32 ioo_bufcnt; /* number of niobufs for this object */
};
+/*
+ * NOTE: IOOBJ_MAX_BRW_BITS defines the _offset_ of the max_brw field in
+ * ioo_max_brw, NOT the maximum number of bits in PTLRPC_BULK_OPS_BITS.
+ * That said, ioo_max_brw is a 32-bit field so the limit is also 16 bits.
+ */
#define IOOBJ_MAX_BRW_BITS 16
-#define IOOBJ_TYPE_MASK ((1U << IOOBJ_MAX_BRW_BITS) - 1)
#define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
#define ioobj_max_brw_set(ioo, num) \
do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
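
A short worked example of the biased-by-one encoding these two macros implement; the wrapper function is illustrative, not part of the patch.

	/* Illustrative only: a count of 16 is stored as 15 in the upper 16 bits
	 * of ioo_max_brw and recovered by adding the bias back.
	 */
	static void example_ioobj_max_brw(struct obd_ioobj *ioo)
	{
		ioobj_max_brw_set(ioo, 16);		/* ioo_max_brw == 15 << 16 */
		WARN_ON(ioobj_max_brw_get(ioo) != 16);	/* ((15 << 16) >> 16) + 1 */
	}
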
-void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
-
/* multiple of 8 bytes => can array */
struct niobuf_remote {
__u64 rnb_offset;
@@ -1801,8 +1667,6 @@ struct niobuf_remote {
__u32 rnb_flags;
};
-void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
-
/* lock value block communicated between the filter and llite */
/* OST_LVB_ERR_INIT is needed because the return code in rc is
@@ -1824,8 +1688,6 @@ struct ost_lvb_v1 {
__u64 lvb_blocks;
};
-void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
-
struct ost_lvb {
__u64 lvb_size;
__s64 lvb_mtime;
@@ -1838,8 +1700,6 @@ struct ost_lvb {
__u32 lvb_padding;
};
-void lustre_swab_ost_lvb(struct ost_lvb *lvb);
-
/*
* lquota data structures
*/
@@ -1866,8 +1726,6 @@ struct obd_quotactl {
struct obd_dqblk qc_dqblk;
};
-void lustre_swab_obd_quotactl(struct obd_quotactl *q);
-
#define Q_COPY(out, in, member) (out)->member = (in)->member
#define QCTL_COPY(out, in) \
@@ -1905,8 +1763,6 @@ struct lquota_lvb {
__u64 lvb_pad1;
};
-void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
-
/* op codes */
enum quota_cmd {
QUOTA_DQACQ = 601,
@@ -1933,9 +1789,9 @@ enum mds_cmd {
MDS_PIN = 42, /* obsolete, never used in a release */
MDS_UNPIN = 43, /* obsolete, never used in a release */
MDS_SYNC = 44,
- MDS_DONE_WRITING = 45,
+ MDS_DONE_WRITING = 45, /* obsolete since 2.8.0 */
MDS_SET_INFO = 46,
- MDS_QUOTACHECK = 47,
+ MDS_QUOTACHECK = 47, /* not used since 2.4 */
MDS_QUOTACTL = 48,
MDS_GETXATTR = 49,
MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */
@@ -1972,8 +1828,6 @@ enum mdt_reint_cmd {
REINT_MAX
};
-void lustre_swab_generic_32s(__u32 *val);
-
/* the disposition of the intent outlines what was executed */
#define DISP_IT_EXECD 0x00000001
#define DISP_LOOKUP_EXECD 0x00000002
@@ -2031,36 +1885,19 @@ enum {
#define MDS_STATUS_CONN 1
#define MDS_STATUS_LOV 2
-/* mdt_thread_info.mti_flags. */
-enum md_op_flags {
- /* The flag indicates Size-on-MDS attributes are changed. */
- MF_SOM_CHANGE = (1 << 0),
- /* Flags indicates an epoch opens or closes. */
- MF_EPOCH_OPEN = (1 << 1),
- MF_EPOCH_CLOSE = (1 << 2),
- MF_MDC_CANCEL_FID1 = (1 << 3),
- MF_MDC_CANCEL_FID2 = (1 << 4),
- MF_MDC_CANCEL_FID3 = (1 << 5),
- MF_MDC_CANCEL_FID4 = (1 << 6),
- /* There is a pending attribute update. */
- MF_SOM_AU = (1 << 7),
- /* Cancel OST locks while getattr OST attributes. */
- MF_GETATTR_LOCK = (1 << 8),
- MF_GET_MDT_IDX = (1 << 9),
-};
-
-#define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE)
-
-#define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1
-
/* these should be identical to their EXT4_*_FL counterparts, they are
* redefined here only to avoid dragging in fs/ext4/ext4.h
*/
#define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */
#define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */
#define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */
+#define LUSTRE_NODUMP_FL 0x00000040 /* do not dump file */
#define LUSTRE_NOATIME_FL 0x00000080 /* do not update atime */
+#define LUSTRE_INDEX_FL 0x00001000 /* hash-indexed directory */
#define LUSTRE_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */
+#define LUSTRE_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
+#define LUSTRE_DIRECTIO_FL 0x00100000 /* Use direct i/o */
+#define LUSTRE_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */
/* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
* for the client inode i_flags. The LUSTRE_*_FL are the Lustre wire
@@ -2113,7 +1950,7 @@ struct mdt_body {
__u32 mbo_mode;
__u32 mbo_uid;
__u32 mbo_gid;
- __u32 mbo_flags;
+ __u32 mbo_flags; /* LUSTRE_*_FL file attributes */
__u32 mbo_rdev;
__u32 mbo_nlink; /* #bytes to read in the case of MDS_READPAGE */
__u32 mbo_unused2; /* was "generation" until 2.4.0 */
@@ -2121,7 +1958,7 @@ struct mdt_body {
__u32 mbo_eadatasize;
__u32 mbo_aclsize;
__u32 mbo_max_mdsize;
- __u32 mbo_max_cookiesize;
+ __u32 mbo_unused3; /* was max_cookiesize until 2.8 */
__u32 mbo_uid_h; /* high 32-bits of uid, for FUID */
__u32 mbo_gid_h; /* high 32-bits of gid, for FUID */
__u32 mbo_padding_5; /* also fix lustre_swab_mdt_body */
@@ -2132,17 +1969,13 @@ struct mdt_body {
__u64 mbo_padding_10;
}; /* 216 */
-void lustre_swab_mdt_body(struct mdt_body *b);
-
struct mdt_ioepoch {
- struct lustre_handle handle;
- __u64 ioepoch;
- __u32 flags;
- __u32 padding;
+ struct lustre_handle mio_handle;
+ __u64 mio_unused1; /* was ioepoch */
+ __u32 mio_unused2; /* was flags */
+ __u32 mio_padding;
};
-void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b);
-
/* permissions for md_perm.mp_perm */
enum {
CFS_SETUID_PERM = 0x01,
@@ -2178,8 +2011,6 @@ struct mdt_rec_setattr {
__u32 sa_padding_5;
};
-void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
-
/*
* Attribute flags used in mdt_rec_setattr::sa_valid.
* The kernel's #defines for ATTR_* should not be used over the network
@@ -2207,12 +2038,9 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
#define MDS_FMODE_CLOSED 00000000
#define MDS_FMODE_EXEC 00000004
-/* IO Epoch is opened on a closed file. */
-#define MDS_FMODE_EPOCH 01000000
-/* IO Epoch is opened on a file truncate. */
-#define MDS_FMODE_TRUNC 02000000
-/* Size-on-MDS Attribute Update is pending. */
-#define MDS_FMODE_SOM 04000000
+/* MDS_FMODE_EPOCH 01000000 obsolete since 2.8.0 */
+/* MDS_FMODE_TRUNC 02000000 obsolete since 2.8.0 */
+/* MDS_FMODE_SOM 04000000 obsolete since 2.8.0 */
#define MDS_OPEN_CREATED 00000010
#define MDS_OPEN_CROSS 00000020
@@ -2258,7 +2086,7 @@ enum mds_op_bias {
MDS_CROSS_REF = 1 << 1,
MDS_VTX_BYPASS = 1 << 2,
MDS_PERM_BYPASS = 1 << 3,
- MDS_SOM = 1 << 4,
+/* MDS_SOM = 1 << 4, obsolete since 2.8.0 */
MDS_QUOTA_IGNORE = 1 << 5,
MDS_CLOSE_CLEANUP = 1 << 6,
MDS_KEEP_ORPHAN = 1 << 7,
@@ -2268,6 +2096,7 @@ enum mds_op_bias {
MDS_OWNEROVERRIDE = 1 << 11,
MDS_HSM_RELEASE = 1 << 12,
MDS_RENAME_MIGRATE = BIT(13),
+ MDS_CLOSE_LAYOUT_SWAP = BIT(14),
};
/* instance of mdt_reint_rec */
@@ -2456,8 +2285,6 @@ struct mdt_rec_reint {
__u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
};
-void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
-
/* lmv structures */
struct lmv_desc {
__u32 ld_tgt_count; /* how many MDS's */
@@ -2547,8 +2374,6 @@ union lmv_mds_md {
struct lmv_user_md lmv_user_md;
};
-void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm);
-
static inline ssize_t lmv_mds_md_size(int stripe_count, unsigned int lmm_magic)
{
ssize_t len = -EINVAL;
@@ -2652,8 +2477,6 @@ struct lov_desc {
#define ld_magic ld_active_tgt_count /* for swabbing from llogs */
-void lustre_swab_lov_desc(struct lov_desc *ld);
-
/*
* LDLM requests:
*/
@@ -2749,24 +2572,38 @@ struct ldlm_flock_wire {
* on the resource type.
*/
-typedef union {
+union ldlm_wire_policy_data {
struct ldlm_extent l_extent;
struct ldlm_flock_wire l_flock;
struct ldlm_inodebits l_inodebits;
-} ldlm_wire_policy_data_t;
+};
union ldlm_gl_desc {
struct ldlm_gl_lquota_desc lquota_desc;
};
-void lustre_swab_gl_desc(union ldlm_gl_desc *);
+enum ldlm_intent_flags {
+ IT_OPEN = BIT(0),
+ IT_CREAT = BIT(1),
+ IT_OPEN_CREAT = BIT(1) | BIT(0),
+ IT_READDIR = BIT(2),
+ IT_GETATTR = BIT(3),
+ IT_LOOKUP = BIT(4),
+ IT_UNLINK = BIT(5),
+ IT_TRUNC = BIT(6),
+ IT_GETXATTR = BIT(7),
+ IT_EXEC = BIT(8),
+ IT_PIN = BIT(9),
+ IT_LAYOUT = BIT(10),
+ IT_QUOTA_DQACQ = BIT(11),
+ IT_QUOTA_CONN = BIT(12),
+ IT_SETXATTR = BIT(13),
+};
struct ldlm_intent {
__u64 opc;
};
-void lustre_swab_ldlm_intent(struct ldlm_intent *i);
-
struct ldlm_resource_desc {
enum ldlm_type lr_type;
__u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */
@@ -2777,7 +2614,7 @@ struct ldlm_lock_desc {
struct ldlm_resource_desc l_resource;
enum ldlm_mode l_req_mode;
enum ldlm_mode l_granted_mode;
- ldlm_wire_policy_data_t l_policy_data;
+ union ldlm_wire_policy_data l_policy_data;
};
#define LDLM_LOCKREQ_HANDLES 2
@@ -2790,8 +2627,6 @@ struct ldlm_request {
struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
};
-void lustre_swab_ldlm_request(struct ldlm_request *rq);
-
/* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
* Otherwise, 2 are available.
*/
@@ -2813,8 +2648,6 @@ struct ldlm_reply {
__u64 lock_policy_res2;
};
-void lustre_swab_ldlm_reply(struct ldlm_reply *r);
-
#define ldlm_flags_to_wire(flags) ((__u32)(flags))
#define ldlm_flags_from_wire(flags) ((__u64)(flags))
@@ -2858,8 +2691,6 @@ struct mgs_target_info {
char mti_params[MTI_PARAM_MAXLEN];
};
-void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
-
struct mgs_nidtbl_entry {
__u64 mne_version; /* table version of this entry */
__u32 mne_instance; /* target instance # */
@@ -2874,8 +2705,6 @@ struct mgs_nidtbl_entry {
} u;
};
-void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
-
struct mgs_config_body {
char mcb_name[MTI_NAME_MAXLEN]; /* logname */
__u64 mcb_offset; /* next index of config log to request */
@@ -2885,15 +2714,11 @@ struct mgs_config_body {
__u32 mcb_units; /* # of units for bulk transfer */
};
-void lustre_swab_mgs_config_body(struct mgs_config_body *body);
-
struct mgs_config_res {
__u64 mcr_offset; /* index of last config log */
__u64 mcr_size; /* size of the log */
};
-void lustre_swab_mgs_config_res(struct mgs_config_res *body);
-
/* Config marker flags (in config log) */
#define CM_START 0x01
#define CM_END 0x02
@@ -2913,8 +2738,6 @@ struct cfg_marker {
char cm_comment[MTI_NAME_MAXLEN];
};
-void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size);
-
/*
* Opcodes for multiple servers.
*/
@@ -2922,7 +2745,7 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size);
enum obd_cmd {
OBD_PING = 400,
OBD_LOG_CANCEL,
- OBD_QC_CALLBACK,
+ OBD_QC_CALLBACK, /* not used since 2.4 */
OBD_IDX_READ,
OBD_LAST_OPC
};
@@ -3155,23 +2978,32 @@ struct llog_gen_rec {
struct llog_rec_tail lgr_tail;
};
-/* On-disk header structure of each log object, stored in little endian order */
-#define LLOG_CHUNK_SIZE 8192
-#define LLOG_HEADER_SIZE (96)
-#define LLOG_BITMAP_BYTES (LLOG_CHUNK_SIZE - LLOG_HEADER_SIZE)
-
-#define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
-
/* flags for the logs */
enum llog_flag {
LLOG_F_ZAP_WHEN_EMPTY = 0x1,
LLOG_F_IS_CAT = 0x2,
LLOG_F_IS_PLAIN = 0x4,
LLOG_F_EXT_JOBID = BIT(3),
+ LLOG_F_IS_FIXSIZE = BIT(4),
+ /*
+ * Note: Flags covered by LLOG_F_EXT_MASK will be inherited from
+ * catlog to plain log, so do not add LLOG_F_IS_FIXSIZE here,
+ * because the catlog record is usually fixed size, but its plain
+ * log record can be variable
+ */
LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID,
};
+/* On-disk header structure of each log object, stored in little endian order */
+#define LLOG_MIN_CHUNK_SIZE 8192
+#define LLOG_HEADER_SIZE (96) /* sizeof (llog_log_hdr) +
+ * sizeof(llh_tail) - sizeof(llh_bitmap)
+ */
+#define LLOG_BITMAP_BYTES (LLOG_MIN_CHUNK_SIZE - LLOG_HEADER_SIZE)
+#define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
+
struct llog_log_hdr {
struct llog_rec_hdr llh_hdr;
__s64 llh_timestamp;
@@ -3183,13 +3015,30 @@ struct llog_log_hdr {
/* for a catalog the first plain slot is next to it */
struct obd_uuid llh_tgtuuid;
__u32 llh_reserved[LLOG_HEADER_SIZE / sizeof(__u32) - 23];
+ /* These fields must always be at the end of the llog_log_hdr.
+ * Note: llh_bitmap size is variable because llog chunk size could be
+ * bigger than LLOG_MIN_CHUNK_SIZE, i.e. sizeof(llog_log_hdr) > 8192
+ * bytes, and the real size is stored in llh_hdr.lrh_len, which means
+	 * llh_tail should only be accessed via LLOG_HDR_TAIL().
+	 * But this structure is also used by the client/server llog interface
+	 * (see llog_client.c), so it is kept in its original form to avoid
+	 * compatibility issues.
+ */
__u32 llh_bitmap[LLOG_BITMAP_BYTES / sizeof(__u32)];
struct llog_rec_tail llh_tail;
} __packed;
-#define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
- llh->llh_bitmap_offset - \
- sizeof(llh->llh_tail)) * 8)
+#undef LLOG_HEADER_SIZE
+#undef LLOG_BITMAP_BYTES
+
+#define LLOG_HDR_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
+ llh->llh_bitmap_offset - \
+ sizeof(llh->llh_tail)) * 8)
+#define LLOG_HDR_BITMAP(llh) (__u32 *)((char *)(llh) + \
+ (llh)->llh_bitmap_offset)
+#define LLOG_HDR_TAIL(llh) ((struct llog_rec_tail *)((char *)llh + \
+ llh->llh_hdr.lrh_len - \
+ sizeof(llh->llh_tail)))
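
A hedged sketch of how the relocated bitmap is meant to be reached only through these accessors; test_bit_le() is the generic little-endian bitops helper, and the wrapper itself is illustrative.

	/* Illustrative only: check whether record 'index' is live in a header
	 * whose chunk size, and hence bitmap size, comes from llh_hdr.lrh_len.
	 */
	static bool example_llog_rec_is_set(struct llog_log_hdr *llh, int index)
	{
		if (index < 0 || index >= LLOG_HDR_BITMAP_SIZE(llh))
			return false;
		return test_bit_le(index, LLOG_HDR_BITMAP(llh));
	}

The tail is reached the same way: LLOG_HDR_TAIL(llh)->lrt_index rather than llh->llh_tail.lrt_index, since the tail no longer sits at a fixed offset once the chunk size exceeds LLOG_MIN_CHUNK_SIZE.
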
/** log cookies are used to reference a specific log file and a record
* therein
@@ -3259,7 +3108,8 @@ struct obdo {
__u32 o_parent_ver;
struct lustre_handle o_handle; /* brw: lock handle to prolong locks
*/
- struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS
+ struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS,
+ * obsolete in 2.8, reused in OSP
*/
__u32 o_uid_h;
__u32 o_gid_h;
@@ -3333,30 +3183,11 @@ struct ost_body {
/* Key for FIEMAP to be used in get_info calls */
struct ll_fiemap_info_key {
- char name[8];
- struct obdo oa;
- struct ll_user_fiemap fiemap;
+ char lfik_name[8];
+ struct obdo lfik_oa;
+ struct fiemap lfik_fiemap;
};
-void lustre_swab_ost_body(struct ost_body *b);
-void lustre_swab_ost_last_id(__u64 *id);
-void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);
-
-void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
-void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
-void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
- int stripe_count);
-void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
-
-/* llog_swab.c */
-void lustre_swab_llogd_body(struct llogd_body *d);
-void lustre_swab_llog_hdr(struct llog_log_hdr *h);
-void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
-void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
-
-struct lustre_cfg;
-void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
-
/* Functions for dumping PTLRPC fields */
void dump_rniobuf(struct niobuf_remote *rnb);
void dump_ioo(struct obd_ioobj *nb);
@@ -3394,8 +3225,6 @@ struct lustre_capa {
__u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */
} __packed;
-void lustre_swab_lustre_capa(struct lustre_capa *c);
-
/** lustre_capa::lc_opc */
enum {
CAPA_OPC_BODY_WRITE = 1 << 0, /**< write object data */
@@ -3458,8 +3287,6 @@ struct getinfo_fid2path {
char gf_path[0];
} __packed;
-void lustre_swab_fid2path(struct getinfo_fid2path *gf);
-
/** path2parent request/reply structures */
struct getparent {
struct lu_fid gp_fid; /**< parent FID */
@@ -3486,8 +3313,6 @@ struct layout_intent {
__u64 li_end;
};
-void lustre_swab_layout_intent(struct layout_intent *li);
-
/**
* On the wire version of hsm_progress structure.
*
@@ -3506,13 +3331,6 @@ struct hsm_progress_kernel {
__u64 hpk_padding2;
} __packed;
-void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-void lustre_swab_hsm_current_action(struct hsm_current_action *action);
-void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
-void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
-void lustre_swab_hsm_request(struct hsm_request *hr);
-
/** layout swap request structure
* fid1 and fid2 are in mdt_body
*/
@@ -3520,8 +3338,6 @@ struct mdc_swap_layouts {
__u64 msl_flags;
} __packed;
-void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
-
struct close_data {
struct lustre_handle cd_handle;
struct lu_fid cd_fid;
@@ -3529,7 +3345,5 @@ struct close_data {
__u64 cd_reserved[8];
};
-void lustre_swab_close_data(struct close_data *data);
-
#endif
/** @} lustreidl */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h
index f3d7c94c3b50..eb08df33b2db 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h
@@ -363,8 +363,8 @@ obd_ioctl_unpack(struct obd_ioctl_data *data, char *pbuf, int max_len)
/* OBD_IOC_LOV_GETSTRIPE 155 LL_IOC_LOV_GETSTRIPE */
/* OBD_IOC_LOV_SETEA 156 LL_IOC_LOV_SETEA */
/* lustre/lustre_user.h 157-159 */
-#define OBD_IOC_QUOTACHECK _IOW('f', 160, int)
-#define OBD_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *)
+/* OBD_IOC_QUOTACHECK _IOW('f', 160, int) */
+/* OBD_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *) */
#define OBD_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl)
/* lustre/lustre_user.h 163-176 */
#define OBD_IOC_CHANGELOG_REG _IOW('f', 177, struct obd_ioctl_data)
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 6fc985571cba..3301ad652db1 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -63,9 +63,13 @@
#if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64)
typedef struct stat64 lstat_t;
#define lstat_f lstat64
+#define fstat_f fstat64
+#define fstatat_f fstatat64
#else
typedef struct stat lstat_t;
#define lstat_f lstat
+#define fstat_f fstat
+#define fstatat_f fstatat
#endif
#define HAVE_LOV_USER_MDS_DATA
@@ -82,7 +86,6 @@ typedef struct stat lstat_t;
#define FSFILT_IOC_SETVERSION _IOW('f', 4, long)
#define FSFILT_IOC_GETVERSION_OLD _IOR('v', 1, long)
#define FSFILT_IOC_SETVERSION_OLD _IOW('v', 2, long)
-#define FSFILT_IOC_FIEMAP _IOWR('f', 11, struct ll_user_fiemap)
#endif
/* FIEMAP flags supported by Lustre */
@@ -235,7 +238,7 @@ struct ost_id {
/* #define LL_IOC_POLL_QUOTACHECK 161 OBD_IOC_POLL_QUOTACHECK */
/* #define LL_IOC_QUOTACTL 162 OBD_IOC_QUOTACTL */
#define IOC_OBD_STATFS _IOWR('f', 164, struct obd_statfs *)
-#define IOC_LOV_GETINFO _IOWR('f', 165, struct lov_user_mds_data *)
+/* IOC_LOV_GETINFO 165 obsolete */
#define LL_IOC_FLUSHCTX _IOW('f', 166, long)
/* LL_IOC_RMTACL 167 obsolete */
#define LL_IOC_GETOBDCOUNT _IOR('f', 168, long)
@@ -343,6 +346,9 @@ enum ll_lease_type {
#define LOV_ALL_STRIPES 0xffff /* only valid for directories */
#define LOV_V1_INSANE_STRIPE_COUNT 65532 /* maximum stripe count bz13933 */
+#define XATTR_LUSTRE_PREFIX "lustre."
+#define XATTR_LUSTRE_LOV "lustre.lov"
+
#define lov_user_ost_data lov_user_ost_data_v1
struct lov_user_ost_data_v1 { /* per-stripe data structure */
struct ost_id l_ost_oi; /* OST object ID */
@@ -451,8 +457,6 @@ static inline int lmv_user_md_size(int stripes, int lmm_magic)
stripes * sizeof(struct lmv_user_mds_data);
}
-void lustre_swab_lmv_user_md(struct lmv_user_md *lum);
-
struct ll_recreate_obj {
__u64 lrc_id;
__u32 lrc_ost_idx;
@@ -522,25 +526,20 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen)
}
/* printf display format
- * e.g. printf("file FID is "DFID"\n", PFID(fid));
+ * usage: printf("file FID is "DFID"\n", PFID(fid));
*/
#define FID_NOBRACE_LEN 40
#define FID_LEN (FID_NOBRACE_LEN + 2)
#define DFID_NOBRACE "%#llx:0x%x:0x%x"
#define DFID "["DFID_NOBRACE"]"
-#define PFID(fid) \
- (fid)->f_seq, \
- (fid)->f_oid, \
- (fid)->f_ver
+#define PFID(fid) (unsigned long long)(fid)->f_seq, (fid)->f_oid, (fid)->f_ver
-/* scanf input parse format -- strip '[' first.
- * e.g. sscanf(fidstr, SFID, RFID(&fid));
+/* scanf input parse format for fids in DFID_NOBRACE format
+ * Need to strip '[' from DFID format first or use "["SFID"]" at caller.
+ * usage: sscanf(fidstr, SFID, RFID(&fid));
*/
#define SFID "0x%llx:0x%x:0x%x"
-#define RFID(fid) \
- &((fid)->f_seq), \
- &((fid)->f_oid), \
- &((fid)->f_ver)
+#define RFID(fid) &((fid)->f_seq), &((fid)->f_oid), &((fid)->f_ver)
/********* Quotas **********/
@@ -551,23 +550,18 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen)
#define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */
/* these must be explicitly translated into linux Q_* in ll_dir_ioctl */
-#define LUSTRE_Q_QUOTAON 0x800002 /* turn quotas on */
-#define LUSTRE_Q_QUOTAOFF 0x800003 /* turn quotas off */
+#define LUSTRE_Q_QUOTAON 0x800002 /* deprecated as of 2.4 */
+#define LUSTRE_Q_QUOTAOFF 0x800003 /* deprecated as of 2.4 */
#define LUSTRE_Q_GETINFO 0x800005 /* get information about quota files */
#define LUSTRE_Q_SETINFO 0x800006 /* set information about quota files */
#define LUSTRE_Q_GETQUOTA 0x800007 /* get user quota structure */
#define LUSTRE_Q_SETQUOTA 0x800008 /* set user quota structure */
/* lustre-specific control commands */
-#define LUSTRE_Q_INVALIDATE 0x80000b /* invalidate quota data */
-#define LUSTRE_Q_FINVALIDATE 0x80000c /* invalidate filter quota data */
+#define LUSTRE_Q_INVALIDATE 0x80000b /* deprecated as of 2.4 */
+#define LUSTRE_Q_FINVALIDATE 0x80000c /* deprecated as of 2.4 */
#define UGQUOTA 2 /* set both USRQUOTA and GRPQUOTA */
-struct if_quotacheck {
- char obd_type[16];
- struct obd_uuid obd_uuid;
-};
-
#define IDENTITY_DOWNCALL_MAGIC 0x6d6dd629
/* permission */
@@ -649,6 +643,7 @@ struct if_quotactl {
#define SWAP_LAYOUTS_CHECK_DV2 (1 << 1)
#define SWAP_LAYOUTS_KEEP_MTIME (1 << 2)
#define SWAP_LAYOUTS_KEEP_ATIME (1 << 3)
+#define SWAP_LAYOUTS_CLOSE BIT(4)
/* Swap XATTR_NAME_HSM as well, only on the MDT so far */
#define SWAP_LAYOUTS_MDS_HSM (1 << 31)
@@ -999,6 +994,7 @@ struct ioc_data_version {
* See HSM_FLAGS below.
*/
enum hsm_states {
+ HS_NONE = 0x00000000,
HS_EXISTS = 0x00000001,
HS_DIRTY = 0x00000002,
HS_RELEASED = 0x00000004,
diff --git a/drivers/staging/lustre/lustre/include/lustre_compat.h b/drivers/staging/lustre/lustre/include/lustre_compat.h
index 567c438e93cb..300e96fb032a 100644
--- a/drivers/staging/lustre/lustre/include/lustre_compat.h
+++ b/drivers/staging/lustre/lustre/include/lustre_compat.h
@@ -74,4 +74,6 @@
# define ext2_find_next_zero_bit find_next_zero_bit_le
#endif
+#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
+
#endif /* _LUSTRE_COMPAT_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index d03534432624..b7e61d082e55 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -59,7 +59,7 @@ struct obd_device;
#define OBD_LDLM_DEVICENAME "ldlm"
#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
-#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
+#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(3900)) /* 65 min */
#define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024
/**
@@ -86,10 +86,10 @@ enum ldlm_error {
* decisions about lack of conflicts or do any autonomous lock granting without
* first speaking to a server.
*/
-typedef enum {
+enum ldlm_side {
LDLM_NAMESPACE_SERVER = 1 << 0,
LDLM_NAMESPACE_CLIENT = 1 << 1
-} ldlm_side_t;
+};
/**
* The blocking callback is overloaded to perform two functions. These flags
@@ -359,7 +359,7 @@ struct ldlm_namespace {
struct obd_device *ns_obd;
/** Flag indicating if namespace is on client instead of server */
- ldlm_side_t ns_client;
+ enum ldlm_side ns_client;
/** Resource hash table for namespace. */
struct cfs_hash *ns_rs_hash;
@@ -550,20 +550,18 @@ struct ldlm_flock {
__u64 owner;
__u64 blocking_owner;
struct obd_export *blocking_export;
- /* Protected by the hash lock */
- __u32 blocking_refs;
__u32 pid;
};
-typedef union {
+union ldlm_policy_data {
struct ldlm_extent l_extent;
struct ldlm_flock l_flock;
struct ldlm_inodebits l_inodebits;
-} ldlm_policy_data_t;
+};
void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
- const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
+ const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy);
enum lvb_type {
LVB_T_NONE = 0,
@@ -692,7 +690,7 @@ struct ldlm_lock {
* Representation of private data specific for a lock type.
* Examples are: extent range for extent lock or bitmask for ibits locks
*/
- ldlm_policy_data_t l_policy_data;
+ union ldlm_policy_data l_policy_data;
/**
* Lock state flags. Protected by lr_lock.
@@ -967,8 +965,8 @@ struct ldlm_ast_work {
* Common ldlm_enqueue parameters
*/
struct ldlm_enqueue_info {
- __u32 ei_type; /** Type of the lock being enqueued. */
- __u32 ei_mode; /** Mode of the lock being enqueued. */
+ enum ldlm_type ei_type; /** Type of the lock being enqueued. */
+ enum ldlm_mode ei_mode; /** Mode of the lock being enqueued. */
void *ei_cb_bl; /** blocking lock callback */
void *ei_cb_cp; /** lock completion callback */
void *ei_cb_gl; /** lock glimpse callback */
@@ -979,7 +977,7 @@ struct ldlm_enqueue_info {
extern struct obd_ops ldlm_obd_ops;
extern char *ldlm_lockname[];
-char *ldlm_it2str(int it);
+const char *ldlm_it2str(enum ldlm_intent_flags it);
/**
* Just a fancy CDEBUG call with log level preset to LDLM_DEBUG.
@@ -1168,16 +1166,18 @@ do { \
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
void ldlm_lock_put(struct ldlm_lock *lock);
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
-void ldlm_lock_addref(const struct lustre_handle *lockh, __u32 mode);
-int ldlm_lock_addref_try(const struct lustre_handle *lockh, __u32 mode);
-void ldlm_lock_decref(const struct lustre_handle *lockh, __u32 mode);
-void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, __u32 mode);
+void ldlm_lock_addref(const struct lustre_handle *lockh, enum ldlm_mode mode);
+int ldlm_lock_addref_try(const struct lustre_handle *lockh,
+ enum ldlm_mode mode);
+void ldlm_lock_decref(const struct lustre_handle *lockh, enum ldlm_mode mode);
+void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh,
+ enum ldlm_mode mode);
void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
const struct ldlm_res_id *,
- enum ldlm_type type, ldlm_policy_data_t *,
+ enum ldlm_type type, union ldlm_policy_data *,
enum ldlm_mode mode, struct lustre_handle *,
int unref);
enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh,
@@ -1189,7 +1189,7 @@ void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
/* resource.c */
struct ldlm_namespace *
ldlm_namespace_new(struct obd_device *obd, char *name,
- ldlm_side_t client, enum ldlm_appetite apt,
+ enum ldlm_side client, enum ldlm_appetite apt,
enum ldlm_ns_type ns_type);
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags);
void ldlm_namespace_get(struct ldlm_namespace *ns);
@@ -1208,7 +1208,7 @@ void ldlm_resource_add_lock(struct ldlm_resource *res,
struct ldlm_lock *lock);
void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
-void ldlm_dump_all_namespaces(ldlm_side_t client, int level);
+void ldlm_dump_all_namespaces(enum ldlm_side client, int level);
void ldlm_namespace_dump(int level, struct ldlm_namespace *);
void ldlm_resource_dump(int level, struct ldlm_resource *);
int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
@@ -1241,7 +1241,7 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
struct ldlm_enqueue_info *einfo,
const struct ldlm_res_id *res_id,
- ldlm_policy_data_t const *policy, __u64 *flags,
+ union ldlm_policy_data const *policy, __u64 *flags,
void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
struct lustre_handle *lockh, int async);
int ldlm_prep_enqueue_req(struct obd_export *exp,
@@ -1265,13 +1265,13 @@ int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
enum ldlm_cancel_flags flags, void *opaque);
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
- ldlm_policy_data_t *policy,
+ union ldlm_policy_data *policy,
enum ldlm_mode mode,
enum ldlm_cancel_flags flags,
void *opaque);
int ldlm_cancel_resource_local(struct ldlm_resource *res,
struct list_head *cancels,
- ldlm_policy_data_t *policy,
+ union ldlm_policy_data *policy,
enum ldlm_mode mode, __u64 lock_flags,
enum ldlm_cancel_flags cancel_flags,
void *opaque);
@@ -1333,7 +1333,7 @@ int ldlm_pools_init(void);
void ldlm_pools_fini(void);
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
- int idx, ldlm_side_t client);
+ int idx, enum ldlm_side client);
void ldlm_pool_fini(struct ldlm_pool *pl);
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock);
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock);
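The lustre_dlm.h hunks above replace Lustre's *_t typedefs (ldlm_side_t, ldlm_policy_data_t) with plainly tagged enums and unions, matching kernel coding style. A minimal sketch of the pattern, with made-up names:

    /* Old style (what is being removed):
     *     typedef enum { EX_SERVER = 1 << 0, EX_CLIENT = 1 << 1 } ex_side_t;
     * New style: declare the tag and use it directly in prototypes.
     */
    enum ex_side {
            EX_SERVER = 1 << 0,
            EX_CLIENT = 1 << 1,
    };

    static inline bool ex_side_is_client(enum ex_side side)
    {
            return side == EX_CLIENT;
    }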
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index 316780693193..b5a1aadbcb93 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -150,6 +150,7 @@
#include "../../include/linux/libcfs/libcfs.h"
#include "lustre/lustre_idl.h"
+#include "seq_range.h"
struct lu_env;
struct lu_site;
diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h
index 932410d3e3cc..6ef1b03cb986 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fld.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fld.h
@@ -103,8 +103,6 @@ struct lu_client_fld {
/** Client fld debugfs entry name. */
char lcf_name[LUSTRE_MDT_MAXNAMELEN];
-
- int lcf_flags;
};
/* Client methods */
diff --git a/drivers/staging/lustre/lustre/include/lustre_ha.h b/drivers/staging/lustre/lustre/include/lustre_ha.h
index cde7ed702c86..dec1e99d594d 100644
--- a/drivers/staging/lustre/lustre/include/lustre_ha.h
+++ b/drivers/staging/lustre/lustre/include/lustre_ha.h
@@ -53,6 +53,7 @@ void ptlrpc_activate_import(struct obd_import *imp);
void ptlrpc_deactivate_import(struct obd_import *imp);
void ptlrpc_invalidate_import(struct obd_import *imp);
void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt);
+void ptlrpc_pinger_force(struct obd_import *imp);
/** @} ha */
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index 5461ba33d90c..f0c931ce1a67 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -185,6 +185,11 @@ struct obd_import {
struct list_head *imp_replay_cursor;
/** @} */
+ /** List of not replied requests */
+ struct list_head imp_unreplied_list;
+ /** Known maximal replied XID */
+ __u64 imp_known_replied_xid;
+
/** obd device for this import */
struct obd_device *imp_obd;
@@ -294,7 +299,9 @@ struct obd_import {
*/
imp_force_reconnect:1,
/* import has tried to connect with server */
- imp_connect_tried:1;
+ imp_connect_tried:1,
+ /* connected but not FULL yet */
+ imp_connected:1;
__u32 imp_connect_op;
struct obd_connect_data imp_connect_data;
__u64 imp_connect_flags_orig;
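The new imp_unreplied_list and imp_known_replied_xid fields track requests whose replies have not yet been seen. A rough sketch of the bookkeeping, under the assumption that the list is kept sorted by XID; the struct and helper below are illustrative, not the driver's own, and assume <linux/list.h> and <linux/types.h>:

    struct ex_request {
            struct list_head er_unreplied; /* link into the import's list */
            u64              er_xid;       /* transfer id of this request */
    };

    /* Everything strictly below the lowest XID still waiting for a reply
     * can be assumed replied by the server.
     */
    static inline u64 ex_known_replied_xid(struct list_head *unreplied,
                                           u64 next_xid)
    {
            struct ex_request *oldest;

            if (list_empty(unreplied))
                    return next_xid - 1;

            oldest = list_first_entry(unreplied, struct ex_request,
                                      er_unreplied);
            return oldest->er_xid - 1;
    }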
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index 6b231913ba2e..27f3148c4344 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -350,8 +350,6 @@ do { \
l_wait_event_exclusive_head(wq, condition, &lwi); \
})
-#define LIBLUSTRE_CLIENT (0)
-
/** @} lib */
#endif /* _LUSTRE_LIB_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_lmv.h b/drivers/staging/lustre/lustre/include/lustre_lmv.h
index d7f7afa8dfa7..5aa3645e64dc 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lmv.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lmv.h
@@ -76,18 +76,7 @@ lsm_md_eq(const struct lmv_stripe_md *lsm1, const struct lmv_stripe_md *lsm2)
union lmv_mds_md;
-int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
- const union lmv_mds_md *lmm, int stripe_count);
-
-static inline int lmv_alloc_memmd(struct lmv_stripe_md **lsmp, int stripe_count)
-{
- return lmv_unpack_md(NULL, lsmp, NULL, stripe_count);
-}
-
-static inline void lmv_free_memmd(struct lmv_stripe_md *lsm)
-{
- lmv_unpack_md(NULL, &lsm, NULL, 0);
-}
+void lmv_free_memmd(struct lmv_stripe_md *lsm);
static inline void lmv1_le_to_cpu(struct lmv_mds_md_v1 *lmv_dst,
const struct lmv_mds_md_v1 *lmv_src)
diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h
index 995b266932e3..35e37eb1bc2c 100644
--- a/drivers/staging/lustre/lustre/include/lustre_log.h
+++ b/drivers/staging/lustre/lustre/include/lustre_log.h
@@ -214,6 +214,7 @@ struct llog_handle {
spinlock_t lgh_hdr_lock; /* protect lgh_hdr data */
struct llog_logid lgh_id; /* id of this log */
struct llog_log_hdr *lgh_hdr;
+ size_t lgh_hdr_size;
int lgh_last_idx;
int lgh_cur_idx; /* used during llog_process */
__u64 lgh_cur_offset; /* used during llog_process */
@@ -244,6 +245,11 @@ struct llog_ctxt {
struct mutex loc_mutex; /* protect loc_imp */
atomic_t loc_refcount;
long loc_flags; /* flags, see above defines */
+ /*
+ * llog chunk size, and llog record size can not be bigger than
+ * loc_chunk_size
+ */
+ __u32 loc_chunk_size;
};
#define LLOG_PROC_BREAK 0x0001
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index 8fc2d3f2dfd6..198ceb0c66f9 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -156,16 +156,39 @@ static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck,
mutex_unlock(&lck->rpcl_mutex);
}
+static inline void mdc_get_mod_rpc_slot(struct ptlrpc_request *req,
+ struct lookup_intent *it)
+{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ u32 opc;
+ u16 tag;
+
+ opc = lustre_msg_get_opc(req->rq_reqmsg);
+ tag = obd_get_mod_rpc_slot(cli, opc, it);
+ lustre_msg_set_tag(req->rq_reqmsg, tag);
+}
+
+static inline void mdc_put_mod_rpc_slot(struct ptlrpc_request *req,
+ struct lookup_intent *it)
+{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ u32 opc;
+ u16 tag;
+
+ opc = lustre_msg_get_opc(req->rq_reqmsg);
+ tag = lustre_msg_get_tag(req->rq_reqmsg);
+ obd_put_mod_rpc_slot(cli, opc, it, tag);
+}
+
/**
- * Update the maximum possible easize and cookiesize.
+ * Update the maximum possible easize.
*
- * The values are learned from ptlrpc replies sent by the MDT. The
- * default easize and cookiesize is initialized to the minimum value but
- * allowed to grow up to a single page in size if required to handle the
+ * This value is learned from ptlrpc replies sent by the MDT. The
+ * default easize is initialized to the minimum value but allowed
+ * to grow up to a single page in size if required to handle the
* common case.
*
- * \see client_obd::cl_default_mds_easize and
- * client_obd::cl_default_mds_cookiesize
+ * \see client_obd::cl_default_mds_easize
*
* \param[in] exp export for MDC device
* \param[in] body body of ptlrpc reply from MDT
@@ -176,7 +199,7 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
{
if (body->mbo_valid & OBD_MD_FLMODEASIZE) {
struct client_obd *cli = &exp->exp_obd->u.cli;
- u32 def_cookiesize, def_easize;
+ u32 def_easize;
if (cli->cl_max_mds_easize < body->mbo_max_mdsize)
cli->cl_max_mds_easize = body->mbo_max_mdsize;
@@ -184,13 +207,6 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
def_easize = min_t(__u32, body->mbo_max_mdsize,
OBD_MAX_DEFAULT_EA_SIZE);
cli->cl_default_mds_easize = def_easize;
-
- if (cli->cl_max_mds_cookiesize < body->mbo_max_cookiesize)
- cli->cl_max_mds_cookiesize = body->mbo_max_cookiesize;
-
- def_cookiesize = min_t(__u32, body->mbo_max_cookiesize,
- OBD_MAX_DEFAULT_COOKIE_SIZE);
- cli->cl_default_mds_cookiesize = def_cookiesize;
}
}
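A hedged usage sketch of the slot helpers introduced above. The wrapper name is made up, request setup and error handling are elided, and ptlrpc_queue_wait() stands in for whatever synchronous send path the caller actually uses:

    /* Typical call pattern for a modifying MDC RPC: reserve a slot (which
     * also tags the request message), send, then release the slot once the
     * reply has been handled.
     */
    static int ex_send_modifying_rpc(struct ptlrpc_request *req,
                                     struct lookup_intent *it)
    {
            int rc;

            mdc_get_mod_rpc_slot(req, it);
            rc = ptlrpc_queue_wait(req);
            mdc_put_mod_rpc_slot(req, it);

            return rc;
    }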
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index e9aba99ee52a..411eb0dc7f38 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -50,6 +50,7 @@
* @{
*/
+#include <linux/uio.h>
#include "../../include/linux/libcfs/libcfs.h"
#include "../../include/linux/lnet/nidstr.h"
#include "../../include/linux/lnet/api.h"
@@ -68,13 +69,17 @@
#define PTLRPC_MD_OPTIONS 0
/**
- * Max # of bulk operations in one request.
+ * log2 max # of bulk operations in one request: 2=4MB/RPC, 5=32MB/RPC, ...
* In order for the client and server to properly negotiate the maximum
* possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two
* value. The client is free to limit the actual RPC size for any bulk
* transfer via cl_max_pages_per_rpc to some non-power-of-two value.
+ * NOTE: This is limited to 16 (=64GB RPCs) by IOOBJ_MAX_BRW_BITS.
*/
-#define PTLRPC_BULK_OPS_BITS 2
+#define PTLRPC_BULK_OPS_BITS 4
+#if PTLRPC_BULK_OPS_BITS > 16
+#error "More than 65536 BRW RPCs not allowed by IOOBJ_MAX_BRW_BITS."
+#endif
#define PTLRPC_BULK_OPS_COUNT (1U << PTLRPC_BULK_OPS_BITS)
/**
* PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and
@@ -437,6 +442,10 @@ struct ptlrpc_reply_state {
unsigned long rs_committed:1;/* the transaction was committed
* and the rs was dispatched
*/
+ atomic_t rs_refcount; /* number of users */
+ /** Number of locks awaiting client ACK */
+ int rs_nlocks;
+
/** Size of the state */
int rs_size;
/** opcode */
@@ -449,7 +458,6 @@ struct ptlrpc_reply_state {
struct ptlrpc_service_part *rs_svcpt;
/** Lnet metadata handle for the reply */
lnet_handle_md_t rs_md_h;
- atomic_t rs_refcount;
/** Context for the service thread */
struct ptlrpc_svc_ctx *rs_svc_ctx;
@@ -466,8 +474,6 @@ struct ptlrpc_reply_state {
*/
struct lustre_msg *rs_msg; /* reply message */
- /** Number of locks awaiting client ACK */
- int rs_nlocks;
/** Handles of locks awaiting client reply ACK */
struct lustre_handle rs_locks[RS_MAX_LOCKS];
/** Lock modes of locks in \a rs_locks */
@@ -515,717 +521,7 @@ struct lu_env;
struct ldlm_lock;
-/**
- * \defgroup nrs Network Request Scheduler
- * @{
- */
-struct ptlrpc_nrs_policy;
-struct ptlrpc_nrs_resource;
-struct ptlrpc_nrs_request;
-
-/**
- * NRS control operations.
- *
- * These are common for all policies.
- */
-enum ptlrpc_nrs_ctl {
- /**
- * Not a valid opcode.
- */
- PTLRPC_NRS_CTL_INVALID,
- /**
- * Activate the policy.
- */
- PTLRPC_NRS_CTL_START,
- /**
- * Reserved for multiple primary policies, which may be a possibility
- * in the future.
- */
- PTLRPC_NRS_CTL_STOP,
- /**
- * Policies can start using opcodes from this value and onwards for
- * their own purposes; the assigned value itself is arbitrary.
- */
- PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20,
-};
-
-/**
- * ORR policy operations
- */
-enum nrs_ctl_orr {
- NRS_CTL_ORR_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC,
- NRS_CTL_ORR_WR_QUANTUM,
- NRS_CTL_ORR_RD_OFF_TYPE,
- NRS_CTL_ORR_WR_OFF_TYPE,
- NRS_CTL_ORR_RD_SUPP_REQ,
- NRS_CTL_ORR_WR_SUPP_REQ,
-};
-
-/**
- * NRS policy operations.
- *
- * These determine the behaviour of a policy, and are called in response to
- * NRS core events.
- */
-struct ptlrpc_nrs_pol_ops {
- /**
- * Called during policy registration; this operation is optional.
- *
- * \param[in,out] policy The policy being initialized
- */
- int (*op_policy_init)(struct ptlrpc_nrs_policy *policy);
- /**
- * Called during policy unregistration; this operation is optional.
- *
- * \param[in,out] policy The policy being unregistered/finalized
- */
- void (*op_policy_fini)(struct ptlrpc_nrs_policy *policy);
- /**
- * Called when activating a policy via lprocfs; policies allocate and
- * initialize their resources here; this operation is optional.
- *
- * \param[in,out] policy The policy being started
- *
- * \see nrs_policy_start_locked()
- */
- int (*op_policy_start)(struct ptlrpc_nrs_policy *policy);
- /**
- * Called when deactivating a policy via lprocfs; policies deallocate
- * their resources here; this operation is optional
- *
- * \param[in,out] policy The policy being stopped
- *
- * \see nrs_policy_stop0()
- */
- void (*op_policy_stop)(struct ptlrpc_nrs_policy *policy);
- /**
- * Used for policy-specific operations; i.e. not generic ones like
- * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
- * to an ioctl; this operation is optional.
- *
- * \param[in,out] policy The policy carrying out operation \a opc
- * \param[in] opc The command operation being carried out
- * \param[in,out] arg An generic buffer for communication between the
- * user and the control operation
- *
- * \retval -ve error
- * \retval 0 success
- *
- * \see ptlrpc_nrs_policy_control()
- */
- int (*op_policy_ctl)(struct ptlrpc_nrs_policy *policy,
- enum ptlrpc_nrs_ctl opc, void *arg);
-
- /**
- * Called when obtaining references to the resources of the resource
- * hierarchy for a request that has arrived for handling at the PTLRPC
- * service. Policies should return -ve for requests they do not wish
- * to handle. This operation is mandatory.
- *
- * \param[in,out] policy The policy we're getting resources for.
- * \param[in,out] nrq The request we are getting resources for.
- * \param[in] parent The parent resource of the resource being
- * requested; set to NULL if none.
- * \param[out] resp The resource is to be returned here; the
- * fallback policy in an NRS head should
- * \e always return a non-NULL pointer value.
- * \param[in] moving_req When set, signifies that this is an attempt
- * to obtain resources for a request being moved
- * to the high-priority NRS head by
- * ldlm_lock_reorder_req().
- * This implies two things:
- * 1. We are under obd_export::exp_rpc_lock and
- * so should not sleep.
- * 2. We should not perform non-idempotent or can
- * skip performing idempotent operations that
- * were carried out when resources were first
- * taken for the request when it was initialized
- * in ptlrpc_nrs_req_initialize().
- *
- * \retval 0, +ve The level of the returned resource in the resource
- * hierarchy; currently only 0 (for a non-leaf resource)
- * and 1 (for a leaf resource) are supported by the
- * framework.
- * \retval -ve error
- *
- * \see ptlrpc_nrs_req_initialize()
- * \see ptlrpc_nrs_hpreq_add_nolock()
- */
- int (*op_res_get)(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq,
- const struct ptlrpc_nrs_resource *parent,
- struct ptlrpc_nrs_resource **resp,
- bool moving_req);
- /**
- * Called when releasing references taken for resources in the resource
- * hierarchy for the request; this operation is optional.
- *
- * \param[in,out] policy The policy the resource belongs to
- * \param[in] res The resource to be freed
- *
- * \see ptlrpc_nrs_req_finalize()
- * \see ptlrpc_nrs_hpreq_add_nolock()
- */
- void (*op_res_put)(struct ptlrpc_nrs_policy *policy,
- const struct ptlrpc_nrs_resource *res);
-
- /**
- * Obtains a request for handling from the policy, and optionally
- * removes the request from the policy; this operation is mandatory.
- *
- * \param[in,out] policy The policy to poll
- * \param[in] peek When set, signifies that we just want to
- * examine the request, and not handle it, so the
- * request is not removed from the policy.
- * \param[in] force When set, it will force a policy to return a
- * request if it has one queued.
- *
- * \retval NULL No request available for handling
- * \retval valid-pointer The request polled for handling
- *
- * \see ptlrpc_nrs_req_get_nolock()
- */
- struct ptlrpc_nrs_request *
- (*op_req_get)(struct ptlrpc_nrs_policy *policy, bool peek,
- bool force);
- /**
- * Called when attempting to add a request to a policy for later
- * handling; this operation is mandatory.
- *
- * \param[in,out] policy The policy on which to enqueue \a nrq
- * \param[in,out] nrq The request to enqueue
- *
- * \retval 0 success
- * \retval != 0 error
- *
- * \see ptlrpc_nrs_req_add_nolock()
- */
- int (*op_req_enqueue)(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
- /**
- * Removes a request from the policy's set of pending requests. Normally
- * called after a request has been polled successfully from the policy
- * for handling; this operation is mandatory.
- *
- * \param[in,out] policy The policy the request \a nrq belongs to
- * \param[in,out] nrq The request to dequeue
- */
- void (*op_req_dequeue)(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
- /**
- * Called after the request being carried out. Could be used for
- * job/resource control; this operation is optional.
- *
- * \param[in,out] policy The policy which is stopping to handle request
- * \a nrq
- * \param[in,out] nrq The request
- *
- * \pre assert_spin_locked(&svcpt->scp_req_lock)
- *
- * \see ptlrpc_nrs_req_stop_nolock()
- */
- void (*op_req_stop)(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
- /**
- * Registers the policy's lprocfs interface with a PTLRPC service.
- *
- * \param[in] svc The service
- *
- * \retval 0 success
- * \retval != 0 error
- */
- int (*op_lprocfs_init)(struct ptlrpc_service *svc);
- /**
- * Unegisters the policy's lprocfs interface with a PTLRPC service.
- *
- * In cases of failed policy registration in
- * \e ptlrpc_nrs_policy_register(), this function may be called for a
- * service which has not registered the policy successfully, so
- * implementations of this method should make sure their operations are
- * safe in such cases.
- *
- * \param[in] svc The service
- */
- void (*op_lprocfs_fini)(struct ptlrpc_service *svc);
-};
-
-/**
- * Policy flags
- */
-enum nrs_policy_flags {
- /**
- * Fallback policy, use this flag only on a single supported policy per
- * service. The flag cannot be used on policies that use
- * \e PTLRPC_NRS_FL_REG_EXTERN
- */
- PTLRPC_NRS_FL_FALLBACK = (1 << 0),
- /**
- * Start policy immediately after registering.
- */
- PTLRPC_NRS_FL_REG_START = (1 << 1),
- /**
- * This is a policy registering from a module different to the one NRS
- * core ships in (currently ptlrpc).
- */
- PTLRPC_NRS_FL_REG_EXTERN = (1 << 2),
-};
-
-/**
- * NRS queue type.
- *
- * Denotes whether an NRS instance is for handling normal or high-priority
- * RPCs, or whether an operation pertains to one or both of the NRS instances
- * in a service.
- */
-enum ptlrpc_nrs_queue_type {
- PTLRPC_NRS_QUEUE_REG = (1 << 0),
- PTLRPC_NRS_QUEUE_HP = (1 << 1),
- PTLRPC_NRS_QUEUE_BOTH = (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP)
-};
-
-/**
- * NRS head
- *
- * A PTLRPC service has at least one NRS head instance for handling normal
- * priority RPCs, and may optionally have a second NRS head instance for
- * handling high-priority RPCs. Each NRS head maintains a list of available
- * policies, of which one and only one policy is acting as the fallback policy,
- * and optionally a different policy may be acting as the primary policy. For
- * all RPCs handled by this NRS head instance, NRS core will first attempt to
- * enqueue the RPC using the primary policy (if any). The fallback policy is
- * used in the following cases:
- * - when there was no primary policy in the
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request
- * was initialized.
- * - when the primary policy that was at the
- * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
- * RPC was initialized, denoted it did not wish, or for some other reason was
- * not able to handle the request, by returning a non-valid NRS resource
- * reference.
- * - when the primary policy that was at the
- * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
- * RPC was initialized, fails later during the request enqueueing stage.
- *
- * \see nrs_resource_get_safe()
- * \see nrs_request_enqueue()
- */
-struct ptlrpc_nrs {
- spinlock_t nrs_lock;
- /** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
- /**
- * List of registered policies
- */
- struct list_head nrs_policy_list;
- /**
- * List of policies with queued requests. Policies that have any
- * outstanding requests are queued here, and this list is queried
- * in a round-robin manner from NRS core when obtaining a request
- * for handling. This ensures that requests from policies that at some
- * point transition away from the
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
- */
- struct list_head nrs_policy_queued;
- /**
- * Service partition for this NRS head
- */
- struct ptlrpc_service_part *nrs_svcpt;
- /**
- * Primary policy, which is the preferred policy for handling RPCs
- */
- struct ptlrpc_nrs_policy *nrs_policy_primary;
- /**
- * Fallback policy, which is the backup policy for handling RPCs
- */
- struct ptlrpc_nrs_policy *nrs_policy_fallback;
- /**
- * This NRS head handles either HP or regular requests
- */
- enum ptlrpc_nrs_queue_type nrs_queue_type;
- /**
- * # queued requests from all policies in this NRS head
- */
- unsigned long nrs_req_queued;
- /**
- * # scheduled requests from all policies in this NRS head
- */
- unsigned long nrs_req_started;
- /**
- * # policies on this NRS
- */
- unsigned nrs_num_pols;
- /**
- * This NRS head is in progress of starting a policy
- */
- unsigned nrs_policy_starting:1;
- /**
- * In progress of shutting down the whole NRS head; used during
- * unregistration
- */
- unsigned nrs_stopping:1;
-};
-
-#define NRS_POL_NAME_MAX 16
-
-struct ptlrpc_nrs_pol_desc;
-
-/**
- * Service compatibility predicate; this determines whether a policy is adequate
- * for handling RPCs of a particular PTLRPC service.
- *
- * XXX:This should give the same result during policy registration and
- * unregistration, and for all partitions of a service; so the result should not
- * depend on temporal service or other properties, that may influence the
- * result.
- */
-typedef bool (*nrs_pol_desc_compat_t) (const struct ptlrpc_service *svc,
- const struct ptlrpc_nrs_pol_desc *desc);
-
-struct ptlrpc_nrs_pol_conf {
- /**
- * Human-readable policy name
- */
- char nc_name[NRS_POL_NAME_MAX];
- /**
- * NRS operations for this policy
- */
- const struct ptlrpc_nrs_pol_ops *nc_ops;
- /**
- * Service compatibility predicate
- */
- nrs_pol_desc_compat_t nc_compat;
- /**
- * Set for policies that support a single ptlrpc service, i.e. ones that
- * have \a pd_compat set to nrs_policy_compat_one(). The variable value
- * depicts the name of the single service that such policies are
- * compatible with.
- */
- const char *nc_compat_svc_name;
- /**
- * Owner module for this policy descriptor; policies registering from a
- * different module to the one the NRS framework is held within
- * (currently ptlrpc), should set this field to THIS_MODULE.
- */
- struct module *nc_owner;
- /**
- * Policy registration flags; a bitmask of \e nrs_policy_flags
- */
- unsigned nc_flags;
-};
-
-/**
- * NRS policy registering descriptor
- *
- * Is used to hold a description of a policy that can be passed to NRS core in
- * order to register the policy with NRS heads in different PTLRPC services.
- */
-struct ptlrpc_nrs_pol_desc {
- /**
- * Human-readable policy name
- */
- char pd_name[NRS_POL_NAME_MAX];
- /**
- * Link into nrs_core::nrs_policies
- */
- struct list_head pd_list;
- /**
- * NRS operations for this policy
- */
- const struct ptlrpc_nrs_pol_ops *pd_ops;
- /**
- * Service compatibility predicate
- */
- nrs_pol_desc_compat_t pd_compat;
- /**
- * Set for policies that are compatible with only one PTLRPC service.
- *
- * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name
- */
- const char *pd_compat_svc_name;
- /**
- * Owner module for this policy descriptor.
- *
- * We need to hold a reference to the module whenever we might make use
- * of any of the module's contents, i.e.
- * - If one or more instances of the policy are at a state where they
- * might be handling a request, i.e.
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to
- * call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference
- * is taken on the module when
- * \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it
- * becomes 0, so that we hold only one reference to the module maximum
- * at any time.
- *
- * We do not need to hold a reference to the module, even though we
- * might use code and data from the module, in the following cases:
- * - During external policy registration, because this should happen in
- * the module's init() function, in which case the module is safe from
- * removal because a reference is being held on the module by the
- * kernel, and iirc kmod (and I guess module-init-tools also) will
- * serialize any racing processes properly anyway.
- * - During external policy unregistration, because this should happen
- * in a module's exit() function, and any attempts to start a policy
- * instance would need to take a reference on the module, and this is
- * not possible once we have reached the point where the exit()
- * handler is called.
- * - During service registration and unregistration, as service setup
- * and cleanup, and policy registration, unregistration and policy
- * instance starting, are serialized by \e nrs_core::nrs_mutex, so
- * as long as users adhere to the convention of registering policies
- * in init() and unregistering them in module exit() functions, there
- * should not be a race between these operations.
- * - During any policy-specific lprocfs operations, because a reference
- * is held by the kernel on a proc entry that has been entered by a
- * syscall, so as long as proc entries are removed during unregistration time,
- * then unregistration and lprocfs operations will be properly
- * serialized.
- */
- struct module *pd_owner;
- /**
- * Bitmask of \e nrs_policy_flags
- */
- unsigned pd_flags;
- /**
- * # of references on this descriptor
- */
- atomic_t pd_refs;
-};
-
-/**
- * NRS policy state
- *
- * Policies transition from one state to the other during their lifetime
- */
-enum ptlrpc_nrs_pol_state {
- /**
- * Not a valid policy state.
- */
- NRS_POL_STATE_INVALID,
- /**
- * Policies are at this state either at the start of their life, or
- * transition here when the user selects a different policy to act
- * as the primary one.
- */
- NRS_POL_STATE_STOPPED,
- /**
- * Policy is progress of stopping
- */
- NRS_POL_STATE_STOPPING,
- /**
- * Policy is in progress of starting
- */
- NRS_POL_STATE_STARTING,
- /**
- * A policy is in this state in two cases:
- * - it is the fallback policy, which is always in this state.
- * - it has been activated by the user; i.e. it is the primary policy,
- */
- NRS_POL_STATE_STARTED,
-};
-
-/**
- * NRS policy information
- *
- * Used for obtaining information for the status of a policy via lprocfs
- */
-struct ptlrpc_nrs_pol_info {
- /**
- * Policy name
- */
- char pi_name[NRS_POL_NAME_MAX];
- /**
- * Current policy state
- */
- enum ptlrpc_nrs_pol_state pi_state;
- /**
- * # RPCs enqueued for later dispatching by the policy
- */
- long pi_req_queued;
- /**
- * # RPCs started for dispatch by the policy
- */
- long pi_req_started;
- /**
- * Is this a fallback policy?
- */
- unsigned pi_fallback:1;
-};
-
-/**
- * NRS policy
- *
- * There is one instance of this for each policy in each NRS head of each
- * PTLRPC service partition.
- */
-struct ptlrpc_nrs_policy {
- /**
- * Linkage into the NRS head's list of policies,
- * ptlrpc_nrs:nrs_policy_list
- */
- struct list_head pol_list;
- /**
- * Linkage into the NRS head's list of policies with enqueued
- * requests ptlrpc_nrs:nrs_policy_queued
- */
- struct list_head pol_list_queued;
- /**
- * Current state of this policy
- */
- enum ptlrpc_nrs_pol_state pol_state;
- /**
- * Bitmask of nrs_policy_flags
- */
- unsigned pol_flags;
- /**
- * # RPCs enqueued for later dispatching by the policy
- */
- long pol_req_queued;
- /**
- * # RPCs started for dispatch by the policy
- */
- long pol_req_started;
- /**
- * Usage Reference count taken on the policy instance
- */
- long pol_ref;
- /**
- * The NRS head this policy has been created at
- */
- struct ptlrpc_nrs *pol_nrs;
- /**
- * Private policy data; varies by policy type
- */
- void *pol_private;
- /**
- * Policy descriptor for this policy instance.
- */
- struct ptlrpc_nrs_pol_desc *pol_desc;
-};
-
-/**
- * NRS resource
- *
- * Resources are embedded into two types of NRS entities:
- * - Inside NRS policies, in the policy's private data in
- * ptlrpc_nrs_policy::pol_private
- * - In objects that act as prime-level scheduling entities in different NRS
- * policies; e.g. on a policy that performs round robin or similar order
- * scheduling across client NIDs, there would be one NRS resource per unique
- * client NID. On a policy which performs round robin scheduling across
- * backend filesystem objects, there would be one resource associated with
- * each of the backend filesystem objects partaking in the scheduling
- * performed by the policy.
- *
- * NRS resources share a parent-child relationship, in which resources embedded
- * in policy instances are the parent entities, with all scheduling entities
- * a policy schedules across being the children, thus forming a simple resource
- * hierarchy. This hierarchy may be extended with one or more levels in the
- * future if the ability to have more than one primary policy is added.
- *
- * Upon request initialization, references to the then active NRS policies are
- * taken and used to later handle the dispatching of the request with one of
- * these policies.
- *
- * \see nrs_resource_get_safe()
- * \see ptlrpc_nrs_req_add()
- */
-struct ptlrpc_nrs_resource {
- /**
- * This NRS resource's parent; is NULL for resources embedded in NRS
- * policy instances; i.e. those are top-level ones.
- */
- struct ptlrpc_nrs_resource *res_parent;
- /**
- * The policy associated with this resource.
- */
- struct ptlrpc_nrs_policy *res_policy;
-};
-
-enum {
- NRS_RES_FALLBACK,
- NRS_RES_PRIMARY,
- NRS_RES_MAX
-};
-
-/* \name fifo
- *
- * FIFO policy
- *
- * This policy is a logical wrapper around previous, non-NRS functionality.
- * It dispatches RPCs in the same order as they arrive from the network. This
- * policy is currently used as the fallback policy, and the only enabled policy
- * on all NRS heads of all PTLRPC service partitions.
- * @{
- */
-
-/**
- * Private data structure for the FIFO policy
- */
-struct nrs_fifo_head {
- /**
- * Resource object for policy instance.
- */
- struct ptlrpc_nrs_resource fh_res;
- /**
- * List of queued requests.
- */
- struct list_head fh_list;
- /**
- * For debugging purposes.
- */
- __u64 fh_sequence;
-};
-
-struct nrs_fifo_req {
- struct list_head fr_list;
- __u64 fr_sequence;
-};
-
-/** @} fifo */
-
-/**
- * NRS request
- *
- * Instances of this object exist embedded within ptlrpc_request; the main
- * purpose of this object is to hold references to the request's resources
- * for the lifetime of the request, and to hold properties that policies use
- * use for determining the request's scheduling priority.
- */
-struct ptlrpc_nrs_request {
- /**
- * The request's resource hierarchy.
- */
- struct ptlrpc_nrs_resource *nr_res_ptrs[NRS_RES_MAX];
- /**
- * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the
- * policy that was used to enqueue the request.
- *
- * \see nrs_request_enqueue()
- */
- unsigned nr_res_idx;
- unsigned nr_initialized:1;
- unsigned nr_enqueued:1;
- unsigned nr_started:1;
- unsigned nr_finalized:1;
-
- /**
- * Policy-specific fields, used for determining a request's scheduling
- * priority, and other supporting functionality.
- */
- union {
- /**
- * Fields for the FIFO policy
- */
- struct nrs_fifo_req fifo;
- } nr_u;
- /**
- * Externally-registering policies may want to use this to allocate
- * their own request properties.
- */
- void *ext;
-};
-
-/** @} nrs */
+#include "lustre_nrs.h"
/**
* Basic request prioritization operations structure.
@@ -1304,6 +600,8 @@ struct ptlrpc_cli_req {
union ptlrpc_async_args cr_async_args;
/** Opaq data for replay and commit callbacks. */
void *cr_cb_data;
+ /** Link to the imp->imp_unreplied_list */
+ struct list_head cr_unreplied_list;
/**
* Commit callback, called when request is committed and about to be
* freed.
@@ -1343,6 +641,7 @@ struct ptlrpc_cli_req {
#define rq_interpret_reply rq_cli.cr_reply_interp
#define rq_async_args rq_cli.cr_async_args
#define rq_cb_data rq_cli.cr_cb_data
+#define rq_unreplied_list rq_cli.cr_unreplied_list
#define rq_commit_cb rq_cli.cr_commit_cb
#define rq_replay_cb rq_cli.cr_replay_cb
@@ -1505,6 +804,8 @@ struct ptlrpc_request {
__u64 rq_transno;
/** xid */
__u64 rq_xid;
+ /** bulk match bits */
+ u64 rq_mbits;
/**
* List item to for replay list. Not yet committed requests get linked
* there.
@@ -1793,10 +1094,93 @@ struct ptlrpc_bulk_page {
struct page *bp_page;
};
-#define BULK_GET_SOURCE 0
-#define BULK_PUT_SINK 1
-#define BULK_GET_SINK 2
-#define BULK_PUT_SOURCE 3
+enum ptlrpc_bulk_op_type {
+ PTLRPC_BULK_OP_ACTIVE = 0x00000001,
+ PTLRPC_BULK_OP_PASSIVE = 0x00000002,
+ PTLRPC_BULK_OP_PUT = 0x00000004,
+ PTLRPC_BULK_OP_GET = 0x00000008,
+ PTLRPC_BULK_BUF_KVEC = 0x00000010,
+ PTLRPC_BULK_BUF_KIOV = 0x00000020,
+ PTLRPC_BULK_GET_SOURCE = PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_GET,
+ PTLRPC_BULK_PUT_SINK = PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_PUT,
+ PTLRPC_BULK_GET_SINK = PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_GET,
+ PTLRPC_BULK_PUT_SOURCE = PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_PUT,
+};
+
+static inline bool ptlrpc_is_bulk_op_get(enum ptlrpc_bulk_op_type type)
+{
+ return (type & PTLRPC_BULK_OP_GET) == PTLRPC_BULK_OP_GET;
+}
+
+static inline bool ptlrpc_is_bulk_get_source(enum ptlrpc_bulk_op_type type)
+{
+ return (type & PTLRPC_BULK_GET_SOURCE) == PTLRPC_BULK_GET_SOURCE;
+}
+
+static inline bool ptlrpc_is_bulk_put_sink(enum ptlrpc_bulk_op_type type)
+{
+ return (type & PTLRPC_BULK_PUT_SINK) == PTLRPC_BULK_PUT_SINK;
+}
+
+static inline bool ptlrpc_is_bulk_get_sink(enum ptlrpc_bulk_op_type type)
+{
+ return (type & PTLRPC_BULK_GET_SINK) == PTLRPC_BULK_GET_SINK;
+}
+
+static inline bool ptlrpc_is_bulk_put_source(enum ptlrpc_bulk_op_type type)
+{
+ return (type & PTLRPC_BULK_PUT_SOURCE) == PTLRPC_BULK_PUT_SOURCE;
+}
+
+static inline bool ptlrpc_is_bulk_desc_kvec(enum ptlrpc_bulk_op_type type)
+{
+ return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV))
+ == PTLRPC_BULK_BUF_KVEC;
+}
+
+static inline bool ptlrpc_is_bulk_desc_kiov(enum ptlrpc_bulk_op_type type)
+{
+ return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV))
+ == PTLRPC_BULK_BUF_KIOV;
+}
+
+static inline bool ptlrpc_is_bulk_op_active(enum ptlrpc_bulk_op_type type)
+{
+ return ((type & PTLRPC_BULK_OP_ACTIVE) |
+ (type & PTLRPC_BULK_OP_PASSIVE)) == PTLRPC_BULK_OP_ACTIVE;
+}
+
+static inline bool ptlrpc_is_bulk_op_passive(enum ptlrpc_bulk_op_type type)
+{
+ return ((type & PTLRPC_BULK_OP_ACTIVE) |
+ (type & PTLRPC_BULK_OP_PASSIVE)) == PTLRPC_BULK_OP_PASSIVE;
+}
+
+struct ptlrpc_bulk_frag_ops {
+ /**
+ * Add a page \a page to the bulk descriptor \a desc
+ * Data to transfer in the page starts at offset \a pageoffset and
+ * amount of data to transfer from the page is \a len
+ */
+ void (*add_kiov_frag)(struct ptlrpc_bulk_desc *desc,
+ struct page *page, int pageoffset, int len);
+
+ /*
+ * Add a \a fragment to the bulk descriptor \a desc.
+ * Data to transfer in the fragment is pointed to by \a frag
+ * The size of the fragment is \a len
+ */
+ int (*add_iov_frag)(struct ptlrpc_bulk_desc *desc, void *frag, int len);
+
+ /**
+ * Uninitialize and free bulk descriptor \a desc.
+ * Works on bulk descriptors both from server and client side.
+ */
+ void (*release_frags)(struct ptlrpc_bulk_desc *desc);
+};
+
+extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops;
+extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops;
/**
* Definition of bulk descriptor.
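The op-type enum above composes direction (PUT/GET), role (ACTIVE/PASSIVE) and buffer kind (KVEC/KIOV) out of independent bits, so the predicates reduce to mask tests. A small illustration; the combined helper is hypothetical, while the predicates it calls are the ones defined above:

    /* A client read descriptor is typically typed "passive GET over kiov". */
    static inline bool ex_is_kiov_get_source(enum ptlrpc_bulk_op_type type)
    {
            return ptlrpc_is_bulk_get_source(type) &&
                   ptlrpc_is_bulk_desc_kiov(type);
    }

    /* ex_is_kiov_get_source(PTLRPC_BULK_GET_SOURCE | PTLRPC_BULK_BUF_KIOV) -> true
     * ex_is_kiov_get_source(PTLRPC_BULK_PUT_SINK   | PTLRPC_BULK_BUF_KIOV) -> false
     */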
@@ -1811,14 +1195,14 @@ struct ptlrpc_bulk_page {
struct ptlrpc_bulk_desc {
/** completed with failure */
unsigned long bd_failure:1;
- /** {put,get}{source,sink} */
- unsigned long bd_type:2;
/** client side */
unsigned long bd_registered:1;
/** For serialization with callback */
spinlock_t bd_lock;
/** Import generation when request for this bulk was sent */
int bd_import_generation;
+ /** {put,get}{source,sink}{kvec,kiov} */
+ enum ptlrpc_bulk_op_type bd_type;
/** LNet portal for this bulk */
__u32 bd_portal;
/** Server side - export this bulk created for */
@@ -1827,13 +1211,14 @@ struct ptlrpc_bulk_desc {
struct obd_import *bd_import;
/** Back pointer to the request */
struct ptlrpc_request *bd_req;
+ struct ptlrpc_bulk_frag_ops *bd_frag_ops;
wait_queue_head_t bd_waitq; /* server side only WQ */
int bd_iov_count; /* # entries in bd_iov */
int bd_max_iov; /* allocated size of bd_iov */
int bd_nob; /* # bytes covered */
int bd_nob_transferred; /* # bytes GOT/PUT */
- __u64 bd_last_xid;
+ u64 bd_last_mbits;
struct ptlrpc_cb_id bd_cbid; /* network callback info */
lnet_nid_t bd_sender; /* stash event::sender */
@@ -1842,14 +1227,31 @@ struct ptlrpc_bulk_desc {
/** array of associated MDs */
lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT];
- /*
- * encrypt iov, size is either 0 or bd_iov_count.
- */
- lnet_kiov_t *bd_enc_iov;
-
- lnet_kiov_t bd_iov[0];
+ union {
+ struct {
+ /*
+ * encrypt iov, size is either 0 or bd_iov_count.
+ */
+ struct bio_vec *bd_enc_vec;
+ struct bio_vec *bd_vec; /* Array of bio_vecs */
+ } bd_kiov;
+
+ struct {
+ struct kvec *bd_enc_kvec;
+ struct kvec *bd_kvec; /* Array of kvecs */
+ } bd_kvec;
+ } bd_u;
};
+#define GET_KIOV(desc) ((desc)->bd_u.bd_kiov.bd_vec)
+#define BD_GET_KIOV(desc, i) ((desc)->bd_u.bd_kiov.bd_vec[i])
+#define GET_ENC_KIOV(desc) ((desc)->bd_u.bd_kiov.bd_enc_vec)
+#define BD_GET_ENC_KIOV(desc, i) ((desc)->bd_u.bd_kiov.bd_enc_vec[i])
+#define GET_KVEC(desc) ((desc)->bd_u.bd_kvec.bd_kvec)
+#define BD_GET_KVEC(desc, i) ((desc)->bd_u.bd_kvec.bd_kvec[i])
+#define GET_ENC_KVEC(desc) ((desc)->bd_u.bd_kvec.bd_enc_kvec)
+#define BD_GET_ENC_KVEC(desc, i) ((desc)->bd_u.bd_kvec.bd_enc_kvec[i])
+
enum {
SVC_STOPPED = 1 << 0,
SVC_STOPPING = 1 << 1,
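Because bd_u is a union, the accessor macros above are the intended way to reach the fragment arrays; which side of the union is valid follows from bd_type. A short sketch of walking a kiov-backed descriptor (the helper is illustrative; a kvec-backed descriptor would use the GET_KVEC()/BD_GET_KVEC() accessors instead):

    /* Sum the bytes covered by a kiov-backed bulk descriptor. */
    static inline unsigned int ex_kiov_bytes(struct ptlrpc_bulk_desc *desc)
    {
            unsigned int nob = 0;
            int i;

            for (i = 0; i < desc->bd_iov_count; i++)
                    nob += BD_GET_KIOV(desc, i).bv_len;

            return nob;
    }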
@@ -2464,21 +1866,17 @@ int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
void ptlrpc_req_finished(struct ptlrpc_request *request);
struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
- unsigned npages, unsigned max_brw,
- unsigned type, unsigned portal);
-void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin);
-static inline void ptlrpc_free_bulk_pin(struct ptlrpc_bulk_desc *bulk)
-{
- __ptlrpc_free_bulk(bulk, 1);
-}
-
-static inline void ptlrpc_free_bulk_nopin(struct ptlrpc_bulk_desc *bulk)
-{
- __ptlrpc_free_bulk(bulk, 0);
-}
-
+ unsigned int nfrags,
+ unsigned int max_brw,
+ unsigned int type,
+ unsigned int portal,
+ const struct ptlrpc_bulk_frag_ops *ops);
+
+int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
+ void *frag, int len);
void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
- struct page *page, int pageoffset, int len, int);
+ struct page *page, int pageoffset, int len,
+ int pin);
static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
struct page *page, int pageoffset,
int len)
@@ -2493,6 +1891,16 @@ static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
__ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
}
+void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk);
+
+static inline void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc)
+{
+ int i;
+
+ for (i = 0; i < desc->bd_iov_count ; i++)
+ put_page(BD_GET_KIOV(desc, i).bv_page);
+}
+
void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
struct obd_import *imp);
__u64 ptlrpc_next_xid(void);
@@ -2652,6 +2060,7 @@ struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
__u32 lustre_msg_get_type(struct lustre_msg *msg);
void lustre_msg_add_version(struct lustre_msg *msg, u32 version);
__u32 lustre_msg_get_opc(struct lustre_msg *msg);
+__u16 lustre_msg_get_tag(struct lustre_msg *msg);
__u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
__u64 *lustre_msg_get_versions(struct lustre_msg *msg);
__u64 lustre_msg_get_transno(struct lustre_msg *msg);
@@ -2670,6 +2079,8 @@ void lustre_msg_set_handle(struct lustre_msg *msg,
struct lustre_handle *handle);
void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
+void lustre_msg_set_last_xid(struct lustre_msg *msg, u64 last_xid);
+void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag);
void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
@@ -2679,6 +2090,7 @@ void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid);
void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
+void lustre_msg_set_mbits(struct lustre_msg *msg, u64 mbits);
static inline void
lustre_shrink_reply(struct ptlrpc_request *req, int segment,
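Pulling the lustre_net.h changes together, a hedged sketch of preparing a client-side kiov bulk descriptor with the new ptlrpc_prep_bulk_imp() prototype; the wrapper, its parameters and the single-BRW assumption are illustrative only:

    /* Prepare a bulk read descriptor backed by pinned kiov pages. */
    static struct ptlrpc_bulk_desc *
    ex_prep_read_bulk(struct ptlrpc_request *req, struct page **pages,
                      unsigned int npages, unsigned int portal)
    {
            struct ptlrpc_bulk_desc *desc;
            unsigned int i;

            desc = ptlrpc_prep_bulk_imp(req, npages, 1,
                                        PTLRPC_BULK_PUT_SINK |
                                        PTLRPC_BULK_BUF_KIOV,
                                        portal, &ptlrpc_bulk_kiov_pin_ops);
            if (!desc)
                    return NULL;

            for (i = 0; i < npages; i++)
                    ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);

            return desc;
    }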
diff --git a/drivers/staging/lustre/lustre/include/lustre_nrs.h b/drivers/staging/lustre/lustre/include/lustre_nrs.h
new file mode 100644
index 000000000000..a5028aaa19cd
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_nrs.h
@@ -0,0 +1,717 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * Copyright 2012 Xyratex Technology Limited
+ */
+/*
+ *
+ * Network Request Scheduler (NRS)
+ *
+ */
+
+#ifndef _LUSTRE_NRS_H
+#define _LUSTRE_NRS_H
+
+/**
+ * \defgroup nrs Network Request Scheduler
+ * @{
+ */
+struct ptlrpc_nrs_policy;
+struct ptlrpc_nrs_resource;
+struct ptlrpc_nrs_request;
+
+/**
+ * NRS control operations.
+ *
+ * These are common for all policies.
+ */
+enum ptlrpc_nrs_ctl {
+ /**
+ * Not a valid opcode.
+ */
+ PTLRPC_NRS_CTL_INVALID,
+ /**
+ * Activate the policy.
+ */
+ PTLRPC_NRS_CTL_START,
+ /**
+ * Reserved for multiple primary policies, which may be a possibility
+ * in the future.
+ */
+ PTLRPC_NRS_CTL_STOP,
+ /**
+ * Policies can start using opcodes from this value and onwards for
+ * their own purposes; the assigned value itself is arbitrary.
+ */
+ PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20,
+};
+
+/**
+ * NRS policy operations.
+ *
+ * These determine the behaviour of a policy, and are called in response to
+ * NRS core events.
+ */
+struct ptlrpc_nrs_pol_ops {
+ /**
+ * Called during policy registration; this operation is optional.
+ *
+ * \param[in,out] policy The policy being initialized
+ */
+ int (*op_policy_init)(struct ptlrpc_nrs_policy *policy);
+ /**
+ * Called during policy unregistration; this operation is optional.
+ *
+ * \param[in,out] policy The policy being unregistered/finalized
+ */
+ void (*op_policy_fini)(struct ptlrpc_nrs_policy *policy);
+ /**
+ * Called when activating a policy via lprocfs; policies allocate and
+ * initialize their resources here; this operation is optional.
+ *
+ * \param[in,out] policy The policy being started
+ *
+ * \see nrs_policy_start_locked()
+ */
+ int (*op_policy_start)(struct ptlrpc_nrs_policy *policy);
+ /**
+ * Called when deactivating a policy via lprocfs; policies deallocate
+ * their resources here; this operation is optional
+ *
+ * \param[in,out] policy The policy being stopped
+ *
+ * \see nrs_policy_stop0()
+ */
+ void (*op_policy_stop)(struct ptlrpc_nrs_policy *policy);
+ /**
+ * Used for policy-specific operations; i.e. not generic ones like
+ * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
+ * to an ioctl; this operation is optional.
+ *
+ * \param[in,out] policy The policy carrying out operation \a opc
+ * \param[in] opc The command operation being carried out
+	 * \param[in,out] arg   A generic buffer for communication between the
+ * user and the control operation
+ *
+ * \retval -ve error
+ * \retval 0 success
+ *
+ * \see ptlrpc_nrs_policy_control()
+ */
+ int (*op_policy_ctl)(struct ptlrpc_nrs_policy *policy,
+ enum ptlrpc_nrs_ctl opc, void *arg);
+
+ /**
+ * Called when obtaining references to the resources of the resource
+ * hierarchy for a request that has arrived for handling at the PTLRPC
+ * service. Policies should return -ve for requests they do not wish
+ * to handle. This operation is mandatory.
+ *
+ * \param[in,out] policy The policy we're getting resources for.
+ * \param[in,out] nrq The request we are getting resources for.
+ * \param[in] parent The parent resource of the resource being
+ * requested; set to NULL if none.
+ * \param[out] resp The resource is to be returned here; the
+ * fallback policy in an NRS head should
+ * \e always return a non-NULL pointer value.
+ * \param[in] moving_req When set, signifies that this is an attempt
+ * to obtain resources for a request being moved
+ * to the high-priority NRS head by
+ * ldlm_lock_reorder_req().
+ * This implies two things:
+ * 1. We are under obd_export::exp_rpc_lock and
+ * so should not sleep.
+	 *			     2. We should not perform non-idempotent
+	 *			     operations, or we may skip idempotent
+	 *			     operations that were already carried out
+	 *			     when resources were first taken for the
+	 *			     request in ptlrpc_nrs_req_initialize().
+ *
+ * \retval 0, +ve The level of the returned resource in the resource
+ * hierarchy; currently only 0 (for a non-leaf resource)
+ * and 1 (for a leaf resource) are supported by the
+ * framework.
+ * \retval -ve error
+ *
+ * \see ptlrpc_nrs_req_initialize()
+ * \see ptlrpc_nrs_hpreq_add_nolock()
+ * \see ptlrpc_nrs_req_hp_move()
+ */
+ int (*op_res_get)(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq,
+ const struct ptlrpc_nrs_resource *parent,
+ struct ptlrpc_nrs_resource **resp,
+ bool moving_req);
+ /**
+ * Called when releasing references taken for resources in the resource
+ * hierarchy for the request; this operation is optional.
+ *
+ * \param[in,out] policy The policy the resource belongs to
+ * \param[in] res The resource to be freed
+ *
+ * \see ptlrpc_nrs_req_finalize()
+ * \see ptlrpc_nrs_hpreq_add_nolock()
+ * \see ptlrpc_nrs_req_hp_move()
+ */
+ void (*op_res_put)(struct ptlrpc_nrs_policy *policy,
+ const struct ptlrpc_nrs_resource *res);
+
+ /**
+ * Obtains a request for handling from the policy, and optionally
+ * removes the request from the policy; this operation is mandatory.
+ *
+ * \param[in,out] policy The policy to poll
+ * \param[in] peek When set, signifies that we just want to
+ * examine the request, and not handle it, so the
+ * request is not removed from the policy.
+ * \param[in] force When set, it will force a policy to return a
+ * request if it has one queued.
+ *
+ * \retval NULL No request available for handling
+ * \retval valid-pointer The request polled for handling
+ *
+ * \see ptlrpc_nrs_req_get_nolock()
+ */
+ struct ptlrpc_nrs_request *
+ (*op_req_get)(struct ptlrpc_nrs_policy *policy, bool peek,
+ bool force);
+ /**
+ * Called when attempting to add a request to a policy for later
+ * handling; this operation is mandatory.
+ *
+ * \param[in,out] policy The policy on which to enqueue \a nrq
+ * \param[in,out] nrq The request to enqueue
+ *
+ * \retval 0 success
+ * \retval != 0 error
+ *
+ * \see ptlrpc_nrs_req_add_nolock()
+ */
+ int (*op_req_enqueue)(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq);
+ /**
+ * Removes a request from the policy's set of pending requests. Normally
+ * called after a request has been polled successfully from the policy
+ * for handling; this operation is mandatory.
+ *
+ * \param[in,out] policy The policy the request \a nrq belongs to
+ * \param[in,out] nrq The request to dequeue
+ *
+ * \see ptlrpc_nrs_req_del_nolock()
+ */
+ void (*op_req_dequeue)(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq);
+ /**
+ * Called after the request being carried out. Could be used for
+ * job/resource control; this operation is optional.
+ *
+ * \param[in,out] policy The policy which is stopping to handle request
+ * \a nrq
+ * \param[in,out] nrq The request
+ *
+ * \pre assert_spin_locked(&svcpt->scp_req_lock)
+ *
+ * \see ptlrpc_nrs_req_stop_nolock()
+ */
+ void (*op_req_stop)(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq);
+ /**
+ * Registers the policy's lprocfs interface with a PTLRPC service.
+ *
+ * \param[in] svc The service
+ *
+ * \retval 0 success
+ * \retval != 0 error
+ */
+ int (*op_lprocfs_init)(struct ptlrpc_service *svc);
+ /**
+	 * Unregisters the policy's lprocfs interface with a PTLRPC service.
+ *
+ * In cases of failed policy registration in
+ * \e ptlrpc_nrs_policy_register(), this function may be called for a
+ * service which has not registered the policy successfully, so
+ * implementations of this method should make sure their operations are
+ * safe in such cases.
+ *
+ * \param[in] svc The service
+ */
+ void (*op_lprocfs_fini)(struct ptlrpc_service *svc);
+};
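For a rough idea of how the ops table above gets filled in, a skeleton policy that wires up only the mandatory hooks. Everything here is illustrative and does no real scheduling; the FIFO policy in ptlrpc is the reference implementation:

    static struct ptlrpc_nrs_resource ex_res;

    static int ex_res_get(struct ptlrpc_nrs_policy *policy,
                          struct ptlrpc_nrs_request *nrq,
                          const struct ptlrpc_nrs_resource *parent,
                          struct ptlrpc_nrs_resource **resp, bool moving_req)
    {
            *resp = &ex_res;        /* single policy-wide resource */
            return 1;               /* leaf level of the hierarchy */
    }

    static struct ptlrpc_nrs_request *
    ex_req_get(struct ptlrpc_nrs_policy *policy, bool peek, bool force)
    {
            return NULL;            /* a real policy returns the next queued request */
    }

    static int ex_req_enqueue(struct ptlrpc_nrs_policy *policy,
                              struct ptlrpc_nrs_request *nrq)
    {
            return 0;               /* a real policy links nrq into its queue here */
    }

    static void ex_req_dequeue(struct ptlrpc_nrs_policy *policy,
                               struct ptlrpc_nrs_request *nrq)
    {
    }

    static const struct ptlrpc_nrs_pol_ops ex_nrs_ops = {
            .op_res_get     = ex_res_get,
            .op_req_get     = ex_req_get,
            .op_req_enqueue = ex_req_enqueue,
            .op_req_dequeue = ex_req_dequeue,
    };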
+
+/**
+ * Policy flags
+ */
+enum nrs_policy_flags {
+ /**
+ * Fallback policy, use this flag only on a single supported policy per
+ * service. The flag cannot be used on policies that use
+ * \e PTLRPC_NRS_FL_REG_EXTERN
+ */
+ PTLRPC_NRS_FL_FALLBACK = BIT(0),
+ /**
+ * Start policy immediately after registering.
+ */
+ PTLRPC_NRS_FL_REG_START = BIT(1),
+ /**
+ * This is a policy registering from a module different to the one NRS
+ * core ships in (currently ptlrpc).
+ */
+ PTLRPC_NRS_FL_REG_EXTERN = BIT(2),
+};
+
+/**
+ * NRS queue type.
+ *
+ * Denotes whether an NRS instance is for handling normal or high-priority
+ * RPCs, or whether an operation pertains to one or both of the NRS instances
+ * in a service.
+ */
+enum ptlrpc_nrs_queue_type {
+ PTLRPC_NRS_QUEUE_REG = BIT(0),
+ PTLRPC_NRS_QUEUE_HP = BIT(1),
+ PTLRPC_NRS_QUEUE_BOTH = (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP)
+};
+
+/**
+ * NRS head
+ *
+ * A PTLRPC service has at least one NRS head instance for handling normal
+ * priority RPCs, and may optionally have a second NRS head instance for
+ * handling high-priority RPCs. Each NRS head maintains a list of available
+ * policies, of which one and only one policy is acting as the fallback policy,
+ * and optionally a different policy may be acting as the primary policy. For
+ * all RPCs handled by this NRS head instance, NRS core will first attempt to
+ * enqueue the RPC using the primary policy (if any). The fallback policy is
+ * used in the following cases:
+ * - when there was no primary policy in the
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request
+ * was initialized.
+ * - when the primary policy that was at the
+ * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
+ * RPC was initialized, denoted it did not wish, or for some other reason was
+ * not able to handle the request, by returning a non-valid NRS resource
+ * reference.
+ * - when the primary policy that was at the
+ * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
+ * RPC was initialized, fails later during the request enqueueing stage.
+ *
+ * \see nrs_resource_get_safe()
+ * \see nrs_request_enqueue()
+ */
+struct ptlrpc_nrs {
+ spinlock_t nrs_lock;
+ /** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
+ /**
+ * List of registered policies
+ */
+ struct list_head nrs_policy_list;
+ /**
+ * List of policies with queued requests. Policies that have any
+ * outstanding requests are queued here, and this list is queried
+ * in a round-robin manner from NRS core when obtaining a request
+ * for handling. This ensures that requests from policies that at some
+ * point transition away from the
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
+ */
+ struct list_head nrs_policy_queued;
+ /**
+ * Service partition for this NRS head
+ */
+ struct ptlrpc_service_part *nrs_svcpt;
+ /**
+ * Primary policy, which is the preferred policy for handling RPCs
+ */
+ struct ptlrpc_nrs_policy *nrs_policy_primary;
+ /**
+ * Fallback policy, which is the backup policy for handling RPCs
+ */
+ struct ptlrpc_nrs_policy *nrs_policy_fallback;
+ /**
+ * This NRS head handles either HP or regular requests
+ */
+ enum ptlrpc_nrs_queue_type nrs_queue_type;
+ /**
+ * # queued requests from all policies in this NRS head
+ */
+ unsigned long nrs_req_queued;
+ /**
+ * # scheduled requests from all policies in this NRS head
+ */
+ unsigned long nrs_req_started;
+ /**
+ * # policies on this NRS
+ */
+ unsigned int nrs_num_pols;
+ /**
+ * This NRS head is in progress of starting a policy
+ */
+ unsigned int nrs_policy_starting:1;
+ /**
+ * In progress of shutting down the whole NRS head; used during
+ * unregistration
+ */
+ unsigned int nrs_stopping:1;
+ /**
+ * NRS policy is throttling request
+ */
+	 * NRS policy is throttling requests
+};
+
+#define NRS_POL_NAME_MAX 16
+#define NRS_POL_ARG_MAX 16
+
+struct ptlrpc_nrs_pol_desc;
+
+/**
+ * Service compatibility predicate; this determines whether a policy is adequate
+ * for handling RPCs of a particular PTLRPC service.
+ *
+ * XXX:This should give the same result during policy registration and
+ * unregistration, and for all partitions of a service; so the result should not
+ * depend on temporal service or other properties, that may influence the
+ * result.
+ */
+typedef bool (*nrs_pol_desc_compat_t)(const struct ptlrpc_service *svc,
+ const struct ptlrpc_nrs_pol_desc *desc);
+
+struct ptlrpc_nrs_pol_conf {
+ /**
+ * Human-readable policy name
+ */
+ char nc_name[NRS_POL_NAME_MAX];
+ /**
+ * NRS operations for this policy
+ */
+ const struct ptlrpc_nrs_pol_ops *nc_ops;
+ /**
+ * Service compatibility predicate
+ */
+ nrs_pol_desc_compat_t nc_compat;
+ /**
+ * Set for policies that support a single ptlrpc service, i.e. ones that
+ * have \a nc_compat set to nrs_policy_compat_one(). The variable value
+ * holds the name of the single service that such policies are
+ * compatible with.
+ */
+ const char *nc_compat_svc_name;
+ /**
+ * Owner module for this policy descriptor; policies registering from a
+ * module other than the one the NRS framework is held within
+ * (currently ptlrpc) should set this field to THIS_MODULE.
+ */
+ struct module *nc_owner;
+ /**
+ * Policy registration flags; a bitmask of \e nrs_policy_flags
+ */
+ unsigned int nc_flags;
+};
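A hedged usage sketch of this configuration descriptor for an externally-registering policy; my_pol_ops is assumed to be defined elsewhere, and ptlrpc_nrs_policy_register() / nrs_policy_compat_all are taken as the usual NRS entry points rather than guaranteed signatures:

static struct ptlrpc_nrs_pol_conf my_pol_conf = {
	.nc_name	= "example",
	.nc_ops		= &my_pol_ops,		/* policy callbacks, assumed defined elsewhere */
	.nc_compat	= nrs_policy_compat_all,	/* compatible with any ptlrpc service (assumed helper) */
	.nc_owner	= THIS_MODULE,		/* registering from an external module */
	.nc_flags	= 0,
};

static int __init example_policy_init(void)
{
	/* Registration entry point assumed from the NRS core. */
	return ptlrpc_nrs_policy_register(&my_pol_conf);
}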
+
+/**
+ * NRS policy registering descriptor
+ *
+ * Is used to hold a description of a policy that can be passed to NRS core in
+ * order to register the policy with NRS heads in different PTLRPC services.
+ */
+struct ptlrpc_nrs_pol_desc {
+ /**
+ * Human-readable policy name
+ */
+ char pd_name[NRS_POL_NAME_MAX];
+ /**
+ * Link into nrs_core::nrs_policies
+ */
+ struct list_head pd_list;
+ /**
+ * NRS operations for this policy
+ */
+ const struct ptlrpc_nrs_pol_ops *pd_ops;
+ /**
+ * Service compatibility predicate
+ */
+ nrs_pol_desc_compat_t pd_compat;
+ /**
+ * Set for policies that are compatible with only one PTLRPC service.
+ *
+ * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name
+ */
+ const char *pd_compat_svc_name;
+ /**
+ * Owner module for this policy descriptor.
+ *
+ * We need to hold a reference to the module whenever we might make use
+ * of any of the module's contents, i.e.
+ * - If one or more instances of the policy are at a state where they
+ * might be handling a request, i.e.
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to
+ * call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference
+ * is taken on the module when
+ * \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it
+ * becomes 0, so that we hold only one reference to the module maximum
+ * at any time.
+ *
+ * We do not need to hold a reference to the module, even though we
+ * might use code and data from the module, in the following cases:
+ * - During external policy registration, because this should happen in
+ * the module's init() function, in which case the module is safe from
+ * removal because a reference is being held on the module by the
+ * kernel, and kmod (and module-init-tools) will serialize any racing
+ * processes properly anyway.
+ * - During external policy unregistration, because this should happen
+ * in a module's exit() function, and any attempts to start a policy
+ * instance would need to take a reference on the module, and this is
+ * not possible once we have reached the point where the exit()
+ * handler is called.
+ * - During service registration and unregistration, as service setup
+ * and cleanup, and policy registration, unregistration and policy
+ * instance starting, are serialized by \e nrs_core::nrs_mutex, so
+ * as long as users adhere to the convention of registering policies
+ * in init() and unregistering them in module exit() functions, there
+ * should not be a race between these operations.
+ * - During any policy-specific lprocfs operations, because a reference
+ * is held by the kernel on a proc entry that has been entered by a
+ * syscall, so as long as proc entries are removed during
+ * unregistration time, then unregistration and lprocfs operations
+ * will be properly serialized.
+ */
+ struct module *pd_owner;
+ /**
+ * Bitmask of \e nrs_policy_flags
+ */
+ unsigned int pd_flags;
+ /**
+ * # of references on this descriptor
+ */
+ atomic_t pd_refs;
+};
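A sketch of the pd_refs / pd_owner discipline described above (not the actual NRS core code): the owner module is pinned only on the 0 -> 1 transition of pd_refs and released on the 1 -> 0 transition, so at most one module reference is held at any time:

static int nrs_pol_desc_get_sketch(struct ptlrpc_nrs_pol_desc *desc)
{
	if (atomic_inc_return(&desc->pd_refs) == 1 && desc->pd_owner &&
	    !try_module_get(desc->pd_owner)) {
		/* Owner module is being unloaded; undo the reference. */
		atomic_dec(&desc->pd_refs);
		return -ENODEV;
	}
	return 0;
}

static void nrs_pol_desc_put_sketch(struct ptlrpc_nrs_pol_desc *desc)
{
	if (atomic_dec_and_test(&desc->pd_refs) && desc->pd_owner)
		module_put(desc->pd_owner);
}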
+
+/**
+ * NRS policy state
+ *
+ * Policies transition from one state to the other during their lifetime
+ */
+enum ptlrpc_nrs_pol_state {
+ /**
+ * Not a valid policy state.
+ */
+ NRS_POL_STATE_INVALID,
+ /**
+ * Policies are in this state either at the start of their life, or
+ * transition here when the user selects a different policy to act
+ * as the primary one.
+ */
+ NRS_POL_STATE_STOPPED,
+ /**
+ * Policy is in the process of stopping
+ */
+ NRS_POL_STATE_STOPPING,
+ /**
+ * Policy is in the process of starting
+ */
+ NRS_POL_STATE_STARTING,
+ /**
+ * A policy is in this state in two cases:
+ * - it is the fallback policy, which is always in this state.
+ * - it has been activated by the user, i.e. it is the primary policy.
+ */
+ NRS_POL_STATE_STARTED,
+};
+
+/**
+ * NRS policy information
+ *
+ * Used for obtaining information for the status of a policy via lprocfs
+ */
+struct ptlrpc_nrs_pol_info {
+ /**
+ * Policy name
+ */
+ char pi_name[NRS_POL_NAME_MAX];
+ /**
+ * Policy argument
+ */
+ char pi_arg[NRS_POL_ARG_MAX];
+ /**
+ * Current policy state
+ */
+ enum ptlrpc_nrs_pol_state pi_state;
+ /**
+ * # RPCs enqueued for later dispatching by the policy
+ */
+ long pi_req_queued;
+ /**
+ * # RPCs started for dispatch by the policy
+ */
+ long pi_req_started;
+ /**
+ * Is this a fallback policy?
+ */
+ unsigned pi_fallback:1;
+};
+
+/**
+ * NRS policy
+ *
+ * There is one instance of this for each policy in each NRS head of each
+ * PTLRPC service partition.
+ */
+struct ptlrpc_nrs_policy {
+ /**
+ * Linkage into the NRS head's list of policies,
+ * ptlrpc_nrs:nrs_policy_list
+ */
+ struct list_head pol_list;
+ /**
+ * Linkage into the NRS head's list of policies with enqueued
+ * requests ptlrpc_nrs:nrs_policy_queued
+ */
+ struct list_head pol_list_queued;
+ /**
+ * Current state of this policy
+ */
+ enum ptlrpc_nrs_pol_state pol_state;
+ /**
+ * Bitmask of nrs_policy_flags
+ */
+ unsigned int pol_flags;
+ /**
+ * # RPCs enqueued for later dispatching by the policy
+ */
+ long pol_req_queued;
+ /**
+ * # RPCs started for dispatch by the policy
+ */
+ long pol_req_started;
+ /**
+ * Usage reference count taken on the policy instance
+ */
+ long pol_ref;
+ /**
+ * Human-readable policy argument
+ */
+ char pol_arg[NRS_POL_ARG_MAX];
+ /**
+ * The NRS head this policy has been created at
+ */
+ struct ptlrpc_nrs *pol_nrs;
+ /**
+ * Private policy data; varies by policy type
+ */
+ void *pol_private;
+ /**
+ * Policy descriptor for this policy instance.
+ */
+ struct ptlrpc_nrs_pol_desc *pol_desc;
+};
+
+/**
+ * NRS resource
+ *
+ * Resources are embedded into two types of NRS entities:
+ * - Inside NRS policies, in the policy's private data in
+ * ptlrpc_nrs_policy::pol_private
+ * - In objects that act as prime-level scheduling entities in different NRS
+ * policies; e.g. on a policy that performs round robin or similar order
+ * scheduling across client NIDs, there would be one NRS resource per unique
+ * client NID. On a policy which performs round robin scheduling across
+ * backend filesystem objects, there would be one resource associated with
+ * each of the backend filesystem objects partaking in the scheduling
+ * performed by the policy.
+ *
+ * NRS resources share a parent-child relationship, in which resources embedded
+ * in policy instances are the parent entities, with all scheduling entities
+ * a policy schedules across being the children, thus forming a simple resource
+ * hierarchy. This hierarchy may be extended with one or more levels in the
+ * future if the ability to have more than one primary policy is added.
+ *
+ * Upon request initialization, references to the then active NRS policies are
+ * taken and used to later handle the dispatching of the request with one of
+ * these policies.
+ *
+ * \see nrs_resource_get_safe()
+ * \see ptlrpc_nrs_req_add()
+ */
+struct ptlrpc_nrs_resource {
+ /**
+ * This NRS resource's parent; it is NULL for resources embedded in NRS
+ * policy instances, i.e. those are top-level ones.
+ */
+ struct ptlrpc_nrs_resource *res_parent;
+ /**
+ * The policy associated with this resource.
+ */
+ struct ptlrpc_nrs_policy *res_policy;
+};
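Given the parent-child relationship described above, the policy owning any resource can be reached directly through res_policy, or by walking res_parent up to the top-level resource embedded in the policy's private data; a small sketch:

static struct ptlrpc_nrs_policy *
nrs_resource_policy_sketch(struct ptlrpc_nrs_resource *res)
{
	/* Walk up to the top-level, policy-embedded resource. */
	while (res->res_parent)
		res = res->res_parent;
	return res->res_policy;
}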
+
+enum {
+ NRS_RES_FALLBACK,
+ NRS_RES_PRIMARY,
+ NRS_RES_MAX
+};
+
+#include "lustre_nrs_fifo.h"
+
+/**
+ * NRS request
+ *
+ * Instances of this object exist embedded within ptlrpc_request; the main
+ * purpose of this object is to hold references to the request's resources
+ * for the lifetime of the request, and to hold properties that policies
+ * use for determining the request's scheduling priority.
+ **/
+struct ptlrpc_nrs_request {
+ /**
+ * The request's resource hierarchy.
+ */
+ struct ptlrpc_nrs_resource *nr_res_ptrs[NRS_RES_MAX];
+ /**
+ * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the
+ * policy that was used to enqueue the request.
+ *
+ * \see nrs_request_enqueue()
+ */
+ unsigned int nr_res_idx;
+ unsigned int nr_initialized:1;
+ unsigned int nr_enqueued:1;
+ unsigned int nr_started:1;
+ unsigned int nr_finalized:1;
+
+ /**
+ * Policy-specific fields, used for determining a request's scheduling
+ * priority, and other supporting functionality.
+ */
+ union {
+ /**
+ * Fields for the FIFO policy
+ */
+ struct nrs_fifo_req fifo;
+ } nr_u;
+ /**
+ * Externally-registering policies may want to use this to allocate
+ * their own request properties.
+ */
+ void *ext;
+};
+
+/** @} nrs */
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h b/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h
new file mode 100644
index 000000000000..3b5418eac6c4
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h
@@ -0,0 +1,70 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * Copyright 2012 Xyratex Technology Limited
+ */
+/*
+ *
+ * Network Request Scheduler (NRS) First-in First-out (FIFO) policy
+ *
+ */
+
+#ifndef _LUSTRE_NRS_FIFO_H
+#define _LUSTRE_NRS_FIFO_H
+
+/* \name fifo
+ *
+ * FIFO policy
+ *
+ * This policy is a logical wrapper around previous, non-NRS functionality.
+ * It dispatches RPCs in the same order as they arrive from the network. This
+ * policy is currently used as the fallback policy, and the only enabled policy
+ * on all NRS heads of all PTLRPC service partitions.
+ * @{
+ */
+
+/**
+ * Private data structure for the FIFO policy
+ */
+struct nrs_fifo_head {
+ /**
+ * Resource object for policy instance.
+ */
+ struct ptlrpc_nrs_resource fh_res;
+ /**
+ * List of queued requests.
+ */
+ struct list_head fh_list;
+ /**
+ * For debugging purposes.
+ */
+ __u64 fh_sequence;
+};
+
+struct nrs_fifo_req {
+ struct list_head fr_list;
+ __u64 fr_sequence;
+};
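A hedged sketch of how the FIFO policy described above could use these two structures; the real policy handlers in ptlrpc also take the NRS head lock and update per-policy counters:

static void nrs_fifo_enqueue_sketch(struct nrs_fifo_head *head,
				    struct nrs_fifo_req *req)
{
	/* Tag the request with a monotonic sequence number for debugging. */
	req->fr_sequence = head->fh_sequence++;
	list_add_tail(&req->fr_list, &head->fh_list);
}

static struct nrs_fifo_req *nrs_fifo_dequeue_sketch(struct nrs_fifo_head *head)
{
	struct nrs_fifo_req *req;

	/* Dispatch strictly in arrival order. */
	req = list_first_entry_or_null(&head->fh_list, struct nrs_fifo_req,
				       fr_list);
	if (req)
		list_del_init(&req->fr_list);
	return req;
}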
+
+/** @} fifo */
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
index a13558e53274..fbcd39572cd0 100644
--- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h
+++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
@@ -148,13 +148,12 @@ extern struct req_format RQF_MDS_GETATTR;
*/
extern struct req_format RQF_MDS_GETATTR_NAME;
extern struct req_format RQF_MDS_CLOSE;
-extern struct req_format RQF_MDS_RELEASE_CLOSE;
+extern struct req_format RQF_MDS_INTENT_CLOSE;
extern struct req_format RQF_MDS_CONNECT;
extern struct req_format RQF_MDS_DISCONNECT;
extern struct req_format RQF_MDS_GET_INFO;
extern struct req_format RQF_MDS_READPAGE;
extern struct req_format RQF_MDS_WRITEPAGE;
-extern struct req_format RQF_MDS_DONE_WRITING;
extern struct req_format RQF_MDS_REINT;
extern struct req_format RQF_MDS_REINT_CREATE;
extern struct req_format RQF_MDS_REINT_CREATE_ACL;
@@ -166,10 +165,9 @@ extern struct req_format RQF_MDS_REINT_LINK;
extern struct req_format RQF_MDS_REINT_RENAME;
extern struct req_format RQF_MDS_REINT_SETATTR;
extern struct req_format RQF_MDS_REINT_SETXATTR;
-extern struct req_format RQF_MDS_QUOTACHECK;
extern struct req_format RQF_MDS_QUOTACTL;
-extern struct req_format RQF_QC_CALLBACK;
extern struct req_format RQF_MDS_SWAP_LAYOUTS;
+extern struct req_format RQF_MDS_REINT_MIGRATE;
/* MDS hsm formats */
extern struct req_format RQF_MDS_HSM_STATE_GET;
extern struct req_format RQF_MDS_HSM_STATE_SET;
@@ -181,7 +179,6 @@ extern struct req_format RQF_MDS_HSM_REQUEST;
/* OST req_format */
extern struct req_format RQF_OST_CONNECT;
extern struct req_format RQF_OST_DISCONNECT;
-extern struct req_format RQF_OST_QUOTACHECK;
extern struct req_format RQF_OST_QUOTACTL;
extern struct req_format RQF_OST_GETATTR;
extern struct req_format RQF_OST_SETATTR;
diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h
index 90c183424802..03a970bcac55 100644
--- a/drivers/staging/lustre/lustre/include/lustre_sec.h
+++ b/drivers/staging/lustre/lustre/include/lustre_sec.h
@@ -50,6 +50,7 @@ struct brw_page;
/* Linux specific */
struct key;
struct seq_file;
+struct lustre_cfg;
/*
* forward declaration
@@ -1029,6 +1030,8 @@ int sptlrpc_target_export_check(struct obd_export *exp,
/* bulk security api */
void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc);
+int get_free_pages_in_pool(void);
+int pool_is_at_full_capacity(void);
int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
diff --git a/drivers/staging/lustre/lustre/include/lustre_swab.h b/drivers/staging/lustre/lustre/include/lustre_swab.h
new file mode 100644
index 000000000000..26d01c2d6633
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_swab.h
@@ -0,0 +1,102 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2014, Intel Corporation.
+ *
+ * Copyright 2015 Cray Inc, all rights reserved.
+ * Author: Ben Evans.
+ *
+ * We assume all nodes are either little-endian or big-endian, and we
+ * always send messages in the sender's native format. The receiver
+ * detects the message format by checking the 'magic' field of the message
+ * (see lustre_msg_swabbed() below).
+ *
+ * Each wire type has a corresponding 'lustre_swab_xxxtypexxx()' routine,
+ * implemented in ptlrpc/lustre_swab.c. These 'swabbers' convert the
+ * type from the "other" endian, in place in the message buffer.
+ *
+ * A swabber takes a single pointer argument. The caller must already have
+ * verified that the length of the message buffer >= sizeof (type).
+ *
+ * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
+ * may be defined that swabs just the variable part, after the caller has
+ * verified that the message buffer is large enough.
+ */
+
+#ifndef _LUSTRE_SWAB_H_
+#define _LUSTRE_SWAB_H_
+
+#include "lustre/lustre_idl.h"
+
+void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
+void lustre_swab_connect(struct obd_connect_data *ocd);
+void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
+void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
+void lustre_swab_obd_statfs(struct obd_statfs *os);
+void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
+void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
+void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
+void lustre_swab_ost_lvb(struct ost_lvb *lvb);
+void lustre_swab_obd_quotactl(struct obd_quotactl *q);
+void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
+void lustre_swab_generic_32s(__u32 *val);
+void lustre_swab_mdt_body(struct mdt_body *b);
+void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b);
+void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
+void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
+void lustre_swab_lmv_desc(struct lmv_desc *ld);
+void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm);
+void lustre_swab_lov_desc(struct lov_desc *ld);
+void lustre_swab_gl_desc(union ldlm_gl_desc *desc);
+void lustre_swab_ldlm_intent(struct ldlm_intent *i);
+void lustre_swab_ldlm_request(struct ldlm_request *rq);
+void lustre_swab_ldlm_reply(struct ldlm_reply *r);
+void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
+void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
+void lustre_swab_mgs_config_body(struct mgs_config_body *body);
+void lustre_swab_mgs_config_res(struct mgs_config_res *body);
+void lustre_swab_ost_body(struct ost_body *b);
+void lustre_swab_ost_last_id(__u64 *id);
+void lustre_swab_fiemap(struct fiemap *fiemap);
+void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
+void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
+void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
+ int stripe_count);
+void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
+void lustre_swab_lustre_capa(struct lustre_capa *c);
+void lustre_swab_lustre_capa_key(struct lustre_capa_key *k);
+void lustre_swab_fid2path(struct getinfo_fid2path *gf);
+void lustre_swab_layout_intent(struct layout_intent *li);
+void lustre_swab_hsm_current_action(struct hsm_current_action *action);
+void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
+void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
+void lustre_swab_hsm_request(struct hsm_request *hr);
+void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
+void lustre_swab_close_data(struct close_data *data);
+void lustre_swab_lmv_user_md(struct lmv_user_md *lum);
+
+#endif
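A hedged sketch of the swabber convention the header comment describes; the struct and its fields are hypothetical, only the in-place __swabNNs() pattern is the point:

/* Hypothetical wire struct, for illustration only. */
struct example_wire_rec {
	__u64	ewr_id;
	__u32	ewr_flags;
	__u32	ewr_padding;
};

void lustre_swab_example_wire_rec(struct example_wire_rec *rec)
{
	/* The caller has already verified the buffer is >= sizeof(*rec). */
	__swab64s(&rec->ewr_id);
	__swab32s(&rec->ewr_flags);
	/* Padding carries no data and needs no conversion. */
}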
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index f6fc4dd05bd6..0f48e9c3d9e3 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -73,70 +73,17 @@ static inline void loi_init(struct lov_oinfo *loi)
{
}
-/*
- * If we are unable to get the maximum object size from the OST in
- * ocd_maxbytes using OBD_CONNECT_MAXBYTES, then we fall back to using
- * the old maximum object size from ext3.
- */
-#define LUSTRE_EXT3_STRIPE_MAXBYTES 0x1fffffff000ULL
-
-struct lov_stripe_md {
- atomic_t lsm_refc;
- spinlock_t lsm_lock;
- pid_t lsm_lock_owner; /* debugging */
-
- /* maximum possible file size, might change as OSTs status changes,
- * e.g. disconnected, deactivated
- */
- __u64 lsm_maxbytes;
- struct ost_id lsm_oi;
- __u32 lsm_magic;
- __u32 lsm_stripe_size;
- __u32 lsm_pattern; /* striping pattern (RAID0, RAID1) */
- __u16 lsm_stripe_count;
- __u16 lsm_layout_gen;
- char lsm_pool_name[LOV_MAXPOOLNAME + 1];
- struct lov_oinfo *lsm_oinfo[0];
-};
-
-static inline bool lsm_is_released(struct lov_stripe_md *lsm)
-{
- return !!(lsm->lsm_pattern & LOV_PATTERN_F_RELEASED);
-}
-
-static inline bool lsm_has_objects(struct lov_stripe_md *lsm)
-{
- if (!lsm)
- return false;
- if (lsm_is_released(lsm))
- return false;
- return true;
-}
-
-static inline int lov_stripe_md_size(unsigned int stripe_count)
-{
- struct lov_stripe_md lsm;
-
- return sizeof(lsm) + stripe_count * sizeof(lsm.lsm_oinfo[0]);
-}
-
+struct lov_stripe_md;
struct obd_info;
typedef int (*obd_enqueue_update_f)(void *cookie, int rc);
/* obd info for a particular level (lov, osc). */
struct obd_info {
- /* Flags used for set request specific flags:
- - while lock handling, the flags obtained on the enqueue
- request are set here.
- - while stats, the flags used for control delay/resend.
- - while setattr, the flags used for distinguish punch operation
- */
+ /* OBD_STATFS_* flags */
__u64 oi_flags;
/* lsm data specific for every OSC. */
struct lov_stripe_md *oi_md;
- /* obdo data specific for every OSC, if needed at all. */
- struct obdo *oi_oa;
/* statfs data specific for every OSC, if needed at all. */
struct obd_statfs *oi_osfs;
/* An update callback which is called to update some data on upper
@@ -204,7 +151,6 @@ enum obd_cl_sem_lock_class {
* on the MDS.
*/
#define OBD_MAX_DEFAULT_EA_SIZE 4096
-#define OBD_MAX_DEFAULT_COOKIE_SIZE 4096
struct mdc_rpc_lock;
struct obd_import;
@@ -214,7 +160,7 @@ struct client_obd {
struct obd_import *cl_import; /* ptlrpc connection state */
size_t cl_conn_count;
/*
- * Cache maximum and default values for easize and cookiesize. This is
+ * Cache maximum and default values for easize. This is
* strictly a performance optimization to minimize calls to
* obd_size_diskmd(). The default values are used to calculate the
* initial size of a request buffer. The ptlrpc layer will resize the
@@ -235,18 +181,6 @@ struct client_obd {
* run-time if a larger observed size is advertised by the MDT.
*/
u32 cl_max_mds_easize;
- /* Default cookie size for llog cookies (see struct llog_cookie). It is
- * initialized to zero at mount-time, then it tracks the largest
- * observed cookie size advertised by the MDT, up to a maximum value of
- * OBD_MAX_DEFAULT_COOKIE_SIZE. Note that llog_cookies are not
- * used by clients communicating with MDS versions 2.4.0 and later.
- */
- u32 cl_default_mds_cookiesize;
- /* Maximum possible cookie size computed at mount-time based on
- * the number of OSTs in the filesystem. May be increased at
- * run-time if a larger observed size is advertised by the MDT.
- */
- u32 cl_max_mds_cookiesize;
enum lustre_sec_part cl_sp_me;
enum lustre_sec_part cl_sp_to;
@@ -313,15 +247,42 @@ struct client_obd {
struct obd_histogram cl_read_offset_hist;
struct obd_histogram cl_write_offset_hist;
- /* lru for osc caching pages */
+ /* LRU for osc caching pages */
struct cl_client_cache *cl_cache;
- struct list_head cl_lru_osc; /* member of cl_cache->ccc_lru */
+ /** member of cl_cache->ccc_lru */
+ struct list_head cl_lru_osc;
+ /** # of available LRU slots left in the per-OSC cache.
+ * Available LRU slots are shared by all OSCs of the same file system,
+ * therefore this is a pointer to cl_client_cache::ccc_lru_left.
+ */
atomic_long_t *cl_lru_left;
+ /** # of busy LRU pages. A page is considered busy if it is in the
+ * writeback queue or in transfer. Busy pages can't be discarded, so
+ * they are not in the LRU cache.
+ */
atomic_long_t cl_lru_busy;
+ /** # of LRU pages in the cache for this client_obd */
atomic_long_t cl_lru_in_list;
+ /** # of threads that are shrinking the LRU cache. To avoid contention,
+ * multiple threads are not allowed to shrink the LRU cache at once.
+ */
atomic_t cl_lru_shrinkers;
- struct list_head cl_lru_list; /* lru page list */
- spinlock_t cl_lru_list_lock; /* page list protector */
+ /** The time when this LRU cache was last used. */
+ time64_t cl_lru_last_used;
+ /** stats: how many reclaims have happened for this client_obd.
+ * Reclaim vs. shrink: shrink is asynchronous, voluntary rebalancing;
+ * reclaim is synchronous, initiated by an I/O thread when LRU slots
+ * are in short supply.
+ */
+ u64 cl_lru_reclaim;
+ /** List of LRU pages for this client_obd */
+ struct list_head cl_lru_list;
+ /** Lock for LRU page list */
+ spinlock_t cl_lru_list_lock;
+ /** # of unstable pages in this client_obd.
+ * An unstable page is one whose WRITE RPC has finished but whose
+ * transaction has NOT yet committed.
+ */
atomic_long_t cl_unstable_count;
/* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
@@ -329,7 +290,17 @@ struct client_obd {
wait_queue_head_t cl_destroy_waitq;
struct mdc_rpc_lock *cl_rpc_lock;
- struct mdc_rpc_lock *cl_close_lock;
+
+ /* modify rpcs in flight
+ * currently used for metadata only
+ */
+ spinlock_t cl_mod_rpcs_lock;
+ u16 cl_max_mod_rpcs_in_flight;
+ u16 cl_mod_rpcs_in_flight;
+ u16 cl_close_rpcs_in_flight;
+ wait_queue_head_t cl_mod_rpcs_waitq;
+ unsigned long *cl_mod_tag_bitmap;
+ struct obd_histogram cl_mod_rpcs_hist;
/* mgc datastruct */
atomic_t cl_mgc_refcount;
@@ -345,13 +316,6 @@ struct client_obd {
/* also protected by the poorly named _loi_list_lock lock above */
struct osc_async_rc cl_ar;
- /* used by quotacheck when the servers are older than 2.4 */
- int cl_qchk_stat; /* quotacheck stat of the peer */
-#define CL_NOT_QUOTACHECKED 1 /* client->cl_qchk_stat init value */
-#if OBD_OCD_VERSION(2, 7, 53, 0) < LUSTRE_VERSION_CODE
-#warning "please consider removing quotacheck compatibility code"
-#endif
-
/* sequence manager */
struct lu_client_seq *cl_seq;
@@ -454,8 +418,6 @@ struct lmv_obd {
int connected;
int max_easize;
int max_def_easize;
- int max_cookiesize;
- int max_def_cookiesize;
u32 tgts_size; /* size of tgts array */
struct lmv_tgt_desc **tgts;
@@ -469,9 +431,9 @@ struct niobuf_local {
__u32 lnb_page_offset;
__u32 lnb_len;
__u32 lnb_flags;
+ int lnb_rc;
struct page *lnb_page;
void *lnb_data;
- int lnb_rc;
};
#define LUSTRE_FLD_NAME "fld"
@@ -512,21 +474,6 @@ struct niobuf_local {
/* Don't conflict with on-wire flags OBD_BRW_WRITE, etc */
#define N_LOCAL_TEMP_PAGE 0x10000000
-struct obd_trans_info {
- __u64 oti_xid;
- /* Only used on the server side for tracking acks. */
- struct oti_req_ack_lock {
- struct lustre_handle lock;
- __u32 mode;
- } oti_ack_locks[4];
- void *oti_handle;
- struct llog_cookie oti_onecookie;
- struct llog_cookie *oti_logcookies;
-
- /** VBR: versions */
- __u64 oti_pre_version;
-};
-
/*
* Events signalled through obd_notify() upcall-chain.
*/
@@ -587,15 +534,14 @@ struct lvfs_run_ctxt {
struct obd_device {
struct obd_type *obd_type;
- __u32 obd_magic;
+ u32 obd_magic; /* OBD_DEVICE_MAGIC */
+ int obd_minor; /* device number: lctl dl */
+ struct lu_device *obd_lu_dev;
/* common and UUID name of this device */
- char obd_name[MAX_OBD_NAME];
- struct obd_uuid obd_uuid;
-
- struct lu_device *obd_lu_dev;
+ struct obd_uuid obd_uuid;
+ char obd_name[MAX_OBD_NAME];
- int obd_minor;
/* bitfield modification is protected by obd_dev_lock */
unsigned long obd_attached:1, /* finished attach */
obd_set_up:1, /* finished setup */
@@ -619,22 +565,22 @@ struct obd_device {
unsigned long obd_recovery_expired:1;
/* uuid-export hash body */
struct cfs_hash *obd_uuid_hash;
- atomic_t obd_refcount;
wait_queue_head_t obd_refcount_waitq;
struct list_head obd_exports;
struct list_head obd_unlinked_exports;
struct list_head obd_delayed_exports;
+ atomic_t obd_refcount;
int obd_num_exports;
spinlock_t obd_nid_lock;
struct ldlm_namespace *obd_namespace;
struct ptlrpc_client obd_ldlm_client; /* XXX OST/MDS only */
/* a spinlock is OK for what we do now, may need a semaphore later */
spinlock_t obd_dev_lock; /* protect OBD bitfield above */
- struct mutex obd_dev_mutex;
- __u64 obd_last_committed;
spinlock_t obd_osfs_lock;
struct obd_statfs obd_osfs; /* locked by obd_osfs_lock */
__u64 obd_osfs_age;
+ u64 obd_last_committed;
+ struct mutex obd_dev_mutex;
struct lvfs_run_ctxt obd_lvfs_ctxt;
struct obd_llog_group obd_olg; /* default llog group */
struct obd_device *obd_observer;
@@ -648,12 +594,13 @@ struct obd_device {
struct lov_obd lov;
struct lmv_obd lmv;
} u;
+
/* Fields used by LProcFS */
- unsigned int obd_cntr_base;
- struct lprocfs_stats *obd_stats;
+ struct lprocfs_stats *obd_stats;
+ unsigned int obd_cntr_base;
- unsigned int md_cntr_base;
- struct lprocfs_stats *md_stats;
+ struct lprocfs_stats *md_stats;
+ unsigned int md_cntr_base;
struct dentry *obd_debugfs_entry;
struct dentry *obd_svc_debugfs_entry;
@@ -665,9 +612,11 @@ struct obd_device {
/**
* Ldlm pool part. Save last calculated SLV and Limit.
*/
- rwlock_t obd_pool_lock;
- int obd_pool_limit;
- __u64 obd_pool_slv;
+ rwlock_t obd_pool_lock;
+ u64 obd_pool_slv;
+ int obd_pool_limit;
+
+ int obd_conn_inprogress;
/**
* A list of outstanding class_incref()'s against this obd. For
@@ -675,19 +624,10 @@ struct obd_device {
*/
struct lu_ref obd_reference;
- int obd_conn_inprogress;
-
struct kobject obd_kobj; /* sysfs object */
struct completion obd_kobj_unregister;
};
-enum obd_cleanup_stage {
-/* Special case hack for MDS LOVs */
- OBD_CLEANUP_EARLY,
-/* can be directly mapped to .ldto_device_fini() */
- OBD_CLEANUP_EXPORTS,
-};
-
/* get/set_info keys */
#define KEY_ASYNC "async"
#define KEY_CHANGELOG_CLEAR "changelog_clear"
@@ -704,7 +644,6 @@ enum obd_cleanup_stage {
#define KEY_INTERMDS "inter_mds"
#define KEY_LAST_ID "last_id"
#define KEY_LAST_FID "last_fid"
-#define KEY_LOVDESC "lovdesc"
#define KEY_MAX_EASIZE "max_easize"
#define KEY_DEFAULT_EASIZE "default_easize"
#define KEY_MGSSEC "mgssec"
@@ -720,22 +659,6 @@ enum obd_cleanup_stage {
struct lu_context;
-/* /!\ must be coherent with include/linux/namei.h on patched kernel */
-#define IT_OPEN (1 << 0)
-#define IT_CREAT (1 << 1)
-#define IT_READDIR (1 << 2)
-#define IT_GETATTR (1 << 3)
-#define IT_LOOKUP (1 << 4)
-#define IT_UNLINK (1 << 5)
-#define IT_TRUNC (1 << 6)
-#define IT_GETXATTR (1 << 7)
-#define IT_EXEC (1 << 8)
-#define IT_PIN (1 << 9)
-#define IT_LAYOUT (1 << 10)
-#define IT_QUOTA_DQACQ (1 << 11)
-#define IT_QUOTA_CONN (1 << 12)
-#define IT_SETXATTR (1 << 13)
-
static inline int it_to_lock_mode(struct lookup_intent *it)
{
/* CREAT needs to be tested before open (both could be set) */
@@ -755,6 +678,14 @@ static inline int it_to_lock_mode(struct lookup_intent *it)
return -EINVAL;
}
+enum md_op_flags {
+ MF_MDC_CANCEL_FID1 = BIT(0),
+ MF_MDC_CANCEL_FID2 = BIT(1),
+ MF_MDC_CANCEL_FID3 = BIT(2),
+ MF_MDC_CANCEL_FID4 = BIT(3),
+ MF_GET_MDT_IDX = BIT(4),
+};
+
enum md_cli_flags {
CLI_SET_MEA = BIT(0),
CLI_RM_ENTRY = BIT(1),
@@ -789,8 +720,6 @@ struct md_op_data {
__u64 op_valid;
loff_t op_attr_blocks;
- /* Size-on-MDS epoch and flags. */
- __u64 op_ioepoch;
__u32 op_flags;
/* Various operation flags. */
@@ -839,15 +768,13 @@ struct obd_ops {
int (*iocontrol)(unsigned int cmd, struct obd_export *exp, int len,
void *karg, void __user *uarg);
int (*get_info)(const struct lu_env *env, struct obd_export *,
- __u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *lsm);
+ __u32 keylen, void *key, __u32 *vallen, void *val);
int (*set_info_async)(const struct lu_env *, struct obd_export *,
__u32 keylen, void *key,
__u32 vallen, void *val,
struct ptlrpc_request_set *set);
int (*setup)(struct obd_device *dev, struct lustre_cfg *cfg);
- int (*precleanup)(struct obd_device *dev,
- enum obd_cleanup_stage cleanup_stage);
+ int (*precleanup)(struct obd_device *dev);
int (*cleanup)(struct obd_device *dev);
int (*process_config)(struct obd_device *dev, u32 len, void *data);
int (*postrecov)(struct obd_device *dev);
@@ -887,35 +814,23 @@ struct obd_ops {
struct obd_statfs *osfs, __u64 max_age, __u32 flags);
int (*statfs_async)(struct obd_export *exp, struct obd_info *oinfo,
__u64 max_age, struct ptlrpc_request_set *set);
- int (*packmd)(struct obd_export *exp, struct lov_mds_md **disk_tgt,
- struct lov_stripe_md *mem_src);
- int (*unpackmd)(struct obd_export *exp,
- struct lov_stripe_md **mem_tgt,
- struct lov_mds_md *disk_src, int disk_len);
int (*create)(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa, struct obd_trans_info *oti);
+ struct obdo *oa);
int (*destroy)(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa, struct obd_trans_info *oti);
+ struct obdo *oa);
int (*setattr)(const struct lu_env *, struct obd_export *exp,
- struct obd_info *oinfo, struct obd_trans_info *oti);
- int (*setattr_async)(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct ptlrpc_request_set *rqset);
+ struct obdo *oa);
int (*getattr)(const struct lu_env *env, struct obd_export *exp,
- struct obd_info *oinfo);
- int (*getattr_async)(struct obd_export *exp, struct obd_info *oinfo,
- struct ptlrpc_request_set *set);
+ struct obdo *oa);
int (*preprw)(const struct lu_env *env, int cmd,
struct obd_export *exp, struct obdo *oa, int objcount,
struct obd_ioobj *obj, struct niobuf_remote *remote,
- int *nr_pages, struct niobuf_local *local,
- struct obd_trans_info *oti);
+ int *nr_pages, struct niobuf_local *local);
int (*commitrw)(const struct lu_env *env, int cmd,
struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj,
struct niobuf_remote *remote, int pages,
- struct niobuf_local *local,
- struct obd_trans_info *oti, int rc);
+ struct niobuf_local *local, int rc);
int (*init_export)(struct obd_export *exp);
int (*destroy_export)(struct obd_export *exp);
@@ -930,8 +845,6 @@ struct obd_ops {
struct obd_uuid *(*get_uuid)(struct obd_export *exp);
/* quota methods */
- int (*quotacheck)(struct obd_device *, struct obd_export *,
- struct obd_quotactl *);
int (*quotactl)(struct obd_device *, struct obd_export *,
struct obd_quotactl *);
@@ -954,7 +867,7 @@ struct obd_ops {
/* lmv structures */
struct lustre_md {
struct mdt_body *body;
- struct lov_stripe_md *lsm;
+ struct lu_buf layout;
struct lmv_stripe_md *lmv;
#ifdef CONFIG_FS_POSIX_ACL
struct posix_acl *posix_acl;
@@ -992,10 +905,8 @@ struct md_ops {
int (*create)(struct obd_export *, struct md_op_data *,
const void *, size_t, umode_t, uid_t, gid_t,
cfs_cap_t, __u64, struct ptlrpc_request **);
- int (*done_writing)(struct obd_export *, struct md_op_data *,
- struct md_open_data *);
int (*enqueue)(struct obd_export *, struct ldlm_enqueue_info *,
- const ldlm_policy_data_t *,
+ const union ldlm_policy_data *,
struct lookup_intent *, struct md_op_data *,
struct lustre_handle *, __u64);
int (*getattr)(struct obd_export *, struct md_op_data *,
@@ -1012,8 +923,7 @@ struct md_ops {
const char *, size_t, const char *, size_t,
struct ptlrpc_request **);
int (*setattr)(struct obd_export *, struct md_op_data *, void *,
- size_t, void *, size_t, struct ptlrpc_request **,
- struct md_open_data **mod);
+ size_t, struct ptlrpc_request **);
int (*sync)(struct obd_export *, const struct lu_fid *,
struct ptlrpc_request **);
int (*read_page)(struct obd_export *, struct md_op_data *,
@@ -1030,7 +940,7 @@ struct md_ops {
u64, const char *, const char *, int, int, int,
struct ptlrpc_request **);
- int (*init_ea_size)(struct obd_export *, u32, u32, u32, u32);
+ int (*init_ea_size)(struct obd_export *, u32, u32);
int (*get_lustre_md)(struct obd_export *, struct ptlrpc_request *,
struct obd_export *, struct obd_export *,
@@ -1052,11 +962,11 @@ struct md_ops {
enum ldlm_mode (*lock_match)(struct obd_export *, __u64,
const struct lu_fid *, enum ldlm_type,
- ldlm_policy_data_t *, enum ldlm_mode,
+ union ldlm_policy_data *, enum ldlm_mode,
struct lustre_handle *);
int (*cancel_unused)(struct obd_export *, const struct lu_fid *,
- ldlm_policy_data_t *, enum ldlm_mode,
+ union ldlm_policy_data *, enum ldlm_mode,
enum ldlm_cancel_flags flags, void *opaque);
int (*get_fid_from_lsm)(struct obd_export *,
@@ -1071,6 +981,8 @@ struct md_ops {
int (*revalidate_lock)(struct obd_export *, struct lookup_intent *,
struct lu_fid *, __u64 *bits);
+ int (*unpackmd)(struct obd_export *exp, struct lmv_stripe_md **plsm,
+ const union lmv_mds_md *lmv, size_t lmv_size);
/*
* NOTE: If adding ops, add another LPROCFS_MD_OP_INIT() line to
* lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
@@ -1078,33 +990,6 @@ struct md_ops {
*/
};
-struct lsm_operations {
- void (*lsm_free)(struct lov_stripe_md *);
- void (*lsm_stripe_by_index)(struct lov_stripe_md *, int *, u64 *,
- u64 *);
- void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, u64 *,
- u64 *);
- int (*lsm_lmm_verify)(struct lov_mds_md *lmm, int lmm_bytes,
- __u16 *stripe_count);
- int (*lsm_unpackmd)(struct lov_obd *lov, struct lov_stripe_md *lsm,
- struct lov_mds_md *lmm);
-};
-
-extern const struct lsm_operations lsm_v1_ops;
-extern const struct lsm_operations lsm_v3_ops;
-static inline const struct lsm_operations *lsm_op_find(int magic)
-{
- switch (magic) {
- case LOV_MAGIC_V1:
- return &lsm_v1_ops;
- case LOV_MAGIC_V3:
- return &lsm_v3_ops;
- default:
- CERROR("Cannot recognize lsm_magic %08x\n", magic);
- return NULL;
- }
-}
-
static inline struct md_open_data *obd_mod_alloc(void)
{
struct md_open_data *mod;
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 16094dbec08b..7ec25202cd22 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -100,6 +100,13 @@ int obd_get_request_slot(struct client_obd *cli);
void obd_put_request_slot(struct client_obd *cli);
__u32 obd_get_max_rpcs_in_flight(struct client_obd *cli);
int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max);
+int obd_set_max_mod_rpcs_in_flight(struct client_obd *cli, u16 max);
+int obd_mod_rpc_stats_seq_show(struct client_obd *cli, struct seq_file *seq);
+
+u16 obd_get_mod_rpc_slot(struct client_obd *cli, u32 opc,
+ struct lookup_intent *it);
+void obd_put_mod_rpc_slot(struct client_obd *cli, u32 opc,
+ struct lookup_intent *it, u16 tag);
struct llog_handle;
struct llog_rec_hdr;
@@ -175,10 +182,13 @@ struct lustre_profile {
char *lp_profile;
char *lp_dt;
char *lp_md;
+ int lp_refs;
+ bool lp_list_deleted;
};
struct lustre_profile *class_get_profile(const char *prof);
void class_del_profile(const char *prof);
+void class_put_profile(struct lustre_profile *lprof);
void class_del_profiles(void);
#if LUSTRE_TRACKS_LOCK_EXP_REFS
@@ -269,10 +279,8 @@ static inline int lprocfs_climp_check(struct obd_device *obd)
struct inode;
struct lu_attr;
struct obdo;
-void obdo_refresh_inode(struct inode *dst, const struct obdo *src, u32 valid);
void obdo_to_ioobj(const struct obdo *oa, struct obd_ioobj *ioobj);
-void md_from_obdo(struct md_op_data *op_data, const struct obdo *oa, u32 valid);
#define OBT(dev) (dev)->obd_type
#define OBP(dev, op) (dev)->obd_type->typ_dt_ops->op
@@ -417,16 +425,14 @@ static inline int class_devno_max(void)
static inline int obd_get_info(const struct lu_env *env,
struct obd_export *exp, __u32 keylen,
- void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *lsm)
+ void *key, __u32 *vallen, void *val)
{
int rc;
EXP_CHECK_DT_OP(exp, get_info);
EXP_COUNTER_INCREMENT(exp, get_info);
- rc = OBP(exp->exp_obd, get_info)(env, exp, keylen, key, vallen, val,
- lsm);
+ rc = OBP(exp->exp_obd, get_info)(env, exp, keylen, key, vallen, val);
return rc;
}
@@ -505,8 +511,7 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
return rc;
}
-static inline int obd_precleanup(struct obd_device *obd,
- enum obd_cleanup_stage cleanup_stage)
+static inline int obd_precleanup(struct obd_device *obd)
{
int rc;
DECLARE_LU_VARS(ldt, d);
@@ -517,20 +522,18 @@ static inline int obd_precleanup(struct obd_device *obd,
ldt = obd->obd_type->typ_lu;
d = obd->obd_lu_dev;
if (ldt && d) {
- if (cleanup_stage == OBD_CLEANUP_EXPORTS) {
- struct lu_env env;
+ struct lu_env env;
- rc = lu_env_init(&env, ldt->ldt_ctx_tags);
- if (rc == 0) {
- ldt->ldt_ops->ldto_device_fini(&env, d);
- lu_env_fini(&env);
- }
+ rc = lu_env_init(&env, ldt->ldt_ctx_tags);
+ if (!rc) {
+ ldt->ldt_ops->ldto_device_fini(&env, d);
+ lu_env_fini(&env);
}
}
OBD_CHECK_DT_OP(obd, precleanup, 0);
OBD_COUNTER_INCREMENT(obd, precleanup);
- rc = OBP(obd, precleanup)(obd, cleanup_stage);
+ rc = OBP(obd, precleanup)(obd);
return rc;
}
@@ -612,181 +615,51 @@ obd_process_config(struct obd_device *obd, int datalen, void *data)
return rc;
}
-/* Pack an in-memory MD struct for storage on disk.
- * Returns +ve size of packed MD (0 for free), or -ve error.
- *
- * If @disk_tgt == NULL, MD size is returned (max size if @mem_src == NULL).
- * If @*disk_tgt != NULL and @mem_src == NULL, @*disk_tgt will be freed.
- * If @*disk_tgt == NULL, it will be allocated
- */
-static inline int obd_packmd(struct obd_export *exp,
- struct lov_mds_md **disk_tgt,
- struct lov_stripe_md *mem_src)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, packmd);
- EXP_COUNTER_INCREMENT(exp, packmd);
-
- rc = OBP(exp->exp_obd, packmd)(exp, disk_tgt, mem_src);
- return rc;
-}
-
-static inline int obd_size_diskmd(struct obd_export *exp,
- struct lov_stripe_md *mem_src)
-{
- return obd_packmd(exp, NULL, mem_src);
-}
-
-static inline int obd_free_diskmd(struct obd_export *exp,
- struct lov_mds_md **disk_tgt)
-{
- LASSERT(disk_tgt);
- LASSERT(*disk_tgt);
- /*
- * LU-2590, for caller's convenience, *disk_tgt could be host
- * endianness, it needs swab to LE if necessary, while just
- * lov_mds_md header needs it for figuring out how much memory
- * needs to be freed.
- */
- if ((cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) &&
- (((*disk_tgt)->lmm_magic == LOV_MAGIC_V1) ||
- ((*disk_tgt)->lmm_magic == LOV_MAGIC_V3)))
- lustre_swab_lov_mds_md(*disk_tgt);
- return obd_packmd(exp, disk_tgt, NULL);
-}
-
-/* Unpack an MD struct from disk to in-memory format.
- * Returns +ve size of unpacked MD (0 for free), or -ve error.
- *
- * If @mem_tgt == NULL, MD size is returned (max size if @disk_src == NULL).
- * If @*mem_tgt != NULL and @disk_src == NULL, @*mem_tgt will be freed.
- * If @*mem_tgt == NULL, it will be allocated
- */
-static inline int obd_unpackmd(struct obd_export *exp,
- struct lov_stripe_md **mem_tgt,
- struct lov_mds_md *disk_src,
- int disk_len)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, unpackmd);
- EXP_COUNTER_INCREMENT(exp, unpackmd);
-
- rc = OBP(exp->exp_obd, unpackmd)(exp, mem_tgt, disk_src, disk_len);
- return rc;
-}
-
-static inline int obd_free_memmd(struct obd_export *exp,
- struct lov_stripe_md **mem_tgt)
-{
- int rc;
-
- LASSERT(mem_tgt);
- LASSERT(*mem_tgt);
- rc = obd_unpackmd(exp, mem_tgt, NULL, 0);
- *mem_tgt = NULL;
- return rc;
-}
-
static inline int obd_create(const struct lu_env *env, struct obd_export *exp,
- struct obdo *obdo, struct obd_trans_info *oti)
+ struct obdo *obdo)
{
int rc;
EXP_CHECK_DT_OP(exp, create);
EXP_COUNTER_INCREMENT(exp, create);
- rc = OBP(exp->exp_obd, create)(env, exp, obdo, oti);
+ rc = OBP(exp->exp_obd, create)(env, exp, obdo);
return rc;
}
static inline int obd_destroy(const struct lu_env *env, struct obd_export *exp,
- struct obdo *obdo, struct obd_trans_info *oti)
+ struct obdo *obdo)
{
int rc;
EXP_CHECK_DT_OP(exp, destroy);
EXP_COUNTER_INCREMENT(exp, destroy);
- rc = OBP(exp->exp_obd, destroy)(env, exp, obdo, oti);
+ rc = OBP(exp->exp_obd, destroy)(env, exp, obdo);
return rc;
}
static inline int obd_getattr(const struct lu_env *env, struct obd_export *exp,
- struct obd_info *oinfo)
+ struct obdo *oa)
{
int rc;
EXP_CHECK_DT_OP(exp, getattr);
EXP_COUNTER_INCREMENT(exp, getattr);
- rc = OBP(exp->exp_obd, getattr)(env, exp, oinfo);
- return rc;
-}
-
-static inline int obd_getattr_async(struct obd_export *exp,
- struct obd_info *oinfo,
- struct ptlrpc_request_set *set)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, getattr_async);
- EXP_COUNTER_INCREMENT(exp, getattr_async);
-
- rc = OBP(exp->exp_obd, getattr_async)(exp, oinfo, set);
+ rc = OBP(exp->exp_obd, getattr)(env, exp, oa);
return rc;
}
static inline int obd_setattr(const struct lu_env *env, struct obd_export *exp,
- struct obd_info *oinfo,
- struct obd_trans_info *oti)
+ struct obdo *oa)
{
int rc;
EXP_CHECK_DT_OP(exp, setattr);
EXP_COUNTER_INCREMENT(exp, setattr);
- rc = OBP(exp->exp_obd, setattr)(env, exp, oinfo, oti);
- return rc;
-}
-
-/* This performs all the requests set init/wait/destroy actions. */
-static inline int obd_setattr_rqset(struct obd_export *exp,
- struct obd_info *oinfo,
- struct obd_trans_info *oti)
-{
- struct ptlrpc_request_set *set = NULL;
- int rc;
-
- EXP_CHECK_DT_OP(exp, setattr_async);
- EXP_COUNTER_INCREMENT(exp, setattr_async);
-
- set = ptlrpc_prep_set();
- if (!set)
- return -ENOMEM;
-
- rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set);
- if (rc == 0)
- rc = ptlrpc_set_wait(set);
- ptlrpc_set_destroy(set);
- return rc;
-}
-
-/* This adds all the requests into @set if @set != NULL, otherwise
- * all requests are sent asynchronously without waiting for response.
- */
-static inline int obd_setattr_async(struct obd_export *exp,
- struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct ptlrpc_request_set *set)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, setattr_async);
- EXP_COUNTER_INCREMENT(exp, setattr_async);
-
- rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set);
+ rc = OBP(exp->exp_obd, setattr)(env, exp, oa);
return rc;
}
@@ -1053,15 +926,16 @@ static inline int obd_statfs_rqset(struct obd_export *exp,
__u32 flags)
{
struct ptlrpc_request_set *set = NULL;
- struct obd_info oinfo = { };
+ struct obd_info oinfo = {
+ .oi_osfs = osfs,
+ .oi_flags = flags,
+ };
int rc = 0;
- set = ptlrpc_prep_set();
+ set = ptlrpc_prep_set();
if (!set)
return -ENOMEM;
- oinfo.oi_osfs = osfs;
- oinfo.oi_flags = flags;
rc = obd_statfs_async(exp, &oinfo, max_age, set);
if (rc == 0)
rc = ptlrpc_set_wait(set);
@@ -1112,8 +986,7 @@ static inline int obd_preprw(const struct lu_env *env, int cmd,
struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj,
struct niobuf_remote *remote, int *pages,
- struct niobuf_local *local,
- struct obd_trans_info *oti)
+ struct niobuf_local *local)
{
int rc;
@@ -1121,7 +994,7 @@ static inline int obd_preprw(const struct lu_env *env, int cmd,
EXP_COUNTER_INCREMENT(exp, preprw);
rc = OBP(exp->exp_obd, preprw)(env, cmd, exp, oa, objcount, obj, remote,
- pages, local, oti);
+ pages, local);
return rc;
}
@@ -1129,14 +1002,13 @@ static inline int obd_commitrw(const struct lu_env *env, int cmd,
struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj,
struct niobuf_remote *rnb, int pages,
- struct niobuf_local *local,
- struct obd_trans_info *oti, int rc)
+ struct niobuf_local *local, int rc)
{
EXP_CHECK_DT_OP(exp, commitrw);
EXP_COUNTER_INCREMENT(exp, commitrw);
rc = OBP(exp->exp_obd, commitrw)(env, cmd, exp, oa, objcount, obj,
- rnb, pages, local, oti, rc);
+ rnb, pages, local, rc);
return rc;
}
@@ -1219,18 +1091,6 @@ static inline int obd_notify_observer(struct obd_device *observer,
return rc1 ? rc1 : rc2;
}
-static inline int obd_quotacheck(struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, quotacheck);
- EXP_COUNTER_INCREMENT(exp, quotacheck);
-
- rc = OBP(exp->exp_obd, quotacheck)(exp->exp_obd, exp, oqctl);
- return rc;
-}
-
static inline int obd_quotactl(struct obd_export *exp,
struct obd_quotactl *oqctl)
{
@@ -1346,21 +1206,9 @@ static inline int md_create(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
-static inline int md_done_writing(struct obd_export *exp,
- struct md_op_data *op_data,
- struct md_open_data *mod)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, done_writing);
- EXP_MD_COUNTER_INCREMENT(exp, done_writing);
- rc = MDP(exp->exp_obd, done_writing)(exp, op_data, mod);
- return rc;
-}
-
static inline int md_enqueue(struct obd_export *exp,
struct ldlm_enqueue_info *einfo,
- const ldlm_policy_data_t *policy,
+ const union ldlm_policy_data *policy,
struct lookup_intent *it,
struct md_op_data *op_data,
struct lustre_handle *lockh,
@@ -1428,16 +1276,14 @@ static inline int md_rename(struct obd_export *exp, struct md_op_data *op_data,
}
static inline int md_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, size_t ealen, void *ea2, size_t ea2len,
- struct ptlrpc_request **request,
- struct md_open_data **mod)
+ void *ea, size_t ealen,
+ struct ptlrpc_request **request)
{
int rc;
EXP_CHECK_MD_OP(exp, setattr);
EXP_MD_COUNTER_INCREMENT(exp, setattr);
- rc = MDP(exp->exp_obd, setattr)(exp, op_data, ea, ealen,
- ea2, ea2len, request, mod);
+ rc = MDP(exp->exp_obd, setattr)(exp, op_data, ea, ealen, request);
return rc;
}
@@ -1561,7 +1407,7 @@ static inline int md_set_lock_data(struct obd_export *exp,
static inline int md_cancel_unused(struct obd_export *exp,
const struct lu_fid *fid,
- ldlm_policy_data_t *policy,
+ union ldlm_policy_data *policy,
enum ldlm_mode mode,
enum ldlm_cancel_flags flags,
void *opaque)
@@ -1579,7 +1425,7 @@ static inline int md_cancel_unused(struct obd_export *exp,
static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags,
const struct lu_fid *fid,
enum ldlm_type type,
- ldlm_policy_data_t *policy,
+ union ldlm_policy_data *policy,
enum ldlm_mode mode,
struct lustre_handle *lockh)
{
@@ -1589,14 +1435,12 @@ static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags,
policy, mode, lockh);
}
-static inline int md_init_ea_size(struct obd_export *exp, int easize,
- int def_asize, int cookiesize,
- int def_cookiesize)
+static inline int md_init_ea_size(struct obd_export *exp, u32 easize,
+ u32 def_asize)
{
EXP_CHECK_MD_OP(exp, init_ea_size);
EXP_MD_COUNTER_INCREMENT(exp, init_ea_size);
- return MDP(exp->exp_obd, init_ea_size)(exp, easize, def_asize,
- cookiesize, def_cookiesize);
+ return MDP(exp->exp_obd, init_ea_size)(exp, easize, def_asize);
}
static inline int md_intent_getattr_async(struct obd_export *exp,
@@ -1636,6 +1480,24 @@ static inline int md_get_fid_from_lsm(struct obd_export *exp,
return rc;
}
+/* Unpack an MD struct from disk to in-memory format.
+ * Returns +ve size of unpacked MD (0 for free), or -ve error.
+ *
+ * If *plsm != NULL and lmm == NULL then *plsm will be freed.
+ * If *plsm == NULL then it will be allocated.
+ */
+static inline int md_unpackmd(struct obd_export *exp,
+ struct lmv_stripe_md **plsm,
+ const union lmv_mds_md *lmm, size_t lmm_size)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, unpackmd);
+ EXP_MD_COUNTER_INCREMENT(exp, unpackmd);
+ rc = MDP(exp->exp_obd, unpackmd)(exp, plsm, lmm, lmm_size);
+ return rc;
+}
+
/* OBD Metadata Support */
int obd_init_caches(void);
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index b346a7f10aa4..aaedec7d793c 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -172,14 +172,14 @@ extern char obd_jobid_var[];
#define OBD_FAIL_MDS_ALL_REQUEST_NET 0x123
#define OBD_FAIL_MDS_SYNC_NET 0x124
#define OBD_FAIL_MDS_SYNC_PACK 0x125
-#define OBD_FAIL_MDS_DONE_WRITING_NET 0x126
-#define OBD_FAIL_MDS_DONE_WRITING_PACK 0x127
+/* OBD_FAIL_MDS_DONE_WRITING_NET 0x126 obsolete since 2.8.0 */
+/* OBD_FAIL_MDS_DONE_WRITING_PACK 0x127 obsolete since 2.8.0 */
#define OBD_FAIL_MDS_ALLOC_OBDO 0x128
#define OBD_FAIL_MDS_PAUSE_OPEN 0x129
#define OBD_FAIL_MDS_STATFS_LCW_SLEEP 0x12a
#define OBD_FAIL_MDS_OPEN_CREATE 0x12b
#define OBD_FAIL_MDS_OST_SETATTR 0x12c
-#define OBD_FAIL_MDS_QUOTACHECK_NET 0x12d
+/* OBD_FAIL_MDS_QUOTACHECK_NET 0x12d obsolete since 2.4 */
#define OBD_FAIL_MDS_QUOTACTL_NET 0x12e
#define OBD_FAIL_MDS_CLIENT_ADD 0x12f
#define OBD_FAIL_MDS_GETXATTR_NET 0x130
@@ -264,7 +264,7 @@ extern char obd_jobid_var[];
#define OBD_FAIL_OST_ENOSPC 0x215
#define OBD_FAIL_OST_EROFS 0x216
#define OBD_FAIL_OST_ENOENT 0x217
-#define OBD_FAIL_OST_QUOTACHECK_NET 0x218
+/* OBD_FAIL_OST_QUOTACHECK_NET 0x218 obsolete since 2.4 */
#define OBD_FAIL_OST_QUOTACTL_NET 0x219
#define OBD_FAIL_OST_CHECKSUM_RECEIVE 0x21a
#define OBD_FAIL_OST_CHECKSUM_SEND 0x21b
@@ -321,6 +321,8 @@ extern char obd_jobid_var[];
#define OBD_FAIL_LDLM_CP_CB_WAIT4 0x322
#define OBD_FAIL_LDLM_CP_CB_WAIT5 0x323
+#define OBD_FAIL_LDLM_GRANT_CHECK 0x32a
+
/* LOCKLESS IO */
#define OBD_FAIL_LDLM_SET_CONTENTION 0x385
@@ -343,6 +345,7 @@ extern char obd_jobid_var[];
#define OBD_FAIL_OSC_CP_ENQ_RACE 0x410
#define OBD_FAIL_OSC_NO_GRANT 0x411
#define OBD_FAIL_OSC_DELAY_SETTIME 0x412
+#define OBD_FAIL_OSC_DELAY_IO 0x414
#define OBD_FAIL_PTLRPC 0x500
#define OBD_FAIL_PTLRPC_ACK 0x501
@@ -373,7 +376,7 @@ extern char obd_jobid_var[];
#define OBD_FAIL_OBD_PING_NET 0x600
#define OBD_FAIL_OBD_LOG_CANCEL_NET 0x601
#define OBD_FAIL_OBD_LOGD_NET 0x602
-#define OBD_FAIL_OBD_QC_CALLBACK_NET 0x603
+/* OBD_FAIL_OBD_QC_CALLBACK_NET 0x603 obsolete since 2.4 */
#define OBD_FAIL_OBD_DQACQ 0x604
#define OBD_FAIL_OBD_LLOG_SETUP 0x605
#define OBD_FAIL_OBD_LOG_CANCEL_REP 0x606
@@ -458,6 +461,8 @@ extern char obd_jobid_var[];
#define OBD_FAIL_LOV_INIT 0x1403
#define OBD_FAIL_GLIMPSE_DELAY 0x1404
#define OBD_FAIL_LLITE_XATTR_ENOMEM 0x1405
+#define OBD_FAIL_MAKE_LOVEA_HOLE 0x1406
+#define OBD_FAIL_LLITE_LOST_LAYOUT 0x1407
#define OBD_FAIL_GETATTR_DELAY 0x1409
#define OBD_FAIL_FID_INDIR 0x1501
diff --git a/drivers/staging/lustre/lustre/include/seq_range.h b/drivers/staging/lustre/lustre/include/seq_range.h
new file mode 100644
index 000000000000..30c4dd66d5c4
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/seq_range.h
@@ -0,0 +1,199 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2014, Intel Corporation.
+ *
+ * Copyright 2015 Cray Inc, all rights reserved.
+ * Author: Ben Evans.
+ *
+ * Define lu_seq_range associated functions
+ */
+
+#ifndef _SEQ_RANGE_H_
+#define _SEQ_RANGE_H_
+
+#include "lustre/lustre_idl.h"
+
+/**
+ * computes the sequence range type \a range
+ */
+
+static inline unsigned int fld_range_type(const struct lu_seq_range *range)
+{
+ return range->lsr_flags & LU_SEQ_RANGE_MASK;
+}
+
+/**
+ * Is this sequence range an OST? \a range
+ */
+
+static inline bool fld_range_is_ost(const struct lu_seq_range *range)
+{
+ return fld_range_type(range) == LU_SEQ_RANGE_OST;
+}
+
+/**
+ * Is this sequence range an MDT? \a range
+ */
+
+static inline bool fld_range_is_mdt(const struct lu_seq_range *range)
+{
+ return fld_range_type(range) == LU_SEQ_RANGE_MDT;
+}
+
+/**
+ * ANY range is only used when the fld client sends a fld query request,
+ * but it does not know whether the seq is an MDT or OST, so it will send the
+ * request with ANY type, which means any seq type from the lookup can be
+ * expected. \a range
+ */
+static inline unsigned int fld_range_is_any(const struct lu_seq_range *range)
+{
+ return fld_range_type(range) == LU_SEQ_RANGE_ANY;
+}
+
+/**
+ * Apply flags to range \a range \a flags
+ */
+
+static inline void fld_range_set_type(struct lu_seq_range *range,
+ unsigned int flags)
+{
+ range->lsr_flags |= flags;
+}
+
+/**
+ * Add MDT to range type \a range
+ */
+
+static inline void fld_range_set_mdt(struct lu_seq_range *range)
+{
+ fld_range_set_type(range, LU_SEQ_RANGE_MDT);
+}
+
+/**
+ * Add OST to range type \a range
+ */
+
+static inline void fld_range_set_ost(struct lu_seq_range *range)
+{
+ fld_range_set_type(range, LU_SEQ_RANGE_OST);
+}
+
+/**
+ * Add ANY to range type \a range
+ */
+
+static inline void fld_range_set_any(struct lu_seq_range *range)
+{
+ fld_range_set_type(range, LU_SEQ_RANGE_ANY);
+}
+
+/**
+ * computes width of given sequence range \a range
+ */
+
+static inline u64 lu_seq_range_space(const struct lu_seq_range *range)
+{
+ return range->lsr_end - range->lsr_start;
+}
+
+/**
+ * initialize range to zero \a range
+ */
+
+static inline void lu_seq_range_init(struct lu_seq_range *range)
+{
+ memset(range, 0, sizeof(*range));
+}
+
+/**
+ * check if given seq id \a seq is within given range \a range
+ */
+
+static inline bool lu_seq_range_within(const struct lu_seq_range *range,
+ u64 seq)
+{
+ return seq >= range->lsr_start && seq < range->lsr_end;
+}
+
+/**
+ * Is the range sane? Is the end after the beginning? \a range
+ */
+
+static inline bool lu_seq_range_is_sane(const struct lu_seq_range *range)
+{
+ return range->lsr_end >= range->lsr_start;
+}
+
+/**
+ * Is the range 0? \a range
+ */
+
+static inline bool lu_seq_range_is_zero(const struct lu_seq_range *range)
+{
+ return range->lsr_start == 0 && range->lsr_end == 0;
+}
+
+/**
+ * Is the range out of space? \a range
+ */
+
+static inline bool lu_seq_range_is_exhausted(const struct lu_seq_range *range)
+{
+ return lu_seq_range_space(range) == 0;
+}
+
+/**
+ * Return 0 if the two ranges \a r1 and \a r2 have the same location,
+ * nonzero if they differ
+ */
+
+static inline int lu_seq_range_compare_loc(const struct lu_seq_range *r1,
+ const struct lu_seq_range *r2)
+{
+ return r1->lsr_index != r2->lsr_index ||
+ r1->lsr_flags != r2->lsr_flags;
+}
+
+#if !defined(__REQ_LAYOUT_USER__)
+/**
+ * byte swap range structure \a range
+ */
+
+void lustre_swab_lu_seq_range(struct lu_seq_range *range);
+#endif
+/**
+ * printf string and argument list for sequence range
+ */
+#define DRANGE "[%#16.16llx-%#16.16llx]:%x:%s"
+
+#define PRANGE(range) \
+ (range)->lsr_start, \
+ (range)->lsr_end, \
+ (range)->lsr_index, \
+ fld_range_is_mdt(range) ? "mdt" : "ost"
+
+#endif
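
The helpers and macros in this new header are easiest to understand from a caller's point of view. The fragment below is an illustrative sketch only, not part of the patch; it assumes the usual libcfs CDEBUG()/D_INFO debugging macros, which this header does not itself pull in.

/* Illustrative sketch, not part of the patch: classify a sequence against
 * a range and print the range with the DRANGE/PRANGE helpers above.
 */
static bool seq_is_on_mdt(const struct lu_seq_range *range, u64 seq)
{
	if (!lu_seq_range_is_sane(range) || lu_seq_range_is_zero(range))
		return false;
	if (!lu_seq_range_within(range, seq))
		return false;
	CDEBUG(D_INFO, "seq %#llx falls in " DRANGE "\n", seq, PRANGE(range));
	return fld_range_is_mdt(range);
}
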
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index ecf472e4813d..32b73ee62639 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -193,6 +193,26 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
* add the locks into grant list, for debug purpose, ..
*/
ldlm_resource_add_lock(res, &res->lr_granted, lock);
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GRANT_CHECK)) {
+ struct ldlm_lock *lck;
+
+ list_for_each_entry_reverse(lck, &res->lr_granted,
+ l_res_link) {
+ if (lck == lock)
+ continue;
+ if (lockmode_compat(lck->l_granted_mode,
+ lock->l_granted_mode))
+ continue;
+ if (ldlm_extent_overlap(&lck->l_req_extent,
+ &lock->l_req_extent)) {
+ CDEBUG(D_ERROR, "granting conflicting lock %p %p\n",
+ lck, lock);
+ ldlm_resource_dump(D_ERROR, res);
+ LBUG();
+ }
+ }
+ }
}
/** Remove cancelled lock from resource interval tree. */
@@ -220,8 +240,8 @@ void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
}
}
-void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
+void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy)
{
memset(lpolicy, 0, sizeof(*lpolicy));
lpolicy->l_extent.start = wpolicy->l_extent.start;
@@ -229,8 +249,8 @@ void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
lpolicy->l_extent.gid = wpolicy->l_extent.gid;
}
-void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
+void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy)
{
memset(wpolicy, 0, sizeof(*wpolicy));
wpolicy->l_extent.start = lpolicy->l_extent.start;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index 861f36f039b5..722160784f83 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -612,22 +612,8 @@ granted:
}
EXPORT_SYMBOL(ldlm_flock_completion_ast);
-void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
-{
- memset(lpolicy, 0, sizeof(*lpolicy));
- lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
- lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
- lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
- /* Compat code, old clients had no idea about owner field and
- * relied solely on pid for ownership. Introduced in LU-104, 2.1,
- * April 2011
- */
- lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
-}
-
-void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
+void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy)
{
memset(lpolicy, 0, sizeof(*lpolicy));
lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
@@ -636,8 +622,8 @@ void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
}
-void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
+void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy)
{
memset(wpolicy, 0, sizeof(*wpolicy));
wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
index 79f4e6fa193e..8e1709dc073c 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
@@ -54,15 +54,15 @@
#include "../include/lustre_lib.h"
#include "ldlm_internal.h"
-void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
+void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy)
{
memset(lpolicy, 0, sizeof(*lpolicy));
lpolicy->l_inodebits.bits = wpolicy->l_inodebits.bits;
}
-void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
+void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy)
{
memset(wpolicy, 0, sizeof(*wpolicy));
wpolicy->l_inodebits.bits = lpolicy->l_inodebits.bits;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index 5e82cfc245b2..5c02501d0560 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -39,13 +39,13 @@ extern struct list_head ldlm_srv_namespace_list;
extern struct mutex ldlm_cli_namespace_lock;
extern struct list_head ldlm_cli_active_namespace_list;
-static inline int ldlm_namespace_nr_read(ldlm_side_t client)
+static inline int ldlm_namespace_nr_read(enum ldlm_side client)
{
return client == LDLM_NAMESPACE_SERVER ?
ldlm_srv_namespace_nr : ldlm_cli_namespace_nr;
}
-static inline void ldlm_namespace_nr_inc(ldlm_side_t client)
+static inline void ldlm_namespace_nr_inc(enum ldlm_side client)
{
if (client == LDLM_NAMESPACE_SERVER)
ldlm_srv_namespace_nr++;
@@ -53,7 +53,7 @@ static inline void ldlm_namespace_nr_inc(ldlm_side_t client)
ldlm_cli_namespace_nr++;
}
-static inline void ldlm_namespace_nr_dec(ldlm_side_t client)
+static inline void ldlm_namespace_nr_dec(enum ldlm_side client)
{
if (client == LDLM_NAMESPACE_SERVER)
ldlm_srv_namespace_nr--;
@@ -61,13 +61,13 @@ static inline void ldlm_namespace_nr_dec(ldlm_side_t client)
ldlm_cli_namespace_nr--;
}
-static inline struct list_head *ldlm_namespace_list(ldlm_side_t client)
+static inline struct list_head *ldlm_namespace_list(enum ldlm_side client)
{
return client == LDLM_NAMESPACE_SERVER ?
&ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list;
}
-static inline struct mutex *ldlm_namespace_lock(ldlm_side_t client)
+static inline struct mutex *ldlm_namespace_lock(enum ldlm_side client)
{
return client == LDLM_NAMESPACE_SERVER ?
&ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
@@ -79,22 +79,23 @@ static inline int ldlm_ns_empty(struct ldlm_namespace *ns)
return atomic_read(&ns->ns_bref) == 0;
}
-void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *, ldlm_side_t);
+void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *,
+ enum ldlm_side);
void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *,
- ldlm_side_t);
-struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t);
+ enum ldlm_side);
+struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side);
/* ldlm_request.c */
/* Cancel lru flag, it indicates we cancel aged locks. */
enum {
- LDLM_CANCEL_AGED = 1 << 0, /* Cancel aged locks (non lru resize). */
- LDLM_CANCEL_PASSED = 1 << 1, /* Cancel passed number of locks. */
- LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */
- LDLM_CANCEL_LRUR = 1 << 3, /* Cancel locks from lru resize. */
- LDLM_CANCEL_NO_WAIT = 1 << 4, /* Cancel locks w/o blocking (neither
- * sending nor waiting for any rpcs)
- */
- LDLM_CANCEL_LRUR_NO_WAIT = 1 << 5, /* LRUR + NO_WAIT */
+ LDLM_LRU_FLAG_AGED = BIT(0), /* Cancel aged locks (non lru resize). */
+ LDLM_LRU_FLAG_PASSED = BIT(1), /* Cancel passed number of locks. */
+ LDLM_LRU_FLAG_SHRINK = BIT(2), /* Cancel locks from shrinker. */
+ LDLM_LRU_FLAG_LRUR = BIT(3), /* Cancel locks from lru resize. */
+ LDLM_LRU_FLAG_NO_WAIT = BIT(4), /* Cancel locks w/o blocking (neither
+ * sending nor waiting for any rpcs)
+ */
+ LDLM_LRU_FLAG_LRUR_NO_WAIT = BIT(5), /* LRUR + NO_WAIT */
};
int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
@@ -137,10 +138,10 @@ ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *,
void *data, __u32 lvb_len, enum lvb_type lvb_type);
enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **,
void *cookie, __u64 *flags);
-void ldlm_lock_addref_internal(struct ldlm_lock *, __u32 mode);
-void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, __u32 mode);
-void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode);
-void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, __u32 mode);
+void ldlm_lock_addref_internal(struct ldlm_lock *, enum ldlm_mode mode);
+void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
+void ldlm_lock_decref_internal(struct ldlm_lock *, enum ldlm_mode mode);
+void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
enum ldlm_desc_ast_t ast_type);
int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use);
@@ -311,28 +312,25 @@ static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
return ret;
}
-typedef void (*ldlm_policy_wire_to_local_t)(const ldlm_wire_policy_data_t *,
- ldlm_policy_data_t *);
-
-typedef void (*ldlm_policy_local_to_wire_t)(const ldlm_policy_data_t *,
- ldlm_wire_policy_data_t *);
-
-void ldlm_plain_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-void ldlm_plain_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
-void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
-void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
-void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-
-void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
+typedef void (*ldlm_policy_wire_to_local_t)(const union ldlm_wire_policy_data *,
+ union ldlm_policy_data *);
+
+typedef void (*ldlm_policy_local_to_wire_t)(const union ldlm_policy_data *,
+ union ldlm_wire_policy_data *);
+
+void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy);
+void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy);
+void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy);
+void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy);
+void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy);
+void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy);
+void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy);
+void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy);
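
With the LDLM_CANCEL_* values above renamed to LDLM_LRU_FLAG_* bits, a short caller sketch may help; it is illustrative only and simply mirrors how ldlm_cancel_lru() is invoked in the pool and resource code later in this patch.

/* Illustrative sketch, not part of the patch: asynchronously cancel up to
 * 32 unused locks, choosing the LRU policy flag the same way the callers
 * in this series do.
 */
static void example_trim_lru(struct ldlm_namespace *ns)
{
	int flags = ns_connect_lru_resize(ns) ?
		    LDLM_LRU_FLAG_LRUR : LDLM_LRU_FLAG_AGED;

	ldlm_cancel_lru(ns, 32, LCF_ASYNC, flags);
}
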
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 153e990c494e..9be01426c955 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -170,6 +170,9 @@ int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
ptlrpc_connection_put(dlmexp->exp_connection);
dlmexp->exp_connection = NULL;
}
+
+ if (dlmexp)
+ class_export_put(dlmexp);
}
list_del(&imp_conn->oic_item);
@@ -372,6 +375,25 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
} else {
cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT;
}
+
+ spin_lock_init(&cli->cl_mod_rpcs_lock);
+ spin_lock_init(&cli->cl_mod_rpcs_hist.oh_lock);
+ cli->cl_max_mod_rpcs_in_flight = 0;
+ cli->cl_mod_rpcs_in_flight = 0;
+ cli->cl_close_rpcs_in_flight = 0;
+ init_waitqueue_head(&cli->cl_mod_rpcs_waitq);
+ cli->cl_mod_tag_bitmap = NULL;
+
+ if (connect_op == MDS_CONNECT) {
+ cli->cl_max_mod_rpcs_in_flight = cli->cl_max_rpcs_in_flight - 1;
+ cli->cl_mod_tag_bitmap = kcalloc(BITS_TO_LONGS(OBD_MAX_RIF_MAX),
+ sizeof(long), GFP_NOFS);
+ if (!cli->cl_mod_tag_bitmap) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ }
+
rc = ldlm_get_ref();
if (rc) {
CERROR("ldlm_get_ref failed: %d\n", rc);
@@ -399,9 +421,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
}
cli->cl_import = imp;
- /* cli->cl_max_mds_{easize,cookiesize} updated by mdc_init_ea_size() */
+ /* cli->cl_max_mds_easize updated by mdc_init_ea_size() */
cli->cl_max_mds_easize = sizeof(struct lov_mds_md_v3);
- cli->cl_max_mds_cookiesize = sizeof(struct llog_cookie);
if (LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
if (!strcmp(lustre_cfg_string(lcfg, 3), "inactive")) {
@@ -425,8 +446,6 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
goto err_import;
}
- cli->cl_qchk_stat = CL_NOT_QUOTACHECKED;
-
return rc;
err_import:
@@ -434,12 +453,16 @@ err_import:
err_ldlm:
ldlm_put_ref();
err:
+ kfree(cli->cl_mod_tag_bitmap);
+ cli->cl_mod_tag_bitmap = NULL;
return rc;
}
EXPORT_SYMBOL(client_obd_setup);
int client_obd_cleanup(struct obd_device *obddev)
{
+ struct client_obd *cli = &obddev->u.cli;
+
ldlm_namespace_free_post(obddev->obd_namespace);
obddev->obd_namespace = NULL;
@@ -447,6 +470,10 @@ int client_obd_cleanup(struct obd_device *obddev)
LASSERT(!obddev->u.cli.cl_import);
ldlm_put_ref();
+
+ kfree(cli->cl_mod_tag_bitmap);
+ cli->cl_mod_tag_bitmap = NULL;
+
return 0;
}
EXPORT_SYMBOL(client_obd_cleanup);
@@ -461,6 +488,7 @@ int client_connect_import(const struct lu_env *env,
struct obd_import *imp = cli->cl_import;
struct obd_connect_data *ocd;
struct lustre_handle conn = { 0 };
+ bool is_mdc = false;
int rc;
*exp = NULL;
@@ -487,6 +515,10 @@ int client_connect_import(const struct lu_env *env,
ocd = &imp->imp_connect_data;
if (data) {
*ocd = *data;
+ is_mdc = !strncmp(imp->imp_obd->obd_type->typ_name,
+ LUSTRE_MDC_NAME, 3);
+ if (is_mdc)
+ data->ocd_connect_flags |= OBD_CONNECT_MULTIMODRPCS;
imp->imp_connect_flags_orig = data->ocd_connect_flags;
}
@@ -502,6 +534,11 @@ int client_connect_import(const struct lu_env *env,
ocd->ocd_connect_flags, "old %#llx, new %#llx\n",
data->ocd_connect_flags, ocd->ocd_connect_flags);
data->ocd_connect_flags = ocd->ocd_connect_flags;
+ /* clear the flag as it was not set and is not known
+ * by upper layers
+ */
+ if (is_mdc)
+ data->ocd_connect_flags &= ~OBD_CONNECT_MULTIMODRPCS;
}
ptlrpc_pinger_add_import(imp);
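
client_obd_setup() above only allocates and zeroes cl_mod_tag_bitmap; the helper below is a hypothetical sketch (the real tag handling belongs to the wider multi-modify-RPCs series, not this hunk) showing the kind of bookkeeping the bitmap is intended for.

/* Hypothetical sketch, not part of the patch: claim a free modify-RPC tag
 * from the bitmap set up in client_obd_setup(), or return -1 when all
 * cl_max_mod_rpcs_in_flight slots are in use.
 */
static int example_get_mod_rpc_tag(struct client_obd *cli)
{
	int tag;

	spin_lock(&cli->cl_mod_rpcs_lock);
	tag = find_first_zero_bit(cli->cl_mod_tag_bitmap,
				  cli->cl_max_mod_rpcs_in_flight);
	if (tag < cli->cl_max_mod_rpcs_in_flight)
		set_bit(tag, cli->cl_mod_tag_bitmap);
	else
		tag = -1;
	spin_unlock(&cli->cl_mod_rpcs_lock);

	return tag;
}
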
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index 3c48b4fb96f1..a4a291acb659 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -39,6 +39,7 @@
#include "../../include/linux/libcfs/libcfs.h"
#include "../include/lustre_intent.h"
+#include "../include/lustre_swab.h"
#include "../include/obd_class.h"
#include "ldlm_internal.h"
@@ -63,17 +64,10 @@ static char *ldlm_typename[] = {
[LDLM_IBITS] = "IBT",
};
-static ldlm_policy_wire_to_local_t ldlm_policy_wire18_to_local[] = {
+static ldlm_policy_wire_to_local_t ldlm_policy_wire_to_local[] = {
[LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local,
[LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
- [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire18_to_local,
- [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local,
-};
-
-static ldlm_policy_wire_to_local_t ldlm_policy_wire21_to_local[] = {
- [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local,
- [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
- [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire21_to_local,
+ [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire_to_local,
[LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local,
};
@@ -88,8 +82,8 @@ static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = {
* Converts lock policy from local format to on the wire lock_desc format
*/
static void ldlm_convert_policy_to_wire(enum ldlm_type type,
- const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
+ const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy)
{
ldlm_policy_local_to_wire_t convert;
@@ -102,23 +96,17 @@ static void ldlm_convert_policy_to_wire(enum ldlm_type type,
* Converts lock policy from on the wire lock_desc format to local format
*/
void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
- const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
+ const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy)
{
ldlm_policy_wire_to_local_t convert;
- int new_client;
- /** some badness for 2.0.0 clients, but 2.0.0 isn't supported */
- new_client = (exp_connect_flags(exp) & OBD_CONNECT_FULL20) != 0;
- if (new_client)
- convert = ldlm_policy_wire21_to_local[type - LDLM_MIN_TYPE];
- else
- convert = ldlm_policy_wire18_to_local[type - LDLM_MIN_TYPE];
+ convert = ldlm_policy_wire_to_local[type - LDLM_MIN_TYPE];
convert(wpolicy, lpolicy);
}
-char *ldlm_it2str(int it)
+const char *ldlm_it2str(enum ldlm_intent_flags it)
{
switch (it) {
case IT_OPEN:
@@ -140,7 +128,7 @@ char *ldlm_it2str(int it)
case IT_LAYOUT:
return "layout";
default:
- CERROR("Unknown intent %d\n", it);
+ CERROR("Unknown intent 0x%08x\n", it);
return "UNKNOWN";
}
}
@@ -512,7 +500,6 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
return 0;
}
-EXPORT_SYMBOL(ldlm_lock_change_resource);
/** \defgroup ldlm_handles LDLM HANDLES
* Ways to get hold of locks without any addresses.
@@ -595,7 +582,6 @@ void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
&lock->l_policy_data,
&desc->l_policy_data);
}
-EXPORT_SYMBOL(ldlm_lock2desc);
/**
* Add a lock to list of conflicting locks to send AST to.
@@ -658,7 +644,7 @@ static void ldlm_add_ast_work_item(struct ldlm_lock *lock,
* r/w reference type is determined by \a mode
* Calls ldlm_lock_addref_internal.
*/
-void ldlm_lock_addref(const struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_addref(const struct lustre_handle *lockh, enum ldlm_mode mode)
{
struct ldlm_lock *lock;
@@ -676,7 +662,8 @@ EXPORT_SYMBOL(ldlm_lock_addref);
* Removes lock from LRU if it is there.
* Assumes the LDLM lock is already locked.
*/
-void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock,
+ enum ldlm_mode mode)
{
ldlm_lock_remove_from_lru(lock);
if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
@@ -700,7 +687,7 @@ void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
*
* \retval -EAGAIN lock is being canceled.
*/
-int ldlm_lock_addref_try(const struct lustre_handle *lockh, __u32 mode)
+int ldlm_lock_addref_try(const struct lustre_handle *lockh, enum ldlm_mode mode)
{
struct ldlm_lock *lock;
int result;
@@ -726,7 +713,7 @@ EXPORT_SYMBOL(ldlm_lock_addref_try);
* Locks LDLM lock and calls ldlm_lock_addref_internal_nolock to do the work.
* Only called for local locks.
*/
-void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_addref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
{
lock_res_and_lock(lock);
ldlm_lock_addref_internal_nolock(lock, mode);
@@ -740,7 +727,8 @@ void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
* Does NOT add lock to LRU if no r/w references left to accommodate flock locks
* that cannot be placed in LRU.
*/
-void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock,
+ enum ldlm_mode mode)
{
LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
@@ -766,7 +754,7 @@ void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
* on the namespace.
* For blocked LDLM locks if r/w count drops to zero, blocking_ast is called.
*/
-void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
{
struct ldlm_namespace *ns;
@@ -786,11 +774,16 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
}
if (!lock->l_readers && !lock->l_writers &&
- ldlm_is_cbpending(lock)) {
+ (ldlm_is_cbpending(lock) || lock->l_req_mode == LCK_GROUP)) {
/* If we received a blocked AST and this was the last reference,
* run the callback.
+ * Group locks are special:
+ * They must not go in LRU, but they are not called back
+ * like non-group locks, instead they are manually released.
+ * They have an l_writers reference which they keep until
+ * they are manually released, so we remove them when they have
+ * no more reader or writer references. - LU-6368
*/
-
LDLM_DEBUG(lock, "final decref done on cbpending lock");
LDLM_LOCK_GET(lock); /* dropped by bl thread */
@@ -832,7 +825,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
/**
* Decrease reader/writer refcount for LDLM lock with handle \a lockh
*/
-void ldlm_lock_decref(const struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_decref(const struct lustre_handle *lockh, enum ldlm_mode mode)
{
struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
@@ -846,10 +839,9 @@ EXPORT_SYMBOL(ldlm_lock_decref);
* Decrease reader/writer refcount for LDLM lock with handle
* \a lockh and mark it for subsequent cancellation once r/w refcount
* drops to zero instead of putting into LRU.
- *
- * Typical usage is for GROUP locks which we cannot allow to be cached.
*/
-void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh,
+ enum ldlm_mode mode)
{
struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
@@ -1055,88 +1047,173 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
}
/**
- * Search for a lock with given properties in a queue.
+ * Parameters used by lock_matches()/itree_overlap_cb() to describe the lock
+ * being searched for.
+ */
+struct lock_match_data {
+ struct ldlm_lock *lmd_old;
+ struct ldlm_lock *lmd_lock;
+ enum ldlm_mode *lmd_mode;
+ union ldlm_policy_data *lmd_policy;
+ __u64 lmd_flags;
+ int lmd_unref;
+};
+
+/**
+ * Check if the given @lock meets the criteria for a match.
+ * A reference on the lock is taken if matched.
*
- * \retval a referenced lock or NULL. See the flag descriptions below, in the
- * comment above ldlm_lock_match
+ * \param lock test-against this lock
+ * \param data parameters
*/
-static struct ldlm_lock *search_queue(struct list_head *queue,
- enum ldlm_mode *mode,
- ldlm_policy_data_t *policy,
- struct ldlm_lock *old_lock,
- __u64 flags, int unref)
+static int lock_matches(struct ldlm_lock *lock, struct lock_match_data *data)
{
- struct ldlm_lock *lock;
- struct list_head *tmp;
+ union ldlm_policy_data *lpol = &lock->l_policy_data;
+ enum ldlm_mode match;
- list_for_each(tmp, queue) {
- enum ldlm_mode match;
+ if (lock == data->lmd_old)
+ return INTERVAL_ITER_STOP;
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
-
- if (lock == old_lock)
- break;
+ /*
+ * Check if this lock can be matched.
+ * Used by LU-2919 (exclusive open) for open lease lock
+ */
+ if (ldlm_is_excl(lock))
+ return INTERVAL_ITER_CONT;
- /* Check if this lock can be matched.
- * Used by LU-2919(exclusive open) for open lease lock
- */
- if (ldlm_is_excl(lock))
- continue;
+ /*
+ * llite sometimes wants to match locks that will be
+ * canceled when their users drop, but we allow it to match
+ * if it passes in CBPENDING and the lock still has users.
+ * this is generally only going to be used by children
+ * whose parents already hold a lock so forward progress
+ * can still happen.
+ */
+ if (ldlm_is_cbpending(lock) &&
+ !(data->lmd_flags & LDLM_FL_CBPENDING))
+ return INTERVAL_ITER_CONT;
- /* llite sometimes wants to match locks that will be
- * canceled when their users drop, but we allow it to match
- * if it passes in CBPENDING and the lock still has users.
- * this is generally only going to be used by children
- * whose parents already hold a lock so forward progress
- * can still happen.
- */
- if (ldlm_is_cbpending(lock) && !(flags & LDLM_FL_CBPENDING))
- continue;
- if (!unref && ldlm_is_cbpending(lock) &&
- lock->l_readers == 0 && lock->l_writers == 0)
- continue;
+ if (!data->lmd_unref && ldlm_is_cbpending(lock) &&
+ !lock->l_readers && !lock->l_writers)
+ return INTERVAL_ITER_CONT;
- if (!(lock->l_req_mode & *mode))
- continue;
- match = lock->l_req_mode;
+ if (!(lock->l_req_mode & *data->lmd_mode))
+ return INTERVAL_ITER_CONT;
+ match = lock->l_req_mode;
- if (lock->l_resource->lr_type == LDLM_EXTENT &&
- (lock->l_policy_data.l_extent.start >
- policy->l_extent.start ||
- lock->l_policy_data.l_extent.end < policy->l_extent.end))
- continue;
+ switch (lock->l_resource->lr_type) {
+ case LDLM_EXTENT:
+ if (lpol->l_extent.start > data->lmd_policy->l_extent.start ||
+ lpol->l_extent.end < data->lmd_policy->l_extent.end)
+ return INTERVAL_ITER_CONT;
if (unlikely(match == LCK_GROUP) &&
- lock->l_resource->lr_type == LDLM_EXTENT &&
- policy->l_extent.gid != LDLM_GID_ANY &&
- lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
- continue;
-
- /* We match if we have existing lock with same or wider set
+ data->lmd_policy->l_extent.gid != LDLM_GID_ANY &&
+ lpol->l_extent.gid != data->lmd_policy->l_extent.gid)
+ return INTERVAL_ITER_CONT;
+ break;
+ case LDLM_IBITS:
+ /*
+ * We match if we have existing lock with same or wider set
* of bits.
*/
- if (lock->l_resource->lr_type == LDLM_IBITS &&
- ((lock->l_policy_data.l_inodebits.bits &
- policy->l_inodebits.bits) !=
- policy->l_inodebits.bits))
- continue;
+ if ((lpol->l_inodebits.bits &
+ data->lmd_policy->l_inodebits.bits) !=
+ data->lmd_policy->l_inodebits.bits)
+ return INTERVAL_ITER_CONT;
+ break;
+ default:
+ break;
+ }
+ /*
+ * Skip locks that are being destroyed (GONE mask set) unless the
+ * caller asked to match unreferenced locks as well.
+ */
+ if (!data->lmd_unref && LDLM_HAVE_MASK(lock, GONE))
+ return INTERVAL_ITER_CONT;
+
+ if ((data->lmd_flags & LDLM_FL_LOCAL_ONLY) &&
+ !ldlm_is_local(lock))
+ return INTERVAL_ITER_CONT;
+
+ if (data->lmd_flags & LDLM_FL_TEST_LOCK) {
+ LDLM_LOCK_GET(lock);
+ ldlm_lock_touch_in_lru(lock);
+ } else {
+ ldlm_lock_addref_internal_nolock(lock, match);
+ }
+
+ *data->lmd_mode = match;
+ data->lmd_lock = lock;
+
+ return INTERVAL_ITER_STOP;
+}
+
+static unsigned int itree_overlap_cb(struct interval_node *in, void *args)
+{
+ struct ldlm_interval *node = to_ldlm_interval(in);
+ struct lock_match_data *data = args;
+ struct ldlm_lock *lock;
+ int rc;
+
+ list_for_each_entry(lock, &node->li_group, l_sl_policy) {
+ rc = lock_matches(lock, data);
+ if (rc == INTERVAL_ITER_STOP)
+ return INTERVAL_ITER_STOP;
+ }
+ return INTERVAL_ITER_CONT;
+}
+
+/**
+ * Search for a lock with given parameters in interval trees.
+ *
+ * \param res search for a lock in this resource
+ * \param data parameters
+ *
+ * \retval a referenced lock or NULL.
+ */
+static struct ldlm_lock *search_itree(struct ldlm_resource *res,
+ struct lock_match_data *data)
+{
+ struct interval_node_extent ext = {
+ .start = data->lmd_policy->l_extent.start,
+ .end = data->lmd_policy->l_extent.end
+ };
+ int idx;
+
+ for (idx = 0; idx < LCK_MODE_NUM; idx++) {
+ struct ldlm_interval_tree *tree = &res->lr_itree[idx];
- if (!unref && LDLM_HAVE_MASK(lock, GONE))
+ if (!tree->lit_root)
continue;
- if ((flags & LDLM_FL_LOCAL_ONLY) && !ldlm_is_local(lock))
+ if (!(tree->lit_mode & *data->lmd_mode))
continue;
- if (flags & LDLM_FL_TEST_LOCK) {
- LDLM_LOCK_GET(lock);
- ldlm_lock_touch_in_lru(lock);
- } else {
- ldlm_lock_addref_internal_nolock(lock, match);
- }
- *mode = match;
- return lock;
+ interval_search(tree->lit_root, &ext,
+ itree_overlap_cb, data);
}
+ return data->lmd_lock;
+}
+/**
+ * Search for a lock with given properties in a queue.
+ *
+ * \param queue search for a lock in this queue
+ * \param data parameters
+ *
+ * \retval a referenced lock or NULL.
+ */
+static struct ldlm_lock *search_queue(struct list_head *queue,
+ struct lock_match_data *data)
+{
+ struct ldlm_lock *lock;
+ int rc;
+
+ list_for_each_entry(lock, queue, l_res_link) {
+ rc = lock_matches(lock, data);
+ if (rc == INTERVAL_ITER_STOP)
+ return data->lmd_lock;
+ }
return NULL;
}
@@ -1147,7 +1224,6 @@ void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
wake_up_all(&lock->l_waitq);
}
}
-EXPORT_SYMBOL(ldlm_lock_fail_match_locked);
/**
* Mark lock as "matchable" by OST.
@@ -1208,35 +1284,45 @@ EXPORT_SYMBOL(ldlm_lock_allow_match);
enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
const struct ldlm_res_id *res_id,
enum ldlm_type type,
- ldlm_policy_data_t *policy,
+ union ldlm_policy_data *policy,
enum ldlm_mode mode,
struct lustre_handle *lockh, int unref)
{
+ struct lock_match_data data = {
+ .lmd_old = NULL,
+ .lmd_lock = NULL,
+ .lmd_mode = &mode,
+ .lmd_policy = policy,
+ .lmd_flags = flags,
+ .lmd_unref = unref,
+ };
struct ldlm_resource *res;
- struct ldlm_lock *lock, *old_lock = NULL;
+ struct ldlm_lock *lock;
int rc = 0;
if (!ns) {
- old_lock = ldlm_handle2lock(lockh);
- LASSERT(old_lock);
+ data.lmd_old = ldlm_handle2lock(lockh);
+ LASSERT(data.lmd_old);
- ns = ldlm_lock_to_ns(old_lock);
- res_id = &old_lock->l_resource->lr_name;
- type = old_lock->l_resource->lr_type;
- mode = old_lock->l_req_mode;
+ ns = ldlm_lock_to_ns(data.lmd_old);
+ res_id = &data.lmd_old->l_resource->lr_name;
+ type = data.lmd_old->l_resource->lr_type;
+ *data.lmd_mode = data.lmd_old->l_req_mode;
}
res = ldlm_resource_get(ns, NULL, res_id, type, 0);
if (IS_ERR(res)) {
- LASSERT(!old_lock);
+ LASSERT(!data.lmd_old);
return 0;
}
LDLM_RESOURCE_ADDREF(res);
lock_res(res);
- lock = search_queue(&res->lr_granted, &mode, policy, old_lock,
- flags, unref);
+ if (res->lr_type == LDLM_EXTENT)
+ lock = search_itree(res, &data);
+ else
+ lock = search_queue(&res->lr_granted, &data);
if (lock) {
rc = 1;
goto out;
@@ -1245,14 +1331,12 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
rc = 0;
goto out;
}
- lock = search_queue(&res->lr_waiting, &mode, policy, old_lock,
- flags, unref);
+ lock = search_queue(&res->lr_waiting, &data);
if (lock) {
rc = 1;
goto out;
}
-
- out:
+out:
unlock_res(res);
LDLM_RESOURCE_DELREF(res);
ldlm_resource_putref(res);
@@ -1324,8 +1408,8 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
(type == LDLM_PLAIN || type == LDLM_IBITS) ?
res_id->name[3] : policy->l_extent.end);
}
- if (old_lock)
- LDLM_LOCK_PUT(old_lock);
+ if (data.lmd_old)
+ LDLM_LOCK_PUT(data.lmd_old);
return rc ? mode : 0;
}
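
The interval-tree walk above preserves the existing ldlm_lock_match() contract, so existing callers need no change. For orientation, here is an illustrative caller fragment; passing 0 for the match flags and using OBD_OBJECT_EOF as "whole object" are assumptions made for the example, not something introduced by this patch.

/* Illustrative sketch, not part of the patch: check whether a cached PR or
 * PW extent lock already covers the whole object, dropping the reference
 * that a successful match takes.
 */
static bool example_have_whole_object_lock(struct ldlm_namespace *ns,
					   const struct ldlm_res_id *res_id)
{
	union ldlm_policy_data policy = {
		.l_extent = { .start = 0, .end = OBD_OBJECT_EOF },
	};
	struct lustre_handle lockh;
	enum ldlm_mode mode;

	mode = ldlm_lock_match(ns, 0, res_id, LDLM_EXTENT, &policy,
			       LCK_PR | LCK_PW, &lockh, 0);
	if (!mode)
		return false;

	ldlm_lock_decref(&lockh, mode);
	return true;
}
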
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index fde697ebaadc..12647af5a336 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -511,23 +511,6 @@ static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
CWARN("Send reply failed, maybe cause bug 21636.\n");
}
-static int ldlm_handle_qc_callback(struct ptlrpc_request *req)
-{
- struct obd_quotactl *oqctl;
- struct client_obd *cli = &req->rq_export->exp_obd->u.cli;
-
- oqctl = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
- if (!oqctl) {
- CERROR("Can't unpack obd_quotactl\n");
- return -EPROTO;
- }
-
- oqctl->qc_stat = ptlrpc_status_ntoh(oqctl->qc_stat);
-
- cli->cl_qchk_stat = oqctl->qc_stat;
- return 0;
-}
-
/* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
static int ldlm_callback_handler(struct ptlrpc_request *req)
{
@@ -577,13 +560,6 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
rc = ldlm_handle_setinfo(req);
ldlm_callback_reply(req, rc);
return 0;
- case OBD_QC_CALLBACK:
- req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
- if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
- return 0;
- rc = ldlm_handle_qc_callback(req);
- ldlm_callback_reply(req, rc);
- return 0;
default:
CERROR("unknown opcode %u\n",
lustre_msg_get_opc(req->rq_reqmsg));
@@ -858,7 +834,6 @@ int ldlm_get_ref(void)
return rc;
}
-EXPORT_SYMBOL(ldlm_get_ref);
void ldlm_put_ref(void)
{
@@ -875,7 +850,6 @@ void ldlm_put_ref(void)
}
mutex_unlock(&ldlm_ref_mutex);
}
-EXPORT_SYMBOL(ldlm_put_ref);
static ssize_t cancel_unused_locks_before_replay_show(struct kobject *kobj,
struct attribute *attr,
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
index 0aed39c46154..862ea0a1dc97 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
@@ -54,14 +54,14 @@
#include "ldlm_internal.h"
-void ldlm_plain_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
+void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy)
{
/* No policy for plain locks */
}
-void ldlm_plain_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
+void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy)
{
/* No policy for plain locks */
}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 9a1136e32dfc..8dfb3c8e6b7a 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -293,7 +293,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
* take into account pl->pl_recalc_time here.
*/
ret = ldlm_cancel_lru(container_of(pl, struct ldlm_namespace, ns_pool),
- 0, LCF_ASYNC, LDLM_CANCEL_LRUR);
+ 0, LCF_ASYNC, LDLM_LRU_FLAG_LRUR);
out:
spin_lock(&pl->pl_lock);
@@ -339,7 +339,7 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
if (nr == 0)
return (unused / 100) * sysctl_vfs_cache_pressure;
else
- return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
+ return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_LRU_FLAG_SHRINK);
}
static const struct ldlm_pool_ops ldlm_cli_pool_ops = {
@@ -356,10 +356,10 @@ static int ldlm_pool_recalc(struct ldlm_pool *pl)
u32 recalc_interval_sec;
int count;
- recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
+ recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
if (recalc_interval_sec > 0) {
spin_lock(&pl->pl_lock);
- recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
+ recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
if (recalc_interval_sec > 0) {
/*
@@ -382,7 +382,7 @@ static int ldlm_pool_recalc(struct ldlm_pool *pl)
count);
}
- recalc_interval_sec = pl->pl_recalc_time - ktime_get_seconds() +
+ recalc_interval_sec = pl->pl_recalc_time - ktime_get_real_seconds() +
pl->pl_recalc_period;
if (recalc_interval_sec <= 0) {
/* DEBUG: should be re-removed after LU-4536 is fixed */
@@ -651,13 +651,13 @@ static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl)
}
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
- int idx, ldlm_side_t client)
+ int idx, enum ldlm_side client)
{
int rc;
spin_lock_init(&pl->pl_lock);
atomic_set(&pl->pl_granted, 0);
- pl->pl_recalc_time = ktime_get_seconds();
+ pl->pl_recalc_time = ktime_get_real_seconds();
atomic_set(&pl->pl_lock_volume_factor, 1);
atomic_set(&pl->pl_grant_rate, 0);
@@ -684,7 +684,6 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
return rc;
}
-EXPORT_SYMBOL(ldlm_pool_init);
void ldlm_pool_fini(struct ldlm_pool *pl)
{
@@ -698,7 +697,6 @@ void ldlm_pool_fini(struct ldlm_pool *pl)
*/
POISON(pl, 0x5a, sizeof(*pl));
}
-EXPORT_SYMBOL(ldlm_pool_fini);
/**
* Add new taken ldlm lock \a lock into pool \a pl accounting.
@@ -724,7 +722,6 @@ void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
* with too long call paths.
*/
}
-EXPORT_SYMBOL(ldlm_pool_add);
/**
* Remove ldlm lock \a lock from pool \a pl accounting.
@@ -743,7 +740,6 @@ void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
}
-EXPORT_SYMBOL(ldlm_pool_del);
/**
* Returns current \a pl SLV.
@@ -792,13 +788,12 @@ static struct completion ldlm_pools_comp;
* count locks from all namespaces (if possible). Returns number of
* cached locks.
*/
-static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
+static unsigned long ldlm_pools_count(enum ldlm_side client, gfp_t gfp_mask)
{
unsigned long total = 0;
int nr_ns;
struct ldlm_namespace *ns;
struct ldlm_namespace *ns_old = NULL; /* loop detection */
- void *cookie;
if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
return 0;
@@ -806,8 +801,6 @@ static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
- cookie = cl_env_reenter();
-
/*
* Find out how many resources we may release.
*/
@@ -816,7 +809,6 @@ static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
mutex_lock(ldlm_namespace_lock(client));
if (list_empty(ldlm_namespace_list(client))) {
mutex_unlock(ldlm_namespace_lock(client));
- cl_env_reexit(cookie);
return 0;
}
ns = ldlm_namespace_first_locked(client);
@@ -842,22 +834,19 @@ static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
ldlm_namespace_put(ns);
}
- cl_env_reexit(cookie);
return total;
}
-static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask)
+static unsigned long ldlm_pools_scan(enum ldlm_side client, int nr,
+ gfp_t gfp_mask)
{
unsigned long freed = 0;
int tmp, nr_ns;
struct ldlm_namespace *ns;
- void *cookie;
if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
return -1;
- cookie = cl_env_reenter();
-
/*
* Shrink at least ldlm_namespace_nr_read(client) namespaces.
*/
@@ -887,7 +876,6 @@ static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask)
freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
ldlm_namespace_put(ns);
}
- cl_env_reexit(cookie);
/*
* we only decrease the SLV in server pools shrinker, return
* SHRINK_STOP to kernel to avoid needless loop. LU-1128
@@ -908,7 +896,7 @@ static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
sc->gfp_mask);
}
-static int ldlm_pools_recalc(ldlm_side_t client)
+static int ldlm_pools_recalc(enum ldlm_side client)
{
struct ldlm_namespace *ns;
struct ldlm_namespace *ns_old = NULL;
@@ -1095,7 +1083,6 @@ int ldlm_pools_init(void)
return rc;
}
-EXPORT_SYMBOL(ldlm_pools_init);
void ldlm_pools_fini(void)
{
@@ -1104,4 +1091,3 @@ void ldlm_pools_fini(void)
ldlm_pools_thread_stop();
}
-EXPORT_SYMBOL(ldlm_pools_fini);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index 35ba6f14d95f..c1f8693f94a5 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -93,11 +93,7 @@ static int ldlm_expired_completion_wait(void *data)
if (!lock->l_conn_export) {
static unsigned long next_dump, last_dump;
- LCONSOLE_WARN("lock timed out (enqueued at %lld, %llds ago)\n",
- (s64)lock->l_last_activity,
- (s64)(ktime_get_real_seconds() -
- lock->l_last_activity));
- LDLM_DEBUG(lock, "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep",
+ LDLM_ERROR(lock, "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep",
(s64)lock->l_last_activity,
(s64)(ktime_get_real_seconds() -
lock->l_last_activity));
@@ -475,12 +471,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
"client-side enqueue, new policy data");
}
- if ((*flags) & LDLM_FL_AST_SENT ||
- /* Cancel extent locks as soon as possible on a liblustre client,
- * because it cannot handle asynchronous ASTs robustly (see
- * bug 7311).
- */
- (LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) {
+ if ((*flags) & LDLM_FL_AST_SENT) {
lock_res_and_lock(lock);
lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
unlock_res_and_lock(lock);
@@ -602,7 +593,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);
flags = ns_connect_lru_resize(ns) ?
- LDLM_CANCEL_LRUR_NO_WAIT : LDLM_CANCEL_AGED;
+ LDLM_LRU_FLAG_LRUR_NO_WAIT : LDLM_LRU_FLAG_AGED;
to_free = !ns_connect_lru_resize(ns) &&
opc == LDLM_ENQUEUE ? 1 : 0;
@@ -657,6 +648,27 @@ int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
}
EXPORT_SYMBOL(ldlm_prep_enqueue_req);
+static struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp,
+ int lvb_len)
+{
+ struct ptlrpc_request *req;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return ERR_PTR(rc);
+ }
+
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len);
+ ptlrpc_request_set_replen(req);
+ return req;
+}
+
/**
* Client-side lock enqueue.
*
@@ -670,7 +682,7 @@ EXPORT_SYMBOL(ldlm_prep_enqueue_req);
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
struct ldlm_enqueue_info *einfo,
const struct ldlm_res_id *res_id,
- ldlm_policy_data_t const *policy, __u64 *flags,
+ union ldlm_policy_data const *policy, __u64 *flags,
void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
struct lustre_handle *lockh, int async)
{
@@ -727,17 +739,14 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
lock->l_last_activity = ktime_get_real_seconds();
/* lock not sent to server yet */
-
if (!reqp || !*reqp) {
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_LDLM_ENQUEUE,
- LUSTRE_DLM_VERSION,
- LDLM_ENQUEUE);
- if (!req) {
+ req = ldlm_enqueue_pack(exp, lvb_len);
+ if (IS_ERR(req)) {
failed_lock_cleanup(ns, lock, einfo->ei_mode);
LDLM_LOCK_RELEASE(lock);
- return -ENOMEM;
+ return PTR_ERR(req);
}
+
req_passed_in = 0;
if (reqp)
*reqp = req;
@@ -757,24 +766,6 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
body->lock_flags = ldlm_flags_to_wire(*flags);
body->lock_handle[0] = *lockh;
- /* Continue as normal. */
- if (!req_passed_in) {
- if (lvb_len > 0)
- req_capsule_extend(&req->rq_pill,
- &RQF_LDLM_ENQUEUE_LVB);
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
- lvb_len);
- ptlrpc_request_set_replen(req);
- }
-
- /*
- * Liblustre client doesn't get extent locks, except for O_APPEND case
- * where [0, OBD_OBJECT_EOF] lock is taken, or truncate, where
- * [i_size, OBD_OBJECT_EOF] lock is taken.
- */
- LASSERT(ergo(LIBLUSTRE_CLIENT, einfo->ei_type != LDLM_EXTENT ||
- policy->l_extent.end == OBD_OBJECT_EOF));
-
if (async) {
LASSERT(reqp);
return 0;
@@ -1022,7 +1013,6 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req)
return 0;
}
-EXPORT_SYMBOL(ldlm_cli_update_pool);
/**
* Client side lock cancel.
@@ -1067,7 +1057,7 @@ int ldlm_cli_cancel(const struct lustre_handle *lockh,
ns = ldlm_lock_to_ns(lock);
flags = ns_connect_lru_resize(ns) ?
- LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
+ LDLM_LRU_FLAG_LRUR : LDLM_LRU_FLAG_AGED;
count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
LCF_BL_AST, flags);
}
@@ -1125,7 +1115,6 @@ int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
return count;
}
-EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
/**
* Cancel as many locks as possible w/o sending any RPCs (e.g. to write back
@@ -1184,6 +1173,14 @@ static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
if (count && added >= count)
return LDLM_POLICY_KEEP_LOCK;
+ /*
+ * Regardless of the lock volume (LV), it does not make sense to keep
+ * a lock that has been unused for ns_max_age.
+ */
+ if (cfs_time_after(cfs_time_current(),
+ cfs_time_add(lock->l_last_used, ns->ns_max_age)))
+ return LDLM_POLICY_CANCEL_LOCK;
+
slv = ldlm_pool_get_slv(pl);
lvf = ldlm_pool_get_lvf(pl);
la = cfs_duration_sec(cfs_time_sub(cur, lock->l_last_used));
@@ -1287,21 +1284,21 @@ typedef enum ldlm_policy_res (*ldlm_cancel_lru_policy_t)(
static ldlm_cancel_lru_policy_t
ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
{
- if (flags & LDLM_CANCEL_NO_WAIT)
+ if (flags & LDLM_LRU_FLAG_NO_WAIT)
return ldlm_cancel_no_wait_policy;
if (ns_connect_lru_resize(ns)) {
- if (flags & LDLM_CANCEL_SHRINK)
+ if (flags & LDLM_LRU_FLAG_SHRINK)
/* We kill passed number of old locks. */
return ldlm_cancel_passed_policy;
- else if (flags & LDLM_CANCEL_LRUR)
+ else if (flags & LDLM_LRU_FLAG_LRUR)
return ldlm_cancel_lrur_policy;
- else if (flags & LDLM_CANCEL_PASSED)
+ else if (flags & LDLM_LRU_FLAG_PASSED)
return ldlm_cancel_passed_policy;
- else if (flags & LDLM_CANCEL_LRUR_NO_WAIT)
+ else if (flags & LDLM_LRU_FLAG_LRUR_NO_WAIT)
return ldlm_cancel_lrur_no_wait_policy;
} else {
- if (flags & LDLM_CANCEL_AGED)
+ if (flags & LDLM_LRU_FLAG_AGED)
return ldlm_cancel_aged_policy;
}
@@ -1325,21 +1322,21 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
*
* Calling policies for enabled LRU resize:
* ----------------------------------------
- * flags & LDLM_CANCEL_LRUR - use LRU resize policy (SLV from server) to
- * cancel not more than \a count locks;
+ * flags & LDLM_LRU_FLAG_LRUR - use LRU resize policy (SLV from server) to
+ * cancel not more than \a count locks;
*
- * flags & LDLM_CANCEL_PASSED - cancel \a count number of old locks (located at
- * the beginning of LRU list);
+ * flags & LDLM_LRU_FLAG_PASSED - cancel \a count number of old locks (located at
+ * the beginning of LRU list);
*
- * flags & LDLM_CANCEL_SHRINK - cancel not more than \a count locks according to
- * memory pressure policy function;
+ * flags & LDLM_LRU_FLAG_SHRINK - cancel not more than \a count locks according to
+ * memory pressure policy function;
*
- * flags & LDLM_CANCEL_AGED - cancel \a count locks according to "aged policy".
+ * flags & LDLM_LRU_FLAG_AGED - cancel \a count locks according to "aged policy".
*
- * flags & LDLM_CANCEL_NO_WAIT - cancel as many unused locks as possible
- * (typically before replaying locks) w/o
- * sending any RPCs or waiting for any
- * outstanding RPC to complete.
+ * flags & LDLM_LRU_FLAG_NO_WAIT - cancel as many unused locks as possible
+ * (typically before replaying locks) w/o
+ * sending any RPCs or waiting for any
+ * outstanding RPC to complete.
*/
static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
struct list_head *cancels, int count, int max,
@@ -1348,7 +1345,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
ldlm_cancel_lru_policy_t pf;
struct ldlm_lock *lock, *next;
int added = 0, unused, remained;
- int no_wait = flags & (LDLM_CANCEL_NO_WAIT | LDLM_CANCEL_LRUR_NO_WAIT);
+ int no_wait = flags & (LDLM_LRU_FLAG_NO_WAIT | LDLM_LRU_FLAG_LRUR_NO_WAIT);
spin_lock(&ns->ns_lock);
unused = ns->ns_nr_unused;
@@ -1531,7 +1528,7 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
*/
int ldlm_cancel_resource_local(struct ldlm_resource *res,
struct list_head *cancels,
- ldlm_policy_data_t *policy,
+ union ldlm_policy_data *policy,
enum ldlm_mode mode, __u64 lock_flags,
enum ldlm_cancel_flags cancel_flags,
void *opaque)
@@ -1648,7 +1645,7 @@ EXPORT_SYMBOL(ldlm_cli_cancel_list);
*/
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
- ldlm_policy_data_t *policy,
+ union ldlm_policy_data *policy,
enum ldlm_mode mode,
enum ldlm_cancel_flags flags,
void *opaque)
@@ -1723,7 +1720,7 @@ int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
opaque);
} else {
cfs_hash_for_each_nolock(ns->ns_rs_hash,
- ldlm_cli_hash_cancel_unused, &arg);
+ ldlm_cli_hash_cancel_unused, &arg, 0);
return ELDLM_OK;
}
}
@@ -1796,7 +1793,7 @@ static void ldlm_namespace_foreach(struct ldlm_namespace *ns,
};
cfs_hash_for_each_nolock(ns->ns_rs_hash,
- ldlm_res_iter_helper, &helper);
+ ldlm_res_iter_helper, &helper, 0);
}
/* non-blocking function to manipulate a lock whose cb_data is being put away.
@@ -1840,7 +1837,7 @@ static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
* bug 17614: locks being actively cancelled. Get a reference
* on a lock so that it does not disappear under us (e.g. due to cancel)
*/
- if (!(lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCELING))) {
+ if (!(lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_BL_DONE))) {
list_add(&lock->l_pending_chain, list);
LDLM_LOCK_GET(lock);
}
@@ -1909,7 +1906,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
int flags;
/* Bug 11974: Do not replay a lock which is actively being canceled */
- if (ldlm_is_canceling(lock)) {
+ if (ldlm_is_bl_done(lock)) {
LDLM_DEBUG(lock, "Not replaying canceled lock:");
return 0;
}
@@ -2003,11 +2000,11 @@ static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
ldlm_ns_name(ns), ns->ns_nr_unused);
/* We don't need to care whether or not LRU resize is enabled
- * because the LDLM_CANCEL_NO_WAIT policy doesn't use the
+ * because the LDLM_LRU_FLAG_NO_WAIT policy doesn't use the
* count parameter
*/
canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
- LCF_LOCAL, LDLM_CANCEL_NO_WAIT);
+ LCF_LOCAL, LDLM_LRU_FLAG_NO_WAIT);
CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
canceled, ldlm_ns_name(ns));
@@ -2048,4 +2045,3 @@ int ldlm_replay_locks(struct obd_import *imp)
return rc;
}
-EXPORT_SYMBOL(ldlm_replay_locks);
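
Several prototypes in this file now take union ldlm_policy_data directly rather than the removed typedef. The fragment below is an illustrative caller sketch; MDS_INODELOCK_UPDATE is assumed to be the usual inodebits constant from the wire headers and is not defined by this patch.

/* Illustrative sketch, not part of the patch: cancel unused UPDATE ibits
 * locks on a single resource using the union-based policy type.
 */
static int example_cancel_update_locks(struct ldlm_namespace *ns,
				       const struct ldlm_res_id *res_id)
{
	union ldlm_policy_data policy = {
		.l_inodebits = { .bits = MDS_INODELOCK_UPDATE },
	};

	return ldlm_cli_cancel_unused_resource(ns, res_id, &policy,
					       LCK_MINMODE, LCF_ASYNC, NULL);
}
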
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index a09c25aea698..b22f5bae7201 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -226,7 +226,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
/* Try to cancel all @ns_nr_unused locks. */
canceled = ldlm_cancel_lru(ns, unused, 0,
- LDLM_CANCEL_PASSED);
+ LDLM_LRU_FLAG_PASSED);
if (canceled < unused) {
CDEBUG(D_DLMTRACE,
"not all requested locks are canceled, requested: %d, canceled: %d\n",
@@ -237,7 +237,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
} else {
tmp = ns->ns_max_unused;
ns->ns_max_unused = 0;
- ldlm_cancel_lru(ns, 0, 0, LDLM_CANCEL_PASSED);
+ ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED);
ns->ns_max_unused = tmp;
}
return count;
@@ -262,7 +262,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
"changing namespace %s unused locks from %u to %u\n",
ldlm_ns_name(ns), ns->ns_nr_unused,
(unsigned int)tmp);
- ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_CANCEL_PASSED);
+ ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
if (!lru_resize) {
CDEBUG(D_DLMTRACE,
@@ -276,7 +276,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
ldlm_ns_name(ns), ns->ns_max_unused,
(unsigned int)tmp);
ns->ns_max_unused = (unsigned int)tmp;
- ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED);
+ ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
/* Make sure that LRU resize was originally supported before
* turning it on here.
@@ -445,8 +445,8 @@ static struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
return res;
}
-static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
- const void *key, unsigned mask)
+static unsigned int ldlm_res_hop_hash(struct cfs_hash *hs,
+ const void *key, unsigned int mask)
{
const struct ldlm_res_id *id = key;
unsigned int val = 0;
@@ -457,8 +457,8 @@ static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
return val & mask;
}
-static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
- const void *key, unsigned mask)
+static unsigned int ldlm_res_hop_fid_hash(struct cfs_hash *hs,
+ const void *key, unsigned int mask)
{
const struct ldlm_res_id *id = key;
struct lu_fid fid;
@@ -612,7 +612,7 @@ static struct ldlm_ns_hash_def ldlm_ns_hash_defs[] = {
/** Register \a ns in the list of namespaces */
static void ldlm_namespace_register(struct ldlm_namespace *ns,
- ldlm_side_t client)
+ enum ldlm_side client)
{
mutex_lock(ldlm_namespace_lock(client));
LASSERT(list_empty(&ns->ns_list_chain));
@@ -625,7 +625,7 @@ static void ldlm_namespace_register(struct ldlm_namespace *ns,
* Create and initialize new empty namespace.
*/
struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
- ldlm_side_t client,
+ enum ldlm_side client,
enum ldlm_appetite apt,
enum ldlm_ns_type ns_type)
{
@@ -855,8 +855,10 @@ int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
return ELDLM_OK;
}
- cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean, &flags);
- cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain, NULL);
+ cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean,
+ &flags, 0);
+ cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain,
+ NULL, 0);
return ELDLM_OK;
}
EXPORT_SYMBOL(ldlm_namespace_cleanup);
@@ -952,7 +954,7 @@ void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
/** Unregister \a ns from the list of namespaces. */
static void ldlm_namespace_unregister(struct ldlm_namespace *ns,
- ldlm_side_t client)
+ enum ldlm_side client)
{
mutex_lock(ldlm_namespace_lock(client));
LASSERT(!list_empty(&ns->ns_list_chain));
@@ -999,7 +1001,6 @@ void ldlm_namespace_get(struct ldlm_namespace *ns)
{
atomic_inc(&ns->ns_bref);
}
-EXPORT_SYMBOL(ldlm_namespace_get);
/* This is only for callers that care about refcount */
static int ldlm_namespace_get_return(struct ldlm_namespace *ns)
@@ -1014,11 +1015,10 @@ void ldlm_namespace_put(struct ldlm_namespace *ns)
spin_unlock(&ns->ns_lock);
}
}
-EXPORT_SYMBOL(ldlm_namespace_put);
/** Should be called with ldlm_namespace_lock(client) taken. */
void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
- ldlm_side_t client)
+ enum ldlm_side client)
{
LASSERT(!list_empty(&ns->ns_list_chain));
LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
@@ -1027,7 +1027,7 @@ void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
/** Should be called with ldlm_namespace_lock(client) taken. */
void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
- ldlm_side_t client)
+ enum ldlm_side client)
{
LASSERT(!list_empty(&ns->ns_list_chain));
LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
@@ -1035,7 +1035,7 @@ void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
}
/** Should be called with ldlm_namespace_lock(client) taken. */
-struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
+struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client)
{
LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
LASSERT(!list_empty(ldlm_namespace_list(client)));
@@ -1305,7 +1305,7 @@ void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
* Print information about all locks in all namespaces on this node to debug
* log.
*/
-void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
+void ldlm_dump_all_namespaces(enum ldlm_side client, int level)
{
struct list_head *tmp;
@@ -1323,7 +1323,6 @@ void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
mutex_unlock(ldlm_namespace_lock(client));
}
-EXPORT_SYMBOL(ldlm_dump_all_namespaces);
static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode, void *arg)
@@ -1355,12 +1354,11 @@ void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
cfs_hash_for_each_nolock(ns->ns_rs_hash,
ldlm_res_hash_dump,
- (void *)(unsigned long)level);
+ (void *)(unsigned long)level, 0);
spin_lock(&ns->ns_lock);
ns->ns_next_dump = cfs_time_shift(10);
spin_unlock(&ns->ns_lock);
}
-EXPORT_SYMBOL(ldlm_namespace_dump);
/**
* Print information about all locks in this resource to debug log.
diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile
index 1ac0940bd8df..322d4fa63f5d 100644
--- a/drivers/staging/lustre/lustre/llite/Makefile
+++ b/drivers/staging/lustre/lustre/llite/Makefile
@@ -1,7 +1,7 @@
obj-$(CONFIG_LUSTRE_FS) += lustre.o
-lustre-y := dcache.o dir.o file.o llite_close.o llite_lib.o llite_nfs.o \
- rw.o namei.o symlink.o llite_mmap.o range_lock.o \
- xattr.o xattr_cache.o rw26.o super25.o statahead.o \
- glimpse.o lcommon_cl.o lcommon_misc.o \
- vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o vvp_req.o \
+lustre-y := dcache.o dir.o file.o llite_lib.o llite_nfs.o \
+ rw.o rw26.o namei.o symlink.o llite_mmap.o range_lock.o \
+ xattr.o xattr_cache.o xattr_security.o \
+ super25.o statahead.o glimpse.o lcommon_cl.o lcommon_misc.o \
+ vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o \
lproc_llite.o
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 7f32a539d260..ea5d247a3f70 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -51,6 +51,8 @@
#include "../include/lustre_dlm.h"
#include "../include/lustre_fid.h"
#include "../include/lustre_kernelcomm.h"
+#include "../include/lustre_swab.h"
+
#include "llite_internal.h"
/*
@@ -410,6 +412,8 @@ static int ll_dir_setdirstripe(struct inode *parent, struct lmv_user_md *lump,
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
struct ll_sb_info *sbi = ll_i2sbi(parent);
+ struct inode *inode = NULL;
+ struct dentry dentry;
int err;
if (unlikely(lump->lum_magic != LMV_USER_MAGIC))
@@ -419,6 +423,10 @@ static int ll_dir_setdirstripe(struct inode *parent, struct lmv_user_md *lump,
PFID(ll_inode2fid(parent)), parent, dirname,
(int)lump->lum_stripe_offset, lump->lum_stripe_count);
+ if (lump->lum_stripe_count > 1 &&
+ !(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_DIR_STRIPE))
+ return -EINVAL;
+
if (lump->lum_magic != cpu_to_le32(LMV_USER_MAGIC))
lustre_swab_lmv_user_md(lump);
@@ -439,8 +447,17 @@ static int ll_dir_setdirstripe(struct inode *parent, struct lmv_user_md *lump,
from_kgid(&init_user_ns, current_fsgid()),
cfs_curproc_cap_pack(), 0, &request);
ll_finish_md_op_data(op_data);
+
+ err = ll_prep_inode(&inode, request, parent->i_sb, NULL);
if (err)
goto err_exit;
+
+ memset(&dentry, 0, sizeof(dentry));
+ dentry.d_inode = inode;
+
+ err = ll_init_security(&dentry, inode, parent);
+ iput(inode);
+
err_exit:
ptlrpc_req_finished(request);
return err;
@@ -501,8 +518,7 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
return PTR_ERR(op_data);
/* swabbing is done in lov_setstripe() on server side */
- rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size,
- NULL, 0, &req, NULL);
+ rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size, &req);
ll_finish_md_op_data(op_data);
ptlrpc_req_finished(req);
if (rc) {
@@ -682,7 +698,7 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
{
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct hsm_progress_kernel hpk;
- int rc;
+ int rc2, rc = 0;
/* Forge a hsm_progress based on data from copy. */
hpk.hpk_fid = copy->hc_hai.hai_fid;
@@ -732,10 +748,10 @@ progress:
/* On error, the request should be considered as completed */
if (hpk.hpk_errval > 0)
hpk.hpk_flags |= HP_FLAG_COMPLETED;
- rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
- &hpk, NULL);
+ rc2 = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
+ &hpk, NULL);
- return rc;
+ return rc ? rc : rc2;
}
/**
@@ -757,7 +773,7 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
{
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct hsm_progress_kernel hpk;
- int rc;
+ int rc2, rc = 0;
/* If you modify the logic here, also check llapi_hsm_copy_end(). */
/* Take care: copy->hc_hai.hai_action, len, gid and data are not
@@ -823,18 +839,18 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
* when the file will not be modified for some tunable
* time
*/
- /* we do not notify caller */
hpk.hpk_flags &= ~HP_FLAG_RETRY;
+ rc = -EBUSY;
/* hpk_errval must be >= 0 */
- hpk.hpk_errval = EBUSY;
+ hpk.hpk_errval = -rc;
}
}
progress:
- rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
- &hpk, NULL);
+ rc2 = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
+ &hpk, NULL);
- return rc;
+ return rc ? rc : rc2;
}
static int copy_and_ioctl(int cmd, struct obd_export *exp,
@@ -862,10 +878,6 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
int rc = 0;
switch (cmd) {
- case LUSTRE_Q_INVALIDATE:
- case LUSTRE_Q_FINVALIDATE:
- case Q_QUOTAON:
- case Q_QUOTAOFF:
case Q_SETQUOTA:
case Q_SETINFO:
if (!capable(CFS_CAP_SYS_ADMIN))
@@ -930,10 +942,6 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
QCTL_COPY(oqctl, qctl);
rc = obd_quotactl(sbi->ll_md_exp, oqctl);
if (rc) {
- if (rc != -EALREADY && cmd == Q_QUOTAON) {
- oqctl->qc_cmd = Q_QUOTAOFF;
- obd_quotactl(sbi->ll_md_exp, oqctl);
- }
kfree(oqctl);
return rc;
}
@@ -1077,7 +1085,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
goto out_free;
}
- rc = ll_get_fid_by_name(inode, filename, namelen, NULL);
+ rc = ll_get_fid_by_name(inode, filename, namelen, NULL, NULL);
if (rc < 0) {
CERROR("%s: lookup %.*s failed: rc = %d\n",
ll_get_fsname(inode->i_sb, NULL, 0), namelen,
@@ -1189,6 +1197,7 @@ lmv_out_free:
struct lmv_user_md *tmp = NULL;
union lmv_mds_md *lmm = NULL;
u64 valid = 0;
+ int max_stripe_count;
int stripe_count;
int mdt_index;
int lum_size;
@@ -1200,6 +1209,7 @@ lmv_out_free:
if (copy_from_user(&lum, ulmv, sizeof(*ulmv)))
return -EFAULT;
+ max_stripe_count = lum.lum_stripe_count;
/*
* lum_magic will indicate which stripe the ioctl will like
* to get, LMV_MAGIC_V1 is for normal LMV stripe, LMV_USER_MAGIC
@@ -1219,9 +1229,6 @@ lmv_out_free:
/* Get default LMV EA */
if (lum.lum_magic == LMV_USER_MAGIC) {
- if (rc)
- goto finish_req;
-
if (lmmsize > sizeof(*ulmv)) {
rc = -EINVAL;
goto finish_req;
@@ -1234,6 +1241,16 @@ lmv_out_free:
}
stripe_count = lmv_mds_md_stripe_count_get(lmm);
+ if (max_stripe_count < stripe_count) {
+ lum.lum_stripe_count = stripe_count;
+ if (copy_to_user(ulmv, &lum, sizeof(lum))) {
+ rc = -EFAULT;
+ goto finish_req;
+ }
+ rc = -E2BIG;
+ goto finish_req;
+ }
+
lum_size = lmv_user_md_size(stripe_count, LMV_MAGIC_V1);
tmp = kzalloc(lum_size, GFP_NOFS);
if (!tmp) {
@@ -1370,134 +1387,6 @@ out_req:
ll_putname(filename);
return rc;
}
- case IOC_LOV_GETINFO: {
- struct lov_user_mds_data __user *lumd;
- struct lov_stripe_md *lsm;
- struct lov_user_md __user *lum;
- struct lov_mds_md *lmm;
- int lmmsize;
- lstat_t st;
-
- lumd = (struct lov_user_mds_data __user *)arg;
- lum = &lumd->lmd_lmm;
-
- rc = ll_get_max_mdsize(sbi, &lmmsize);
- if (rc)
- return rc;
-
- lmm = libcfs_kvzalloc(lmmsize, GFP_NOFS);
- if (!lmm)
- return -ENOMEM;
- if (copy_from_user(lmm, lum, lmmsize)) {
- rc = -EFAULT;
- goto free_lmm;
- }
-
- switch (lmm->lmm_magic) {
- case LOV_USER_MAGIC_V1:
- if (cpu_to_le32(LOV_USER_MAGIC_V1) == LOV_USER_MAGIC_V1)
- break;
- /* swab objects first so that stripes num will be sane */
- lustre_swab_lov_user_md_objects(
- ((struct lov_user_md_v1 *)lmm)->lmm_objects,
- ((struct lov_user_md_v1 *)lmm)->lmm_stripe_count);
- lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
- break;
- case LOV_USER_MAGIC_V3:
- if (cpu_to_le32(LOV_USER_MAGIC_V3) == LOV_USER_MAGIC_V3)
- break;
- /* swab objects first so that stripes num will be sane */
- lustre_swab_lov_user_md_objects(
- ((struct lov_user_md_v3 *)lmm)->lmm_objects,
- ((struct lov_user_md_v3 *)lmm)->lmm_stripe_count);
- lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
- break;
- default:
- rc = -EINVAL;
- goto free_lmm;
- }
-
- rc = obd_unpackmd(sbi->ll_dt_exp, &lsm, lmm, lmmsize);
- if (rc < 0) {
- rc = -ENOMEM;
- goto free_lmm;
- }
-
- /* Perform glimpse_size operation. */
- memset(&st, 0, sizeof(st));
-
- rc = ll_glimpse_ioctl(sbi, lsm, &st);
- if (rc)
- goto free_lsm;
-
- if (copy_to_user(&lumd->lmd_st, &st, sizeof(st))) {
- rc = -EFAULT;
- goto free_lsm;
- }
-
-free_lsm:
- obd_free_memmd(sbi->ll_dt_exp, &lsm);
-free_lmm:
- kvfree(lmm);
- return rc;
- }
- case OBD_IOC_QUOTACHECK: {
- struct obd_quotactl *oqctl;
- int error = 0;
-
- if (!capable(CFS_CAP_SYS_ADMIN))
- return -EPERM;
-
- oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
- if (!oqctl)
- return -ENOMEM;
- oqctl->qc_type = arg;
- rc = obd_quotacheck(sbi->ll_md_exp, oqctl);
- if (rc < 0) {
- CDEBUG(D_INFO, "md_quotacheck failed: rc %d\n", rc);
- error = rc;
- }
-
- rc = obd_quotacheck(sbi->ll_dt_exp, oqctl);
- if (rc < 0)
- CDEBUG(D_INFO, "obd_quotacheck failed: rc %d\n", rc);
-
- kfree(oqctl);
- return error ?: rc;
- }
- case OBD_IOC_POLL_QUOTACHECK: {
- struct if_quotacheck *check;
-
- if (!capable(CFS_CAP_SYS_ADMIN))
- return -EPERM;
-
- check = kzalloc(sizeof(*check), GFP_NOFS);
- if (!check)
- return -ENOMEM;
-
- rc = obd_iocontrol(cmd, sbi->ll_md_exp, 0, (void *)check,
- NULL);
- if (rc) {
- CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc);
- if (copy_to_user((void __user *)arg, check,
- sizeof(*check)))
- CDEBUG(D_QUOTA, "copy_to_user failed\n");
- goto out_poll;
- }
-
- rc = obd_iocontrol(cmd, sbi->ll_dt_exp, 0, (void *)check,
- NULL);
- if (rc) {
- CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc);
- if (copy_to_user((void __user *)arg, check,
- sizeof(*check)))
- CDEBUG(D_QUOTA, "copy_to_user failed\n");
- goto out_poll;
- }
-out_poll:
- kfree(check);
- return rc;
- }
case OBD_IOC_QUOTACTL: {
struct if_quotactl *qctl;
@@ -1536,7 +1425,7 @@ out_quotactl:
exp = count ? sbi->ll_md_exp : sbi->ll_dt_exp;
vallen = sizeof(count);
rc = obd_get_info(NULL, exp, sizeof(KEY_TGT_COUNT),
- KEY_TGT_COUNT, &vallen, &count, NULL);
+ KEY_TGT_COUNT, &vallen, &count);
if (rc) {
CERROR("get target count failed: %d\n", rc);
return rc;
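In ll_ioc_copy_start() and ll_ioc_copy_end() above, the single return code is split into rc/rc2 so that the HSM progress RPC is always issued and the first failure is the one reported. A minimal, self-contained sketch of that idiom (assumes <linux/errno.h>); the two stub functions are hypothetical stand-ins for the copy step and for the obd_iocontrol(LL_IOC_HSM_PROGRESS, ...) call:

	static int do_copy_step(void)    { return -EBUSY; } /* may fail */
	static int report_progress(void) { return 0; }      /* always attempted */

	static int copy_step_and_report(void)
	{
		int rc = do_copy_step();
		int rc2 = report_progress();

		/* the first error wins; a progress-reporting error is only
		 * returned when the copy step itself succeeded */
		return rc ? rc : rc2;
	}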
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index e1d784bae064..f634c11216e6 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -44,6 +44,7 @@
#include <linux/mount.h>
#include "../include/lustre/ll_fiemap.h"
#include "../include/lustre/lustre_ioctl.h"
+#include "../include/lustre_swab.h"
#include "../include/cl_object.h"
#include "llite_internal.h"
@@ -75,60 +76,56 @@ static void ll_file_data_put(struct ll_file_data *fd)
kmem_cache_free(ll_file_data_slab, fd);
}
-void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
- struct lustre_handle *fh)
+/**
+ * Packs all the attributes into @op_data for the CLOSE rpc.
+ */
+static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
+ struct obd_client_handle *och)
{
- op_data->op_fid1 = ll_i2info(inode)->lli_fid;
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ ll_prep_md_op_data(op_data, inode, NULL, NULL,
+ 0, 0, LUSTRE_OPC_ANY, NULL);
+
op_data->op_attr.ia_mode = inode->i_mode;
op_data->op_attr.ia_atime = inode->i_atime;
op_data->op_attr.ia_mtime = inode->i_mtime;
op_data->op_attr.ia_ctime = inode->i_ctime;
op_data->op_attr.ia_size = i_size_read(inode);
+ op_data->op_attr.ia_valid |= ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
+ ATTR_MTIME | ATTR_MTIME_SET |
+ ATTR_CTIME | ATTR_CTIME_SET;
op_data->op_attr_blocks = inode->i_blocks;
op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags);
- op_data->op_ioepoch = ll_i2info(inode)->lli_ioepoch;
- if (fh)
- op_data->op_handle = *fh;
+ op_data->op_handle = och->och_fh;
- if (ll_i2info(inode)->lli_flags & LLIF_DATA_MODIFIED)
+ /*
+ * For HSM: if inode data has been modified, pack it so that
+ * MDT can set data dirty flag in the archive.
+ */
+ if (och->och_flags & FMODE_WRITE &&
+ test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags))
op_data->op_bias |= MDS_DATA_MODIFIED;
}
/**
- * Closes the IO epoch and packs all the attributes into @op_data for
- * the CLOSE rpc.
+ * Perform a close, possibly with a bias.
+ * The meaning of "data" depends on the value of "bias".
+ *
+ * If \a bias is MDS_HSM_RELEASE then \a data is a pointer to the data version.
+ * If \a bias is MDS_CLOSE_LAYOUT_SWAP then \a data is a pointer to the inode to
+ * swap layouts with.
*/
-static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
- struct obd_client_handle *och)
-{
- op_data->op_attr.ia_valid = ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
- ATTR_MTIME | ATTR_MTIME_SET |
- ATTR_CTIME | ATTR_CTIME_SET;
-
- if (!(och->och_flags & FMODE_WRITE))
- goto out;
-
- if (!exp_connect_som(ll_i2mdexp(inode)) || !S_ISREG(inode->i_mode))
- op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
- else
- ll_ioepoch_close(inode, op_data, &och, 0);
-
-out:
- ll_pack_inode2opdata(inode, op_data, &och->och_fh);
- ll_prep_md_op_data(op_data, inode, NULL, NULL,
- 0, 0, LUSTRE_OPC_ANY, NULL);
-}
-
static int ll_close_inode_openhandle(struct obd_export *md_exp,
- struct inode *inode,
struct obd_client_handle *och,
- const __u64 *data_version)
+ struct inode *inode,
+ enum mds_op_bias bias,
+ void *data)
{
struct obd_export *exp = ll_i2mdexp(inode);
struct md_op_data *op_data;
struct ptlrpc_request *req = NULL;
struct obd_device *obd = class_exp2obd(exp);
- int epoch_close = 1;
int rc;
if (!obd) {
@@ -150,65 +147,51 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
}
ll_prepare_close(inode, op_data, och);
- if (data_version) {
- /* Pass in data_version implies release. */
+ switch (bias) {
+ case MDS_CLOSE_LAYOUT_SWAP:
+ LASSERT(data);
+ op_data->op_bias |= MDS_CLOSE_LAYOUT_SWAP;
+ op_data->op_data_version = 0;
+ op_data->op_lease_handle = och->och_lease_handle;
+ op_data->op_fid2 = *ll_inode2fid(data);
+ break;
+
+ case MDS_HSM_RELEASE:
+ LASSERT(data);
op_data->op_bias |= MDS_HSM_RELEASE;
- op_data->op_data_version = *data_version;
+ op_data->op_data_version = *(__u64 *)data;
op_data->op_lease_handle = och->och_lease_handle;
op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
+ break;
+
+ default:
+ LASSERT(!data);
+ break;
}
- epoch_close = op_data->op_flags & MF_EPOCH_CLOSE;
+
rc = md_close(md_exp, op_data, och->och_mod, &req);
- if (rc == -EAGAIN) {
- /* This close must have the epoch closed. */
- LASSERT(epoch_close);
- /* MDS has instructed us to obtain Size-on-MDS attribute from
- * OSTs and send setattr to back to MDS.
- */
- rc = ll_som_update(inode, op_data);
- if (rc) {
- CERROR("%s: inode "DFID" mdc Size-on-MDS update failed: rc = %d\n",
- ll_i2mdexp(inode)->exp_obd->obd_name,
- PFID(ll_inode2fid(inode)), rc);
- rc = 0;
- }
- } else if (rc) {
+ if (rc) {
CERROR("%s: inode "DFID" mdc close failed: rc = %d\n",
ll_i2mdexp(inode)->exp_obd->obd_name,
PFID(ll_inode2fid(inode)), rc);
}
- /* DATA_MODIFIED flag was successfully sent on close, cancel data
- * modification flag.
- */
- if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
- struct ll_inode_info *lli = ll_i2info(inode);
-
- spin_lock(&lli->lli_lock);
- lli->lli_flags &= ~LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
- }
-
- if (rc == 0 && op_data->op_bias & MDS_HSM_RELEASE) {
+ if (op_data->op_bias & (MDS_HSM_RELEASE | MDS_CLOSE_LAYOUT_SWAP) &&
+ !rc) {
struct mdt_body *body;
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (!(body->mbo_valid & OBD_MD_FLRELEASED))
+ if (!(body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED))
rc = -EBUSY;
}
ll_finish_md_op_data(op_data);
out:
- if (exp_connect_som(exp) && !epoch_close &&
- S_ISREG(inode->i_mode) && (och->och_flags & FMODE_WRITE)) {
- ll_queue_done_writing(inode, LLIF_DONE_WRITING);
- } else {
- md_clear_open_replay_data(md_exp, och);
- /* Free @och if it is not waiting for DONE_WRITING. */
- och->och_fh.cookie = DEAD_HANDLE_MAGIC;
- kfree(och);
- }
+ md_clear_open_replay_data(md_exp, och);
+ och->och_fh.cookie = DEAD_HANDLE_MAGIC;
+ kfree(och);
+
if (req) /* This is close request */
ptlrpc_req_finished(req);
return rc;
@@ -252,7 +235,7 @@ int ll_md_real_close(struct inode *inode, fmode_t fmode)
* be closed.
*/
rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
- inode, och, NULL);
+ och, inode, 0, NULL);
}
return rc;
@@ -266,7 +249,9 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
int lockmode;
__u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
struct lustre_handle lockh;
- ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_OPEN} };
+ union ldlm_policy_data policy = {
+ .l_inodebits = { MDS_INODELOCK_OPEN }
+ };
int rc = 0;
/* clear group lock, if present */
@@ -288,7 +273,8 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
}
if (fd->fd_och) {
- rc = ll_close_inode_openhandle(md_exp, inode, fd->fd_och, NULL);
+ rc = ll_close_inode_openhandle(md_exp, fd->fd_och, inode, 0,
+ NULL);
fd->fd_och = NULL;
goto out;
}
@@ -437,20 +423,6 @@ out:
return rc;
}
-/**
- * Assign an obtained @ioepoch to client's inode. No lock is needed, MDS does
- * not believe attributes if a few ioepoch holders exist. Attributes for
- * previous ioepoch if new one is opened are also skipped by MDS.
- */
-void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch)
-{
- if (ioepoch && lli->lli_ioepoch != ioepoch) {
- lli->lli_ioepoch = ioepoch;
- CDEBUG(D_INODE, "Epoch %llu opened on "DFID"\n",
- ioepoch, PFID(&lli->lli_fid));
- }
-}
-
static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
struct obd_client_handle *och)
{
@@ -470,23 +442,17 @@ static int ll_local_open(struct file *file, struct lookup_intent *it,
struct ll_file_data *fd, struct obd_client_handle *och)
{
struct inode *inode = file_inode(file);
- struct ll_inode_info *lli = ll_i2info(inode);
LASSERT(!LUSTRE_FPRIVATE(file));
LASSERT(fd);
if (och) {
- struct mdt_body *body;
int rc;
rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
if (rc != 0)
return rc;
-
- body = req_capsule_server_get(&it->it_request->rq_pill,
- &RMF_MDT_BODY);
- ll_ioepoch_open(lli, body->mbo_ioepoch);
}
LUSTRE_FPRIVATE(file) = fd;
@@ -677,12 +643,6 @@ restart:
if (!S_ISREG(inode->i_mode))
goto out_och_free;
- if (!lli->lli_has_smd &&
- (cl_is_lov_delay_create(file->f_flags) ||
- (file->f_mode & FMODE_WRITE) == 0)) {
- CDEBUG(D_INODE, "object creation was delayed\n");
- goto out_och_free;
- }
cl_lov_delay_create_clear(&file->f_flags);
goto out_och_free;
@@ -867,7 +827,7 @@ out_close:
it.it_lock_mode = 0;
och->och_lease_handle.cookie = 0ULL;
}
- rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL);
+ rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, och, inode, 0, NULL);
if (rc2 < 0)
CERROR("%s: error closing file "DFID": %d\n",
ll_get_fsname(inode->i_sb, NULL, 0),
@@ -881,6 +841,69 @@ out:
}
/**
+ * Check whether a layout swap can be done between two inodes.
+ *
+ * \param[in] inode1 First inode to check
+ * \param[in] inode2 Second inode to check
+ *
+ * \retval 0 on success, layout swap can be performed between both inodes
+ * \retval negative error code if requirements are not met
+ */
+static int ll_check_swap_layouts_validity(struct inode *inode1,
+ struct inode *inode2)
+{
+ if (!S_ISREG(inode1->i_mode) || !S_ISREG(inode2->i_mode))
+ return -EINVAL;
+
+ if (inode_permission(inode1, MAY_WRITE) ||
+ inode_permission(inode2, MAY_WRITE))
+ return -EPERM;
+
+ if (inode1->i_sb != inode2->i_sb)
+ return -EXDEV;
+
+ return 0;
+}
+
+static int ll_swap_layouts_close(struct obd_client_handle *och,
+ struct inode *inode, struct inode *inode2)
+{
+ const struct lu_fid *fid1 = ll_inode2fid(inode);
+ const struct lu_fid *fid2;
+ int rc;
+
+ CDEBUG(D_INODE, "%s: biased close of file " DFID "\n",
+ ll_get_fsname(inode->i_sb, NULL, 0), PFID(fid1));
+
+ rc = ll_check_swap_layouts_validity(inode, inode2);
+ if (rc < 0)
+ goto out_free_och;
+
+ /* We now know that inode2 is a lustre inode */
+ fid2 = ll_inode2fid(inode2);
+
+ rc = lu_fid_cmp(fid1, fid2);
+ if (!rc) {
+ rc = -EINVAL;
+ goto out_free_och;
+ }
+
+ /*
+ * Close the file and swap layouts between inode & inode2.
+ * NB: lease lock handle is released in mdc_close_layout_swap_pack()
+ * because we still need it to pack l_remote_handle to MDT.
+ */
+ rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, och, inode,
+ MDS_CLOSE_LAYOUT_SWAP, inode2);
+
+ och = NULL; /* freed in ll_close_inode_openhandle() */
+
+out_free_och:
+ kfree(och);
+ return rc;
+}
+
+/**
* Release lease and close the file.
* It will check if the lease has ever broken.
*/
@@ -907,84 +930,7 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
*lease_broken = cancelled;
return ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
- inode, och, NULL);
-}
-
-/* Fills the obdo with the attributes for the lsm */
-static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
- struct obdo *obdo, __u64 ioepoch, int dv_flags)
-{
- struct ptlrpc_request_set *set;
- struct obd_info oinfo = { };
- int rc;
-
- LASSERT(lsm);
-
- oinfo.oi_md = lsm;
- oinfo.oi_oa = obdo;
- oinfo.oi_oa->o_oi = lsm->lsm_oi;
- oinfo.oi_oa->o_mode = S_IFREG;
- oinfo.oi_oa->o_ioepoch = ioepoch;
- oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLTYPE |
- OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
- OBD_MD_FLBLKSZ | OBD_MD_FLATIME |
- OBD_MD_FLMTIME | OBD_MD_FLCTIME |
- OBD_MD_FLGROUP | OBD_MD_FLEPOCH |
- OBD_MD_FLDATAVERSION;
- if (dv_flags & (LL_DV_WR_FLUSH | LL_DV_RD_FLUSH)) {
- oinfo.oi_oa->o_valid |= OBD_MD_FLFLAGS;
- oinfo.oi_oa->o_flags |= OBD_FL_SRVLOCK;
- if (dv_flags & LL_DV_WR_FLUSH)
- oinfo.oi_oa->o_flags |= OBD_FL_FLUSH;
- }
-
- set = ptlrpc_prep_set();
- if (!set) {
- CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM);
- rc = -ENOMEM;
- } else {
- rc = obd_getattr_async(exp, &oinfo, set);
- if (rc == 0)
- rc = ptlrpc_set_wait(set);
- ptlrpc_set_destroy(set);
- }
- if (rc == 0) {
- oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
- OBD_MD_FLATIME | OBD_MD_FLMTIME |
- OBD_MD_FLCTIME | OBD_MD_FLSIZE |
- OBD_MD_FLDATAVERSION | OBD_MD_FLFLAGS);
- if (dv_flags & LL_DV_WR_FLUSH &&
- !(oinfo.oi_oa->o_valid & OBD_MD_FLFLAGS &&
- oinfo.oi_oa->o_flags & OBD_FL_FLUSH))
- return -ENOTSUPP;
- }
- return rc;
-}
-
-/**
- * Performs the getattr on the inode and updates its fields.
- * If @sync != 0, perform the getattr under the server-side lock.
- */
-int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
- __u64 ioepoch, int sync)
-{
- struct lov_stripe_md *lsm;
- int rc;
-
- lsm = ccc_inode_lsm_get(inode);
- rc = ll_lsm_getattr(lsm, ll_i2dtexp(inode),
- obdo, ioepoch, sync ? LL_DV_RD_FLUSH : 0);
- if (rc == 0) {
- struct ost_id *oi = lsm ? &lsm->lsm_oi : &obdo->o_oi;
-
- obdo_refresh_inode(inode, obdo, obdo->o_valid);
- CDEBUG(D_INODE, "objid " DOSTID " size %llu, blocks %llu, blksize %lu\n",
- POSTID(oi), i_size_read(inode),
- (unsigned long long)inode->i_blocks,
- 1UL << inode->i_blkbits);
- }
- ccc_inode_lsm_put(inode, lsm);
- return rc;
+ och, inode, 0, NULL);
}
int ll_merge_attr(const struct lu_env *env, struct inode *inode)
@@ -1043,23 +989,6 @@ out_size_unlock:
return rc;
}
-int ll_glimpse_ioctl(struct ll_sb_info *sbi, struct lov_stripe_md *lsm,
- lstat_t *st)
-{
- struct obdo obdo = { 0 };
- int rc;
-
- rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, &obdo, 0, 0);
- if (rc == 0) {
- st->st_size = obdo.o_size;
- st->st_blocks = obdo.o_blocks;
- st->st_mtime = obdo.o_mtime;
- st->st_atime = obdo.o_atime;
- st->st_ctime = obdo.o_ctime;
- }
- return rc;
-}
-
static bool file_is_noatime(const struct file *file)
{
const struct vfsmount *mnt = file->f_path.mnt;
@@ -1117,9 +1046,11 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
{
struct ll_inode_info *lli = ll_i2info(file_inode(file));
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct vvp_io *vio = vvp_env_io(env);
struct range_lock range;
struct cl_io *io;
- ssize_t result;
+ ssize_t result = 0;
+ int rc = 0;
CDEBUG(D_VFSTRACE, "file: %pD, type: %d ppos: %llu, count: %zu\n",
file, iot, *ppos, count);
@@ -1151,18 +1082,15 @@ restart:
CDEBUG(D_VFSTRACE, "Range lock [%llu, %llu]\n",
range.rl_node.in_extent.start,
range.rl_node.in_extent.end);
- result = range_lock(&lli->lli_write_tree,
- &range);
- if (result < 0)
+ rc = range_lock(&lli->lli_write_tree, &range);
+ if (rc < 0)
goto out;
range_locked = true;
}
- down_read(&lli->lli_trunc_sem);
ll_cl_add(file, env, io);
- result = cl_io_loop(env, io);
+ rc = cl_io_loop(env, io);
ll_cl_remove(file, env);
- up_read(&lli->lli_trunc_sem);
if (range_locked) {
CDEBUG(D_VFSTRACE, "Range unlock [%llu, %llu]\n",
range.rl_node.in_extent.start,
@@ -1171,24 +1099,26 @@ restart:
}
} else {
/* cl_io_rw_init() handled IO */
- result = io->ci_result;
+ rc = io->ci_result;
}
if (io->ci_nob > 0) {
result = io->ci_nob;
+ count -= io->ci_nob;
*ppos = io->u.ci_wr.wr.crw_pos;
+
+ /* prepare IO restart */
+ if (count > 0)
+ args->u.normal.via_iter = vio->vui_iter;
}
- goto out;
out:
cl_io_fini(env, io);
- /* If any bit been read/written (result != 0), we just return
- * short read/write instead of restart io.
- */
- if ((result == 0 || result == -ENODATA) && io->ci_need_restart) {
- CDEBUG(D_VFSTRACE, "Restart %s on %pD from %lld, count:%zu\n",
+
+ if ((!rc || rc == -ENODATA) && count > 0 && io->ci_need_restart) {
+ CDEBUG(D_VFSTRACE, "%s: restart %s from %lld, count:%zu, result: %zd\n",
+ file_dentry(file)->d_name.name,
iot == CIT_READ ? "read" : "write",
- file, *ppos, count);
- LASSERTF(io->ci_nob == 0, "%zd\n", io->ci_nob);
+ *ppos, count, result);
goto restart;
}
@@ -1201,13 +1131,19 @@ out:
ll_stats_ops_tally(ll_i2sbi(file_inode(file)),
LPROC_LL_WRITE_BYTES, result);
fd->fd_write_failed = false;
- } else if (result != -ERESTARTSYS) {
+ } else if (!result && !rc) {
+ rc = io->ci_result;
+ if (rc < 0)
+ fd->fd_write_failed = true;
+ else
+ fd->fd_write_failed = false;
+ } else if (rc != -ERESTARTSYS) {
fd->fd_write_failed = true;
}
}
CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result);
- return result;
+ return result > 0 ? result : rc;
}
static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
@@ -1259,37 +1195,22 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
__u64 flags, struct lov_user_md *lum,
int lum_size)
{
- struct lov_stripe_md *lsm = NULL;
struct lookup_intent oit = {
.it_op = IT_OPEN,
.it_flags = flags | MDS_OPEN_BY_FID,
};
int rc = 0;
- lsm = ccc_inode_lsm_get(inode);
- if (lsm) {
- ccc_inode_lsm_put(inode, lsm);
- CDEBUG(D_IOCTL, "stripe already exists for inode "DFID"\n",
- PFID(ll_inode2fid(inode)));
- rc = -EEXIST;
- goto out;
- }
-
ll_inode_size_lock(inode);
rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
if (rc < 0)
goto out_unlock;
- rc = oit.it_status;
- if (rc < 0)
- goto out_unlock;
ll_release_openhandle(inode, &oit);
out_unlock:
ll_inode_size_unlock(inode);
ll_intent_release(&oit);
- ccc_inode_lsm_put(inode, lsm);
-out:
return rc;
}
@@ -1566,7 +1487,7 @@ int ll_release_openhandle(struct inode *inode, struct lookup_intent *it)
ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
- inode, och, NULL);
+ och, inode, 0, NULL);
out:
/* this one is in place of ll_file_open */
if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
@@ -1579,15 +1500,17 @@ out:
/**
* Get size for inode for which FIEMAP mapping is requested.
* Make the FIEMAP get_info call and returns the result.
+ *
+ * \param fiemap	kernel buffer to hold the extents
+ * \param num_bytes kernel buffer size
*/
-static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
+static int ll_do_fiemap(struct inode *inode, struct fiemap *fiemap,
size_t num_bytes)
{
- struct obd_export *exp = ll_i2dtexp(inode);
- struct lov_stripe_md *lsm = NULL;
- struct ll_fiemap_info_key fm_key = { .name = KEY_FIEMAP, };
- __u32 vallen = num_bytes;
- int rc;
+ struct ll_fiemap_info_key fmkey = { .lfik_name = KEY_FIEMAP, };
+ struct lu_env *env;
+ int refcheck;
+ int rc = 0;
/* Checks for fiemap flags */
if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
@@ -1602,21 +1525,9 @@ static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
return rc;
}
- lsm = ccc_inode_lsm_get(inode);
- if (!lsm)
- return -ENOENT;
-
- /* If the stripe_count > 1 and the application does not understand
- * DEVICE_ORDER flag, then it cannot interpret the extents correctly.
- */
- if (lsm->lsm_stripe_count > 1 &&
- !(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) {
- rc = -EOPNOTSUPP;
- goto out;
- }
-
- fm_key.oa.o_oi = lsm->lsm_oi;
- fm_key.oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
if (i_size_read(inode) == 0) {
rc = ll_glimpse_size(inode);
@@ -1624,24 +1535,23 @@ static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
goto out;
}
- obdo_from_inode(&fm_key.oa, inode, OBD_MD_FLSIZE);
- obdo_set_parent_fid(&fm_key.oa, &ll_i2info(inode)->lli_fid);
+ fmkey.lfik_oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
+ obdo_from_inode(&fmkey.lfik_oa, inode, OBD_MD_FLSIZE);
+ obdo_set_parent_fid(&fmkey.lfik_oa, &ll_i2info(inode)->lli_fid);
+
/* If filesize is 0, then there would be no objects for mapping */
- if (fm_key.oa.o_size == 0) {
+ if (fmkey.lfik_oa.o_size == 0) {
fiemap->fm_mapped_extents = 0;
rc = 0;
goto out;
}
- memcpy(&fm_key.fiemap, fiemap, sizeof(*fiemap));
-
- rc = obd_get_info(NULL, exp, sizeof(fm_key), &fm_key, &vallen,
- fiemap, lsm);
- if (rc)
- CERROR("obd_get_info failed: rc = %d\n", rc);
+ memcpy(&fmkey.lfik_fiemap, fiemap, sizeof(*fiemap));
+ rc = cl_object_fiemap(env, ll_i2info(inode)->lli_clob,
+ &fmkey, fiemap, &num_bytes);
out:
- ccc_inode_lsm_put(inode, lsm);
+ cl_env_put(env, &refcheck);
return rc;
}
@@ -1689,113 +1599,56 @@ gf_free:
return rc;
}
-static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg)
-{
- struct ll_user_fiemap *fiemap_s;
- size_t num_bytes, ret_bytes;
- unsigned int extent_count;
- int rc = 0;
-
- /* Get the extent count so we can calculate the size of
- * required fiemap buffer
- */
- if (get_user(extent_count,
- &((struct ll_user_fiemap __user *)arg)->fm_extent_count))
- return -EFAULT;
-
- if (extent_count >=
- (SIZE_MAX - sizeof(*fiemap_s)) / sizeof(struct ll_fiemap_extent))
- return -EINVAL;
- num_bytes = sizeof(*fiemap_s) + (extent_count *
- sizeof(struct ll_fiemap_extent));
-
- fiemap_s = libcfs_kvzalloc(num_bytes, GFP_NOFS);
- if (!fiemap_s)
- return -ENOMEM;
-
- /* get the fiemap value */
- if (copy_from_user(fiemap_s, (struct ll_user_fiemap __user *)arg,
- sizeof(*fiemap_s))) {
- rc = -EFAULT;
- goto error;
- }
-
- /* If fm_extent_count is non-zero, read the first extent since
- * it is used to calculate end_offset and device from previous
- * fiemap call.
- */
- if (extent_count) {
- if (copy_from_user(&fiemap_s->fm_extents[0],
- (char __user *)arg + sizeof(*fiemap_s),
- sizeof(struct ll_fiemap_extent))) {
- rc = -EFAULT;
- goto error;
- }
- }
-
- rc = ll_do_fiemap(inode, fiemap_s, num_bytes);
- if (rc)
- goto error;
-
- ret_bytes = sizeof(struct ll_user_fiemap);
-
- if (extent_count != 0)
- ret_bytes += (fiemap_s->fm_mapped_extents *
- sizeof(struct ll_fiemap_extent));
-
- if (copy_to_user((void __user *)arg, fiemap_s, ret_bytes))
- rc = -EFAULT;
-
-error:
- kvfree(fiemap_s);
- return rc;
-}
-
/*
* Read the data_version for inode.
*
* This value is computed using stripe object version on OST.
* Version is computed using server side locking.
*
- * @param sync if do sync on the OST side;
+ * @param flags whether to do sync on the OST side;
* 0: no sync
* LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs
* LL_DV_WR_FLUSH: drop all caching pages, LCK_PW on OSTs
*/
int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
{
- struct lov_stripe_md *lsm = NULL;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct obdo *obdo = NULL;
- int rc;
+ struct cl_object *obj = ll_i2info(inode)->lli_clob;
+ struct lu_env *env;
+ struct cl_io *io;
+ int refcheck;
+ int result;
- /* If no stripe, we consider version is 0. */
- lsm = ccc_inode_lsm_get(inode);
- if (!lsm_has_objects(lsm)) {
+	/* If no file object has been initialized, consider its version to be 0. */
+ if (!obj) {
*data_version = 0;
- CDEBUG(D_INODE, "No object for inode\n");
- rc = 0;
- goto out;
+ return 0;
}
- obdo = kzalloc(sizeof(*obdo), GFP_NOFS);
- if (!obdo) {
- rc = -ENOMEM;
- goto out;
- }
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
- rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, obdo, 0, flags);
- if (rc == 0) {
- if (!(obdo->o_valid & OBD_MD_FLDATAVERSION))
- rc = -EOPNOTSUPP;
- else
- *data_version = obdo->o_data_version;
- }
+ io = vvp_env_thread_io(env);
+ io->ci_obj = obj;
+ io->u.ci_data_version.dv_data_version = 0;
+ io->u.ci_data_version.dv_flags = flags;
- kfree(obdo);
-out:
- ccc_inode_lsm_put(inode, lsm);
- return rc;
+restart:
+ if (!cl_io_init(env, io, CIT_DATA_VERSION, io->ci_obj))
+ result = cl_io_loop(env, io);
+ else
+ result = io->ci_result;
+
+ *data_version = io->u.ci_data_version.dv_data_version;
+
+ cl_io_fini(env, io);
+
+ if (unlikely(io->ci_need_restart))
+ goto restart;
+
+ cl_env_put(env, &refcheck);
+
+ return result;
}
/*
@@ -1803,11 +1656,11 @@ out:
*/
int ll_hsm_release(struct inode *inode)
{
- struct cl_env_nest nest;
struct lu_env *env;
struct obd_client_handle *och = NULL;
__u64 data_version = 0;
int rc;
+ int refcheck;
CDEBUG(D_INODE, "%s: Releasing file "DFID".\n",
ll_get_fsname(inode->i_sb, NULL, 0),
@@ -1824,21 +1677,21 @@ int ll_hsm_release(struct inode *inode)
if (rc != 0)
goto out;
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env)) {
rc = PTR_ERR(env);
goto out;
}
ll_merge_attr(env, inode);
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
/* Release the file.
* NB: lease lock handle is released in mdc_hsm_release_pack() because
* we still need it to pack l_remote_handle to MDT.
*/
- rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och,
- &data_version);
+ rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, och, inode,
+ MDS_HSM_RELEASE, &data_version);
och = NULL;
out:
@@ -1849,10 +1702,12 @@ out:
}
struct ll_swap_stack {
- struct iattr ia1, ia2;
- __u64 dv1, dv2;
- struct inode *inode1, *inode2;
- bool check_dv1, check_dv2;
+ u64 dv1;
+ u64 dv2;
+ struct inode *inode1;
+ struct inode *inode2;
+ bool check_dv1;
+ bool check_dv2;
};
static int ll_swap_layouts(struct file *file1, struct file *file2,
@@ -1872,21 +1727,9 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
llss->inode1 = file_inode(file1);
llss->inode2 = file_inode(file2);
- if (!S_ISREG(llss->inode2->i_mode)) {
- rc = -EINVAL;
- goto free;
- }
-
- if (inode_permission(llss->inode1, MAY_WRITE) ||
- inode_permission(llss->inode2, MAY_WRITE)) {
- rc = -EPERM;
+ rc = ll_check_swap_layouts_validity(llss->inode1, llss->inode2);
+ if (rc < 0)
goto free;
- }
-
- if (llss->inode2->i_sb != llss->inode1->i_sb) {
- rc = -EXDEV;
- goto free;
- }
/* we use 2 bool because it is easier to swap than 2 bits */
if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV1)
@@ -1900,10 +1743,8 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
llss->dv2 = lsl->sl_dv2;
rc = lu_fid_cmp(ll_inode2fid(llss->inode1), ll_inode2fid(llss->inode2));
- if (rc == 0) /* same file, done! */ {
- rc = 0;
+ if (!rc) /* same file, done! */
goto free;
- }
if (rc < 0) { /* sequentialize it */
swap(llss->inode1, llss->inode2);
@@ -1925,19 +1766,6 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
}
}
- /* to be able to restore mtime and atime after swap
- * we need to first save them
- */
- if (lsl->sl_flags &
- (SWAP_LAYOUTS_KEEP_MTIME | SWAP_LAYOUTS_KEEP_ATIME)) {
- llss->ia1.ia_mtime = llss->inode1->i_mtime;
- llss->ia1.ia_atime = llss->inode1->i_atime;
- llss->ia1.ia_valid = ATTR_MTIME | ATTR_ATIME;
- llss->ia2.ia_mtime = llss->inode2->i_mtime;
- llss->ia2.ia_atime = llss->inode2->i_atime;
- llss->ia2.ia_valid = ATTR_MTIME | ATTR_ATIME;
- }
-
/* ultimate check, before swapping the layouts we check if
* dataversion has changed (if requested)
*/
@@ -1987,39 +1815,6 @@ putgl:
ll_put_grouplock(llss->inode1, file1, gid);
}
- /* rc can be set from obd_iocontrol() or from a GOTO(putgl, ...) */
- if (rc != 0)
- goto free;
-
- /* clear useless flags */
- if (!(lsl->sl_flags & SWAP_LAYOUTS_KEEP_MTIME)) {
- llss->ia1.ia_valid &= ~ATTR_MTIME;
- llss->ia2.ia_valid &= ~ATTR_MTIME;
- }
-
- if (!(lsl->sl_flags & SWAP_LAYOUTS_KEEP_ATIME)) {
- llss->ia1.ia_valid &= ~ATTR_ATIME;
- llss->ia2.ia_valid &= ~ATTR_ATIME;
- }
-
- /* update time if requested */
- rc = 0;
- if (llss->ia2.ia_valid != 0) {
- inode_lock(llss->inode1);
- rc = ll_setattr(file1->f_path.dentry, &llss->ia2);
- inode_unlock(llss->inode1);
- }
-
- if (llss->ia1.ia_valid != 0) {
- int rc1;
-
- inode_lock(llss->inode2);
- rc1 = ll_setattr(file2->f_path.dentry, &llss->ia1);
- inode_unlock(llss->inode2);
- if (rc == 0)
- rc = rc1;
- }
-
free:
kfree(llss);
@@ -2176,24 +1971,52 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
sizeof(struct lustre_swap_layouts)))
return -EFAULT;
- if ((file->f_flags & O_ACCMODE) == 0) /* O_RDONLY */
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
return -EPERM;
file2 = fget(lsl.sl_fd);
if (!file2)
return -EBADF;
- rc = -EPERM;
- if ((file2->f_flags & O_ACCMODE) != 0) /* O_WRONLY or O_RDWR */
+ /* O_WRONLY or O_RDWR */
+ if ((file2->f_flags & O_ACCMODE) == O_RDONLY) {
+ rc = -EPERM;
+ goto out;
+ }
+
+ if (lsl.sl_flags & SWAP_LAYOUTS_CLOSE) {
+ struct obd_client_handle *och = NULL;
+ struct ll_inode_info *lli;
+ struct inode *inode2;
+
+ if (lsl.sl_flags != SWAP_LAYOUTS_CLOSE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ lli = ll_i2info(inode);
+ mutex_lock(&lli->lli_och_mutex);
+ if (fd->fd_lease_och) {
+ och = fd->fd_lease_och;
+ fd->fd_lease_och = NULL;
+ }
+ mutex_unlock(&lli->lli_och_mutex);
+ if (!och) {
+ rc = -ENOLCK;
+ goto out;
+ }
+ inode2 = file_inode(file2);
+ rc = ll_swap_layouts_close(och, inode, inode2);
+ } else {
rc = ll_swap_layouts(file, file2, &lsl);
+ }
+out:
fput(file2);
return rc;
}
case LL_IOC_LOV_GETSTRIPE:
return ll_file_getstripe(inode,
(struct lov_user_md __user *)arg);
- case FSFILT_IOC_FIEMAP:
- return ll_ioctl_fiemap(inode, arg);
case FSFILT_IOC_GETFLAGS:
case FSFILT_IOC_SETFLAGS:
return ll_iocontrol(inode, file, cmd, arg);
@@ -2489,17 +2312,17 @@ static int ll_flush(struct file *file, fl_owner_t id)
int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
enum cl_fsync_mode mode, int ignore_layout)
{
- struct cl_env_nest nest;
struct lu_env *env;
struct cl_io *io;
struct cl_fsync_io *fio;
int result;
+ int refcheck;
if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL &&
mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL)
return -EINVAL;
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env))
return PTR_ERR(env);
@@ -2522,7 +2345,7 @@ int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
if (result == 0)
result = fio->fi_nr_written;
cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
return result;
}
@@ -2549,9 +2372,11 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
lli->lli_async_rc = 0;
if (rc == 0)
rc = err;
- err = lov_read_and_clear_async_rc(lli->lli_clob);
- if (rc == 0)
- rc = err;
+ if (lli->lli_clob) {
+ err = lov_read_and_clear_async_rc(lli->lli_clob);
+ if (rc == 0)
+ rc = err;
+ }
}
err = md_sync(ll_i2sbi(inode)->ll_md_exp, ll_inode2fid(inode), &req);
@@ -2588,7 +2413,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
};
struct md_op_data *op_data;
struct lustre_handle lockh = {0};
- ldlm_policy_data_t flock = { {0} };
+ union ldlm_policy_data flock = { { 0 } };
int fl_type = file_lock->fl_type;
__u64 flags = 0;
int rc;
@@ -2707,7 +2532,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
}
int ll_get_fid_by_name(struct inode *parent, const char *name,
- int namelen, struct lu_fid *fid)
+ int namelen, struct lu_fid *fid,
+ struct inode **inode)
{
struct md_op_data *op_data = NULL;
struct ptlrpc_request *req;
@@ -2719,7 +2545,7 @@ int ll_get_fid_by_name(struct inode *parent, const char *name,
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- op_data->op_valid = OBD_MD_FLID;
+ op_data->op_valid = OBD_MD_FLID | OBD_MD_FLTYPE;
rc = md_getattr_name(ll_i2sbi(parent)->ll_md_exp, op_data, &req);
ll_finish_md_op_data(op_data);
if (rc < 0)
@@ -2732,6 +2558,9 @@ int ll_get_fid_by_name(struct inode *parent, const char *name,
}
if (fid)
*fid = body->mbo_fid1;
+
+ if (inode)
+ rc = ll_prep_inode(inode, req, parent->i_sb, NULL);
out_req:
ptlrpc_req_finished(req);
return rc;
@@ -2741,9 +2570,12 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
const char *name, int namelen)
{
struct ptlrpc_request *request = NULL;
+ struct obd_client_handle *och = NULL;
struct inode *child_inode = NULL;
struct dentry *dchild = NULL;
struct md_op_data *op_data;
+ struct mdt_body *body;
+ u64 data_version = 0;
struct qstr qstr;
int rc;
@@ -2762,22 +2594,25 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
dchild = d_lookup(file_dentry(file), &qstr);
if (dchild) {
op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
- if (dchild->d_inode) {
+ if (dchild->d_inode)
child_inode = igrab(dchild->d_inode);
- if (child_inode) {
- inode_lock(child_inode);
- op_data->op_fid3 = *ll_inode2fid(child_inode);
- ll_invalidate_aliases(child_inode);
- }
- }
dput(dchild);
- } else {
+ }
+
+ if (!child_inode) {
rc = ll_get_fid_by_name(parent, name, namelen,
- &op_data->op_fid3);
+ &op_data->op_fid3, &child_inode);
if (rc)
goto out_free;
}
+ if (!child_inode) {
+ rc = -EINVAL;
+ goto out_free;
+ }
+
+ inode_lock(child_inode);
+ op_data->op_fid3 = *ll_inode2fid(child_inode);
if (!fid_is_sane(&op_data->op_fid3)) {
CERROR("%s: migrate %s, but fid "DFID" is insane\n",
ll_get_fsname(parent->i_sb, NULL, 0), name,
@@ -2796,6 +2631,26 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
rc = 0;
goto out_free;
}
+again:
+ if (S_ISREG(child_inode->i_mode)) {
+ och = ll_lease_open(child_inode, NULL, FMODE_WRITE, 0);
+ if (IS_ERR(och)) {
+ rc = PTR_ERR(och);
+ och = NULL;
+ goto out_free;
+ }
+
+ rc = ll_data_version(child_inode, &data_version,
+ LL_DV_WR_FLUSH);
+ if (rc)
+ goto out_free;
+
+ op_data->op_handle = och->och_fh;
+ op_data->op_data = och->och_mod;
+ op_data->op_data_version = data_version;
+ op_data->op_lease_handle = och->och_lease_handle;
+ op_data->op_bias |= MDS_RENAME_MIGRATE;
+ }
op_data->op_mds = mdtidx;
op_data->op_cli_flags = CLI_MIGRATE;
@@ -2804,10 +2659,32 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
if (!rc)
ll_update_times(request, parent);
- ptlrpc_req_finished(request);
+ body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
+ if (!body) {
+ rc = -EPROTO;
+ goto out_free;
+ }
+
+ /*
+	 * If the server did release the layout lock, then we clean up
+	 * the client och here; otherwise it is released in out_free:
+ */
+ if (och && body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED) {
+ obd_mod_put(och->och_mod);
+ md_clear_open_replay_data(ll_i2sbi(parent)->ll_md_exp, och);
+ och->och_fh.cookie = DEAD_HANDLE_MAGIC;
+ kfree(och);
+ och = NULL;
+ }
+ ptlrpc_req_finished(request);
+ /* Try again if the file layout has changed. */
+ if (rc == -EAGAIN && S_ISREG(child_inode->i_mode))
+ goto again;
out_free:
if (child_inode) {
+ if (och) /* close the file */
+ ll_lease_close(och, child_inode, NULL);
clear_nlink(child_inode);
inode_unlock(child_inode);
iput(child_inode);
@@ -2837,7 +2714,7 @@ int ll_have_md_lock(struct inode *inode, __u64 *bits,
enum ldlm_mode l_req_mode)
{
struct lustre_handle lockh;
- ldlm_policy_data_t policy;
+ union ldlm_policy_data policy;
enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ?
(LCK_CR | LCK_CW | LCK_PR | LCK_PW) : l_req_mode;
struct lu_fid *fid;
@@ -2878,7 +2755,7 @@ enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
struct lustre_handle *lockh, __u64 flags,
enum ldlm_mode mode)
{
- ldlm_policy_data_t policy = { .l_inodebits = {bits} };
+ union ldlm_policy_data policy = { .l_inodebits = { bits } };
struct lu_fid *fid;
fid = &ll_i2info(inode)->lli_fid;
@@ -2893,6 +2770,13 @@ static int ll_inode_revalidate_fini(struct inode *inode, int rc)
/* Already unlinked. Just update nlink and return success */
if (rc == -ENOENT) {
clear_nlink(inode);
+		/* If it is a striped directory and there is a bad stripe,
+		 * revalidate the dentry again instead of returning an
+		 * error.
+		 */
+ if (S_ISDIR(inode->i_mode) && ll_i2info(inode)->lli_lsm_md)
+ return 0;
+
/* This path cannot be hit for regular files unless in
* case of obscure races, so no need to validate size.
*/
@@ -3040,6 +2924,8 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_mtime;
LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_ctime;
} else {
+ struct ll_inode_info *lli = ll_i2info(inode);
+
/* In case of restore, the MDT has the right size and has
* already send it back without granting the layout lock,
* inode is up-to-date so glimpse is useless.
@@ -3047,7 +2933,7 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
* restore the MDT holds the layout lock so the glimpse will
* block up to the end of restore (getattr will block)
*/
- if (!(ll_i2info(inode)->lli_flags & LLIF_FILE_RESTORING))
+ if (!test_bit(LLIF_FILE_RESTORING, &lli->lli_flags))
rc = ll_glimpse_size(inode);
}
return rc;
@@ -3095,13 +2981,12 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
{
int rc;
size_t num_bytes;
- struct ll_user_fiemap *fiemap;
+ struct fiemap *fiemap;
unsigned int extent_count = fieinfo->fi_extents_max;
num_bytes = sizeof(*fiemap) + (extent_count *
- sizeof(struct ll_fiemap_extent));
+ sizeof(struct fiemap_extent));
fiemap = libcfs_kvzalloc(num_bytes, GFP_NOFS);
-
if (!fiemap)
return -ENOMEM;
@@ -3109,9 +2994,10 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
fiemap->fm_extent_count = fieinfo->fi_extents_max;
fiemap->fm_start = start;
fiemap->fm_length = len;
+
if (extent_count > 0 &&
copy_from_user(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
- sizeof(struct ll_fiemap_extent)) != 0) {
+ sizeof(struct fiemap_extent))) {
rc = -EFAULT;
goto out;
}
@@ -3123,11 +3009,10 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (extent_count > 0 &&
copy_to_user(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
fiemap->fm_mapped_extents *
- sizeof(struct ll_fiemap_extent)) != 0) {
+ sizeof(struct fiemap_extent))) {
rc = -EFAULT;
goto out;
}
-
out:
kvfree(fiemap);
return rc;
@@ -3370,35 +3255,50 @@ ll_iocontrol_call(struct inode *inode, struct file *file,
int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct cl_env_nest nest;
+ struct cl_object *obj = lli->lli_clob;
struct lu_env *env;
- int result;
+ int rc;
+ int refcheck;
- if (!lli->lli_clob)
+ if (!obj)
return 0;
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env))
return PTR_ERR(env);
- result = cl_conf_set(env, lli->lli_clob, conf);
- cl_env_nested_put(&nest, env);
+ rc = cl_conf_set(env, obj, conf);
+ if (rc < 0)
+ goto out;
if (conf->coc_opc == OBJECT_CONF_SET) {
struct ldlm_lock *lock = conf->coc_lock;
+ struct cl_layout cl = {
+ .cl_layout_gen = 0,
+ };
LASSERT(lock);
LASSERT(ldlm_has_layout(lock));
- if (result == 0) {
- /* it can only be allowed to match after layout is
- * applied to inode otherwise false layout would be
- * seen. Applying layout should happen before dropping
- * the intent lock.
- */
- ldlm_lock_allow_match(lock);
- }
+
+ /* it can only be allowed to match after layout is
+ * applied to inode otherwise false layout would be
+ * seen. Applying layout should happen before dropping
+ * the intent lock.
+ */
+ ldlm_lock_allow_match(lock);
+
+ rc = cl_object_layout_get(env, obj, &cl);
+ if (rc < 0)
+ goto out;
+
+ CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n",
+ PFID(&lli->lli_fid), ll_layout_version_get(lli),
+ cl.cl_layout_gen);
+ ll_layout_version_set(lli, cl.cl_layout_gen);
}
- return result;
+out:
+ cl_env_put(env, &refcheck);
+ return rc;
}
/* Fetch layout from MDT with getxattr request, if it's not ready yet */
@@ -3477,12 +3377,11 @@ out:
* in this function.
*/
static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
- struct inode *inode, __u32 *gen, bool reconf)
+ struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ldlm_lock *lock;
- struct lustre_md md = { NULL };
struct cl_object_conf conf;
int rc = 0;
bool lvb_ready;
@@ -3494,8 +3393,8 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
LASSERT(lock);
LASSERT(ldlm_has_layout(lock));
- LDLM_DEBUG(lock, "File "DFID"(%p) being reconfigured: %d",
- PFID(&lli->lli_fid), inode, reconf);
+ LDLM_DEBUG(lock, "File " DFID "(%p) being reconfigured",
+ PFID(&lli->lli_fid), inode);
/* in case this is a caching lock and reinstate with new inode */
md_set_lock_data(sbi->ll_md_exp, lockh, inode, NULL);
@@ -3506,15 +3405,8 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
/* checking lvb_ready is racy but this is okay. The worst case is
* that multi processes may configure the file on the same time.
*/
- if (lvb_ready || !reconf) {
- rc = -ENODATA;
- if (lvb_ready) {
- /* layout_gen must be valid if layout lock is not
- * cancelled and stripe has already set
- */
- *gen = ll_layout_version_get(lli);
- rc = 0;
- }
+ if (lvb_ready) {
+ rc = 0;
goto out;
}
@@ -3524,39 +3416,19 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
/* for layout lock, lmm is returned in lock's lvb.
* lvb_data is immutable if the lock is held so it's safe to access it
- * without res lock. See the description in ldlm_lock_decref_internal()
- * for the condition to free lvb_data of layout lock
- */
- if (lock->l_lvb_data) {
- rc = obd_unpackmd(sbi->ll_dt_exp, &md.lsm,
- lock->l_lvb_data, lock->l_lvb_len);
- if (rc >= 0) {
- *gen = LL_LAYOUT_GEN_EMPTY;
- if (md.lsm)
- *gen = md.lsm->lsm_layout_gen;
- rc = 0;
- } else {
- CERROR("%s: file " DFID " unpackmd error: %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(&lli->lli_fid), rc);
- }
- }
- if (rc < 0)
- goto out;
-
- /* set layout to file. Unlikely this will fail as old layout was
+ * without res lock.
+ *
+ * set layout to file. Unlikely this will fail as old layout was
* surely eliminated
*/
memset(&conf, 0, sizeof(conf));
conf.coc_opc = OBJECT_CONF_SET;
conf.coc_inode = inode;
conf.coc_lock = lock;
- conf.u.coc_md = &md;
+ conf.u.coc_layout.lb_buf = lock->l_lvb_data;
+ conf.u.coc_layout.lb_len = lock->l_lvb_len;
rc = ll_layout_conf(inode, &conf);
- if (md.lsm)
- obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
-
/* refresh layout failed, need to wait */
wait_layout = rc == -EBUSY;
@@ -3584,20 +3456,7 @@ out:
return rc;
}
-/**
- * This function checks if there exists a LAYOUT lock on the client side,
- * or enqueues it if it doesn't have one in cache.
- *
- * This function will not hold layout lock so it may be revoked any time after
- * this function returns. Any operations depend on layout should be redone
- * in that case.
- *
- * This function should be called before lov_io_init() to get an uptodate
- * layout version, the caller should save the version number and after IO
- * is finished, this function should be called again to verify that layout
- * is not changed during IO time.
- */
-int ll_layout_refresh(struct inode *inode, __u32 *gen)
+static int ll_layout_refresh_locked(struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
@@ -3613,17 +3472,6 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen)
};
int rc;
- *gen = ll_layout_version_get(lli);
- if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK) || *gen != LL_LAYOUT_GEN_NONE)
- return 0;
-
- /* sanity checks */
- LASSERT(fid_is_sane(ll_inode2fid(inode)));
- LASSERT(S_ISREG(inode->i_mode));
-
- /* take layout lock mutex to enqueue layout lock exclusively. */
- mutex_lock(&lli->lli_layout_mutex);
-
again:
/* mostly layout lock is caching on the local side, so try to match
* it before grabbing layout lock mutex.
@@ -3631,20 +3479,16 @@ again:
mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
LCK_CR | LCK_CW | LCK_PR | LCK_PW);
if (mode != 0) { /* hit cached lock */
- rc = ll_layout_lock_set(&lockh, mode, inode, gen, true);
+ rc = ll_layout_lock_set(&lockh, mode, inode);
if (rc == -EAGAIN)
goto again;
-
- mutex_unlock(&lli->lli_layout_mutex);
return rc;
}
op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
0, 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data)) {
- mutex_unlock(&lli->lli_layout_mutex);
+ if (IS_ERR(op_data))
return PTR_ERR(op_data);
- }
/* have to enqueue one */
memset(&it, 0, sizeof(it));
@@ -3668,10 +3512,50 @@ again:
if (rc == 0) {
/* set lock data in case this is a new lock */
ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
- rc = ll_layout_lock_set(&lockh, mode, inode, gen, true);
+ rc = ll_layout_lock_set(&lockh, mode, inode);
if (rc == -EAGAIN)
goto again;
}
+
+ return rc;
+}
+
+/**
+ * This function checks if there exists a LAYOUT lock on the client side,
+ * or enqueues it if it doesn't have one in cache.
+ *
+ * This function will not hold layout lock so it may be revoked any time after
+ * this function returns. Any operations depend on layout should be redone
+ * in that case.
+ *
+ * This function should be called before lov_io_init() to get an uptodate
+ * layout version, the caller should save the version number and after IO
+ * is finished, this function should be called again to verify that layout
+ * is not changed during IO time.
+ */
+int ll_layout_refresh(struct inode *inode, __u32 *gen)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ int rc;
+
+ *gen = ll_layout_version_get(lli);
+ if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK) || *gen != CL_LAYOUT_GEN_NONE)
+ return 0;
+
+ /* sanity checks */
+ LASSERT(fid_is_sane(ll_inode2fid(inode)));
+ LASSERT(S_ISREG(inode->i_mode));
+
+ /* take layout lock mutex to enqueue layout lock exclusively. */
+ mutex_lock(&lli->lli_layout_mutex);
+
+ rc = ll_layout_refresh_locked(inode);
+ if (rc < 0)
+ goto out;
+
+ *gen = ll_layout_version_get(lli);
+out:
mutex_unlock(&lli->lli_layout_mutex);
return rc;
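Several functions in the file.c hunks above (ll_do_fiemap(), ll_data_version(), ll_hsm_release(), cl_sync_file_range(), ll_layout_conf()) replace the cl_env_nest wrapper with cl_env_get()/cl_env_put() and a refcheck cookie. A minimal sketch of the new pattern, using only the signatures visible in those hunks; do_cl_work() is a hypothetical placeholder for whatever runs under the environment:

	static int with_cl_env(struct inode *inode)
	{
		struct lu_env *env;
		int refcheck;
		int rc;

		env = cl_env_get(&refcheck);	/* replaces cl_env_nested_get(&nest) */
		if (IS_ERR(env))
			return PTR_ERR(env);

		rc = do_cl_work(env, inode);	/* hypothetical body */

		cl_env_put(env, &refcheck);	/* replaces cl_env_nested_put(&nest, env) */
		return rc;
	}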
diff --git a/drivers/staging/lustre/lustre/llite/glimpse.c b/drivers/staging/lustre/lustre/llite/glimpse.c
index 22507b9c6d69..504498de536e 100644
--- a/drivers/staging/lustre/lustre/llite/glimpse.c
+++ b/drivers/staging/lustre/lustre/llite/glimpse.c
@@ -80,69 +80,60 @@ blkcnt_t dirty_cnt(struct inode *inode)
int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
struct inode *inode, struct cl_object *clob, int agl)
{
- struct ll_inode_info *lli = ll_i2info(inode);
const struct lu_fid *fid = lu_object_fid(&clob->co_lu);
- int result;
+ struct cl_lock *lock = vvp_env_lock(env);
+ struct cl_lock_descr *descr = &lock->cll_descr;
+ int result = 0;
+
+ CDEBUG(D_DLMTRACE, "Glimpsing inode " DFID "\n", PFID(fid));
+
+ /* NOTE: this looks like DLM lock request, but it may
+ * not be one. Due to CEF_ASYNC flag (translated
+ * to LDLM_FL_HAS_INTENT by osc), this is
+ * glimpse request, that won't revoke any
+ * conflicting DLM locks held. Instead,
+ * ll_glimpse_callback() will be called on each
+ * client holding a DLM lock against this file,
+ * and resulting size will be returned for each
+ * stripe. DLM lock on [0, EOF] is acquired only
+ * if there were no conflicting locks. If there
+ * were conflicting locks, enqueuing or waiting
+ * fails with -ENAVAIL, but valid inode
+ * attributes are returned anyway.
+ */
+ *descr = whole_file;
+ descr->cld_obj = clob;
+ descr->cld_mode = CLM_READ;
+ descr->cld_enq_flags = CEF_ASYNC | CEF_MUST;
+ if (agl)
+ descr->cld_enq_flags |= CEF_AGL;
+ /*
+ * CEF_ASYNC is used because glimpse sub-locks cannot
+ * deadlock (because they never conflict with other
+ * locks) and, hence, can be enqueued out-of-order.
+ *
+ * CEF_MUST protects glimpse lock from conversion into
+ * a lockless mode.
+ */
+ result = cl_lock_request(env, io, lock);
+ if (result < 0)
+ return result;
- result = 0;
- if (!(lli->lli_flags & LLIF_MDS_SIZE_LOCK)) {
- CDEBUG(D_DLMTRACE, "Glimpsing inode " DFID "\n", PFID(fid));
- if (lli->lli_has_smd) {
- struct cl_lock *lock = vvp_env_lock(env);
- struct cl_lock_descr *descr = &lock->cll_descr;
-
- /* NOTE: this looks like DLM lock request, but it may
- * not be one. Due to CEF_ASYNC flag (translated
- * to LDLM_FL_HAS_INTENT by osc), this is
- * glimpse request, that won't revoke any
- * conflicting DLM locks held. Instead,
- * ll_glimpse_callback() will be called on each
- * client holding a DLM lock against this file,
- * and resulting size will be returned for each
- * stripe. DLM lock on [0, EOF] is acquired only
- * if there were no conflicting locks. If there
- * were conflicting locks, enqueuing or waiting
- * fails with -ENAVAIL, but valid inode
- * attributes are returned anyway.
- */
- *descr = whole_file;
- descr->cld_obj = clob;
- descr->cld_mode = CLM_READ;
- descr->cld_enq_flags = CEF_ASYNC | CEF_MUST;
- if (agl)
- descr->cld_enq_flags |= CEF_AGL;
+ if (!agl) {
+ ll_merge_attr(env, inode);
+ if (i_size_read(inode) > 0 && !inode->i_blocks) {
/*
- * CEF_ASYNC is used because glimpse sub-locks cannot
- * deadlock (because they never conflict with other
- * locks) and, hence, can be enqueued out-of-order.
- *
- * CEF_MUST protects glimpse lock from conversion into
- * a lockless mode.
+ * LU-417: Add dirty pages block count
+ * lest i_blocks reports 0, some "cp" or
+ * "tar" may think it's a completely
+ * sparse file and skip it.
*/
- result = cl_lock_request(env, io, lock);
- if (result < 0)
- return result;
-
- if (!agl) {
- ll_merge_attr(env, inode);
- if (i_size_read(inode) > 0 &&
- inode->i_blocks == 0) {
- /*
- * LU-417: Add dirty pages block count
- * lest i_blocks reports 0, some "cp" or
- * "tar" may think it's a completely
- * sparse file and skip it.
- */
- inode->i_blocks = dirty_cnt(inode);
- }
- }
- cl_lock_release(env, lock);
- } else {
- CDEBUG(D_DLMTRACE, "No objects for inode\n");
- ll_merge_attr(env, inode);
+ inode->i_blocks = dirty_cnt(inode);
}
}
+ cl_lock_release(env, lock);
+
return result;
}
@@ -212,39 +203,3 @@ again:
}
return result;
}
-
-int cl_local_size(struct inode *inode)
-{
- struct lu_env *env = NULL;
- struct cl_io *io = NULL;
- struct cl_object *clob;
- int result;
- int refcheck;
-
- if (!ll_i2info(inode)->lli_has_smd)
- return 0;
-
- result = cl_io_get(inode, &env, &io, &refcheck);
- if (result <= 0)
- return result;
-
- clob = io->ci_obj;
- result = cl_io_init(env, io, CIT_MISC, clob);
- if (result > 0) {
- result = io->ci_result;
- } else if (result == 0) {
- struct cl_lock *lock = vvp_env_lock(env);
-
- lock->cll_descr = whole_file;
- lock->cll_descr.cld_enq_flags = CEF_PEEK;
- lock->cll_descr.cld_obj = clob;
- result = cl_lock_request(env, io, lock);
- if (result == 0) {
- ll_merge_attr(env, inode);
- cl_lock_release(env, lock);
- }
- }
- cl_io_fini(env, io);
- cl_env_put(env, &refcheck);
- return result;
-}
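The LU-417 comment kept in cl_glimpse_lock() refers to the heuristic that archiving tools apply: a file whose size is non-zero but whose block count is zero is assumed to be entirely sparse. The snippet below is a rough userspace illustration of that check, written for this document and not taken from any particular tool, to show what faking i_blocks from the dirty page count is defending against.

#include <stdbool.h>
#include <sys/stat.h>

/*
 * Rough illustration of the sparse-file heuristic behind the LU-417
 * comment: size > 0 with st_blocks == 0 looks like a hole-only file,
 * so tools such as "cp" or "tar" may skip reading its data.  Setting
 * i_blocks from the dirty page count defeats this check while dirty
 * data has not yet been flushed to the OSTs.
 */
static bool looks_completely_sparse(const struct stat *st)
{
	return st->st_size > 0 && st->st_blocks == 0;
}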
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
index 084330d08f7a..dd1cfd8f5213 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
@@ -80,7 +80,8 @@ int cl_inode_fini_refcheck;
*/
static DEFINE_MUTEX(cl_inode_fini_guard);
-int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
+int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr,
+ unsigned int attr_flags)
{
struct lu_env *env;
struct cl_io *io;
@@ -92,14 +93,15 @@ int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
return PTR_ERR(env);
io = vvp_env_thread_io(env);
- io->ci_obj = ll_i2info(inode)->lli_clob;
+ io->ci_obj = obj;
io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
+ io->u.ci_setattr.sa_attr_flags = attr_flags;
io->u.ci_setattr.sa_valid = attr->ia_valid;
- io->u.ci_setattr.sa_parent_fid = ll_inode2fid(inode);
+ io->u.ci_setattr.sa_parent_fid = lu_object_fid(&obj->co_lu);
again:
if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
@@ -148,7 +150,7 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
struct cl_object_conf conf = {
.coc_inode = inode,
.u = {
- .coc_md = md
+ .coc_layout = md->layout,
}
};
int result = 0;
@@ -182,7 +184,6 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
* locked by I_NEW bit.
*/
lli->lli_clob = clob;
- lli->lli_has_smd = lsm_has_objects(md->lsm);
lu_object_ref_add(&clob->co_lu, "inode", inode);
} else {
result = PTR_ERR(clob);
@@ -245,15 +246,11 @@ void cl_inode_fini(struct inode *inode)
int emergency;
if (clob) {
- void *cookie;
-
- cookie = cl_env_reenter();
env = cl_env_get(&refcheck);
emergency = IS_ERR(env);
if (emergency) {
mutex_lock(&cl_inode_fini_guard);
LASSERT(cl_inode_fini_env);
- cl_env_implant(cl_inode_fini_env, &refcheck);
env = cl_inode_fini_env;
}
/*
@@ -265,13 +262,10 @@ void cl_inode_fini(struct inode *inode)
lu_object_ref_del(&clob->co_lu, "inode", inode);
cl_object_put_last(env, clob);
lli->lli_clob = NULL;
- if (emergency) {
- cl_env_unplant(cl_inode_fini_env, &refcheck);
+ if (emergency)
mutex_unlock(&cl_inode_fini_guard);
- } else {
+ else
cl_env_put(env, &refcheck);
- }
- cl_env_reexit(cookie);
}
}
@@ -302,22 +296,3 @@ __u32 cl_fid_build_gen(const struct lu_fid *fid)
gen = fid_flatten(fid) >> 32;
return gen;
}
-
-/* lsm is unreliable after hsm implementation as layout can be changed at
- * any time. This is only to support old, non-clio-ized interfaces. It will
- * cause deadlock if clio operations are called with this extra layout refcount
- * because in case the layout changed during the IO, ll_layout_refresh() will
- * have to wait for the refcount to become zero to destroy the older layout.
- *
- * Notice that the lsm returned by this function may not be valid unless called
- * inside layout lock - MDS_INODELOCK_LAYOUT.
- */
-struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
-{
- return lov_lsm_get(ll_i2info(inode)->lli_clob);
-}
-
-inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
-{
- lov_lsm_put(ll_i2info(inode)->lli_clob, lsm);
-}
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_misc.c b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
index fb346c12dad2..f48660ed350f 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_misc.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
@@ -47,36 +47,29 @@
*/
int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp)
{
- struct lov_stripe_md lsm = { .lsm_magic = LOV_MAGIC_V3 };
- __u32 valsize = sizeof(struct lov_desc);
- int rc, easize, def_easize, cookiesize;
- struct lov_desc desc;
- __u16 stripes, def_stripes;
-
- rc = obd_get_info(NULL, dt_exp, sizeof(KEY_LOVDESC), KEY_LOVDESC,
- &valsize, &desc, NULL);
+ u32 val_size, max_easize, def_easize;
+ int rc;
+
+ val_size = sizeof(max_easize);
+ rc = obd_get_info(NULL, dt_exp, sizeof(KEY_MAX_EASIZE), KEY_MAX_EASIZE,
+ &val_size, &max_easize);
if (rc)
return rc;
- stripes = min_t(__u32, desc.ld_tgt_count, LOV_MAX_STRIPE_COUNT);
- lsm.lsm_stripe_count = stripes;
- easize = obd_size_diskmd(dt_exp, &lsm);
-
- def_stripes = min_t(__u32, desc.ld_default_stripe_count,
- LOV_MAX_STRIPE_COUNT);
- lsm.lsm_stripe_count = def_stripes;
- def_easize = obd_size_diskmd(dt_exp, &lsm);
-
- cookiesize = stripes * sizeof(struct llog_cookie);
+ val_size = sizeof(def_easize);
+ rc = obd_get_info(NULL, dt_exp, sizeof(KEY_DEFAULT_EASIZE),
+ KEY_DEFAULT_EASIZE, &val_size, &def_easize);
+ if (rc)
+ return rc;
- /* default cookiesize is 0 because from 2.4 server doesn't send
+ /*
+ * default cookiesize is 0 because from 2.4 server doesn't send
* llog cookies to client.
*/
- CDEBUG(D_HA,
- "updating def/max_easize: %d/%d def/max_cookiesize: 0/%d\n",
- def_easize, easize, cookiesize);
+ CDEBUG(D_HA, "updating def/max_easize: %d/%d\n",
+ def_easize, max_easize);
- rc = md_init_ea_size(md_exp, easize, def_easize, cookiesize, 0);
+ rc = md_init_ea_size(md_exp, max_easize, def_easize);
return rc;
}
@@ -169,13 +162,11 @@ int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
return rc;
}
- cg->lg_env = cl_env_get(&refcheck);
+ cg->lg_env = env;
cg->lg_io = io;
cg->lg_lock = lock;
cg->lg_gid = gid;
- LASSERT(cg->lg_env == env);
- cl_env_unplant(env, &refcheck);
return 0;
}
@@ -184,14 +175,10 @@ void cl_put_grouplock(struct ll_grouplock *cg)
struct lu_env *env = cg->lg_env;
struct cl_io *io = cg->lg_io;
struct cl_lock *lock = cg->lg_lock;
- int refcheck;
LASSERT(cg->lg_env);
LASSERT(cg->lg_gid);
- cl_env_implant(env, &refcheck);
- cl_env_put(env, &refcheck);
-
cl_lock_release(env, lock);
cl_io_fini(env, io);
cl_env_put(env, NULL);
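cl_init_ea_size() now derives the EA sizes by querying the export with obd_get_info() instead of sizing a dummy lov_stripe_md. The helper below is a minimal sketch of that query pattern, assuming only the KEY_MAX_EASIZE key and the six-argument obd_get_info() call visible in the hunk above; the function name is illustrative.

/*
 * Sketch of the obd_get_info() query pattern used above: pass the key,
 * its length, the size of the value buffer and the buffer itself; the
 * export fills in the value.
 */
static int get_max_easize(struct obd_export *dt_exp, u32 *max_easize)
{
	u32 val_size = sizeof(*max_easize);

	return obd_get_info(NULL, dt_exp, sizeof(KEY_MAX_EASIZE),
			    KEY_MAX_EASIZE, &val_size, max_easize);
}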
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
deleted file mode 100644
index 8644631bf2ba..000000000000
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ /dev/null
@@ -1,395 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/llite_close.c
- *
- * Lustre Lite routines to issue a secondary close after writeback
- */
-
-#include <linux/module.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "llite_internal.h"
-
-/** records that a write is in flight */
-void vvp_write_pending(struct vvp_object *club, struct vvp_page *page)
-{
- struct ll_inode_info *lli = ll_i2info(club->vob_inode);
-
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_SOM_DIRTY;
- if (page && list_empty(&page->vpg_pending_linkage))
- list_add(&page->vpg_pending_linkage, &club->vob_pending_list);
- spin_unlock(&lli->lli_lock);
-}
-
-/** records that a write has completed */
-void vvp_write_complete(struct vvp_object *club, struct vvp_page *page)
-{
- struct ll_inode_info *lli = ll_i2info(club->vob_inode);
- int rc = 0;
-
- spin_lock(&lli->lli_lock);
- if (page && !list_empty(&page->vpg_pending_linkage)) {
- list_del_init(&page->vpg_pending_linkage);
- rc = 1;
- }
- spin_unlock(&lli->lli_lock);
- if (rc)
- ll_queue_done_writing(club->vob_inode, 0);
-}
-
-/** Queues DONE_WRITING if
- * - done writing is allowed;
- * - inode has no dirty pages;
- */
-void ll_queue_done_writing(struct inode *inode, unsigned long flags)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob);
-
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= flags;
-
- if ((lli->lli_flags & LLIF_DONE_WRITING) &&
- list_empty(&club->vob_pending_list)) {
- struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
-
- if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CWARN("%s: file "DFID"(flags %u) Size-on-MDS valid, done writing allowed and no dirty pages\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(ll_inode2fid(inode)), lli->lli_flags);
- /* DONE_WRITING is allowed and inode has no dirty page. */
- spin_lock(&lcq->lcq_lock);
-
- LASSERT(list_empty(&lli->lli_close_list));
- CDEBUG(D_INODE, "adding inode "DFID" to close list\n",
- PFID(ll_inode2fid(inode)));
- list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
-
- /* Avoid a concurrent insertion into the close thread queue:
- * an inode is already in the close thread, open(), write(),
- * close() happen, epoch is closed as the inode is marked as
- * LLIF_EPOCH_PENDING. When pages are written inode should not
- * be inserted into the queue again, clear this flag to avoid
- * it.
- */
- lli->lli_flags &= ~LLIF_DONE_WRITING;
-
- wake_up(&lcq->lcq_waitq);
- spin_unlock(&lcq->lcq_lock);
- }
- spin_unlock(&lli->lli_lock);
-}
-
-/** Pack SOM attributes info @opdata for CLOSE, DONE_WRITING rpc. */
-void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
-
- op_data->op_flags |= MF_SOM_CHANGE;
- /* Check if Size-on-MDS attributes are valid. */
- if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(ll_inode2fid(inode)), lli->lli_flags);
-
- if (!cl_local_size(inode)) {
- /* Send Size-on-MDS Attributes if valid. */
- op_data->op_attr.ia_valid |= ATTR_MTIME_SET | ATTR_CTIME_SET |
- ATTR_ATIME_SET | ATTR_SIZE | ATTR_BLOCKS;
- }
-}
-
-/** Closes ioepoch and packs Size-on-MDS attribute if needed into @op_data. */
-void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
- struct obd_client_handle **och, unsigned long flags)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob);
-
- spin_lock(&lli->lli_lock);
- if (!(list_empty(&club->vob_pending_list))) {
- if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
- LASSERT(*och);
- LASSERT(!lli->lli_pending_och);
- /* Inode is dirty and there is no pending write done
- * request yet, DONE_WRITE is to be sent later.
- */
- lli->lli_flags |= LLIF_EPOCH_PENDING;
- lli->lli_pending_och = *och;
- spin_unlock(&lli->lli_lock);
-
- inode = igrab(inode);
- LASSERT(inode);
- goto out;
- }
- if (flags & LLIF_DONE_WRITING) {
- /* Some pages are still dirty, it is early to send
- * DONE_WRITE. Wait until all pages will be flushed
- * and try DONE_WRITE again later.
- */
- LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
- lli->lli_flags |= LLIF_DONE_WRITING;
- spin_unlock(&lli->lli_lock);
-
- inode = igrab(inode);
- LASSERT(inode);
- goto out;
- }
- }
- CDEBUG(D_INODE, "Epoch %llu closed on "DFID"\n",
- ll_i2info(inode)->lli_ioepoch, PFID(&lli->lli_fid));
- op_data->op_flags |= MF_EPOCH_CLOSE;
-
- if (flags & LLIF_DONE_WRITING) {
- LASSERT(lli->lli_flags & LLIF_SOM_DIRTY);
- LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
- *och = lli->lli_pending_och;
- lli->lli_pending_och = NULL;
- lli->lli_flags &= ~LLIF_EPOCH_PENDING;
- } else {
- /* Pack Size-on-MDS inode attributes only if they have changed */
- if (!(lli->lli_flags & LLIF_SOM_DIRTY)) {
- spin_unlock(&lli->lli_lock);
- goto out;
- }
-
- /* There is a pending DONE_WRITE -- close epoch with no
- * attribute change.
- */
- if (lli->lli_flags & LLIF_EPOCH_PENDING) {
- spin_unlock(&lli->lli_lock);
- goto out;
- }
- }
-
- LASSERT(list_empty(&club->vob_pending_list));
- lli->lli_flags &= ~LLIF_SOM_DIRTY;
- spin_unlock(&lli->lli_lock);
- ll_done_writing_attr(inode, op_data);
-
-out:
- return;
-}
-
-/**
- * Client updates SOM attributes on MDS (including llog cookies):
- * obd_getattr with no lock and md_setattr.
- */
-int ll_som_update(struct inode *inode, struct md_op_data *op_data)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ptlrpc_request *request = NULL;
- __u32 old_flags;
- struct obdo *oa;
- int rc;
-
- LASSERT(op_data);
- if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(ll_inode2fid(inode)), lli->lli_flags);
-
- oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
- if (!oa) {
- CERROR("can't allocate memory for Size-on-MDS update.\n");
- return -ENOMEM;
- }
-
- old_flags = op_data->op_flags;
- op_data->op_flags = MF_SOM_CHANGE;
-
- /* If inode is already in another epoch, skip getattr from OSTs. */
- if (lli->lli_ioepoch == op_data->op_ioepoch) {
- rc = ll_inode_getattr(inode, oa, op_data->op_ioepoch,
- old_flags & MF_GETATTR_LOCK);
- if (rc) {
- oa->o_valid = 0;
- if (rc != -ENOENT)
- CERROR("%s: inode_getattr failed - unable to send a Size-on-MDS attribute update for inode "DFID": rc = %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(ll_inode2fid(inode)), rc);
- } else {
- CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
- PFID(&lli->lli_fid));
- }
- /* Install attributes into op_data. */
- md_from_obdo(op_data, oa, oa->o_valid);
- }
-
- rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data,
- NULL, 0, NULL, 0, &request, NULL);
- ptlrpc_req_finished(request);
-
- kmem_cache_free(obdo_cachep, oa);
- return rc;
-}
-
-/**
- * Closes the ioepoch and packs all the attributes into @op_data for
- * DONE_WRITING rpc.
- */
-static void ll_prepare_done_writing(struct inode *inode,
- struct md_op_data *op_data,
- struct obd_client_handle **och)
-{
- ll_ioepoch_close(inode, op_data, och, LLIF_DONE_WRITING);
- /* If there is no @och, we do not do D_W yet. */
- if (!*och)
- return;
-
- ll_pack_inode2opdata(inode, op_data, &(*och)->och_fh);
- ll_prep_md_op_data(op_data, inode, NULL, NULL,
- 0, 0, LUSTRE_OPC_ANY, NULL);
-}
-
-/** Send a DONE_WRITING rpc. */
-static void ll_done_writing(struct inode *inode)
-{
- struct obd_client_handle *och = NULL;
- struct md_op_data *op_data;
- int rc;
-
- LASSERT(exp_connect_som(ll_i2mdexp(inode)));
-
- op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- if (!op_data)
- return;
-
- ll_prepare_done_writing(inode, op_data, &och);
- /* If there is no @och, we do not do D_W yet. */
- if (!och)
- goto out;
-
- rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, NULL);
- if (rc == -EAGAIN)
- /* MDS has instructed us to obtain Size-on-MDS attribute from
- * OSTs and send setattr back to the MDS.
- */
- rc = ll_som_update(inode, op_data);
- else if (rc) {
- CERROR("%s: inode "DFID" mdc done_writing failed: rc = %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(ll_inode2fid(inode)), rc);
- }
-out:
- ll_finish_md_op_data(op_data);
- if (och) {
- md_clear_open_replay_data(ll_i2sbi(inode)->ll_md_exp, och);
- kfree(och);
- }
-}
-
-static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
-{
- struct ll_inode_info *lli = NULL;
-
- spin_lock(&lcq->lcq_lock);
-
- if (!list_empty(&lcq->lcq_head)) {
- lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
- lli_close_list);
- list_del_init(&lli->lli_close_list);
- } else if (atomic_read(&lcq->lcq_stop)) {
- lli = ERR_PTR(-EALREADY);
- }
-
- spin_unlock(&lcq->lcq_lock);
- return lli;
-}
-
-static int ll_close_thread(void *arg)
-{
- struct ll_close_queue *lcq = arg;
-
- complete(&lcq->lcq_comp);
-
- while (1) {
- struct l_wait_info lwi = { 0 };
- struct ll_inode_info *lli;
- struct inode *inode;
-
- l_wait_event_exclusive(lcq->lcq_waitq,
- (lli = ll_close_next_lli(lcq)) != NULL,
- &lwi);
- if (IS_ERR(lli))
- break;
-
- inode = ll_info2i(lli);
- CDEBUG(D_INFO, "done_writing for inode "DFID"\n",
- PFID(ll_inode2fid(inode)));
- ll_done_writing(inode);
- iput(inode);
- }
-
- CDEBUG(D_INFO, "ll_close exiting\n");
- complete(&lcq->lcq_comp);
- return 0;
-}
-
-int ll_close_thread_start(struct ll_close_queue **lcq_ret)
-{
- struct ll_close_queue *lcq;
- struct task_struct *task;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD))
- return -EINTR;
-
- lcq = kzalloc(sizeof(*lcq), GFP_NOFS);
- if (!lcq)
- return -ENOMEM;
-
- spin_lock_init(&lcq->lcq_lock);
- INIT_LIST_HEAD(&lcq->lcq_head);
- init_waitqueue_head(&lcq->lcq_waitq);
- init_completion(&lcq->lcq_comp);
-
- task = kthread_run(ll_close_thread, lcq, "ll_close");
- if (IS_ERR(task)) {
- kfree(lcq);
- return PTR_ERR(task);
- }
-
- wait_for_completion(&lcq->lcq_comp);
- *lcq_ret = lcq;
- return 0;
-}
-
-void ll_close_thread_shutdown(struct ll_close_queue *lcq)
-{
- init_completion(&lcq->lcq_comp);
- atomic_inc(&lcq->lcq_stop);
- wake_up(&lcq->lcq_waitq);
- wait_for_completion(&lcq->lcq_comp);
- kfree(lcq);
-}
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 4bc551279aa4..2f46d475cd7d 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -97,31 +97,20 @@ struct ll_grouplock {
unsigned long lg_gid;
};
-enum lli_flags {
- /* MDS has an authority for the Size-on-MDS attributes. */
- LLIF_MDS_SIZE_LOCK = (1 << 0),
- /* Epoch close is postponed. */
- LLIF_EPOCH_PENDING = (1 << 1),
- /* DONE WRITING is allowed. */
- LLIF_DONE_WRITING = (1 << 2),
- /* Size-on-MDS attributes are changed. An attribute update needs to
- * be sent to MDS.
- */
- LLIF_SOM_DIRTY = (1 << 3),
+enum ll_file_flags {
/* File data is modified. */
- LLIF_DATA_MODIFIED = (1 << 4),
+ LLIF_DATA_MODIFIED = 0,
/* File is being restored */
- LLIF_FILE_RESTORING = (1 << 5),
+ LLIF_FILE_RESTORING = 1,
/* Xattr cache is attached to the file */
- LLIF_XATTR_CACHE = (1 << 6),
+ LLIF_XATTR_CACHE = 2,
};
struct ll_inode_info {
__u32 lli_inode_magic;
- __u32 lli_flags;
- __u64 lli_ioepoch;
spinlock_t lli_lock;
+ unsigned long lli_flags;
struct posix_acl *lli_posix_acl;
/* identifying fields for both metadata and data stacks. */
@@ -129,14 +118,6 @@ struct ll_inode_info {
/* master inode fid for stripe directory */
struct lu_fid lli_pfid;
- struct list_head lli_close_list;
-
- /* handle is to be sent to MDS later on done_writing and setattr.
- * Open handle data are needed for the recovery to reconstruct
- * the inode state on the MDS. XXX: recovery is not ready yet.
- */
- struct obd_client_handle *lli_pending_och;
-
/* We need all three because every inode may be opened in different
* modes
*/
@@ -204,7 +185,6 @@ struct ll_inode_info {
struct {
struct mutex lli_size_mutex;
char *lli_symlink_name;
- __u64 lli_maxbytes;
/*
* struct rw_semaphore {
* signed long count; // align d.d_def_acl
@@ -245,7 +225,6 @@ struct ll_inode_info {
* In the future, if more members are added only for directory,
* some of the following members can be moved into u.f.
*/
- bool lli_has_smd;
struct cl_object *lli_clob;
/* mutex to request for layout lock exclusively. */
@@ -282,6 +261,9 @@ int ll_xattr_cache_destroy(struct inode *inode);
int ll_xattr_cache_get(struct inode *inode, const char *name,
char *buffer, size_t size, __u64 valid);
+int ll_init_security(struct dentry *dentry, struct inode *inode,
+ struct inode *dir);
+
/*
* Locking to guarantee consistency of non-atomic updates to long long i_size,
* consistency between file size and KMS.
@@ -400,7 +382,7 @@ enum stats_track_type {
#define LL_SBI_LOCALFLOCK 0x200 /* Local flocks support by kernel */
#define LL_SBI_LRU_RESIZE 0x400 /* lru resize support */
#define LL_SBI_LAZYSTATFS 0x800 /* lazystatfs mount option */
-#define LL_SBI_SOM_PREVIEW 0x1000 /* SOM preview mount option */
+/* LL_SBI_SOM_PREVIEW 0x1000 SOM preview mount option, obsolete */
#define LL_SBI_32BIT_API 0x2000 /* generate 32 bit inodes. */
#define LL_SBI_64BIT_HASH 0x4000 /* support 64-bits dir hash/offset */
#define LL_SBI_AGL_ENABLED 0x8000 /* enable agl */
@@ -409,6 +391,8 @@ enum stats_track_type {
#define LL_SBI_USER_FID2PATH 0x40000 /* allow fid2path by unprivileged users */
#define LL_SBI_XATTR_CACHE 0x80000 /* support for xattr cache */
#define LL_SBI_NOROOTSQUASH 0x100000 /* do not apply root squash */
+#define LL_SBI_ALWAYS_PING 0x200000 /* always ping even if server
+ * suppress_pings */
#define LL_SBI_FLAGS { \
"nolck", \
@@ -432,6 +416,7 @@ enum stats_track_type {
"user_fid2path",\
"xattr_cache", \
"norootsquash", \
+ "always_ping", \
}
/*
@@ -466,10 +451,10 @@ struct ll_sb_info {
int ll_flags;
unsigned int ll_umounting:1,
- ll_xattr_cache_enabled:1;
- struct lustre_client_ocd ll_lco;
+ ll_xattr_cache_enabled:1,
+ ll_client_common_fill_super_succeeded:1;
- struct ll_close_queue *ll_lcq;
+ struct lustre_client_ocd ll_lco;
struct lprocfs_stats *ll_stats; /* lprocfs stats counter */
@@ -630,8 +615,6 @@ struct ll_file_data {
struct list_head fd_lccs; /* list of ll_cl_context */
};
-struct lov_stripe_md;
-
extern struct dentry *llite_root;
extern struct kset *llite_kset;
@@ -682,8 +665,6 @@ enum {
LPROC_LL_WRITE_BYTES,
LPROC_LL_BRW_READ,
LPROC_LL_BRW_WRITE,
- LPROC_LL_OSC_READ,
- LPROC_LL_OSC_WRITE,
LPROC_LL_IOCTL,
LPROC_LL_OPEN,
LPROC_LL_RELEASE,
@@ -741,9 +722,7 @@ int ll_writepage(struct page *page, struct writeback_control *wbc);
int ll_writepages(struct address_space *, struct writeback_control *wbc);
int ll_readpage(struct file *file, struct page *page);
void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
-int ll_readahead(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct ll_readahead_state *ras,
- bool hit);
+int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
struct ll_cl_context *ll_cl_find(struct file *file);
void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io);
void ll_cl_remove(struct file *file, const struct lu_env *env);
@@ -762,25 +741,14 @@ enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
enum ldlm_mode mode);
int ll_file_open(struct inode *inode, struct file *file);
int ll_file_release(struct inode *inode, struct file *file);
-int ll_glimpse_ioctl(struct ll_sb_info *sbi,
- struct lov_stripe_md *lsm, lstat_t *st);
-void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch);
int ll_release_openhandle(struct inode *, struct lookup_intent *);
int ll_md_real_close(struct inode *inode, fmode_t fmode);
-void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
- struct obd_client_handle **och, unsigned long flags);
-void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data);
-int ll_som_update(struct inode *inode, struct md_op_data *op_data);
-int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
- __u64 ioepoch, int sync);
-void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
- struct lustre_handle *fh);
int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat);
struct posix_acl *ll_get_acl(struct inode *inode, int type);
int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
const char *name, int namelen);
int ll_get_fid_by_name(struct inode *parent, const char *name,
- int namelen, struct lu_fid *fid);
+ int namelen, struct lu_fid *fid, struct inode **inode);
int ll_inode_permission(struct inode *inode, int mask);
int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
@@ -818,6 +786,7 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt);
void ll_put_super(struct super_block *sb);
void ll_kill_super(struct super_block *sb);
struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock);
+void ll_dir_clear_lsm_md(struct inode *inode);
void ll_clear_inode(struct inode *inode);
int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import);
int ll_setattr(struct dentry *de, struct iattr *attr);
@@ -891,18 +860,6 @@ int ll_dir_get_parent_fid(struct inode *dir, struct lu_fid *parent_fid);
/* llite/symlink.c */
extern const struct inode_operations ll_fast_symlink_inode_operations;
-/* llite/llite_close.c */
-struct ll_close_queue {
- spinlock_t lcq_lock;
- struct list_head lcq_head;
- wait_queue_head_t lcq_waitq;
- struct completion lcq_comp;
- atomic_t lcq_stop;
-};
-
-void vvp_write_pending(struct vvp_object *club, struct vvp_page *page);
-void vvp_write_complete(struct vvp_object *club, struct vvp_page *page);
-
/**
* IO arguments for various VFS I/O interfaces.
*/
@@ -945,15 +902,11 @@ static inline struct vvp_io_args *ll_env_args(const struct lu_env *env)
return &ll_env_info(env)->lti_args;
}
-void ll_queue_done_writing(struct inode *inode, unsigned long flags);
-void ll_close_thread_shutdown(struct ll_close_queue *lcq);
-int ll_close_thread_start(struct ll_close_queue **lcq_ret);
-
/* llite/llite_mmap.c */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last);
int ll_file_mmap(struct file *file, struct vm_area_struct *vma);
-void policy_from_vma(ldlm_policy_data_t *policy, struct vm_area_struct *vma,
+void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
unsigned long addr, size_t count);
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
size_t count);
@@ -1024,9 +977,14 @@ static inline struct lu_fid *ll_inode2fid(struct inode *inode)
return fid;
}
-static inline __u64 ll_file_maxbytes(struct inode *inode)
+static inline loff_t ll_file_maxbytes(struct inode *inode)
{
- return ll_i2info(inode)->lli_maxbytes;
+ struct cl_object *obj = ll_i2info(inode)->lli_clob;
+
+ if (!obj)
+ return MAX_LFS_FILESIZE;
+
+ return min_t(loff_t, cl_object_maxbytes(obj), MAX_LFS_FILESIZE);
}
/* llite/xattr.c */
@@ -1043,17 +1001,18 @@ extern const struct xattr_handler *ll_xattr_handlers[];
ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
int ll_xattr_list(struct inode *inode, const char *name, int type,
void *buffer, size_t size, __u64 valid);
+const struct xattr_handler *get_xattr_type(const char *name);
/**
* Common IO arguments for various VFS I/O interfaces.
*/
int cl_sb_init(struct super_block *sb);
int cl_sb_fini(struct super_block *sb);
-void ll_io_init(struct cl_io *io, const struct file *file, int write);
-void ras_update(struct ll_sb_info *sbi, struct inode *inode,
- struct ll_readahead_state *ras, unsigned long index,
- unsigned hit);
+enum ras_update_flags {
+ LL_RAS_HIT = 0x1,
+ LL_RAS_MMAP = 0x2
+};
void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
void ll_ra_stats_inc(struct inode *inode, enum ra_stat which);
@@ -1258,15 +1217,6 @@ struct ll_dio_pages {
int ldp_nr;
};
-static inline void cl_stats_tally(struct cl_device *dev, enum cl_req_type crt,
- int rc)
-{
- int opc = (crt == CRT_READ) ? LPROC_LL_OSC_READ :
- LPROC_LL_OSC_WRITE;
-
- ll_stats_ops_tally(ll_s2sbi(cl2vvp_dev(dev)->vdv_sb), opc, rc);
-}
-
ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
int rw, struct inode *inode,
struct ll_dio_pages *pv);
@@ -1365,11 +1315,6 @@ static inline void d_lustre_revalidate(struct dentry *dentry)
spin_unlock(&dentry->d_lock);
}
-enum {
- LL_LAYOUT_GEN_NONE = ((__u32)-2), /* layout lock was cancelled */
- LL_LAYOUT_GEN_EMPTY = ((__u32)-1) /* for empty layout */
-};
-
int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf);
int ll_layout_refresh(struct inode *inode, __u32 *gen);
int ll_layout_restore(struct inode *inode, loff_t start, __u64 length);
@@ -1383,14 +1328,14 @@ int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
int ll_getparent(struct file *file, struct getparent __user *arg);
/* lcommon_cl.c */
-int cl_setattr_ost(struct inode *inode, const struct iattr *attr);
+int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr,
+ unsigned int attr_flags);
extern struct lu_env *cl_inode_fini_env;
extern int cl_inode_fini_refcheck;
int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
void cl_inode_fini(struct inode *inode);
-int cl_local_size(struct inode *inode);
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
__u32 cl_fid_build_gen(const struct lu_fid *fid);
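The ll_file_flags values above become plain bit numbers rather than masks because lli_flags is now an unsigned long manipulated with atomic bitops; later hunks in this patch replace the spinlocked "lli->lli_flags |= LLIF_..." updates with set_bit(). The two helpers below are illustrative only (they do not exist in the patch) and simply show the before/after flag-update pattern.

/*
 * Old style: flag values were masks, updates needed lli_lock:
 *
 *	spin_lock(&lli->lli_lock);
 *	lli->lli_flags |= LLIF_DATA_MODIFIED;
 *	spin_unlock(&lli->lli_lock);
 *
 * New style: flag values are bit numbers, updated atomically.
 */
static void mark_data_modified(struct ll_inode_info *lli)
{
	set_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
}

static bool file_is_restoring(struct ll_inode_info *lli)
{
	return test_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
}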
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index e5c62f4ce3d8..25f5aed97f63 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -191,10 +191,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
OBD_CONNECT_FLOCK_DEAD |
OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
OBD_CONNECT_OPEN_BY_FID |
- OBD_CONNECT_DIR_STRIPE;
-
- if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
- data->ocd_connect_flags |= OBD_CONNECT_SOM;
+ OBD_CONNECT_DIR_STRIPE |
+ OBD_CONNECT_BULK_MBITS;
if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
@@ -226,6 +224,10 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
/* real client */
data->ocd_connect_flags |= OBD_CONNECT_REAL;
+ /* always ping even if server suppress_pings */
+ if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
+ data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
+
data->ocd_brw_size = MD_MAX_BRW_SIZE;
err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid,
@@ -288,7 +290,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
size = sizeof(*data);
err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
- KEY_CONN_DATA, &size, data, NULL);
+ KEY_CONN_DATA, &size, data);
if (err) {
CERROR("%s: Get connect data failed: rc = %d\n",
sbi->ll_md_exp->exp_obd->obd_name, err);
@@ -355,10 +357,9 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
OBD_CONNECT_EINPROGRESS |
OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
- OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
-
- if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
- data->ocd_connect_flags |= OBD_CONNECT_SOM;
+ OBD_CONNECT_LAYOUTLOCK |
+ OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
+ OBD_CONNECT_BULK_MBITS;
if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
/* OBD_CONNECT_CKSUM should always be set, even if checksums are
@@ -376,6 +377,10 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
+ /* always ping even if server suppress_pings */
+ if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
+ data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
+
CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
data->ocd_connect_flags,
data->ocd_version, data->ocd_grant);
@@ -475,8 +480,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
ptlrpc_req_finished(request);
if (IS_ERR(root)) {
- if (lmd.lsm)
- obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
#ifdef CONFIG_FS_POSIX_ACL
if (lmd.posix_acl) {
posix_acl_release(lmd.posix_acl);
@@ -488,12 +491,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
goto out_root;
}
- err = ll_close_thread_start(&sbi->ll_lcq);
- if (err) {
- CERROR("cannot start close thread: rc %d\n", err);
- goto out_root;
- }
-
checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
KEY_CHECKSUM, sizeof(checksum), &checksum,
@@ -572,10 +569,18 @@ int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
{
int size, rc;
- *lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL);
+ size = sizeof(*lmmsize);
+ rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
+ KEY_MAX_EASIZE, &size, lmmsize);
+ if (rc) {
+ CERROR("%s: cannot get max LOV EA size: rc = %d\n",
+ sbi->ll_dt_exp->exp_obd->obd_name, rc);
+ return rc;
+ }
+
size = sizeof(int);
rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
- KEY_MAX_EASIZE, &size, lmmsize, NULL);
+ KEY_MAX_EASIZE, &size, lmmsize);
if (rc)
CERROR("Get max mdsize error rc %d\n", rc);
@@ -599,7 +604,7 @@ int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
size = sizeof(int);
rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
- KEY_DEFAULT_EASIZE, &size, lmmsize, NULL);
+ KEY_DEFAULT_EASIZE, &size, lmmsize);
if (rc)
CERROR("Get default mdsize error rc %d\n", rc);
@@ -633,8 +638,6 @@ static void client_common_put_super(struct super_block *sb)
{
struct ll_sb_info *sbi = ll_s2sbi(sb);
- ll_close_thread_shutdown(sbi->ll_lcq);
-
cl_sb_fini(sb);
obd_fid_fini(sbi->ll_dt_exp->exp_obd);
@@ -725,6 +728,18 @@ static int ll_options(char *options, int *flags)
*flags &= ~tmp;
goto next;
}
+ tmp = ll_set_opt("context", s1, 1);
+ if (tmp)
+ goto next;
+ tmp = ll_set_opt("fscontext", s1, 1);
+ if (tmp)
+ goto next;
+ tmp = ll_set_opt("defcontext", s1, 1);
+ if (tmp)
+ goto next;
+ tmp = ll_set_opt("rootcontext", s1, 1);
+ if (tmp)
+ goto next;
tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
if (tmp) {
*flags |= tmp;
@@ -766,11 +781,6 @@ static int ll_options(char *options, int *flags)
*flags &= ~tmp;
goto next;
}
- tmp = ll_set_opt("som_preview", s1, LL_SBI_SOM_PREVIEW);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
if (tmp) {
*flags |= tmp;
@@ -786,6 +796,11 @@ static int ll_options(char *options, int *flags)
*flags &= ~tmp;
goto next;
}
+ tmp = ll_set_opt("always_ping", s1, LL_SBI_ALWAYS_PING);
+ if (tmp) {
+ *flags |= tmp;
+ goto next;
+ }
LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
s1);
return -EINVAL;
@@ -804,14 +819,10 @@ void ll_lli_init(struct ll_inode_info *lli)
{
lli->lli_inode_magic = LLI_INODE_MAGIC;
lli->lli_flags = 0;
- lli->lli_ioepoch = 0;
- lli->lli_maxbytes = MAX_LFS_FILESIZE;
spin_lock_init(&lli->lli_lock);
lli->lli_posix_acl = NULL;
/* Do not set lli_fid, it has been initialized already. */
fid_zero(&lli->lli_pfid);
- INIT_LIST_HEAD(&lli->lli_close_list);
- lli->lli_pending_och = NULL;
lli->lli_mds_read_och = NULL;
lli->lli_mds_write_och = NULL;
lli->lli_mds_exec_och = NULL;
@@ -820,9 +831,8 @@ void ll_lli_init(struct ll_inode_info *lli)
lli->lli_open_fd_exec_count = 0;
mutex_init(&lli->lli_och_mutex);
spin_lock_init(&lli->lli_agl_lock);
- lli->lli_has_smd = false;
spin_lock_init(&lli->lli_layout_lock);
- ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE);
+ ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
lli->lli_clob = NULL;
init_rwsem(&lli->lli_xattrs_list_rwsem);
@@ -941,10 +951,14 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
/* connections, registrations, sb setup */
err = client_common_fill_super(sb, md, dt, mnt);
+ if (!err)
+ sbi->ll_client_common_fill_super_succeeded = 1;
out_free:
kfree(md);
kfree(dt);
+ if (lprof)
+ class_put_profile(lprof);
if (err)
ll_put_super(sb);
else if (sbi->ll_flags & LL_SBI_VERBOSE)
@@ -1002,7 +1016,7 @@ void ll_put_super(struct super_block *sb)
}
}
- if (sbi->ll_lcq) {
+ if (sbi->ll_client_common_fill_super_succeeded) {
/* Only if client_common_fill_super succeeded */
client_common_put_super(sb);
}
@@ -1057,7 +1071,7 @@ struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
return inode;
}
-static void ll_dir_clear_lsm_md(struct inode *inode)
+void ll_dir_clear_lsm_md(struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
@@ -1205,16 +1219,44 @@ static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
/* set the directory layout */
if (!lli->lli_lsm_md) {
+ struct cl_attr *attr;
+
rc = ll_init_lsm_md(inode, md);
if (rc)
return rc;
- lli->lli_lsm_md = lsm;
/*
* set lsm_md to NULL, so the following free lustre_md
* will not free this lsm
*/
md->lmv = NULL;
+ lli->lli_lsm_md = lsm;
+
+ attr = kzalloc(sizeof(*attr), GFP_NOFS);
+ if (!attr)
+ return -ENOMEM;
+
+ /* validate the lsm */
+ rc = md_merge_attr(ll_i2mdexp(inode), lsm, attr,
+ ll_md_blocking_ast);
+ if (rc) {
+ kfree(attr);
+ return rc;
+ }
+
+ if (md->body->mbo_valid & OBD_MD_FLNLINK)
+ md->body->mbo_nlink = attr->cat_nlink;
+ if (md->body->mbo_valid & OBD_MD_FLSIZE)
+ md->body->mbo_size = attr->cat_size;
+ if (md->body->mbo_valid & OBD_MD_FLATIME)
+ md->body->mbo_atime = attr->cat_atime;
+ if (md->body->mbo_valid & OBD_MD_FLCTIME)
+ md->body->mbo_ctime = attr->cat_ctime;
+ if (md->body->mbo_valid & OBD_MD_FLMTIME)
+ md->body->mbo_mtime = attr->cat_mtime;
+
+ kfree(attr);
+
CDEBUG(D_INODE, "Set lsm %p magic %x to "DFID"\n", lsm,
lsm->lsm_md_magic, PFID(ll_inode2fid(inode)));
return 0;
@@ -1272,9 +1314,6 @@ void ll_clear_inode(struct inode *inode)
LASSERT(lli->lli_opendir_pid == 0);
}
- spin_lock(&lli->lli_lock);
- ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
- spin_unlock(&lli->lli_lock);
md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
LASSERT(!lli->lli_open_fd_write_count);
@@ -1313,13 +1352,11 @@ void ll_clear_inode(struct inode *inode)
* cl_object still uses inode lsm.
*/
cl_inode_fini(inode);
- lli->lli_has_smd = false;
}
#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
-static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
- struct md_open_data **mod)
+static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
{
struct lustre_md md;
struct inode *inode = d_inode(dentry);
@@ -1332,8 +1369,7 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0,
- &request, mod);
+ rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
if (rc) {
ptlrpc_req_finished(request);
if (rc == -ENOENT) {
@@ -1369,48 +1405,12 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
rc = simple_setattr(dentry, &op_data->op_attr);
op_data->op_attr.ia_valid = ia_valid;
- /* Extract epoch data if obtained. */
- op_data->op_handle = md.body->mbo_handle;
- op_data->op_ioepoch = md.body->mbo_ioepoch;
-
rc = ll_update_inode(inode, &md);
ptlrpc_req_finished(request);
return rc;
}
-/* Close IO epoch and send Size-on-MDS attribute update. */
-static int ll_setattr_done_writing(struct inode *inode,
- struct md_op_data *op_data,
- struct md_open_data *mod)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- int rc = 0;
-
- if (!S_ISREG(inode->i_mode))
- return 0;
-
- CDEBUG(D_INODE, "Epoch %llu closed on "DFID" for truncate\n",
- op_data->op_ioepoch, PFID(&lli->lli_fid));
-
- op_data->op_flags = MF_EPOCH_CLOSE;
- ll_done_writing_attr(inode, op_data);
- ll_pack_inode2opdata(inode, op_data, NULL);
-
- rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
- if (rc == -EAGAIN)
- /* MDS has instructed us to obtain Size-on-MDS attribute
- * from OSTs and send setattr back to the MDS.
- */
- rc = ll_som_update(inode, op_data);
- else if (rc) {
- CERROR("%s: inode "DFID" mdc truncate failed: rc = %d\n",
- ll_i2sbi(inode)->ll_md_exp->exp_obd->obd_name,
- PFID(ll_inode2fid(inode)), rc);
- }
- return rc;
-}
-
/* If this inode has objects allocated to it (lsm != NULL), then the OST
* object(s) determine the file size and mtime. Otherwise, the MDS will
* keep these values until such a time that objects are allocated for it.
@@ -1431,9 +1431,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
struct inode *inode = d_inode(dentry);
struct ll_inode_info *lli = ll_i2info(inode);
struct md_op_data *op_data = NULL;
- struct md_open_data *mod = NULL;
bool file_is_released = false;
- int rc = 0, rc1 = 0;
+ int rc = 0;
CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, valid %x, hsm_import %d\n",
ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), inode,
@@ -1503,14 +1502,33 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
* but other attributes must be set
*/
if (S_ISREG(inode->i_mode)) {
- struct lov_stripe_md *lsm;
+ struct cl_layout cl = {
+ .cl_is_released = false,
+ };
+ struct lu_env *env;
+ int refcheck;
__u32 gen;
- ll_layout_refresh(inode, &gen);
- lsm = ccc_inode_lsm_get(inode);
- if (lsm && lsm->lsm_pattern & LOV_PATTERN_F_RELEASED)
- file_is_released = true;
- ccc_inode_lsm_put(inode, lsm);
+ rc = ll_layout_refresh(inode, &gen);
+ if (rc < 0)
+ goto out;
+
+ /*
+ * XXX: the only place we need to know the layout type,
+ * this will be removed by a later patch. -Jinshan
+ */
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env)) {
+ rc = PTR_ERR(env);
+ goto out;
+ }
+
+ rc = cl_object_layout_get(env, lli->lli_clob, &cl);
+ cl_env_put(env, &refcheck);
+ if (rc < 0)
+ goto out;
+
+ file_is_released = cl.cl_is_released;
if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
if (file_is_released) {
@@ -1527,32 +1545,16 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
* modified, flag it.
*/
attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
op_data->op_bias |= MDS_DATA_MODIFIED;
}
}
memcpy(&op_data->op_attr, attr, sizeof(*attr));
- /* Open epoch for truncate. */
- if (exp_connect_som(ll_i2mdexp(inode)) && !hsm_import &&
- (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
- op_data->op_flags = MF_EPOCH_OPEN;
-
- rc = ll_md_setattr(dentry, op_data, &mod);
+ rc = ll_md_setattr(dentry, op_data);
if (rc)
goto out;
- /* RPC to MDT is sent, cancel data modification flag */
- if (op_data->op_bias & MDS_DATA_MODIFIED) {
- spin_lock(&lli->lli_lock);
- lli->lli_flags &= ~LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
- }
-
- ll_ioepoch_open(lli, op_data->op_ioepoch);
if (!S_ISREG(inode->i_mode) || file_is_released) {
rc = 0;
goto out;
@@ -1568,19 +1570,11 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
* setting times to past, but it is necessary due to possible
* time de-synchronization between MDT inode and OST objects
*/
- if (attr->ia_valid & ATTR_SIZE)
- down_write(&lli->lli_trunc_sem);
- rc = cl_setattr_ost(inode, attr);
- if (attr->ia_valid & ATTR_SIZE)
- up_write(&lli->lli_trunc_sem);
+ rc = cl_setattr_ost(ll_i2info(inode)->lli_clob, attr, 0);
}
out:
- if (op_data->op_ioepoch) {
- rc1 = ll_setattr_done_writing(inode, op_data, mod);
- if (!rc)
- rc = rc1;
- }
- ll_finish_md_op_data(op_data);
+ if (op_data)
+ ll_finish_md_op_data(op_data);
if (!S_ISDIR(inode->i_mode)) {
inode_lock(inode);
@@ -1736,19 +1730,10 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct mdt_body *body = md->body;
- struct lov_stripe_md *lsm = md->lsm;
struct ll_sb_info *sbi = ll_i2sbi(inode);
- LASSERT((lsm != NULL) == ((body->mbo_valid & OBD_MD_FLEASIZE) != 0));
- if (lsm) {
- if (!lli->lli_has_smd &&
- !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
- cl_file_inode_init(inode, md);
-
- lli->lli_maxbytes = lsm->lsm_maxbytes;
- if (lli->lli_maxbytes > MAX_LFS_FILESIZE)
- lli->lli_maxbytes = MAX_LFS_FILESIZE;
- }
+ if (body->mbo_valid & OBD_MD_FLEASIZE)
+ cl_file_inode_init(inode, md);
if (S_ISDIR(inode->i_mode)) {
int rc;
@@ -1828,48 +1813,11 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md)
LASSERT(fid_seq(&lli->lli_fid) != 0);
if (body->mbo_valid & OBD_MD_FLSIZE) {
- if (exp_connect_som(ll_i2mdexp(inode)) &&
- S_ISREG(inode->i_mode)) {
- struct lustre_handle lockh;
- enum ldlm_mode mode;
-
- /* As it is possible a blocking ast has been processed
- * by this time, we need to check there is an UPDATE
- * lock on the client and set LLIF_MDS_SIZE_LOCK holding
- * it.
- */
- mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
- &lockh, LDLM_FL_CBPENDING,
- LCK_CR | LCK_CW |
- LCK_PR | LCK_PW);
- if (mode) {
- if (lli->lli_flags & (LLIF_DONE_WRITING |
- LLIF_EPOCH_PENDING |
- LLIF_SOM_DIRTY)) {
- CERROR("%s: inode "DFID" flags %u still has size authority! do not trust the size got from MDS\n",
- sbi->ll_md_exp->exp_obd->obd_name,
- PFID(ll_inode2fid(inode)),
- lli->lli_flags);
- } else {
- /* Use old size assignment to avoid
- * deadlock bz14138 & bz14326
- */
- i_size_write(inode, body->mbo_size);
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
- spin_unlock(&lli->lli_lock);
- }
- ldlm_lock_decref(&lockh, mode);
- }
- } else {
- /* Use old size assignment to avoid
- * deadlock bz14138 & bz14326
- */
- i_size_write(inode, body->mbo_size);
+ i_size_write(inode, body->mbo_size);
- CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n",
- inode->i_ino, (unsigned long long)body->mbo_size);
- }
+ CDEBUG(D_VFSTRACE, "inode=" DFID ", updating i_size %llu\n",
+ PFID(ll_inode2fid(inode)),
+ (unsigned long long)body->mbo_size);
if (body->mbo_valid & OBD_MD_FLBLOCKS)
inode->i_blocks = body->mbo_blocks;
@@ -1877,7 +1825,7 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md)
if (body->mbo_valid & OBD_MD_TSTATE) {
if (body->mbo_t_state & MS_RESTORE)
- lli->lli_flags |= LLIF_FILE_RESTORING;
+ set_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
}
return 0;
@@ -1892,8 +1840,6 @@ int ll_read_inode2(struct inode *inode, void *opaque)
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
PFID(&lli->lli_fid), inode);
- LASSERT(!lli->lli_has_smd);
-
/* Core attributes from the MDS first. This is a new inode, and
* the VFS doesn't zero times in the core inode so we have to do
* it ourselves. They will be overwritten by either MDS or OST
@@ -1988,9 +1934,9 @@ int ll_iocontrol(struct inode *inode, struct file *file,
return put_user(flags, (int __user *)arg);
}
case FSFILT_IOC_SETFLAGS: {
- struct lov_stripe_md *lsm;
- struct obd_info oinfo = { };
struct md_op_data *op_data;
+ struct cl_object *obj;
+ struct iattr *attr;
if (get_user(flags, (int __user *)arg))
return -EFAULT;
@@ -2002,8 +1948,7 @@ int ll_iocontrol(struct inode *inode, struct file *file,
op_data->op_attr_flags = flags;
op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
- rc = md_setattr(sbi->ll_md_exp, op_data,
- NULL, 0, NULL, 0, &req, NULL);
+ rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
ll_finish_md_op_data(op_data);
ptlrpc_req_finished(req);
if (rc)
@@ -2011,30 +1956,17 @@ int ll_iocontrol(struct inode *inode, struct file *file,
inode->i_flags = ll_ext_to_inode_flags(flags);
- lsm = ccc_inode_lsm_get(inode);
- if (!lsm_has_objects(lsm)) {
- ccc_inode_lsm_put(inode, lsm);
+ obj = ll_i2info(inode)->lli_clob;
+ if (!obj)
return 0;
- }
- oinfo.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
- if (!oinfo.oi_oa) {
- ccc_inode_lsm_put(inode, lsm);
+ attr = kzalloc(sizeof(*attr), GFP_NOFS);
+ if (!attr)
return -ENOMEM;
- }
- oinfo.oi_md = lsm;
- oinfo.oi_oa->o_oi = lsm->lsm_oi;
- oinfo.oi_oa->o_flags = flags;
- oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS |
- OBD_MD_FLGROUP;
- obdo_set_parent_fid(oinfo.oi_oa, &ll_i2info(inode)->lli_fid);
- rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
- kmem_cache_free(obdo_cachep, oinfo.oi_oa);
- ccc_inode_lsm_put(inode, lsm);
-
- if (rc && rc != -EPERM && rc != -EACCES)
- CERROR("osc_setattr_async fails: rc = %d\n", rc);
+ attr->ia_valid = ATTR_ATTR_FLAG;
+ rc = cl_setattr_ost(obj, attr, flags);
+ kfree(attr);
return rc;
}
default:
@@ -2164,7 +2096,6 @@ void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
return;
op_data->op_fid1 = body->mbo_fid1;
- op_data->op_ioepoch = body->mbo_ioepoch;
op_data->op_handle = body->mbo_handle;
op_data->op_mod_time = get_seconds();
md_close(exp, op_data, NULL, &close_req);
@@ -2244,17 +2175,14 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
conf.coc_opc = OBJECT_CONF_SET;
conf.coc_inode = *inode;
conf.coc_lock = lock;
- conf.u.coc_md = &md;
+ conf.u.coc_layout = md.layout;
(void)ll_layout_conf(*inode, &conf);
}
LDLM_LOCK_PUT(lock);
}
out:
- if (md.lsm)
- obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
md_free_lustre_md(sbi->ll_md_exp, &md);
-
cleanup:
if (rc != 0 && it && it->it_op & IT_OPEN)
ll_open_cleanup(sb ? sb : (*inode)->i_sb, req);
@@ -2380,8 +2308,9 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
op_data->op_default_stripe_offset = -1;
if (S_ISDIR(i1->i_mode)) {
op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
- op_data->op_default_stripe_offset =
- ll_i2info(i1)->lli_def_stripe_offset;
+ if (opc == LUSTRE_OPC_MKDIR)
+ op_data->op_default_stripe_offset =
+ ll_i2info(i1)->lli_def_stripe_offset;
}
if (i2) {
@@ -2405,8 +2334,6 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
op_data->op_cap = cfs_curproc_cap_pack();
- op_data->op_bias = 0;
- op_data->op_cli_flags = 0;
if ((opc == LUSTRE_OPC_CREATE) && name &&
filename_is_volatile(name, namelen, &op_data->op_mds))
op_data->op_bias |= MDS_CREATE_VOLATILE;
@@ -2414,10 +2341,6 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
op_data->op_mds = 0;
op_data->op_data = data;
- /* When called by ll_setattr_raw, file is i1. */
- if (ll_i2info(i1)->lli_flags & LLIF_DATA_MODIFIED)
- op_data->op_bias |= MDS_DATA_MODIFIED;
-
return op_data;
}
@@ -2451,6 +2374,9 @@ int ll_show_options(struct seq_file *seq, struct dentry *dentry)
if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
seq_puts(seq, ",user_fid2path");
+ if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
+ seq_puts(seq, ",always_ping");
+
return 0;
}
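ll_setattr_raw() now asks the cl_object for its layout instead of peeking at a lov_stripe_md to decide whether a file is HSM-released. The function below is a condensed sketch of that query under an illustrative name, using only the names visible in the hunk above (cl_env_get/cl_env_put, cl_object_layout_get and the cl_is_released field of struct cl_layout).

/*
 * Condensed version of the released-file check from ll_setattr_raw():
 * grab a cl environment, query the object's layout, and report whether
 * the HSM "released" flag is set.
 */
static int file_is_hsm_released(struct cl_object *obj, bool *released)
{
	struct cl_layout cl = { .cl_is_released = false };
	struct lu_env *env;
	int refcheck;
	int rc;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	rc = cl_object_layout_get(env, obj, &cl);
	cl_env_put(env, &refcheck);
	if (rc < 0)
		return rc;

	*released = cl.cl_is_released;
	return 0;
}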
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 436691814a5e..ee01f20d8b11 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -47,7 +47,7 @@
static const struct vm_operations_struct ll_file_vm_ops;
-void policy_from_vma(ldlm_policy_data_t *policy,
+void policy_from_vma(union ldlm_policy_data *policy,
struct vm_area_struct *vma, unsigned long addr,
size_t count)
{
@@ -80,43 +80,24 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
* API independent part for page fault initialization.
* \param vma - virtual memory area addressed to page fault
* \param env - corresponding lu_env for processing
- * \param nest - nested level
* \param index - page index corresponding to the fault.
* \param ra_flags - vma readahead flags.
*
- * \return allocated and initialized env for fault operation.
- * \retval EINVAL if env can't allocated
- * \return other error codes from cl_io_init.
+ * \return error codes from cl_io_init.
*/
static struct cl_io *
-ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
- struct cl_env_nest *nest, pgoff_t index,
- unsigned long *ra_flags)
+ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
+ pgoff_t index, unsigned long *ra_flags)
{
struct file *file = vma->vm_file;
struct inode *inode = file_inode(file);
struct cl_io *io;
struct cl_fault_io *fio;
- struct lu_env *env;
int rc;
- *env_ret = NULL;
if (ll_file_nolock(file))
return ERR_PTR(-EOPNOTSUPP);
- /*
- * page fault can be called when lustre IO is
- * already active for the current thread, e.g., when doing read/write
- * against user level buffer mapped from Lustre buffer. To avoid
- * stomping on existing context, optionally force an allocation of a new
- * one.
- */
- env = cl_env_nested_get(nest);
- if (IS_ERR(env))
- return ERR_PTR(-EINVAL);
-
- *env_ret = env;
-
restart:
io = vvp_env_thread_io(env);
io->ci_obj = ll_i2info(inode)->lli_clob;
@@ -155,7 +136,6 @@ restart:
if (io->ci_need_restart)
goto restart;
- cl_env_nested_put(nest, env);
io = ERR_PTR(rc);
}
@@ -169,13 +149,17 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
struct lu_env *env;
struct cl_io *io;
struct vvp_io *vio;
- struct cl_env_nest nest;
int result;
+ int refcheck;
sigset_t set;
struct inode *inode;
struct ll_inode_info *lli;
- io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ io = ll_fault_io_init(env, vma, vmpage->index, NULL);
if (IS_ERR(io)) {
result = PTR_ERR(io);
goto out;
@@ -231,17 +215,14 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
result = -EAGAIN;
}
- if (result == 0) {
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
- }
+ if (!result)
+ set_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
}
out_io:
cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
out:
+ cl_env_put(env, &refcheck);
CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
LASSERT(ergo(result == 0, PageLocked(vmpage)));
@@ -285,13 +266,19 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
struct vvp_io *vio = NULL;
struct page *vmpage;
unsigned long ra_flags;
- struct cl_env_nest nest;
- int result;
+ int result = 0;
int fault_ret = 0;
+ int refcheck;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
- io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
- if (IS_ERR(io))
- return to_fault_error(PTR_ERR(io));
+ io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags);
+ if (IS_ERR(io)) {
+ result = to_fault_error(PTR_ERR(io));
+ goto out;
+ }
result = io->ci_result;
if (result == 0) {
@@ -322,14 +309,15 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
}
}
cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
vma->vm_flags |= ra_flags;
+
+out:
+ cl_env_put(env, &refcheck);
if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
fault_ret |= to_fault_error(result);
- CDEBUG(D_MMAP, "%s fault %d/%d\n",
- current->comm, fault_ret, result);
+ CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
return fault_ret;
}
@@ -381,6 +369,7 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
bool retry;
int result;
+ file_update_time(vma->vm_file);
do {
retry = false;
result = ll_page_mkwrite0(vma, vmf->page, &retry);
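The fault paths above drop the nested-environment helpers (cl_env_nested_get/cl_env_nested_put) in favour of the plain cl_env_get()/cl_env_put() reference pattern. The skeleton below shows that pattern in isolation; the with_cl_env() wrapper and its work callback are illustrative constructs, not part of the patch.

/*
 * Skeleton of the cl environment get/put pattern now used by the fault
 * handlers: acquire an env with a refcheck cookie, do the work, and
 * release the env on every exit path.
 */
static int with_cl_env(int (*work)(const struct lu_env *env, void *arg),
		       void *arg)
{
	struct lu_env *env;
	int refcheck;
	int rc;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	rc = work(env, arg);		/* caller-supplied body */

	cl_env_put(env, &refcheck);
	return rc;
}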
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index 709230571b4b..c63236580b0f 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -226,7 +226,7 @@ static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen,
static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name,
int namelen, loff_t hash, u64 ino,
- unsigned type)
+ unsigned int type)
{
/* It is a hack to access lde_fid for comparison with lgd_fid.
* So the input 'name' must be part of the 'lu_dirent'.
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 23fda9d98bff..03682c10fc9e 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -1060,10 +1060,6 @@ static const struct llite_file_opcode {
"brw_read" },
{ LPROC_LL_BRW_WRITE, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_PAGES,
"brw_write" },
- { LPROC_LL_OSC_READ, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
- "osc_read" },
- { LPROC_LL_OSC_WRITE, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
- "osc_write" },
{ LPROC_LL_IOCTL, LPROCFS_TYPE_REGS, "ioctl" },
{ LPROC_LL_OPEN, LPROCFS_TYPE_REGS, "open" },
{ LPROC_LL_RELEASE, LPROCFS_TYPE_REGS, "close" },
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 180f35e3afd9..9426759aedc9 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -113,13 +113,18 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
if (inode->i_state & I_NEW) {
rc = ll_read_inode2(inode, md);
if (!rc && S_ISREG(inode->i_mode) &&
- !ll_i2info(inode)->lli_clob) {
- CDEBUG(D_INODE, "%s: apply lsm %p to inode "DFID"\n",
- ll_get_fsname(sb, NULL, 0), md->lsm,
- PFID(ll_inode2fid(inode)));
+ !ll_i2info(inode)->lli_clob)
rc = cl_file_inode_init(inode, md);
- }
+
if (rc) {
+ /*
+ * Clear the directory lsm here; otherwise
+ * make_bad_inode() will reset the inode mode
+ * to regular, and then ll_clear_inode() will
+ * not be able to clear lsm_md.
+ */
+ if (S_ISDIR(inode->i_mode))
+ ll_dir_clear_lsm_md(inode);
make_bad_inode(inode);
unlock_new_inode(inode);
iput(inode);
@@ -132,6 +137,8 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
CDEBUG(D_VFSTRACE, "got inode: "DFID"(%p): rc = %d\n",
PFID(&md->body->mbo_fid1), inode, rc);
if (rc) {
+ if (S_ISDIR(inode->i_mode))
+ ll_dir_clear_lsm_md(inode);
iput(inode);
inode = ERR_PTR(rc);
}
@@ -258,7 +265,9 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
struct ll_inode_info *lli = ll_i2info(inode);
spin_lock(&lli->lli_lock);
- lli->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
+ LTIME_S(inode->i_mtime) = 0;
+ LTIME_S(inode->i_atime) = 0;
+ LTIME_S(inode->i_ctime) = 0;
spin_unlock(&lli->lli_lock);
}
@@ -287,11 +296,39 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
hash = cl_fid_build_ino(&lli->lli_pfid,
ll_need_32bit_api(ll_i2sbi(inode)));
-
- master_inode = ilookup5(inode->i_sb, hash,
- ll_test_inode_by_fid,
- (void *)&lli->lli_pfid);
- if (master_inode && !IS_ERR(master_inode)) {
+ /*
+ * Do not look up the inode with ilookup5();
+ * otherwise it would cause a deadlock:
+ *
+ * 1. Client1 sends a chmod req to MDT0; on
+ * MDT0 it enqueues the master and all of its
+ * slave locks (mdt_attr_set() ->
+ * mdt_lock_slaves()). After it gets the master
+ * and stripe0 locks, it sends the enqueue req
+ * (for stripe1) to MDT1, where MDT1 finds the
+ * lock has been granted to client2. MDT1 then
+ * sends a blocking ast to client2.
+ *
+ * 2. At the same time, client2 tries to unlink
+ * the striped dir (rm -rf striped_dir); during
+ * lookup it holds the master inode of the
+ * striped directory, whose inode state is NEW,
+ * and then tries to revalidate all of its
+ * slaves (ll_prep_inode() -> ll_iget() ->
+ * ll_read_inode2() -> ll_update_inode()), which
+ * blocks on the server side because of 1.
+ *
+ * 3. Client2 then gets the blocking_ast req and
+ * cancels the lock, but it would be blocked if
+ * it used ilookup5(), because the master inode
+ * state is NEW.
+ */
+ master_inode = ilookup5_nowait(inode->i_sb,
+ hash,
+ ll_test_inode_by_fid,
+ (void *)&lli->lli_pfid);
+ if (master_inode) {
ll_invalidate_negative_children(master_inode);
iput(master_inode);
}
@@ -535,6 +572,10 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
}
}
+ if (it->it_op & IT_OPEN && it->it_flags & FMODE_WRITE &&
+ dentry->d_sb->s_flags & MS_RDONLY)
+ return ERR_PTR(-EROFS);
+
if (it->it_op & IT_CREAT)
opc = LUSTRE_OPC_CREATE;
else
@@ -801,7 +842,8 @@ static int ll_create_it(struct inode *dir, struct dentry *dentry,
return PTR_ERR(inode);
d_instantiate(dentry, inode);
- return 0;
+
+ return ll_init_security(dentry, inode, dir);
}
void ll_update_times(struct ptlrpc_request *request, struct inode *inode)
@@ -896,6 +938,8 @@ again:
goto err_exit;
d_instantiate(dentry, inode);
+
+ err = ll_init_security(dentry, inode, dir);
err_exit:
if (request)
ptlrpc_req_finished(request);
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 76a6836cdf70..f10e092979fe 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -181,90 +181,73 @@ void ll_ras_enter(struct file *f)
spin_unlock(&ras->ras_lock);
}
-static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct cl_page *page,
- struct cl_object *clob, pgoff_t *max_index)
+/**
+ * Initiates read-ahead of a page with given index.
+ *
+ * \retval +ve: page was already uptodate, so it will be skipped
+ * from being added;
+ * \retval -ve: page wasn't added to \a queue because of an error;
+ * \retval 0: page was added into \a queue for read-ahead.
+ */
+static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue, pgoff_t index)
{
- struct page *vmpage = page->cp_vmpage;
+ enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
+ struct cl_object *clob = io->ci_obj;
+ struct inode *inode = vvp_object_inode(clob);
+ const char *msg = NULL;
+ struct cl_page *page;
struct vvp_page *vpg;
- int rc;
+ struct page *vmpage;
+ int rc = 0;
+
+ vmpage = grab_cache_page_nowait(inode->i_mapping, index);
+ if (!vmpage) {
+ which = RA_STAT_FAILED_GRAB_PAGE;
+ msg = "g_c_p_n failed";
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /* Check if vmpage was truncated or reclaimed */
+ if (vmpage->mapping != inode->i_mapping) {
+ which = RA_STAT_WRONG_GRAB_PAGE;
+ msg = "g_c_p_n returned invalid page";
+ rc = -EBUSY;
+ goto out;
+ }
+
+ page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
+ if (IS_ERR(page)) {
+ which = RA_STAT_FAILED_GRAB_PAGE;
+ msg = "cl_page_find failed";
+ rc = PTR_ERR(page);
+ goto out;
+ }
- rc = 0;
- cl_page_assume(env, io, page);
lu_ref_add(&page->cp_reference, "ra", current);
+ cl_page_assume(env, io, page);
vpg = cl2vvp_page(cl_object_page_slice(clob, page));
if (!vpg->vpg_defer_uptodate && !PageUptodate(vmpage)) {
- CDEBUG(D_READA, "page index %lu, max_index: %lu\n",
- vvp_index(vpg), *max_index);
- if (*max_index == 0 || vvp_index(vpg) > *max_index)
- rc = cl_page_is_under_lock(env, io, page, max_index);
- if (rc == 0) {
- vpg->vpg_defer_uptodate = 1;
- vpg->vpg_ra_used = 0;
- cl_page_list_add(queue, page);
- rc = 1;
- } else {
- cl_page_discard(env, io, page);
- rc = -ENOLCK;
- }
+ vpg->vpg_defer_uptodate = 1;
+ vpg->vpg_ra_used = 0;
+ cl_page_list_add(queue, page);
} else {
/* skip completed pages */
cl_page_unassume(env, io, page);
+ /* This page is already uptodate; return a positive number
+ * to tell the callers about this.
+ */
+ rc = 1;
}
+
lu_ref_del(&page->cp_reference, "ra", current);
cl_page_put(env, page);
- return rc;
-}
-
-/**
- * Initiates read-ahead of a page with given index.
- *
- * \retval +ve: page was added to \a queue.
- *
- * \retval -ENOLCK: there is no extent lock for this part of a file, stop
- * read-ahead.
- *
- * \retval -ve, 0: page wasn't added to \a queue for other reason.
- */
-static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue,
- pgoff_t index, pgoff_t *max_index)
-{
- struct cl_object *clob = io->ci_obj;
- struct inode *inode = vvp_object_inode(clob);
- struct page *vmpage;
- struct cl_page *page;
- enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
- int rc = 0;
- const char *msg = NULL;
-
- vmpage = grab_cache_page_nowait(inode->i_mapping, index);
+out:
if (vmpage) {
- /* Check if vmpage was truncated or reclaimed */
- if (vmpage->mapping == inode->i_mapping) {
- page = cl_page_find(env, clob, vmpage->index,
- vmpage, CPT_CACHEABLE);
- if (!IS_ERR(page)) {
- rc = cl_read_ahead_page(env, io, queue,
- page, clob, max_index);
- if (rc == -ENOLCK) {
- which = RA_STAT_FAILED_MATCH;
- msg = "lock match failed";
- }
- } else {
- which = RA_STAT_FAILED_GRAB_PAGE;
- msg = "cl_page_find failed";
- }
- } else {
- which = RA_STAT_WRONG_GRAB_PAGE;
- msg = "g_c_p_n returned invalid page";
- }
- if (rc != 1)
+ if (rc)
unlock_page(vmpage);
put_page(vmpage);
- } else {
- which = RA_STAT_FAILED_GRAB_PAGE;
- msg = "g_c_p_n failed";
}
if (msg) {
ll_ra_stats_inc(inode, which);
@@ -379,12 +362,12 @@ static int ll_read_ahead_pages(const struct lu_env *env,
struct cl_io *io, struct cl_page_list *queue,
struct ra_io_arg *ria,
unsigned long *reserved_pages,
- unsigned long *ra_end)
+ pgoff_t *ra_end)
{
+ struct cl_read_ahead ra = { 0 };
int rc, count = 0;
bool stride_ria;
pgoff_t page_idx;
- pgoff_t max_index = 0;
LASSERT(ria);
RIA_DEBUG(ria);
@@ -393,14 +376,23 @@ static int ll_read_ahead_pages(const struct lu_env *env,
for (page_idx = ria->ria_start;
page_idx <= ria->ria_end && *reserved_pages > 0; page_idx++) {
if (ras_inside_ra_window(page_idx, ria)) {
+ if (!ra.cra_end || ra.cra_end < page_idx) {
+ cl_read_ahead_release(env, &ra);
+
+ rc = cl_io_read_ahead(env, io, page_idx, &ra);
+ if (rc < 0)
+ break;
+
+ LASSERTF(ra.cra_end >= page_idx,
+ "object: %p, indcies %lu / %lu\n",
+ io->ci_obj, ra.cra_end, page_idx);
+ }
+
/* If the page is inside the read-ahead window*/
- rc = ll_read_ahead_page(env, io, queue,
- page_idx, &max_index);
- if (rc == 1) {
+ rc = ll_read_ahead_page(env, io, queue, page_idx);
+ if (!rc) {
(*reserved_pages)--;
count++;
- } else if (rc == -ENOLCK) {
- break;
}
} else if (stride_ria) {
/* If it is not in the read-ahead window, and it is
@@ -426,19 +418,21 @@ static int ll_read_ahead_pages(const struct lu_env *env,
}
}
}
+ cl_read_ahead_release(env, &ra);
+
*ra_end = page_idx;
return count;
}
-int ll_readahead(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct ll_readahead_state *ras,
- bool hit)
+static int ll_readahead(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue,
+ struct ll_readahead_state *ras, bool hit)
{
struct vvp_io *vio = vvp_env_io(env);
struct ll_thread_info *lti = ll_env_info(env);
struct cl_attr *attr = vvp_env_thread_attr(env);
- unsigned long start = 0, end = 0, reserved;
- unsigned long ra_end, len, mlen = 0;
+ unsigned long len, mlen = 0, reserved;
+ pgoff_t ra_end, start = 0, end = 0;
struct inode *inode;
struct ra_io_arg *ria = &lti->lti_ria;
struct cl_object *clob;
@@ -464,30 +458,25 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
spin_lock(&ras->ras_lock);
- /* Enlarge the RA window to encompass the full read */
- if (vio->vui_ra_valid &&
- ras->ras_window_start + ras->ras_window_len <
- vio->vui_ra_start + vio->vui_ra_count) {
- ras->ras_window_len = vio->vui_ra_start + vio->vui_ra_count -
- ras->ras_window_start;
- }
+ /**
+ * Note: another thread might roll back ras_next_readahead
+ * if it cannot get the full size of prepared pages; see the
+ * end of this function. For stride read-ahead, it needs to
+ * make sure the offset is no less than ras_stride_offset,
+ * so that stride read-ahead can work correctly.
+ */
+ if (stride_io_mode(ras))
+ start = max(ras->ras_next_readahead, ras->ras_stride_offset);
+ else
+ start = ras->ras_next_readahead;
- /* Reserve a part of the read-ahead window that we'll be issuing */
- if (ras->ras_window_len > 0) {
- /*
- * Note: other thread might rollback the ras_next_readahead,
- * if it can not get the full size of prepared pages, see the
- * end of this function. For stride read ahead, it needs to
- * make sure the offset is no less than ras_stride_offset,
- * so that stride read ahead can work correctly.
- */
- if (stride_io_mode(ras))
- start = max(ras->ras_next_readahead,
- ras->ras_stride_offset);
- else
- start = ras->ras_next_readahead;
+ if (ras->ras_window_len > 0)
end = ras->ras_window_start + ras->ras_window_len - 1;
- }
+
+ /* Enlarge the RA window to encompass the full read */
+ if (vio->vui_ra_valid &&
+ end < vio->vui_ra_start + vio->vui_ra_count - 1)
+ end = vio->vui_ra_start + vio->vui_ra_count - 1;
if (end != 0) {
unsigned long rpc_boundary;
@@ -576,8 +565,8 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
* if the region we failed to issue read-ahead on is still ahead
* of the app and behind the next index to start read-ahead from
*/
- CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu\n",
- ra_end, end, ria->ria_end);
+ CDEBUG(D_READA, "ra_end = %lu end = %lu stride end = %lu pages = %d\n",
+ ra_end, end, ria->ria_end, ret);
if (ra_end != end + 1) {
ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END);
@@ -609,7 +598,7 @@ static void ras_reset(struct inode *inode, struct ll_readahead_state *ras,
ras->ras_consecutive_pages = 0;
ras->ras_window_len = 0;
ras_set_start(inode, ras, index);
- ras->ras_next_readahead = max(ras->ras_window_start, index);
+ ras->ras_next_readahead = max(ras->ras_window_start, index + 1);
RAS_CDEBUG(ras);
}
@@ -738,12 +727,13 @@ static void ras_increase_window(struct inode *inode,
ra->ra_max_pages_per_file);
}
-void ras_update(struct ll_sb_info *sbi, struct inode *inode,
- struct ll_readahead_state *ras, unsigned long index,
- unsigned hit)
+static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
+ struct ll_readahead_state *ras, unsigned long index,
+ enum ras_update_flags flags)
{
struct ll_ra_info *ra = &sbi->ll_ra_info;
int zero = 0, stride_detect = 0, ra_miss = 0;
+ bool hit = flags & LL_RAS_HIT;
spin_lock(&ras->ras_lock);
@@ -773,7 +763,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
* to for subsequent IO. The mmap case does not increment
* ras_requests and thus can never trigger this behavior.
*/
- if (ras->ras_requests == 2 && !ras->ras_request_index) {
+ if (ras->ras_requests >= 2 && !ras->ras_request_index) {
__u64 kms_pages;
kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
@@ -785,8 +775,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
if (kms_pages &&
kms_pages <= ra->ra_max_read_ahead_whole_pages) {
ras->ras_window_start = 0;
- ras->ras_last_readpage = 0;
- ras->ras_next_readahead = 0;
+ ras->ras_next_readahead = index + 1;
ras->ras_window_len = min(ra->ra_max_pages_per_file,
ra->ra_max_read_ahead_whole_pages);
goto out_unlock;
@@ -816,13 +805,20 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
if (ra_miss) {
if (index_in_stride_window(ras, index) &&
stride_io_mode(ras)) {
- /*If stride-RA hit cache miss, the stride dector
- *will not be reset to avoid the overhead of
- *redetecting read-ahead mode
- */
if (index != ras->ras_last_readpage + 1)
ras->ras_consecutive_pages = 0;
ras_reset(inode, ras, index);
+
+ /* If stride-RA hits a cache miss, the stride
+ * detector will not be reset, to avoid the
+ * overhead of redetecting read-ahead mode, but
+ * only on the condition that the stride window
+ * still intersects with the normal sequential
+ * read-ahead window.
+ */
+ if (ras->ras_window_start <
+ ras->ras_stride_offset)
+ ras_stride_reset(ras);
RAS_CDEBUG(ras);
} else {
/* Reset both stride window and normal RA
@@ -867,8 +863,13 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
/* Trigger RA in the mmap case where ras_consecutive_requests
* is not incremented and thus can't be used to trigger RA
*/
- if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
- ras->ras_window_len = RAS_INCREASE_STEP(inode);
+ if (ras->ras_consecutive_pages >= 4 && flags & LL_RAS_MMAP) {
+ ras_increase_window(inode, ras, ra);
+ /*
+ * reset consecutive pages so that the readahead window can
+ * grow gradually.
+ */
+ ras->ras_consecutive_pages = 0;
goto out_unlock;
}
@@ -903,17 +904,17 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
struct cl_io *io;
struct cl_page *page;
struct cl_object *clob;
- struct cl_env_nest nest;
bool redirtied = false;
bool unlocked = false;
int result;
+ int refcheck;
LASSERT(PageLocked(vmpage));
LASSERT(!PageWriteback(vmpage));
LASSERT(ll_i2dtexp(inode));
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env)) {
result = PTR_ERR(env);
goto out;
@@ -978,7 +979,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
}
}
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
goto out;
out:
@@ -1088,6 +1089,63 @@ void ll_cl_remove(struct file *file, const struct lu_env *env)
write_unlock(&fd->fd_lock);
}
+static int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page)
+{
+ struct inode *inode = vvp_object_inode(page->cp_obj);
+ struct ll_file_data *fd = vvp_env_io(env)->vui_fd;
+ struct ll_readahead_state *ras = &fd->fd_ras;
+ struct cl_2queue *queue = &io->ci_queue;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct vvp_page *vpg;
+ int rc = 0;
+
+ vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page));
+ if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
+ sbi->ll_ra_info.ra_max_pages > 0) {
+ struct vvp_io *vio = vvp_env_io(env);
+ enum ras_update_flags flags = 0;
+
+ if (vpg->vpg_defer_uptodate)
+ flags |= LL_RAS_HIT;
+ if (!vio->vui_ra_valid)
+ flags |= LL_RAS_MMAP;
+ ras_update(sbi, inode, ras, vvp_index(vpg), flags);
+ }
+
+ if (vpg->vpg_defer_uptodate) {
+ vpg->vpg_ra_used = 1;
+ cl_page_export(env, page, 1);
+ }
+
+ cl_2queue_init(queue);
+ /*
+ * Add the page into the queue even when it is marked uptodate above;
+ * this will unlock it automatically as part of cl_page_list_disown().
+ */
+ cl_page_list_add(&queue->c2_qin, page);
+ if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
+ sbi->ll_ra_info.ra_max_pages > 0) {
+ int rc2;
+
+ rc2 = ll_readahead(env, io, &queue->c2_qin, ras,
+ vpg->vpg_defer_uptodate);
+ CDEBUG(D_READA, DFID "%d pages read ahead at %lu\n",
+ PFID(ll_inode2fid(inode)), rc2, vvp_index(vpg));
+ }
+
+ if (queue->c2_qin.pl_nr > 0)
+ rc = cl_io_submit_rw(env, io, CRT_READ, queue);
+
+ /*
+ * Unlock unsent pages in case of error.
+ */
+ cl_page_list_disown(env, io, &queue->c2_qin);
+ cl_2queue_fini(env, queue);
+
+ return rc;
+}
+
int ll_readpage(struct file *file, struct page *vmpage)
{
struct cl_object *clob = ll_i2info(file_inode(file))->lli_clob;
@@ -1111,7 +1169,7 @@ int ll_readpage(struct file *file, struct page *vmpage)
LASSERT(page->cp_type == CPT_CACHEABLE);
if (likely(!PageUptodate(vmpage))) {
cl_page_assume(env, io, page);
- result = cl_io_read_page(env, io, page);
+ result = ll_io_read_page(env, io, page);
} else {
/* Page from a non-object file. */
unlock_page(vmpage);
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 26f3a37873a7..21e06e5b514e 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -71,8 +71,6 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
struct cl_page *page;
struct cl_object *obj;
- int refcheck;
-
LASSERT(PageLocked(vmpage));
LASSERT(!PageWriteback(vmpage));
@@ -82,28 +80,27 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
* happening with locked page too
*/
if (offset == 0 && length == PAGE_SIZE) {
- env = cl_env_get(&refcheck);
- if (!IS_ERR(env)) {
- inode = vmpage->mapping->host;
- obj = ll_i2info(inode)->lli_clob;
- if (obj) {
- page = cl_vmpage_page(vmpage, obj);
- if (page) {
- cl_page_delete(env, page);
- cl_page_put(env, page);
- }
- } else {
- LASSERT(vmpage->private == 0);
+ /* See the comment in ll_releasepage() */
+ env = cl_env_percpu_get();
+ LASSERT(!IS_ERR(env));
+ inode = vmpage->mapping->host;
+ obj = ll_i2info(inode)->lli_clob;
+ if (obj) {
+ page = cl_vmpage_page(vmpage, obj);
+ if (page) {
+ cl_page_delete(env, page);
+ cl_page_put(env, page);
}
- cl_env_put(env, &refcheck);
+ } else {
+ LASSERT(vmpage->private == 0);
}
+ cl_env_percpu_put(env);
}
}
static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
{
struct lu_env *env;
- void *cookie;
struct cl_object *obj;
struct cl_page *page;
struct address_space *mapping;
@@ -129,7 +126,6 @@ static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
if (!page)
return 1;
- cookie = cl_env_reenter();
env = cl_env_percpu_get();
LASSERT(!IS_ERR(env));
@@ -155,7 +151,6 @@ static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
cl_page_put(env, page);
cl_env_percpu_put(env);
- cl_env_reexit(cookie);
return result;
}
@@ -340,19 +335,15 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
{
- struct lu_env *env;
+ struct ll_cl_context *lcc;
+ const struct lu_env *env;
struct cl_io *io;
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
loff_t file_offset = iocb->ki_pos;
ssize_t count = iov_iter_count(iter);
ssize_t tot_bytes = 0, result = 0;
- struct ll_inode_info *lli = ll_i2info(inode);
long size = MAX_DIO_SIZE;
- int refcheck;
-
- if (!lli->lli_has_smd)
- return -EBADF;
/* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK))
@@ -367,9 +358,13 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
if (iov_iter_alignment(iter) & ~PAGE_MASK)
return -EINVAL;
- env = cl_env_get(&refcheck);
+ lcc = ll_cl_find(file);
+ if (!lcc)
+ return -EIO;
+
+ env = lcc->lcc_env;
LASSERT(!IS_ERR(env));
- io = vvp_env_io(env)->vui_cl.cis_io;
+ io = lcc->lcc_io;
LASSERT(io);
while (iov_iter_count(iter)) {
@@ -426,7 +421,6 @@ out:
vio->u.write.vui_written += tot_bytes;
}
- cl_env_put(env, &refcheck);
return tot_bytes ? tot_bytes : result;
}
@@ -466,13 +460,13 @@ static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
}
static int ll_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned int len, unsigned int flags,
struct page **pagep, void **fsdata)
{
struct ll_cl_context *lcc;
- const struct lu_env *env;
+ const struct lu_env *env = NULL;
struct cl_io *io;
- struct cl_page *page;
+ struct cl_page *page = NULL;
struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
pgoff_t index = pos >> PAGE_SHIFT;
struct page *vmpage = NULL;
@@ -484,6 +478,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
lcc = ll_cl_find(file);
if (!lcc) {
+ io = NULL;
result = -EIO;
goto out;
}
@@ -560,6 +555,12 @@ out:
unlock_page(vmpage);
put_page(vmpage);
}
+ if (!IS_ERR_OR_NULL(page)) {
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+ }
+ if (io)
+ io->ci_result = result;
} else {
*pagep = vmpage;
*fsdata = lcc;
@@ -576,7 +577,7 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
struct cl_io *io;
struct vvp_io *vio;
struct cl_page *page;
- unsigned from = pos & (PAGE_SIZE - 1);
+ unsigned int from = pos & (PAGE_SIZE - 1);
bool unplug = false;
int result = 0;
@@ -629,6 +630,8 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
file->f_flags & O_SYNC || IS_SYNC(file_inode(file)))
result = vvp_io_write_commit(env, io);
+ if (result < 0)
+ io->ci_result = result;
return result >= 0 ? copied : result;
}
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index 0677513476ec..4769a2230ae1 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -659,8 +659,8 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
struct ll_inode_info *lli = ll_i2info(dir);
struct ll_statahead_info *sai = lli->lli_sai;
struct sa_entry *entry = (struct sa_entry *)minfo->mi_cbdata;
+ wait_queue_head_t *waitq = NULL;
__u64 handle = 0;
- bool wakeup;
if (it_disposition(it, DISP_LOOKUP_NEG))
rc = -ENOENT;
@@ -693,7 +693,8 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
spin_lock(&lli->lli_sa_lock);
if (rc) {
- wakeup = __sa_make_ready(sai, entry, rc);
+ if (__sa_make_ready(sai, entry, rc))
+ waitq = &sai->sai_waitq;
} else {
entry->se_minfo = minfo;
entry->se_req = ptlrpc_request_addref(req);
@@ -704,13 +705,15 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
* with parent's lock held, for example: unlink.
*/
entry->se_handle = handle;
- wakeup = !sa_has_callback(sai);
+ if (!sa_has_callback(sai))
+ waitq = &sai->sai_thread.t_ctl_waitq;
+
list_add_tail(&entry->se_list, &sai->sai_interim_entries);
}
sai->sai_replied++;
- if (wakeup)
- wake_up(&sai->sai_thread.t_ctl_waitq);
+ if (waitq)
+ wake_up(waitq);
spin_unlock(&lli->lli_sa_lock);
return rc;
@@ -1397,10 +1400,10 @@ static int revalidate_statahead_dentry(struct inode *dir,
struct dentry **dentryp,
bool unplug)
{
+ struct ll_inode_info *lli = ll_i2info(dir);
struct sa_entry *entry = NULL;
struct l_wait_info lwi = { 0 };
struct ll_dentry_data *ldd;
- struct ll_inode_info *lli;
int rc = 0;
if ((*dentryp)->d_name.name[0] == '.') {
@@ -1446,7 +1449,9 @@ static int revalidate_statahead_dentry(struct inode *dir,
sa_handle_callback(sai);
if (!sa_ready(entry)) {
+ spin_lock(&lli->lli_sa_lock);
sai->sai_index_wait = entry->se_index;
+ spin_unlock(&lli->lli_sa_lock);
lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
LWI_ON_SIGNAL_NOOP, NULL);
rc = l_wait_event(sai->sai_waitq, sa_ready(entry), &lwi);
@@ -1475,6 +1480,7 @@ static int revalidate_statahead_dentry(struct inode *dir,
alias = ll_splice_alias(inode, *dentryp);
if (IS_ERR(alias)) {
+ ll_intent_release(&it);
rc = PTR_ERR(alias);
goto out_unplug;
}
@@ -1493,6 +1499,7 @@ static int revalidate_statahead_dentry(struct inode *dir,
*dentryp,
PFID(ll_inode2fid((*dentryp)->d_inode)),
PFID(ll_inode2fid(inode)));
+ ll_intent_release(&it);
rc = -ESTALE;
goto out_unplug;
}
@@ -1512,7 +1519,6 @@ out_unplug:
* dentry_may_statahead().
*/
ldd = ll_d2d(*dentryp);
- lli = ll_i2info(dir);
/* ldd can be NULL if llite lookup failed. */
if (ldd)
ldd->lld_sa_generation = lli->lli_sa_generation;
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index 8aa8ecc09a48..12c129f7e4ad 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -55,7 +55,6 @@
static struct kmem_cache *ll_thread_kmem;
struct kmem_cache *vvp_lock_kmem;
struct kmem_cache *vvp_object_kmem;
-struct kmem_cache *vvp_req_kmem;
static struct kmem_cache *vvp_session_kmem;
static struct kmem_cache *vvp_thread_kmem;
@@ -76,11 +75,6 @@ static struct lu_kmem_descr vvp_caches[] = {
.ckd_size = sizeof(struct vvp_object),
},
{
- .ckd_cache = &vvp_req_kmem,
- .ckd_name = "vvp_req_kmem",
- .ckd_size = sizeof(struct vvp_req),
- },
- {
.ckd_cache = &vvp_session_kmem,
.ckd_name = "vvp_session_kmem",
.ckd_size = sizeof(struct vvp_session)
@@ -177,10 +171,6 @@ static const struct lu_device_operations vvp_lu_ops = {
.ldo_object_alloc = vvp_object_alloc
};
-static const struct cl_device_operations vvp_cl_ops = {
- .cdo_req_init = vvp_req_init
-};
-
static struct lu_device *vvp_device_free(const struct lu_env *env,
struct lu_device *d)
{
@@ -213,7 +203,6 @@ static struct lu_device *vvp_device_alloc(const struct lu_env *env,
lud = &vdv->vdv_cl.cd_lu_dev;
cl_device_init(&vdv->vdv_cl, t);
vvp2lu_dev(vdv)->ld_ops = &vvp_lu_ops;
- vdv->vdv_cl.cd_ops = &vvp_cl_ops;
site = kzalloc(sizeof(*site), GFP_NOFS);
if (site) {
@@ -332,7 +321,6 @@ int cl_sb_init(struct super_block *sb)
cl = cl_type_setup(env, NULL, &vvp_device_type,
sbi->ll_dt_exp->exp_obd->obd_lu_dev);
if (!IS_ERR(cl)) {
- cl2vvp_dev(cl)->vdv_sb = sb;
sbi->ll_cl = cl;
sbi->ll_site = cl2lu_dev(cl)->ld_site;
}
@@ -521,11 +509,10 @@ static void vvp_pgcache_page_show(const struct lu_env *env,
vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
vmpage = vpg->vpg_page;
- seq_printf(seq, " %5i | %p %p %s %s %s %s | %p "DFID"(%p) %lu %u [",
+ seq_printf(seq, " %5i | %p %p %s %s %s | %p " DFID "(%p) %lu %u [",
0 /* gen */,
vpg, page,
"none",
- vpg->vpg_write_queued ? "wq" : "- ",
vpg->vpg_defer_uptodate ? "du" : "- ",
PageWriteback(vmpage) ? "wb" : "-",
vmpage, PFID(ll_inode2fid(vmpage->mapping->host)),
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
index 4464ad258387..c60d0414ac25 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_internal.h
+++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h
@@ -42,9 +42,7 @@
enum obd_notify_event;
struct inode;
-struct lov_stripe_md;
struct lustre_md;
-struct obd_capa;
struct obd_device;
struct obd_export;
struct page;
@@ -122,7 +120,6 @@ extern struct lu_context_key vvp_thread_key;
extern struct kmem_cache *vvp_lock_kmem;
extern struct kmem_cache *vvp_object_kmem;
-extern struct kmem_cache *vvp_req_kmem;
struct vvp_thread_info {
struct cl_lock vti_lock;
@@ -195,14 +192,6 @@ struct vvp_object {
struct inode *vob_inode;
/**
- * A list of dirty pages pending IO in the cache. Used by
- * SOM. Protected by ll_inode_info::lli_lock.
- *
- * \see vvp_page::vpg_pending_linkage
- */
- struct list_head vob_pending_list;
-
- /**
* Number of transient pages. This is no longer protected by i_sem,
* and needs to be atomic. This is not actually used for anything,
* and can probably be removed.
@@ -235,15 +224,7 @@ struct vvp_object {
struct vvp_page {
struct cl_page_slice vpg_cl;
unsigned int vpg_defer_uptodate:1,
- vpg_ra_used:1,
- vpg_write_queued:1;
- /**
- * Non-empty iff this page is already counted in
- * vvp_object::vob_pending_list. This list is only used as a flag,
- * that is, never iterated through, only checked for list_empty(), but
- * having a list is useful for debugging.
- */
- struct list_head vpg_pending_linkage;
+ vpg_ra_used:1;
/** VM page */
struct page *vpg_page;
};
@@ -260,7 +241,6 @@ static inline pgoff_t vvp_index(struct vvp_page *vvp)
struct vvp_device {
struct cl_device vdv_cl;
- struct super_block *vdv_sb;
struct cl_device *vdv_next;
};
@@ -268,10 +248,6 @@ struct vvp_lock {
struct cl_lock_slice vlk_cl;
};
-struct vvp_req {
- struct cl_req_slice vrq_cl;
-};
-
void *ccc_key_init(const struct lu_context *ctx,
struct lu_context_key *key);
void ccc_key_fini(const struct lu_context *ctx,
@@ -325,21 +301,8 @@ static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice)
# define CLOBINVRNT(env, clob, expr) \
((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
-/**
- * New interfaces to get and put lov_stripe_md from lov layer. This violates
- * layering because lov_stripe_md is supposed to be a private data in lov.
- *
- * NB: If you find you have to use these interfaces for your new code, please
- * think about it again. These interfaces may be removed in the future for
- * better layering.
- */
-struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
-void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
int lov_read_and_clear_async_rc(struct cl_object *clob);
-struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
-void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
-
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
struct cl_io *io);
int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
@@ -347,8 +310,6 @@ int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
struct cl_lock *lock, const struct cl_io *io);
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
struct cl_page *page, pgoff_t index);
-int vvp_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req);
struct lu_object *vvp_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 2b7f182a15e2..0b6d388d8aa4 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -72,9 +72,10 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
/* don't need lock here to check lli_layout_gen as we have held
* extent lock and GROUP lock has to hold to swap layout
*/
- if (ll_layout_version_get(lli) != vio->vui_layout_gen) {
+ if (ll_layout_version_get(lli) != vio->vui_layout_gen ||
+ OBD_FAIL_CHECK_RESET(OBD_FAIL_LLITE_LOST_LAYOUT, 0)) {
io->ci_need_restart = 1;
- /* this will return application a short read/write */
+ /* this will cause a short read/write */
io->ci_continue = 0;
rc = false;
}
@@ -328,8 +329,8 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
vio->vui_layout_gen, gen);
/* today successful restore is the only possible case */
/* restore was done, clear restoring state */
- ll_i2info(vvp_object_inode(obj))->lli_flags &=
- ~LLIF_FILE_RESTORING;
+ clear_bit(LLIF_FILE_RESTORING,
+ &ll_i2info(inode)->lli_flags);
}
}
}
@@ -369,7 +370,7 @@ static int vvp_mmap_locks(const struct lu_env *env,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct cl_lock_descr *descr = &cti->vti_descr;
- ldlm_policy_data_t policy;
+ union ldlm_policy_data policy;
unsigned long addr;
ssize_t count;
int result = 0;
@@ -450,7 +451,8 @@ static void vvp_io_advance(const struct lu_env *env,
struct vvp_io *vio = cl2vvp_io(env, ios);
CLOBINVRNT(env, obj, vvp_object_invariant(obj));
- iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count -= nob);
+ vio->vui_tot_count -= nob;
+ iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count);
}
static void vvp_io_update_iov(const struct lu_env *env,
@@ -551,9 +553,16 @@ static int vvp_io_setattr_lock(const struct lu_env *env,
if (new_size == 0)
enqflags = CEF_DISCARD_DATA;
} else {
- if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
- io->u.ci_setattr.sa_attr.lvb_ctime) ||
- (io->u.ci_setattr.sa_attr.lvb_atime >=
+ unsigned int valid = io->u.ci_setattr.sa_valid;
+
+ if (!(valid & TIMES_SET_FLAGS))
+ return 0;
+
+ if ((!(valid & ATTR_MTIME) ||
+ io->u.ci_setattr.sa_attr.lvb_mtime >=
+ io->u.ci_setattr.sa_attr.lvb_ctime) &&
+ (!(valid & ATTR_ATIME) ||
+ io->u.ci_setattr.sa_attr.lvb_atime >=
io->u.ci_setattr.sa_attr.lvb_ctime))
return 0;
new_size = 0;
@@ -580,14 +589,6 @@ static int vvp_do_vmtruncate(struct inode *inode, size_t size)
return result;
}
-static int vvp_io_setattr_trunc(const struct lu_env *env,
- const struct cl_io_slice *ios,
- struct inode *inode, loff_t size)
-{
- inode_dio_wait(inode);
- return 0;
-}
-
static int vvp_io_setattr_time(const struct lu_env *env,
const struct cl_io_slice *ios)
{
@@ -618,15 +619,20 @@ static int vvp_io_setattr_start(const struct lu_env *env,
{
struct cl_io *io = ios->cis_io;
struct inode *inode = vvp_object_inode(io->ci_obj);
- int result = 0;
+ struct ll_inode_info *lli = ll_i2info(inode);
- inode_lock(inode);
- if (cl_io_is_trunc(io))
- result = vvp_io_setattr_trunc(env, ios, inode,
- io->u.ci_setattr.sa_attr.lvb_size);
- if (result == 0)
- result = vvp_io_setattr_time(env, ios);
- return result;
+ if (cl_io_is_trunc(io)) {
+ down_write(&lli->lli_trunc_sem);
+ inode_lock(inode);
+ inode_dio_wait(inode);
+ } else {
+ inode_lock(inode);
+ }
+
+ if (io->u.ci_setattr.sa_valid & TIMES_SET_FLAGS)
+ return vvp_io_setattr_time(env, ios);
+
+ return 0;
}
static void vvp_io_setattr_end(const struct lu_env *env,
@@ -634,14 +640,18 @@ static void vvp_io_setattr_end(const struct lu_env *env,
{
struct cl_io *io = ios->cis_io;
struct inode *inode = vvp_object_inode(io->ci_obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
- if (cl_io_is_trunc(io))
+ if (cl_io_is_trunc(io)) {
/* Truncate in memory pages - they must be clean pages
* because osc has already notified to destroy osc_extents.
*/
vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
-
- inode_unlock(inode);
+ inode_unlock(inode);
+ up_write(&lli->lli_trunc_sem);
+ } else {
+ inode_unlock(inode);
+ }
}
static void vvp_io_setattr_fini(const struct lu_env *env,
@@ -657,6 +667,7 @@ static int vvp_io_read_start(const struct lu_env *env,
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
struct inode *inode = vvp_object_inode(obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
struct file *file = vio->vui_fd->fd_file;
int result;
@@ -669,6 +680,8 @@ static int vvp_io_read_start(const struct lu_env *env,
CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
+ down_read(&lli->lli_trunc_sem);
+
if (!can_populate_pages(env, io, inode))
return 0;
@@ -770,16 +783,11 @@ static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
struct cl_page *page)
{
- struct vvp_page *vpg;
struct page *vmpage = page->cp_vmpage;
- struct cl_object *clob = cl_io_top(io)->ci_obj;
SetPageUptodate(vmpage);
set_page_dirty(vmpage);
- vpg = cl2vvp_page(cl_object_page_slice(clob, page));
- vvp_write_pending(cl2vvp(clob), vpg);
-
cl_page_disown(env, io, page);
/* held in ll_cl_init() */
@@ -899,10 +907,13 @@ static int vvp_io_write_start(const struct lu_env *env,
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
struct inode *inode = vvp_object_inode(obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
ssize_t result = 0;
loff_t pos = io->u.ci_wr.wr.crw_pos;
size_t cnt = io->u.ci_wr.wr.crw_count;
+ down_read(&lli->lli_trunc_sem);
+
if (!can_populate_pages(env, io, inode))
return 0;
@@ -921,6 +932,20 @@ static int vvp_io_write_start(const struct lu_env *env,
CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
+ /*
+ * The maximum Lustre file size is variable, based on the OST maximum
+ * object size and number of stripes. This needs another check in
+ * addition to the VFS checks earlier.
+ */
+ if (pos + cnt > ll_file_maxbytes(inode)) {
+ CDEBUG(D_INODE,
+ "%s: file " DFID " offset %llu > maxbytes %llu\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), pos + cnt,
+ ll_file_maxbytes(inode));
+ return -EFBIG;
+ }
+
if (!vio->vui_iter) {
/* from a temp io in ll_cl_init(). */
result = 0;
@@ -957,11 +982,7 @@ static int vvp_io_write_start(const struct lu_env *env,
}
}
if (result > 0) {
- struct ll_inode_info *lli = ll_i2info(inode);
-
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
+ set_bit(LLIF_DATA_MODIFIED, &(ll_i2info(inode))->lli_flags);
if (result < cnt)
io->ci_continue = 0;
@@ -972,6 +993,15 @@ static int vvp_io_write_start(const struct lu_env *env,
return result;
}
+static void vvp_io_rw_end(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct inode *inode = vvp_object_inode(ios->cis_obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ up_read(&lli->lli_trunc_sem);
+}
+
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
struct vm_fault *vmf = cfio->ft_vmf;
@@ -1014,13 +1044,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
struct cl_page *page)
{
- struct vvp_page *vpg;
- struct cl_object *clob = cl_io_top(io)->ci_obj;
-
set_page_dirty(page->cp_vmpage);
-
- vpg = cl2vvp_page(cl_object_page_slice(clob, page));
- vvp_write_pending(cl2vvp(clob), vpg);
}
static int vvp_io_fault_start(const struct lu_env *env,
@@ -1030,6 +1054,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
struct inode *inode = vvp_object_inode(obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
struct cl_fault_io *fio = &io->u.ci_fault;
struct vvp_fault_io *cfio = &vio->u.fault;
loff_t offset;
@@ -1039,11 +1064,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
loff_t size;
pgoff_t last_index;
- if (fio->ft_executable &&
- inode->i_mtime.tv_sec != vio->u.fault.ft_mtime)
- CWARN("binary "DFID
- " changed while waiting for the page fault lock\n",
- PFID(lu_object_fid(&obj->co_lu)));
+ down_read(&lli->lli_trunc_sem);
/* offset of the last byte on the page */
offset = cl_offset(obj, fio->ft_index + 1) - 1;
@@ -1192,6 +1213,17 @@ out:
return result;
}
+static void vvp_io_fault_end(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct inode *inode = vvp_object_inode(ios->cis_obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ CLOBINVRNT(env, ios->cis_io->ci_obj,
+ vvp_object_invariant(ios->cis_io->ci_obj));
+ up_read(&lli->lli_trunc_sem);
+}
+
static int vvp_io_fsync_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
@@ -1202,46 +1234,23 @@ static int vvp_io_fsync_start(const struct lu_env *env,
return 0;
}
-static int vvp_io_read_page(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice)
+static int vvp_io_read_ahead(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ pgoff_t start, struct cl_read_ahead *ra)
{
- struct cl_io *io = ios->cis_io;
- struct vvp_page *vpg = cl2vvp_page(slice);
- struct cl_page *page = slice->cpl_page;
- struct inode *inode = vvp_object_inode(slice->cpl_obj);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_file_data *fd = cl2vvp_io(env, ios)->vui_fd;
- struct ll_readahead_state *ras = &fd->fd_ras;
- struct cl_2queue *queue = &io->ci_queue;
-
- if (sbi->ll_ra_info.ra_max_pages_per_file &&
- sbi->ll_ra_info.ra_max_pages)
- ras_update(sbi, inode, ras, vvp_index(vpg),
- vpg->vpg_defer_uptodate);
-
- if (vpg->vpg_defer_uptodate) {
- vpg->vpg_ra_used = 1;
- cl_page_export(env, page, 1);
- }
- /*
- * Add page into the queue even when it is marked uptodate above.
- * this will unlock it automatically as part of cl_page_list_disown().
- */
+ int result = 0;
- cl_page_list_add(&queue->c2_qin, page);
- if (sbi->ll_ra_info.ra_max_pages_per_file &&
- sbi->ll_ra_info.ra_max_pages)
- ll_readahead(env, io, &queue->c2_qin, ras,
- vpg->vpg_defer_uptodate);
+ if (ios->cis_io->ci_type == CIT_READ ||
+ ios->cis_io->ci_type == CIT_FAULT) {
+ struct vvp_io *vio = cl2vvp_io(env, ios);
- return 0;
-}
+ if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ ra->cra_end = CL_PAGE_EOF;
+ result = 1; /* no need to call down */
+ }
+ }
-static void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- CLOBINVRNT(env, ios->cis_io->ci_obj,
- vvp_object_invariant(ios->cis_io->ci_obj));
+ return result;
}
static const struct cl_io_operations vvp_io_ops = {
@@ -1250,6 +1259,7 @@ static const struct cl_io_operations vvp_io_ops = {
.cio_fini = vvp_io_fini,
.cio_lock = vvp_io_read_lock,
.cio_start = vvp_io_read_start,
+ .cio_end = vvp_io_rw_end,
.cio_advance = vvp_io_advance,
},
[CIT_WRITE] = {
@@ -1258,6 +1268,7 @@ static const struct cl_io_operations vvp_io_ops = {
.cio_iter_fini = vvp_io_write_iter_fini,
.cio_lock = vvp_io_write_lock,
.cio_start = vvp_io_write_start,
+ .cio_end = vvp_io_rw_end,
.cio_advance = vvp_io_advance,
},
[CIT_SETATTR] = {
@@ -1272,7 +1283,7 @@ static const struct cl_io_operations vvp_io_ops = {
.cio_iter_init = vvp_io_fault_iter_init,
.cio_lock = vvp_io_fault_lock,
.cio_start = vvp_io_fault_start,
- .cio_end = vvp_io_end,
+ .cio_end = vvp_io_fault_end,
},
[CIT_FSYNC] = {
.cio_start = vvp_io_fsync_start,
@@ -1282,7 +1293,7 @@ static const struct cl_io_operations vvp_io_ops = {
.cio_fini = vvp_io_fini
}
},
- .cio_read_page = vvp_io_read_page,
+ .cio_read_ahead = vvp_io_read_ahead,
};
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
index b57195d15674..8e18cf86cefc 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -65,8 +65,7 @@ static int vvp_object_print(const struct lu_env *env, void *cookie,
struct inode *inode = obj->vob_inode;
struct ll_inode_info *lli;
- (*p)(env, cookie, "(%s %d %d) inode: %p ",
- list_empty(&obj->vob_pending_list) ? "-" : "+",
+ (*p)(env, cookie, "(%d %d) inode: %p ",
atomic_read(&obj->vob_transient_pages),
atomic_read(&obj->vob_mmap_cnt), inode);
if (inode) {
@@ -133,7 +132,7 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
CDEBUG(D_VFSTRACE, DFID ": losing layout lock\n",
PFID(&lli->lli_fid));
- ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE);
+ ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
/* Clean up page mmap for this inode.
* The reason for us to do this is that if the page has
@@ -146,27 +145,8 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
*/
unmap_mapping_range(conf->coc_inode->i_mapping,
0, OBD_OBJECT_EOF, 0);
-
- return 0;
}
- if (conf->coc_opc != OBJECT_CONF_SET)
- return 0;
-
- if (conf->u.coc_md && conf->u.coc_md->lsm) {
- CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n",
- PFID(&lli->lli_fid), lli->lli_layout_gen,
- conf->u.coc_md->lsm->lsm_layout_gen);
-
- lli->lli_has_smd = lsm_has_objects(conf->u.coc_md->lsm);
- ll_layout_version_set(lli, conf->u.coc_md->lsm->lsm_layout_gen);
- } else {
- CDEBUG(D_VFSTRACE, DFID ": layout nuked: %u.\n",
- PFID(&lli->lli_fid), lli->lli_layout_gen);
-
- lli->lli_has_smd = false;
- ll_layout_version_set(lli, LL_LAYOUT_GEN_EMPTY);
- }
return 0;
}
@@ -204,6 +184,26 @@ static int vvp_object_glimpse(const struct lu_env *env,
return 0;
}
+static void vvp_req_attr_set(const struct lu_env *env, struct cl_object *obj,
+ struct cl_req_attr *attr)
+{
+ u64 valid_flags = OBD_MD_FLTYPE;
+ struct inode *inode;
+ struct obdo *oa;
+
+ oa = attr->cra_oa;
+ inode = vvp_object_inode(obj);
+
+ if (attr->cra_type == CRT_WRITE)
+ valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
+ OBD_MD_FLUID | OBD_MD_FLGID;
+ obdo_from_inode(oa, inode, valid_flags & attr->cra_flags);
+ obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
+ if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_INVALID_PFID))
+ oa->o_parent_oid++;
+ memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid, LUSTRE_JOBID_SIZE);
+}
+
static const struct cl_object_operations vvp_ops = {
.coo_page_init = vvp_page_init,
.coo_lock_init = vvp_lock_init,
@@ -212,7 +212,8 @@ static const struct cl_object_operations vvp_ops = {
.coo_attr_update = vvp_attr_update,
.coo_conf_set = vvp_conf_set,
.coo_prune = vvp_prune,
- .coo_glimpse = vvp_object_glimpse
+ .coo_glimpse = vvp_object_glimpse,
+ .coo_req_attr_set = vvp_req_attr_set
};
static int vvp_object_init0(const struct lu_env *env,
@@ -240,7 +241,6 @@ static int vvp_object_init(const struct lu_env *env, struct lu_object *obj,
const struct cl_object_conf *cconf;
cconf = lu2cl_conf(conf);
- INIT_LIST_HEAD(&vob->vob_pending_list);
lu_object_add(obj, below);
result = vvp_object_init0(env, vob, cconf);
} else {
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 046e84d7a158..23d66308ff20 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -162,13 +162,10 @@ static void vvp_page_delete(const struct lu_env *env,
LASSERT((struct cl_page *)vmpage->private == page);
LASSERT(inode == vvp_object_inode(obj));
- vvp_write_complete(cl2vvp(obj), cl2vvp_page(slice));
-
/* Drop the reference count held in vvp_page_init */
refc = atomic_dec_return(&page->cp_ref);
LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);
- ClearPageUptodate(vmpage);
ClearPagePrivate(vmpage);
vmpage->private = 0;
/*
@@ -221,8 +218,6 @@ static int vvp_page_prep_write(const struct lu_env *env,
if (!pg->cp_sync_io)
set_page_writeback(vmpage);
- vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
-
return 0;
}
@@ -287,19 +282,6 @@ static void vvp_page_completion_write(const struct lu_env *env,
CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
- /*
- * TODO: Actually it makes sense to add the page into oap pending
- * list again and so that we don't need to take the page out from
- * SoM write pending list, if we just meet a recoverable error,
- * -ENOMEM, etc.
- * To implement this, we just need to return a non zero value in
- * ->cpo_completion method. The underlying transfer should be notified
- * and then re-add the page into pending transfer queue. -jay
- */
-
- vpg->vpg_write_queued = 0;
- vvp_write_complete(cl2vvp(slice->cpl_obj), vpg);
-
if (pg->cp_sync_io) {
LASSERT(PageLocked(vmpage));
LASSERT(!PageWriteback(vmpage));
@@ -341,7 +323,6 @@ static int vvp_page_make_ready(const struct lu_env *env,
LASSERT(pg->cp_state == CPS_CACHED);
/* This actually clears the dirty bit in the radix tree. */
set_page_writeback(vmpage);
- vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
} else if (pg->cp_state == CPS_PAGEOUT) {
/* is it possible for osc_flush_async_page() to already
@@ -357,20 +338,6 @@ static int vvp_page_make_ready(const struct lu_env *env,
return result;
}
-static int vvp_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io, pgoff_t *max_index)
-{
- if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
- io->ci_type == CIT_FAULT) {
- struct vvp_io *vio = vvp_env_io(env);
-
- if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
- *max_index = CL_PAGE_EOF;
- }
- return 0;
-}
-
static int vvp_page_print(const struct lu_env *env,
const struct cl_page_slice *slice,
void *cookie, lu_printer_t printer)
@@ -378,9 +345,8 @@ static int vvp_page_print(const struct lu_env *env,
struct vvp_page *vpg = cl2vvp_page(slice);
struct page *vmpage = vpg->vpg_page;
- (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ",
- vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used,
- vpg->vpg_write_queued, vmpage);
+ (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d) vm@%p ",
+ vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, vmpage);
if (vmpage) {
(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
(long)vmpage->flags, page_count(vmpage),
@@ -416,7 +382,6 @@ static const struct cl_page_operations vvp_page_ops = {
.cpo_is_vmlocked = vvp_page_is_vmlocked,
.cpo_fini = vvp_page_fini,
.cpo_print = vvp_page_print,
- .cpo_is_under_lock = vvp_page_is_under_lock,
.io = {
[CRT_READ] = {
.cpo_prep = vvp_page_prep_read,
@@ -515,7 +480,6 @@ static const struct cl_page_operations vvp_transient_page_ops = {
.cpo_fini = vvp_transient_page_fini,
.cpo_is_vmlocked = vvp_transient_page_is_vmlocked,
.cpo_print = vvp_page_print,
- .cpo_is_under_lock = vvp_page_is_under_lock,
.io = {
[CRT_READ] = {
.cpo_prep = vvp_transient_page_prep,
@@ -539,7 +503,6 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
vpg->vpg_page = vmpage;
get_page(vmpage);
- INIT_LIST_HEAD(&vpg->vpg_pending_linkage);
if (page->cp_type == CPT_CACHEABLE) {
/* in cache, decref in vvp_page_delete */
atomic_inc(&page->cp_ref);
diff --git a/drivers/staging/lustre/lustre/llite/vvp_req.c b/drivers/staging/lustre/lustre/llite/vvp_req.c
deleted file mode 100644
index e3f4c790d646..000000000000
--- a/drivers/staging/lustre/lustre/llite/vvp_req.c
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2014, Intel Corporation.
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "../include/lustre/lustre_idl.h"
-#include "../include/cl_object.h"
-#include "../include/obd.h"
-#include "../include/obd_support.h"
-#include "llite_internal.h"
-#include "vvp_internal.h"
-
-static inline struct vvp_req *cl2vvp_req(const struct cl_req_slice *slice)
-{
- return container_of0(slice, struct vvp_req, vrq_cl);
-}
-
-/**
- * Implementation of struct cl_req_operations::cro_attr_set() for VVP
- * layer. VVP is responsible for
- *
- * - o_[mac]time
- *
- * - o_mode
- *
- * - o_parent_seq
- *
- * - o_[ug]id
- *
- * - o_parent_oid
- *
- * - o_parent_ver
- *
- * - o_ioepoch,
- *
- */
-static void vvp_req_attr_set(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, u64 flags)
-{
- struct inode *inode;
- struct obdo *oa;
- u32 valid_flags;
-
- oa = attr->cra_oa;
- inode = vvp_object_inode(obj);
- valid_flags = OBD_MD_FLTYPE;
-
- if (slice->crs_req->crq_type == CRT_WRITE) {
- if (flags & OBD_MD_FLEPOCH) {
- oa->o_valid |= OBD_MD_FLEPOCH;
- oa->o_ioepoch = ll_i2info(inode)->lli_ioepoch;
- valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
- OBD_MD_FLUID | OBD_MD_FLGID;
- }
- }
- obdo_from_inode(oa, inode, valid_flags & flags);
- obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
- if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_INVALID_PFID))
- oa->o_parent_oid++;
- memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid,
- LUSTRE_JOBID_SIZE);
-}
-
-static void vvp_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret)
-{
- struct vvp_req *vrq;
-
- if (ioret > 0)
- cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);
-
- vrq = cl2vvp_req(slice);
- kmem_cache_free(vvp_req_kmem, vrq);
-}
-
-static const struct cl_req_operations vvp_req_ops = {
- .cro_attr_set = vvp_req_attr_set,
- .cro_completion = vvp_req_completion
-};
-
-int vvp_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
-{
- struct vvp_req *vrq;
- int result;
-
- vrq = kmem_cache_zalloc(vvp_req_kmem, GFP_NOFS);
- if (vrq) {
- cl_req_slice_add(req, &vrq->vrq_cl, dev, &vvp_req_ops);
- result = 0;
- } else {
- result = -ENOMEM;
- }
- return result;
-}
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index e070adb7a3cc..7a848ebc57c1 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -44,48 +44,39 @@
#include "llite_internal.h"
-static
-int get_xattr_type(const char *name)
+const struct xattr_handler *get_xattr_type(const char *name)
{
- if (!strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS))
- return XATTR_ACL_ACCESS_T;
+ int i = 0;
- if (!strcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT))
- return XATTR_ACL_DEFAULT_T;
+ while (ll_xattr_handlers[i]) {
+ size_t len = strlen(ll_xattr_handlers[i]->prefix);
- if (!strncmp(name, XATTR_USER_PREFIX,
- sizeof(XATTR_USER_PREFIX) - 1))
- return XATTR_USER_T;
-
- if (!strncmp(name, XATTR_TRUSTED_PREFIX,
- sizeof(XATTR_TRUSTED_PREFIX) - 1))
- return XATTR_TRUSTED_T;
-
- if (!strncmp(name, XATTR_SECURITY_PREFIX,
- sizeof(XATTR_SECURITY_PREFIX) - 1))
- return XATTR_SECURITY_T;
-
- if (!strncmp(name, XATTR_LUSTRE_PREFIX,
- sizeof(XATTR_LUSTRE_PREFIX) - 1))
- return XATTR_LUSTRE_T;
-
- return XATTR_OTHER_T;
+ if (!strncmp(ll_xattr_handlers[i]->prefix, name, len))
+ return ll_xattr_handlers[i];
+ i++;
+ }
+ return NULL;
}
-static
-int xattr_type_filter(struct ll_sb_info *sbi, int xattr_type)
+static int xattr_type_filter(struct ll_sb_info *sbi,
+ const struct xattr_handler *handler)
{
- if ((xattr_type == XATTR_ACL_ACCESS_T ||
- xattr_type == XATTR_ACL_DEFAULT_T) &&
+ /* No handler means XATTR_OTHER_T */
+ if (!handler)
+ return -EOPNOTSUPP;
+
+ if ((handler->flags == XATTR_ACL_ACCESS_T ||
+ handler->flags == XATTR_ACL_DEFAULT_T) &&
!(sbi->ll_flags & LL_SBI_ACL))
return -EOPNOTSUPP;
- if (xattr_type == XATTR_USER_T && !(sbi->ll_flags & LL_SBI_USER_XATTR))
+ if (handler->flags == XATTR_USER_T &&
+ !(sbi->ll_flags & LL_SBI_USER_XATTR))
return -EOPNOTSUPP;
- if (xattr_type == XATTR_TRUSTED_T && !capable(CFS_CAP_SYS_ADMIN))
+
+ if (handler->flags == XATTR_TRUSTED_T &&
+ !capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
- if (xattr_type == XATTR_OTHER_T)
- return -EOPNOTSUPP;
return 0;
}
@@ -111,7 +102,7 @@ ll_xattr_set_common(const struct xattr_handler *handler,
valid = OBD_MD_FLXATTR;
}
- rc = xattr_type_filter(sbi, handler->flags);
+ rc = xattr_type_filter(sbi, handler);
if (rc)
return rc;
@@ -121,8 +112,9 @@ ll_xattr_set_common(const struct xattr_handler *handler,
return -EPERM;
/* b10667: ignore lustre special xattr for now */
- if ((handler->flags == XATTR_TRUSTED_T && !strcmp(name, "lov")) ||
- (handler->flags == XATTR_LUSTRE_T && !strcmp(name, "lov")))
+ if (!strcmp(name, "hsm") ||
+ ((handler->flags == XATTR_TRUSTED_T && !strcmp(name, "lov")) ||
+ (handler->flags == XATTR_LUSTRE_T && !strcmp(name, "lov"))))
return 0;
/* b15587: ignore security.capability xattr for now */
@@ -135,6 +127,11 @@ ll_xattr_set_common(const struct xattr_handler *handler,
strcmp(name, "selinux") == 0)
return -EOPNOTSUPP;
+ /* FIXME: enable IMA when the conditions are ready */
+ if (handler->flags == XATTR_SECURITY_T &&
+ (!strcmp(name, "ima") || !strcmp(name, "evm")))
+ return -EOPNOTSUPP;
+
sprintf(fullname, "%s%s\n", handler->prefix, name);
rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode),
valid, fullname, pv, size, 0, flags,
@@ -151,6 +148,37 @@ ll_xattr_set_common(const struct xattr_handler *handler,
return 0;
}
+static int get_hsm_state(struct inode *inode, u32 *hus_states)
+{
+ struct md_op_data *op_data;
+ struct hsm_user_state *hus;
+ int rc;
+
+ hus = kzalloc(sizeof(*hus), GFP_NOFS);
+ if (!hus)
+ return -ENOMEM;
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, hus);
+ if (!IS_ERR(op_data)) {
+ rc = obd_iocontrol(LL_IOC_HSM_STATE_GET, ll_i2mdexp(inode),
+ sizeof(*op_data), op_data, NULL);
+ if (!rc)
+ *hus_states = hus->hus_states;
+ else
+ CDEBUG(D_VFSTRACE, "obd_iocontrol failed. rc = %d\n",
+ rc);
+
+ ll_finish_md_op_data(op_data);
+ } else {
+ rc = PTR_ERR(op_data);
+ CDEBUG(D_VFSTRACE, "Could not prepare the opdata. rc = %d\n",
+ rc);
+ }
+ kfree(hus);
+ return rc;
+}
+
static int ll_xattr_set(const struct xattr_handler *handler,
struct dentry *dentry, struct inode *inode,
const char *name, const void *value, size_t size,
@@ -187,6 +215,31 @@ static int ll_xattr_set(const struct xattr_handler *handler,
if (lump && lump->lmm_stripe_offset == 0)
lump->lmm_stripe_offset = -1;
+ /* Avoid anyone directly setting the RELEASED flag. */
+ if (lump && (lump->lmm_pattern & LOV_PATTERN_F_RELEASED)) {
+ /* Only if the released flag is set do we check
+ * whether the file was indeed archived.
+ */
+ u32 state = HS_NONE;
+
+ rc = get_hsm_state(inode, &state);
+ if (rc)
+ return rc;
+
+ if (!(state & HS_ARCHIVED)) {
+ CDEBUG(D_VFSTRACE,
+ "hus_states state = %x, pattern = %x\n",
+ state, lump->lmm_pattern);
+ /*
+ * Here the real file is not archived,
+ * but the user is requesting to set the
+ * RELEASED flag, so we mask the released
+ * flag off the request
+ */
+ lump->lmm_pattern ^= LOV_PATTERN_F_RELEASED;
+ }
+ }
+
if (lump && S_ISREG(inode->i_mode)) {
__u64 it_flags = FMODE_WRITE;
int lum_size;
@@ -225,7 +278,8 @@ ll_xattr_list(struct inode *inode, const char *name, int type, void *buffer,
void *xdata;
int rc;
- if (sbi->ll_xattr_cache_enabled && type != XATTR_ACL_ACCESS_T) {
+ if (sbi->ll_xattr_cache_enabled && type != XATTR_ACL_ACCESS_T &&
+ (type != XATTR_SECURITY_T || strcmp(name, "security.selinux"))) {
rc = ll_xattr_cache_get(inode, name, buffer, size, valid);
if (rc == -EAGAIN)
goto getxattr_nocache;
@@ -313,7 +367,7 @@ static int ll_xattr_get_common(const struct xattr_handler *handler,
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
- rc = xattr_type_filter(sbi, handler->flags);
+ rc = xattr_type_filter(sbi, handler);
if (rc)
return rc;
@@ -353,80 +407,99 @@ static int ll_xattr_get_common(const struct xattr_handler *handler,
OBD_MD_FLXATTR);
}
-static int ll_xattr_get(const struct xattr_handler *handler,
- struct dentry *dentry, struct inode *inode,
- const char *name, void *buffer, size_t size)
+static ssize_t ll_getxattr_lov(struct inode *inode, void *buf, size_t buf_size)
{
- LASSERT(inode);
- LASSERT(name);
+ ssize_t rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
- PFID(ll_inode2fid(inode)), inode, name);
+ if (S_ISREG(inode->i_mode)) {
+ struct cl_object *obj = ll_i2info(inode)->lli_clob;
+ struct cl_layout cl = {
+ .cl_buf.lb_buf = buf,
+ .cl_buf.lb_len = buf_size,
+ };
+ struct lu_env *env;
+ int refcheck;
+
+ if (!obj)
+ return -ENODATA;
- if (!strcmp(name, "lov")) {
- struct lov_stripe_md *lsm;
- struct lov_user_md *lump;
- struct lov_mds_md *lmm = NULL;
- struct ptlrpc_request *request = NULL;
- int rc = 0, lmmsize = 0;
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
-
- if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
- return -ENODATA;
+ rc = cl_object_layout_get(env, obj, &cl);
+ if (rc < 0)
+ goto out_env;
- lsm = ccc_inode_lsm_get(inode);
- if (!lsm) {
- if (S_ISDIR(inode->i_mode)) {
- rc = ll_dir_getstripe(inode, (void **)&lmm,
- &lmmsize, &request, 0);
- } else {
- rc = -ENODATA;
- }
- } else {
- /* LSM is present already after lookup/getattr call.
- * we need to grab layout lock once it is implemented
- */
- rc = obd_packmd(ll_i2dtexp(inode), &lmm, lsm);
- lmmsize = rc;
+ if (!cl.cl_size) {
+ rc = -ENODATA;
+ goto out_env;
}
- ccc_inode_lsm_put(inode, lsm);
+ rc = cl.cl_size;
+
+ if (!buf_size)
+ goto out_env;
+
+ LASSERT(buf && rc <= buf_size);
+
+ /*
+ * Do not return layout gen for getxattr() since
+ * otherwise it would confuse tar --xattr by
+ * recognizing layout gen as stripe offset when the
+ * file is restored. See LU-2809.
+ */
+ ((struct lov_mds_md *)buf)->lmm_layout_gen = 0;
+out_env:
+ cl_env_put(env, &refcheck);
+
+ return rc;
+ } else if (S_ISDIR(inode->i_mode)) {
+ struct ptlrpc_request *req = NULL;
+ struct lov_mds_md *lmm = NULL;
+ int lmm_size = 0;
+
+ rc = ll_dir_getstripe(inode, (void **)&lmm, &lmm_size,
+ &req, 0);
if (rc < 0)
- goto out;
+ goto out_req;
- if (size == 0) {
- /* used to call ll_get_max_mdsize() forward to get
- * the maximum buffer size, while some apps (such as
- * rsync 3.0.x) care much about the exact xattr value
- * size
- */
- rc = lmmsize;
- goto out;
+ if (!buf_size) {
+ rc = lmm_size;
+ goto out_req;
}
- if (size < lmmsize) {
- CERROR("server bug: replied size %d > %d for %pd (%s)\n",
- lmmsize, (int)size, dentry, name);
+ if (buf_size < lmm_size) {
rc = -ERANGE;
- goto out;
+ goto out_req;
}
- lump = buffer;
- memcpy(lump, lmm, lmmsize);
- /* do not return layout gen for getxattr otherwise it would
- * confuse tar --xattr by recognizing layout gen as stripe
- * offset when the file is restored. See LU-2809.
- */
- lump->lmm_layout_gen = 0;
+ memcpy(buf, lmm, lmm_size);
+ rc = lmm_size;
+out_req:
+ if (req)
+ ptlrpc_req_finished(req);
- rc = lmmsize;
-out:
- if (request)
- ptlrpc_req_finished(request);
- else if (lmm)
- obd_free_diskmd(ll_i2dtexp(inode), &lmm);
return rc;
+ } else {
+ return -ENODATA;
+ }
+}
+
+static int ll_xattr_get(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer, size_t size)
+{
+ LASSERT(inode);
+ LASSERT(name);
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), xattr %s\n",
+ PFID(ll_inode2fid(inode)), inode, name);
+
+ if (!strcmp(name, "lov")) {
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
+
+ return ll_getxattr_lov(inode, buffer, size);
}
return ll_xattr_get_common(handler, dentry, inode, name, buffer, size);
@@ -435,10 +508,10 @@ out:
ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
struct inode *inode = d_inode(dentry);
- int rc = 0, rc2 = 0;
- struct lov_mds_md *lmm = NULL;
- struct ptlrpc_request *request = NULL;
- int lmmsize;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ char *xattr_name;
+ ssize_t rc, rc2;
+ size_t len, rem;
LASSERT(inode);
@@ -450,65 +523,48 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
rc = ll_xattr_list(inode, NULL, XATTR_OTHER_T, buffer, size,
OBD_MD_FLXATTRLS);
if (rc < 0)
- goto out;
-
- if (buffer) {
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- char *xattr_name = buffer;
- int xlen, rem = rc;
-
- while (rem > 0) {
- xlen = strnlen(xattr_name, rem - 1) + 1;
- rem -= xlen;
- if (xattr_type_filter(sbi,
- get_xattr_type(xattr_name)) == 0) {
- /* skip OK xattr type
- * leave it in buffer
- */
- xattr_name += xlen;
- continue;
- }
- /* move up remaining xattrs in buffer
- * removing the xattr that is not OK
- */
- memmove(xattr_name, xattr_name + xlen, rem);
- rc -= xlen;
+ return rc;
+ /*
+ * If we're being called to get the size of the xattr list
+ * (size == 0), then just assume that a lustre.lov xattr
+ * exists.
+ */
+ if (!size)
+ return rc + sizeof(XATTR_LUSTRE_LOV);
+
+ xattr_name = buffer;
+ rem = rc;
+
+ while (rem > 0) {
+ len = strnlen(xattr_name, rem - 1) + 1;
+ rem -= len;
+ if (!xattr_type_filter(sbi, get_xattr_type(xattr_name))) {
+ /* Skip OK xattr type, leave it in the buffer */
+ xattr_name += len;
+ continue;
}
- }
- if (S_ISREG(inode->i_mode)) {
- if (!ll_i2info(inode)->lli_has_smd)
- rc2 = -1;
- } else if (S_ISDIR(inode->i_mode)) {
- rc2 = ll_dir_getstripe(inode, (void **)&lmm, &lmmsize,
- &request, 0);
+
+ /*
+ * Move up the remaining xattrs in the buffer,
+ * removing the xattr that is not OK
+ */
+ memmove(xattr_name, xattr_name + len, rem);
+ rc -= len;
}
- if (rc2 < 0) {
- rc2 = 0;
- goto out;
- } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) {
- const int prefix_len = sizeof(XATTR_LUSTRE_PREFIX) - 1;
- const size_t name_len = sizeof("lov") - 1;
- const size_t total_len = prefix_len + name_len + 1;
-
- if (((rc + total_len) > size) && buffer) {
- ptlrpc_req_finished(request);
- return -ERANGE;
- }
+ rc2 = ll_getxattr_lov(inode, NULL, 0);
+ if (rc2 == -ENODATA)
+ return rc;
- if (buffer) {
- buffer += rc;
- memcpy(buffer, XATTR_LUSTRE_PREFIX, prefix_len);
- memcpy(buffer + prefix_len, "lov", name_len);
- buffer[prefix_len + name_len] = '\0';
- }
- rc2 = total_len;
- }
-out:
- ptlrpc_req_finished(request);
- rc = rc + rc2;
+ if (rc2 < 0)
+ return rc2;
- return rc;
+ if (size < rc + sizeof(XATTR_LUSTRE_LOV))
+ return -ERANGE;
+
+ memcpy(buffer + rc, XATTR_LUSTRE_LOV, sizeof(XATTR_LUSTRE_LOV));
+
+ return rc + sizeof(XATTR_LUSTRE_LOV);
}
static const struct xattr_handler ll_user_xattr_handler = {
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index 50a19a40bd4e..38f75f6aa887 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -26,8 +26,8 @@ struct ll_xattr_entry {
*/
char *xe_name; /* xattr name, \0-terminated */
char *xe_value; /* xattr value */
- unsigned xe_namelen; /* strlen(xe_name) + 1 */
- unsigned xe_vallen; /* xattr value length */
+ unsigned int xe_namelen; /* strlen(xe_name) + 1 */
+ unsigned int xe_vallen; /* xattr value length */
};
static struct kmem_cache *xattr_kmem;
@@ -60,7 +60,7 @@ void ll_xattr_fini(void)
static void ll_xattr_cache_init(struct ll_inode_info *lli)
{
INIT_LIST_HEAD(&lli->lli_xattrs);
- lli->lli_flags |= LLIF_XATTR_CACHE;
+ set_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
}
/**
@@ -104,7 +104,7 @@ static int ll_xattr_cache_find(struct list_head *cache,
static int ll_xattr_cache_add(struct list_head *cache,
const char *xattr_name,
const char *xattr_val,
- unsigned xattr_val_len)
+ unsigned int xattr_val_len)
{
struct ll_xattr_entry *xattr;
@@ -216,7 +216,7 @@ static int ll_xattr_cache_list(struct list_head *cache,
*/
static int ll_xattr_cache_valid(struct ll_inode_info *lli)
{
- return !!(lli->lli_flags & LLIF_XATTR_CACHE);
+ return test_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
}
/**
@@ -233,7 +233,8 @@ static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0)
; /* empty loop */
- lli->lli_flags &= ~LLIF_XATTR_CACHE;
+
+ clear_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
return 0;
}
@@ -415,6 +416,10 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
CDEBUG(D_CACHE, "not caching %s\n",
XATTR_NAME_ACL_ACCESS);
rc = 0;
+ } else if (!strcmp(xdata, "security.selinux")) {
+ /* Filter out security.selinux, it is cached in slab */
+ CDEBUG(D_CACHE, "not caching security.selinux\n");
+ rc = 0;
} else {
rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
*xsizes);
diff --git a/drivers/staging/lustre/lustre/llite/xattr_security.c b/drivers/staging/lustre/lustre/llite/xattr_security.c
new file mode 100644
index 000000000000..d61d8018001a
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/xattr_security.c
@@ -0,0 +1,88 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see http://www.gnu.org/licenses
+ *
+ * GPL HEADER END
+ */
+
+/*
+ * Copyright (c) 2014 Bull SAS
+ * Author: Sebastien Buisson sebastien.buisson@bull.net
+ */
+
+/*
+ * lustre/llite/xattr_security.c
+ * Handler for storing security labels as extended attributes.
+ */
+#include <linux/security.h>
+#include <linux/xattr.h>
+#include "llite_internal.h"
+
+/**
+ * A helper function for ll_security_inode_init_security()
+ * that takes care of setting xattrs
+ *
+ * Get security context of @inode from @xattr_array,
+ * and put it in 'security.xxx' xattr of dentry
+ * stored in @fs_info.
+ *
+ * \retval 0 success
+ * \retval -ENOMEM if no memory could be allocated for xattr name
+ * \retval < 0 failure to set xattr
+ */
+static int
+ll_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
+{
+ const struct xattr_handler *handler;
+ struct dentry *dentry = fs_info;
+ const struct xattr *xattr;
+ int err = 0;
+
+ handler = get_xattr_type(XATTR_SECURITY_PREFIX);
+ if (!handler)
+ return -ENXIO;
+
+ for (xattr = xattr_array; xattr->name; xattr++) {
+ err = handler->set(handler, dentry, inode, xattr->name,
+ xattr->value, xattr->value_len,
+ XATTR_CREATE);
+ if (err < 0)
+ break;
+ }
+ return err;
+}
+
+/**
+ * Initializes security context
+ *
+ * Get security context of @inode in @dir,
+ * and put it in 'security.xxx' xattr of @dentry.
+ *
+ * \retval 0 success, or SELinux is disabled
+ * \retval -ENOMEM if no memory could be allocated for xattr name
+ * \retval < 0 failure to get security context or set xattr
+ */
+int
+ll_init_security(struct dentry *dentry, struct inode *inode, struct inode *dir)
+{
+ if (!selinux_is_enabled())
+ return 0;
+
+ return security_inode_init_security(inode, dir, NULL,
+ &ll_initxattrs, dentry);
+}
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index 9f4e826bb0af..b1071cf5a70c 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -223,7 +223,14 @@ int lmv_revalidate_slaves(struct obd_export *exp,
LASSERT(body);
if (unlikely(body->mbo_nlink < 2)) {
- CERROR("%s: nlink %d < 2 corrupt stripe %d "DFID":" DFID"\n",
+ /*
+ * If this is a bad stripe, most likely due
+ * to the race between close(unlink) and
+ * getattr, let's return -ENOENT, so llite
+ * will revalidate the dentry; see
+ * ll_inode_revalidate_fini()
+ */
+ CDEBUG(D_INODE, "%s: nlink %d < 2 corrupt stripe %d "DFID":" DFID"\n",
obd->obd_name, body->mbo_nlink, i,
PFID(&lsm->lsm_md_oinfo[i].lmo_fid),
PFID(&lsm->lsm_md_oinfo[0].lmo_fid));
@@ -233,7 +240,7 @@ int lmv_revalidate_slaves(struct obd_export *exp,
it.it_lock_mode = 0;
}
- rc = -EIO;
+ rc = -ENOENT;
goto cleanup;
}
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
index 52b03745ac19..12731a17e263 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h
+++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
@@ -54,9 +54,6 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds);
int lmv_fid_alloc(const struct lu_env *env, struct obd_export *exp,
struct lu_fid *fid, struct md_op_data *op_data);
-int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
- const union lmv_mds_md *lmm, int stripe_count);
-
int lmv_revalidate_slaves(struct obd_export *exp,
const struct lmv_stripe_md *lsm,
ldlm_blocking_callback cb_blocking,
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 7dbb2b946acf..f124f6c05ea4 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -62,6 +62,7 @@ static void lmv_activate_target(struct lmv_obd *lmv,
tgt->ltd_active = activate;
lmv->desc.ld_active_tgt_count += (activate ? 1 : -1);
+ tgt->ltd_exp->exp_obd->obd_inactive = !activate;
}
/**
@@ -245,8 +246,7 @@ static int lmv_connect(const struct lu_env *env,
return rc;
}
-static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
- u32 cookiesize, u32 def_cookiesize)
+static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
@@ -262,14 +262,7 @@ static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
lmv->max_def_easize = def_easize;
change = 1;
}
- if (lmv->max_cookiesize < cookiesize) {
- lmv->max_cookiesize = cookiesize;
- change = 1;
- }
- if (lmv->max_def_cookiesize < def_cookiesize) {
- lmv->max_def_cookiesize = def_cookiesize;
- change = 1;
- }
+
if (change == 0)
return 0;
@@ -284,8 +277,7 @@ static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
continue;
}
- rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize,
- cookiesize, def_cookiesize);
+ rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize);
if (rc) {
CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d\n",
obd->obd_name, i, rc);
@@ -368,8 +360,7 @@ static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
tgt->ltd_exp = mdc_exp;
lmv->desc.ld_active_tgt_count++;
- md_init_ea_size(tgt->ltd_exp, lmv->max_easize, lmv->max_def_easize,
- lmv->max_cookiesize, lmv->max_def_cookiesize);
+ md_init_ea_size(tgt->ltd_exp, lmv->max_easize, lmv->max_def_easize);
CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n",
mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
@@ -396,27 +387,23 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
__u32 index, int gen)
{
struct lmv_obd *lmv = &obd->u.lmv;
+ struct obd_device *mdc_obd;
struct lmv_tgt_desc *tgt;
int orig_tgt_count = 0;
int rc = 0;
CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index);
- mutex_lock(&lmv->lmv_init_mutex);
-
- if (lmv->desc.ld_tgt_count == 0) {
- struct obd_device *mdc_obd;
-
- mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME,
- &obd->obd_uuid);
- if (!mdc_obd) {
- mutex_unlock(&lmv->lmv_init_mutex);
- CERROR("%s: Target %s not attached: rc = %d\n",
- obd->obd_name, uuidp->uuid, -EINVAL);
- return -EINVAL;
- }
+ mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME,
+ &obd->obd_uuid);
+ if (!mdc_obd) {
+ CERROR("%s: Target %s not attached: rc = %d\n",
+ obd->obd_name, uuidp->uuid, -EINVAL);
+ return -EINVAL;
}
+ mutex_lock(&lmv->lmv_init_mutex);
+
if ((index < lmv->tgts_size) && lmv->tgts[index]) {
tgt = lmv->tgts[index];
CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n",
@@ -472,22 +459,27 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
lmv->desc.ld_tgt_count = index + 1;
}
- if (lmv->connected) {
- rc = lmv_connect_mdc(obd, tgt);
- if (rc) {
- spin_lock(&lmv->lmv_lock);
- if (lmv->desc.ld_tgt_count == index + 1)
- lmv->desc.ld_tgt_count = orig_tgt_count;
- memset(tgt, 0, sizeof(*tgt));
- spin_unlock(&lmv->lmv_lock);
- } else {
- int easize = sizeof(struct lmv_stripe_md) +
- lmv->desc.ld_tgt_count * sizeof(struct lu_fid);
- lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0);
- }
+ if (!lmv->connected) {
+ /* lmv_check_connect() will connect this target. */
+ mutex_unlock(&lmv->lmv_init_mutex);
+ return rc;
}
+ /* Otherwise let's connect it ourselves */
mutex_unlock(&lmv->lmv_init_mutex);
+ rc = lmv_connect_mdc(obd, tgt);
+ if (rc) {
+ spin_lock(&lmv->lmv_lock);
+ if (lmv->desc.ld_tgt_count == index + 1)
+ lmv->desc.ld_tgt_count = orig_tgt_count;
+ memset(tgt, 0, sizeof(*tgt));
+ spin_unlock(&lmv->lmv_lock);
+ } else {
+ int easize = sizeof(struct lmv_stripe_md) +
+ lmv->desc.ld_tgt_count * sizeof(struct lu_fid);
+ lmv_init_ea_size(obd->obd_self_export, easize, 0);
+ }
+
return rc;
}
@@ -538,7 +530,7 @@ int lmv_check_connect(struct obd_device *obd)
class_export_put(lmv->exp);
lmv->connected = 1;
easize = lmv_mds_md_size(lmv->desc.ld_tgt_count, LMV_MAGIC);
- lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0);
+ lmv_init_ea_size(obd->obd_self_export, easize, 0);
mutex_unlock(&lmv->lmv_init_mutex);
return 0;
@@ -1128,9 +1120,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
mdc_obd = class_exp2obd(tgt->ltd_exp);
mdc_obd->obd_force = obddev->obd_force;
err = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
- if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) {
- return err;
- } else if (err) {
+ if (err) {
if (tgt->ltd_active) {
CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
tgt->ltd_uuid.uuid, i, cmd, err);
@@ -1284,7 +1274,6 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
obd_str2uuid(&lmv->desc.ld_uuid, desc->ld_uuid.uuid);
lmv->desc.ld_tgt_count = 0;
lmv->desc.ld_active_tgt_count = 0;
- lmv->max_cookiesize = 0;
lmv->max_def_easize = 0;
lmv->max_easize = 0;
lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
@@ -1630,27 +1619,28 @@ lmv_locate_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
* ct_restore().
*/
if (op_data->op_bias & MDS_CREATE_VOLATILE &&
- (int)op_data->op_mds != -1 && lsm) {
+ (int)op_data->op_mds != -1) {
int i;
tgt = lmv_get_target(lmv, op_data->op_mds, NULL);
if (IS_ERR(tgt))
return tgt;
- /* refill the right parent fid */
- for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
- struct lmv_oinfo *oinfo;
+ if (lsm) {
+ /* refill the right parent fid */
+ for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
+ struct lmv_oinfo *oinfo;
- oinfo = &lsm->lsm_md_oinfo[i];
- if (oinfo->lmo_mds == op_data->op_mds) {
- *fid = oinfo->lmo_fid;
- break;
+ oinfo = &lsm->lsm_md_oinfo[i];
+ if (oinfo->lmo_mds == op_data->op_mds) {
+ *fid = oinfo->lmo_fid;
+ break;
+ }
}
- }
- /* Hmm, can not find the stripe by mdt_index(op_mds) */
- if (i == lsm->lsm_md_stripe_count)
- tgt = ERR_PTR(-EINVAL);
+ if (i == lsm->lsm_md_stripe_count)
+ *fid = lsm->lsm_md_oinfo[0].lmo_fid;
+ }
return tgt;
}
@@ -1728,30 +1718,9 @@ static int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
-static int lmv_done_writing(struct obd_export *exp,
- struct md_op_data *op_data,
- struct md_open_data *mod)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- rc = md_done_writing(tgt->ltd_exp, op_data, mod);
- return rc;
-}
-
static int
lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
- const ldlm_policy_data_t *policy,
+ const union ldlm_policy_data *policy,
struct lookup_intent *it, struct md_op_data *op_data,
struct lustre_handle *lockh, __u64 extra_lock_flags)
{
@@ -1847,7 +1816,7 @@ static int lmv_early_cancel(struct obd_export *exp, struct lmv_tgt_desc *tgt,
struct lu_fid *fid = md_op_data_fid(op_data, flag);
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
- ldlm_policy_data_t policy = { {0} };
+ union ldlm_policy_data policy = { { 0 } };
int rc = 0;
if (!fid_is_sane(fid))
@@ -1937,7 +1906,10 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
+ struct obd_export *target_exp;
struct lmv_tgt_desc *src_tgt;
+ struct lmv_tgt_desc *tgt_tgt;
+ struct mdt_body *body;
int rc;
LASSERT(oldlen != 0);
@@ -1977,6 +1949,10 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
if (rc)
return rc;
src_tgt = lmv_find_target(lmv, &op_data->op_fid3);
+ if (IS_ERR(src_tgt))
+ return PTR_ERR(src_tgt);
+
+ target_exp = src_tgt->ltd_exp;
} else {
if (op_data->op_mea1) {
struct lmv_stripe_md *lsm = op_data->op_mea1;
@@ -1985,29 +1961,27 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
oldlen,
&op_data->op_fid1,
&op_data->op_mds);
- if (IS_ERR(src_tgt))
- return PTR_ERR(src_tgt);
} else {
src_tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(src_tgt))
- return PTR_ERR(src_tgt);
-
- op_data->op_mds = src_tgt->ltd_idx;
}
+ if (IS_ERR(src_tgt))
+ return PTR_ERR(src_tgt);
if (op_data->op_mea2) {
struct lmv_stripe_md *lsm = op_data->op_mea2;
- const struct lmv_oinfo *oinfo;
- oinfo = lsm_name_to_stripe_info(lsm, new, newlen);
- if (IS_ERR(oinfo))
- return PTR_ERR(oinfo);
-
- op_data->op_fid2 = oinfo->lmo_fid;
+ tgt_tgt = lmv_locate_target_for_name(lmv, lsm, new,
+ newlen,
+ &op_data->op_fid2,
+ &op_data->op_mds);
+ } else {
+ tgt_tgt = lmv_find_target(lmv, &op_data->op_fid2);
}
+ if (IS_ERR(tgt_tgt))
+ return PTR_ERR(tgt_tgt);
+
+ target_exp = tgt_tgt->ltd_exp;
}
- if (IS_ERR(src_tgt))
- return PTR_ERR(src_tgt);
/*
* LOOKUP lock on src child (fid3) should also be cancelled for
@@ -2048,26 +2022,56 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
+retry_rename:
/*
* Cancel all the locks on tgt child (fid4).
*/
- if (fid_is_sane(&op_data->op_fid4))
+ if (fid_is_sane(&op_data->op_fid4)) {
+ struct lmv_tgt_desc *tgt;
+
rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx,
LCK_EX, MDS_INODELOCK_FULL,
MF_MDC_CANCEL_FID4);
+ if (rc)
+ return rc;
+
+ tgt = lmv_find_target(lmv, &op_data->op_fid4);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
- CDEBUG(D_INODE, DFID":m%d to "DFID"\n", PFID(&op_data->op_fid1),
- op_data->op_mds, PFID(&op_data->op_fid2));
+ /*
+ * Since the target child might be destroyed and might
+ * become an orphan, and we can only check for orphans on
+ * the local MDT right now, we send the rename request to
+ * the MDT where the target child is located. If the target
+ * child does not exist, the request is sent to the target parent
+ */
+ target_exp = tgt->ltd_exp;
+ }
- rc = md_rename(src_tgt->ltd_exp, op_data, old, oldlen,
- new, newlen, request);
- return rc;
+ rc = md_rename(target_exp, op_data, old, oldlen, new, newlen, request);
+ if (rc && rc != -EREMOTE)
+ return rc;
+
+ body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
+ if (!body)
+ return -EPROTO;
+
+ /* Not cross-ref case, just get out of here. */
+ if (likely(!(body->mbo_valid & OBD_MD_MDS)))
+ return rc;
+
+ CDEBUG(D_INODE, "%s: try rename to another MDT for " DFID "\n",
+ exp->exp_obd->obd_name, PFID(&body->mbo_fid1));
+
+ op_data->op_fid4 = body->mbo_fid1;
+ ptlrpc_req_finished(*request);
+ *request = NULL;
+ goto retry_rename;
}
static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, size_t ealen, void *ea2, size_t ea2len,
- struct ptlrpc_request **request,
- struct md_open_data **mod)
+ void *ea, size_t ealen, struct ptlrpc_request **request)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
@@ -2086,10 +2090,7 @@ static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
if (IS_ERR(tgt))
return PTR_ERR(tgt);
- rc = md_setattr(tgt->ltd_exp, op_data, ea, ealen, ea2,
- ea2len, request, mod);
-
- return rc;
+ return md_setattr(tgt->ltd_exp, op_data, ea, ealen, request);
}
static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
@@ -2623,23 +2624,10 @@ try_next_stripe:
goto retry_unlink;
}
-static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+static int lmv_precleanup(struct obd_device *obd)
{
- struct lmv_obd *lmv = &obd->u.lmv;
-
- switch (stage) {
- case OBD_CLEANUP_EARLY:
- /* XXX: here should be calling obd_precleanup() down to
- * stack.
- */
- break;
- case OBD_CLEANUP_EXPORTS:
- fld_client_debugfs_fini(&lmv->lmv_fld);
- lprocfs_obd_cleanup(obd);
- break;
- default:
- break;
- }
+ fld_client_debugfs_fini(&obd->u.lmv.lmv_fld);
+ lprocfs_obd_cleanup(obd);
return 0;
}
@@ -2654,14 +2642,12 @@ static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
* \param[in] key identifier of key to get value for
* \param[in] vallen size of \a val
* \param[out] val pointer to storage location for value
- * \param[in] lsm optional striping metadata of object
*
* \retval 0 on success
* \retval negative negated errno on failure
*/
static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
- __u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *lsm)
+ __u32 keylen, void *key, __u32 *vallen, void *val)
{
struct obd_device *obd;
struct lmv_obd *lmv;
@@ -2693,7 +2679,7 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
continue;
if (!obd_get_info(env, tgt->ltd_exp, keylen, key,
- vallen, val, NULL))
+ vallen, val))
return 0;
}
return -EINVAL;
@@ -2709,7 +2695,7 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
* desc.
*/
rc = obd_get_info(env, lmv->tgts[0]->ltd_exp, keylen, key,
- vallen, val, NULL);
+ vallen, val);
if (!rc && KEY_IS(KEY_CONN_DATA))
exp->exp_connect_data = *(struct obd_connect_data *)val;
return rc;
@@ -2777,90 +2763,6 @@ static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
return -EINVAL;
}
-static int lmv_pack_md_v1(const struct lmv_stripe_md *lsm,
- struct lmv_mds_md_v1 *lmm1)
-{
- int cplen;
- int i;
-
- lmm1->lmv_magic = cpu_to_le32(lsm->lsm_md_magic);
- lmm1->lmv_stripe_count = cpu_to_le32(lsm->lsm_md_stripe_count);
- lmm1->lmv_master_mdt_index = cpu_to_le32(lsm->lsm_md_master_mdt_index);
- lmm1->lmv_hash_type = cpu_to_le32(lsm->lsm_md_hash_type);
- cplen = strlcpy(lmm1->lmv_pool_name, lsm->lsm_md_pool_name,
- sizeof(lmm1->lmv_pool_name));
- if (cplen >= sizeof(lmm1->lmv_pool_name))
- return -E2BIG;
-
- for (i = 0; i < lsm->lsm_md_stripe_count; i++)
- fid_cpu_to_le(&lmm1->lmv_stripe_fids[i],
- &lsm->lsm_md_oinfo[i].lmo_fid);
- return 0;
-}
-
-static int
-lmv_pack_md(union lmv_mds_md **lmmp, const struct lmv_stripe_md *lsm,
- int stripe_count)
-{
- int lmm_size = 0, rc = 0;
- bool allocated = false;
-
- LASSERT(lmmp);
-
- /* Free lmm */
- if (*lmmp && !lsm) {
- int stripe_cnt;
-
- stripe_cnt = lmv_mds_md_stripe_count_get(*lmmp);
- lmm_size = lmv_mds_md_size(stripe_cnt,
- le32_to_cpu((*lmmp)->lmv_magic));
- if (!lmm_size)
- return -EINVAL;
- kvfree(*lmmp);
- *lmmp = NULL;
- return 0;
- }
-
- /* Alloc lmm */
- if (!*lmmp && !lsm) {
- lmm_size = lmv_mds_md_size(stripe_count, LMV_MAGIC);
- LASSERT(lmm_size > 0);
- *lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
- if (!*lmmp)
- return -ENOMEM;
- lmv_mds_md_stripe_count_set(*lmmp, stripe_count);
- (*lmmp)->lmv_magic = cpu_to_le32(LMV_MAGIC);
- return lmm_size;
- }
-
- /* pack lmm */
- LASSERT(lsm);
- lmm_size = lmv_mds_md_size(lsm->lsm_md_stripe_count,
- lsm->lsm_md_magic);
- if (!*lmmp) {
- *lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
- if (!*lmmp)
- return -ENOMEM;
- allocated = true;
- }
-
- switch (lsm->lsm_md_magic) {
- case LMV_MAGIC_V1:
- rc = lmv_pack_md_v1(lsm, &(*lmmp)->lmv_md_v1);
- break;
- default:
- rc = -EINVAL;
- break;
- }
-
- if (rc && allocated) {
- kvfree(*lmmp);
- *lmmp = NULL;
- }
-
- return lmm_size;
-}
-
static int lmv_unpack_md_v1(struct obd_export *exp, struct lmv_stripe_md *lsm,
const struct lmv_mds_md_v1 *lmm1)
{
@@ -2903,8 +2805,8 @@ static int lmv_unpack_md_v1(struct obd_export *exp, struct lmv_stripe_md *lsm,
return rc;
}
-int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
- const union lmv_mds_md *lmm, int stripe_count)
+static int lmv_unpackmd(struct obd_export *exp, struct lmv_stripe_md **lsmp,
+ const union lmv_mds_md *lmm, size_t lmm_size)
{
struct lmv_stripe_md *lsm;
bool allocated = false;
@@ -2933,17 +2835,6 @@ int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
return 0;
}
- /* Alloc memmd */
- if (!lsm && !lmm) {
- lsm_size = lmv_stripe_md_size(stripe_count);
- lsm = libcfs_kvzalloc(lsm_size, GFP_NOFS);
- if (!lsm)
- return -ENOMEM;
- lsm->lsm_md_stripe_count = stripe_count;
- *lsmp = lsm;
- return 0;
- }
-
if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_STRIPE)
return -EPERM;
@@ -2991,38 +2882,17 @@ int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
}
return lsm_size;
}
-EXPORT_SYMBOL(lmv_unpack_md);
-static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
- struct lov_mds_md *lmm, int disk_len)
+void lmv_free_memmd(struct lmv_stripe_md *lsm)
{
- return lmv_unpack_md(exp, (struct lmv_stripe_md **)lsmp,
- (union lmv_mds_md *)lmm, disk_len);
-}
-
-static int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
- struct lov_stripe_md *lsm)
-{
- const struct lmv_stripe_md *lmv = (struct lmv_stripe_md *)lsm;
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv_obd = &obd->u.lmv;
- int stripe_count;
-
- if (!lmmp) {
- if (lsm)
- stripe_count = lmv->lsm_md_stripe_count;
- else
- stripe_count = lmv_obd->desc.ld_tgt_count;
-
- return lmv_mds_md_size(stripe_count, LMV_MAGIC_V1);
- }
-
- return lmv_pack_md((union lmv_mds_md **)lmmp, lmv, 0);
+ lmv_unpackmd(NULL, &lsm, NULL, 0);
}
+EXPORT_SYMBOL(lmv_free_memmd);
static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
- ldlm_policy_data_t *policy, enum ldlm_mode mode,
- enum ldlm_cancel_flags flags, void *opaque)
+ union ldlm_policy_data *policy,
+ enum ldlm_mode mode, enum ldlm_cancel_flags flags,
+ void *opaque)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
@@ -3064,7 +2934,7 @@ static int lmv_set_lock_data(struct obd_export *exp,
static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags,
const struct lu_fid *fid,
enum ldlm_type type,
- ldlm_policy_data_t *policy,
+ union ldlm_policy_data *policy,
enum ldlm_mode mode,
struct lustre_handle *lockh)
{
@@ -3271,32 +3141,6 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
return rc;
}
-static int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc = 0;
- u32 i;
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- int err;
-
- tgt = lmv->tgts[i];
- if (!tgt || !tgt->ltd_exp || !tgt->ltd_active) {
- CERROR("lmv idx %d inactive\n", i);
- return -EIO;
- }
-
- err = obd_quotacheck(tgt->ltd_exp, oqctl);
- if (err && !rc)
- rc = err;
- }
-
- return rc;
-}
-
static int lmv_merge_attr(struct obd_export *exp,
const struct lmv_stripe_md *lsm,
struct cl_attr *attr,
@@ -3349,12 +3193,9 @@ static struct obd_ops lmv_obd_ops = {
.statfs = lmv_statfs,
.get_info = lmv_get_info,
.set_info_async = lmv_set_info_async,
- .packmd = lmv_packmd,
- .unpackmd = lmv_unpackmd,
.notify = lmv_notify,
.get_uuid = lmv_get_uuid,
.iocontrol = lmv_iocontrol,
- .quotacheck = lmv_quotacheck,
.quotactl = lmv_quotactl
};
@@ -3363,7 +3204,6 @@ static struct md_ops lmv_md_ops = {
.null_inode = lmv_null_inode,
.close = lmv_close,
.create = lmv_create,
- .done_writing = lmv_done_writing,
.enqueue = lmv_enqueue,
.getattr = lmv_getattr,
.getxattr = lmv_getxattr,
@@ -3388,6 +3228,7 @@ static struct md_ops lmv_md_ops = {
.intent_getattr_async = lmv_intent_getattr_async,
.revalidate_lock = lmv_revalidate_lock,
.get_fid_from_lsm = lmv_get_fid_from_lsm,
+ .unpackmd = lmv_unpackmd,
};
static int __init lmv_init(void)
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index 4d2b7d303fea..c49a34bf10e5 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -217,7 +217,7 @@ struct lov_object {
union lov_layout_state {
struct lov_layout_raid0 {
- unsigned lo_nr;
+ unsigned int lo_nr;
/**
* When this is true, lov_object::lo_attr contains
* valid up to date attributes for a top-level
@@ -412,7 +412,6 @@ struct lov_io_sub {
int sub_refcheck;
int sub_refcheck2;
int sub_reenter;
- void *sub_cookie;
};
/**
@@ -473,20 +472,6 @@ struct lov_session {
struct lov_sublock_env ls_subenv;
};
-/**
- * State of transfer for lov.
- */
-struct lov_req {
- struct cl_req_slice lr_cl;
-};
-
-/**
- * State of transfer for lovsub.
- */
-struct lovsub_req {
- struct cl_req_slice lsrq_cl;
-};
-
extern struct lu_device_type lov_device_type;
extern struct lu_device_type lovsub_device_type;
@@ -497,11 +482,9 @@ extern struct kmem_cache *lov_lock_kmem;
extern struct kmem_cache *lov_object_kmem;
extern struct kmem_cache *lov_thread_kmem;
extern struct kmem_cache *lov_session_kmem;
-extern struct kmem_cache *lov_req_kmem;
extern struct kmem_cache *lovsub_lock_kmem;
extern struct kmem_cache *lovsub_object_kmem;
-extern struct kmem_cache *lovsub_req_kmem;
extern struct kmem_cache *lov_lock_link_kmem;
@@ -700,11 +683,6 @@ static inline struct lov_page *cl2lov_page(const struct cl_page_slice *slice)
return container_of0(slice, struct lov_page, lps_cl);
}
-static inline struct lov_req *cl2lov_req(const struct cl_req_slice *slice)
-{
- return container_of0(slice, struct lov_req, lr_cl);
-}
-
static inline struct lovsub_page *
cl2lovsub_page(const struct cl_page_slice *slice)
{
@@ -712,11 +690,6 @@ cl2lovsub_page(const struct cl_page_slice *slice)
return container_of0(slice, struct lovsub_page, lsb_cl);
}
-static inline struct lovsub_req *cl2lovsub_req(const struct cl_req_slice *slice)
-{
- return container_of0(slice, struct lovsub_req, lsrq_cl);
-}
-
static inline struct lov_io *cl2lov_io(const struct lu_env *env,
const struct cl_io_slice *ios)
{
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index 056ae2ed88e8..7301f6e579a1 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -46,11 +46,9 @@ struct kmem_cache *lov_lock_kmem;
struct kmem_cache *lov_object_kmem;
struct kmem_cache *lov_thread_kmem;
struct kmem_cache *lov_session_kmem;
-struct kmem_cache *lov_req_kmem;
struct kmem_cache *lovsub_lock_kmem;
struct kmem_cache *lovsub_object_kmem;
-struct kmem_cache *lovsub_req_kmem;
struct kmem_cache *lov_lock_link_kmem;
@@ -79,11 +77,6 @@ struct lu_kmem_descr lov_caches[] = {
.ckd_size = sizeof(struct lov_session)
},
{
- .ckd_cache = &lov_req_kmem,
- .ckd_name = "lov_req_kmem",
- .ckd_size = sizeof(struct lov_req)
- },
- {
.ckd_cache = &lovsub_lock_kmem,
.ckd_name = "lovsub_lock_kmem",
.ckd_size = sizeof(struct lovsub_lock)
@@ -94,11 +87,6 @@ struct lu_kmem_descr lov_caches[] = {
.ckd_size = sizeof(struct lovsub_object)
},
{
- .ckd_cache = &lovsub_req_kmem,
- .ckd_name = "lovsub_req_kmem",
- .ckd_size = sizeof(struct lovsub_req)
- },
- {
.ckd_cache = &lov_lock_link_kmem,
.ckd_name = "lov_lock_link_kmem",
.ckd_size = sizeof(struct lov_lock_link)
@@ -110,25 +98,6 @@ struct lu_kmem_descr lov_caches[] = {
/*****************************************************************************
*
- * Lov transfer operations.
- *
- */
-
-static void lov_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret)
-{
- struct lov_req *lr;
-
- lr = cl2lov_req(slice);
- kmem_cache_free(lov_req_kmem, lr);
-}
-
-static const struct cl_req_operations lov_req_ops = {
- .cro_completion = lov_req_completion
-};
-
-/*****************************************************************************
- *
* Lov device and device type functions.
*
*/
@@ -248,26 +217,6 @@ static int lov_device_init(const struct lu_env *env, struct lu_device *d,
return rc;
}
-static int lov_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
-{
- struct lov_req *lr;
- int result;
-
- lr = kmem_cache_zalloc(lov_req_kmem, GFP_NOFS);
- if (lr) {
- cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops);
- result = 0;
- } else {
- result = -ENOMEM;
- }
- return result;
-}
-
-static const struct cl_device_operations lov_cl_ops = {
- .cdo_req_init = lov_req_init
-};
-
static void lov_emerg_free(struct lov_device_emerg **emrg, int nr)
{
int i;
@@ -478,7 +427,6 @@ static struct lu_device *lov_device_alloc(const struct lu_env *env,
cl_device_init(&ld->ld_cl, t);
d = lov2lu_dev(ld);
d->ld_ops = &lov_lu_ops;
- ld->ld_cl.cd_ops = &lov_cl_ops;
mutex_init(&ld->ld_mutex);
lockdep_set_class(&ld->ld_mutex, &cl_lov_device_mutex_class);
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index 214c561767e0..ac0bf64c08c1 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -76,18 +76,19 @@ static int lsm_lmm_verify_common(struct lov_mds_md *lmm, int lmm_bytes,
return 0;
}
-struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size)
+struct lov_stripe_md *lsm_alloc_plain(u16 stripe_count)
{
+ size_t oinfo_ptrs_size, lsm_size;
struct lov_stripe_md *lsm;
struct lov_oinfo *loi;
- int i, oinfo_ptrs_size;
+ int i;
LASSERT(stripe_count <= LOV_MAX_STRIPE_COUNT);
oinfo_ptrs_size = sizeof(struct lov_oinfo *) * stripe_count;
- *size = sizeof(struct lov_stripe_md) + oinfo_ptrs_size;
+ lsm_size = sizeof(*lsm) + oinfo_ptrs_size;
- lsm = libcfs_kvzalloc(*size, GFP_NOFS);
+ lsm = libcfs_kvzalloc(lsm_size, GFP_NOFS);
if (!lsm)
return NULL;
@@ -117,9 +118,43 @@ void lsm_free_plain(struct lov_stripe_md *lsm)
kvfree(lsm);
}
-static void lsm_unpackmd_common(struct lov_stripe_md *lsm,
- struct lov_mds_md *lmm)
+/*
+ * Find minimum stripe maxbytes value. For inactive or
+ * reconnecting targets use LUSTRE_EXT3_STRIPE_MAXBYTES.
+ */
+static loff_t lov_tgt_maxbytes(struct lov_tgt_desc *tgt)
+{
+ loff_t maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
+ struct obd_import *imp;
+
+ if (!tgt->ltd_active)
+ return maxbytes;
+
+ imp = tgt->ltd_obd->u.cli.cl_import;
+ if (!imp)
+ return maxbytes;
+
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state == LUSTRE_IMP_FULL &&
+ (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) &&
+ imp->imp_connect_data.ocd_maxbytes > 0)
+ maxbytes = imp->imp_connect_data.ocd_maxbytes;
+
+ spin_unlock(&imp->imp_lock);
+
+ return maxbytes;
+}
+
+static int lsm_unpackmd_common(struct lov_obd *lov,
+ struct lov_stripe_md *lsm,
+ struct lov_mds_md *lmm,
+ struct lov_ost_data_v1 *objects)
{
+ loff_t stripe_maxbytes = LLONG_MAX;
+ unsigned int stripe_count;
+ struct lov_oinfo *loi;
+ unsigned int i;
+
/*
* This supposes lov_mds_md_v1/v3 first fields
* are the same
@@ -129,11 +164,54 @@ static void lsm_unpackmd_common(struct lov_stripe_md *lsm,
lsm->lsm_pattern = le32_to_cpu(lmm->lmm_pattern);
lsm->lsm_layout_gen = le16_to_cpu(lmm->lmm_layout_gen);
lsm->lsm_pool_name[0] = '\0';
+
+ stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count;
+
+ for (i = 0; i < stripe_count; i++) {
+ loff_t tgt_bytes;
+
+ loi = lsm->lsm_oinfo[i];
+ ostid_le_to_cpu(&objects[i].l_ost_oi, &loi->loi_oi);
+ loi->loi_ost_idx = le32_to_cpu(objects[i].l_ost_idx);
+ loi->loi_ost_gen = le32_to_cpu(objects[i].l_ost_gen);
+ if (lov_oinfo_is_dummy(loi))
+ continue;
+
+ if (loi->loi_ost_idx >= lov->desc.ld_tgt_count &&
+ !lov2obd(lov)->obd_process_conf) {
+ CERROR("%s: OST index %d more than OST count %d\n",
+ (char *)lov->desc.ld_uuid.uuid,
+ loi->loi_ost_idx, lov->desc.ld_tgt_count);
+ lov_dump_lmm_v1(D_WARNING, lmm);
+ return -EINVAL;
+ }
+
+ if (!lov->lov_tgts[loi->loi_ost_idx]) {
+ CERROR("%s: OST index %d missing\n",
+ (char *)lov->desc.ld_uuid.uuid,
+ loi->loi_ost_idx);
+ lov_dump_lmm_v1(D_WARNING, lmm);
+ continue;
+ }
+
+ tgt_bytes = lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx]);
+ stripe_maxbytes = min_t(loff_t, stripe_maxbytes, tgt_bytes);
+ }
+
+ if (stripe_maxbytes == LLONG_MAX)
+ stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
+
+ if (!lsm->lsm_stripe_count)
+ lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count;
+ else
+ lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count;
+
+ return 0;
}
static void
lsm_stripe_by_index_plain(struct lov_stripe_md *lsm, int *stripeno,
- u64 *lov_off, u64 *swidth)
+ loff_t *lov_off, loff_t *swidth)
{
if (swidth)
*swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count;
@@ -141,36 +219,12 @@ lsm_stripe_by_index_plain(struct lov_stripe_md *lsm, int *stripeno,
static void
lsm_stripe_by_offset_plain(struct lov_stripe_md *lsm, int *stripeno,
- u64 *lov_off, u64 *swidth)
+ loff_t *lov_off, loff_t *swidth)
{
if (swidth)
*swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count;
}
-/* Find minimum stripe maxbytes value. For inactive or
- * reconnecting targets use LUSTRE_EXT3_STRIPE_MAXBYTES.
- */
-static void lov_tgt_maxbytes(struct lov_tgt_desc *tgt, __u64 *stripe_maxbytes)
-{
- struct obd_import *imp = tgt->ltd_obd->u.cli.cl_import;
-
- if (!imp || !tgt->ltd_active) {
- *stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
- return;
- }
-
- spin_lock(&imp->imp_lock);
- if (imp->imp_state == LUSTRE_IMP_FULL &&
- (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) &&
- imp->imp_connect_data.ocd_maxbytes > 0) {
- if (*stripe_maxbytes > imp->imp_connect_data.ocd_maxbytes)
- *stripe_maxbytes = imp->imp_connect_data.ocd_maxbytes;
- } else {
- *stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
- }
- spin_unlock(&imp->imp_lock);
-}
-
static int lsm_lmm_verify_v1(struct lov_mds_md_v1 *lmm, int lmm_bytes,
__u16 *stripe_count)
{
@@ -197,45 +251,7 @@ static int lsm_lmm_verify_v1(struct lov_mds_md_v1 *lmm, int lmm_bytes,
static int lsm_unpackmd_v1(struct lov_obd *lov, struct lov_stripe_md *lsm,
struct lov_mds_md_v1 *lmm)
{
- struct lov_oinfo *loi;
- int i;
- int stripe_count;
- __u64 stripe_maxbytes = OBD_OBJECT_EOF;
-
- lsm_unpackmd_common(lsm, lmm);
-
- stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count;
-
- for (i = 0; i < stripe_count; i++) {
- /* XXX LOV STACKING call down to osc_unpackmd() */
- loi = lsm->lsm_oinfo[i];
- ostid_le_to_cpu(&lmm->lmm_objects[i].l_ost_oi, &loi->loi_oi);
- loi->loi_ost_idx = le32_to_cpu(lmm->lmm_objects[i].l_ost_idx);
- loi->loi_ost_gen = le32_to_cpu(lmm->lmm_objects[i].l_ost_gen);
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (loi->loi_ost_idx >= lov->desc.ld_tgt_count) {
- CERROR("OST index %d more than OST count %d\n",
- loi->loi_ost_idx, lov->desc.ld_tgt_count);
- lov_dump_lmm_v1(D_WARNING, lmm);
- return -EINVAL;
- }
- if (!lov->lov_tgts[loi->loi_ost_idx]) {
- CERROR("OST index %d missing\n", loi->loi_ost_idx);
- lov_dump_lmm_v1(D_WARNING, lmm);
- return -EINVAL;
- }
- /* calculate the minimum stripe max bytes */
- lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx],
- &stripe_maxbytes);
- }
-
- lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count;
- if (lsm->lsm_stripe_count == 0)
- lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count;
-
- return 0;
+ return lsm_unpackmd_common(lov, lsm, lmm, lmm->lmm_objects);
}
const struct lsm_operations lsm_v1_ops = {
@@ -275,55 +291,21 @@ static int lsm_lmm_verify_v3(struct lov_mds_md *lmmv1, int lmm_bytes,
}
static int lsm_unpackmd_v3(struct lov_obd *lov, struct lov_stripe_md *lsm,
- struct lov_mds_md *lmmv1)
+ struct lov_mds_md *lmm)
{
- struct lov_mds_md_v3 *lmm;
- struct lov_oinfo *loi;
- int i;
- int stripe_count;
- __u64 stripe_maxbytes = OBD_OBJECT_EOF;
- int cplen = 0;
+ struct lov_mds_md_v3 *lmm_v3 = (struct lov_mds_md_v3 *)lmm;
+ size_t cplen = 0;
+ int rc;
- lmm = (struct lov_mds_md_v3 *)lmmv1;
+ rc = lsm_unpackmd_common(lov, lsm, lmm, lmm_v3->lmm_objects);
+ if (rc)
+ return rc;
- lsm_unpackmd_common(lsm, (struct lov_mds_md_v1 *)lmm);
-
- stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count;
-
- cplen = strlcpy(lsm->lsm_pool_name, lmm->lmm_pool_name,
+ cplen = strlcpy(lsm->lsm_pool_name, lmm_v3->lmm_pool_name,
sizeof(lsm->lsm_pool_name));
if (cplen >= sizeof(lsm->lsm_pool_name))
return -E2BIG;
- for (i = 0; i < stripe_count; i++) {
- /* XXX LOV STACKING call down to osc_unpackmd() */
- loi = lsm->lsm_oinfo[i];
- ostid_le_to_cpu(&lmm->lmm_objects[i].l_ost_oi, &loi->loi_oi);
- loi->loi_ost_idx = le32_to_cpu(lmm->lmm_objects[i].l_ost_idx);
- loi->loi_ost_gen = le32_to_cpu(lmm->lmm_objects[i].l_ost_gen);
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (loi->loi_ost_idx >= lov->desc.ld_tgt_count) {
- CERROR("OST index %d more than OST count %d\n",
- loi->loi_ost_idx, lov->desc.ld_tgt_count);
- lov_dump_lmm_v3(D_WARNING, lmm);
- return -EINVAL;
- }
- if (!lov->lov_tgts[loi->loi_ost_idx]) {
- CERROR("OST index %d missing\n", loi->loi_ost_idx);
- lov_dump_lmm_v3(D_WARNING, lmm);
- return -EINVAL;
- }
- /* calculate the minimum stripe max bytes */
- lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx],
- &stripe_maxbytes);
- }
-
- lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count;
- if (lsm->lsm_stripe_count == 0)
- lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count;
-
return 0;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
index 07e5ede3e952..774499c74daa 100644
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -36,6 +36,77 @@
#include "../include/obd_class.h"
#include "../include/lustre/lustre_user.h"
+/*
+ * If we are unable to get the maximum object size from the OST in
+ * ocd_maxbytes using OBD_CONNECT_MAXBYTES, then we fall back to using
+ * the old maximum object size from ext3.
+ */
+#define LUSTRE_EXT3_STRIPE_MAXBYTES 0x1fffffff000ULL
+
+struct lov_stripe_md {
+ atomic_t lsm_refc;
+ spinlock_t lsm_lock;
+ pid_t lsm_lock_owner; /* debugging */
+
+ /*
+ * maximum possible file size, might change as OSTs status changes,
+ * e.g. disconnected, deactivated
+ */
+ loff_t lsm_maxbytes;
+ struct ost_id lsm_oi;
+ u32 lsm_magic;
+ u32 lsm_stripe_size;
+ u32 lsm_pattern; /* RAID0, RAID1, released, ... */
+ u16 lsm_stripe_count;
+ u16 lsm_layout_gen;
+ char lsm_pool_name[LOV_MAXPOOLNAME + 1];
+ struct lov_oinfo *lsm_oinfo[0];
+};
+
+static inline bool lsm_is_released(struct lov_stripe_md *lsm)
+{
+ return !!(lsm->lsm_pattern & LOV_PATTERN_F_RELEASED);
+}
+
+static inline bool lsm_has_objects(struct lov_stripe_md *lsm)
+{
+ if (!lsm)
+ return false;
+
+ if (lsm_is_released(lsm))
+ return false;
+
+ return true;
+}
+
+struct lsm_operations {
+ void (*lsm_free)(struct lov_stripe_md *);
+ void (*lsm_stripe_by_index)(struct lov_stripe_md *, int *, loff_t *,
+ loff_t *);
+ void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, loff_t *,
+ loff_t *);
+ int (*lsm_lmm_verify)(struct lov_mds_md *lmm, int lmm_bytes,
+ u16 *stripe_count);
+ int (*lsm_unpackmd)(struct lov_obd *lov, struct lov_stripe_md *lsm,
+ struct lov_mds_md *lmm);
+};
+
+extern const struct lsm_operations lsm_v1_ops;
+extern const struct lsm_operations lsm_v3_ops;
+
+static inline const struct lsm_operations *lsm_op_find(int magic)
+{
+ switch (magic) {
+ case LOV_MAGIC_V1:
+ return &lsm_v1_ops;
+ case LOV_MAGIC_V3:
+ return &lsm_v3_ops;
+ default:
+ CERROR("unrecognized lsm_magic %08x\n", magic);
+ return NULL;
+ }
+}
+
/* lov_do_div64(a, b) returns a % b, and a = a / b.
* The 32-bit code is LOV-specific due to knowing about stripe limits in
* order to reduce the divisor to a 32-bit number. If the divisor is
@@ -110,8 +181,6 @@ struct lov_request_set {
atomic_t set_completes;
atomic_t set_success;
atomic_t set_finish_checked;
- struct llog_cookie *set_cookies;
- int set_cookie_sent;
struct list_head set_list;
wait_queue_head_t set_waitq;
};
@@ -132,8 +201,6 @@ static inline void lov_put_reqset(struct lov_request_set *set)
(char *)((lv)->lov_tgts[index]->ltd_uuid.uuid)
/* lov_merge.c */
-void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
- struct lov_stripe_md *lsm, int stripeno, int *set);
int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
struct ost_lvb *lvb, __u64 *kms_place);
@@ -150,17 +217,9 @@ pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index,
int stripe);
/* lov_request.c */
-int lov_update_common_set(struct lov_request_set *set,
- struct lov_request *req, int rc);
int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo,
struct lov_request_set **reqset);
int lov_fini_getattr_set(struct lov_request_set *set);
-int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct lov_request_set **reqset);
-int lov_update_setattr_set(struct lov_request_set *set,
- struct lov_request *req, int rc);
-int lov_fini_setattr_set(struct lov_request_set *set);
int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
struct lov_request_set **reqset);
int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs,
@@ -186,12 +245,10 @@ int lov_del_target(struct obd_device *obd, __u32 index,
struct obd_uuid *uuidp, int gen);
/* lov_pack.c */
-int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmm,
- struct lov_stripe_md *lsm);
-int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
- struct lov_mds_md *lmm, int lmm_bytes);
-int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
- int pattern, int magic);
+ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf,
+ size_t buf_size);
+struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, struct lov_mds_md *lmm,
+ size_t lmm_size);
int lov_free_memmd(struct lov_stripe_md **lsmp);
void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm);
@@ -199,7 +256,7 @@ void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm);
void lov_dump_lmm_common(int level, void *lmmp);
/* lov_ea.c */
-struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size);
+struct lov_stripe_md *lsm_alloc_plain(u16 stripe_count);
void lsm_free_plain(struct lov_stripe_md *lsm);
void dump_lsm(unsigned int level, const struct lov_stripe_md *lsm);
@@ -244,4 +301,9 @@ static inline bool lov_oinfo_is_dummy(const struct lov_oinfo *loi)
return false;
}
+static inline struct obd_device *lov2obd(const struct lov_obd *lov)
+{
+ return container_of0(lov, struct obd_device, u.lov);
+}
+
#endif
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index d10157985ed9..002326c282a7 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -86,6 +86,8 @@ static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio,
switch (io->ci_type) {
case CIT_SETATTR: {
io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr;
+ io->u.ci_setattr.sa_attr_flags =
+ parent->u.ci_setattr.sa_attr_flags;
io->u.ci_setattr.sa_valid = parent->u.ci_setattr.sa_valid;
io->u.ci_setattr.sa_stripe_index = stripe;
io->u.ci_setattr.sa_parent_fid =
@@ -98,6 +100,12 @@ static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio,
}
break;
}
+ case CIT_DATA_VERSION: {
+ io->u.ci_data_version.dv_data_version = 0;
+ io->u.ci_data_version.dv_flags =
+ parent->u.ci_data_version.dv_flags;
+ break;
+ }
case CIT_FAULT: {
struct cl_object *obj = parent->ci_obj;
loff_t off = cl_offset(obj, parent->u.ci_fault.ft_index);
@@ -159,12 +167,7 @@ static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
sub->sub_env = ld->ld_emrg[stripe]->emrg_env;
sub->sub_borrowed = 1;
} else {
- void *cookie;
-
- /* obtain new environment */
- cookie = cl_env_reenter();
sub->sub_env = cl_env_get(&sub->sub_refcheck);
- cl_env_reexit(cookie);
if (IS_ERR(sub->sub_env))
result = PTR_ERR(sub->sub_env);
@@ -337,6 +340,11 @@ static int lov_io_slice_init(struct lov_io *lio, struct lov_object *obj,
lio->lis_endpos = OBD_OBJECT_EOF;
break;
+ case CIT_DATA_VERSION:
+ lio->lis_pos = 0;
+ lio->lis_endpos = OBD_OBJECT_EOF;
+ break;
+
case CIT_FAULT: {
pgoff_t index = io->u.ci_fault.ft_index;
@@ -514,6 +522,24 @@ static int lov_io_end_wrapper(const struct lu_env *env, struct cl_io *io)
return 0;
}
+static void
+lov_io_data_version_end(const struct lu_env *env, const struct cl_io_slice *ios)
+{
+ struct lov_io *lio = cl2lov_io(env, ios);
+ struct cl_io *parent = lio->lis_cl.cis_io;
+ struct lov_io_sub *sub;
+
+ list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
+ lov_io_end_wrapper(env, sub->sub_io);
+
+ parent->u.ci_data_version.dv_data_version +=
+ sub->sub_io->u.ci_data_version.dv_data_version;
+
+ if (!parent->ci_result)
+ parent->ci_result = sub->sub_io->ci_result;
+ }
+}
+
static int lov_io_iter_fini_wrapper(const struct lu_env *env, struct cl_io *io)
{
cl_io_iter_fini(env, io);
@@ -555,6 +581,65 @@ static void lov_io_unlock(const struct lu_env *env,
LASSERT(rc == 0);
}
+static int lov_io_read_ahead(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ pgoff_t start, struct cl_read_ahead *ra)
+{
+ struct lov_io *lio = cl2lov_io(env, ios);
+ struct lov_object *loo = lio->lis_object;
+ struct cl_object *obj = lov2cl(loo);
+ struct lov_layout_raid0 *r0 = lov_r0(loo);
+ unsigned int pps; /* pages per stripe */
+ struct lov_io_sub *sub;
+ pgoff_t ra_end;
+ loff_t suboff;
+ int stripe;
+ int rc;
+
+ stripe = lov_stripe_number(loo->lo_lsm, cl_offset(obj, start));
+ if (unlikely(!r0->lo_sub[stripe]))
+ return -EIO;
+
+ sub = lov_sub_get(env, lio, stripe);
+ if (IS_ERR(sub))
+ return PTR_ERR(sub);
+
+ lov_stripe_offset(loo->lo_lsm, cl_offset(obj, start), stripe, &suboff);
+ rc = cl_io_read_ahead(sub->sub_env, sub->sub_io,
+ cl_index(lovsub2cl(r0->lo_sub[stripe]), suboff),
+ ra);
+ lov_sub_put(sub);
+
+ CDEBUG(D_READA, DFID " cra_end = %lu, stripes = %d, rc = %d\n",
+ PFID(lu_object_fid(lov2lu(loo))), ra->cra_end, r0->lo_nr, rc);
+ if (rc)
+ return rc;
+
+ /**
+ * Adjust the stripe index by layout of raid0. ra->cra_end is
+ * the maximum page index covered by an underlying DLM lock.
+ * This function converts cra_end from stripe level to file
+ * level, and makes sure it's not beyond the stripe boundary.
+ */
+ if (r0->lo_nr == 1) /* single stripe file */
+ return 0;
+
+ /* cra_end is stripe level, convert it into file level */
+ ra_end = ra->cra_end;
+ if (ra_end != CL_PAGE_EOF)
+ ra_end = lov_stripe_pgoff(loo->lo_lsm, ra_end, stripe);
+
+ pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT;
+
+ CDEBUG(D_READA, DFID " max_index = %lu, pps = %u, stripe_size = %u, stripe no = %u, start index = %lu\n",
+ PFID(lu_object_fid(lov2lu(loo))), ra_end, pps,
+ loo->lo_lsm->lsm_stripe_size, stripe, start);
+
+ /* never exceed the end of the stripe */
+ ra->cra_end = min_t(pgoff_t, ra_end, start + pps - start % pps - 1);
+ return 0;
+}
+
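The clamp at the end of lov_io_read_ahead() keeps the read-ahead window inside the stripe that holds the start index. A minimal standalone sketch of that page arithmetic follows; clamp_ra_end(), the pages-per-stripe value, and the sample indices are assumptions for illustration, not code from the patch.

#include <stdio.h>

/* Clamp a read-ahead end index so it never crosses the stripe that
 * contains 'start'.  'pps' is pages per stripe, mirroring
 * lsm_stripe_size >> PAGE_SHIFT in the patch.
 */
static unsigned long clamp_ra_end(unsigned long start, unsigned long ra_end,
				  unsigned long pps)
{
	/* last page index of the stripe holding 'start' */
	unsigned long stripe_last = start + pps - start % pps - 1;

	return ra_end < stripe_last ? ra_end : stripe_last;
}

int main(void)
{
	/* assume 1 MiB stripes and 4 KiB pages -> 256 pages per stripe */
	unsigned long pps = 256;

	/* start inside stripe 1, lock covers far beyond it: clamped */
	printf("%lu\n", clamp_ra_end(300, 10000, pps));	/* prints 511 */
	/* lock ends before the stripe boundary: kept as-is */
	printf("%lu\n", clamp_ra_end(300, 400, pps));	/* prints 400 */
	return 0;
}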
/**
* lov implementation of cl_operations::cio_submit() method. It takes a list
* of pages in \a queue, splits it into per-stripe sub-lists, invokes
@@ -779,6 +864,15 @@ static const struct cl_io_operations lov_io_ops = {
.cio_start = lov_io_start,
.cio_end = lov_io_end
},
+ [CIT_DATA_VERSION] = {
+ .cio_fini = lov_io_fini,
+ .cio_iter_init = lov_io_iter_init,
+ .cio_iter_fini = lov_io_iter_fini,
+ .cio_lock = lov_io_lock,
+ .cio_unlock = lov_io_unlock,
+ .cio_start = lov_io_start,
+ .cio_end = lov_io_data_version_end,
+ },
[CIT_FAULT] = {
.cio_fini = lov_io_fini,
.cio_iter_init = lov_io_iter_init,
@@ -801,6 +895,7 @@ static const struct cl_io_operations lov_io_ops = {
.cio_fini = lov_io_fini
}
},
+ .cio_read_ahead = lov_io_read_ahead,
.cio_submit = lov_io_submit,
.cio_commit_async = lov_io_commit_async,
};
@@ -820,6 +915,13 @@ static void lov_empty_io_fini(const struct lu_env *env,
wake_up_all(&lov->lo_waitq);
}
+static int lov_empty_io_submit(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ enum cl_req_type crt, struct cl_2queue *queue)
+{
+ return -EBADF;
+}
+
static void lov_empty_impossible(const struct lu_env *env,
struct cl_io_slice *ios)
{
@@ -870,7 +972,7 @@ static const struct cl_io_operations lov_empty_io_ops = {
.cio_fini = lov_empty_io_fini
}
},
- .cio_submit = LOV_EMPTY_IMPOSSIBLE,
+ .cio_submit = lov_empty_io_submit,
.cio_commit_async = LOV_EMPTY_IMPOSSIBLE
};
@@ -909,6 +1011,7 @@ int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
break;
case CIT_FSYNC:
case CIT_SETATTR:
+ case CIT_DATA_VERSION:
result = 1;
break;
case CIT_WRITE:
@@ -944,6 +1047,7 @@ int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
LASSERTF(0, "invalid type %d\n", io->ci_type);
case CIT_MISC:
case CIT_FSYNC:
+ case CIT_DATA_VERSION:
result = 1;
break;
case CIT_SETATTR:
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index 674af106b50b..391dfd207177 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -104,53 +104,3 @@ int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
lvb->lvb_ctime = current_ctime;
return rc;
}
-
-void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
- struct lov_stripe_md *lsm, int stripeno, int *set)
-{
- valid &= src->o_valid;
-
- if (*set) {
- tgt->o_valid &= valid;
- if (valid & OBD_MD_FLSIZE) {
- /* this handles sparse files properly */
- u64 lov_size;
-
- lov_size = lov_stripe_size(lsm, src->o_size, stripeno);
- if (lov_size > tgt->o_size)
- tgt->o_size = lov_size;
- }
- if (valid & OBD_MD_FLBLOCKS)
- tgt->o_blocks += src->o_blocks;
- if (valid & OBD_MD_FLBLKSZ)
- tgt->o_blksize += src->o_blksize;
- if (valid & OBD_MD_FLCTIME && tgt->o_ctime < src->o_ctime)
- tgt->o_ctime = src->o_ctime;
- if (valid & OBD_MD_FLMTIME && tgt->o_mtime < src->o_mtime)
- tgt->o_mtime = src->o_mtime;
- if (valid & OBD_MD_FLDATAVERSION)
- tgt->o_data_version += src->o_data_version;
-
- /* handle flags */
- if (valid & OBD_MD_FLFLAGS)
- tgt->o_flags &= src->o_flags;
- else
- tgt->o_flags = 0;
- } else {
- memcpy(tgt, src, sizeof(*tgt));
- tgt->o_oi = lsm->lsm_oi;
- tgt->o_valid = valid;
- if (valid & OBD_MD_FLSIZE)
- tgt->o_size = lov_stripe_size(lsm, src->o_size,
- stripeno);
- tgt->o_flags = 0;
- if (valid & OBD_MD_FLFLAGS)
- tgt->o_flags = src->o_flags;
- }
-
- /* data_version needs to be valid on all stripes to be correct! */
- if (!(valid & OBD_MD_FLDATAVERSION))
- tgt->o_valid &= ~OBD_MD_FLDATAVERSION;
-
- *set += 1;
-}
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index b23016f7ec26..63b064523c6a 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -40,19 +40,20 @@
#define DEBUG_SUBSYSTEM S_LOV
#include "../../include/linux/libcfs/libcfs.h"
-#include "../include/obd_support.h"
-#include "../include/lustre/lustre_ioctl.h"
-#include "../include/lustre_lib.h"
-#include "../include/lustre_net.h"
#include "../include/lustre/lustre_idl.h"
+#include "../include/lustre/lustre_ioctl.h"
+
+#include "../include/cl_object.h"
#include "../include/lustre_dlm.h"
+#include "../include/lustre_fid.h"
+#include "../include/lustre_lib.h"
#include "../include/lustre_mds.h"
-#include "../include/obd_class.h"
-#include "../include/lprocfs_status.h"
+#include "../include/lustre_net.h"
#include "../include/lustre_param.h"
-#include "../include/cl_object.h"
-#include "../include/lustre/ll_fiemap.h"
-#include "../include/lustre_fid.h"
+#include "../include/lustre_swab.h"
+#include "../include/lprocfs_status.h"
+#include "../include/obd_class.h"
+#include "../include/obd_support.h"
#include "lov_internal.h"
@@ -826,29 +827,6 @@ out:
return rc;
}
-static int lov_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
-{
- struct lov_obd *lov = &obd->u.lov;
-
- switch (stage) {
- case OBD_CLEANUP_EARLY: {
- int i;
-
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
- continue;
- obd_precleanup(class_exp2obd(lov->lov_tgts[i]->ltd_exp),
- OBD_CLEANUP_EARLY);
- }
- break;
- }
- default:
- break;
- }
-
- return 0;
-}
-
static int lov_cleanup(struct obd_device *obd)
{
struct lov_obd *lov = &obd->u.lov;
@@ -972,163 +950,6 @@ out:
return rc;
}
-#define ASSERT_LSM_MAGIC(lsmp) \
-do { \
- LASSERT((lsmp)); \
- LASSERTF(((lsmp)->lsm_magic == LOV_MAGIC_V1 || \
- (lsmp)->lsm_magic == LOV_MAGIC_V3), \
- "%p->lsm_magic=%x\n", (lsmp), (lsmp)->lsm_magic); \
-} while (0)
-
-static int lov_getattr_interpret(struct ptlrpc_request_set *rqset,
- void *data, int rc)
-{
- struct lov_request_set *lovset = (struct lov_request_set *)data;
- int err;
-
- /* don't do attribute merge if this async op failed */
- if (rc)
- atomic_set(&lovset->set_completes, 0);
- err = lov_fini_getattr_set(lovset);
- return rc ? rc : err;
-}
-
-static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
- struct ptlrpc_request_set *rqset)
-{
- struct lov_request_set *lovset;
- struct lov_obd *lov;
- struct lov_request *req;
- int rc = 0, err;
-
- LASSERT(oinfo);
- ASSERT_LSM_MAGIC(oinfo->oi_md);
-
- if (!exp || !exp->exp_obd)
- return -ENODEV;
-
- lov = &exp->exp_obd->u.lov;
-
- rc = lov_prep_getattr_set(exp, oinfo, &lovset);
- if (rc)
- return rc;
-
- CDEBUG(D_INFO, "objid "DOSTID": %ux%u byte stripes\n",
- POSTID(&oinfo->oi_md->lsm_oi), oinfo->oi_md->lsm_stripe_count,
- oinfo->oi_md->lsm_stripe_size);
-
- list_for_each_entry(req, &lovset->set_list, rq_link) {
- CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n",
- POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
- POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx);
- rc = obd_getattr_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
- &req->rq_oi, rqset);
- if (rc) {
- CERROR("%s: getattr objid "DOSTID" subobj"
- DOSTID" on OST idx %d: rc = %d\n",
- exp->exp_obd->obd_name,
- POSTID(&oinfo->oi_oa->o_oi),
- POSTID(&req->rq_oi.oi_oa->o_oi),
- req->rq_idx, rc);
- goto out;
- }
- }
-
- if (!list_empty(&rqset->set_requests)) {
- LASSERT(rc == 0);
- LASSERT(!rqset->set_interpret);
- rqset->set_interpret = lov_getattr_interpret;
- rqset->set_arg = (void *)lovset;
- return rc;
- }
-out:
- if (rc)
- atomic_set(&lovset->set_completes, 0);
- err = lov_fini_getattr_set(lovset);
- return rc ? rc : err;
-}
-
-static int lov_setattr_interpret(struct ptlrpc_request_set *rqset,
- void *data, int rc)
-{
- struct lov_request_set *lovset = (struct lov_request_set *)data;
- int err;
-
- if (rc)
- atomic_set(&lovset->set_completes, 0);
- err = lov_fini_setattr_set(lovset);
- return rc ? rc : err;
-}
-
-/* If @oti is given, the request goes from MDS and responses from OSTs are not
- * needed. Otherwise, a client is waiting for responses.
- */
-static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct ptlrpc_request_set *rqset)
-{
- struct lov_request_set *set;
- struct lov_request *req;
- struct lov_obd *lov;
- int rc = 0;
-
- LASSERT(oinfo);
- ASSERT_LSM_MAGIC(oinfo->oi_md);
- if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) {
- LASSERT(oti);
- LASSERT(oti->oti_logcookies);
- }
-
- if (!exp || !exp->exp_obd)
- return -ENODEV;
-
- lov = &exp->exp_obd->u.lov;
- rc = lov_prep_setattr_set(exp, oinfo, oti, &set);
- if (rc)
- return rc;
-
- CDEBUG(D_INFO, "objid "DOSTID": %ux%u byte stripes\n",
- POSTID(&oinfo->oi_md->lsm_oi),
- oinfo->oi_md->lsm_stripe_count,
- oinfo->oi_md->lsm_stripe_size);
-
- list_for_each_entry(req, &set->set_list, rq_link) {
- if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
- oti->oti_logcookies = set->set_cookies + req->rq_stripe;
-
- CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n",
- POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
- POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx);
-
- rc = obd_setattr_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
- &req->rq_oi, oti, rqset);
- if (rc) {
- CERROR("error: setattr objid "DOSTID" subobj"
- DOSTID" on OST idx %d: rc = %d\n",
- POSTID(&set->set_oi->oi_oa->o_oi),
- POSTID(&req->rq_oi.oi_oa->o_oi),
- req->rq_idx, rc);
- break;
- }
- }
-
- /* If we are not waiting for responses on async requests, return. */
- if (rc || !rqset || list_empty(&rqset->set_requests)) {
- int err;
-
- if (rc)
- atomic_set(&set->set_completes, 0);
- err = lov_fini_setattr_set(set);
- return rc ? rc : err;
- }
-
- LASSERT(!rqset->set_interpret);
- rqset->set_interpret = lov_setattr_interpret;
- rqset->set_arg = (void *)set;
-
- return 0;
-}
-
int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc)
{
struct lov_request_set *lovset = (struct lov_request_set *)data;
@@ -1183,7 +1004,10 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp,
struct obd_statfs *osfs, __u64 max_age, __u32 flags)
{
struct ptlrpc_request_set *set = NULL;
- struct obd_info oinfo = { };
+ struct obd_info oinfo = {
+ .oi_osfs = osfs,
+ .oi_flags = flags,
+ };
int rc = 0;
/* for obdclass we forbid using obd_statfs_rqset, but prefer using async
@@ -1193,8 +1017,6 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp,
if (!set)
return -ENOMEM;
- oinfo.oi_osfs = osfs;
- oinfo.oi_flags = flags;
rc = lov_statfs_async(exp, &oinfo, max_age, set);
if (rc == 0)
rc = ptlrpc_set_wait(set);
@@ -1235,8 +1057,8 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
/* copy UUID */
if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd),
- min((int)data->ioc_plen2,
- (int)sizeof(struct obd_uuid))))
+ min_t(unsigned long, data->ioc_plen2,
+ sizeof(struct obd_uuid))))
return -EFAULT;
memcpy(&flags, data->ioc_inlbuf1, sizeof(__u32));
@@ -1249,8 +1071,8 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
if (rc)
return rc;
if (copy_to_user(data->ioc_pbuf1, &stat_buf,
- min((int)data->ioc_plen1,
- (int)sizeof(stat_buf))))
+ min_t(unsigned long, data->ioc_plen1,
+ sizeof(stat_buf))))
return -EFAULT;
break;
}
@@ -1367,8 +1189,6 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
osc_obd->obd_force = obddev->obd_force;
err = obd_iocontrol(cmd, lov->lov_tgts[i]->ltd_exp,
len, karg, uarg);
- if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK)
- return err;
if (err) {
if (lov->lov_tgts[i]->ltd_active) {
CDEBUG(err == -ENOTTY ?
@@ -1391,454 +1211,35 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
return rc;
}
-#define FIEMAP_BUFFER_SIZE 4096
-
-/**
- * Non-zero fe_logical indicates that this is a continuation FIEMAP
- * call. The local end offset and the device are sent in the first
- * fm_extent. This function calculates the stripe number from the index.
- * This function returns a stripe_no on which mapping is to be restarted.
- *
- * This function returns fm_end_offset which is the in-OST offset at which
- * mapping should be restarted. If fm_end_offset=0 is returned then caller
- * will re-calculate proper offset in next stripe.
- * Note that the first extent is passed to lov_get_info via the value field.
- *
- * \param fiemap fiemap request header
- * \param lsm striping information for the file
- * \param fm_start logical start of mapping
- * \param fm_end logical end of mapping
- * \param start_stripe starting stripe will be returned in this
- */
-static u64 fiemap_calc_fm_end_offset(struct ll_user_fiemap *fiemap,
- struct lov_stripe_md *lsm, u64 fm_start,
- u64 fm_end, int *start_stripe)
-{
- u64 local_end = fiemap->fm_extents[0].fe_logical;
- u64 lun_start, lun_end;
- u64 fm_end_offset;
- int stripe_no = -1, i;
-
- if (fiemap->fm_extent_count == 0 ||
- fiemap->fm_extents[0].fe_logical == 0)
- return 0;
-
- /* Find out stripe_no from ost_index saved in the fe_device */
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
-
- if (lov_oinfo_is_dummy(oinfo))
- continue;
-
- if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) {
- stripe_no = i;
- break;
- }
- }
- if (stripe_no == -1)
- return -EINVAL;
-
- /* If we have finished mapping on previous device, shift logical
- * offset to start of next device
- */
- if ((lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end,
- &lun_start, &lun_end)) != 0 &&
- local_end < lun_end) {
- fm_end_offset = local_end;
- *start_stripe = stripe_no;
- } else {
- /* This is a special value to indicate that caller should
- * calculate offset in next stripe.
- */
- fm_end_offset = 0;
- *start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count;
- }
-
- return fm_end_offset;
-}
-
-/**
- * We calculate on which OST the mapping will end. If the length of mapping
- * is greater than (stripe_size * stripe_count) then the last_stripe will
- * will be one just before start_stripe. Else we check if the mapping
- * intersects each OST and find last_stripe.
- * This function returns the last_stripe and also sets the stripe_count
- * over which the mapping is spread
- *
- * \param lsm striping information for the file
- * \param fm_start logical start of mapping
- * \param fm_end logical end of mapping
- * \param start_stripe starting stripe of the mapping
- * \param stripe_count the number of stripes across which to map is returned
- *
- * \retval last_stripe return the last stripe of the mapping
- */
-static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm, u64 fm_start,
- u64 fm_end, int start_stripe,
- int *stripe_count)
-{
- int last_stripe;
- u64 obd_start, obd_end;
- int i, j;
-
- if (fm_end - fm_start > lsm->lsm_stripe_size * lsm->lsm_stripe_count) {
- last_stripe = start_stripe < 1 ? lsm->lsm_stripe_count - 1 :
- start_stripe - 1;
- *stripe_count = lsm->lsm_stripe_count;
- } else {
- for (j = 0, i = start_stripe; j < lsm->lsm_stripe_count;
- i = (i + 1) % lsm->lsm_stripe_count, j++) {
- if ((lov_stripe_intersects(lsm, i, fm_start, fm_end,
- &obd_start, &obd_end)) == 0)
- break;
- }
- *stripe_count = j;
- last_stripe = (start_stripe + j - 1) % lsm->lsm_stripe_count;
- }
-
- return last_stripe;
-}
-
-/**
- * Set fe_device and copy extents from local buffer into main return buffer.
- *
- * \param fiemap fiemap request header
- * \param lcl_fm_ext array of local fiemap extents to be copied
- * \param ost_index OST index to be written into the fm_device field for each
- extent
- * \param ext_count number of extents to be copied
- * \param current_extent where to start copying in main extent array
- */
-static void fiemap_prepare_and_copy_exts(struct ll_user_fiemap *fiemap,
- struct ll_fiemap_extent *lcl_fm_ext,
- int ost_index, unsigned int ext_count,
- int current_extent)
-{
- char *to;
- int ext;
-
- for (ext = 0; ext < ext_count; ext++) {
- lcl_fm_ext[ext].fe_device = ost_index;
- lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
- }
-
- /* Copy fm_extent's from fm_local to return buffer */
- to = (char *)fiemap + fiemap_count_to_size(current_extent);
- memcpy(to, lcl_fm_ext, ext_count * sizeof(struct ll_fiemap_extent));
-}
-
-/**
- * Break down the FIEMAP request and send appropriate calls to individual OSTs.
- * This also handles the restarting of FIEMAP calls in case mapping overflows
- * the available number of extents in single call.
- */
-static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
- __u32 *vallen, void *val, struct lov_stripe_md *lsm)
-{
- struct ll_fiemap_info_key *fm_key = key;
- struct ll_user_fiemap *fiemap = val;
- struct ll_user_fiemap *fm_local = NULL;
- struct ll_fiemap_extent *lcl_fm_ext;
- int count_local;
- unsigned int get_num_extents = 0;
- int ost_index = 0, actual_start_stripe, start_stripe;
- u64 fm_start, fm_end, fm_length, fm_end_offset;
- u64 curr_loc;
- int current_extent = 0, rc = 0, i;
- /* Whether have we collected enough extents */
- bool enough = false;
- int ost_eof = 0; /* EOF for object */
- int ost_done = 0; /* done with required mapping for this OST? */
- int last_stripe;
- int cur_stripe = 0, cur_stripe_wrap = 0, stripe_count;
- unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
-
- if (!lsm_has_objects(lsm)) {
- if (lsm && lsm_is_released(lsm) && (fm_key->fiemap.fm_start <
- fm_key->oa.o_size)) {
- /*
- * released file, return a minimal FIEMAP if
- * request fits in file-size.
- */
- fiemap->fm_mapped_extents = 1;
- fiemap->fm_extents[0].fe_logical =
- fm_key->fiemap.fm_start;
- if (fm_key->fiemap.fm_start + fm_key->fiemap.fm_length <
- fm_key->oa.o_size) {
- fiemap->fm_extents[0].fe_length =
- fm_key->fiemap.fm_length;
- } else {
- fiemap->fm_extents[0].fe_length =
- fm_key->oa.o_size - fm_key->fiemap.fm_start;
- fiemap->fm_extents[0].fe_flags |=
- (FIEMAP_EXTENT_UNKNOWN |
- FIEMAP_EXTENT_LAST);
- }
- }
- rc = 0;
- goto out;
- }
-
- if (fiemap_count_to_size(fm_key->fiemap.fm_extent_count) < buffer_size)
- buffer_size = fiemap_count_to_size(fm_key->fiemap.fm_extent_count);
-
- fm_local = libcfs_kvzalloc(buffer_size, GFP_NOFS);
- if (!fm_local) {
- rc = -ENOMEM;
- goto out;
- }
- lcl_fm_ext = &fm_local->fm_extents[0];
-
- count_local = fiemap_size_to_count(buffer_size);
-
- memcpy(fiemap, &fm_key->fiemap, sizeof(*fiemap));
- fm_start = fiemap->fm_start;
- fm_length = fiemap->fm_length;
- /* Calculate start stripe, last stripe and length of mapping */
- start_stripe = lov_stripe_number(lsm, fm_start);
- actual_start_stripe = start_stripe;
- fm_end = (fm_length == ~0ULL ? fm_key->oa.o_size :
- fm_start + fm_length - 1);
- /* If fm_length != ~0ULL but fm_start+fm_length-1 exceeds file size */
- if (fm_end > fm_key->oa.o_size)
- fm_end = fm_key->oa.o_size;
-
- last_stripe = fiemap_calc_last_stripe(lsm, fm_start, fm_end,
- actual_start_stripe,
- &stripe_count);
-
- fm_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fm_start,
- fm_end, &start_stripe);
- if (fm_end_offset == -EINVAL) {
- rc = -EINVAL;
- goto out;
- }
-
- if (fiemap_count_to_size(fiemap->fm_extent_count) > *vallen)
- fiemap->fm_extent_count = fiemap_size_to_count(*vallen);
- if (fiemap->fm_extent_count == 0) {
- get_num_extents = 1;
- count_local = 0;
- }
- /* Check each stripe */
- for (cur_stripe = start_stripe, i = 0; i < stripe_count;
- i++, cur_stripe = (cur_stripe + 1) % lsm->lsm_stripe_count) {
- u64 req_fm_len; /* Stores length of required mapping */
- u64 len_mapped_single_call;
- u64 lun_start, lun_end, obd_object_end;
- unsigned int ext_count;
-
- cur_stripe_wrap = cur_stripe;
-
- /* Find out range of mapping on this stripe */
- if ((lov_stripe_intersects(lsm, cur_stripe, fm_start, fm_end,
- &lun_start, &obd_object_end)) == 0)
- continue;
-
- if (lov_oinfo_is_dummy(lsm->lsm_oinfo[cur_stripe])) {
- rc = -EIO;
- goto out;
- }
-
- /* If this is a continuation FIEMAP call and we are on
- * starting stripe then lun_start needs to be set to
- * fm_end_offset
- */
- if (fm_end_offset != 0 && cur_stripe == start_stripe)
- lun_start = fm_end_offset;
-
- if (fm_length != ~0ULL) {
- /* Handle fm_start + fm_length overflow */
- if (fm_start + fm_length < fm_start)
- fm_length = ~0ULL - fm_start;
- lun_end = lov_size_to_stripe(lsm, fm_start + fm_length,
- cur_stripe);
- } else {
- lun_end = ~0ULL;
- }
-
- if (lun_start == lun_end)
- continue;
-
- req_fm_len = obd_object_end - lun_start;
- fm_local->fm_length = 0;
- len_mapped_single_call = 0;
-
- /* If the output buffer is very large and the objects have many
- * extents we may need to loop on a single OST repeatedly
- */
- ost_eof = 0;
- ost_done = 0;
- do {
- if (get_num_extents == 0) {
- /* Don't get too many extents. */
- if (current_extent + count_local >
- fiemap->fm_extent_count)
- count_local = fiemap->fm_extent_count -
- current_extent;
- }
-
- lun_start += len_mapped_single_call;
- fm_local->fm_length = req_fm_len - len_mapped_single_call;
- req_fm_len = fm_local->fm_length;
- fm_local->fm_extent_count = enough ? 1 : count_local;
- fm_local->fm_mapped_extents = 0;
- fm_local->fm_flags = fiemap->fm_flags;
-
- fm_key->oa.o_oi = lsm->lsm_oinfo[cur_stripe]->loi_oi;
- ost_index = lsm->lsm_oinfo[cur_stripe]->loi_ost_idx;
-
- if (ost_index < 0 ||
- ost_index >= lov->desc.ld_tgt_count) {
- rc = -EINVAL;
- goto out;
- }
-
- /* If OST is inactive, return extent with UNKNOWN flag */
- if (!lov->lov_tgts[ost_index]->ltd_active) {
- fm_local->fm_flags |= FIEMAP_EXTENT_LAST;
- fm_local->fm_mapped_extents = 1;
-
- lcl_fm_ext[0].fe_logical = lun_start;
- lcl_fm_ext[0].fe_length = obd_object_end -
- lun_start;
- lcl_fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
-
- goto inactive_tgt;
- }
-
- fm_local->fm_start = lun_start;
- fm_local->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
- memcpy(&fm_key->fiemap, fm_local, sizeof(*fm_local));
- *vallen = fiemap_count_to_size(fm_local->fm_extent_count);
- rc = obd_get_info(NULL,
- lov->lov_tgts[ost_index]->ltd_exp,
- keylen, key, vallen, fm_local, lsm);
- if (rc != 0)
- goto out;
-
-inactive_tgt:
- ext_count = fm_local->fm_mapped_extents;
- if (ext_count == 0) {
- ost_done = 1;
- /* If last stripe has hole at the end,
- * then we need to return
- */
- if (cur_stripe_wrap == last_stripe) {
- fiemap->fm_mapped_extents = 0;
- goto finish;
- }
- break;
- } else if (enough) {
- /*
- * We've collected enough extents and there are
- * more extents after it.
- */
- goto finish;
- }
-
- /* If we just need num of extents then go to next device */
- if (get_num_extents) {
- current_extent += ext_count;
- break;
- }
-
- len_mapped_single_call =
- lcl_fm_ext[ext_count - 1].fe_logical -
- lun_start + lcl_fm_ext[ext_count - 1].fe_length;
-
- /* Have we finished mapping on this device? */
- if (req_fm_len <= len_mapped_single_call)
- ost_done = 1;
-
- /* Clear the EXTENT_LAST flag which can be present on
- * last extent
- */
- if (lcl_fm_ext[ext_count - 1].fe_flags &
- FIEMAP_EXTENT_LAST)
- lcl_fm_ext[ext_count - 1].fe_flags &=
- ~FIEMAP_EXTENT_LAST;
-
- curr_loc = lov_stripe_size(lsm,
- lcl_fm_ext[ext_count - 1].fe_logical +
- lcl_fm_ext[ext_count - 1].fe_length,
- cur_stripe);
- if (curr_loc >= fm_key->oa.o_size)
- ost_eof = 1;
-
- fiemap_prepare_and_copy_exts(fiemap, lcl_fm_ext,
- ost_index, ext_count,
- current_extent);
-
- current_extent += ext_count;
-
- /* Ran out of available extents? */
- if (current_extent >= fiemap->fm_extent_count)
- enough = true;
- } while (ost_done == 0 && ost_eof == 0);
-
- if (cur_stripe_wrap == last_stripe)
- goto finish;
- }
-
-finish:
- /* Indicate that we are returning device offsets unless file just has
- * single stripe
- */
- if (lsm->lsm_stripe_count > 1)
- fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
-
- if (get_num_extents)
- goto skip_last_device_calc;
-
- /* Check if we have reached the last stripe and whether mapping for that
- * stripe is done.
- */
- if (cur_stripe_wrap == last_stripe) {
- if (ost_done || ost_eof)
- fiemap->fm_extents[current_extent - 1].fe_flags |=
- FIEMAP_EXTENT_LAST;
- }
-
-skip_last_device_calc:
- fiemap->fm_mapped_extents = current_extent;
-
-out:
- kvfree(fm_local);
- return rc;
-}
-
static int lov_get_info(const struct lu_env *env, struct obd_export *exp,
- __u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *lsm)
+ __u32 keylen, void *key, __u32 *vallen, void *val)
{
struct obd_device *obddev = class_exp2obd(exp);
struct lov_obd *lov = &obddev->u.lov;
- int rc;
+ struct lov_desc *ld = &lov->desc;
+ int rc = 0;
if (!vallen || !val)
return -EFAULT;
obd_getref(obddev);
- if (KEY_IS(KEY_LOVDESC)) {
- struct lov_desc *desc_ret = val;
- *desc_ret = lov->desc;
+ if (KEY_IS(KEY_MAX_EASIZE)) {
+ u32 max_stripe_count = min_t(u32, ld->ld_active_tgt_count,
+ LOV_MAX_STRIPE_COUNT);
- rc = 0;
- goto out;
- } else if (KEY_IS(KEY_FIEMAP)) {
- rc = lov_fiemap(lov, keylen, key, vallen, val, lsm);
- goto out;
+ *((u32 *)val) = lov_mds_md_size(max_stripe_count, LOV_MAGIC_V3);
+ } else if (KEY_IS(KEY_DEFAULT_EASIZE)) {
+ u32 def_stripe_count = min_t(u32, ld->ld_default_stripe_count,
+ LOV_MAX_STRIPE_COUNT);
+
+ *((u32 *)val) = lov_mds_md_size(def_stripe_count, LOV_MAGIC_V3);
} else if (KEY_IS(KEY_TGT_COUNT)) {
*((int *)val) = lov->desc.ld_tgt_count;
- rc = 0;
- goto out;
+ } else {
+ rc = -EINVAL;
}
- rc = -EINVAL;
-
-out:
obd_putref(obddev);
return rc;
}
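For KEY_MAX_EASIZE and KEY_DEFAULT_EASIZE the reply reduces to lov_mds_md_size() applied to a clamped stripe count: header size plus one per-stripe object record. The sketch below mimics that arithmetic in standalone C; the struct shapes, field sizes, and the clamp limit are rough assumptions, not the kernel definitions.

#include <stdio.h>
#include <stddef.h>

/* Approximate shapes of the on-disk records; the real definitions live
 * in lustre_idl.h and may differ - this only shows the arithmetic.
 */
struct ost_data { unsigned long long oi[2]; unsigned int gen, idx; };
struct mds_md_v3_hdr {
	unsigned int magic, pattern;
	unsigned long long oi[2];
	unsigned int stripe_size;
	unsigned short stripe_count, layout_gen;
	char pool_name[16];
};

/* header plus one per-stripe object entry, clamped to a maximum count */
static size_t easize(unsigned int stripes, unsigned int max_stripes)
{
	if (stripes > max_stripes)
		stripes = max_stripes;
	return sizeof(struct mds_md_v3_hdr) + stripes * sizeof(struct ost_data);
}

int main(void)
{
	/* e.g. 4 active targets, clamped at an assumed maximum of 2000 */
	printf("%zu\n", easize(4, 2000));
	return 0;
}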
@@ -1926,12 +1327,8 @@ static int lov_quotactl(struct obd_device *obd, struct obd_export *exp,
__u64 bhardlimit = 0;
int i, rc = 0;
- if (oqctl->qc_cmd != LUSTRE_Q_QUOTAON &&
- oqctl->qc_cmd != LUSTRE_Q_QUOTAOFF &&
- oqctl->qc_cmd != Q_GETOQUOTA &&
- oqctl->qc_cmd != Q_INITQUOTA &&
- oqctl->qc_cmd != LUSTRE_Q_SETQUOTA &&
- oqctl->qc_cmd != Q_FINVALIDATE) {
+ if (oqctl->qc_cmd != Q_GETOQUOTA &&
+ oqctl->qc_cmd != LUSTRE_Q_SETQUOTA) {
CERROR("bad quota opc %x for lov obd\n", oqctl->qc_cmd);
return -EFAULT;
}
@@ -1978,63 +1375,15 @@ static int lov_quotactl(struct obd_device *obd, struct obd_export *exp,
return rc;
}
-static int lov_quotacheck(struct obd_device *obd, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct lov_obd *lov = &obd->u.lov;
- int i, rc = 0;
-
- obd_getref(obd);
-
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- if (!lov->lov_tgts[i])
- continue;
-
- /* Skip quota check on the administratively disabled OSTs. */
- if (!lov->lov_tgts[i]->ltd_activate) {
- CWARN("lov idx %d was administratively disabled, skip quotacheck on it.\n",
- i);
- continue;
- }
-
- if (!lov->lov_tgts[i]->ltd_active) {
- CERROR("lov idx %d inactive\n", i);
- rc = -EIO;
- goto out;
- }
- }
-
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- int err;
-
- if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_activate)
- continue;
-
- err = obd_quotacheck(lov->lov_tgts[i]->ltd_exp, oqctl);
- if (err && !rc)
- rc = err;
- }
-
-out:
- obd_putref(obd);
-
- return rc;
-}
-
static struct obd_ops lov_obd_ops = {
.owner = THIS_MODULE,
.setup = lov_setup,
- .precleanup = lov_precleanup,
.cleanup = lov_cleanup,
/*.process_config = lov_process_config,*/
.connect = lov_connect,
.disconnect = lov_disconnect,
.statfs = lov_statfs,
.statfs_async = lov_statfs_async,
- .packmd = lov_packmd,
- .unpackmd = lov_unpackmd,
- .getattr_async = lov_getattr_async,
- .setattr_async = lov_setattr_async,
.iocontrol = lov_iocontrol,
.get_info = lov_get_info,
.set_info_async = lov_set_info_async,
@@ -2046,7 +1395,6 @@ static struct obd_ops lov_obd_ops = {
.getref = lov_getref,
.putref = lov_putref,
.quotactl = lov_quotactl,
- .quotacheck = lov_quotacheck,
};
struct kmem_cache *lov_oinfo_slab;
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index 52f736338887..76d4256fa828 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -39,6 +39,11 @@
#include "lov_cl_internal.h"
+static inline struct lov_device *lov_object_dev(struct lov_object *obj)
+{
+ return lu2lov_dev(obj->lo_cl.co_lu.lo_dev);
+}
+
/** \addtogroup lov
* @{
*/
@@ -51,7 +56,7 @@
struct lov_layout_operations {
int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
- struct lov_object *lov,
+ struct lov_object *lov, struct lov_stripe_md *lsm,
const struct cl_object_conf *conf,
union lov_layout_state *state);
int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
@@ -75,12 +80,11 @@ struct lov_layout_operations {
static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);
-void lov_lsm_put(struct cl_object *unused, struct lov_stripe_md *lsm)
+static void lov_lsm_put(struct lov_stripe_md *lsm)
{
if (lsm)
lov_free_memmd(&lsm);
}
-EXPORT_SYMBOL(lov_lsm_put);
/*****************************************************************************
*
@@ -97,17 +101,17 @@ static void lov_install_empty(const struct lu_env *env,
*/
}
-static int lov_init_empty(const struct lu_env *env,
- struct lov_device *dev, struct lov_object *lov,
+static int lov_init_empty(const struct lu_env *env, struct lov_device *dev,
+ struct lov_object *lov, struct lov_stripe_md *lsm,
const struct cl_object_conf *conf,
- union lov_layout_state *state)
+ union lov_layout_state *state)
{
return 0;
}
static void lov_install_raid0(const struct lu_env *env,
struct lov_object *lov,
- union lov_layout_state *state)
+ union lov_layout_state *state)
{
}
@@ -212,8 +216,8 @@ static int lov_page_slice_fixup(struct lov_object *lov,
return cl_object_header(stripe)->coh_page_bufsize;
}
-static int lov_init_raid0(const struct lu_env *env,
- struct lov_device *dev, struct lov_object *lov,
+static int lov_init_raid0(const struct lu_env *env, struct lov_device *dev,
+ struct lov_object *lov, struct lov_stripe_md *lsm,
const struct cl_object_conf *conf,
union lov_layout_state *state)
{
@@ -223,7 +227,6 @@ static int lov_init_raid0(const struct lu_env *env,
struct cl_object *stripe;
struct lov_thread_info *lti = lov_env_info(env);
struct cl_object_conf *subconf = &lti->lti_stripe_conf;
- struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
struct lu_fid *ofid = &lti->lti_fid;
struct lov_layout_raid0 *r0 = &state->raid0;
@@ -298,13 +301,11 @@ out:
return result;
}
-static int lov_init_released(const struct lu_env *env,
- struct lov_device *dev, struct lov_object *lov,
+static int lov_init_released(const struct lu_env *env, struct lov_device *dev,
+ struct lov_object *lov, struct lov_stripe_md *lsm,
const struct cl_object_conf *conf,
union lov_layout_state *state)
{
- struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
-
LASSERT(lsm);
LASSERT(lsm_is_released(lsm));
LASSERT(!lov->lo_lsm);
@@ -313,6 +314,40 @@ static int lov_init_released(const struct lu_env *env,
return 0;
}
+static struct cl_object *lov_find_subobj(const struct lu_env *env,
+ struct lov_object *lov,
+ struct lov_stripe_md *lsm,
+ int stripe_idx)
+{
+ struct lov_device *dev = lu2lov_dev(lov2lu(lov)->lo_dev);
+ struct lov_oinfo *oinfo = lsm->lsm_oinfo[stripe_idx];
+ struct lov_thread_info *lti = lov_env_info(env);
+ struct lu_fid *ofid = &lti->lti_fid;
+ struct cl_device *subdev;
+ struct cl_object *result;
+ int ost_idx;
+ int rc;
+
+ if (lov->lo_type != LLT_RAID0) {
+ result = NULL;
+ goto out;
+ }
+
+ ost_idx = oinfo->loi_ost_idx;
+ rc = ostid_to_fid(ofid, &oinfo->loi_oi, ost_idx);
+ if (rc) {
+ result = NULL;
+ goto out;
+ }
+
+ subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
+ result = lov_sub_find(env, subdev, ofid, NULL);
+out:
+ if (!result)
+ result = ERR_PTR(-EINVAL);
+ return result;
+}
+
static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
union lov_layout_state *state)
{
@@ -687,31 +722,24 @@ static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
}
static int lov_layout_change(const struct lu_env *unused,
- struct lov_object *lov,
+ struct lov_object *lov, struct lov_stripe_md *lsm,
const struct cl_object_conf *conf)
{
- int result;
- enum lov_layout_type llt = LLT_EMPTY;
+ enum lov_layout_type llt = lov_type(lsm);
union lov_layout_state *state = &lov->u;
const struct lov_layout_operations *old_ops;
const struct lov_layout_operations *new_ops;
-
- void *cookie;
struct lu_env *env;
int refcheck;
+ int rc;
LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch));
- if (conf->u.coc_md)
- llt = lov_type(conf->u.coc_md->lsm);
- LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));
-
- cookie = cl_env_reenter();
env = cl_env_get(&refcheck);
- if (IS_ERR(env)) {
- cl_env_reexit(cookie);
+ if (IS_ERR(env))
return PTR_ERR(env);
- }
+
+ LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));
CDEBUG(D_INODE, DFID" from %s to %s\n",
PFID(lu_object_fid(lov2lu(lov))),
@@ -720,38 +748,37 @@ static int lov_layout_change(const struct lu_env *unused,
old_ops = &lov_dispatch[lov->lo_type];
new_ops = &lov_dispatch[llt];
- result = cl_object_prune(env, &lov->lo_cl);
- if (result != 0)
+ rc = cl_object_prune(env, &lov->lo_cl);
+ if (rc)
+ goto out;
+
+ rc = old_ops->llo_delete(env, lov, &lov->u);
+ if (rc)
goto out;
- result = old_ops->llo_delete(env, lov, &lov->u);
- if (result == 0) {
- old_ops->llo_fini(env, lov, &lov->u);
+ old_ops->llo_fini(env, lov, &lov->u);
- LASSERT(atomic_read(&lov->lo_active_ios) == 0);
+ LASSERT(!atomic_read(&lov->lo_active_ios));
- lov->lo_type = LLT_EMPTY;
- /* page bufsize fixup */
- cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
+ lov->lo_type = LLT_EMPTY;
+
+ /* page bufsize fixup */
+ cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
lov_page_slice_fixup(lov, NULL);
- result = new_ops->llo_init(env,
- lu2lov_dev(lov->lo_cl.co_lu.lo_dev),
- lov, conf, state);
- if (result == 0) {
- new_ops->llo_install(env, lov, state);
- lov->lo_type = llt;
- } else {
- new_ops->llo_delete(env, lov, state);
- new_ops->llo_fini(env, lov, state);
- /* this file becomes an EMPTY file. */
- }
+ rc = new_ops->llo_init(env, lov_object_dev(lov), lov, lsm, conf, state);
+ if (rc) {
+ new_ops->llo_delete(env, lov, state);
+ new_ops->llo_fini(env, lov, state);
+ /* this file becomes an EMPTY file. */
+ goto out;
}
+ new_ops->llo_install(env, lov, state);
+ lov->lo_type = llt;
out:
cl_env_put(env, &refcheck);
- cl_env_reexit(cookie);
- return result;
+ return rc;
}
/*****************************************************************************
@@ -762,26 +789,38 @@ out:
int lov_object_init(const struct lu_env *env, struct lu_object *obj,
const struct lu_object_conf *conf)
{
- struct lov_device *dev = lu2lov_dev(obj->lo_dev);
struct lov_object *lov = lu2lov(obj);
+ struct lov_device *dev = lov_object_dev(lov);
const struct cl_object_conf *cconf = lu2cl_conf(conf);
union lov_layout_state *set = &lov->u;
const struct lov_layout_operations *ops;
- int result;
+ struct lov_stripe_md *lsm = NULL;
+ int rc;
init_rwsem(&lov->lo_type_guard);
atomic_set(&lov->lo_active_ios, 0);
init_waitqueue_head(&lov->lo_waitq);
-
cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
+ lov->lo_type = LLT_EMPTY;
+ if (cconf->u.coc_layout.lb_buf) {
+ lsm = lov_unpackmd(dev->ld_lov,
+ cconf->u.coc_layout.lb_buf,
+ cconf->u.coc_layout.lb_len);
+ if (IS_ERR(lsm))
+ return PTR_ERR(lsm);
+ }
+
/* no locking is necessary, as object is being created */
- lov->lo_type = lov_type(cconf->u.coc_md->lsm);
+ lov->lo_type = lov_type(lsm);
ops = &lov_dispatch[lov->lo_type];
- result = ops->llo_init(env, dev, lov, cconf, set);
- if (result == 0)
+ rc = ops->llo_init(env, dev, lov, lsm, cconf, set);
+ if (!rc)
ops->llo_install(env, lov, set);
- return result;
+
+ lov_lsm_put(lsm);
+
+ return rc;
}
static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
@@ -791,6 +830,15 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
struct lov_object *lov = cl2lov(obj);
int result = 0;
+ if (conf->coc_opc == OBJECT_CONF_SET &&
+ conf->u.coc_layout.lb_buf) {
+ lsm = lov_unpackmd(lov_object_dev(lov)->ld_lov,
+ conf->u.coc_layout.lb_buf,
+ conf->u.coc_layout.lb_len);
+ if (IS_ERR(lsm))
+ return PTR_ERR(lsm);
+ }
+
lov_conf_lock(lov);
if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
lov->lo_layout_invalid = true;
@@ -810,8 +858,6 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
LASSERT(conf->coc_opc == OBJECT_CONF_SET);
- if (conf->u.coc_md)
- lsm = conf->u.coc_md->lsm;
if ((!lsm && !lov->lo_lsm) ||
((lsm && lov->lo_lsm) &&
(lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
@@ -829,11 +875,12 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
goto out;
}
- result = lov_layout_change(env, lov, conf);
+ result = lov_layout_change(env, lov, lsm, conf);
lov->lo_layout_invalid = result != 0;
out:
lov_conf_unlock(lov);
+ lov_lsm_put(lsm);
CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n",
PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
return result;
@@ -911,6 +958,473 @@ int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
io);
}
+/**
+ * We calculate on which OST the mapping will end. If the length of mapping
+ * is greater than (stripe_size * stripe_count) then the last_stripe
+ * will be the one just before start_stripe. Else we check if the mapping
+ * intersects each OST and find last_stripe.
+ * This function returns the last_stripe and also sets the stripe_count
+ * over which the mapping is spread
+ *
+ * \param lsm [in] striping information for the file
+ * \param fm_start [in] logical start of mapping
+ * \param fm_end [in] logical end of mapping
+ * \param start_stripe [in] starting stripe of the mapping
+ * \param stripe_count [out] the number of stripes across which to map is
+ * returned
+ *
+ * \retval last_stripe return the last stripe of the mapping
+ */
+static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm,
+ loff_t fm_start, loff_t fm_end,
+ int start_stripe, int *stripe_count)
+{
+ int last_stripe;
+ loff_t obd_start;
+ loff_t obd_end;
+ int i, j;
+
+ if (fm_end - fm_start > lsm->lsm_stripe_size * lsm->lsm_stripe_count) {
+ last_stripe = (start_stripe < 1 ? lsm->lsm_stripe_count - 1 :
+ start_stripe - 1);
+ *stripe_count = lsm->lsm_stripe_count;
+ } else {
+ for (j = 0, i = start_stripe; j < lsm->lsm_stripe_count;
+ i = (i + 1) % lsm->lsm_stripe_count, j++) {
+ if (!(lov_stripe_intersects(lsm, i, fm_start, fm_end,
+ &obd_start, &obd_end)))
+ break;
+ }
+ *stripe_count = j;
+ last_stripe = (start_stripe + j - 1) % lsm->lsm_stripe_count;
+ }
+
+ return last_stripe;
+}
+
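fiemap_calc_last_stripe() has two cases: a mapping longer than one full stripe round touches every stripe and ends on the stripe just before start_stripe; a shorter mapping ends on the last stripe it actually intersects. A toy sketch of the wrap-around case, standalone and not part of the patch (last_stripe_full_round() and the sample counts are invented):

#include <stdio.h>

/* Wrap-around branch of the last-stripe calculation: if the mapping
 * spans more than stripe_size * stripe_count bytes, every stripe is
 * touched and the last one is the stripe just before start_stripe.
 */
static int last_stripe_full_round(int start_stripe, int stripe_count)
{
	return start_stripe < 1 ? stripe_count - 1 : start_stripe - 1;
}

int main(void)
{
	/* 4 stripes, mapping starts on stripe 2 and covers more than a round */
	printf("%d\n", last_stripe_full_round(2, 4));	/* prints 1 */
	/* starting on stripe 0 wraps to the last stripe */
	printf("%d\n", last_stripe_full_round(0, 4));	/* prints 3 */
	return 0;
}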
+/**
+ * Set fe_device and copy extents from local buffer into main return buffer.
+ *
+ * \param fiemap [out] fiemap to hold all extents
+ * \param lcl_fm_ext [in] array of fiemap extents obtained from the OSC layer
+ * \param ost_index [in] OST index to be written into the fm_device
+ * field for each extent
+ * \param ext_count [in] number of extents to be copied
+ * \param current_extent [in] where to start copying in the extent array
+ */
+static void fiemap_prepare_and_copy_exts(struct fiemap *fiemap,
+ struct fiemap_extent *lcl_fm_ext,
+ int ost_index, unsigned int ext_count,
+ int current_extent)
+{
+ unsigned int ext;
+ char *to;
+
+ for (ext = 0; ext < ext_count; ext++) {
+ lcl_fm_ext[ext].fe_device = ost_index;
+ lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
+ }
+
+ /* Copy fm_extent's from fm_local to return buffer */
+ to = (char *)fiemap + fiemap_count_to_size(current_extent);
+ memcpy(to, lcl_fm_ext, ext_count * sizeof(struct fiemap_extent));
+}
+
+#define FIEMAP_BUFFER_SIZE 4096
+
+/**
+ * Non-zero fe_logical indicates that this is a continuation FIEMAP
+ * call. The local end offset and the device are sent in the first
+ * fm_extent. This function calculates the stripe number from the index.
+ * This function returns a stripe_no on which mapping is to be restarted.
+ *
+ * This function returns fm_end_offset which is the in-OST offset at which
+ * mapping should be restarted. If fm_end_offset=0 is returned then caller
+ * will re-calculate proper offset in next stripe.
+ * Note that the first extent is passed to lov_get_info via the value field.
+ *
+ * \param fiemap [in] fiemap request header
+ * \param lsm [in] striping information for the file
+ * \param fm_start [in] logical start of mapping
+ * \param fm_end [in] logical end of mapping
+ * \param start_stripe [out] starting stripe will be returned in this
+ */
+static loff_t fiemap_calc_fm_end_offset(struct fiemap *fiemap,
+ struct lov_stripe_md *lsm,
+ loff_t fm_start, loff_t fm_end,
+ int *start_stripe)
+{
+ loff_t local_end = fiemap->fm_extents[0].fe_logical;
+ loff_t lun_start, lun_end;
+ loff_t fm_end_offset;
+ int stripe_no = -1;
+ int i;
+
+ if (!fiemap->fm_extent_count || !fiemap->fm_extents[0].fe_logical)
+ return 0;
+
+ /* Find out stripe_no from ost_index saved in the fe_device */
+ for (i = 0; i < lsm->lsm_stripe_count; i++) {
+ struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
+
+ if (lov_oinfo_is_dummy(oinfo))
+ continue;
+
+ if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) {
+ stripe_no = i;
+ break;
+ }
+ }
+
+ if (stripe_no == -1)
+ return -EINVAL;
+
+ /*
+ * If we have finished mapping on previous device, shift logical
+ * offset to start of next device
+ */
+ if (lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end,
+ &lun_start, &lun_end) &&
+ local_end < lun_end) {
+ fm_end_offset = local_end;
+ *start_stripe = stripe_no;
+ } else {
+ /* This is a special value to indicate that caller should
+ * calculate offset in next stripe.
+ */
+ fm_end_offset = 0;
+ *start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count;
+ }
+
+ return fm_end_offset;
+}
+
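fiemap_calc_fm_end_offset() decides where a continuation FIEMAP call resumes: inside the same stripe if the previous call stopped before that stripe's range was exhausted, otherwise it returns the special value 0 and advances start_stripe so the caller recomputes the offset on the next stripe. A simplified standalone sketch of that decision, with restart_offset() and the byte offsets invented for illustration:

#include <stdio.h>

/* Decide where a continuation call restarts.  'local_end' is where the
 * previous call stopped on 'stripe_no'; 'lun_end' is where that stripe's
 * portion of the requested range ends.  Returns the in-stripe restart
 * offset, or 0 with *start_stripe advanced when the stripe is done.
 */
static unsigned long long restart_offset(unsigned long long local_end,
					 unsigned long long lun_end,
					 int stripe_no, int stripe_count,
					 int *start_stripe)
{
	if (local_end < lun_end) {
		*start_stripe = stripe_no;	/* resume on the same stripe */
		return local_end;
	}
	*start_stripe = (stripe_no + 1) % stripe_count;
	return 0;				/* caller recomputes offset */
}

int main(void)
{
	unsigned long long off;
	int start;

	off = restart_offset(4096, 1048576, 1, 4, &start);
	printf("%llu %d\n", off, start);	/* "4096 1": resume mid-stripe */

	off = restart_offset(1048576, 1048576, 3, 4, &start);
	printf("%llu %d\n", off, start);	/* "0 0": stripe done, wrap to 0 */
	return 0;
}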
+/**
+ * Break down the FIEMAP request and send appropriate calls to individual OSTs.
+ * This also handles the restarting of FIEMAP calls in case mapping overflows
+ * the available number of extents in single call.
+ *
+ * \param env [in] lustre environment
+ * \param obj [in] file object
+ * \param fmkey [in] fiemap request header and other info
+ * \param fiemap [out] fiemap buffer holding retrieved map extents
+ * \param buflen [in/out] max buffer length of @fiemap; when iterating
+ * over each OST it is used to limit the map size requested
+ * \retval 0 success
+ * \retval < 0 error
+ */
+static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj,
+ struct ll_fiemap_info_key *fmkey,
+ struct fiemap *fiemap, size_t *buflen)
+{
+ struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov;
+ unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
+ struct fiemap_extent *lcl_fm_ext;
+ struct cl_object *subobj = NULL;
+ struct fiemap *fm_local = NULL;
+ struct lov_stripe_md *lsm;
+ loff_t fm_start;
+ loff_t fm_end;
+ loff_t fm_length;
+ loff_t fm_end_offset;
+ int count_local;
+ int ost_index = 0;
+ int start_stripe;
+ int current_extent = 0;
+ int rc = 0;
+ int last_stripe;
+ int cur_stripe = 0;
+ int cur_stripe_wrap = 0;
+ int stripe_count;
+ /* Whether we have collected enough extents */
+ bool enough = false;
+ /* EOF for object */
+ bool ost_eof = false;
+ /* done with required mapping for this OST? */
+ bool ost_done = false;
+
+ lsm = lov_lsm_addref(cl2lov(obj));
+ if (!lsm)
+ return -ENODATA;
+
+ /**
+ * If the stripe_count > 1 and the application does not understand
+ * DEVICE_ORDER flag, it cannot interpret the extents correctly.
+ */
+ if (lsm->lsm_stripe_count > 1 &&
+ !(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) {
+ rc = -ENOTSUPP;
+ goto out;
+ }
+
+ if (lsm_is_released(lsm)) {
+ if (fiemap->fm_start < fmkey->lfik_oa.o_size) {
+ /**
+ * released file, return a minimal FIEMAP if
+ * request fits in file-size.
+ */
+ fiemap->fm_mapped_extents = 1;
+ fiemap->fm_extents[0].fe_logical = fiemap->fm_start;
+ if (fiemap->fm_start + fiemap->fm_length <
+ fmkey->lfik_oa.o_size)
+ fiemap->fm_extents[0].fe_length =
+ fiemap->fm_length;
+ else
+ fiemap->fm_extents[0].fe_length =
+ fmkey->lfik_oa.o_size -
+ fiemap->fm_start;
+ fiemap->fm_extents[0].fe_flags |=
+ FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST;
+ }
+ rc = 0;
+ goto out;
+ }
+
+ if (fiemap_count_to_size(fiemap->fm_extent_count) < buffer_size)
+ buffer_size = fiemap_count_to_size(fiemap->fm_extent_count);
+
+ fm_local = libcfs_kvzalloc(buffer_size, GFP_NOFS);
+ if (!fm_local) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ lcl_fm_ext = &fm_local->fm_extents[0];
+ count_local = fiemap_size_to_count(buffer_size);
+
+ fm_start = fiemap->fm_start;
+ fm_length = fiemap->fm_length;
+ /* Calculate start stripe, last stripe and length of mapping */
+ start_stripe = lov_stripe_number(lsm, fm_start);
+ fm_end = (fm_length == ~0ULL) ? fmkey->lfik_oa.o_size :
+ fm_start + fm_length - 1;
+ /* If fm_length != ~0ULL but fm_start + fm_length - 1 exceeds file size */
+ if (fm_end > fmkey->lfik_oa.o_size)
+ fm_end = fmkey->lfik_oa.o_size;
+
+ last_stripe = fiemap_calc_last_stripe(lsm, fm_start, fm_end,
+ start_stripe, &stripe_count);
+ fm_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fm_start, fm_end,
+ &start_stripe);
+ if (fm_end_offset == -EINVAL) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /**
+ * Requested extent count exceeds the fiemap buffer size, shrink our
+ * ambition.
+ */
+ if (fiemap_count_to_size(fiemap->fm_extent_count) > *buflen)
+ fiemap->fm_extent_count = fiemap_size_to_count(*buflen);
+ if (!fiemap->fm_extent_count)
+ count_local = 0;
+
+ /* Check each stripe */
+ for (cur_stripe = start_stripe; stripe_count > 0;
+ --stripe_count,
+ cur_stripe = (cur_stripe + 1) % lsm->lsm_stripe_count) {
+ loff_t req_fm_len; /* Stores length of required mapping */
+ loff_t len_mapped_single_call;
+ loff_t lun_start;
+ loff_t lun_end;
+ loff_t obd_object_end;
+ unsigned int ext_count;
+
+ cur_stripe_wrap = cur_stripe;
+
+ /* Find out range of mapping on this stripe */
+ if (!(lov_stripe_intersects(lsm, cur_stripe, fm_start, fm_end,
+ &lun_start, &obd_object_end)))
+ continue;
+
+ if (lov_oinfo_is_dummy(lsm->lsm_oinfo[cur_stripe])) {
+ rc = -EIO;
+ goto out;
+ }
+
+ /*
+ * If this is a continuation FIEMAP call and we are on
+ * starting stripe then lun_start needs to be set to
+ * fm_end_offset
+ */
+ if (fm_end_offset && cur_stripe == start_stripe)
+ lun_start = fm_end_offset;
+
+ if (fm_length != ~0ULL) {
+ /* Handle fm_start + fm_length overflow */
+ if (fm_start + fm_length < fm_start)
+ fm_length = ~0ULL - fm_start;
+ lun_end = lov_size_to_stripe(lsm, fm_start + fm_length,
+ cur_stripe);
+ } else {
+ lun_end = ~0ULL;
+ }
+
+ if (lun_start == lun_end)
+ continue;
+
+ req_fm_len = obd_object_end - lun_start;
+ fm_local->fm_length = 0;
+ len_mapped_single_call = 0;
+
+ /* find the lovsub object */
+ subobj = lov_find_subobj(env, cl2lov(obj), lsm,
+ cur_stripe);
+ if (IS_ERR(subobj)) {
+ rc = PTR_ERR(subobj);
+ goto out;
+ }
+ /*
+ * If the output buffer is very large and the objects have many
+ * extents we may need to loop on a single OST repeatedly
+ */
+ ost_eof = false;
+ ost_done = false;
+ do {
+ if (fiemap->fm_extent_count > 0) {
+ /* Don't get too many extents. */
+ if (current_extent + count_local >
+ fiemap->fm_extent_count)
+ count_local = fiemap->fm_extent_count -
+ current_extent;
+ }
+
+ lun_start += len_mapped_single_call;
+ fm_local->fm_length = req_fm_len -
+ len_mapped_single_call;
+ req_fm_len = fm_local->fm_length;
+ fm_local->fm_extent_count = enough ? 1 : count_local;
+ fm_local->fm_mapped_extents = 0;
+ fm_local->fm_flags = fiemap->fm_flags;
+
+ ost_index = lsm->lsm_oinfo[cur_stripe]->loi_ost_idx;
+
+ if (ost_index < 0 ||
+ ost_index >= lov->desc.ld_tgt_count) {
+ rc = -EINVAL;
+ goto obj_put;
+ }
+ /*
+ * If OST is inactive, return extent with UNKNOWN
+ * flag.
+ */
+ if (!lov->lov_tgts[ost_index]->ltd_active) {
+ fm_local->fm_flags |= FIEMAP_EXTENT_LAST;
+ fm_local->fm_mapped_extents = 1;
+
+ lcl_fm_ext[0].fe_logical = lun_start;
+ lcl_fm_ext[0].fe_length = obd_object_end -
+ lun_start;
+ lcl_fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
+
+ goto inactive_tgt;
+ }
+
+ fm_local->fm_start = lun_start;
+ fm_local->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
+ memcpy(&fmkey->lfik_fiemap, fm_local, sizeof(*fm_local));
+ *buflen = fiemap_count_to_size(fm_local->fm_extent_count);
+
+ rc = cl_object_fiemap(env, subobj, fmkey, fm_local,
+ buflen);
+ if (rc)
+ goto obj_put;
+inactive_tgt:
+ ext_count = fm_local->fm_mapped_extents;
+ if (!ext_count) {
+ ost_done = true;
+ /*
+ * If the last stripe has a hole at the end,
+ * we need to return
+ */
+ if (cur_stripe_wrap == last_stripe) {
+ fiemap->fm_mapped_extents = 0;
+ goto finish;
+ }
+ break;
+ } else if (enough) {
+ /*
+ * We've collected enough extents and there are
+ * more extents after it.
+ */
+ goto finish;
+ }
+
+ /* If we just need the number of extents, go to the next device */
+ if (!fiemap->fm_extent_count) {
+ current_extent += ext_count;
+ break;
+ }
+
+ /* prepare to copy retrieved map extents */
+ len_mapped_single_call =
+ lcl_fm_ext[ext_count - 1].fe_logical -
+ lun_start + lcl_fm_ext[ext_count - 1].fe_length;
+
+ /* Have we finished mapping on this device? */
+ if (req_fm_len <= len_mapped_single_call)
+ ost_done = true;
+
+ /*
+ * Clear the EXTENT_LAST flag which can be present on
+ * the last extent
+ */
+ if (lcl_fm_ext[ext_count - 1].fe_flags &
+ FIEMAP_EXTENT_LAST)
+ lcl_fm_ext[ext_count - 1].fe_flags &=
+ ~FIEMAP_EXTENT_LAST;
+
+ if (lov_stripe_size(lsm,
+ lcl_fm_ext[ext_count - 1].fe_logical +
+ lcl_fm_ext[ext_count - 1].fe_length,
+ cur_stripe) >= fmkey->lfik_oa.o_size)
+ ost_eof = true;
+
+ fiemap_prepare_and_copy_exts(fiemap, lcl_fm_ext,
+ ost_index, ext_count,
+ current_extent);
+ current_extent += ext_count;
+
+ /* Ran out of available extents? */
+ if (current_extent >= fiemap->fm_extent_count)
+ enough = true;
+ } while (!ost_done && !ost_eof);
+
+ cl_object_put(env, subobj);
+ subobj = NULL;
+
+ if (cur_stripe_wrap == last_stripe)
+ goto finish;
+ } /* for each stripe */
+finish:
+ /*
+ * Indicate that we are returning device offsets unless file just has
+ * single stripe
+ */
+ if (lsm->lsm_stripe_count > 1)
+ fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
+
+ if (!fiemap->fm_extent_count)
+ goto skip_last_device_calc;
+
+ /*
+ * Check if we have reached the last stripe and whether mapping for that
+ * stripe is done.
+ */
+ if ((cur_stripe_wrap == last_stripe) && (ost_done || ost_eof))
+ fiemap->fm_extents[current_extent - 1].fe_flags |=
+ FIEMAP_EXTENT_LAST;
+skip_last_device_calc:
+ fiemap->fm_mapped_extents = current_extent;
+obj_put:
+ if (subobj)
+ cl_object_put(env, subobj);
+out:
+ kvfree(fm_local);
+ lov_lsm_put(lsm);
+ return rc;
+}
+
static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
struct lov_user_md __user *lum)
{
@@ -923,10 +1437,53 @@ static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
return -ENODATA;
rc = lov_getstripe(cl2lov(obj), lsm, lum);
- lov_lsm_put(obj, lsm);
+ lov_lsm_put(lsm);
return rc;
}
+static int lov_object_layout_get(const struct lu_env *env,
+ struct cl_object *obj,
+ struct cl_layout *cl)
+{
+ struct lov_object *lov = cl2lov(obj);
+ struct lov_stripe_md *lsm = lov_lsm_addref(lov);
+ struct lu_buf *buf = &cl->cl_buf;
+ ssize_t rc;
+
+ if (!lsm) {
+ cl->cl_size = 0;
+ cl->cl_layout_gen = CL_LAYOUT_GEN_EMPTY;
+ cl->cl_is_released = false;
+
+ return 0;
+ }
+
+ cl->cl_size = lov_mds_md_size(lsm->lsm_stripe_count, lsm->lsm_magic);
+ cl->cl_layout_gen = lsm->lsm_layout_gen;
+ cl->cl_is_released = lsm_is_released(lsm);
+
+ rc = lov_lsm_pack(lsm, buf->lb_buf, buf->lb_len);
+ lov_lsm_put(lsm);
+
+ return rc < 0 ? rc : 0;
+}
+
+static loff_t lov_object_maxbytes(struct cl_object *obj)
+{
+ struct lov_object *lov = cl2lov(obj);
+ struct lov_stripe_md *lsm = lov_lsm_addref(lov);
+ loff_t maxbytes;
+
+ if (!lsm)
+ return LLONG_MAX;
+
+ maxbytes = lsm->lsm_maxbytes;
+
+ lov_lsm_put(lsm);
+
+ return maxbytes;
+}
+
static const struct cl_object_operations lov_ops = {
.coo_page_init = lov_page_init,
.coo_lock_init = lov_lock_init,
@@ -934,7 +1491,10 @@ static const struct cl_object_operations lov_ops = {
.coo_attr_get = lov_attr_get,
.coo_attr_update = lov_attr_update,
.coo_conf_set = lov_conf_set,
- .coo_getstripe = lov_object_getstripe
+ .coo_getstripe = lov_object_getstripe,
+ .coo_layout_get = lov_object_layout_get,
+ .coo_maxbytes = lov_object_maxbytes,
+ .coo_fiemap = lov_object_fiemap,
};
static const struct lu_object_operations lov_lu_obj_ops = {
@@ -986,22 +1546,6 @@ struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
return lsm;
}
-struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj)
-{
- struct lu_object *luobj;
- struct lov_stripe_md *lsm = NULL;
-
- if (!clobj)
- return NULL;
-
- luobj = lu_object_locate(&cl_object_header(clobj)->coh_lu,
- &lov_device_type);
- if (luobj)
- lsm = lov_lsm_addref(lu2lov(luobj));
- return lsm;
-}
-EXPORT_SYMBOL(lov_lsm_get);
-
int lov_read_and_clear_async_rc(struct cl_object *clob)
{
struct lu_object *luobj;
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index be6e9857ce2a..6c93d180aef7 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -38,14 +38,17 @@
#define DEBUG_SUBSYSTEM S_LOV
+#include "../include/lustre/lustre_idl.h"
+#include "../include/lustre/lustre_user.h"
+
#include "../include/lustre_net.h"
+#include "../include/lustre_swab.h"
#include "../include/obd.h"
#include "../include/obd_class.h"
#include "../include/obd_support.h"
-#include "../include/lustre/lustre_user.h"
-#include "lov_internal.h"
#include "lov_cl_internal.h"
+#include "lov_internal.h"
void lov_dump_lmm_common(int level, void *lmmp)
{
@@ -97,120 +100,54 @@ void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm)
le16_to_cpu(lmm->lmm_stripe_count));
}
-/* Pack LOV object metadata for disk storage. It is packed in LE byte
- * order and is opaque to the networking layer.
+/**
+ * Pack LOV striping metadata for disk storage format (in little
+ * endian byte order).
*
- * XXX In the future, this will be enhanced to get the EA size from the
- * underlying OSC device(s) to get their EA sizes so we can stack
- * LOVs properly. For now lov_mds_md_size() just assumes one u64
- * per stripe.
+ * This follows the getxattr() conventions. If \a buf_size is zero
+ * then return the size needed. If \a buf_size is too small then
+ * return -ERANGE. Otherwise return the size of the result.
*/
-int lov_obd_packmd(struct lov_obd *lov, struct lov_mds_md **lmmp,
- struct lov_stripe_md *lsm)
+ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf,
+ size_t buf_size)
{
- struct lov_mds_md_v1 *lmmv1;
- struct lov_mds_md_v3 *lmmv3;
- __u16 stripe_count;
struct lov_ost_data_v1 *lmm_objects;
- int lmm_size, lmm_magic;
- int i;
- int cplen = 0;
-
- if (lsm) {
- lmm_magic = lsm->lsm_magic;
- } else {
- if (lmmp && *lmmp)
- lmm_magic = le32_to_cpu((*lmmp)->lmm_magic);
- else
- /* lsm == NULL and lmmp == NULL */
- lmm_magic = LOV_MAGIC;
- }
-
- if ((lmm_magic != LOV_MAGIC_V1) &&
- (lmm_magic != LOV_MAGIC_V3)) {
- CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
- lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
- return -EINVAL;
- }
-
- if (lsm) {
- /* If we are just sizing the EA, limit the stripe count
- * to the actual number of OSTs in this filesystem.
- */
- if (!lmmp) {
- stripe_count = lov_get_stripecnt(lov, lmm_magic,
- lsm->lsm_stripe_count);
- lsm->lsm_stripe_count = stripe_count;
- } else if (!lsm_is_released(lsm)) {
- stripe_count = lsm->lsm_stripe_count;
- } else {
- stripe_count = 0;
- }
- } else {
- /*
- * To calculate maximum easize by active targets at present,
- * which is exactly the maximum easize to be seen by LOV
- */
- stripe_count = lov->desc.ld_active_tgt_count;
- }
+ struct lov_mds_md_v1 *lmmv1 = buf;
+ struct lov_mds_md_v3 *lmmv3 = buf;
+ size_t lmm_size;
+ unsigned int i;
- /* XXX LOV STACKING call into osc for sizes */
- lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
-
- if (!lmmp)
+ lmm_size = lov_mds_md_size(lsm->lsm_stripe_count, lsm->lsm_magic);
+ if (!buf_size)
return lmm_size;
- if (*lmmp && !lsm) {
- stripe_count = le16_to_cpu((*lmmp)->lmm_stripe_count);
- lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
- kvfree(*lmmp);
- *lmmp = NULL;
- return 0;
- }
-
- if (!*lmmp) {
- *lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
- if (!*lmmp)
- return -ENOMEM;
- }
-
- CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d\n",
- lmm_magic, lmm_size);
-
- lmmv1 = *lmmp;
- lmmv3 = (struct lov_mds_md_v3 *)*lmmp;
- if (lmm_magic == LOV_MAGIC_V3)
- lmmv3->lmm_magic = cpu_to_le32(LOV_MAGIC_V3);
- else
- lmmv1->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
-
- if (!lsm)
- return lmm_size;
+ if (buf_size < lmm_size)
+ return -ERANGE;
- /* lmmv1 and lmmv3 point to the same struct and have the
+ /*
+ * lmmv1 and lmmv3 point to the same struct and have the
* same first fields
*/
+ lmmv1->lmm_magic = cpu_to_le32(lsm->lsm_magic);
lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
- lmmv1->lmm_stripe_count = cpu_to_le16(stripe_count);
+ lmmv1->lmm_stripe_count = cpu_to_le16(lsm->lsm_stripe_count);
lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);
+
if (lsm->lsm_magic == LOV_MAGIC_V3) {
- cplen = strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
- sizeof(lmmv3->lmm_pool_name));
- if (cplen >= sizeof(lmmv3->lmm_pool_name))
- return -E2BIG;
+ CLASSERT(sizeof(lsm->lsm_pool_name) ==
+ sizeof(lmmv3->lmm_pool_name));
+ strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
+ sizeof(lmmv3->lmm_pool_name));
lmm_objects = lmmv3->lmm_objects;
} else {
lmm_objects = lmmv1->lmm_objects;
}
- for (i = 0; i < stripe_count; i++) {
+ for (i = 0; i < lsm->lsm_stripe_count; i++) {
struct lov_oinfo *loi = lsm->lsm_oinfo[i];
- /* XXX LOV STACKING call down to osc_packmd() to do packing */
- LASSERTF(ostid_id(&loi->loi_oi) != 0, "lmm_oi "DOSTID
- " stripe %u/%u idx %u\n", POSTID(&lmmv1->lmm_oi),
- i, stripe_count, loi->loi_ost_idx);
+
ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
@@ -219,15 +156,6 @@ int lov_obd_packmd(struct lov_obd *lov, struct lov_mds_md **lmmp,
return lmm_size;
}
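The getxattr()-style contract documented above is easiest to see from the caller's side. A minimal sketch, not part of this patch; the helper name is hypothetical, and it relies only on lov_lsm_pack(), libcfs_kvzalloc() and kvfree() as they are used elsewhere in this file:

/* Hypothetical helper: probe the size, allocate, then pack. */
static ssize_t example_lsm_to_buf(const struct lov_stripe_md *lsm, void **bufp)
{
	ssize_t size;
	void *buf;

	size = lov_lsm_pack(lsm, NULL, 0);	/* buf_size == 0: size probe */
	if (size < 0)
		return size;

	buf = libcfs_kvzalloc(size, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	size = lov_lsm_pack(lsm, buf, size);	/* -ERANGE if buf too small */
	if (size < 0) {
		kvfree(buf);
		return size;
	}

	*bufp = buf;
	return size;
}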
-int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
- struct lov_stripe_md *lsm)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lov_obd *lov = &obd->u.lov;
-
- return lov_obd_packmd(lov, lmmp, lsm);
-}
-
/* Find the max stripecount we should use */
__u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count)
{
@@ -270,34 +198,34 @@ static int lov_verify_lmm(void *lmm, int lmm_bytes, __u16 *stripe_count)
return rc;
}
-int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
- int pattern, int magic)
+struct lov_stripe_md *lov_lsm_alloc(u16 stripe_count, u32 pattern, u32 magic)
{
- int i, lsm_size;
+ struct lov_stripe_md *lsm;
+ unsigned int i;
- CDEBUG(D_INFO, "alloc lsm, stripe_count %d\n", stripe_count);
+ CDEBUG(D_INFO, "alloc lsm, stripe_count %u\n", stripe_count);
- *lsmp = lsm_alloc_plain(stripe_count, &lsm_size);
- if (!*lsmp) {
- CERROR("can't allocate lsmp stripe_count %d\n", stripe_count);
- return -ENOMEM;
+ lsm = lsm_alloc_plain(stripe_count);
+ if (!lsm) {
+ CERROR("cannot allocate LSM stripe_count %u\n", stripe_count);
+ return ERR_PTR(-ENOMEM);
}
- atomic_set(&(*lsmp)->lsm_refc, 1);
- spin_lock_init(&(*lsmp)->lsm_lock);
- (*lsmp)->lsm_magic = magic;
- (*lsmp)->lsm_stripe_count = stripe_count;
- (*lsmp)->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES * stripe_count;
- (*lsmp)->lsm_pattern = pattern;
- (*lsmp)->lsm_pool_name[0] = '\0';
- (*lsmp)->lsm_layout_gen = 0;
+ atomic_set(&lsm->lsm_refc, 1);
+ spin_lock_init(&lsm->lsm_lock);
+ lsm->lsm_magic = magic;
+ lsm->lsm_stripe_count = stripe_count;
+ lsm->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES * stripe_count;
+ lsm->lsm_pattern = pattern;
+ lsm->lsm_pool_name[0] = '\0';
+ lsm->lsm_layout_gen = 0;
if (stripe_count > 0)
- (*lsmp)->lsm_oinfo[0]->loi_ost_idx = ~0;
+ lsm->lsm_oinfo[0]->loi_ost_idx = ~0;
for (i = 0; i < stripe_count; i++)
- loi_init((*lsmp)->lsm_oinfo[i]);
+ loi_init(lsm->lsm_oinfo[i]);
- return lsm_size;
+ return lsm;
}
int lov_free_memmd(struct lov_stripe_md **lsmp)
@@ -317,56 +245,34 @@ int lov_free_memmd(struct lov_stripe_md **lsmp)
/* Unpack LOV object metadata from disk storage. It is packed in LE byte
* order and is opaque to the networking layer.
*/
-int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
- struct lov_mds_md *lmm, int lmm_bytes)
+struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, struct lov_mds_md *lmm,
+ size_t lmm_size)
{
- struct obd_device *obd = class_exp2obd(exp);
- struct lov_obd *lov = &obd->u.lov;
- int rc = 0, lsm_size;
- __u16 stripe_count;
- __u32 magic;
- __u32 pattern;
-
- /* If passed an MDS struct use values from there, otherwise defaults */
- if (lmm) {
- rc = lov_verify_lmm(lmm, lmm_bytes, &stripe_count);
- if (rc)
- return rc;
- magic = le32_to_cpu(lmm->lmm_magic);
- pattern = le32_to_cpu(lmm->lmm_pattern);
- } else {
- magic = LOV_MAGIC;
- stripe_count = lov_get_stripecnt(lov, magic, 0);
- pattern = LOV_PATTERN_RAID0;
- }
+ struct lov_stripe_md *lsm;
+ u16 stripe_count;
+ u32 pattern;
+ u32 magic;
+ int rc;
- /* If we aren't passed an lsmp struct, we just want the size */
- if (!lsmp) {
- /* XXX LOV STACKING call into osc for sizes */
- LBUG();
- return lov_stripe_md_size(stripe_count);
- }
- /* If we are passed an allocated struct but nothing to unpack, free */
- if (*lsmp && !lmm) {
- lov_free_memmd(lsmp);
- return 0;
- }
+ rc = lov_verify_lmm(lmm, lmm_size, &stripe_count);
+ if (rc)
+ return ERR_PTR(rc);
- lsm_size = lov_alloc_memmd(lsmp, stripe_count, pattern, magic);
- if (lsm_size < 0)
- return lsm_size;
+ magic = le32_to_cpu(lmm->lmm_magic);
+ pattern = le32_to_cpu(lmm->lmm_pattern);
- /* If we are passed a pointer but nothing to unpack, we only alloc */
- if (!lmm)
- return lsm_size;
+ lsm = lov_lsm_alloc(stripe_count, pattern, magic);
+ if (IS_ERR(lsm))
+ return lsm;
- rc = lsm_op_find(magic)->lsm_unpackmd(lov, *lsmp, lmm);
+ LASSERT(lsm_op_find(magic));
+ rc = lsm_op_find(magic)->lsm_unpackmd(lov, lsm, lmm);
if (rc) {
- lov_free_memmd(lsmp);
- return rc;
+ lov_free_memmd(&lsm);
+ return ERR_PTR(rc);
}
- return lsm_size;
+ return lsm;
}
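lov_unpackmd() now returns the stripe metadata directly and reports failure via ERR_PTR() rather than through an lsmp out parameter. A hedged caller sketch (not in this patch; the function name is invented), pairing it with lov_free_memmd() exactly as the error path above does:

static int example_unpack_and_drop(struct lov_obd *lov,
				   struct lov_mds_md *lmm, size_t lmm_size)
{
	struct lov_stripe_md *lsm;

	lsm = lov_unpackmd(lov, lmm, lmm_size);
	if (IS_ERR(lsm))
		return PTR_ERR(lsm);	/* rc from lov_verify_lmm() or ->lsm_unpackmd() */

	/* ... use lsm ... */

	lov_free_memmd(&lsm);		/* matching release, as on the error path above */
	return 0;
}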
/* Retrieve object striping information.
@@ -378,15 +284,14 @@ int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
struct lov_user_md __user *lump)
{
- /*
- * XXX huge struct allocated on stack.
- */
/* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
- struct lov_obd *lov;
struct lov_user_md_v3 lum;
- struct lov_mds_md *lmmk = NULL;
- int rc, lmmk_size, lmm_size;
- int lum_size;
+ struct lov_mds_md *lmmk;
+ u32 stripe_count;
+ ssize_t lmm_size;
+ size_t lmmk_size;
+ size_t lum_size;
+ int rc;
mm_segment_t seg;
if (!lsm)
@@ -399,6 +304,18 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
seg = get_fs();
set_fs(KERNEL_DS);
+ if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
+ CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
+ lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
+ rc = -EIO;
+ goto out;
+ }
+
+ if (!lsm_is_released(lsm))
+ stripe_count = lsm->lsm_stripe_count;
+ else
+ stripe_count = 0;
+
/* we only need the header part from user space to get lmm_magic and
* lmm_stripe_count, (the header part is common to v1 and v3)
*/
@@ -417,32 +334,40 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
if (lum.lmm_stripe_count &&
(lum.lmm_stripe_count < lsm->lsm_stripe_count)) {
/* Return right size of stripe to user */
- lum.lmm_stripe_count = lsm->lsm_stripe_count;
+ lum.lmm_stripe_count = stripe_count;
rc = copy_to_user(lump, &lum, lum_size);
rc = -EOVERFLOW;
goto out;
}
- lov = lu2lov_dev(obj->lo_cl.co_lu.lo_dev)->ld_lov;
- rc = lov_obd_packmd(lov, &lmmk, lsm);
- if (rc < 0)
+ lmmk_size = lov_mds_md_size(stripe_count, lsm->lsm_magic);
+
+ lmmk = libcfs_kvzalloc(lmmk_size, GFP_NOFS);
+ if (!lmmk) {
+ rc = -ENOMEM;
goto out;
- lmmk_size = rc;
- lmm_size = rc;
- rc = 0;
+ }
+
+ lmm_size = lov_lsm_pack(lsm, lmmk, lmmk_size);
+ if (lmm_size < 0) {
+ rc = lmm_size;
+ goto out_free;
+ }
/* FIXME: Bug 1185 - copy fields properly when structs change */
/* struct lov_user_md_v3 and struct lov_mds_md_v3 must be the same */
CLASSERT(sizeof(lum) == sizeof(struct lov_mds_md_v3));
CLASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lmmk->lmm_objects[0]));
- if ((cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) &&
- ((lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) ||
- (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)))) {
+ if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC &&
+ (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
+ lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3))) {
lustre_swab_lov_mds_md(lmmk);
lustre_swab_lov_user_md_objects(
(struct lov_user_ost_data *)lmmk->lmm_objects,
lmmk->lmm_stripe_count);
}
+
if (lum.lmm_magic == LOV_USER_MAGIC) {
/* User request for v1, we need skip lmm_pool_name */
if (lmmk->lmm_magic == LOV_MAGIC_V3) {
@@ -474,9 +399,11 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
((struct lov_user_md *)lmmk)->lmm_stripe_count = lum.lmm_stripe_count;
if (copy_to_user(lump, lmmk, lmm_size))
rc = -EFAULT;
+ else
+ rc = 0;
out_free:
- kfree(lmmk);
+ kvfree(lmmk);
out:
set_fs(seg);
return rc;
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
index 00bfabad78eb..62ceb6dfdfdf 100644
--- a/drivers/staging/lustre/lustre/lov/lov_page.c
+++ b/drivers/staging/lustre/lustre/lov/lov_page.c
@@ -49,51 +49,6 @@
*
*/
-/**
- * Adjust the stripe index by layout of raid0. @max_index is the maximum
- * page index covered by an underlying DLM lock.
- * This function converts max_index from stripe level to file level, and make
- * sure it's not beyond one stripe.
- */
-static int lov_raid0_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused,
- pgoff_t *max_index)
-{
- struct lov_object *loo = cl2lov(slice->cpl_obj);
- struct lov_layout_raid0 *r0 = lov_r0(loo);
- pgoff_t index = *max_index;
- unsigned int pps; /* pages per stripe */
-
- CDEBUG(D_READA, DFID "*max_index = %lu, nr = %d\n",
- PFID(lu_object_fid(lov2lu(loo))), index, r0->lo_nr);
-
- if (index == 0) /* the page is not covered by any lock */
- return 0;
-
- if (r0->lo_nr == 1) /* single stripe file */
- return 0;
-
- /* max_index is stripe level, convert it into file level */
- if (index != CL_PAGE_EOF) {
- int stripeno = lov_page_stripe(slice->cpl_page);
- *max_index = lov_stripe_pgoff(loo->lo_lsm, index, stripeno);
- }
-
- /* calculate the end of current stripe */
- pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT;
- index = slice->cpl_index + pps - slice->cpl_index % pps - 1;
-
- CDEBUG(D_READA, DFID "*max_index = %lu, index = %lu, pps = %u, stripe_size = %u, stripe no = %u, page index = %lu\n",
- PFID(lu_object_fid(lov2lu(loo))), *max_index, index, pps,
- loo->lo_lsm->lsm_stripe_size, lov_page_stripe(slice->cpl_page),
- slice->cpl_index);
-
- /* never exceed the end of the stripe */
- *max_index = min_t(pgoff_t, *max_index, index);
- return 0;
-}
-
static int lov_raid0_page_print(const struct lu_env *env,
const struct cl_page_slice *slice,
void *cookie, lu_printer_t printer)
@@ -104,7 +59,6 @@ static int lov_raid0_page_print(const struct lu_env *env,
}
static const struct cl_page_operations lov_raid0_page_ops = {
- .cpo_is_under_lock = lov_raid0_page_is_under_lock,
.cpo_print = lov_raid0_page_print
};
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index f8c8a361ef79..7daa8671fdc3 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -81,7 +81,8 @@ static void lov_pool_putref_locked(struct pool_desc *pool)
* Chapter 6.4.
* Addison Wesley, 1973
*/
-static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key, unsigned mask)
+static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key,
+ unsigned int mask)
{
int i;
__u32 result;
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index 09dcaf484c89..d43cc88ae641 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -44,7 +44,6 @@ static void lov_init_set(struct lov_request_set *set)
atomic_set(&set->set_completes, 0);
atomic_set(&set->set_success, 0);
atomic_set(&set->set_finish_checked, 0);
- set->set_cookies = NULL;
INIT_LIST_HEAD(&set->set_list);
atomic_set(&set->set_refcount, 1);
init_waitqueue_head(&set->set_waitq);
@@ -61,8 +60,6 @@ void lov_finish_set(struct lov_request_set *set)
rq_link);
list_del_init(&req->rq_link);
- if (req->rq_oi.oi_oa)
- kmem_cache_free(obdo_cachep, req->rq_oi.oi_oa);
kfree(req->rq_oi.oi_osfs);
kfree(req);
}
@@ -97,22 +94,6 @@ static void lov_update_set(struct lov_request_set *set,
wake_up(&set->set_waitq);
}
-int lov_update_common_set(struct lov_request_set *set,
- struct lov_request *req, int rc)
-{
- struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
-
- lov_update_set(set, req, rc);
-
- /* grace error on inactive ost */
- if (rc && !(lov->lov_tgts[req->rq_idx] &&
- lov->lov_tgts[req->rq_idx]->ltd_active))
- rc = 0;
-
- /* FIXME in raid1 regime, should return 0 */
- return rc;
-}
-
static void lov_set_add_req(struct lov_request *req,
struct lov_request_set *set)
{
@@ -183,279 +164,6 @@ out:
return rc;
}
-static int common_attr_done(struct lov_request_set *set)
-{
- struct lov_request *req;
- struct obdo *tmp_oa;
- int rc = 0, attrset = 0;
-
- if (!set->set_oi->oi_oa)
- return 0;
-
- if (!atomic_read(&set->set_success))
- return -EIO;
-
- tmp_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
- if (!tmp_oa) {
- rc = -ENOMEM;
- goto out;
- }
-
- list_for_each_entry(req, &set->set_list, rq_link) {
- if (!req->rq_complete || req->rq_rc)
- continue;
- if (req->rq_oi.oi_oa->o_valid == 0) /* inactive stripe */
- continue;
- lov_merge_attrs(tmp_oa, req->rq_oi.oi_oa,
- req->rq_oi.oi_oa->o_valid,
- set->set_oi->oi_md, req->rq_stripe, &attrset);
- }
- if (!attrset) {
- CERROR("No stripes had valid attrs\n");
- rc = -EIO;
- }
- if ((set->set_oi->oi_oa->o_valid & OBD_MD_FLEPOCH) &&
- (set->set_oi->oi_md->lsm_stripe_count != attrset)) {
- /* When we take attributes of some epoch, we require all the
- * ost to be active.
- */
- CERROR("Not all the stripes had valid attrs\n");
- rc = -EIO;
- goto out;
- }
-
- tmp_oa->o_oi = set->set_oi->oi_oa->o_oi;
- memcpy(set->set_oi->oi_oa, tmp_oa, sizeof(*set->set_oi->oi_oa));
-out:
- if (tmp_oa)
- kmem_cache_free(obdo_cachep, tmp_oa);
- return rc;
-}
-
-int lov_fini_getattr_set(struct lov_request_set *set)
-{
- int rc = 0;
-
- if (!set)
- return 0;
- LASSERT(set->set_exp);
- if (atomic_read(&set->set_completes))
- rc = common_attr_done(set);
-
- lov_put_reqset(set);
-
- return rc;
-}
-
-/* The callback for osc_getattr_async that finalizes a request info when a
- * response is received.
- */
-static int cb_getattr_update(void *cookie, int rc)
-{
- struct obd_info *oinfo = cookie;
- struct lov_request *lovreq;
-
- lovreq = container_of(oinfo, struct lov_request, rq_oi);
- return lov_update_common_set(lovreq->rq_rqset, lovreq, rc);
-}
-
-int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo,
- struct lov_request_set **reqset)
-{
- struct lov_request_set *set;
- struct lov_obd *lov = &exp->exp_obd->u.lov;
- int rc = 0, i;
-
- set = kzalloc(sizeof(*set), GFP_NOFS);
- if (!set)
- return -ENOMEM;
- lov_init_set(set);
-
- set->set_exp = exp;
- set->set_oi = oinfo;
-
- for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) {
- struct lov_oinfo *loi;
- struct lov_request *req;
-
- loi = oinfo->oi_md->lsm_oinfo[i];
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
- CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
- if (oinfo->oi_oa->o_valid & OBD_MD_FLEPOCH) {
- /* SOM requires all the OSTs to be active. */
- rc = -EIO;
- goto out_set;
- }
- continue;
- }
-
- req = kzalloc(sizeof(*req), GFP_NOFS);
- if (!req) {
- rc = -ENOMEM;
- goto out_set;
- }
-
- req->rq_stripe = i;
- req->rq_idx = loi->loi_ost_idx;
-
- req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
- if (!req->rq_oi.oi_oa) {
- kfree(req);
- rc = -ENOMEM;
- goto out_set;
- }
- memcpy(req->rq_oi.oi_oa, oinfo->oi_oa,
- sizeof(*req->rq_oi.oi_oa));
- req->rq_oi.oi_oa->o_oi = loi->loi_oi;
- req->rq_oi.oi_cb_up = cb_getattr_update;
-
- lov_set_add_req(req, set);
- }
- if (!set->set_count) {
- rc = -EIO;
- goto out_set;
- }
- *reqset = set;
- return rc;
-out_set:
- lov_fini_getattr_set(set);
- return rc;
-}
-
-int lov_fini_setattr_set(struct lov_request_set *set)
-{
- int rc = 0;
-
- if (!set)
- return 0;
- LASSERT(set->set_exp);
- if (atomic_read(&set->set_completes)) {
- rc = common_attr_done(set);
- /* FIXME update qos data here */
- }
-
- lov_put_reqset(set);
- return rc;
-}
-
-int lov_update_setattr_set(struct lov_request_set *set,
- struct lov_request *req, int rc)
-{
- struct lov_obd *lov = &req->rq_rqset->set_exp->exp_obd->u.lov;
- struct lov_stripe_md *lsm = req->rq_rqset->set_oi->oi_md;
-
- lov_update_set(set, req, rc);
-
- /* grace error on inactive ost */
- if (rc && !(lov->lov_tgts[req->rq_idx] &&
- lov->lov_tgts[req->rq_idx]->ltd_active))
- rc = 0;
-
- if (rc == 0) {
- if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLCTIME)
- lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_ctime =
- req->rq_oi.oi_oa->o_ctime;
- if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLMTIME)
- lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_mtime =
- req->rq_oi.oi_oa->o_mtime;
- if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLATIME)
- lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_atime =
- req->rq_oi.oi_oa->o_atime;
- }
-
- return rc;
-}
-
-/* The callback for osc_setattr_async that finalizes a request info when a
- * response is received.
- */
-static int cb_setattr_update(void *cookie, int rc)
-{
- struct obd_info *oinfo = cookie;
- struct lov_request *lovreq;
-
- lovreq = container_of(oinfo, struct lov_request, rq_oi);
- return lov_update_setattr_set(lovreq->rq_rqset, lovreq, rc);
-}
-
-int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct lov_request_set **reqset)
-{
- struct lov_request_set *set;
- struct lov_obd *lov = &exp->exp_obd->u.lov;
- int rc = 0, i;
-
- set = kzalloc(sizeof(*set), GFP_NOFS);
- if (!set)
- return -ENOMEM;
- lov_init_set(set);
-
- set->set_exp = exp;
- set->set_oi = oinfo;
- if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
- set->set_cookies = oti->oti_logcookies;
-
- for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) {
- struct lov_oinfo *loi = oinfo->oi_md->lsm_oinfo[i];
- struct lov_request *req;
-
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
- CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
- continue;
- }
-
- req = kzalloc(sizeof(*req), GFP_NOFS);
- if (!req) {
- rc = -ENOMEM;
- goto out_set;
- }
- req->rq_stripe = i;
- req->rq_idx = loi->loi_ost_idx;
-
- req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
- if (!req->rq_oi.oi_oa) {
- kfree(req);
- rc = -ENOMEM;
- goto out_set;
- }
- memcpy(req->rq_oi.oi_oa, oinfo->oi_oa,
- sizeof(*req->rq_oi.oi_oa));
- req->rq_oi.oi_oa->o_oi = loi->loi_oi;
- req->rq_oi.oi_oa->o_stripe_idx = i;
- req->rq_oi.oi_cb_up = cb_setattr_update;
-
- if (oinfo->oi_oa->o_valid & OBD_MD_FLSIZE) {
- int off = lov_stripe_offset(oinfo->oi_md,
- oinfo->oi_oa->o_size, i,
- &req->rq_oi.oi_oa->o_size);
-
- if (off < 0 && req->rq_oi.oi_oa->o_size)
- req->rq_oi.oi_oa->o_size--;
-
- CDEBUG(D_INODE, "stripe %d has size %llu/%llu\n",
- i, req->rq_oi.oi_oa->o_size,
- oinfo->oi_oa->o_size);
- }
- lov_set_add_req(req, set);
- }
- if (!set->set_count) {
- rc = -EIO;
- goto out_set;
- }
- *reqset = set;
- return rc;
-out_set:
- lov_fini_setattr_set(set);
- return rc;
-}
-
#define LOV_U64_MAX ((__u64)~0ULL)
#define LOV_SUM_MAX(tot, add) \
do { \
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
index b519a1940e1e..5d6536f8a4f7 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
@@ -44,46 +44,6 @@
/*****************************************************************************
*
- * Lovsub transfer operations.
- *
- */
-
-static void lovsub_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret)
-{
- struct lovsub_req *lsr;
-
- lsr = cl2lovsub_req(slice);
- kmem_cache_free(lovsub_req_kmem, lsr);
-}
-
-/**
- * Implementation of struct cl_req_operations::cro_attr_set() for lovsub
- * layer. Lov and lovsub are responsible only for struct obdo::o_stripe_idx
- * field, which is filled there.
- */
-static void lovsub_req_attr_set(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, u64 flags)
-{
- struct lovsub_object *subobj;
-
- subobj = cl2lovsub(obj);
- /*
- * There is no OBD_MD_* flag for obdo::o_stripe_idx, so set it
- * unconditionally. It never changes anyway.
- */
- attr->cra_oa->o_stripe_idx = subobj->lso_index;
-}
-
-static const struct cl_req_operations lovsub_req_ops = {
- .cro_attr_set = lovsub_req_attr_set,
- .cro_completion = lovsub_req_completion
-};
-
-/*****************************************************************************
- *
* Lov-sub device and device type functions.
*
*/
@@ -137,32 +97,12 @@ static struct lu_device *lovsub_device_free(const struct lu_env *env,
return next;
}
-static int lovsub_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
-{
- struct lovsub_req *lsr;
- int result;
-
- lsr = kmem_cache_zalloc(lovsub_req_kmem, GFP_NOFS);
- if (lsr) {
- cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops);
- result = 0;
- } else {
- result = -ENOMEM;
- }
- return result;
-}
-
static const struct lu_device_operations lovsub_lu_ops = {
.ldo_object_alloc = lovsub_object_alloc,
.ldo_process_config = NULL,
.ldo_recovery_complete = NULL
};
-static const struct cl_device_operations lovsub_cl_ops = {
- .cdo_req_init = lovsub_req_init
-};
-
static struct lu_device *lovsub_device_alloc(const struct lu_env *env,
struct lu_device_type *t,
struct lustre_cfg *cfg)
@@ -178,7 +118,6 @@ static struct lu_device *lovsub_device_alloc(const struct lu_env *env,
if (result == 0) {
d = lovsub2lu_dev(lsd);
d->ld_ops = &lovsub_lu_ops;
- lsd->acid_cl.cd_ops = &lovsub_cl_ops;
} else {
d = ERR_PTR(result);
}
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
index a2bac7a3b71b..011296ee16e6 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_object.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c
@@ -116,11 +116,31 @@ static int lovsub_object_glimpse(const struct lu_env *env,
return cl_object_glimpse(env, &los->lso_super->lo_cl, lvb);
}
+/**
+ * Implementation of struct cl_object_operations::coo_req_attr_set() for lovsub
+ * layer. Lov and lovsub are responsible only for struct obdo::o_stripe_idx
+ * field, which is filled there.
+ */
+static void lovsub_req_attr_set(const struct lu_env *env, struct cl_object *obj,
+ struct cl_req_attr *attr)
+{
+ struct lovsub_object *subobj = cl2lovsub(obj);
+
+ cl_req_attr_set(env, &subobj->lso_super->lo_cl, attr);
+
+ /*
+ * There is no OBD_MD_* flag for obdo::o_stripe_idx, so set it
+ * unconditionally. It never changes anyway.
+ */
+ attr->cra_oa->o_stripe_idx = subobj->lso_index;
+}
+
static const struct cl_object_operations lovsub_ops = {
.coo_page_init = lovsub_page_init,
.coo_lock_init = lovsub_lock_init,
.coo_attr_update = lovsub_attr_update,
- .coo_glimpse = lovsub_object_glimpse
+ .coo_glimpse = lovsub_object_glimpse,
+ .coo_req_attr_set = lovsub_req_attr_set
};
static const struct lu_object_operations lovsub_lu_obj_ops = {
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
index fca9450de57c..9021c465c044 100644
--- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
+++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
@@ -36,6 +36,42 @@
#include "../include/lprocfs_status.h"
#include "mdc_internal.h"
+static ssize_t active_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct obd_device *dev = container_of(kobj, struct obd_device,
+ obd_kobj);
+
+ return sprintf(buf, "%u\n", !dev->u.cli.cl_import->imp_deactive);
+}
+
+static ssize_t active_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct obd_device *dev = container_of(kobj, struct obd_device,
+ obd_kobj);
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buffer, 10, &val);
+ if (rc)
+ return rc;
+
+ if (val > 1)
+ return -ERANGE;
+
+ /* imp_deactive has the opposite sense of "active"; equal means toggle */
+ if (dev->u.cli.cl_import->imp_deactive == val) {
+ rc = ptlrpc_set_import_active(dev->u.cli.cl_import, val);
+ if (rc)
+ count = rc;
+ } else {
+ CDEBUG(D_CONFIG, "activate %lu: ignoring repeat request\n", val);
+ }
+ return count;
+}
+LUSTRE_RW_ATTR(active);
+
static ssize_t max_rpcs_in_flight_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
@@ -73,6 +109,64 @@ static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
}
LUSTRE_RW_ATTR(max_rpcs_in_flight);
+static ssize_t max_mod_rpcs_in_flight_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct obd_device *dev = container_of(kobj, struct obd_device,
+ obd_kobj);
+ u16 max;
+ int len;
+
+ max = dev->u.cli.cl_max_mod_rpcs_in_flight;
+ len = sprintf(buf, "%hu\n", max);
+
+ return len;
+}
+
+static ssize_t max_mod_rpcs_in_flight_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buffer,
+ size_t count)
+{
+ struct obd_device *dev = container_of(kobj, struct obd_device,
+ obd_kobj);
+ u16 val;
+ int rc;
+
+ rc = kstrtou16(buffer, 10, &val);
+ if (rc)
+ return rc;
+
+ rc = obd_set_max_mod_rpcs_in_flight(&dev->u.cli, val);
+ if (rc)
+ count = rc;
+
+ return count;
+}
+LUSTRE_RW_ATTR(max_mod_rpcs_in_flight);
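For context, a sketch (not part of this patch) of how the modify-RPC-slot helpers used throughout this series bracket a metadata-modifying request; the per-client limit is presumably the cl_max_mod_rpcs_in_flight value tuned by the attribute above via obd_set_max_mod_rpcs_in_flight():

/* Bracketing pattern, as in mdc_reint() and mdc_close() later in this patch. */
static int example_send_modify_rpc(struct ptlrpc_request *req)
{
	int rc;

	mdc_get_mod_rpc_slot(req, NULL);	/* may wait for a free slot */
	rc = ptlrpc_queue_wait(req);
	mdc_put_mod_rpc_slot(req, NULL);	/* free the slot for other senders */

	return rc;
}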
+
+static int mdc_rpc_stats_seq_show(struct seq_file *seq, void *v)
+{
+ struct obd_device *dev = seq->private;
+
+ return obd_mod_rpc_stats_seq_show(&dev->u.cli, seq);
+}
+
+static ssize_t mdc_rpc_stats_seq_write(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ struct obd_device *dev = seq->private;
+ struct client_obd *cli = &dev->u.cli;
+
+ lprocfs_oh_clear(&cli->cl_mod_rpcs_hist);
+
+ return len;
+}
+LPROC_SEQ_FOPS(mdc_rpc_stats);
+
LPROC_SEQ_FOPS_WR_ONLY(mdc, ping);
LPROC_SEQ_FOPS_RO_TYPE(mdc, connect_flags);
@@ -112,11 +206,15 @@ static struct lprocfs_vars lprocfs_mdc_obd_vars[] = {
{ "import", &mdc_import_fops, NULL, 0 },
{ "state", &mdc_state_fops, NULL, 0 },
{ "pinger_recov", &mdc_pinger_recov_fops, NULL, 0 },
+ { .name = "rpc_stats",
+ .fops = &mdc_rpc_stats_fops },
{ NULL }
};
static struct attribute *mdc_attrs[] = {
+ &lustre_attr_active.attr,
&lustre_attr_max_rpcs_in_flight.attr,
+ &lustre_attr_max_mod_rpcs_in_flight.attr,
&lustre_attr_max_pages_per_rpc.attr,
NULL,
};
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
index f446c1c2584b..881c6a0676a6 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h
+++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
@@ -46,7 +46,7 @@ void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, size_t size,
void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags,
struct md_op_data *data, size_t ea_size);
void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- void *ea, size_t ealen, void *ea2, size_t ea2len);
+ void *ea, size_t ealen);
void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
const void *data, size_t datalen, umode_t mode, uid_t uid,
gid_t gid, cfs_cap_t capability, __u64 rdev);
@@ -75,7 +75,7 @@ int mdc_intent_lock(struct obd_export *exp,
__u64 extra_lock_flags);
int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
- const ldlm_policy_data_t *policy,
+ const union ldlm_policy_data *policy,
struct lookup_intent *it, struct md_op_data *op_data,
struct lustre_handle *lockh, __u64 extra_lock_flags);
@@ -105,12 +105,11 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
const char *new, size_t newlen,
struct ptlrpc_request **request);
int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, size_t ealen, void *ea2, size_t ea2len,
- struct ptlrpc_request **request, struct md_open_data **mod);
+ void *ea, size_t ealen, struct ptlrpc_request **request);
int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request);
int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
- ldlm_policy_data_t *policy, enum ldlm_mode mode,
+ union ldlm_policy_data *policy, enum ldlm_mode mode,
enum ldlm_cancel_flags flags, void *opaque);
int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
@@ -122,7 +121,8 @@ int mdc_intent_getattr_async(struct obd_export *exp,
enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
const struct lu_fid *fid, enum ldlm_type type,
- ldlm_policy_data_t *policy, enum ldlm_mode mode,
+ union ldlm_policy_data *policy,
+ enum ldlm_mode mode,
struct lustre_handle *lockh);
static inline int mdc_prep_elc_req(struct obd_export *exp,
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index aac7e04873e2..f35e1f9afdef 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -139,7 +139,7 @@ void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
rec->cr_time = op_data->op_mod_time;
rec->cr_suppgid1 = op_data->op_suppgids[0];
rec->cr_suppgid2 = op_data->op_suppgids[1];
- flags = op_data->op_flags & MF_SOM_LOCAL_FLAGS;
+ flags = 0;
if (op_data->op_bias & MDS_CREATE_VOLATILE)
flags |= MDS_OPEN_VOLATILE;
set_mrc_cr_flags(rec, flags);
@@ -301,16 +301,16 @@ static void mdc_setattr_pack_rec(struct mdt_rec_setattr *rec,
static void mdc_ioepoch_pack(struct mdt_ioepoch *epoch,
struct md_op_data *op_data)
{
- memcpy(&epoch->handle, &op_data->op_handle, sizeof(epoch->handle));
- epoch->ioepoch = op_data->op_ioepoch;
- epoch->flags = op_data->op_flags & MF_SOM_LOCAL_FLAGS;
+ epoch->mio_handle = op_data->op_handle;
+ epoch->mio_unused1 = 0;
+ epoch->mio_unused2 = 0;
+ epoch->mio_padding = 0;
}
void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- void *ea, size_t ealen, void *ea2, size_t ea2len)
+ void *ea, size_t ealen)
{
struct mdt_rec_setattr *rec;
- struct mdt_ioepoch *epoch;
struct lov_user_md *lum = NULL;
CLASSERT(sizeof(struct mdt_rec_reint) ==
@@ -318,11 +318,6 @@ void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
mdc_setattr_pack_rec(rec, op_data);
- if (op_data->op_flags & (MF_SOM_CHANGE | MF_EPOCH_OPEN)) {
- epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
- mdc_ioepoch_pack(epoch, op_data);
- }
-
if (ealen == 0)
return;
@@ -335,12 +330,6 @@ void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
} else {
memcpy(lum, ea, ealen);
}
-
- if (ea2len == 0)
- return;
-
- memcpy(req_capsule_client_get(&req->rq_pill, &RMF_LOGCOOKIES), ea2,
- ea2len);
}
void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
@@ -387,6 +376,31 @@ void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen);
}
+static void mdc_intent_close_pack(struct ptlrpc_request *req,
+ struct md_op_data *op_data)
+{
+ enum mds_op_bias bias = op_data->op_bias;
+ struct close_data *data;
+ struct ldlm_lock *lock;
+
+ if (!(bias & (MDS_HSM_RELEASE | MDS_CLOSE_LAYOUT_SWAP |
+ MDS_RENAME_MIGRATE)))
+ return;
+
+ data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA);
+ LASSERT(data);
+
+ lock = ldlm_handle2lock(&op_data->op_lease_handle);
+ if (lock) {
+ data->cd_handle = lock->l_remote_handle;
+ LDLM_LOCK_PUT(lock);
+ }
+ ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);
+
+ data->cd_data_version = op_data->op_data_version;
+ data->cd_fid = op_data->op_fid2;
+}
+
void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
const char *old, size_t oldlen,
const char *new, size_t newlen)
@@ -415,6 +429,15 @@ void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
if (new)
mdc_pack_name(req, &RMF_SYMTGT, new, newlen);
+
+ if (op_data->op_cli_flags & CLI_MIGRATE &&
+ op_data->op_bias & MDS_RENAME_MIGRATE) {
+ struct mdt_ioepoch *epoch;
+
+ mdc_intent_close_pack(req, op_data);
+ epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
+ mdc_ioepoch_pack(epoch, op_data);
+ }
}
void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags,
@@ -441,27 +464,6 @@ void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags,
op_data->op_namelen);
}
-static void mdc_hsm_release_pack(struct ptlrpc_request *req,
- struct md_op_data *op_data)
-{
- if (op_data->op_bias & MDS_HSM_RELEASE) {
- struct close_data *data;
- struct ldlm_lock *lock;
-
- data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA);
-
- lock = ldlm_handle2lock(&op_data->op_lease_handle);
- if (lock) {
- data->cd_handle = lock->l_remote_handle;
- LDLM_LOCK_PUT(lock);
- }
- ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);
-
- data->cd_data_version = op_data->op_data_version;
- data->cd_fid = op_data->op_fid2;
- }
-}
-
void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
{
struct mdt_ioepoch *epoch;
@@ -484,5 +486,5 @@ void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
rec->sa_valid &= ~MDS_ATTR_ATIME;
mdc_ioepoch_pack(epoch, op_data);
- mdc_hsm_release_pack(req, op_data);
+ mdc_intent_close_pack(req, op_data);
}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index f1f6c082fa42..54ebb9952d66 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -38,10 +38,12 @@
#include "../include/obd.h"
#include "../include/obd_class.h"
#include "../include/lustre_dlm.h"
-#include "../include/lustre_fid.h" /* fid_res_name_eq() */
+#include "../include/lustre_fid.h"
#include "../include/lustre_mdc.h"
#include "../include/lustre_net.h"
#include "../include/lustre_req_layout.h"
+#include "../include/lustre_swab.h"
+
#include "mdc_internal.h"
struct mdc_getattr_args {
@@ -131,7 +133,8 @@ int mdc_set_lock_data(struct obd_export *exp, const struct lustre_handle *lockh,
enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
const struct lu_fid *fid, enum ldlm_type type,
- ldlm_policy_data_t *policy, enum ldlm_mode mode,
+ union ldlm_policy_data *policy,
+ enum ldlm_mode mode,
struct lustre_handle *lockh)
{
struct ldlm_res_id res_id;
@@ -147,7 +150,7 @@ enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
int mdc_cancel_unused(struct obd_export *exp,
const struct lu_fid *fid,
- ldlm_policy_data_t *policy,
+ union ldlm_policy_data *policy,
enum ldlm_mode mode,
enum ldlm_cancel_flags flags,
void *opaque)
@@ -386,8 +389,6 @@ static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp,
req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
obddev->u.cli.cl_default_mds_easize);
- req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
- obddev->u.cli.cl_default_mds_cookiesize);
ptlrpc_request_set_replen(req);
return req;
}
@@ -688,20 +689,20 @@ static int mdc_finish_enqueue(struct obd_export *exp,
* we don't know in advance the file type.
*/
int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
- const ldlm_policy_data_t *policy,
+ const union ldlm_policy_data *policy,
struct lookup_intent *it, struct md_op_data *op_data,
struct lustre_handle *lockh, u64 extra_lock_flags)
{
- static const ldlm_policy_data_t lookup_policy = {
+ static const union ldlm_policy_data lookup_policy = {
.l_inodebits = { MDS_INODELOCK_LOOKUP }
};
- static const ldlm_policy_data_t update_policy = {
+ static const union ldlm_policy_data update_policy = {
.l_inodebits = { MDS_INODELOCK_UPDATE }
};
- static const ldlm_policy_data_t layout_policy = {
+ static const union ldlm_policy_data layout_policy = {
.l_inodebits = { MDS_INODELOCK_LAYOUT }
};
- static const ldlm_policy_data_t getxattr_policy = {
+ static const union ldlm_policy_data getxattr_policy = {
.l_inodebits = { MDS_INODELOCK_XATTR }
};
struct obd_device *obddev = class_exp2obd(exp);
@@ -762,27 +763,22 @@ resend:
if (IS_ERR(req))
return PTR_ERR(req);
- if (req && it && it->it_op & IT_CREAT)
- /* ask ptlrpc not to resend on EINPROGRESS since we have our own
- * retry logic
- */
- req->rq_no_retry_einprogress = 1;
-
if (resends) {
req->rq_generation_set = 1;
req->rq_import_generation = generation;
req->rq_sent = ktime_get_real_seconds() + resends;
}
- /* It is important to obtain rpc_lock first (if applicable), so that
- * threads that are serialised with rpc_lock are not polluting our
- * rpcs in flight counter. We do not do flock request limiting, though
+ /* It is important to obtain modify RPC slot first (if applicable), so
+ * that threads that are waiting for a modify RPC slot are not polluting
+ * our rpcs in flight counter.
+ * We do not do flock request limiting, though
*/
if (it) {
- mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
+ mdc_get_mod_rpc_slot(req, it);
rc = obd_get_request_slot(&obddev->u.cli);
if (rc != 0) {
- mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
+ mdc_put_mod_rpc_slot(req, it);
mdc_clear_replay_flag(req, 0);
ptlrpc_req_finished(req);
return rc;
@@ -809,7 +805,7 @@ resend:
}
obd_put_request_slot(&obddev->u.cli);
- mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
+ mdc_put_mod_rpc_slot(req, it);
if (rc < 0) {
CDEBUG(D_INFO, "%s: ldlm_cli_enqueue failed: rc = %d\n",
@@ -825,11 +821,12 @@ resend:
lockrep->lock_policy_res2 =
ptlrpc_status_ntoh(lockrep->lock_policy_res2);
- /* Retry the create infinitely when we get -EINPROGRESS from
- * server. This is required by the new quota design.
+ /*
+ * Retry the intent operation indefinitely when the server returns
+ * -EINPROGRESS; when the server returns -EINPROGRESS for acquiring the
+ * intent lock, the retry is done in after_reply() instead.
*/
- if (it->it_op & IT_CREAT &&
- (int)lockrep->lock_policy_res2 == -EINPROGRESS) {
+ if (it->it_op && (int)lockrep->lock_policy_res2 == -EINPROGRESS) {
mdc_clear_replay_flag(req, rc);
ptlrpc_req_finished(req);
resends++;
@@ -931,7 +928,7 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
*/
lock = ldlm_handle2lock(lockh);
if (lock) {
- ldlm_policy_data_t policy = lock->l_policy_data;
+ union ldlm_policy_data policy = lock->l_policy_data;
LDLM_DEBUG(lock, "matching against this");
@@ -967,7 +964,7 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
*/
struct ldlm_res_id res_id;
struct lustre_handle lockh;
- ldlm_policy_data_t policy;
+ union ldlm_policy_data policy;
enum ldlm_mode mode;
if (it->it_lock_handle) {
@@ -1169,10 +1166,9 @@ int mdc_intent_getattr_async(struct obd_export *exp,
* for statahead currently. Consider CMD in future, such two bits
* maybe managed by different MDS, should be adjusted then.
*/
- ldlm_policy_data_t policy = {
- .l_inodebits = { MDS_INODELOCK_LOOKUP |
- MDS_INODELOCK_UPDATE }
- };
+ union ldlm_policy_data policy = {
+ .l_inodebits = { MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE }
+ };
int rc = 0;
__u64 flags = LDLM_FL_HAS_INTENT;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
index c921e471fa27..07b168490f09 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
@@ -40,17 +40,15 @@
#include "../include/lustre_fid.h"
/* mdc_setattr does its own semaphore handling */
-static int mdc_reint(struct ptlrpc_request *request,
- struct mdc_rpc_lock *rpc_lock,
- int level)
+static int mdc_reint(struct ptlrpc_request *request, int level)
{
int rc;
request->rq_send_state = level;
- mdc_get_rpc_lock(rpc_lock, NULL);
+ mdc_get_mod_rpc_slot(request, NULL);
rc = ptlrpc_queue_wait(request);
- mdc_put_rpc_lock(rpc_lock, NULL);
+ mdc_put_mod_rpc_slot(request, NULL);
if (rc)
CDEBUG(D_INFO, "error in handling %d\n", rc);
else if (!req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY))
@@ -68,7 +66,7 @@ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
__u64 bits)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
- ldlm_policy_data_t policy = {};
+ union ldlm_policy_data policy = {};
struct ldlm_res_id res_id;
struct ldlm_resource *res;
int count;
@@ -99,13 +97,10 @@ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
}
int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, size_t ealen, void *ea2, size_t ea2len,
- struct ptlrpc_request **request, struct md_open_data **mod)
+ void *ea, size_t ealen, struct ptlrpc_request **request)
{
LIST_HEAD(cancels);
struct ptlrpc_request *req;
- struct mdc_rpc_lock *rpc_lock;
- struct obd_device *obd = exp->exp_obd;
int count = 0, rc;
__u64 bits;
@@ -122,12 +117,9 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
ldlm_lock_list_put(&cancels, l_bl_ast, count);
return -ENOMEM;
}
- if ((op_data->op_flags & (MF_SOM_CHANGE | MF_EPOCH_OPEN)) == 0)
- req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT,
- 0);
+ req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT, 0);
req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, ealen);
- req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT,
- ea2len);
+ req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT, 0);
rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
if (rc) {
@@ -135,63 +127,21 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
- rpc_lock = obd->u.cli.cl_rpc_lock;
-
if (op_data->op_attr.ia_valid & (ATTR_MTIME | ATTR_CTIME))
CDEBUG(D_INODE, "setting mtime %ld, ctime %ld\n",
LTIME_S(op_data->op_attr.ia_mtime),
LTIME_S(op_data->op_attr.ia_ctime));
- mdc_setattr_pack(req, op_data, ea, ealen, ea2, ea2len);
+ mdc_setattr_pack(req, op_data, ea, ealen);
ptlrpc_request_set_replen(req);
- if (mod && (op_data->op_flags & MF_EPOCH_OPEN) &&
- req->rq_import->imp_replayable) {
- LASSERT(!*mod);
-
- *mod = obd_mod_alloc();
- if (!*mod) {
- DEBUG_REQ(D_ERROR, req, "Can't allocate md_open_data");
- } else {
- req->rq_replay = 1;
- req->rq_cb_data = *mod;
- (*mod)->mod_open_req = req;
- req->rq_commit_cb = mdc_commit_open;
- (*mod)->mod_is_create = true;
- /**
- * Take an extra reference on \var mod, it protects \var
- * mod from being freed on eviction (commit callback is
- * called despite rq_replay flag).
- * Will be put on mdc_done_writing().
- */
- obd_mod_get(*mod);
- }
- }
-
- rc = mdc_reint(req, rpc_lock, LUSTRE_IMP_FULL);
- /* Save the obtained info in the original RPC for the replay case. */
- if (rc == 0 && (op_data->op_flags & MF_EPOCH_OPEN)) {
- struct mdt_ioepoch *epoch;
- struct mdt_body *body;
+ rc = mdc_reint(req, LUSTRE_IMP_FULL);
- epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- epoch->handle = body->mbo_handle;
- epoch->ioepoch = body->mbo_ioepoch;
- req->rq_replay_cb = mdc_replay_open;
- /** bug 3633, open may be committed and estale answer is not error */
- } else if (rc == -ESTALE && (op_data->op_flags & MF_SOM_CHANGE)) {
- rc = 0;
- } else if (rc == -ERESTARTSYS) {
+ if (rc == -ERESTARTSYS)
rc = 0;
- }
+
*request = req;
- if (rc && req->rq_commit_cb) {
- /* Put an extra reference on \var mod on error case. */
- if (mod && *mod)
- obd_mod_put(*mod);
- req->rq_commit_cb(req);
- }
+
return rc;
}
@@ -264,7 +214,7 @@ rebuild:
}
level = LUSTRE_IMP_FULL;
resend:
- rc = mdc_reint(req, exp->exp_obd->u.cli.cl_rpc_lock, level);
+ rc = mdc_reint(req, level);
/* Resend if we were told to. */
if (rc == -ERESTARTSYS) {
@@ -332,13 +282,11 @@ int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
obd->u.cli.cl_default_mds_easize);
- req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
- obd->u.cli.cl_default_mds_cookiesize);
ptlrpc_request_set_replen(req);
*request = req;
- rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
+ rc = mdc_reint(req, LUSTRE_IMP_FULL);
if (rc == -ERESTARTSYS)
rc = 0;
return rc;
@@ -348,7 +296,6 @@ int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request)
{
LIST_HEAD(cancels);
- struct obd_device *obd = exp->exp_obd;
struct ptlrpc_request *req;
int count = 0, rc;
@@ -380,7 +327,7 @@ int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
mdc_link_pack(req, op_data);
ptlrpc_request_set_replen(req);
- rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
+ rc = mdc_reint(req, LUSTRE_IMP_FULL);
*request = req;
if (rc == -ERESTARTSYS)
rc = 0;
@@ -419,7 +366,8 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
MDS_INODELOCK_FULL);
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_REINT_RENAME);
+ op_data->op_cli_flags & CLI_MIGRATE ?
+ &RQF_MDS_REINT_MIGRATE : &RQF_MDS_REINT_RENAME);
if (!req) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
return -ENOMEM;
@@ -435,6 +383,23 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
+ if (op_data->op_cli_flags & CLI_MIGRATE && op_data->op_data) {
+ struct md_open_data *mod = op_data->op_data;
+
+ LASSERTF(mod->mod_open_req &&
+ mod->mod_open_req->rq_type != LI_POISON,
+ "POISONED open %p!\n", mod->mod_open_req);
+
+ DEBUG_REQ(D_HA, mod->mod_open_req, "matched open");
+ /*
+ * We no longer want to preserve this open for replay even
+ * though the open was committed. b=3632, b=3633
+ */
+ spin_lock(&mod->mod_open_req->rq_lock);
+ mod->mod_open_req->rq_replay = 0;
+ spin_unlock(&mod->mod_open_req->rq_lock);
+ }
+
if (exp_connect_cancelset(exp) && req)
ldlm_cli_cancel_list(&cancels, count, req, 0);
@@ -442,11 +407,9 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
obd->u.cli.cl_default_mds_easize);
- req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
- obd->u.cli.cl_default_mds_cookiesize);
ptlrpc_request_set_replen(req);
- rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
+ rc = mdc_reint(req, LUSTRE_IMP_FULL);
*request = req;
if (rc == -ERESTARTSYS)
rc = 0;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index f56ea643f9bf..2cfd913f9bc5 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -38,15 +38,18 @@
# include <linux/init.h>
# include <linux/utsname.h>
+#include "../include/cl_object.h"
+#include "../include/llog_swab.h"
+#include "../include/lprocfs_status.h"
#include "../include/lustre_acl.h"
+#include "../include/lustre_fid.h"
#include "../include/lustre/lustre_ioctl.h"
-#include "../include/obd_class.h"
+#include "../include/lustre_kernelcomm.h"
#include "../include/lustre_lmv.h"
-#include "../include/lustre_fid.h"
-#include "../include/lprocfs_status.h"
-#include "../include/lustre_param.h"
#include "../include/lustre_log.h"
-#include "../include/lustre_kernelcomm.h"
+#include "../include/lustre_param.h"
+#include "../include/lustre_swab.h"
+#include "../include/obd_class.h"
#include "mdc_internal.h"
@@ -327,12 +330,12 @@ static int mdc_xattr_common(struct obd_export *exp,
/* make rpc */
if (opcode == MDS_REINT)
- mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
+ mdc_get_mod_rpc_slot(req, NULL);
rc = ptlrpc_queue_wait(req);
if (opcode == MDS_REINT)
- mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
+ mdc_put_mod_rpc_slot(req, NULL);
if (rc)
ptlrpc_req_finished(req);
@@ -420,9 +423,6 @@ static int mdc_get_lustre_md(struct obd_export *exp,
md->body = req_capsule_server_get(pill, &RMF_MDT_BODY);
if (md->body->mbo_valid & OBD_MD_FLEASIZE) {
- int lmmsize;
- struct lov_mds_md *lmm;
-
if (!S_ISREG(md->body->mbo_mode)) {
CDEBUG(D_INFO,
"OBD_MD_FLEASIZE set, should be a regular file, but is not\n");
@@ -436,28 +436,18 @@ static int mdc_get_lustre_md(struct obd_export *exp,
rc = -EPROTO;
goto out;
}
- lmmsize = md->body->mbo_eadatasize;
- lmm = req_capsule_server_sized_get(pill, &RMF_MDT_MD, lmmsize);
- if (!lmm) {
- rc = -EPROTO;
- goto out;
- }
-
- rc = obd_unpackmd(dt_exp, &md->lsm, lmm, lmmsize);
- if (rc < 0)
- goto out;
- if (rc < (typeof(rc))sizeof(*md->lsm)) {
- CDEBUG(D_INFO,
- "lsm size too small: rc < sizeof (*md->lsm) (%d < %d)\n",
- rc, (int)sizeof(*md->lsm));
+ md->layout.lb_len = md->body->mbo_eadatasize;
+ md->layout.lb_buf = req_capsule_server_sized_get(pill,
+ &RMF_MDT_MD,
+ md->layout.lb_len);
+ if (!md->layout.lb_buf) {
rc = -EPROTO;
goto out;
}
-
} else if (md->body->mbo_valid & OBD_MD_FLDIREA) {
- int lmvsize;
- struct lov_mds_md *lmv;
+ const union lmv_mds_md *lmv;
+ size_t lmv_size;
if (!S_ISDIR(md->body->mbo_mode)) {
CDEBUG(D_INFO,
@@ -466,22 +456,21 @@ static int mdc_get_lustre_md(struct obd_export *exp,
goto out;
}
- if (md->body->mbo_eadatasize == 0) {
+ lmv_size = md->body->mbo_eadatasize;
+ if (!lmv_size) {
CDEBUG(D_INFO,
"OBD_MD_FLDIREA is set, but eadatasize 0\n");
return -EPROTO;
}
if (md->body->mbo_valid & OBD_MD_MEA) {
- lmvsize = md->body->mbo_eadatasize;
lmv = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
- lmvsize);
+ lmv_size);
if (!lmv) {
rc = -EPROTO;
goto out;
}
- rc = obd_unpackmd(md_exp, (void *)&md->lmv, lmv,
- lmvsize);
+ rc = md_unpackmd(md_exp, &md->lmv, lmv, lmv_size);
if (rc < 0)
goto out;
@@ -517,8 +506,6 @@ out:
#ifdef CONFIG_FS_POSIX_ACL
posix_acl_release(md->posix_acl);
#endif
- if (md->lsm)
- obd_free_memmd(dt_exp, &md->lsm);
}
return rc;
}
@@ -528,10 +515,6 @@ static int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
return 0;
}
-/**
- * Handles both OPEN and SETATTR RPCs for OPEN-CLOSE and SETATTR-DONE_WRITING
- * RPC chains.
- */
void mdc_replay_open(struct ptlrpc_request *req)
{
struct md_open_data *mod = req->rq_cb_data;
@@ -565,15 +548,15 @@ void mdc_replay_open(struct ptlrpc_request *req)
__u32 opc = lustre_msg_get_opc(close_req->rq_reqmsg);
struct mdt_ioepoch *epoch;
- LASSERT(opc == MDS_CLOSE || opc == MDS_DONE_WRITING);
+ LASSERT(opc == MDS_CLOSE);
epoch = req_capsule_client_get(&close_req->rq_pill,
&RMF_MDT_EPOCH);
LASSERT(epoch);
if (och)
- LASSERT(!memcmp(&old, &epoch->handle, sizeof(old)));
+ LASSERT(!memcmp(&old, &epoch->mio_handle, sizeof(old)));
DEBUG_REQ(D_HA, close_req, "updating close body with new fh");
- epoch->handle = body->mbo_handle;
+ epoch->mio_handle = body->mbo_handle;
}
}
@@ -715,22 +698,6 @@ static int mdc_clear_open_replay_data(struct obd_export *exp,
return 0;
}
-/* Prepares the request for the replay by the given reply */
-static void mdc_close_handle_reply(struct ptlrpc_request *req,
- struct md_op_data *op_data, int rc) {
- struct mdt_body *repbody;
- struct mdt_ioepoch *epoch;
-
- if (req && rc == -EAGAIN) {
- repbody = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
-
- epoch->flags |= MF_SOM_AU;
- if (repbody->mbo_valid & OBD_MD_FLGETATTRLOCK)
- op_data->op_flags |= MF_GETATTR_LOCK;
- }
-}
-
static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
struct md_open_data *mod, struct ptlrpc_request **request)
{
@@ -740,9 +707,8 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
int rc;
int saved_rc = 0;
- req_fmt = &RQF_MDS_CLOSE;
if (op_data->op_bias & MDS_HSM_RELEASE) {
- req_fmt = &RQF_MDS_RELEASE_CLOSE;
+ req_fmt = &RQF_MDS_INTENT_CLOSE;
/* allocate a FID for volatile file */
rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
@@ -752,6 +718,10 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
/* save the errcode and proceed to close */
saved_rc = rc;
}
+ } else if (op_data->op_bias & MDS_CLOSE_LAYOUT_SWAP) {
+ req_fmt = &RQF_MDS_INTENT_CLOSE;
+ } else {
+ req_fmt = &RQF_MDS_CLOSE;
}
*request = NULL;
@@ -807,14 +777,12 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
obd->u.cli.cl_default_mds_easize);
- req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
- obd->u.cli.cl_default_mds_cookiesize);
ptlrpc_request_set_replen(req);
- mdc_get_rpc_lock(obd->u.cli.cl_close_lock, NULL);
+ mdc_get_mod_rpc_slot(req, NULL);
rc = ptlrpc_queue_wait(req);
- mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL);
+ mdc_put_mod_rpc_slot(req, NULL);
if (!req->rq_repmsg) {
CDEBUG(D_RPCTRACE, "request failed to send: %p, %d\n", req,
@@ -857,79 +825,9 @@ out:
obd_mod_put(mod);
}
*request = req;
- mdc_close_handle_reply(req, op_data, rc);
return rc < 0 ? rc : saved_rc;
}
-static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
- struct md_open_data *mod)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct ptlrpc_request *req;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_DONE_WRITING);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_DONE_WRITING);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- if (mod) {
- LASSERTF(mod->mod_open_req &&
- mod->mod_open_req->rq_type != LI_POISON,
- "POISONED setattr %p!\n", mod->mod_open_req);
-
- mod->mod_close_req = req;
- DEBUG_REQ(D_HA, mod->mod_open_req, "matched setattr");
- /* We no longer want to preserve this setattr for replay even
- * though the open was committed. b=3632, b=3633
- */
- spin_lock(&mod->mod_open_req->rq_lock);
- mod->mod_open_req->rq_replay = 0;
- spin_unlock(&mod->mod_open_req->rq_lock);
- }
-
- mdc_close_pack(req, op_data);
- ptlrpc_request_set_replen(req);
-
- mdc_get_rpc_lock(obd->u.cli.cl_close_lock, NULL);
- rc = ptlrpc_queue_wait(req);
- mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL);
-
- if (rc == -ESTALE) {
- /**
- * it can be allowed error after 3633 if open or setattr were
- * committed and server failed before close was sent.
- * Let's check if mod exists and return no error in that case
- */
- if (mod) {
- if (mod->mod_open_req->rq_committed)
- rc = 0;
- }
- }
-
- if (mod) {
- if (rc != 0)
- mod->mod_close_req = NULL;
- LASSERT(mod->mod_open_req);
- mdc_free_open(mod);
-
- /* Since now, mod is accessed through setattr req only,
- * thus DW req does not keep a reference on mod anymore.
- */
- obd_mod_put(mod);
- }
-
- mdc_close_handle_reply(req, op_data, rc);
- ptlrpc_req_finished(req);
- return rc;
-}
-
static int mdc_getpage(struct obd_export *exp, const struct lu_fid *fid,
u64 offset, struct page **pages, int npages,
struct ptlrpc_request **request)
@@ -959,8 +857,10 @@ restart_bulk:
req->rq_request_portal = MDS_READPAGE_PORTAL;
ptlrpc_at_set_req_timeout(req);
- desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_PUT_SINK,
- MDS_BULK_PORTAL);
+ desc = ptlrpc_prep_bulk_imp(req, npages, 1,
+ PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+ MDS_BULK_PORTAL,
+ &ptlrpc_bulk_kiov_pin_ops);
if (!desc) {
ptlrpc_request_free(req);
return -ENOMEM;
@@ -968,7 +868,7 @@ restart_bulk:
/* NB req now owns desc and will free it when it gets freed */
for (i = 0; i < npages; i++)
- ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
+ desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE);
mdc_readdir_pack(req, offset, PAGE_SIZE * npages, fid);
@@ -1546,7 +1446,7 @@ static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf)
/* Val is struct getinfo_fid2path result plus path */
vallen = sizeof(*gf) + gf->gf_pathlen;
- rc = obd_get_info(NULL, exp, keylen, key, &vallen, gf, NULL);
+ rc = obd_get_info(NULL, exp, keylen, key, &vallen, gf);
if (rc != 0 && rc != -EREMOTE)
goto out;
@@ -1558,8 +1458,11 @@ static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf)
goto out;
}
- CDEBUG(D_IOCTL, "path get "DFID" from %llu #%d\n%s\n",
- PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno, gf->gf_path);
+ CDEBUG(D_IOCTL, "path got " DFID " from %llu #%d: %s\n",
+ PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno,
+ gf->gf_pathlen < 512 ? gf->gf_path :
+ /* only log the last 512 characters of the path */
+ gf->gf_path + gf->gf_pathlen - 512);
out:
kfree(key);
@@ -1595,7 +1498,9 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp,
ptlrpc_request_set_replen(req);
- rc = mdc_queue_wait(req);
+ mdc_get_mod_rpc_slot(req, NULL);
+ rc = ptlrpc_queue_wait(req);
+ mdc_put_mod_rpc_slot(req, NULL);
out:
ptlrpc_req_finished(req);
return rc;
@@ -1773,7 +1678,9 @@ static int mdc_ioc_hsm_state_set(struct obd_export *exp,
ptlrpc_request_set_replen(req);
- rc = mdc_queue_wait(req);
+ mdc_get_mod_rpc_slot(req, NULL);
+ rc = ptlrpc_queue_wait(req);
+ mdc_put_mod_rpc_slot(req, NULL);
out:
ptlrpc_req_finished(req);
return rc;
@@ -1836,7 +1743,9 @@ static int mdc_ioc_hsm_request(struct obd_export *exp,
ptlrpc_request_set_replen(req);
- rc = mdc_queue_wait(req);
+ mdc_get_mod_rpc_slot(req, NULL);
+ rc = ptlrpc_queue_wait(req);
+ mdc_put_mod_rpc_slot(req, NULL);
out:
ptlrpc_req_finished(req);
return rc;
@@ -1957,10 +1866,8 @@ static int mdc_changelog_send_thread(void *csdata)
/* Send EOF no matter what our result */
kuch = changelog_kuc_hdr(cs->cs_buf, sizeof(*kuch), cs->cs_flags);
- if (kuch) {
- kuch->kuc_msgtype = CL_EOF;
- libcfs_kkuc_msg_put(cs->cs_fp, kuch);
- }
+ kuch->kuc_msgtype = CL_EOF;
+ libcfs_kkuc_msg_put(cs->cs_fp, kuch);
out:
fput(cs->cs_fp);
@@ -2015,52 +1922,6 @@ static int mdc_ioc_changelog_send(struct obd_device *obd,
static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
struct lustre_kernelcomm *lk);
-static int mdc_quotacheck(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- struct ptlrpc_request *req;
- struct obd_quotactl *body;
- int rc;
-
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_MDS_QUOTACHECK, LUSTRE_MDS_VERSION,
- MDS_QUOTACHECK);
- if (!req)
- return -ENOMEM;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
- *body = *oqctl;
-
- ptlrpc_request_set_replen(req);
-
- /* the next poll will find -ENODATA, that means quotacheck is
- * going on
- */
- cli->cl_qchk_stat = -ENODATA;
- rc = ptlrpc_queue_wait(req);
- if (rc)
- cli->cl_qchk_stat = rc;
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int mdc_quota_poll_check(struct obd_export *exp,
- struct if_quotacheck *qchk)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- int rc;
-
- qchk->obd_uuid = cli->cl_target_uuid;
- memcpy(qchk->obd_type, LUSTRE_MDS_NAME, strlen(LUSTRE_MDS_NAME));
-
- rc = cli->cl_qchk_stat;
- /* the client is not the previous one */
- if (rc == CL_NOT_QUOTACHECKED)
- rc = -EINTR;
- return rc;
-}
-
static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp,
struct obd_quotactl *oqctl)
{
@@ -2215,9 +2076,6 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
case IOC_OSC_SET_ACTIVE:
rc = ptlrpc_set_import_active(imp, data->ioc_offset);
goto out;
- case OBD_IOC_POLL_QUOTACHECK:
- rc = mdc_quota_poll_check(exp, (struct if_quotacheck *)karg);
- goto out;
case OBD_IOC_PING_TARGET:
rc = ptlrpc_obd_ping(obd);
goto out;
@@ -2528,8 +2386,7 @@ static int mdc_set_info_async(const struct lu_env *env,
}
static int mdc_get_info(const struct lu_env *env, struct obd_export *exp,
- __u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *lsm)
+ __u32 keylen, void *key, __u32 *vallen, void *val)
{
int rc = -EINVAL;
@@ -2733,29 +2590,17 @@ static void mdc_llog_finish(struct obd_device *obd)
static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
{
- struct client_obd *cli = &obd->u.cli;
struct lprocfs_static_vars lvars = { NULL };
int rc;
- cli->cl_rpc_lock = kzalloc(sizeof(*cli->cl_rpc_lock), GFP_NOFS);
- if (!cli->cl_rpc_lock)
- return -ENOMEM;
- mdc_init_rpc_lock(cli->cl_rpc_lock);
-
rc = ptlrpcd_addref();
if (rc < 0)
- goto err_rpc_lock;
-
- cli->cl_close_lock = kzalloc(sizeof(*cli->cl_close_lock), GFP_NOFS);
- if (!cli->cl_close_lock) {
- rc = -ENOMEM;
- goto err_ptlrpcd_decref;
- }
- mdc_init_rpc_lock(cli->cl_close_lock);
+ return rc;
rc = client_obd_setup(obd, cfg);
if (rc)
- goto err_close_lock;
+ goto err_ptlrpcd_decref;
+
lprocfs_mdc_init_vars(&lvars);
lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars);
sptlrpc_lprocfs_cliobd_attach(obd);
@@ -2769,29 +2614,25 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
if (rc) {
mdc_cleanup(obd);
CERROR("failed to setup llogging subsystems\n");
+ return rc;
}
return rc;
-err_close_lock:
- kfree(cli->cl_close_lock);
err_ptlrpcd_decref:
ptlrpcd_decref();
-err_rpc_lock:
- kfree(cli->cl_rpc_lock);
return rc;
}
-/* Initialize the default and maximum LOV EA and cookie sizes. This allows
+/* Initialize the default and maximum LOV EA sizes. This allows
* us to make MDS RPCs with large enough reply buffers to hold a default
- * sized EA and cookie without having to calculate this (via a call into the
+ * sized EA without having to calculate this (via a call into the
* LOV + OSCs) each time we make an RPC. The maximum size is also tracked
* but not used to avoid wastefully vmalloc()'ing large reply buffers when
* a large number of stripes is possible. If a larger reply buffer is
* required it will be reallocated in the ptlrpc layer due to overflow.
*/
-static int mdc_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
- u32 cookiesize, u32 def_cookiesize)
+static int mdc_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize)
{
struct obd_device *obd = exp->exp_obd;
struct client_obd *cli = &obd->u.cli;
@@ -2802,42 +2643,24 @@ static int mdc_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
if (cli->cl_default_mds_easize < def_easize)
cli->cl_default_mds_easize = def_easize;
- if (cli->cl_max_mds_cookiesize < cookiesize)
- cli->cl_max_mds_cookiesize = cookiesize;
-
- if (cli->cl_default_mds_cookiesize < def_cookiesize)
- cli->cl_default_mds_cookiesize = def_cookiesize;
-
return 0;
}
-static int mdc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+static int mdc_precleanup(struct obd_device *obd)
{
- switch (stage) {
- case OBD_CLEANUP_EARLY:
- break;
- case OBD_CLEANUP_EXPORTS:
- /* Failsafe, ok if racy */
- if (obd->obd_type->typ_refcnt <= 1)
- libcfs_kkuc_group_rem(0, KUC_GRP_HSM);
+ /* Failsafe, ok if racy */
+ if (obd->obd_type->typ_refcnt <= 1)
+ libcfs_kkuc_group_rem(0, KUC_GRP_HSM);
- obd_cleanup_client_import(obd);
- ptlrpc_lprocfs_unregister_obd(obd);
- lprocfs_obd_cleanup(obd);
-
- mdc_llog_finish(obd);
- break;
- }
+ obd_cleanup_client_import(obd);
+ ptlrpc_lprocfs_unregister_obd(obd);
+ lprocfs_obd_cleanup(obd);
+ mdc_llog_finish(obd);
return 0;
}
static int mdc_cleanup(struct obd_device *obd)
{
- struct client_obd *cli = &obd->u.cli;
-
- kfree(cli->cl_rpc_lock);
- kfree(cli->cl_close_lock);
-
ptlrpcd_decref();
return client_obd_cleanup(obd);
@@ -2881,7 +2704,6 @@ static struct obd_ops mdc_obd_ops = {
.process_config = mdc_process_config,
.get_uuid = mdc_get_uuid,
.quotactl = mdc_quotactl,
- .quotacheck = mdc_quotacheck
};
static struct md_ops mdc_md_ops = {
@@ -2889,7 +2711,6 @@ static struct md_ops mdc_md_ops = {
.null_inode = mdc_null_inode,
.close = mdc_close,
.create = mdc_create,
- .done_writing = mdc_done_writing,
.enqueue = mdc_enqueue,
.getattr = mdc_getattr,
.getattr_name = mdc_getattr_name,
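The recurring change in this file is that the old per-client RPC/close locks are replaced by a modify-RPC-slot acquire/release pair wrapped around ptlrpc_queue_wait(). As a rough standalone sketch of that pattern only (not the kernel code; MAX_SLOTS, slot_get() and slot_put() are illustrative names), the idea is a bounded counter taken before the request is sent and dropped afterwards:

/* Standalone sketch of the acquire/send/release pattern; MAX_SLOTS,
 * slot_get() and slot_put() are illustrative names, not Lustre APIs. */
#include <pthread.h>
#include <stdio.h>

#define MAX_SLOTS 7

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;
static int in_flight;

static void slot_get(void)
{
	pthread_mutex_lock(&lock);
	while (in_flight >= MAX_SLOTS)	/* sleep until a slot frees up */
		pthread_cond_wait(&waitq, &lock);
	in_flight++;
	pthread_mutex_unlock(&lock);
}

static void slot_put(void)
{
	pthread_mutex_lock(&lock);
	in_flight--;
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&waitq);	/* wake one waiter, like wake_up() */
}

int main(void)
{
	slot_get();
	printf("slot held, %d in flight\n", in_flight);	/* "send" the RPC here */
	slot_put();
	return 0;
}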
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 23374cae5133..b9c522a3c7a4 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -38,11 +38,13 @@
#define D_MGC D_CONFIG /*|D_WARNING*/
#include <linux/module.h>
-#include "../include/obd_class.h"
-#include "../include/lustre_dlm.h"
+
#include "../include/lprocfs_status.h"
-#include "../include/lustre_log.h"
+#include "../include/lustre_dlm.h"
#include "../include/lustre_disk.h"
+#include "../include/lustre_log.h"
+#include "../include/lustre_swab.h"
+#include "../include/obd_class.h"
#include "mgc_internal.h"
@@ -373,7 +375,7 @@ out_err:
return rc;
}
-DEFINE_MUTEX(llog_process_lock);
+static DEFINE_MUTEX(llog_process_lock);
/** Stop watching for updates on this log.
*/
@@ -684,35 +686,33 @@ static int mgc_llog_fini(const struct lu_env *env, struct obd_device *obd)
}
static atomic_t mgc_count = ATOMIC_INIT(0);
-static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+static int mgc_precleanup(struct obd_device *obd)
{
int rc = 0;
int temp;
- switch (stage) {
- case OBD_CLEANUP_EARLY:
- break;
- case OBD_CLEANUP_EXPORTS:
- if (atomic_dec_and_test(&mgc_count)) {
- LASSERT(rq_state & RQ_RUNNING);
- /* stop requeue thread */
- temp = RQ_STOP;
- } else {
- /* wakeup requeue thread to clean our cld */
- temp = RQ_NOW | RQ_PRECLEANUP;
- }
- spin_lock(&config_list_lock);
- rq_state |= temp;
- spin_unlock(&config_list_lock);
- wake_up(&rq_waitq);
- if (temp & RQ_STOP)
- wait_for_completion(&rq_exit);
- obd_cleanup_client_import(obd);
- rc = mgc_llog_fini(NULL, obd);
- if (rc != 0)
- CERROR("failed to cleanup llogging subsystems\n");
- break;
+ if (atomic_dec_and_test(&mgc_count)) {
+ LASSERT(rq_state & RQ_RUNNING);
+ /* stop requeue thread */
+ temp = RQ_STOP;
+ } else {
+ /* wakeup requeue thread to clean our cld */
+ temp = RQ_NOW | RQ_PRECLEANUP;
}
+
+ spin_lock(&config_list_lock);
+ rq_state |= temp;
+ spin_unlock(&config_list_lock);
+ wake_up(&rq_waitq);
+
+ if (temp & RQ_STOP)
+ wait_for_completion(&rq_exit);
+ obd_cleanup_client_import(obd);
+
+ rc = mgc_llog_fini(NULL, obd);
+ if (rc)
+ CERROR("failed to cleanup llogging subsystems\n");
+
return rc;
}
@@ -887,8 +887,8 @@ static int mgc_set_mgs_param(struct obd_export *exp,
}
/* Take a config lock so we can get cancel notifications */
-static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
- __u32 type, ldlm_policy_data_t *policy, __u32 mode,
+static int mgc_enqueue(struct obd_export *exp, __u32 type,
+ union ldlm_policy_data *policy, __u32 mode,
__u64 *flags, void *bl_cb, void *cp_cb, void *gl_cb,
void *data, __u32 lvb_len, void *lvb_swabber,
struct lustre_handle *lockh)
@@ -1059,8 +1059,7 @@ static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
}
static int mgc_get_info(const struct lu_env *env, struct obd_export *exp,
- __u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *unused)
+ __u32 keylen, void *key, __u32 *vallen, void *val)
{
int rc = -EINVAL;
@@ -1387,15 +1386,17 @@ again:
body->mcb_units = nrpages;
/* allocate bulk transfer descriptor */
- desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, BULK_PUT_SINK,
- MGS_BULK_PORTAL);
+ desc = ptlrpc_prep_bulk_imp(req, nrpages, 1,
+ PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+ MGS_BULK_PORTAL,
+ &ptlrpc_bulk_kiov_pin_ops);
if (!desc) {
rc = -ENOMEM;
goto out;
}
for (i = 0; i < nrpages; i++)
- ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
+ desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE);
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
@@ -1553,14 +1554,52 @@ out_free:
return rc;
}
-/** Get a config log from the MGS and process it.
- * This func is called for both clients and servers.
- * Copy the log locally before parsing it if appropriate (non-MGS server)
+static bool mgc_import_in_recovery(struct obd_import *imp)
+{
+ bool in_recovery = true;
+
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state == LUSTRE_IMP_FULL ||
+ imp->imp_state == LUSTRE_IMP_CLOSED)
+ in_recovery = false;
+ spin_unlock(&imp->imp_lock);
+
+ return in_recovery;
+}
+
+/**
+ * Get a configuration log from the MGS and process it.
+ *
+ * This function is called for both clients and servers to process the
+ * configuration log from the MGS. The MGC enqueues a DLM lock on the
+ * log from the MGS, and if the lock gets revoked the MGC will be notified
+ * by the lock cancellation callback that the config log has changed,
+ * and will enqueue another MGS lock on it, and then continue processing
+ * the new additions to the end of the log.
+ *
+ * Since the MGC import is not replayable, if the import is being evicted
+ * (rcl == -ESHUTDOWN, \see ptlrpc_import_delay_req()), retry processing
+ * the log until recovery finishes or the import is closed.
+ *
+ * Make a local copy of the log before parsing it if appropriate (non-MGS
+ * server) so that the server can start even when the MGS is down.
+ *
+ * There shouldn't be multiple processes running process_log at once --
+ * sounds like badness. It actually might be fine, as long as they're not
+ * trying to update from the same log simultaneously, in which case we
+ * should use a per-log semaphore instead of cld_lock.
+ *
+ * \param[in] mgc MGC device by which to fetch the configuration log
+ * \param[in] cld log processing state (stored in lock callback data)
+ *
+ * \retval 0 on success
+ * \retval negative errno on failure
*/
int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
{
struct lustre_handle lockh = { 0 };
__u64 flags = LDLM_FL_NO_LRU;
+ bool retry = false;
int rc = 0, rcl;
LASSERT(cld);
@@ -1570,6 +1609,7 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
* we're not trying to update from the same log
* simultaneously (in which case we should use a per-log sem.)
*/
+restart:
mutex_lock(&cld->cld_lock);
if (cld->cld_stopping) {
mutex_unlock(&cld->cld_lock);
@@ -1582,7 +1622,7 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
cld->cld_cfg.cfg_instance, cld->cld_cfg.cfg_last_idx + 1);
/* Get the cfg lock on the llog */
- rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, NULL, LDLM_PLAIN, NULL,
+ rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, LDLM_PLAIN, NULL,
LCK_CR, &flags, NULL, NULL, NULL,
cld, 0, NULL, &lockh);
if (rcl == 0) {
@@ -1593,18 +1633,57 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
} else {
CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl);
- /* mark cld_lostlock so that it will requeue
- * after MGC becomes available.
- */
- cld->cld_lostlock = 1;
+ if (rcl == -ESHUTDOWN &&
+ atomic_read(&mgc->u.cli.cl_mgc_refcount) > 0 && !retry) {
+ int secs = cfs_time_seconds(obd_timeout);
+ struct obd_import *imp;
+ struct l_wait_info lwi;
+
+ mutex_unlock(&cld->cld_lock);
+ imp = class_exp2cliimp(mgc->u.cli.cl_mgc_mgsexp);
+
+ /*
+ * Let's force the pinger, and wait the import to be
+ * connected, note: since mgc import is non-replayable,
+ * and even the import state is disconnected, it does
+ * not mean the "recovery" is stopped, so we will keep
+ * waitting until timeout or the import state is
+ * FULL or closed
+ */
+ ptlrpc_pinger_force(imp);
+
+ lwi = LWI_TIMEOUT(secs, NULL, NULL);
+ l_wait_event(imp->imp_recovery_waitq,
+ !mgc_import_in_recovery(imp), &lwi);
+
+ if (imp->imp_state == LUSTRE_IMP_FULL) {
+ retry = true;
+ goto restart;
+ } else {
+ mutex_lock(&cld->cld_lock);
+ cld->cld_lostlock = 1;
+ }
+ } else {
+ /* mark cld_lostlock so that it will requeue
+ * after MGC becomes available.
+ */
+ cld->cld_lostlock = 1;
+ }
/* Get extra reference, it will be put in requeue thread */
config_log_get(cld);
}
if (cld_is_recover(cld)) {
rc = 0; /* this is not a fatal error for recover log */
- if (rcl == 0)
+ if (!rcl) {
rc = mgc_process_recover_log(mgc, cld);
+ if (rc) {
+ CERROR("%s: recover log %s failed: rc = %d not fatal.\n",
+ mgc->obd_name, cld->cld_logname, rc);
+ rc = 0;
+ cld->cld_lostlock = 1;
+ }
+ }
} else {
rc = mgc_process_cfg_log(mgc, cld, rcl != 0);
}
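The new -ESHUTDOWN handling in mgc_process_log() amounts to: drop the lock, wait (bounded by a timeout) until the import leaves recovery, and retry the enqueue exactly once if the import came back FULL. A minimal standalone sketch of that control flow, where import_in_recovery(), try_enqueue(), wait_for_recovery() and the state values are stand-ins rather than the Lustre functions:

/* Sketch of the retry-once control flow; all names here are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

enum imp_state { IMP_DISCON, IMP_FULL, IMP_CLOSED };

static enum imp_state state = IMP_DISCON;

static bool import_in_recovery(void)
{
	return state != IMP_FULL && state != IMP_CLOSED;
}

static int try_enqueue(void)
{
	return state == IMP_FULL ? 0 : -108;	/* -ESHUTDOWN placeholder */
}

static void wait_for_recovery(void)
{
	/* models the bounded l_wait_event(): pretend recovery completed */
	state = IMP_FULL;
}

int main(void)
{
	bool retry = false;
	int rc;

restart:
	rc = try_enqueue();
	if (rc == -108 && !retry) {
		wait_for_recovery();
		if (!import_in_recovery() && state == IMP_FULL) {
			retry = true;	/* only one retry, as in the patch */
			goto restart;
		}
	}
	printf("rc = %d\n", rc);
	return 0;
}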
diff --git a/drivers/staging/lustre/lustre/obdclass/Makefile b/drivers/staging/lustre/lustre/obdclass/Makefile
index b42e109b30e0..af570c0db15b 100644
--- a/drivers/staging/lustre/lustre/obdclass/Makefile
+++ b/drivers/staging/lustre/lustre/obdclass/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_LUSTRE_FS) += obdclass.o
-obdclass-y := linux/linux-module.o linux/linux-obdo.o linux/linux-sysctl.o \
+obdclass-y := linux/linux-module.o linux/linux-sysctl.o \
llog.o llog_cat.o llog_obd.o llog_swab.o class_obd.o debug.o \
genops.o uuid.o lprocfs_status.o lprocfs_counters.o \
lustre_handles.o lustre_peer.o statfs_pack.o linkea.o \
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_internal.h b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
index e866754a42d5..7b403fbd5f94 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_internal.h
+++ b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
@@ -50,25 +50,6 @@ enum clt_nesting_level {
};
/**
- * Counters used to check correctness of cl_lock interface usage.
- */
-struct cl_thread_counters {
- /**
- * Number of outstanding calls to cl_lock_mutex_get() made by the
- * current thread. For debugging.
- */
- int ctc_nr_locks_locked;
- /** List of locked locks. */
- struct lu_ref ctc_locks_locked;
- /** Number of outstanding holds on locks. */
- int ctc_nr_held;
- /** Number of outstanding uses on locks. */
- int ctc_nr_used;
- /** Number of held extent locks. */
- int ctc_nr_locks_acquired;
-};
-
-/**
* Thread local state internal for generic cl-code.
*/
struct cl_thread_info {
@@ -83,10 +64,6 @@ struct cl_thread_info {
*/
struct cl_lock_descr clt_descr;
struct cl_page_list clt_list;
- /**
- * Counters for every level of lock nesting.
- */
- struct cl_thread_counters clt_counters[CNL_NR];
/** @} debugging */
/*
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index bc4b7b6b9a20..3f42457b0d7d 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -126,6 +126,7 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io)
switch (io->ci_type) {
case CIT_READ:
case CIT_WRITE:
+ case CIT_DATA_VERSION:
break;
case CIT_FAULT:
break;
@@ -411,7 +412,6 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
}
io->ci_state = CIS_UNLOCKED;
- LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
}
EXPORT_SYMBOL(cl_io_unlock);
@@ -586,67 +586,32 @@ void cl_io_end(const struct lu_env *env, struct cl_io *io)
}
EXPORT_SYMBOL(cl_io_end);
-static const struct cl_page_slice *
-cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
-{
- const struct cl_page_slice *slice;
-
- slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
- LINVRNT(slice);
- return slice;
-}
-
/**
- * Called by read io, when page has to be read from the server.
+ * Called by read io, to decide the readahead extent
*
- * \see cl_io_operations::cio_read_page()
+ * \see cl_io_operations::cio_read_ahead()
*/
-int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page)
+int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
+ pgoff_t start, struct cl_read_ahead *ra)
{
const struct cl_io_slice *scan;
- struct cl_2queue *queue;
int result = 0;
LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
- LINVRNT(cl_page_is_owned(page, io));
LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
LINVRNT(cl_io_invariant(io));
- queue = &io->ci_queue;
-
- cl_2queue_init(queue);
- /*
- * ->cio_read_page() methods called in the loop below are supposed to
- * never block waiting for network (the only subtle point is the
- * creation of new pages for read-ahead that might result in cache
- * shrinking, but currently only clean pages are shrunk and this
- * requires no network io).
- *
- * Should this ever starts blocking, retry loop would be needed for
- * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
- */
cl_io_for_each(scan, io) {
- if (scan->cis_iop->cio_read_page) {
- const struct cl_page_slice *slice;
+ if (!scan->cis_iop->cio_read_ahead)
+ continue;
- slice = cl_io_slice_page(scan, page);
- LINVRNT(slice);
- result = scan->cis_iop->cio_read_page(env, scan, slice);
- if (result != 0)
- break;
- }
+ result = scan->cis_iop->cio_read_ahead(env, scan, start, ra);
+ if (result)
+ break;
}
- if (result == 0 && queue->c2_qin.pl_nr > 0)
- result = cl_io_submit_rw(env, io, CRT_READ, queue);
- /*
- * Unlock unsent pages in case of error.
- */
- cl_page_list_disown(env, io, &queue->c2_qin);
- cl_2queue_fini(env, queue);
- return result;
+ return result > 0 ? 0 : result;
}
-EXPORT_SYMBOL(cl_io_read_page);
+EXPORT_SYMBOL(cl_io_read_ahead);
/**
* Commit a list of contiguous pages into writeback cache.
@@ -1080,235 +1045,18 @@ struct cl_io *cl_io_top(struct cl_io *io)
EXPORT_SYMBOL(cl_io_top);
/**
- * Adds request slice to the compound request.
- *
- * This is called by cl_device_operations::cdo_req_init() methods to add a
- * per-layer state to the request. New state is added at the end of
- * cl_req::crq_layers list, that is, it is at the bottom of the stack.
- *
- * \see cl_lock_slice_add(), cl_page_slice_add(), cl_io_slice_add()
- */
-void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
- struct cl_device *dev,
- const struct cl_req_operations *ops)
-{
- list_add_tail(&slice->crs_linkage, &req->crq_layers);
- slice->crs_dev = dev;
- slice->crs_ops = ops;
- slice->crs_req = req;
-}
-EXPORT_SYMBOL(cl_req_slice_add);
-
-static void cl_req_free(const struct lu_env *env, struct cl_req *req)
-{
- unsigned i;
-
- LASSERT(list_empty(&req->crq_pages));
- LASSERT(req->crq_nrpages == 0);
- LINVRNT(list_empty(&req->crq_layers));
- LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o));
-
- if (req->crq_o) {
- for (i = 0; i < req->crq_nrobjs; ++i) {
- struct cl_object *obj = req->crq_o[i].ro_obj;
-
- if (obj) {
- lu_object_ref_del_at(&obj->co_lu,
- &req->crq_o[i].ro_obj_ref,
- "cl_req", req);
- cl_object_put(env, obj);
- }
- }
- kfree(req->crq_o);
- }
- kfree(req);
-}
-
-static int cl_req_init(const struct lu_env *env, struct cl_req *req,
- struct cl_page *page)
-{
- struct cl_device *dev;
- struct cl_page_slice *slice;
- int result;
-
- result = 0;
- list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
- dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
- if (dev->cd_ops->cdo_req_init) {
- result = dev->cd_ops->cdo_req_init(env, dev, req);
- if (result != 0)
- break;
- }
- }
- return result;
-}
-
-/**
- * Invokes per-request transfer completion call-backs
- * (cl_req_operations::cro_completion()) bottom-to-top.
- */
-void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
-{
- struct cl_req_slice *slice;
-
- /*
- * for the lack of list_for_each_entry_reverse_safe()...
- */
- while (!list_empty(&req->crq_layers)) {
- slice = list_entry(req->crq_layers.prev,
- struct cl_req_slice, crs_linkage);
- list_del_init(&slice->crs_linkage);
- if (slice->crs_ops->cro_completion)
- slice->crs_ops->cro_completion(env, slice, rc);
- }
- cl_req_free(env, req);
-}
-EXPORT_SYMBOL(cl_req_completion);
-
-/**
- * Allocates new transfer request.
- */
-struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
- enum cl_req_type crt, int nr_objects)
-{
- struct cl_req *req;
-
- LINVRNT(nr_objects > 0);
-
- req = kzalloc(sizeof(*req), GFP_NOFS);
- if (req) {
- int result;
-
- req->crq_type = crt;
- INIT_LIST_HEAD(&req->crq_pages);
- INIT_LIST_HEAD(&req->crq_layers);
-
- req->crq_o = kcalloc(nr_objects, sizeof(req->crq_o[0]),
- GFP_NOFS);
- if (req->crq_o) {
- req->crq_nrobjs = nr_objects;
- result = cl_req_init(env, req, page);
- } else {
- result = -ENOMEM;
- }
- if (result != 0) {
- cl_req_completion(env, req, result);
- req = ERR_PTR(result);
- }
- } else {
- req = ERR_PTR(-ENOMEM);
- }
- return req;
-}
-EXPORT_SYMBOL(cl_req_alloc);
-
-/**
- * Adds a page to a request.
- */
-void cl_req_page_add(const struct lu_env *env,
- struct cl_req *req, struct cl_page *page)
-{
- struct cl_object *obj;
- struct cl_req_obj *rqo;
- unsigned int i;
-
- LASSERT(list_empty(&page->cp_flight));
- LASSERT(!page->cp_req);
-
- CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
- req, req->crq_type, req->crq_nrpages);
-
- list_add_tail(&page->cp_flight, &req->crq_pages);
- ++req->crq_nrpages;
- page->cp_req = req;
- obj = cl_object_top(page->cp_obj);
- for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
- if (!rqo->ro_obj) {
- rqo->ro_obj = obj;
- cl_object_get(obj);
- lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
- "cl_req", req);
- break;
- }
- }
- LASSERT(i < req->crq_nrobjs);
-}
-EXPORT_SYMBOL(cl_req_page_add);
-
-/**
- * Removes a page from a request.
- */
-void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
-{
- struct cl_req *req = page->cp_req;
-
- LASSERT(!list_empty(&page->cp_flight));
- LASSERT(req->crq_nrpages > 0);
-
- list_del_init(&page->cp_flight);
- --req->crq_nrpages;
- page->cp_req = NULL;
-}
-EXPORT_SYMBOL(cl_req_page_done);
-
-/**
- * Notifies layers that request is about to depart by calling
- * cl_req_operations::cro_prep() top-to-bottom.
- */
-int cl_req_prep(const struct lu_env *env, struct cl_req *req)
-{
- unsigned int i;
- int result;
- const struct cl_req_slice *slice;
-
- /*
- * Check that the caller of cl_req_alloc() didn't lie about the number
- * of objects.
- */
- for (i = 0; i < req->crq_nrobjs; ++i)
- LASSERT(req->crq_o[i].ro_obj);
-
- result = 0;
- list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
- if (slice->crs_ops->cro_prep) {
- result = slice->crs_ops->cro_prep(env, slice);
- if (result != 0)
- break;
- }
- }
- return result;
-}
-EXPORT_SYMBOL(cl_req_prep);
-
-/**
* Fills in attributes that are passed to server together with transfer. Only
* attributes from \a flags may be touched. This can be called multiple times
* for the same request.
*/
-void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
- struct cl_req_attr *attr, u64 flags)
+void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
+ struct cl_req_attr *attr)
{
- const struct cl_req_slice *slice;
- struct cl_page *page;
- unsigned int i;
-
- LASSERT(!list_empty(&req->crq_pages));
-
- /* Take any page to use as a model. */
- page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);
-
- for (i = 0; i < req->crq_nrobjs; ++i) {
- list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
- const struct cl_page_slice *scan;
- const struct cl_object *obj;
-
- scan = cl_page_at(page,
- slice->crs_dev->cd_lu_dev.ld_type);
- obj = scan->cpl_obj;
- if (slice->crs_ops->cro_attr_set)
- slice->crs_ops->cro_attr_set(env, slice, obj,
- attr + i, flags);
- }
+ struct cl_object *scan;
+
+ cl_object_for_each(scan, obj) {
+ if (scan->co_ops->coo_req_attr_set)
+ scan->co_ops->coo_req_attr_set(env, scan, attr);
}
}
EXPORT_SYMBOL(cl_req_attr_set);
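The replacement helpers above share one dispatch pattern: walk the per-layer slices or objects, call an optional method on each, stop at the first non-zero result, and treat a positive result as "handled" (returned as 0). A toy standalone model of that pattern, in which struct layer and the callbacks are invented purely for illustration:

/* Toy model of layered-callback dispatch; struct layer and the callbacks
 * below are invented for illustration, not Lustre types. */
#include <stdio.h>

struct layer {
	int (*read_ahead)(int start);	/* optional per-layer method */
};

static int top_ra(int start)  { (void)start; return 0; }   /* keep going */
static int osc_ra(int start)  { (void)start; return 1; }   /* "handled" */

static int io_read_ahead(struct layer *layers, int nr, int start)
{
	int result = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (!layers[i].read_ahead)
			continue;
		result = layers[i].read_ahead(start);
		if (result)
			break;		/* first layer with an answer wins */
	}
	return result > 0 ? 0 : result;	/* positive result means success */
}

int main(void)
{
	struct layer stack[] = { { top_ra }, { osc_ra }, { NULL } };

	printf("rc = %d\n", io_read_ahead(stack, 3, 42));
	return 0;
}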
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 3199dd4a3b72..f5d4e23c64b7 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -335,7 +335,7 @@ int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
if (obj->co_ops->coo_getstripe) {
result = obj->co_ops->coo_getstripe(env, obj, uarg);
if (result)
- break;
+ break;
}
}
return result;
@@ -343,6 +343,67 @@ int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
EXPORT_SYMBOL(cl_object_getstripe);
/**
+ * Get fiemap extents from file object.
+ *
+ * \param env [in] lustre environment
+ * \param obj [in] file object
+ * \param key [in] fiemap request argument
+ * \param fiemap [out] fiemap extents mapping retrieved
+ * \param buflen [in] max buffer length of @fiemap
+ *
+ * \retval 0 success
+ * \retval < 0 error
+ */
+int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj,
+ struct ll_fiemap_info_key *key,
+ struct fiemap *fiemap, size_t *buflen)
+{
+ struct lu_object_header *top;
+ int result = 0;
+
+ top = obj->co_lu.lo_header;
+ list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+ if (obj->co_ops->coo_fiemap) {
+ result = obj->co_ops->coo_fiemap(env, obj, key, fiemap,
+ buflen);
+ if (result)
+ break;
+ }
+ }
+ return result;
+}
+EXPORT_SYMBOL(cl_object_fiemap);
+
+int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj,
+ struct cl_layout *cl)
+{
+ struct lu_object_header *top = obj->co_lu.lo_header;
+
+ list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+ if (obj->co_ops->coo_layout_get)
+ return obj->co_ops->coo_layout_get(env, obj, cl);
+ }
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(cl_object_layout_get);
+
+loff_t cl_object_maxbytes(struct cl_object *obj)
+{
+ struct lu_object_header *top = obj->co_lu.lo_header;
+ loff_t maxbytes = LLONG_MAX;
+
+ list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+ if (obj->co_ops->coo_maxbytes)
+ maxbytes = min_t(loff_t, obj->co_ops->coo_maxbytes(obj),
+ maxbytes);
+ }
+
+ return maxbytes;
+}
+EXPORT_SYMBOL(cl_object_maxbytes);
+
+/**
* Helper function removing all object locks, and marking object for
* deletion. All object pages must have been deleted at this point.
*
@@ -483,36 +544,20 @@ EXPORT_SYMBOL(cl_site_stats_print);
* bz20044, bz22683.
*/
-static LIST_HEAD(cl_envs);
-static unsigned int cl_envs_cached_nr;
-static unsigned int cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
- * for now.
- */
-static DEFINE_SPINLOCK(cl_envs_guard);
+static unsigned int cl_envs_cached_max = 32; /* XXX: prototype: arbitrary limit
+ * for now.
+ */
+static struct cl_env_cache {
+ rwlock_t cec_guard;
+ unsigned int cec_count;
+ struct list_head cec_envs;
+} *cl_envs = NULL;
struct cl_env {
void *ce_magic;
struct lu_env ce_lu;
struct lu_context ce_ses;
- /**
- * This allows cl_env to be entered into cl_env_hash which implements
- * the current thread -> client environment lookup.
- */
- struct hlist_node ce_node;
- /**
- * Owner for the current cl_env.
- *
- * If LL_TASK_CL_ENV is defined, this point to the owning current,
- * only for debugging purpose ;
- * Otherwise hash is used, and this is the key for cfs_hash.
- * Now current thread pid is stored. Note using thread pointer would
- * lead to unbalanced hash because of its specific allocation locality
- * and could be varied for different platforms and OSes, even different
- * OS versions.
- */
- void *ce_owner;
-
/*
* Linkage into global list of all client environments. Used for
* garbage collection.
@@ -536,122 +581,13 @@ static void cl_env_init0(struct cl_env *cle, void *debug)
{
LASSERT(cle->ce_ref == 0);
LASSERT(cle->ce_magic == &cl_env_init0);
- LASSERT(!cle->ce_debug && !cle->ce_owner);
+ LASSERT(!cle->ce_debug);
cle->ce_ref = 1;
cle->ce_debug = debug;
CL_ENV_INC(busy);
}
-/*
- * The implementation of using hash table to connect cl_env and thread
- */
-
-static struct cfs_hash *cl_env_hash;
-
-static unsigned cl_env_hops_hash(struct cfs_hash *lh,
- const void *key, unsigned mask)
-{
-#if BITS_PER_LONG == 64
- return cfs_hash_u64_hash((__u64)key, mask);
-#else
- return cfs_hash_u32_hash((__u32)key, mask);
-#endif
-}
-
-static void *cl_env_hops_obj(struct hlist_node *hn)
-{
- struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
-
- LASSERT(cle->ce_magic == &cl_env_init0);
- return (void *)cle;
-}
-
-static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn)
-{
- struct cl_env *cle = cl_env_hops_obj(hn);
-
- LASSERT(cle->ce_owner);
- return (key == cle->ce_owner);
-}
-
-static void cl_env_hops_noop(struct cfs_hash *hs, struct hlist_node *hn)
-{
- struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
-
- LASSERT(cle->ce_magic == &cl_env_init0);
-}
-
-static struct cfs_hash_ops cl_env_hops = {
- .hs_hash = cl_env_hops_hash,
- .hs_key = cl_env_hops_obj,
- .hs_keycmp = cl_env_hops_keycmp,
- .hs_object = cl_env_hops_obj,
- .hs_get = cl_env_hops_noop,
- .hs_put_locked = cl_env_hops_noop,
-};
-
-static inline struct cl_env *cl_env_fetch(void)
-{
- struct cl_env *cle;
-
- cle = cfs_hash_lookup(cl_env_hash, (void *)(long)current->pid);
- LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
- return cle;
-}
-
-static inline void cl_env_attach(struct cl_env *cle)
-{
- if (cle) {
- int rc;
-
- LASSERT(!cle->ce_owner);
- cle->ce_owner = (void *)(long)current->pid;
- rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
- &cle->ce_node);
- LASSERT(rc == 0);
- }
-}
-
-static inline void cl_env_do_detach(struct cl_env *cle)
-{
- void *cookie;
-
- LASSERT(cle->ce_owner == (void *)(long)current->pid);
- cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
- &cle->ce_node);
- LASSERT(cookie == cle);
- cle->ce_owner = NULL;
-}
-
-static int cl_env_store_init(void)
-{
- cl_env_hash = cfs_hash_create("cl_env",
- HASH_CL_ENV_BITS, HASH_CL_ENV_BITS,
- HASH_CL_ENV_BKT_BITS, 0,
- CFS_HASH_MIN_THETA,
- CFS_HASH_MAX_THETA,
- &cl_env_hops,
- CFS_HASH_RW_BKTLOCK);
- return cl_env_hash ? 0 : -ENOMEM;
-}
-
-static void cl_env_store_fini(void)
-{
- cfs_hash_putref(cl_env_hash);
-}
-
-static inline struct cl_env *cl_env_detach(struct cl_env *cle)
-{
- if (!cle)
- cle = cl_env_fetch();
-
- if (cle && cle->ce_owner)
- cl_env_do_detach(cle);
-
- return cle;
-}
-
static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
{
struct lu_env *env;
@@ -701,16 +637,20 @@ static struct lu_env *cl_env_obtain(void *debug)
{
struct cl_env *cle;
struct lu_env *env;
+ int cpu = get_cpu();
- spin_lock(&cl_envs_guard);
- LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
- if (cl_envs_cached_nr > 0) {
+ read_lock(&cl_envs[cpu].cec_guard);
+ LASSERT(equi(cl_envs[cpu].cec_count == 0,
+ list_empty(&cl_envs[cpu].cec_envs)));
+ if (cl_envs[cpu].cec_count > 0) {
int rc;
- cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
+ cle = container_of(cl_envs[cpu].cec_envs.next, struct cl_env,
+ ce_linkage);
list_del_init(&cle->ce_linkage);
- cl_envs_cached_nr--;
- spin_unlock(&cl_envs_guard);
+ cl_envs[cpu].cec_count--;
+ read_unlock(&cl_envs[cpu].cec_guard);
+ put_cpu();
env = &cle->ce_lu;
rc = lu_env_refill(env);
@@ -723,7 +663,8 @@ static struct lu_env *cl_env_obtain(void *debug)
env = ERR_PTR(rc);
}
} else {
- spin_unlock(&cl_envs_guard);
+ read_unlock(&cl_envs[cpu].cec_guard);
+ put_cpu();
env = cl_env_new(lu_context_tags_default,
lu_session_tags_default, debug);
}
@@ -735,27 +676,6 @@ static inline struct cl_env *cl_env_container(struct lu_env *env)
return container_of(env, struct cl_env, ce_lu);
}
-static struct lu_env *cl_env_peek(int *refcheck)
-{
- struct lu_env *env;
- struct cl_env *cle;
-
- CL_ENV_INC(lookup);
-
- /* check that we don't go far from untrusted pointer */
- CLASSERT(offsetof(struct cl_env, ce_magic) == 0);
-
- env = NULL;
- cle = cl_env_fetch();
- if (cle) {
- CL_ENV_INC(hit);
- env = &cle->ce_lu;
- *refcheck = ++cle->ce_ref;
- }
- CDEBUG(D_OTHER, "%d@%p\n", cle ? cle->ce_ref : 0, cle);
- return env;
-}
-
/**
* Returns lu_env: if there already is an environment associated with the
* current thread, it is returned, otherwise, new environment is allocated.
@@ -773,17 +693,13 @@ struct lu_env *cl_env_get(int *refcheck)
{
struct lu_env *env;
- env = cl_env_peek(refcheck);
- if (!env) {
- env = cl_env_obtain(__builtin_return_address(0));
- if (!IS_ERR(env)) {
- struct cl_env *cle;
+ env = cl_env_obtain(__builtin_return_address(0));
+ if (!IS_ERR(env)) {
+ struct cl_env *cle;
- cle = cl_env_container(env);
- cl_env_attach(cle);
- *refcheck = cle->ce_ref;
- CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
- }
+ cle = cl_env_container(env);
+ *refcheck = cle->ce_ref;
+ CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
}
return env;
}
@@ -798,7 +714,6 @@ struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
{
struct lu_env *env;
- LASSERT(!cl_env_peek(refcheck));
env = cl_env_new(tags, tags, __builtin_return_address(0));
if (!IS_ERR(env)) {
struct cl_env *cle;
@@ -813,7 +728,6 @@ EXPORT_SYMBOL(cl_env_alloc);
static void cl_env_exit(struct cl_env *cle)
{
- LASSERT(!cle->ce_owner);
lu_context_exit(&cle->ce_lu.le_ctx);
lu_context_exit(&cle->ce_ses);
}
@@ -826,20 +740,25 @@ static void cl_env_exit(struct cl_env *cle)
unsigned int cl_env_cache_purge(unsigned int nr)
{
struct cl_env *cle;
+ unsigned int i;
- spin_lock(&cl_envs_guard);
- for (; !list_empty(&cl_envs) && nr > 0; --nr) {
- cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
- list_del_init(&cle->ce_linkage);
- LASSERT(cl_envs_cached_nr > 0);
- cl_envs_cached_nr--;
- spin_unlock(&cl_envs_guard);
+ for_each_possible_cpu(i) {
+ write_lock(&cl_envs[i].cec_guard);
+ for (; !list_empty(&cl_envs[i].cec_envs) && nr > 0; --nr) {
+ cle = container_of(cl_envs[i].cec_envs.next,
+ struct cl_env, ce_linkage);
+ list_del_init(&cle->ce_linkage);
+ LASSERT(cl_envs[i].cec_count > 0);
+ cl_envs[i].cec_count--;
+ write_unlock(&cl_envs[i].cec_guard);
- cl_env_fini(cle);
- spin_lock(&cl_envs_guard);
+ cl_env_fini(cle);
+ write_lock(&cl_envs[i].cec_guard);
+ }
+ LASSERT(equi(cl_envs[i].cec_count == 0,
+ list_empty(&cl_envs[i].cec_envs)));
+ write_unlock(&cl_envs[i].cec_guard);
}
- LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
- spin_unlock(&cl_envs_guard);
return nr;
}
EXPORT_SYMBOL(cl_env_cache_purge);
@@ -862,8 +781,9 @@ void cl_env_put(struct lu_env *env, int *refcheck)
CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
if (--cle->ce_ref == 0) {
+ int cpu = get_cpu();
+
CL_ENV_DEC(busy);
- cl_env_detach(cle);
cle->ce_debug = NULL;
cl_env_exit(cle);
/*
@@ -872,107 +792,22 @@ void cl_env_put(struct lu_env *env, int *refcheck)
* Return environment to the cache only when it was allocated
* with the standard tags.
*/
- if (cl_envs_cached_nr < cl_envs_cached_max &&
+ if (cl_envs[cpu].cec_count < cl_envs_cached_max &&
(env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
(env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
- spin_lock(&cl_envs_guard);
- list_add(&cle->ce_linkage, &cl_envs);
- cl_envs_cached_nr++;
- spin_unlock(&cl_envs_guard);
+ read_lock(&cl_envs[cpu].cec_guard);
+ list_add(&cle->ce_linkage, &cl_envs[cpu].cec_envs);
+ cl_envs[cpu].cec_count++;
+ read_unlock(&cl_envs[cpu].cec_guard);
} else {
cl_env_fini(cle);
}
+ put_cpu();
}
}
EXPORT_SYMBOL(cl_env_put);
/**
- * Declares a point of re-entrancy.
- *
- * \see cl_env_reexit()
- */
-void *cl_env_reenter(void)
-{
- return cl_env_detach(NULL);
-}
-EXPORT_SYMBOL(cl_env_reenter);
-
-/**
- * Exits re-entrancy.
- */
-void cl_env_reexit(void *cookie)
-{
- cl_env_detach(NULL);
- cl_env_attach(cookie);
-}
-EXPORT_SYMBOL(cl_env_reexit);
-
-/**
- * Setup user-supplied \a env as a current environment. This is to be used to
- * guaranteed that environment exists even when cl_env_get() fails. It is up
- * to user to ensure proper concurrency control.
- *
- * \see cl_env_unplant()
- */
-void cl_env_implant(struct lu_env *env, int *refcheck)
-{
- struct cl_env *cle = cl_env_container(env);
-
- LASSERT(cle->ce_ref > 0);
-
- cl_env_attach(cle);
- cl_env_get(refcheck);
- CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
-}
-EXPORT_SYMBOL(cl_env_implant);
-
-/**
- * Detach environment installed earlier by cl_env_implant().
- */
-void cl_env_unplant(struct lu_env *env, int *refcheck)
-{
- struct cl_env *cle = cl_env_container(env);
-
- LASSERT(cle->ce_ref > 1);
-
- CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
-
- cl_env_detach(cle);
- cl_env_put(env, refcheck);
-}
-EXPORT_SYMBOL(cl_env_unplant);
-
-struct lu_env *cl_env_nested_get(struct cl_env_nest *nest)
-{
- struct lu_env *env;
-
- nest->cen_cookie = NULL;
- env = cl_env_peek(&nest->cen_refcheck);
- if (env) {
- if (!cl_io_is_going(env))
- return env;
- cl_env_put(env, &nest->cen_refcheck);
- nest->cen_cookie = cl_env_reenter();
- }
- env = cl_env_get(&nest->cen_refcheck);
- if (IS_ERR(env)) {
- cl_env_reexit(nest->cen_cookie);
- return env;
- }
-
- LASSERT(!cl_io_is_going(env));
- return env;
-}
-EXPORT_SYMBOL(cl_env_nested_get);
-
-void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env)
-{
- cl_env_put(env, &nest->cen_refcheck);
- cl_env_reexit(nest->cen_cookie);
-}
-EXPORT_SYMBOL(cl_env_nested_put);
-
-/**
* Converts struct ost_lvb to struct cl_attr.
*
* \see cl_attr2lvb
@@ -999,6 +834,10 @@ static int cl_env_percpu_init(void)
for_each_possible_cpu(i) {
struct lu_env *env;
+ rwlock_init(&cl_envs[i].cec_guard);
+ INIT_LIST_HEAD(&cl_envs[i].cec_envs);
+ cl_envs[i].cec_count = 0;
+
cle = &cl_env_percpu[i];
env = &cle->ce_lu;
@@ -1066,7 +905,6 @@ void cl_env_percpu_put(struct lu_env *env)
LASSERT(cle->ce_ref == 0);
CL_ENV_DEC(busy);
- cl_env_detach(cle);
cle->ce_debug = NULL;
put_cpu();
@@ -1080,7 +918,6 @@ struct lu_env *cl_env_percpu_get(void)
cle = &cl_env_percpu[get_cpu()];
cl_env_init0(cle, __builtin_return_address(0));
- cl_env_attach(cle);
return &cle->ce_lu;
}
EXPORT_SYMBOL(cl_env_percpu_get);
@@ -1144,51 +981,19 @@ LU_KEY_INIT_FINI(cl0, struct cl_thread_info);
static void *cl_key_init(const struct lu_context *ctx,
struct lu_context_key *key)
{
- struct cl_thread_info *info;
-
- info = cl0_key_init(ctx, key);
- if (!IS_ERR(info)) {
- size_t i;
-
- for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
- lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
- }
- return info;
+ return cl0_key_init(ctx, key);
}
static void cl_key_fini(const struct lu_context *ctx,
struct lu_context_key *key, void *data)
{
- struct cl_thread_info *info;
- size_t i;
-
- info = data;
- for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
- lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
cl0_key_fini(ctx, key, data);
}
-static void cl_key_exit(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct cl_thread_info *info = data;
- size_t i;
-
- for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) {
- LASSERT(info->clt_counters[i].ctc_nr_held == 0);
- LASSERT(info->clt_counters[i].ctc_nr_used == 0);
- LASSERT(info->clt_counters[i].ctc_nr_locks_acquired == 0);
- LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
- lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
- lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
- }
-}
-
static struct lu_context_key cl_key = {
.lct_tags = LCT_CL_THREAD,
.lct_init = cl_key_init,
.lct_fini = cl_key_fini,
- .lct_exit = cl_key_exit
};
static struct lu_kmem_descr cl_object_caches[] = {
@@ -1212,13 +1017,15 @@ int cl_global_init(void)
{
int result;
- result = cl_env_store_init();
- if (result)
- return result;
+ cl_envs = kzalloc(sizeof(*cl_envs) * num_possible_cpus(), GFP_KERNEL);
+ if (!cl_envs) {
+ result = -ENOMEM;
+ goto out;
+ }
result = lu_kmem_init(cl_object_caches);
if (result)
- goto out_store;
+ goto out_envs;
LU_CONTEXT_KEY_INIT(&cl_key);
result = lu_context_key_register(&cl_key);
@@ -1228,16 +1035,17 @@ int cl_global_init(void)
result = cl_env_percpu_init();
if (result)
/* no cl_env_percpu_fini on error */
- goto out_context;
+ goto out_keys;
return 0;
-out_context:
+out_keys:
lu_context_key_degister(&cl_key);
out_kmem:
lu_kmem_fini(cl_object_caches);
-out_store:
- cl_env_store_fini();
+out_envs:
+ kfree(cl_envs);
+out:
return result;
}
@@ -1249,5 +1057,5 @@ void cl_global_fini(void)
cl_env_percpu_fini();
lu_context_key_degister(&cl_key);
lu_kmem_fini(cl_object_caches);
- cl_env_store_fini();
+ kfree(cl_envs);
}
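The cl_env rework above replaces one global cache (plus a pid-keyed hash) with a small per-CPU cache: each CPU owns its own list, counter and lock, so get/put mostly touch only local state. A rough userspace sketch of that layout, where get_cpu() is faked with a fixed id and the struct and function names are illustrative only:

/* Rough sketch of a per-CPU object cache; names are illustrative only. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4
#define CACHE_MAX 32

struct env { struct env *next; };

static struct cache {
	pthread_rwlock_t guard;
	unsigned int count;
	struct env *head;
} caches[NR_CPUS];

static int fake_get_cpu(void) { return 0; }	/* stand-in for get_cpu() */

static struct env *env_get(void)
{
	struct cache *c = &caches[fake_get_cpu()];
	struct env *e = NULL;

	pthread_rwlock_wrlock(&c->guard);
	if (c->count > 0) {			/* reuse a cached env */
		e = c->head;
		c->head = e->next;
		c->count--;
	}
	pthread_rwlock_unlock(&c->guard);
	return e ? e : calloc(1, sizeof(*e));	/* else allocate fresh */
}

static void env_put(struct env *e)
{
	struct cache *c = &caches[fake_get_cpu()];

	pthread_rwlock_wrlock(&c->guard);
	if (c->count < CACHE_MAX) {		/* keep it for reuse */
		e->next = c->head;
		c->head = e;
		c->count++;
		e = NULL;
	}
	pthread_rwlock_unlock(&c->guard);
	free(e);				/* cache full: really free */
}

int main(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		pthread_rwlock_init(&caches[i].guard, NULL);
	env_put(env_get());
	printf("cached: %u\n", caches[0].count);
	return 0;
}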
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 63973ba096da..cd9a40ca4448 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -99,7 +99,6 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page)
PASSERT(env, page, list_empty(&page->cp_batch));
PASSERT(env, page, !page->cp_owner);
- PASSERT(env, page, !page->cp_req);
PASSERT(env, page, page->cp_state == CPS_FREEING);
while (!list_empty(&page->cp_layers)) {
@@ -150,7 +149,6 @@ struct cl_page *cl_page_alloc(const struct lu_env *env,
page->cp_type = type;
INIT_LIST_HEAD(&page->cp_layers);
INIT_LIST_HEAD(&page->cp_batch);
- INIT_LIST_HEAD(&page->cp_flight);
lu_ref_init(&page->cp_reference);
head = o->co_lu.lo_header;
list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
@@ -390,30 +388,6 @@ EXPORT_SYMBOL(cl_page_at);
__result; \
})
-#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...) \
-({ \
- const struct lu_env *__env = (_env); \
- struct cl_page *__page = (_page); \
- const struct cl_page_slice *__scan; \
- int __result; \
- ptrdiff_t __op = (_op); \
- int (*__method)_proto; \
- \
- __result = 0; \
- list_for_each_entry_reverse(__scan, &__page->cp_layers, \
- cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + __op); \
- if (__method) { \
- __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
- if (__result != 0) \
- break; \
- } \
- } \
- if (__result > 0) \
- __result = 0; \
- __result; \
-})
-
#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) \
do { \
const struct lu_env *__env = (_env); \
@@ -552,7 +526,6 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
io, nonblock);
if (result == 0) {
PASSERT(env, pg, !pg->cp_owner);
- PASSERT(env, pg, !pg->cp_req);
pg->cp_owner = cl_io_top(io);
cl_page_owner_set(pg);
if (pg->cp_state != CPS_FREEING) {
@@ -694,7 +667,7 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
PASSERT(env, pg, pg->cp_state != CPS_FREEING);
/*
- * Severe all ways to obtain new pointers to @pg.
+ * Sever all ways to obtain new pointers to @pg.
*/
cl_page_owner_clear(pg);
@@ -845,8 +818,6 @@ void cl_page_completion(const struct lu_env *env,
struct cl_sync_io *anchor = pg->cp_sync_io;
PASSERT(env, pg, crt < CRT_NR);
- /* cl_page::cp_req already cleared by the caller (osc_completion()) */
- PASSERT(env, pg, !pg->cp_req);
PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
@@ -860,16 +831,8 @@ void cl_page_completion(const struct lu_env *env,
if (anchor) {
LASSERT(pg->cp_sync_io == anchor);
pg->cp_sync_io = NULL;
- }
- /*
- * As page->cp_obj is pinned by a reference from page->cp_req, it is
- * safe to call cl_page_put() without risking object destruction in a
- * non-blocking context.
- */
- cl_page_put(env, pg);
-
- if (anchor)
cl_sync_io_note(env, anchor, ioret);
+ }
}
EXPORT_SYMBOL(cl_page_completion);
@@ -927,29 +890,6 @@ int cl_page_flush(const struct lu_env *env, struct cl_io *io,
EXPORT_SYMBOL(cl_page_flush);
/**
- * Checks whether page is protected by any extent lock is at least required
- * mode.
- *
- * \return the same as in cl_page_operations::cpo_is_under_lock() method.
- * \see cl_page_operations::cpo_is_under_lock()
- */
-int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, pgoff_t *max_index)
-{
- int rc;
-
- PINVRNT(env, page, cl_page_invariant(page));
-
- rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock),
- (const struct lu_env *,
- const struct cl_page_slice *,
- struct cl_io *, pgoff_t *),
- io, max_index);
- return rc;
-}
-EXPORT_SYMBOL(cl_page_is_under_lock);
-
-/**
* Tells transfer engine that only part of a page is to be transmitted.
*
* \see cl_page_operations::cpo_clip()
@@ -974,10 +914,10 @@ void cl_page_header_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_page *pg)
{
(*printer)(env, cookie,
- "page@%p[%d %p %d %d %p %p]\n",
+ "page@%p[%d %p %d %d %p]\n",
pg, atomic_read(&pg->cp_ref), pg->cp_obj,
pg->cp_state, pg->cp_type,
- pg->cp_owner, pg->cp_req);
+ pg->cp_owner);
}
EXPORT_SYMBOL(cl_page_header_print);
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index cf8bb2a2f40b..fa0d38ddccb2 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -907,6 +907,8 @@ struct obd_import *class_new_import(struct obd_device *obd)
INIT_LIST_HEAD(&imp->imp_sending_list);
INIT_LIST_HEAD(&imp->imp_delayed_list);
INIT_LIST_HEAD(&imp->imp_committed_list);
+ INIT_LIST_HEAD(&imp->imp_unreplied_list);
+ imp->imp_known_replied_xid = 0;
imp->imp_replay_cursor = &imp->imp_committed_list;
spin_lock_init(&imp->imp_lock);
imp->imp_last_success_conn = 0;
@@ -1408,13 +1410,33 @@ EXPORT_SYMBOL(obd_get_max_rpcs_in_flight);
int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max)
{
struct obd_request_slot_waiter *orsw;
+ const char *typ_name;
__u32 old;
int diff;
+ int rc;
int i;
if (max > OBD_MAX_RIF_MAX || max < 1)
return -ERANGE;
+ typ_name = cli->cl_import->imp_obd->obd_type->typ_name;
+ if (!strcmp(typ_name, LUSTRE_MDC_NAME)) {
+ /*
+ * adjust max_mod_rpcs_in_flight to ensure it is always
+ * strictly lower than max_rpcs_in_flight
+ */
+ if (max < 2) {
+ CERROR("%s: cannot set max_rpcs_in_flight to 1 because it must be higher than max_mod_rpcs_in_flight value\n",
+ cli->cl_import->imp_obd->obd_name);
+ return -ERANGE;
+ }
+ if (max <= cli->cl_max_mod_rpcs_in_flight) {
+ rc = obd_set_max_mod_rpcs_in_flight(cli, max - 1);
+ if (rc)
+ return rc;
+ }
+ }
+
spin_lock(&cli->cl_loi_list_lock);
old = cli->cl_max_rpcs_in_flight;
cli->cl_max_rpcs_in_flight = max;
@@ -1436,3 +1458,209 @@ int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max)
return 0;
}
EXPORT_SYMBOL(obd_set_max_rpcs_in_flight);
+
+int obd_set_max_mod_rpcs_in_flight(struct client_obd *cli, __u16 max)
+{
+ struct obd_connect_data *ocd;
+ u16 maxmodrpcs;
+ u16 prev;
+
+ if (max > OBD_MAX_RIF_MAX || max < 1)
+ return -ERANGE;
+
+ /* cannot exceed or equal max_rpcs_in_flight */
+ if (max >= cli->cl_max_rpcs_in_flight) {
+ CERROR("%s: can't set max_mod_rpcs_in_flight to a value (%hu) higher or equal to max_rpcs_in_flight value (%u)\n",
+ cli->cl_import->imp_obd->obd_name,
+ max, cli->cl_max_rpcs_in_flight);
+ return -ERANGE;
+ }
+
+ /* cannot exceed max modify RPCs in flight supported by the server */
+ ocd = &cli->cl_import->imp_connect_data;
+ if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
+ maxmodrpcs = ocd->ocd_maxmodrpcs;
+ else
+ maxmodrpcs = 1;
+ if (max > maxmodrpcs) {
+ CERROR("%s: can't set max_mod_rpcs_in_flight to a value (%hu) higher than max_mod_rpcs_per_client value (%hu) returned by the server at connection\n",
+ cli->cl_import->imp_obd->obd_name,
+ max, maxmodrpcs);
+ return -ERANGE;
+ }
+
+ spin_lock(&cli->cl_mod_rpcs_lock);
+
+ prev = cli->cl_max_mod_rpcs_in_flight;
+ cli->cl_max_mod_rpcs_in_flight = max;
+
+ /* wakeup waiters if limit has been increased */
+ if (cli->cl_max_mod_rpcs_in_flight > prev)
+ wake_up(&cli->cl_mod_rpcs_waitq);
+
+ spin_unlock(&cli->cl_mod_rpcs_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(obd_set_max_mod_rpcs_in_flight);
+
+#define pct(a, b) (b ? (a * 100) / b : 0)
+
+int obd_mod_rpc_stats_seq_show(struct client_obd *cli, struct seq_file *seq)
+{
+ unsigned long mod_tot = 0, mod_cum;
+ struct timespec64 now;
+ int i;
+
+ ktime_get_real_ts64(&now);
+
+ spin_lock(&cli->cl_mod_rpcs_lock);
+
+ seq_printf(seq, "snapshot_time: %llu.%9lu (secs.nsecs)\n",
+ (s64)now.tv_sec, (unsigned long)now.tv_nsec);
+ seq_printf(seq, "modify_RPCs_in_flight: %hu\n",
+ cli->cl_mod_rpcs_in_flight);
+
+ seq_puts(seq, "\n\t\t\tmodify\n");
+ seq_puts(seq, "rpcs in flight rpcs %% cum %%\n");
+
+ mod_tot = lprocfs_oh_sum(&cli->cl_mod_rpcs_hist);
+
+ mod_cum = 0;
+ for (i = 0; i < OBD_HIST_MAX; i++) {
+ unsigned long mod = cli->cl_mod_rpcs_hist.oh_buckets[i];
+
+ mod_cum += mod;
+ seq_printf(seq, "%d:\t\t%10lu %3lu %3lu\n",
+ i, mod, pct(mod, mod_tot),
+ pct(mod_cum, mod_tot));
+ if (mod_cum == mod_tot)
+ break;
+ }
+
+ spin_unlock(&cli->cl_mod_rpcs_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(obd_mod_rpc_stats_seq_show);
+#undef pct
+
+/*
+ * The number of modify RPCs sent in parallel is limited
+ * because the server has a finite number of slots per client to
+ * store request result and ensure reply reconstruction when needed.
+ * On the client, this limit is stored in cl_max_mod_rpcs_in_flight
+ * that takes into account server limit and cl_max_rpcs_in_flight
+ * value.
+ * On the MDC client, to avoid a potential deadlock (see Bugzilla 3462),
+ * one close request is allowed above the maximum.
+ */
+static inline bool obd_mod_rpc_slot_avail_locked(struct client_obd *cli,
+ bool close_req)
+{
+ bool avail;
+
+ /* A slot is available if
+ * - the number of modify RPCs in flight is less than the max, or
+ * - it's a close RPC and no other close request is in flight
+ */
+ avail = cli->cl_mod_rpcs_in_flight < cli->cl_max_mod_rpcs_in_flight ||
+ (close_req && !cli->cl_close_rpcs_in_flight);
+
+ return avail;
+}
+
+static inline bool obd_mod_rpc_slot_avail(struct client_obd *cli,
+ bool close_req)
+{
+ bool avail;
+
+ spin_lock(&cli->cl_mod_rpcs_lock);
+ avail = obd_mod_rpc_slot_avail_locked(cli, close_req);
+ spin_unlock(&cli->cl_mod_rpcs_lock);
+ return avail;
+}
+
+/* Get a modify RPC slot from the obd client @cli according
+ * to the kind of operation @opc that is going to be sent
+ * and the intent @it of the operation if it applies.
+ * If the maximum number of modify RPCs in flight is reached
+ * the thread is put to sleep.
+ * Returns the tag to be set in the request message. Tag 0
+ * is reserved for non-modifying requests.
+ */
+u16 obd_get_mod_rpc_slot(struct client_obd *cli, __u32 opc,
+ struct lookup_intent *it)
+{
+ struct l_wait_info lwi = LWI_INTR(NULL, NULL);
+ bool close_req = false;
+ u16 i, max;
+
+ /* read-only metadata RPCs don't consume a slot on MDT
+ * for reply reconstruction
+ */
+ if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
+ it->it_op == IT_LAYOUT || it->it_op == IT_READDIR))
+ return 0;
+
+ if (opc == MDS_CLOSE)
+ close_req = true;
+
+ do {
+ spin_lock(&cli->cl_mod_rpcs_lock);
+ max = cli->cl_max_mod_rpcs_in_flight;
+ if (obd_mod_rpc_slot_avail_locked(cli, close_req)) {
+ /* there is a slot available */
+ cli->cl_mod_rpcs_in_flight++;
+ if (close_req)
+ cli->cl_close_rpcs_in_flight++;
+ lprocfs_oh_tally(&cli->cl_mod_rpcs_hist,
+ cli->cl_mod_rpcs_in_flight);
+ /* find a free tag */
+ i = find_first_zero_bit(cli->cl_mod_tag_bitmap,
+ max + 1);
+ LASSERT(i < OBD_MAX_RIF_MAX);
+ LASSERT(!test_and_set_bit(i, cli->cl_mod_tag_bitmap));
+ spin_unlock(&cli->cl_mod_rpcs_lock);
+ /* tag 0 is reserved for non-modify RPCs */
+ return i + 1;
+ }
+ spin_unlock(&cli->cl_mod_rpcs_lock);
+
+ CDEBUG(D_RPCTRACE, "%s: sleeping for a modify RPC slot opc %u, max %hu\n",
+ cli->cl_import->imp_obd->obd_name, opc, max);
+
+ l_wait_event(cli->cl_mod_rpcs_waitq,
+ obd_mod_rpc_slot_avail(cli, close_req), &lwi);
+ } while (true);
+}
+EXPORT_SYMBOL(obd_get_mod_rpc_slot);
+
+/*
+ * Put a modify RPC slot from the obd client @cli according
+ * to the kind of operation @opc that has been sent and the
+ * intent @it of the operation if it applies.
+ */
+void obd_put_mod_rpc_slot(struct client_obd *cli, u32 opc,
+ struct lookup_intent *it, u16 tag)
+{
+ bool close_req = false;
+
+ if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
+ it->it_op == IT_LAYOUT || it->it_op == IT_READDIR))
+ return;
+
+ if (opc == MDS_CLOSE)
+ close_req = true;
+
+ spin_lock(&cli->cl_mod_rpcs_lock);
+ cli->cl_mod_rpcs_in_flight--;
+ if (close_req)
+ cli->cl_close_rpcs_in_flight--;
+ /* release the tag in the bitmap */
+ LASSERT(tag - 1 < OBD_MAX_RIF_MAX);
+ LASSERT(test_and_clear_bit(tag - 1, cli->cl_mod_tag_bitmap) != 0);
+ spin_unlock(&cli->cl_mod_rpcs_lock);
+ wake_up(&cli->cl_mod_rpcs_waitq);
+}
+EXPORT_SYMBOL(obd_put_mod_rpc_slot);
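The slot/tag logic in obd_get_mod_rpc_slot() pairs the in-flight counter with a bitmap: once a slot is granted, the first clear bit becomes the request tag, and tag 0 stays reserved for non-modifying RPCs. A small standalone sketch of the tag bookkeeping alone, using plain C stand-ins rather than the kernel's bitops:

/* Sketch of tag allocation with a bitmap; plain C stand-ins for the
 * kernel's find_first_zero_bit()/set_bit()/clear_bit(). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_TAGS 64

static uint64_t tag_bitmap;

static unsigned int get_tag(unsigned int max)
{
	unsigned int i;

	for (i = 0; i <= max; i++) {		/* find_first_zero_bit(map, max + 1) */
		if (!(tag_bitmap & (1ULL << i))) {
			tag_bitmap |= 1ULL << i;
			return i + 1;		/* tag 0 reserved for non-modify RPCs */
		}
	}
	return 0;				/* no free tag (caller would sleep) */
}

static void put_tag(unsigned int tag)
{
	assert(tag >= 1 && tag <= MAX_TAGS);
	assert(tag_bitmap & (1ULL << (tag - 1)));
	tag_bitmap &= ~(1ULL << (tag - 1));	/* release the tag */
}

int main(void)
{
	unsigned int t1 = get_tag(7), t2 = get_tag(7);

	printf("tags: %u %u\n", t1, t2);	/* 1 2 */
	put_tag(t1);
	printf("reused: %u\n", get_tag(7));	/* 1 again */
	put_tag(t2);
	return 0;
}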
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index be09e04b042f..9f5e8299d7e4 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -217,8 +217,8 @@ static ssize_t pinger_show(struct kobject *kobj, struct attribute *attr,
return sprintf(buf, "%s\n", "on");
}
-static ssize_t health_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
+static ssize_t
+health_check_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
bool healthy = true;
int i;
@@ -311,14 +311,14 @@ EXPORT_SYMBOL_GPL(debugfs_lustre_root);
LUSTRE_RO_ATTR(version);
LUSTRE_RO_ATTR(pinger);
-LUSTRE_RO_ATTR(health);
+LUSTRE_RO_ATTR(health_check);
LUSTRE_RW_ATTR(jobid_var);
LUSTRE_RW_ATTR(jobid_name);
static struct attribute *lustre_attrs[] = {
&lustre_attr_version.attr,
&lustre_attr_pinger.attr,
- &lustre_attr_health.attr,
+ &lustre_attr_health_check.attr,
&lustre_attr_jobid_name.attr,
&lustre_attr_jobid_var.attr,
NULL,
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
deleted file mode 100644
index 41b77a30feb3..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/linux/linux-obdo.c
- *
- * Object Devices Class Driver
- * These are the only exported functions, they provide some generic
- * infrastructure for managing object devices
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/module.h>
-#include "../../include/obd_class.h"
-#include "../../include/lustre/lustre_idl.h"
-
-#include <linux/fs.h>
-
-void obdo_refresh_inode(struct inode *dst, const struct obdo *src, u32 valid)
-{
- valid &= src->o_valid;
-
- if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME))
- CDEBUG(D_INODE,
- "valid %#llx, cur time %lu/%lu, new %llu/%llu\n",
- src->o_valid, LTIME_S(dst->i_mtime),
- LTIME_S(dst->i_ctime), src->o_mtime, src->o_ctime);
-
- if (valid & OBD_MD_FLATIME && src->o_atime > LTIME_S(dst->i_atime))
- LTIME_S(dst->i_atime) = src->o_atime;
- if (valid & OBD_MD_FLMTIME && src->o_mtime > LTIME_S(dst->i_mtime))
- LTIME_S(dst->i_mtime) = src->o_mtime;
- if (valid & OBD_MD_FLCTIME && src->o_ctime > LTIME_S(dst->i_ctime))
- LTIME_S(dst->i_ctime) = src->o_ctime;
- if (valid & OBD_MD_FLSIZE)
- i_size_write(dst, src->o_size);
- /* optimum IO size */
- if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits))
- dst->i_blkbits = ffs(src->o_blksize) - 1;
-
- if (dst->i_blkbits < PAGE_SHIFT)
- dst->i_blkbits = PAGE_SHIFT;
-
- /* allocation of space */
- if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks)
- /*
- * XXX shouldn't overflow be checked here like in
- * obdo_to_inode().
- */
- dst->i_blocks = src->o_blocks;
-}
-EXPORT_SYMBOL(obdo_refresh_inode);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index 43797f106745..736ea1067c93 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -43,8 +43,9 @@
#define DEBUG_SUBSYSTEM S_LOG
-#include "../include/obd_class.h"
+#include "../include/llog_swab.h"
#include "../include/lustre_log.h"
+#include "../include/obd_class.h"
#include "llog_internal.h"
/*
@@ -80,8 +81,7 @@ static void llog_free_handle(struct llog_handle *loghandle)
LASSERT(list_empty(&loghandle->u.phd.phd_entry));
else if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_CAT)
LASSERT(list_empty(&loghandle->u.chd.chd_head));
- LASSERT(sizeof(*loghandle->lgh_hdr) == LLOG_CHUNK_SIZE);
- kfree(loghandle->lgh_hdr);
+ kvfree(loghandle->lgh_hdr);
out:
kfree(loghandle);
}
@@ -115,20 +115,29 @@ static int llog_read_header(const struct lu_env *env,
rc = lop->lop_read_header(env, handle);
if (rc == LLOG_EEMPTY) {
struct llog_log_hdr *llh = handle->lgh_hdr;
+ size_t len;
+ /* lrh_len should be initialized in llog_init_handle */
handle->lgh_last_idx = 0; /* header is record with index 0 */
llh->llh_count = 1; /* for the header record */
llh->llh_hdr.lrh_type = LLOG_HDR_MAGIC;
- llh->llh_hdr.lrh_len = LLOG_CHUNK_SIZE;
- llh->llh_tail.lrt_len = LLOG_CHUNK_SIZE;
+ LASSERT(handle->lgh_ctxt->loc_chunk_size >= LLOG_MIN_CHUNK_SIZE);
+ llh->llh_hdr.lrh_len = handle->lgh_ctxt->loc_chunk_size;
llh->llh_hdr.lrh_index = 0;
- llh->llh_tail.lrt_index = 0;
llh->llh_timestamp = ktime_get_real_seconds();
if (uuid)
memcpy(&llh->llh_tgtuuid, uuid,
sizeof(llh->llh_tgtuuid));
llh->llh_bitmap_offset = offsetof(typeof(*llh), llh_bitmap);
- ext2_set_bit(0, llh->llh_bitmap);
+ /*
+ * Since updating the llog header might also call this function,
+ * reset the bitmap to 0 here.
+ */
+ len = llh->llh_hdr.lrh_len - llh->llh_bitmap_offset;
+ memset(LLOG_HDR_BITMAP(llh), 0, len - sizeof(llh->llh_tail));
+ ext2_set_bit(0, LLOG_HDR_BITMAP(llh));
+ LLOG_HDR_TAIL(llh)->lrt_len = llh->llh_hdr.lrh_len;
+ LLOG_HDR_TAIL(llh)->lrt_index = llh->llh_hdr.lrh_index;
rc = 0;
}
return rc;
@@ -137,16 +146,19 @@ static int llog_read_header(const struct lu_env *env,
int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
int flags, struct obd_uuid *uuid)
{
+ int chunk_size = handle->lgh_ctxt->loc_chunk_size;
enum llog_flag fmt = flags & LLOG_F_EXT_MASK;
struct llog_log_hdr *llh;
int rc;
LASSERT(!handle->lgh_hdr);
- llh = kzalloc(sizeof(*llh), GFP_NOFS);
+ LASSERT(chunk_size >= LLOG_MIN_CHUNK_SIZE);
+ llh = libcfs_kvzalloc(sizeof(*llh), GFP_NOFS);
if (!llh)
return -ENOMEM;
handle->lgh_hdr = llh;
+ handle->lgh_hdr_size = chunk_size;
/* first assign flags to use llog_client_ops */
llh->llh_flags = flags;
rc = llog_read_header(env, handle, uuid);
@@ -189,6 +201,7 @@ int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
LASSERT(list_empty(&handle->u.chd.chd_head));
INIT_LIST_HEAD(&handle->u.chd.chd_head);
llh->llh_size = sizeof(struct llog_logid_rec);
+ llh->llh_flags |= LLOG_F_IS_FIXSIZE;
} else if (!(flags & LLOG_F_IS_PLAIN)) {
CERROR("%s: unknown flags: %#x (expected %#x or %#x)\n",
handle->lgh_ctxt->loc_obd->obd_name,
@@ -198,7 +211,7 @@ int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
llh->llh_flags |= fmt;
out:
if (rc) {
- kfree(llh);
+ kvfree(llh);
handle->lgh_hdr = NULL;
}
return rc;
@@ -212,15 +225,21 @@ static int llog_process_thread(void *arg)
struct llog_log_hdr *llh = loghandle->lgh_hdr;
struct llog_process_cat_data *cd = lpi->lpi_catdata;
char *buf;
- __u64 cur_offset = LLOG_CHUNK_SIZE;
- __u64 last_offset;
+ u64 cur_offset, tmp_offset;
+ int chunk_size;
int rc = 0, index = 1, last_index;
int saved_index = 0;
int last_called_index = 0;
- LASSERT(llh);
+ if (!llh)
+ return -EINVAL;
+
+ cur_offset = llh->llh_hdr.lrh_len;
+ chunk_size = llh->llh_hdr.lrh_len;
+ /* expect chunk_size to be a power of two */
+ LASSERT(is_power_of_2(chunk_size));
- buf = kzalloc(LLOG_CHUNK_SIZE, GFP_NOFS);
+ buf = libcfs_kvzalloc(chunk_size, GFP_NOFS);
if (!buf) {
lpi->lpi_rc = -ENOMEM;
return 0;
@@ -233,41 +252,53 @@ static int llog_process_thread(void *arg)
if (cd && cd->lpcd_last_idx)
last_index = cd->lpcd_last_idx;
else
- last_index = LLOG_BITMAP_BYTES * 8 - 1;
-
- /* Record is not in this buffer. */
- if (index > last_index)
- goto out;
+ last_index = LLOG_HDR_BITMAP_SIZE(llh) - 1;
while (rc == 0) {
+ unsigned int buf_offset = 0;
struct llog_rec_hdr *rec;
+ bool partial_chunk;
+ off_t chunk_offset;
/* skip records not set in bitmap */
while (index <= last_index &&
- !ext2_test_bit(index, llh->llh_bitmap))
+ !ext2_test_bit(index, LLOG_HDR_BITMAP(llh)))
++index;
- LASSERT(index <= last_index + 1);
- if (index == last_index + 1)
+ if (index > last_index)
break;
-repeat:
+
CDEBUG(D_OTHER, "index: %d last_index %d\n",
index, last_index);
-
+repeat:
/* get the buf with our target record; avoid old garbage */
- memset(buf, 0, LLOG_CHUNK_SIZE);
- last_offset = cur_offset;
+ memset(buf, 0, chunk_size);
rc = llog_next_block(lpi->lpi_env, loghandle, &saved_index,
- index, &cur_offset, buf, LLOG_CHUNK_SIZE);
+ index, &cur_offset, buf, chunk_size);
if (rc)
goto out;
+ /*
+ * NB: after the llog_next_block() call, cur_offset is the
+ * offset of the next block after the one just read.
+ * The absolute offset of the current chunk is calculated
+ * from the cur_offset value and stored in the chunk_offset variable.
+ */
+ tmp_offset = cur_offset;
+ if (do_div(tmp_offset, chunk_size)) {
+ partial_chunk = true;
+ chunk_offset = cur_offset & ~(chunk_size - 1);
+ } else {
+ partial_chunk = false;
+ chunk_offset = cur_offset - chunk_size;
+ }
+
/* NB: when rec->lrh_len is accessed it is already swabbed
* since it is used at the "end" of the loop and the rec
* swabbing is done at the beginning of the loop.
*/
- for (rec = (struct llog_rec_hdr *)buf;
- (char *)rec < buf + LLOG_CHUNK_SIZE;
+ for (rec = (struct llog_rec_hdr *)(buf + buf_offset);
+ (char *)rec < buf + chunk_size;
rec = llog_rec_hdr_next(rec)) {
CDEBUG(D_OTHER, "processing rec 0x%p type %#x\n",
rec, rec->lrh_type);
@@ -278,15 +309,29 @@ repeat:
CDEBUG(D_OTHER, "after swabbing, type=%#x idx=%d\n",
rec->lrh_type, rec->lrh_index);
- if (rec->lrh_index == 0) {
- /* probably another rec just got added? */
- rc = 0;
- if (index <= loghandle->lgh_last_idx)
- goto repeat;
- goto out; /* no more records */
+ /*
+ * For a partial chunk the end of it is zeroed; check
+ * for index 0 to detect that case.
+ */
+ if (partial_chunk && !rec->lrh_index) {
+ /* A concurrent llog_add() might add new records
+ * while this llog is being processed; check whether
+ * that is the case and re-read the current chunk
+ * if so.
+ */
+ if (index > loghandle->lgh_last_idx) {
+ rc = 0;
+ goto out;
+ }
+ CDEBUG(D_OTHER, "Re-read last llog buffer for new records, index %u, last %u\n",
+ index, loghandle->lgh_last_idx);
+ /* save offset inside buffer for the re-read */
+ buf_offset = (char *)rec - (char *)buf;
+ cur_offset = chunk_offset;
+ goto repeat;
}
- if (rec->lrh_len == 0 ||
- rec->lrh_len > LLOG_CHUNK_SIZE) {
+
+ if (!rec->lrh_len || rec->lrh_len > chunk_size) {
CWARN("invalid length %d in llog record for index %d/%d\n",
rec->lrh_len,
rec->lrh_index, index);
@@ -300,32 +345,38 @@ repeat:
continue;
}
+ if (rec->lrh_index != index) {
+ CERROR("%s: Invalid record: index %u but expected %u\n",
+ loghandle->lgh_ctxt->loc_obd->obd_name,
+ rec->lrh_index, index);
+ rc = -ERANGE;
+ goto out;
+ }
+
CDEBUG(D_OTHER,
"lrh_index: %d lrh_len: %d (%d remains)\n",
rec->lrh_index, rec->lrh_len,
- (int)(buf + LLOG_CHUNK_SIZE - (char *)rec));
+ (int)(buf + chunk_size - (char *)rec));
loghandle->lgh_cur_idx = rec->lrh_index;
loghandle->lgh_cur_offset = (char *)rec - (char *)buf +
- last_offset;
+ chunk_offset;
/* if set, process the callback on this record */
- if (ext2_test_bit(index, llh->llh_bitmap)) {
+ if (ext2_test_bit(index, LLOG_HDR_BITMAP(llh))) {
rc = lpi->lpi_cb(lpi->lpi_env, loghandle, rec,
lpi->lpi_cbdata);
last_called_index = index;
if (rc)
goto out;
- } else {
- CDEBUG(D_OTHER, "Skipped index %d\n", index);
}
- /* next record, still in buffer? */
- ++index;
- if (index > last_index) {
+ /* exit if the last index is reached */
+ if (index >= last_index) {
rc = 0;
goto out;
}
+ index++;
}
}
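The chunk-offset computation in llog_process_thread() above is easy to mis-read: when cur_offset is not a multiple of chunk_size, the block just read was a partial chunk whose start is cur_offset rounded down to the chunk boundary; otherwise the chunk start is simply cur_offset minus one full chunk. A stand-alone illustration of that arithmetic, with an assumed 8 KiB chunk size and example offsets:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t chunk_size = 8192;			/* assumed chunk size */
	uint64_t offsets[] = { 20480, 16384 };		/* cur_offset after llog_next_block() */
	int i;

	for (i = 0; i < 2; i++) {
		uint64_t cur_offset = offsets[i];
		uint64_t chunk_offset;
		int partial = (cur_offset % chunk_size) != 0;

		if (partial)
			chunk_offset = cur_offset & ~(chunk_size - 1);
		else
			chunk_offset = cur_offset - chunk_size;

		printf("cur_offset=%llu partial=%d chunk_offset=%llu\n",
		       (unsigned long long)cur_offset, partial,
		       (unsigned long long)chunk_offset);
	}
	return 0;	/* prints chunk_offset 16384 then 8192 */
}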
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
index a4277d684614..8574ad401f66 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
@@ -158,6 +158,7 @@ int llog_setup(const struct lu_env *env, struct obd_device *obd,
mutex_init(&ctxt->loc_mutex);
ctxt->loc_exp = class_export_get(disk_obd->obd_self_export);
ctxt->loc_flags = LLOG_CTXT_FLAG_UNINITIALIZED;
+ ctxt->loc_chunk_size = LLOG_MIN_CHUNK_SIZE;
rc = llog_group_set_ctxt(olg, ctxt, index);
if (rc) {
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
index 8c4c1b3f1b45..723c212c6747 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
@@ -38,6 +38,7 @@
#define DEBUG_SUBSYSTEM S_LOG
+#include "../include/llog_swab.h"
#include "../include/lustre_log.h"
static void print_llogd_body(struct llogd_body *d)
@@ -244,7 +245,7 @@ void lustre_swab_llog_rec(struct llog_rec_hdr *rec)
__swab32s(&llh->llh_flags);
__swab32s(&llh->llh_size);
__swab32s(&llh->llh_cat_idx);
- tail = &llh->llh_tail;
+ tail = LLOG_HDR_TAIL(llh);
break;
}
case LLOG_LOGID_MAGIC:
@@ -290,8 +291,10 @@ static void print_llog_hdr(struct llog_log_hdr *h)
CDEBUG(D_OTHER, "\tllh_flags: %#x\n", h->llh_flags);
CDEBUG(D_OTHER, "\tllh_size: %#x\n", h->llh_size);
CDEBUG(D_OTHER, "\tllh_cat_idx: %#x\n", h->llh_cat_idx);
- CDEBUG(D_OTHER, "\tllh_tail.lrt_index: %#x\n", h->llh_tail.lrt_index);
- CDEBUG(D_OTHER, "\tllh_tail.lrt_len: %#x\n", h->llh_tail.lrt_len);
+ CDEBUG(D_OTHER, "\tllh_tail.lrt_index: %#x\n",
+ LLOG_HDR_TAIL(h)->lrt_index);
+ CDEBUG(D_OTHER, "\tllh_tail.lrt_len: %#x\n",
+ LLOG_HDR_TAIL(h)->lrt_len);
}
void lustre_swab_llog_hdr(struct llog_log_hdr *h)
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index 852a5acfefab..2c99717b0aba 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -100,9 +100,13 @@ static const char * const obd_connect_names[] = {
"lfsck",
"unknown",
"unlink_close",
- "unknown",
+ "multi_mod_rpcs",
"dir_stripe",
- "unknown",
+ "subtree",
+ "lock_ahead",
+ "bulk_mbits",
+ "compact_obdo",
+ "second_flags",
NULL
};
@@ -127,7 +131,7 @@ EXPORT_SYMBOL(obd_connect_flags2str);
static void obd_connect_data_seqprint(struct seq_file *m,
struct obd_connect_data *ocd)
{
- int flags;
+ u64 flags;
LASSERT(ocd);
flags = ocd->ocd_connect_flags;
@@ -172,6 +176,9 @@ static void obd_connect_data_seqprint(struct seq_file *m,
if (flags & OBD_CONNECT_MAXBYTES)
seq_printf(m, " max_object_bytes: %llx\n",
ocd->ocd_maxbytes);
+ if (flags & OBD_CONNECT_MULTIMODRPCS)
+ seq_printf(m, " max_mod_rpcs: %hu\n",
+ ocd->ocd_maxmodrpcs);
}
int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val,
@@ -396,10 +403,17 @@ int lprocfs_wr_uint(struct file *file, const char __user *buffer,
char dummy[MAX_STRING_SIZE + 1], *end;
unsigned long tmp;
- dummy[MAX_STRING_SIZE] = '\0';
- if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
+ if (count >= sizeof(dummy))
+ return -EINVAL;
+
+ if (count == 0)
+ return 0;
+
+ if (copy_from_user(dummy, buffer, count))
return -EFAULT;
+ dummy[count] = '\0';
+
tmp = simple_strtoul(dummy, &end, 0);
if (dummy == end)
return -EINVAL;
@@ -1275,7 +1289,8 @@ int ldebugfs_register_stats(struct dentry *parent, const char *name,
EXPORT_SYMBOL_GPL(ldebugfs_register_stats);
void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
- unsigned conf, const char *name, const char *units)
+ unsigned int conf, const char *name,
+ const char *units)
{
struct lprocfs_counter_header *header;
struct lprocfs_counter *percpu_cntr;
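The obd_connect_data_seqprint() change above widens "flags" from int to u64 because newer connect flags (multi_mod_rpcs, bulk_mbits, ...) occupy bits above bit 31 and would be silently dropped by a 32-bit assignment. A stand-alone illustration of that truncation, with an assumed bit position for the flag:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* assumed bit position; the real flag only needs to sit above bit 31 */
	uint64_t multi_mod_rpcs_flag = 1ULL << 50;
	uint64_t ocd_connect_flags = multi_mod_rpcs_flag;

	uint32_t old_flags = (uint32_t)ocd_connect_flags;	/* old 32-bit "int flags": high bits lost */
	uint64_t new_flags = ocd_connect_flags;			/* new "u64 flags": all bits kept */

	printf("32-bit copy keeps the flag: %d\n", (old_flags & multi_mod_rpcs_flag) != 0);	/* 0 */
	printf("64-bit copy keeps the flag: %d\n", (new_flags & multi_mod_rpcs_flag) != 0);	/* 1 */
	return 0;
}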
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 054e567e6c8d..7971562a3efd 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -68,6 +68,7 @@ enum {
#define LU_SITE_BITS_MIN 12
#define LU_SITE_BITS_MAX 24
+#define LU_SITE_BITS_MAX_CL 19
/**
* total 256 buckets, we don't want too many buckets because:
* - consume too much memory
@@ -338,7 +339,7 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
struct cfs_hash_bd bd2;
struct list_head dispose;
int did_sth;
- unsigned int start;
+ unsigned int start = 0;
int count;
int bnr;
unsigned int i;
@@ -351,7 +352,8 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
* Under LRU list lock, scan LRU list and move unreferenced objects to
* the dispose list, removing them from LRU and hash table.
*/
- start = s->ls_purge_start;
+ if (nr != ~0)
+ start = s->ls_purge_start;
bnr = (nr == ~0) ? -1 : nr / (int)CFS_HASH_NBKT(s->ls_obj_hash) + 1;
again:
/*
@@ -877,6 +879,9 @@ static unsigned long lu_htable_order(struct lu_device *top)
unsigned long cache_size;
unsigned long bits;
+ if (!strcmp(top->ld_type->ldt_name, LUSTRE_VVP_NAME))
+ bits_max = LU_SITE_BITS_MAX_CL;
+
/*
* Calculate hash table size, assuming that we want reasonable
* performance when 20% of total memory is occupied by cache of
@@ -909,8 +914,8 @@ static unsigned long lu_htable_order(struct lu_device *top)
return clamp_t(typeof(bits), bits, LU_SITE_BITS_MIN, bits_max);
}
-static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
- const void *key, unsigned mask)
+static unsigned int lu_obj_hop_hash(struct cfs_hash *hs,
+ const void *key, unsigned int mask)
{
struct lu_fid *fid = (struct lu_fid *)key;
__u32 hash;
@@ -1311,6 +1316,7 @@ enum {
static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
static DEFINE_SPINLOCK(lu_keys_guard);
+static atomic_t lu_key_initing_cnt = ATOMIC_INIT(0);
/**
* Global counter incremented whenever key is registered, unregistered,
@@ -1318,7 +1324,7 @@ static DEFINE_SPINLOCK(lu_keys_guard);
* lu_context_refill(). No locking is provided, as initialization and shutdown
* are supposed to be externally serialized.
*/
-static unsigned key_set_version;
+static unsigned int key_set_version;
/**
* Register new key.
@@ -1385,6 +1391,19 @@ void lu_context_key_degister(struct lu_context_key *key)
++key_set_version;
spin_lock(&lu_keys_guard);
key_fini(&lu_shrink_env.le_ctx, key->lct_index);
+
+ /**
+ * Wait until all transient contexts referencing this key have
+ * run the lu_context_key::lct_fini() method.
+ */
+ while (atomic_read(&key->lct_used) > 1) {
+ spin_unlock(&lu_keys_guard);
+ CDEBUG(D_INFO, "lu_context_key_degister: \"%s\" %p, %d\n",
+ key->lct_owner ? key->lct_owner->name : "", key,
+ atomic_read(&key->lct_used));
+ schedule();
+ spin_lock(&lu_keys_guard);
+ }
if (lu_keys[key->lct_index]) {
lu_keys[key->lct_index] = NULL;
lu_ref_fini(&key->lct_reference);
@@ -1507,14 +1526,25 @@ void lu_context_key_quiesce(struct lu_context_key *key)
if (!(key->lct_tags & LCT_QUIESCENT)) {
/*
- * XXX layering violation.
- */
- cl_env_cache_purge(~0);
- key->lct_tags |= LCT_QUIESCENT;
- /*
* XXX memory barrier has to go here.
*/
spin_lock(&lu_keys_guard);
+ key->lct_tags |= LCT_QUIESCENT;
+
+ /**
+ * Wait until all lu_context_key::lct_init() methods
+ * have completed.
+ */
+ while (atomic_read(&lu_key_initing_cnt) > 0) {
+ spin_unlock(&lu_keys_guard);
+ CDEBUG(D_INFO, "lu_context_key_quiesce: \"%s\" %p, %d (%d)\n",
+ key->lct_owner ? key->lct_owner->name : "",
+ key, atomic_read(&key->lct_used),
+ atomic_read(&lu_key_initing_cnt));
+ schedule();
+ spin_lock(&lu_keys_guard);
+ }
+
list_for_each_entry(ctx, &lu_context_remembered, lc_remember)
key_fini(ctx, key->lct_index);
spin_unlock(&lu_keys_guard);
@@ -1546,6 +1576,19 @@ static int keys_fill(struct lu_context *ctx)
{
unsigned int i;
+ /*
+ * Serialisation with lu_context_key_quiesce() is needed, but some
+ * "key->lct_init()" methods call kernel memory allocation routines
+ * and cannot run while a spin_lock is held.
+ * "lu_keys_guard" is held while incrementing "lu_key_initing_cnt"
+ * to mark the start of the serialised section.
+ * An atomic_t counter is still used so that the lock does not have
+ * to be reacquired when decrementing it.
+ */
+ spin_lock(&lu_keys_guard);
+ atomic_inc(&lu_key_initing_cnt);
+ spin_unlock(&lu_keys_guard);
+
LINVRNT(ctx->lc_value);
for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
struct lu_context_key *key;
@@ -1563,12 +1606,19 @@ static int keys_fill(struct lu_context *ctx)
LINVRNT(key->lct_init);
LINVRNT(key->lct_index == i);
+ LASSERT(key->lct_owner);
+ if (!(ctx->lc_tags & LCT_NOREF) &&
+ !try_module_get(key->lct_owner)) {
+ /* module is unloading, skip this key */
+ continue;
+ }
+
value = key->lct_init(ctx, key);
- if (IS_ERR(value))
+ if (unlikely(IS_ERR(value))) {
+ atomic_dec(&lu_key_initing_cnt);
return PTR_ERR(value);
+ }
- if (!(ctx->lc_tags & LCT_NOREF))
- try_module_get(key->lct_owner);
lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
atomic_inc(&key->lct_used);
/*
@@ -1582,6 +1632,7 @@ static int keys_fill(struct lu_context *ctx)
}
ctx->lc_version = key_set_version;
}
+ atomic_dec(&lu_key_initing_cnt);
return 0;
}
@@ -1663,6 +1714,9 @@ void lu_context_exit(struct lu_context *ctx)
ctx->lc_state = LCS_LEFT;
if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value) {
for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
+ /* could race with key quiescency */
+ if (ctx->lc_tags & LCT_REMEMBER)
+ spin_lock(&lu_keys_guard);
if (ctx->lc_value[i]) {
struct lu_context_key *key;
@@ -1671,6 +1725,8 @@ void lu_context_exit(struct lu_context *ctx)
key->lct_exit(ctx,
key, ctx->lc_value[i]);
}
+ if (ctx->lc_tags & LCT_REMEMBER)
+ spin_unlock(&lu_keys_guard);
}
}
}
@@ -1930,7 +1986,7 @@ int lu_site_stats_print(const struct lu_site *s, struct seq_file *m)
memset(&stats, 0, sizeof(stats));
lu_site_stats_get(s->ls_obj_hash, &stats, 1);
- seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d %d\n",
+ seq_printf(m, "%d/%d %d/%ld %d %d %d %d %d %d %d %d\n",
stats.lss_busy,
stats.lss_total,
stats.lss_populated,
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index bbed1b72d52e..9ca84c7d49de 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -35,12 +35,15 @@
*/
#define DEBUG_SUBSYSTEM S_CLASS
-#include "../include/obd_class.h"
+
#include <linux/string.h>
+
#include "../include/lustre/lustre_ioctl.h"
-#include "../include/lustre_log.h"
+#include "../include/llog_swab.h"
#include "../include/lprocfs_status.h"
+#include "../include/lustre_log.h"
#include "../include/lustre_param.h"
+#include "../include/obd_class.h"
#include "llog_internal.h"
@@ -446,7 +449,7 @@ static int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg)
LASSERT(obd->obd_self_export);
/* Precleanup, we must make sure all exports get destroyed. */
- err = obd_precleanup(obd, OBD_CLEANUP_EXPORTS);
+ err = obd_precleanup(obd);
if (err)
CERROR("Precleanup %s returned %d\n",
obd->obd_name, err);
@@ -585,16 +588,21 @@ static int class_del_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
}
static LIST_HEAD(lustre_profile_list);
+static DEFINE_SPINLOCK(lustre_profile_list_lock);
struct lustre_profile *class_get_profile(const char *prof)
{
struct lustre_profile *lprof;
+ spin_lock(&lustre_profile_list_lock);
list_for_each_entry(lprof, &lustre_profile_list, lp_list) {
if (!strcmp(lprof->lp_profile, prof)) {
+ lprof->lp_refs++;
+ spin_unlock(&lustre_profile_list_lock);
return lprof;
}
}
+ spin_unlock(&lustre_profile_list_lock);
return NULL;
}
EXPORT_SYMBOL(class_get_profile);
@@ -639,7 +647,11 @@ static int class_add_profile(int proflen, char *prof, int osclen, char *osc,
}
}
+ spin_lock(&lustre_profile_list_lock);
+ lprof->lp_refs = 1;
+ lprof->lp_list_deleted = false;
list_add(&lprof->lp_list, &lustre_profile_list);
+ spin_unlock(&lustre_profile_list_lock);
return err;
free_lp_dt:
@@ -659,27 +671,59 @@ void class_del_profile(const char *prof)
lprof = class_get_profile(prof);
if (lprof) {
+ spin_lock(&lustre_profile_list_lock);
+ /* because get profile increments the ref counter */
+ lprof->lp_refs--;
list_del(&lprof->lp_list);
- kfree(lprof->lp_profile);
- kfree(lprof->lp_dt);
- kfree(lprof->lp_md);
- kfree(lprof);
+ lprof->lp_list_deleted = true;
+ spin_unlock(&lustre_profile_list_lock);
+
+ class_put_profile(lprof);
}
}
EXPORT_SYMBOL(class_del_profile);
+void class_put_profile(struct lustre_profile *lprof)
+{
+ spin_lock(&lustre_profile_list_lock);
+ if (--lprof->lp_refs > 0) {
+ LASSERT(lprof->lp_refs > 0);
+ spin_unlock(&lustre_profile_list_lock);
+ return;
+ }
+ spin_unlock(&lustre_profile_list_lock);
+
+ /* confirm not a negative number */
+ LASSERT(!lprof->lp_refs);
+
+ /*
+ * class_del_profile() or class_del_profiles() must have been
+ * called on the target profile, or lustre_profile_list will be
+ * corrupted.
+ */
+ LASSERT(lprof->lp_list_deleted);
+ kfree(lprof->lp_profile);
+ kfree(lprof->lp_dt);
+ kfree(lprof->lp_md);
+ kfree(lprof);
+}
+EXPORT_SYMBOL(class_put_profile);
+
/* COMPAT_146 */
void class_del_profiles(void)
{
struct lustre_profile *lprof, *n;
+ spin_lock(&lustre_profile_list_lock);
list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) {
list_del(&lprof->lp_list);
- kfree(lprof->lp_profile);
- kfree(lprof->lp_dt);
- kfree(lprof->lp_md);
- kfree(lprof);
+ lprof->lp_list_deleted = true;
+ spin_unlock(&lustre_profile_list_lock);
+
+ class_put_profile(lprof);
+
+ spin_lock(&lustre_profile_list_lock);
}
+ spin_unlock(&lustre_profile_list_lock);
}
EXPORT_SYMBOL(class_del_profiles);
@@ -1406,8 +1450,8 @@ EXPORT_SYMBOL(class_manual_cleanup);
* uuid<->export lustre hash operations
*/
-static unsigned
-uuid_hash(struct cfs_hash *hs, const void *key, unsigned mask)
+static unsigned int
+uuid_hash(struct cfs_hash *hs, const void *key, unsigned int mask)
{
return cfs_hash_djb2_hash(((struct obd_uuid *)key)->uuid,
sizeof(((struct obd_uuid *)key)->uuid), mask);
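With the locking and reference counting added to obd_config.c above, every profile lookup must now be balanced by a put; the entry is freed only once its reference count reaches zero and it has been unlinked from lustre_profile_list by class_del_profile() or class_del_profiles(). A hedged sketch of the expected caller pattern (example_use_profile() is hypothetical and the lp_dt/lp_md usage is illustrative only):

static void example_use_profile(const char *name)
{
	struct lustre_profile *lprof;

	lprof = class_get_profile(name);	/* takes a reference, or returns NULL */
	if (!lprof)
		return;

	/* ... set up the data/metadata stacks from lprof->lp_dt / lprof->lp_md ... */

	class_put_profile(lprof);		/* drop the reference taken above */
}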
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index 0d3a3b05a637..2283e920d839 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -261,7 +261,7 @@ int lustre_start_mgc(struct super_block *sb)
rc = obd_get_info(NULL, obd->obd_self_export,
strlen(KEY_CONN_DATA), KEY_CONN_DATA,
- &vallen, data, NULL);
+ &vallen, data);
LASSERT(rc == 0);
has_ir = OCD_HAS_FLAG(data, IMP_RECOV);
if (has_ir ^ !(*flags & LMD_FLG_NOIR)) {
@@ -382,7 +382,7 @@ int lustre_start_mgc(struct super_block *sb)
/* We connect to the MGS at setup, and don't disconnect until cleanup */
data->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_AT |
OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV |
- OBD_CONNECT_LVB_TYPE;
+ OBD_CONNECT_LVB_TYPE | OBD_CONNECT_BULK_MBITS;
#if OBD_OCD_VERSION(3, 0, 53, 0) > LUSTRE_VERSION_CODE
data->ocd_connect_flags |= OBD_CONNECT_MNE_SWAB;
@@ -1216,8 +1216,7 @@ static struct file_system_type lustre_fs_type = {
.name = "lustre",
.mount = lustre_mount,
.kill_sb = lustre_kill_super,
- .fs_flags = FS_BINARY_MOUNTDATA | FS_REQUIRES_DEV |
- FS_RENAME_DOES_D_MOVE,
+ .fs_flags = FS_REQUIRES_DEV | FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("lustre");
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
index 79104a66da96..c52b9e07d7dd 100644
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/obdo.c
@@ -124,68 +124,3 @@ void obdo_to_ioobj(const struct obdo *oa, struct obd_ioobj *ioobj)
ioobj->ioo_max_brw = 0;
}
EXPORT_SYMBOL(obdo_to_ioobj);
-
-static void iattr_from_obdo(struct iattr *attr, const struct obdo *oa,
- u32 valid)
-{
- valid &= oa->o_valid;
-
- if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME))
- CDEBUG(D_INODE, "valid %#llx, new time %llu/%llu\n",
- oa->o_valid, oa->o_mtime, oa->o_ctime);
-
- attr->ia_valid = 0;
- if (valid & OBD_MD_FLATIME) {
- LTIME_S(attr->ia_atime) = oa->o_atime;
- attr->ia_valid |= ATTR_ATIME;
- }
- if (valid & OBD_MD_FLMTIME) {
- LTIME_S(attr->ia_mtime) = oa->o_mtime;
- attr->ia_valid |= ATTR_MTIME;
- }
- if (valid & OBD_MD_FLCTIME) {
- LTIME_S(attr->ia_ctime) = oa->o_ctime;
- attr->ia_valid |= ATTR_CTIME;
- }
- if (valid & OBD_MD_FLSIZE) {
- attr->ia_size = oa->o_size;
- attr->ia_valid |= ATTR_SIZE;
- }
-#if 0 /* you shouldn't be able to change a file's type with setattr */
- if (valid & OBD_MD_FLTYPE) {
- attr->ia_mode = (attr->ia_mode & ~S_IFMT) |
- (oa->o_mode & S_IFMT);
- attr->ia_valid |= ATTR_MODE;
- }
-#endif
- if (valid & OBD_MD_FLMODE) {
- attr->ia_mode = (attr->ia_mode & S_IFMT) |
- (oa->o_mode & ~S_IFMT);
- attr->ia_valid |= ATTR_MODE;
- if (!in_group_p(make_kgid(&init_user_ns, oa->o_gid)) &&
- !capable(CFS_CAP_FSETID))
- attr->ia_mode &= ~S_ISGID;
- }
- if (valid & OBD_MD_FLUID) {
- attr->ia_uid = make_kuid(&init_user_ns, oa->o_uid);
- attr->ia_valid |= ATTR_UID;
- }
- if (valid & OBD_MD_FLGID) {
- attr->ia_gid = make_kgid(&init_user_ns, oa->o_gid);
- attr->ia_valid |= ATTR_GID;
- }
-}
-
-void md_from_obdo(struct md_op_data *op_data, const struct obdo *oa, u32 valid)
-{
- iattr_from_obdo(&op_data->op_attr, oa, valid);
- if (valid & OBD_MD_FLBLOCKS) {
- op_data->op_attr_blocks = oa->o_blocks;
- op_data->op_attr.ia_valid |= ATTR_BLOCKS;
- }
- if (valid & OBD_MD_FLFLAGS) {
- op_data->op_attr_flags = oa->o_flags;
- op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
- }
-}
-EXPORT_SYMBOL(md_from_obdo);
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 505582ff4d1e..549076193bde 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -55,7 +55,7 @@ struct echo_device {
struct echo_client_obd *ed_ec;
struct cl_site ed_site_myself;
- struct cl_site *ed_site;
+ struct lu_site *ed_site;
struct lu_device *ed_next;
};
@@ -505,9 +505,6 @@ static const struct lu_device_operations echo_device_lu_ops = {
/** @} echo_lu_dev_ops */
-static const struct cl_device_operations echo_device_cl_ops = {
-};
-
/** \defgroup echo_init Setup and teardown
*
* Init and fini functions for echo client.
@@ -527,17 +524,19 @@ static int echo_site_init(const struct lu_env *env, struct echo_device *ed)
}
rc = lu_site_init_finish(&site->cs_lu);
- if (rc)
+ if (rc) {
+ cl_site_fini(site);
return rc;
+ }
- ed->ed_site = site;
+ ed->ed_site = &site->cs_lu;
return 0;
}
static void echo_site_fini(const struct lu_env *env, struct echo_device *ed)
{
if (ed->ed_site) {
- cl_site_fini(ed->ed_site);
+ lu_site_fini(ed->ed_site);
ed->ed_site = NULL;
}
}
@@ -561,16 +560,10 @@ static void echo_thread_key_fini(const struct lu_context *ctx,
kmem_cache_free(echo_thread_kmem, info);
}
-static void echo_thread_key_exit(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
-}
-
static struct lu_context_key echo_thread_key = {
.lct_tags = LCT_CL_THREAD,
.lct_init = echo_thread_key_init,
.lct_fini = echo_thread_key_fini,
- .lct_exit = echo_thread_key_exit
};
static void *echo_session_key_init(const struct lu_context *ctx,
@@ -592,16 +585,10 @@ static void echo_session_key_fini(const struct lu_context *ctx,
kmem_cache_free(echo_session_kmem, session);
}
-static void echo_session_key_exit(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
-}
-
static struct lu_context_key echo_session_key = {
.lct_tags = LCT_SESSION,
.lct_init = echo_session_key_init,
.lct_fini = echo_session_key_fini,
- .lct_exit = echo_session_key_exit
};
LU_TYPE_INIT_FINI(echo, &echo_thread_key, &echo_session_key);
@@ -630,7 +617,6 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
goto out_free;
cd->cd_lu_dev.ld_ops = &echo_device_lu_ops;
- cd->cd_ops = &echo_device_cl_ops;
obd = class_name2obd(lustre_cfg_string(cfg, 0));
LASSERT(obd);
@@ -674,7 +660,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
goto out_cleanup;
}
- next->ld_site = &ed->ed_site->cs_lu;
+ next->ld_site = ed->ed_site;
rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
next->ld_type->ldt_name,
NULL);
@@ -741,7 +727,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
CDEBUG(D_INFO, "echo device:%p is going to be freed, next = %p\n",
ed, next);
- lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+ lu_site_purge(env, ed->ed_site, -1);
/* check if there are objects still alive.
* It shouldn't have any object because lu_site_purge would cleanup
@@ -754,7 +740,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
spin_unlock(&ec->ec_lock);
/* purge again */
- lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+ lu_site_purge(env, ed->ed_site, -1);
CDEBUG(D_INFO,
"Waiting for the reference of echo object to be dropped\n");
@@ -766,7 +752,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
CERROR("echo_client still has objects at cleanup time, wait for 1 second\n");
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(1));
- lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+ lu_site_purge(env, ed->ed_site, -1);
spin_lock(&ec->ec_lock);
}
spin_unlock(&ec->ec_lock);
@@ -780,11 +766,13 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
while (next)
next = next->ld_type->ldt_ops->ldto_device_free(env, next);
- LASSERT(ed->ed_site == lu2cl_site(d->ld_site));
+ LASSERT(ed->ed_site == d->ld_site);
echo_site_fini(env, ed);
cl_device_fini(&ed->ed_cl);
kfree(ed);
+ cl_env_cache_purge(~0);
+
return NULL;
}
@@ -1100,7 +1088,7 @@ out:
static u64 last_object_id;
static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
- struct obdo *oa, struct obd_trans_info *oti)
+ struct obdo *oa)
{
struct echo_object *eco;
struct echo_client_obd *ec = ed->ed_ec;
@@ -1117,7 +1105,7 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
if (!ostid_id(&oa->o_oi))
ostid_set_id(&oa->o_oi, ++last_object_id);
- rc = obd_create(env, ec->ec_exp, oa, oti);
+ rc = obd_create(env, ec->ec_exp, oa);
if (rc != 0) {
CERROR("Cannot create objects: rc = %d\n", rc);
goto failed;
@@ -1137,7 +1125,7 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
failed:
if (created && rc)
- obd_destroy(env, ec->ec_exp, oa, oti);
+ obd_destroy(env, ec->ec_exp, oa);
if (rc)
CERROR("create object failed with: rc = %d\n", rc);
return rc;
@@ -1237,8 +1225,7 @@ static int echo_client_page_debug_check(struct page *page, u64 id,
static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
struct echo_object *eco, u64 offset,
- u64 count, int async,
- struct obd_trans_info *oti)
+ u64 count, int async)
{
u32 npages;
struct brw_page *pga;
@@ -1332,12 +1319,11 @@ static int echo_client_prep_commit(const struct lu_env *env,
struct obd_export *exp, int rw,
struct obdo *oa, struct echo_object *eco,
u64 offset, u64 count,
- u64 batch, struct obd_trans_info *oti,
- int async)
+ u64 batch, int async)
{
struct obd_ioobj ioo;
struct niobuf_local *lnb;
- struct niobuf_remote *rnb;
+ struct niobuf_remote rnb;
u64 off;
u64 npages, tot_pages;
int i, ret = 0, brw_flags = 0;
@@ -1349,9 +1335,7 @@ static int echo_client_prep_commit(const struct lu_env *env,
tot_pages = count >> PAGE_SHIFT;
lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS);
- rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS);
-
- if (!lnb || !rnb) {
+ if (!lnb) {
ret = -ENOMEM;
goto out;
}
@@ -1363,26 +1347,22 @@ static int echo_client_prep_commit(const struct lu_env *env,
off = offset;
- for (; tot_pages; tot_pages -= npages) {
+ for (; tot_pages > 0; tot_pages -= npages) {
int lpages;
if (tot_pages < npages)
npages = tot_pages;
- for (i = 0; i < npages; i++, off += PAGE_SIZE) {
- rnb[i].rnb_offset = off;
- rnb[i].rnb_len = PAGE_SIZE;
- rnb[i].rnb_flags = brw_flags;
- }
-
- ioo.ioo_bufcnt = npages;
+ rnb.rnb_offset = off;
+ rnb.rnb_len = npages * PAGE_SIZE;
+ rnb.rnb_flags = brw_flags;
+ ioo.ioo_bufcnt = 1;
+ off += npages * PAGE_SIZE;
lpages = npages;
- ret = obd_preprw(env, rw, exp, oa, 1, &ioo, rnb, &lpages,
- lnb, oti);
+ ret = obd_preprw(env, rw, exp, oa, 1, &ioo, &rnb, &lpages, lnb);
if (ret != 0)
goto out;
- LASSERT(lpages == npages);
for (i = 0; i < lpages; i++) {
struct page *page = lnb[i].lnb_page;
@@ -1401,24 +1381,21 @@ static int echo_client_prep_commit(const struct lu_env *env,
if (rw == OBD_BRW_WRITE)
echo_client_page_debug_setup(page, rw,
- ostid_id(&oa->o_oi),
- rnb[i].rnb_offset,
- rnb[i].rnb_len);
+ ostid_id(&oa->o_oi),
+ lnb[i].lnb_file_offset,
+ lnb[i].lnb_len);
else
echo_client_page_debug_check(page,
- ostid_id(&oa->o_oi),
- rnb[i].rnb_offset,
- rnb[i].rnb_len);
+ ostid_id(&oa->o_oi),
+ lnb[i].lnb_file_offset,
+ lnb[i].lnb_len);
}
- ret = obd_commitrw(env, rw, exp, oa, 1, &ioo,
- rnb, npages, lnb, oti, ret);
+ ret = obd_commitrw(env, rw, exp, oa, 1, &ioo, &rnb, npages, lnb,
+ ret);
if (ret != 0)
goto out;
- /* Reset oti otherwise it would confuse ldiskfs. */
- memset(oti, 0, sizeof(*oti));
-
/* Reuse env context. */
lu_context_exit((struct lu_context *)&env->le_ctx);
lu_context_enter((struct lu_context *)&env->le_ctx);
@@ -1426,14 +1403,12 @@ static int echo_client_prep_commit(const struct lu_env *env,
out:
kfree(lnb);
- kfree(rnb);
return ret;
}
static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
struct obd_export *exp,
- struct obd_ioctl_data *data,
- struct obd_trans_info *dummy_oti)
+ struct obd_ioctl_data *data)
{
struct obd_device *obd = class_exp2obd(exp);
struct echo_device *ed = obd2echo_dev(obd);
@@ -1470,15 +1445,13 @@ static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
case 1:
/* fall through */
case 2:
- rc = echo_client_kbrw(ed, rw, oa,
- eco, data->ioc_offset,
- data->ioc_count, async, dummy_oti);
+ rc = echo_client_kbrw(ed, rw, oa, eco, data->ioc_offset,
+ data->ioc_count, async);
break;
case 3:
- rc = echo_client_prep_commit(env, ec->ec_exp, rw, oa,
- eco, data->ioc_offset,
- data->ioc_count, data->ioc_plen1,
- dummy_oti, async);
+ rc = echo_client_prep_commit(env, ec->ec_exp, rw, oa, eco,
+ data->ioc_offset, data->ioc_count,
+ data->ioc_plen1, async);
break;
default:
rc = -EINVAL;
@@ -1496,16 +1469,11 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
struct echo_client_obd *ec = ed->ed_ec;
struct echo_object *eco;
struct obd_ioctl_data *data = karg;
- struct obd_trans_info dummy_oti;
struct lu_env *env;
- struct oti_req_ack_lock *ack_lock;
struct obdo *oa;
struct lu_fid fid;
int rw = OBD_BRW_READ;
int rc = 0;
- int i;
-
- memset(&dummy_oti, 0, sizeof(dummy_oti));
oa = &data->ioc_obdo1;
if (!(oa->o_valid & OBD_MD_FLGROUP)) {
@@ -1535,7 +1503,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
goto out;
}
- rc = echo_create_object(env, ed, oa, &dummy_oti);
+ rc = echo_create_object(env, ed, oa);
goto out;
case OBD_IOC_DESTROY:
@@ -1546,7 +1514,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
rc = echo_get_object(&eco, ed, oa);
if (rc == 0) {
- rc = obd_destroy(env, ec->ec_exp, oa, &dummy_oti);
+ rc = obd_destroy(env, ec->ec_exp, oa);
if (rc == 0)
eco->eo_deleted = 1;
echo_put_object(eco);
@@ -1556,11 +1524,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
case OBD_IOC_GETATTR:
rc = echo_get_object(&eco, ed, oa);
if (rc == 0) {
- struct obd_info oinfo = {
- .oi_oa = oa,
- };
-
- rc = obd_getattr(env, ec->ec_exp, &oinfo);
+ rc = obd_getattr(env, ec->ec_exp, oa);
echo_put_object(eco);
}
goto out;
@@ -1573,11 +1537,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
rc = echo_get_object(&eco, ed, oa);
if (rc == 0) {
- struct obd_info oinfo = {
- .oi_oa = oa,
- };
-
- rc = obd_setattr(env, ec->ec_exp, &oinfo, NULL);
+ rc = obd_setattr(env, ec->ec_exp, oa);
echo_put_object(eco);
}
goto out;
@@ -1591,7 +1551,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
rw = OBD_BRW_WRITE;
/* fall through */
case OBD_IOC_BRW_READ:
- rc = echo_client_brw_ioctl(env, rw, exp, data, &dummy_oti);
+ rc = echo_client_brw_ioctl(env, rw, exp, data);
goto out;
default:
@@ -1604,14 +1564,6 @@ out:
lu_env_fini(env);
kfree(env);
- /* XXX this should be in a helper also called by target_send_reply */
- for (ack_lock = dummy_oti.oti_ack_locks, i = 0; i < 4;
- i++, ack_lock++) {
- if (!ack_lock->mode)
- break;
- ldlm_lock_decref(&ack_lock->lock, ack_lock->mode);
- }
-
return rc;
}
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index f0062d44ee03..575b2969ad83 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -162,7 +162,7 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj,
pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
if (pages_number <= 0 ||
- pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
+ pages_number >= OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
pages_number > totalram_pages / 4) /* 1/4 of RAM */
return -ERANGE;
@@ -183,10 +183,12 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
seq_printf(m,
"used_mb: %ld\n"
- "busy_cnt: %ld\n",
+ "busy_cnt: %ld\n"
+ "reclaim: %llu\n",
(atomic_long_read(&cli->cl_lru_in_list) +
atomic_long_read(&cli->cl_lru_busy)) >> shift,
- atomic_long_read(&cli->cl_lru_busy));
+ atomic_long_read(&cli->cl_lru_busy),
+ cli->cl_lru_reclaim);
return 0;
}
@@ -585,7 +587,8 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj,
chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
/* max_pages_per_rpc must be chunk aligned */
val = (val + ~chunk_mask) & chunk_mask;
- if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) {
+ if (!val || (ocd->ocd_brw_size &&
+ val > ocd->ocd_brw_size >> PAGE_SHIFT)) {
return -ERANGE;
}
spin_lock(&cli->cl_loi_list_lock);
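The max_pages_per_rpc_store() hunk above keeps the value chunk aligned with val = (val + ~chunk_mask) & chunk_mask, i.e. it rounds the requested page count up to the next multiple of the chunk size. A stand-alone illustration with assumed cl_chunkbits and PAGE_SHIFT values:

#include <stdio.h>

int main(void)
{
	unsigned long cl_chunkbits = 16, page_shift = 12;	/* assumed: 16 pages per chunk */
	unsigned long chunk_mask = ~((1UL << (cl_chunkbits - page_shift)) - 1);
	unsigned long val = 20;					/* requested pages per RPC */

	val = (val + ~chunk_mask) & chunk_mask;			/* round up to a whole chunk */
	printf("%lu\n", val);					/* prints 32, i.e. two 16-page chunks */
	return 0;
}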
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 4bbe219add98..b0f030c6c9c9 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -360,6 +360,7 @@ static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
RB_CLEAR_NODE(&ext->oe_node);
ext->oe_obj = obj;
+ cl_object_get(osc2cl(obj));
atomic_set(&ext->oe_refc, 1);
atomic_set(&ext->oe_users, 0);
INIT_LIST_HEAD(&ext->oe_link);
@@ -398,6 +399,7 @@ static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext)
LDLM_LOCK_PUT(ext->oe_dlmlock);
ext->oe_dlmlock = NULL;
}
+ cl_object_put(env, osc2cl(ext->oe_obj));
osc_extent_free(ext);
}
}
@@ -959,7 +961,7 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
if (rc == -ETIMEDOUT) {
OSC_EXTENT_DUMP(D_ERROR, ext,
"%s: wait ext to %u timedout, recovery in progress?\n",
- osc_export(obj)->exp_obd->obd_name, state);
+ cli_name(osc_cli(obj)), state);
lwi = LWI_INTR(NULL, NULL);
rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
@@ -977,7 +979,6 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
bool partial)
{
- struct cl_env_nest nest;
struct lu_env *env;
struct cl_io *io;
struct osc_object *obj = ext->oe_obj;
@@ -990,6 +991,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
int grants = 0;
int nr_pages = 0;
int rc = 0;
+ int refcheck;
LASSERT(sanity_check(ext) == 0);
EASSERT(ext->oe_state == OES_TRUNC, ext);
@@ -999,7 +1001,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
* We can't use that env from osc_cache_truncate_start() because
* it's from lov_io_sub and not fully initialized.
*/
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
io = &osc_env_info(env)->oti_io;
io->ci_obj = cl_object_top(osc2cl(obj));
rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
@@ -1085,7 +1087,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
out:
cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
return rc;
}
@@ -1327,7 +1329,6 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
{
struct osc_page *opg = oap2osc_page(oap);
struct cl_page *page = oap2cl_page(oap);
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
enum cl_req_type crt;
int srvlock;
@@ -1338,25 +1339,10 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
"cp_state:%u, cmd:%d\n", page->cp_state, cmd);
LASSERT(opg->ops_transfer_pinned);
- /*
- * page->cp_req can be NULL if io submission failed before
- * cl_req was allocated.
- */
- if (page->cp_req)
- cl_req_page_done(env, page);
- LASSERT(!page->cp_req);
-
crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
/* Clear opg->ops_transfer_pinned before VM lock is released. */
opg->ops_transfer_pinned = 0;
- spin_lock(&obj->oo_seatbelt);
- LASSERT(opg->ops_submitter);
- LASSERT(!list_empty(&opg->ops_inflight));
- list_del_init(&opg->ops_inflight);
- opg->ops_submitter = NULL;
- spin_unlock(&obj->oo_seatbelt);
-
opg->ops_submit_time = 0;
srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK;
@@ -1380,16 +1366,17 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
lu_ref_del(&page->cp_reference, "transfer", page);
cl_page_completion(env, page, crt, rc);
+ cl_page_put(env, page);
return 0;
}
#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do { \
struct client_obd *__tmp = (cli); \
- CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %ld/%lu " \
+ CDEBUG(lvl, "%s: grant { dirty: %lu/%lu dirty_pages: %ld/%lu " \
"dropped: %ld avail: %ld, reserved: %ld, flight: %d }" \
"lru {in list: %ld, left: %ld, waiters: %d }" fmt "\n", \
- __tmp->cl_import->imp_obd->obd_name, \
+ cli_name(__tmp), \
__tmp->cl_dirty_pages, __tmp->cl_dirty_max_pages, \
atomic_long_read(&obd_dirty_pages), obd_max_dirty_pages, \
__tmp->cl_lost_grant, __tmp->cl_avail_grant, \
@@ -1627,7 +1614,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
osc_io_unplug_async(env, cli, NULL);
CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
- cli->cl_import->imp_obd->obd_name, &ocw, oap);
+ cli_name(cli), &ocw, oap);
rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
@@ -1671,7 +1658,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
break;
default:
CDEBUG(D_CACHE, "%s: event for cache space @ %p never arrived due to %d, fall back to sync i/o\n",
- cli->cl_import->imp_obd->obd_name, &ocw, rc);
+ cli_name(cli), &ocw, rc);
break;
}
out:
@@ -1931,7 +1918,8 @@ static int try_to_add_extent_for_io(struct client_obd *cli,
}
if (tmp->oe_srvlock != ext->oe_srvlock ||
- !tmp->oe_grants != !ext->oe_grants)
+ !tmp->oe_grants != !ext->oe_grants ||
+ tmp->oe_no_merge || ext->oe_no_merge)
return 0;
/* remove break for strict check */
@@ -2250,14 +2238,9 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
return 0;
if (!async) {
- /* disable osc_lru_shrink() temporarily to avoid
- * potential stack overrun problem. LU-2859
- */
- atomic_inc(&cli->cl_lru_shrinkers);
spin_lock(&cli->cl_loi_list_lock);
osc_check_rpcs(env, cli);
spin_unlock(&cli->cl_loi_list_lock);
- atomic_dec(&cli->cl_lru_shrinkers);
} else {
CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
LASSERT(cli->cl_writeback_work);
@@ -2479,7 +2462,6 @@ int osc_teardown_async_page(const struct lu_env *env,
struct osc_object *obj, struct osc_page *ops)
{
struct osc_async_page *oap = &ops->ops_oap;
- struct osc_extent *ext = NULL;
int rc = 0;
LASSERT(oap->oap_magic == OAP_MAGIC);
@@ -2487,12 +2469,15 @@ int osc_teardown_async_page(const struct lu_env *env,
CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
oap, ops, osc_index(oap2osc(oap)));
- osc_object_lock(obj);
if (!list_empty(&oap->oap_rpc_item)) {
CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
rc = -EBUSY;
} else if (!list_empty(&oap->oap_pending_item)) {
+ struct osc_extent *ext = NULL;
+
+ osc_object_lock(obj);
ext = osc_extent_lookup(obj, osc_index(oap2osc(oap)));
+ osc_object_unlock(obj);
/* only truncated pages are allowed to be taken out.
* See osc_extent_truncate() and osc_cache_truncate_start()
* for details.
@@ -2502,10 +2487,9 @@ int osc_teardown_async_page(const struct lu_env *env,
osc_index(oap2osc(oap)));
rc = -EBUSY;
}
+ if (ext)
+ osc_extent_put(env, ext);
}
- osc_object_unlock(obj);
- if (ext)
- osc_extent_put(env, ext);
return rc;
}
@@ -2666,11 +2650,13 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
struct osc_async_page *oap, *tmp;
int page_count = 0;
int mppr = cli->cl_max_pages_per_rpc;
+ bool can_merge = true;
pgoff_t start = CL_PAGE_EOF;
pgoff_t end = 0;
list_for_each_entry(oap, list, oap_pending_item) {
- pgoff_t index = osc_index(oap2osc(oap));
+ struct osc_page *opg = oap2osc_page(oap);
+ pgoff_t index = osc_index(opg);
if (index > end)
end = index;
@@ -2678,6 +2664,9 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
start = index;
++page_count;
mppr <<= (page_count > mppr);
+
+ if (unlikely(opg->ops_from > 0 || opg->ops_to < PAGE_SIZE))
+ can_merge = false;
}
ext = osc_extent_alloc(obj);
@@ -2691,6 +2680,7 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
ext->oe_rw = !!(cmd & OBD_BRW_READ);
ext->oe_sync = 1;
+ ext->oe_no_merge = !can_merge;
ext->oe_urgent = 1;
ext->oe_start = start;
ext->oe_end = end;
@@ -3158,7 +3148,8 @@ static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
struct cl_page *page = ops->ops_cl.cpl_page;
/* refresh non-overlapped index */
- tmp = osc_dlmlock_at_pgoff(env, osc, index, 0, 0);
+ tmp = osc_dlmlock_at_pgoff(env, osc, index,
+ OSC_DAP_FL_TEST_LOCK);
if (tmp) {
__u64 end = tmp->l_policy_data.l_extent.end;
/* Cache the first-non-overlapped index so as to skip
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index 9c8de15c309c..cce55a9689f0 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -77,7 +77,6 @@ struct osc_io {
/** write osc_lock for this IO, used by osc_extent_find(). */
struct osc_lock *oi_write_osclock;
- struct obd_info oi_info;
struct obdo oi_oa;
struct osc_async_cbargs {
bool opc_rpc_sent;
@@ -87,13 +86,6 @@ struct osc_io {
};
/**
- * State of transfer for osc.
- */
-struct osc_req {
- struct cl_req_slice or_cl;
-};
-
-/**
* State maintained by osc layer for the duration of a system call.
*/
struct osc_session {
@@ -103,7 +95,7 @@ struct osc_session {
#define OTI_PVEC_SIZE 256
struct osc_thread_info {
struct ldlm_res_id oti_resname;
- ldlm_policy_data_t oti_policy;
+ union ldlm_policy_data oti_policy;
struct cl_lock_descr oti_descr;
struct cl_attr oti_attr;
struct lustre_handle oti_handle;
@@ -116,6 +108,7 @@ struct osc_thread_info {
pgoff_t oti_next_index;
pgoff_t oti_fn_index; /* first non-overlapped index */
struct cl_sync_io oti_anchor;
+ struct cl_req_attr oti_req_attr;
};
struct osc_object {
@@ -127,16 +120,6 @@ struct osc_object {
int oo_contended;
unsigned long oo_contention_time;
/**
- * List of pages in transfer.
- */
- struct list_head oo_inflight[CRT_NR];
- /**
- * Lock, protecting osc_page::ops_inflight, because a seat-belt is
- * locked during take-off and landing.
- */
- spinlock_t oo_seatbelt;
-
- /**
* used by the osc to keep track of what objects to build into rpcs.
* Protected by client_obd->cli_loi_list_lock.
*/
@@ -364,15 +347,6 @@ struct osc_page {
*/
struct list_head ops_lru;
/**
- * Linkage into a per-osc_object list of pages in flight. For
- * debugging.
- */
- struct list_head ops_inflight;
- /**
- * Thread that submitted this page for transfer. For debugging.
- */
- struct task_struct *ops_submitter;
- /**
* Submit time - the time when the page is starting RPC. For debugging.
*/
unsigned long ops_submit_time;
@@ -382,7 +356,6 @@ extern struct kmem_cache *osc_lock_kmem;
extern struct kmem_cache *osc_object_kmem;
extern struct kmem_cache *osc_thread_kmem;
extern struct kmem_cache *osc_session_kmem;
-extern struct kmem_cache *osc_req_kmem;
extern struct kmem_cache *osc_extent_kmem;
extern struct lu_device_type osc_device_type;
@@ -396,15 +369,14 @@ int osc_lock_init(const struct lu_env *env,
const struct cl_io *io);
int osc_io_init(const struct lu_env *env,
struct cl_object *obj, struct cl_io *io);
-int osc_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req);
struct lu_object *osc_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
struct cl_page *page, pgoff_t ind);
-void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
+void osc_index2policy(union ldlm_policy_data *policy,
+ const struct cl_object *obj,
pgoff_t start, pgoff_t end);
int osc_lvb_print(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct ost_lvb *lvb);
@@ -554,6 +526,16 @@ static inline struct osc_page *oap2osc_page(struct osc_async_page *oap)
return (struct osc_page *)container_of(oap, struct osc_page, ops_oap);
}
+static inline struct osc_page *
+osc_cl_page_osc(struct cl_page *page, struct osc_object *osc)
+{
+ const struct cl_page_slice *slice;
+
+ LASSERT(osc);
+ slice = cl_object_page_slice(&osc->oo_cl, page);
+ return cl2osc_page(slice);
+}
+
static inline struct osc_lock *cl2osc_lock(const struct cl_lock_slice *slice)
{
LINVRNT(osc_is_object(&slice->cls_obj->co_lu));
@@ -615,6 +597,10 @@ struct osc_extent {
oe_rw:1,
/** sync extent, queued by osc_queue_sync_pages() */
oe_sync:1,
+ /** Set if this extent has partial, sync pages.
+ * Extents with partial page(s) can't be merged with others in one RPC.
+ */
+ oe_no_merge:1,
oe_srvlock:1,
oe_memalloc:1,
/** an ACTIVE extent is going to be truncated, so when this extent
diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c
index 83d30c135ba4..c5d62aeaeab5 100644
--- a/drivers/staging/lustre/lustre/osc/osc_dev.c
+++ b/drivers/staging/lustre/lustre/osc/osc_dev.c
@@ -29,7 +29,7 @@
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*
- * Implementation of cl_device, cl_req for OSC layer.
+ * Implementation of cl_device, for OSC layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
*/
@@ -49,7 +49,6 @@ struct kmem_cache *osc_lock_kmem;
struct kmem_cache *osc_object_kmem;
struct kmem_cache *osc_thread_kmem;
struct kmem_cache *osc_session_kmem;
-struct kmem_cache *osc_req_kmem;
struct kmem_cache *osc_extent_kmem;
struct kmem_cache *osc_quota_kmem;
@@ -75,11 +74,6 @@ struct lu_kmem_descr osc_caches[] = {
.ckd_size = sizeof(struct osc_session)
},
{
- .ckd_cache = &osc_req_kmem,
- .ckd_name = "osc_req_kmem",
- .ckd_size = sizeof(struct osc_req)
- },
- {
.ckd_cache = &osc_extent_kmem,
.ckd_name = "osc_extent_kmem",
.ckd_size = sizeof(struct osc_extent)
@@ -94,8 +88,6 @@ struct lu_kmem_descr osc_caches[] = {
}
};
-struct lock_class_key osc_ast_guard_class;
-
/*****************************************************************************
*
* Type conversions.
@@ -178,10 +170,6 @@ static const struct lu_device_operations osc_lu_ops = {
.ldo_recovery_complete = NULL
};
-static const struct cl_device_operations osc_cl_ops = {
- .cdo_req_init = osc_req_init
-};
-
static int osc_device_init(const struct lu_env *env, struct lu_device *d,
const char *name, struct lu_device *next)
{
@@ -220,7 +208,6 @@ static struct lu_device *osc_device_alloc(const struct lu_env *env,
cl_device_init(&od->od_cl, t);
d = osc2lu_dev(od);
d->ld_ops = &osc_lu_ops;
- od->od_cl.cd_ops = &osc_cl_ops;
/* Setup OSC OBD */
obd = class_name2obd(lustre_cfg_string(cfg, 0));
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
index 67fe0a254991..688783dcc1e4 100644
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -107,26 +107,24 @@ typedef int (*osc_enqueue_upcall_f)(void *cookie, struct lustre_handle *lockh,
int rc);
int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- __u64 *flags, ldlm_policy_data_t *policy,
+ __u64 *flags, union ldlm_policy_data *policy,
struct ost_lvb *lvb, int kms_valid,
osc_enqueue_upcall_f upcall,
void *cookie, struct ldlm_enqueue_info *einfo,
struct ptlrpc_request_set *rqset, int async, int agl);
-int osc_cancel_base(struct lustre_handle *lockh, __u32 mode);
int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- __u32 type, ldlm_policy_data_t *policy, __u32 mode,
+ __u32 type, union ldlm_policy_data *policy, __u32 mode,
__u64 *flags, void *data, struct lustre_handle *lockh,
int unref);
-int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- obd_enqueue_update_f upcall, void *cookie,
- struct ptlrpc_request_set *rqset);
-int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
+int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
+ obd_enqueue_update_f upcall, void *cookie,
+ struct ptlrpc_request_set *rqset);
+int osc_punch_base(struct obd_export *exp, struct obdo *oa,
obd_enqueue_update_f upcall, void *cookie,
struct ptlrpc_request_set *rqset);
-int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
+int osc_sync_base(struct osc_object *exp, struct obdo *oa,
obd_enqueue_update_f upcall, void *cookie,
struct ptlrpc_request_set *rqset);
@@ -135,7 +133,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
struct list_head *ext_list, int cmd);
long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
long target, bool force);
-long osc_lru_reclaim(struct client_obd *cli);
+long osc_lru_reclaim(struct client_obd *cli, unsigned long npages);
unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock);
@@ -157,6 +155,11 @@ static inline unsigned long rpcs_in_flight(struct client_obd *cli)
return cli->cl_r_in_flight + cli->cl_w_in_flight;
}
+static inline char *cli_name(struct client_obd *cli)
+{
+ return cli->cl_import->imp_obd->obd_name;
+}
+
struct osc_device {
struct cl_device od_cl;
struct obd_export *od_exp;
@@ -192,15 +195,27 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[]);
int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
struct obd_quotactl *oqctl);
-int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl);
-int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk);
void osc_inc_unstable_pages(struct ptlrpc_request *req);
void osc_dec_unstable_pages(struct ptlrpc_request *req);
bool osc_over_unstable_soft_limit(struct client_obd *cli);
+/**
+ * Bit flags for osc_dlmlock_at_pgoff().
+ */
+enum osc_dap_flags {
+ /**
+	 * Just check if the desired lock exists; it won't hold a reference
+	 * count on the lock.
+ */
+ OSC_DAP_FL_TEST_LOCK = BIT(0),
+ /**
+ * Return the lock even if it is being canceled.
+ */
+ OSC_DAP_FL_CANCELING = BIT(1),
+};
+
struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
struct osc_object *obj, pgoff_t index,
- int pending, int canceling);
+ enum osc_dap_flags flags);
#endif /* OSC_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index 8a559cbcdd0c..228a97c098fe 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -49,12 +49,6 @@
*
*/
-static struct osc_req *cl2osc_req(const struct cl_req_slice *slice)
-{
- LINVRNT(slice->crs_dev->cd_lu_dev.ld_type == &osc_device_type);
- return container_of0(slice, struct osc_req, or_cl);
-}
-
static struct osc_io *cl2osc_io(const struct lu_env *env,
const struct cl_io_slice *slice)
{
@@ -64,20 +58,6 @@ static struct osc_io *cl2osc_io(const struct lu_env *env,
return oio;
}
-static struct osc_page *osc_cl_page_osc(struct cl_page *page,
- struct osc_object *osc)
-{
- const struct cl_page_slice *slice;
-
- if (osc)
- slice = cl_object_page_slice(&osc->oo_cl, page);
- else
- slice = cl_page_at(page, &osc_device_type);
- LASSERT(slice);
-
- return cl2osc_page(slice);
-}
-
/*****************************************************************************
*
* io operations.
@@ -88,6 +68,45 @@ static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io)
{
}
+static void osc_read_ahead_release(const struct lu_env *env, void *cbdata)
+{
+ struct ldlm_lock *dlmlock = cbdata;
+ struct lustre_handle lockh;
+
+ ldlm_lock2handle(dlmlock, &lockh);
+ ldlm_lock_decref(&lockh, LCK_PR);
+ LDLM_LOCK_PUT(dlmlock);
+}
+
+static int osc_io_read_ahead(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ pgoff_t start, struct cl_read_ahead *ra)
+{
+ struct osc_object *osc = cl2osc(ios->cis_obj);
+ struct ldlm_lock *dlmlock;
+ int result = -ENODATA;
+
+ dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0);
+ if (dlmlock) {
+ LASSERT(dlmlock->l_ast_data == osc);
+ if (dlmlock->l_req_mode != LCK_PR) {
+ struct lustre_handle lockh;
+
+ ldlm_lock2handle(dlmlock, &lockh);
+ ldlm_lock_addref(&lockh, LCK_PR);
+ ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
+ }
+
+ ra->cra_end = cl_index(osc2cl(osc),
+ dlmlock->l_policy_data.l_extent.end);
+ ra->cra_release = osc_read_ahead_release;
+ ra->cra_cbdata = dlmlock;
+ result = 0;
+ }
+
+ return result;
+}
+
/**
* An implementation of cl_io_operations::cio_io_submit() method for osc
* layer. Iterates over pages in the in-queue, prepares each for io by calling
@@ -334,7 +353,7 @@ static int osc_io_rw_iter_init(const struct lu_env *env,
npages = max_pages;
c = atomic_long_read(cli->cl_lru_left);
- if (c < npages && osc_lru_reclaim(cli) > 0)
+ if (c < npages && osc_lru_reclaim(cli, npages) > 0)
c = atomic_long_read(cli->cl_lru_left);
while (c >= npages) {
if (c == atomic_long_cmpxchg(cli->cl_lru_left, c, c - npages)) {
@@ -343,6 +362,17 @@ static int osc_io_rw_iter_init(const struct lu_env *env,
}
c = atomic_long_read(cli->cl_lru_left);
}
+ if (atomic_long_read(cli->cl_lru_left) < max_pages) {
+ /*
+ * If there aren't enough pages in the per-OSC LRU then
+ * wake up the LRU thread to try and clear out space, so
+ * we don't block if pages are being dirtied quickly.
+ */
+ CDEBUG(D_CACHE, "%s: queue LRU, left: %lu/%ld.\n",
+ cli_name(cli), atomic_long_read(cli->cl_lru_left),
+ max_pages);
+ (void)ptlrpcd_queue_work(cli->cl_lru_work);
+ }
return 0;
}
@@ -446,7 +476,6 @@ static int osc_io_setattr_start(const struct lu_env *env,
__u64 size = io->u.ci_setattr.sa_attr.lvb_size;
unsigned int ia_valid = io->u.ci_setattr.sa_valid;
int result = 0;
- struct obd_info oinfo = { };
/* truncate cache dirty pages first */
if (cl_io_is_trunc(io))
@@ -486,11 +515,19 @@ static int osc_io_setattr_start(const struct lu_env *env,
oa->o_oi = loi->loi_oi;
obdo_set_parent_fid(oa, io->u.ci_setattr.sa_parent_fid);
oa->o_stripe_idx = io->u.ci_setattr.sa_stripe_index;
- oa->o_mtime = attr->cat_mtime;
- oa->o_atime = attr->cat_atime;
- oa->o_ctime = attr->cat_ctime;
- oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME |
- OBD_MD_FLCTIME | OBD_MD_FLMTIME;
+ oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP;
+ if (ia_valid & ATTR_CTIME) {
+ oa->o_valid |= OBD_MD_FLCTIME;
+ oa->o_ctime = attr->cat_ctime;
+ }
+ if (ia_valid & ATTR_ATIME) {
+ oa->o_valid |= OBD_MD_FLATIME;
+ oa->o_atime = attr->cat_atime;
+ }
+ if (ia_valid & ATTR_MTIME) {
+ oa->o_valid |= OBD_MD_FLMTIME;
+ oa->o_mtime = attr->cat_mtime;
+ }
if (ia_valid & ATTR_SIZE) {
oa->o_size = size;
oa->o_blocks = OBD_OBJECT_EOF;
@@ -503,19 +540,21 @@ static int osc_io_setattr_start(const struct lu_env *env,
} else {
LASSERT(oio->oi_lockless == 0);
}
+ if (ia_valid & ATTR_ATTR_FLAG) {
+ oa->o_flags = io->u.ci_setattr.sa_attr_flags;
+ oa->o_valid |= OBD_MD_FLFLAGS;
+ }
- oinfo.oi_oa = oa;
init_completion(&cbargs->opc_sync);
if (ia_valid & ATTR_SIZE)
result = osc_punch_base(osc_export(cl2osc(obj)),
- &oinfo, osc_async_upcall,
+ oa, osc_async_upcall,
cbargs, PTLRPCD_SET);
else
- result = osc_setattr_async_base(osc_export(cl2osc(obj)),
- &oinfo, NULL,
- osc_async_upcall,
- cbargs, PTLRPCD_SET);
+ result = osc_setattr_async(osc_export(cl2osc(obj)),
+ oa, osc_async_upcall,
+ cbargs, PTLRPCD_SET);
cbargs->opc_rpc_sent = result == 0;
}
return result;
@@ -557,6 +596,107 @@ static void osc_io_setattr_end(const struct lu_env *env,
}
}
+struct osc_data_version_args {
+ struct osc_io *dva_oio;
+};
+
+static int
+osc_data_version_interpret(const struct lu_env *env, struct ptlrpc_request *req,
+ void *arg, int rc)
+{
+ struct osc_data_version_args *dva = arg;
+ struct osc_io *oio = dva->dva_oio;
+ const struct ost_body *body;
+
+ if (rc < 0)
+ goto out;
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ if (!body) {
+ rc = -EPROTO;
+ goto out;
+ }
+
+ lustre_get_wire_obdo(&req->rq_import->imp_connect_data, &oio->oi_oa,
+ &body->oa);
+out:
+ oio->oi_cbarg.opc_rc = rc;
+ complete(&oio->oi_cbarg.opc_sync);
+
+ return 0;
+}
+
+static int osc_io_data_version_start(const struct lu_env *env,
+ const struct cl_io_slice *slice)
+{
+ struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
+ struct osc_io *oio = cl2osc_io(env, slice);
+ struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
+ struct osc_object *obj = cl2osc(slice->cis_obj);
+ struct obd_export *exp = osc_export(obj);
+ struct lov_oinfo *loi = obj->oo_oinfo;
+ struct osc_data_version_args *dva;
+ struct obdo *oa = &oio->oi_oa;
+ struct ptlrpc_request *req;
+ struct ost_body *body;
+ int rc;
+
+ memset(oa, 0, sizeof(*oa));
+ oa->o_oi = loi->loi_oi;
+ oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
+
+ if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
+ oa->o_valid |= OBD_MD_FLFLAGS;
+ oa->o_flags |= OBD_FL_SRVLOCK;
+ if (dv->dv_flags & LL_DV_WR_FLUSH)
+ oa->o_flags |= OBD_FL_FLUSH;
+ }
+
+ init_completion(&cbargs->opc_sync);
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
+ if (!req)
+ return -ENOMEM;
+
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
+ if (rc < 0) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+ lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
+
+ ptlrpc_request_set_replen(req);
+ req->rq_interpret_reply = osc_data_version_interpret;
+ CLASSERT(sizeof(*dva) <= sizeof(req->rq_async_args));
+ dva = ptlrpc_req_async_args(req);
+ dva->dva_oio = oio;
+
+ ptlrpcd_add_req(req);
+
+ return 0;
+}
+
+static void osc_io_data_version_end(const struct lu_env *env,
+ const struct cl_io_slice *slice)
+{
+ struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
+ struct osc_io *oio = cl2osc_io(env, slice);
+ struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
+
+ wait_for_completion(&cbargs->opc_sync);
+
+ if (cbargs->opc_rc) {
+ slice->cis_io->ci_result = cbargs->opc_rc;
+ } else if (!(oio->oi_oa.o_valid & OBD_MD_FLDATAVERSION)) {
+ slice->cis_io->ci_result = -EOPNOTSUPP;
+ } else {
+ dv->dv_data_version = oio->oi_oa.o_data_version;
+ slice->cis_io->ci_result = 0;
+ }
+}
+
static int osc_io_read_start(const struct lu_env *env,
const struct cl_io_slice *slice)
{
@@ -595,7 +735,6 @@ static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
{
struct osc_io *oio = osc_env_io(env);
struct obdo *oa = &oio->oi_oa;
- struct obd_info *oinfo = &oio->oi_info;
struct lov_oinfo *loi = obj->oo_oinfo;
struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
int rc = 0;
@@ -611,12 +750,9 @@ static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
obdo_set_parent_fid(oa, fio->fi_fid);
- memset(oinfo, 0, sizeof(*oinfo));
- oinfo->oi_oa = oa;
init_completion(&cbargs->opc_sync);
- rc = osc_sync_base(osc_export(obj), oinfo, osc_async_upcall, cbargs,
- PTLRPCD_SET);
+ rc = osc_sync_base(obj, oa, osc_async_upcall, cbargs, PTLRPCD_SET);
return rc;
}
@@ -710,6 +846,10 @@ static const struct cl_io_operations osc_io_ops = {
.cio_start = osc_io_setattr_start,
.cio_end = osc_io_setattr_end
},
+ [CIT_DATA_VERSION] = {
+ .cio_start = osc_io_data_version_start,
+ .cio_end = osc_io_data_version_end,
+ },
[CIT_FAULT] = {
.cio_start = osc_io_fault_start,
.cio_end = osc_io_end,
@@ -724,6 +864,7 @@ static const struct cl_io_operations osc_io_ops = {
.cio_fini = osc_io_fini
}
},
+ .cio_read_ahead = osc_io_read_ahead,
.cio_submit = osc_io_submit,
.cio_commit_async = osc_io_commit_async
};
@@ -734,103 +875,6 @@ static const struct cl_io_operations osc_io_ops = {
*
*/
-static int osc_req_prep(const struct lu_env *env,
- const struct cl_req_slice *slice)
-{
- return 0;
-}
-
-static void osc_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret)
-{
- struct osc_req *or;
-
- or = cl2osc_req(slice);
- kmem_cache_free(osc_req_kmem, or);
-}
-
-/**
- * Implementation of struct cl_req_operations::cro_attr_set() for osc
- * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
- * fields.
- */
-static void osc_req_attr_set(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, u64 flags)
-{
- struct lov_oinfo *oinfo;
- struct cl_req *clerq;
- struct cl_page *apage; /* _some_ page in @clerq */
- struct ldlm_lock *lock; /* _some_ lock protecting @apage */
- struct osc_page *opg;
- struct obdo *oa;
- struct ost_lvb *lvb;
-
- oinfo = cl2osc(obj)->oo_oinfo;
- lvb = &oinfo->loi_lvb;
- oa = attr->cra_oa;
-
- if ((flags & OBD_MD_FLMTIME) != 0) {
- oa->o_mtime = lvb->lvb_mtime;
- oa->o_valid |= OBD_MD_FLMTIME;
- }
- if ((flags & OBD_MD_FLATIME) != 0) {
- oa->o_atime = lvb->lvb_atime;
- oa->o_valid |= OBD_MD_FLATIME;
- }
- if ((flags & OBD_MD_FLCTIME) != 0) {
- oa->o_ctime = lvb->lvb_ctime;
- oa->o_valid |= OBD_MD_FLCTIME;
- }
- if (flags & OBD_MD_FLGROUP) {
- ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
- oa->o_valid |= OBD_MD_FLGROUP;
- }
- if (flags & OBD_MD_FLID) {
- ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
- oa->o_valid |= OBD_MD_FLID;
- }
- if (flags & OBD_MD_FLHANDLE) {
- clerq = slice->crs_req;
- LASSERT(!list_empty(&clerq->crq_pages));
- apage = container_of(clerq->crq_pages.next,
- struct cl_page, cp_flight);
- opg = osc_cl_page_osc(apage, NULL);
- lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
- 1, 1);
- if (!lock && !opg->ops_srvlock) {
- struct ldlm_resource *res;
- struct ldlm_res_id *resname;
-
- CL_PAGE_DEBUG(D_ERROR, env, apage, "uncovered page!\n");
-
- resname = &osc_env_info(env)->oti_resname;
- ostid_build_res_name(&oinfo->loi_oi, resname);
- res = ldlm_resource_get(
- osc_export(cl2osc(obj))->exp_obd->obd_namespace,
- NULL, resname, LDLM_EXTENT, 0);
- ldlm_resource_dump(D_ERROR, res);
-
- dump_stack();
- LBUG();
- }
-
- /* check for lockless io. */
- if (lock) {
- oa->o_handle = lock->l_remote_handle;
- oa->o_valid |= OBD_MD_FLHANDLE;
- LDLM_LOCK_PUT(lock);
- }
- }
-}
-
-static const struct cl_req_operations osc_req_ops = {
- .cro_prep = osc_req_prep,
- .cro_attr_set = osc_req_attr_set,
- .cro_completion = osc_req_completion
-};
-
int osc_io_init(const struct lu_env *env,
struct cl_object *obj, struct cl_io *io)
{
@@ -841,20 +885,4 @@ int osc_io_init(const struct lu_env *env,
return 0;
}
-int osc_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
-{
- struct osc_req *or;
- int result;
-
- or = kmem_cache_zalloc(osc_req_kmem, GFP_NOFS);
- if (or) {
- cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
- result = 0;
- } else {
- result = -ENOMEM;
- }
- return result;
-}
-
/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index 39a8a5851603..5f799a4c78f9 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -145,7 +145,7 @@ static void osc_lock_fini(const struct lu_env *env,
static void osc_lock_build_policy(const struct lu_env *env,
const struct cl_lock *lock,
- ldlm_policy_data_t *policy)
+ union ldlm_policy_data *policy)
{
const struct cl_lock_descr *d = &lock->cll_descr;
@@ -188,7 +188,7 @@ static void osc_lock_lvb_update(const struct lu_env *env,
struct cl_object *obj = osc2cl(osc);
struct lov_oinfo *oinfo = osc->oo_oinfo;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- unsigned valid;
+ unsigned int valid;
valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
if (!lvb)
@@ -294,10 +294,10 @@ static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
struct osc_lock *oscl = cookie;
struct cl_lock_slice *slice = &oscl->ols_cl;
struct lu_env *env;
- struct cl_env_nest nest;
int rc;
+ int refcheck;
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
/* should never happen, similar to osc_ldlm_blocking_ast(). */
LASSERT(!IS_ERR(env));
@@ -336,7 +336,7 @@ static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
if (oscl->ols_owner)
cl_sync_io_note(env, oscl->ols_owner, rc);
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
return rc;
}
@@ -347,9 +347,9 @@ static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
struct osc_object *osc = cookie;
struct ldlm_lock *dlmlock;
struct lu_env *env;
- struct cl_env_nest nest;
+ int refcheck;
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
LASSERT(!IS_ERR(env));
if (errcode == ELDLM_LOCK_MATCHED) {
@@ -374,7 +374,7 @@ static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
out:
cl_object_put(env, osc2cl(osc));
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
return ldlm_error2errno(errcode);
}
@@ -382,11 +382,11 @@ static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
enum cl_lock_mode mode, int discard)
{
struct lu_env *env;
- struct cl_env_nest nest;
+ int refcheck;
int rc = 0;
int rc2 = 0;
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env))
return PTR_ERR(env);
@@ -404,7 +404,7 @@ static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
if (rc == 0 && rc2 < 0)
rc = rc2;
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
return rc;
}
@@ -536,7 +536,7 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
}
case LDLM_CB_CANCELING: {
struct lu_env *env;
- struct cl_env_nest nest;
+ int refcheck;
/*
* This can be called in the context of outer IO, e.g.,
@@ -549,14 +549,14 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
* new environment has to be created to not corrupt outer
* context.
*/
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env)) {
result = PTR_ERR(env);
break;
}
result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
break;
}
default:
@@ -568,61 +568,63 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
{
struct ptlrpc_request *req = data;
- struct cl_env_nest nest;
struct lu_env *env;
struct ost_lvb *lvb;
struct req_capsule *cap;
+ struct cl_object *obj = NULL;
int result;
+ int refcheck;
LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
- env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- struct cl_object *obj = NULL;
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env)) {
+ result = PTR_ERR(env);
+ goto out;
+ }
- lock_res_and_lock(dlmlock);
- if (dlmlock->l_ast_data) {
- obj = osc2cl(dlmlock->l_ast_data);
- cl_object_get(obj);
- }
- unlock_res_and_lock(dlmlock);
+ lock_res_and_lock(dlmlock);
+ if (dlmlock->l_ast_data) {
+ obj = osc2cl(dlmlock->l_ast_data);
+ cl_object_get(obj);
+ }
+ unlock_res_and_lock(dlmlock);
- if (obj) {
- /* Do not grab the mutex of cl_lock for glimpse.
- * See LU-1274 for details.
- * BTW, it's okay for cl_lock to be cancelled during
- * this period because server can handle this race.
- * See ldlm_server_glimpse_ast() for details.
- * cl_lock_mutex_get(env, lock);
- */
- cap = &req->rq_pill;
- req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
- req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
- sizeof(*lvb));
- result = req_capsule_server_pack(cap);
- if (result == 0) {
- lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
- result = cl_object_glimpse(env, obj, lvb);
- }
- if (!exp_connect_lvb_type(req->rq_export))
- req_capsule_shrink(&req->rq_pill,
- &RMF_DLM_LVB,
- sizeof(struct ost_lvb_v1),
- RCL_SERVER);
- cl_object_put(env, obj);
- } else {
- /*
- * These errors are normal races, so we don't want to
- * fill the console with messages by calling
- * ptlrpc_error()
- */
- lustre_pack_reply(req, 1, NULL, NULL);
- result = -ELDLM_NO_LOCK_DATA;
+ if (obj) {
+ /* Do not grab the mutex of cl_lock for glimpse.
+ * See LU-1274 for details.
+ * BTW, it's okay for cl_lock to be cancelled during
+ * this period because server can handle this race.
+ * See ldlm_server_glimpse_ast() for details.
+ * cl_lock_mutex_get(env, lock);
+ */
+ cap = &req->rq_pill;
+ req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
+ req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
+ sizeof(*lvb));
+ result = req_capsule_server_pack(cap);
+ if (result == 0) {
+ lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
+ result = cl_object_glimpse(env, obj, lvb);
+ }
+ if (!exp_connect_lvb_type(req->rq_export)) {
+ req_capsule_shrink(&req->rq_pill, &RMF_DLM_LVB,
+ sizeof(struct ost_lvb_v1),
+ RCL_SERVER);
}
- cl_env_nested_put(&nest, env);
+ cl_object_put(env, obj);
} else {
- result = PTR_ERR(env);
+ /*
+ * These errors are normal races, so we don't want to
+ * fill the console with messages by calling
+ * ptlrpc_error()
+ */
+ lustre_pack_reply(req, 1, NULL, NULL);
+ result = -ELDLM_NO_LOCK_DATA;
}
+ cl_env_put(env, &refcheck);
+
+out:
req->rq_status = result;
return result;
}
@@ -677,12 +679,12 @@ static unsigned long osc_lock_weight(const struct lu_env *env,
*/
unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
{
- struct cl_env_nest nest;
struct lu_env *env;
struct osc_object *obj;
struct osc_lock *oscl;
unsigned long weight;
bool found = false;
+ int refcheck;
might_sleep();
/*
@@ -692,7 +694,7 @@ unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
* the upper context because cl_lock_put don't modify environment
* variables. But just in case ..
*/
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env))
/* Mostly because lack of memory, do not eliminate this lock */
return 1;
@@ -722,7 +724,7 @@ unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
weight = osc_lock_weight(env, obj, &dlmlock->l_policy_data.l_extent);
out:
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
return weight;
}
@@ -912,7 +914,7 @@ static int osc_lock_enqueue(const struct lu_env *env,
struct osc_lock *oscl = cl2osc_lock(slice);
struct cl_lock *lock = slice->cls_lock;
struct ldlm_res_id *resname = &info->oti_resname;
- ldlm_policy_data_t *policy = &info->oti_policy;
+ union ldlm_policy_data *policy = &info->oti_policy;
osc_enqueue_upcall_f upcall = osc_lock_upcall;
void *cookie = oscl;
bool async = false;
@@ -1009,7 +1011,7 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
if (olck->ols_hold) {
olck->ols_hold = 0;
- osc_cancel_base(&olck->ols_handle, olck->ols_einfo.ei_mode);
+ ldlm_lock_decref(&olck->ols_handle, olck->ols_einfo.ei_mode);
olck->ols_handle.cookie = 0ULL;
}
@@ -1180,11 +1182,11 @@ int osc_lock_init(const struct lu_env *env,
*/
struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
struct osc_object *obj, pgoff_t index,
- int pending, int canceling)
+ enum osc_dap_flags dap_flags)
{
struct osc_thread_info *info = osc_env_info(env);
struct ldlm_res_id *resname = &info->oti_resname;
- ldlm_policy_data_t *policy = &info->oti_policy;
+ union ldlm_policy_data *policy = &info->oti_policy;
struct lustre_handle lockh;
struct ldlm_lock *lock = NULL;
enum ldlm_mode mode;
@@ -1194,17 +1196,18 @@ struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
osc_index2policy(policy, osc2cl(obj), index, index);
policy->l_extent.gid = LDLM_GID_ANY;
- flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
- if (pending)
- flags |= LDLM_FL_CBPENDING;
+ flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING;
+ if (dap_flags & OSC_DAP_FL_TEST_LOCK)
+ flags |= LDLM_FL_TEST_LOCK;
+
/*
* It is fine to match any group lock since there could be only one
* with a uniq gid and it conflicts with all other lock modes too
*/
again:
- mode = ldlm_lock_match(osc_export(obj)->exp_obd->obd_namespace,
- flags, resname, LDLM_EXTENT, policy,
- LCK_PR | LCK_PW | LCK_GROUP, &lockh, canceling);
+ mode = osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
+ LCK_PR | LCK_PW | LCK_GROUP, &flags, obj, &lockh,
+ dap_flags & OSC_DAP_FL_CANCELING);
if (mode != 0) {
lock = ldlm_handle2lock(&lockh);
/* RACE: the lock is cancelled so let's try again */
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index aae3a2d4243f..e0c3324857dd 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -71,13 +71,8 @@ static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
{
struct osc_object *osc = lu2osc(obj);
const struct cl_object_conf *cconf = lu2cl_conf(conf);
- int i;
osc->oo_oinfo = cconf->u.coc_oinfo;
- spin_lock_init(&osc->oo_seatbelt);
- for (i = 0; i < CRT_NR; ++i)
- INIT_LIST_HEAD(&osc->oo_inflight[i]);
-
INIT_LIST_HEAD(&osc->oo_ready_item);
INIT_LIST_HEAD(&osc->oo_hp_ready_item);
INIT_LIST_HEAD(&osc->oo_write_item);
@@ -103,10 +98,6 @@ static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
static void osc_object_free(const struct lu_env *env, struct lu_object *obj)
{
struct osc_object *osc = lu2osc(obj);
- int i;
-
- for (i = 0; i < CRT_NR; ++i)
- LASSERT(list_empty(&osc->oo_inflight[i]));
LASSERT(list_empty(&osc->oo_ready_item));
LASSERT(list_empty(&osc->oo_hp_ready_item));
@@ -218,6 +209,94 @@ static int osc_object_prune(const struct lu_env *env, struct cl_object *obj)
return 0;
}
+static int osc_object_fiemap(const struct lu_env *env, struct cl_object *obj,
+ struct ll_fiemap_info_key *fmkey,
+ struct fiemap *fiemap, size_t *buflen)
+{
+ struct obd_export *exp = osc_export(cl2osc(obj));
+ union ldlm_policy_data policy;
+ struct ptlrpc_request *req;
+ struct lustre_handle lockh;
+ struct ldlm_res_id resid;
+ enum ldlm_mode mode = 0;
+ struct fiemap *reply;
+ char *tmp;
+ int rc;
+
+ fmkey->lfik_oa.o_oi = cl2osc(obj)->oo_oinfo->loi_oi;
+ if (!(fmkey->lfik_fiemap.fm_flags & FIEMAP_FLAG_SYNC))
+ goto skip_locking;
+
+ policy.l_extent.start = fmkey->lfik_fiemap.fm_start & PAGE_MASK;
+
+ if (OBD_OBJECT_EOF - fmkey->lfik_fiemap.fm_length <=
+ fmkey->lfik_fiemap.fm_start + PAGE_SIZE - 1)
+ policy.l_extent.end = OBD_OBJECT_EOF;
+ else
+ policy.l_extent.end = (fmkey->lfik_fiemap.fm_start +
+ fmkey->lfik_fiemap.fm_length +
+ PAGE_SIZE - 1) & PAGE_MASK;
+
+ ostid_build_res_name(&fmkey->lfik_oa.o_oi, &resid);
+ mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
+ LDLM_FL_BLOCK_GRANTED | LDLM_FL_LVB_READY,
+ &resid, LDLM_EXTENT, &policy,
+ LCK_PR | LCK_PW, &lockh, 0);
+ if (mode) { /* lock is cached on client */
+ if (mode != LCK_PR) {
+ ldlm_lock_addref(&lockh, LCK_PR);
+ ldlm_lock_decref(&lockh, LCK_PW);
+ }
+ } else { /* no cached lock, needs acquire lock on server side */
+ fmkey->lfik_oa.o_valid |= OBD_MD_FLFLAGS;
+ fmkey->lfik_oa.o_flags |= OBD_FL_SRVLOCK;
+ }
+
+skip_locking:
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_OST_GET_INFO_FIEMAP);
+ if (!req) {
+ rc = -ENOMEM;
+ goto drop_lock;
+ }
+
+ req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY, RCL_CLIENT,
+ sizeof(*fmkey));
+ req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_CLIENT,
+ *buflen);
+ req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_SERVER,
+ *buflen);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
+ if (rc) {
+ ptlrpc_request_free(req);
+ goto drop_lock;
+ }
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
+ memcpy(tmp, fmkey, sizeof(*fmkey));
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
+ memcpy(tmp, fiemap, *buflen);
+ ptlrpc_request_set_replen(req);
+
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ goto fini_req;
+
+ reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
+ if (!reply) {
+ rc = -EPROTO;
+ goto fini_req;
+ }
+
+ memcpy(fiemap, reply, *buflen);
+fini_req:
+ ptlrpc_req_finished(req);
+drop_lock:
+ if (mode)
+ ldlm_lock_decref(&lockh, LCK_PR);
+ return rc;
+}
+
void osc_object_set_contended(struct osc_object *obj)
{
obj->oo_contention_time = cfs_time_current();
@@ -256,6 +335,76 @@ int osc_object_is_contended(struct osc_object *obj)
return 1;
}
+/**
+ * Implementation of struct cl_object_operations::coo_req_attr_set() for osc
+ * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
+ * fields.
+ */
+static void osc_req_attr_set(const struct lu_env *env, struct cl_object *obj,
+ struct cl_req_attr *attr)
+{
+ u64 flags = attr->cra_flags;
+ struct lov_oinfo *oinfo;
+ struct ost_lvb *lvb;
+ struct obdo *oa;
+
+ oinfo = cl2osc(obj)->oo_oinfo;
+ lvb = &oinfo->loi_lvb;
+ oa = attr->cra_oa;
+
+ if (flags & OBD_MD_FLMTIME) {
+ oa->o_mtime = lvb->lvb_mtime;
+ oa->o_valid |= OBD_MD_FLMTIME;
+ }
+ if (flags & OBD_MD_FLATIME) {
+ oa->o_atime = lvb->lvb_atime;
+ oa->o_valid |= OBD_MD_FLATIME;
+ }
+ if (flags & OBD_MD_FLCTIME) {
+ oa->o_ctime = lvb->lvb_ctime;
+ oa->o_valid |= OBD_MD_FLCTIME;
+ }
+ if (flags & OBD_MD_FLGROUP) {
+ ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
+ oa->o_valid |= OBD_MD_FLGROUP;
+ }
+ if (flags & OBD_MD_FLID) {
+ ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
+ oa->o_valid |= OBD_MD_FLID;
+ }
+ if (flags & OBD_MD_FLHANDLE) {
+ struct ldlm_lock *lock;
+ struct osc_page *opg;
+
+ opg = osc_cl_page_osc(attr->cra_page, cl2osc(obj));
+ lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
+ OSC_DAP_FL_TEST_LOCK | OSC_DAP_FL_CANCELING);
+ if (!lock && !opg->ops_srvlock) {
+ struct ldlm_resource *res;
+ struct ldlm_res_id *resname;
+
+ CL_PAGE_DEBUG(D_ERROR, env, attr->cra_page,
+ "uncovered page!\n");
+
+ resname = &osc_env_info(env)->oti_resname;
+ ostid_build_res_name(&oinfo->loi_oi, resname);
+ res = ldlm_resource_get(
+ osc_export(cl2osc(obj))->exp_obd->obd_namespace,
+ NULL, resname, LDLM_EXTENT, 0);
+ ldlm_resource_dump(D_ERROR, res);
+
+ LBUG();
+ }
+
+ /* check for lockless io. */
+ if (lock) {
+ oa->o_handle = lock->l_remote_handle;
+ oa->o_valid |= OBD_MD_FLHANDLE;
+ LDLM_LOCK_PUT(lock);
+ }
+ }
+}
+
static const struct cl_object_operations osc_ops = {
.coo_page_init = osc_page_init,
.coo_lock_init = osc_lock_init,
@@ -263,7 +412,9 @@ static const struct cl_object_operations osc_ops = {
.coo_attr_get = osc_attr_get,
.coo_attr_update = osc_attr_update,
.coo_glimpse = osc_object_glimpse,
- .coo_prune = osc_object_prune
+ .coo_prune = osc_object_prune,
+ .coo_fiemap = osc_object_fiemap,
+ .coo_req_attr_set = osc_req_attr_set
};
static const struct lu_object_operations osc_lu_obj_ops = {
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index 2a7a70aa9e80..e356e4af08e1 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -37,6 +37,7 @@
#define DEBUG_SUBSYSTEM S_OSC
+#include <linux/math64.h>
#include "osc_cl_internal.h"
static void osc_lru_del(struct client_obd *cli, struct osc_page *opg);
@@ -86,11 +87,6 @@ static void osc_page_transfer_add(const struct lu_env *env,
struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
osc_lru_use(osc_cli(obj), opg);
-
- spin_lock(&obj->oo_seatbelt);
- list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
- opg->ops_submitter = current;
- spin_unlock(&obj->oo_seatbelt);
}
int osc_page_cache_add(const struct lu_env *env,
@@ -109,7 +105,8 @@ int osc_page_cache_add(const struct lu_env *env,
return result;
}
-void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
+void osc_index2policy(union ldlm_policy_data *policy,
+ const struct cl_object *obj,
pgoff_t start, pgoff_t end)
{
memset(policy, 0, sizeof(*policy));
@@ -117,25 +114,6 @@ void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
policy->l_extent.end = cl_offset(obj, end + 1) - 1;
}
-static int osc_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused, pgoff_t *max_index)
-{
- struct osc_page *opg = cl2osc_page(slice);
- struct ldlm_lock *dlmlock;
- int result = -ENODATA;
-
- dlmlock = osc_dlmlock_at_pgoff(env, cl2osc(slice->cpl_obj),
- osc_index(opg), 1, 0);
- if (dlmlock) {
- *max_index = cl_index(slice->cpl_obj,
- dlmlock->l_policy_data.l_extent.end);
- LDLM_LOCK_PUT(dlmlock);
- result = 0;
- }
- return result;
-}
-
static const char *osc_list(struct list_head *head)
{
return list_empty(head) ? "-" : "+";
@@ -158,7 +136,7 @@ static int osc_page_print(const struct lu_env *env,
struct osc_object *obj = cl2osc(slice->cpl_obj);
struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli;
- return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p %lu: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %s %p %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
+ return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p %lu: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
opg, osc_index(opg),
/* 1 */
oap->oap_magic, oap->oap_cmd,
@@ -170,8 +148,7 @@ static int osc_page_print(const struct lu_env *env,
oap->oap_async_flags, oap->oap_brw_flags,
oap->oap_request, oap->oap_cli, obj,
/* 3 */
- osc_list(&opg->ops_inflight),
- opg->ops_submitter, opg->ops_transfer_pinned,
+ opg->ops_transfer_pinned,
osc_submit_duration(opg), opg->ops_srvlock,
/* 4 */
cli->cl_r_in_flight, cli->cl_w_in_flight,
@@ -210,14 +187,6 @@ static void osc_page_delete(const struct lu_env *env,
LASSERT(0);
}
- spin_lock(&obj->oo_seatbelt);
- if (opg->ops_submitter) {
- LASSERT(!list_empty(&opg->ops_inflight));
- list_del_init(&opg->ops_inflight);
- opg->ops_submitter = NULL;
- }
- spin_unlock(&obj->oo_seatbelt);
-
osc_lru_del(osc_cli(obj), opg);
if (slice->cpl_page->cp_type == CPT_CACHEABLE) {
@@ -276,7 +245,6 @@ static int osc_page_flush(const struct lu_env *env,
static const struct cl_page_operations osc_page_ops = {
.cpo_print = osc_page_print,
.cpo_delete = osc_page_delete,
- .cpo_is_under_lock = osc_page_is_under_lock,
.cpo_clip = osc_page_clip,
.cpo_cancel = osc_page_cancel,
.cpo_flush = osc_page_flush
@@ -301,10 +269,6 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
cl_page_slice_add(page, &opg->ops_cl, obj, index,
&osc_page_ops);
}
- /* ops_inflight and ops_lru are the same field, but it doesn't
- * hurt to initialize it twice :-)
- */
- INIT_LIST_HEAD(&opg->ops_inflight);
INIT_LIST_HEAD(&opg->ops_lru);
/* reserve an LRU space for this page */
@@ -362,16 +326,27 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
* OSC to free slots voluntarily to maintain a reasonable number of free slots
* at any time.
*/
-
static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
-/* LRU pages are freed in batch mode. OSC should at least free this
- * number of pages to avoid running out of LRU budget, and..
+
+/**
+ * LRU pages are freed in batch mode. OSC should at least free this
+ * number of pages to avoid running out of LRU slots.
+ */
+static inline int lru_shrink_min(struct client_obd *cli)
+{
+ return cli->cl_max_pages_per_rpc * 2;
+}
+
+/**
+ * Free this number at most; otherwise it will take too long to finish.
*/
-static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */
-/* free this number at most otherwise it will take too long time to finish. */
-static const int lru_shrink_max = 8 << (20 - PAGE_SHIFT); /* 8M */
+static inline int lru_shrink_max(struct client_obd *cli)
+{
+ return cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
+}
-/* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
+/**
+ * Check if we can free LRU slots from this OSC. If there exist LRU waiters,
* we should free slots aggressively. In this way, slots are freed in a steady
* step to maintain fairness among OSCs.
*
@@ -388,13 +363,20 @@ static int osc_cache_too_much(struct client_obd *cli)
/* if it's going to run out LRU slots, we should free some, but not
* too much to maintain fairness among OSCs.
*/
- if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
+ if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 2) {
if (pages >= budget)
- return lru_shrink_max;
+ return lru_shrink_max(cli);
else if (pages >= budget / 2)
- return lru_shrink_min;
- } else if (pages >= budget * 2) {
- return lru_shrink_min;
+ return lru_shrink_min(cli);
+ } else {
+ time64_t duration = ktime_get_real_seconds();
+
+ /* knock out pages by duration of no IO activity */
+ duration -= cli->cl_lru_last_used;
+ duration >>= 6; /* approximately 1 minute */
+ if (duration > 0 &&
+ pages >= div64_s64((s64)budget, duration))
+ return lru_shrink_min(cli);
}
return 0;
}
@@ -402,11 +384,21 @@ static int osc_cache_too_much(struct client_obd *cli)
int lru_queue_work(const struct lu_env *env, void *data)
{
struct client_obd *cli = data;
+ int count;
- CDEBUG(D_CACHE, "Run LRU work for client obd %p.\n", cli);
+ CDEBUG(D_CACHE, "%s: run LRU work for client obd\n", cli_name(cli));
- if (osc_cache_too_much(cli))
- osc_lru_shrink(env, cli, lru_shrink_max, true);
+ count = osc_cache_too_much(cli);
+ if (count > 0) {
+ int rc = osc_lru_shrink(env, cli, count, false);
+
+ CDEBUG(D_CACHE, "%s: shrank %d/%d pages from client obd\n",
+ cli_name(cli), rc, count);
+ if (rc >= count) {
+ CDEBUG(D_CACHE, "%s: queue again\n", cli_name(cli));
+ ptlrpcd_queue_work(cli->cl_lru_work);
+ }
+ }
return 0;
}
@@ -433,10 +425,10 @@ void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
list_splice_tail(&lru, &cli->cl_lru_list);
atomic_long_sub(npages, &cli->cl_lru_busy);
atomic_long_add(npages, &cli->cl_lru_in_list);
+ cli->cl_lru_last_used = ktime_get_real_seconds();
spin_unlock(&cli->cl_lru_list_lock);
- /* XXX: May set force to be true for better performance */
- if (osc_cache_too_much(cli))
+ if (waitqueue_active(&osc_lru_waitq))
(void)ptlrpcd_queue_work(cli->cl_lru_work);
}
}
@@ -469,8 +461,10 @@ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
* this osc occupies too many LRU pages and kernel is
* stealing one of them.
*/
- if (!memory_pressure_get())
+ if (osc_cache_too_much(cli)) {
+ CDEBUG(D_CACHE, "%s: queue LRU work\n", cli_name(cli));
(void)ptlrpcd_queue_work(cli->cl_lru_work);
+ }
wake_up(&osc_lru_waitq);
} else {
LASSERT(list_empty(&opg->ops_lru));
@@ -502,6 +496,7 @@ static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
struct cl_page *page = pvec[i];
LASSERT(cl_page_is_owned(page, io));
+ cl_page_delete(env, page);
cl_page_discard(env, io, page);
cl_page_disown(env, io, page);
cl_page_put(env, page);
@@ -542,7 +537,6 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
struct cl_object *clobj = NULL;
struct cl_page **pvec;
struct osc_page *opg;
- struct osc_page *temp;
int maxscan = 0;
long count = 0;
int index = 0;
@@ -552,6 +546,8 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
if (atomic_long_read(&cli->cl_lru_in_list) == 0 || target <= 0)
return 0;
+ CDEBUG(D_CACHE, "%s: shrinkers: %d, force: %d\n",
+ cli_name(cli), atomic_read(&cli->cl_lru_shrinkers), force);
if (!force) {
if (atomic_read(&cli->cl_lru_shrinkers) > 0)
return -EBUSY;
@@ -568,14 +564,21 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
io = &osc_env_info(env)->oti_io;
spin_lock(&cli->cl_lru_list_lock);
+ if (force)
+ cli->cl_lru_reclaim++;
maxscan = min(target << 1, atomic_long_read(&cli->cl_lru_in_list));
- list_for_each_entry_safe(opg, temp, &cli->cl_lru_list, ops_lru) {
+ while (!list_empty(&cli->cl_lru_list)) {
struct cl_page *page;
bool will_free = false;
+ if (!force && atomic_read(&cli->cl_lru_shrinkers) > 1)
+ break;
+
if (--maxscan < 0)
break;
+ opg = list_entry(cli->cl_lru_list.next, struct osc_page,
+ ops_lru);
page = opg->ops_cl.cpl_page;
if (lru_page_busy(cli, page)) {
list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
@@ -662,34 +665,43 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
return count > 0 ? count : rc;
}
-long osc_lru_reclaim(struct client_obd *cli)
+/**
+ * Reclaim LRU pages by an IO thread. The caller wants to reclaim at least
+ * \@npages of LRU slots. For performance reasons, it's better to drop
+ * LRU pages in batch. Therefore, the actual number is adjusted to at least
+ * max_pages_per_rpc.
+ */
+long osc_lru_reclaim(struct client_obd *cli, unsigned long npages)
{
- struct cl_env_nest nest;
struct lu_env *env;
struct cl_client_cache *cache = cli->cl_cache;
int max_scans;
+ int refcheck;
long rc = 0;
LASSERT(cache);
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env))
return 0;
- rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli), false);
- if (rc != 0) {
- if (rc == -EBUSY)
- rc = 0;
-
- CDEBUG(D_CACHE, "%s: Free %ld pages from own LRU: %p.\n",
- cli->cl_import->imp_obd->obd_name, rc, cli);
+ npages = max_t(int, npages, cli->cl_max_pages_per_rpc);
+ CDEBUG(D_CACHE, "%s: start to reclaim %ld pages from LRU\n",
+ cli_name(cli), npages);
+ rc = osc_lru_shrink(env, cli, npages, true);
+ if (rc >= npages) {
+ CDEBUG(D_CACHE, "%s: reclaimed %ld/%ld pages from LRU\n",
+ cli_name(cli), rc, npages);
+ if (osc_cache_too_much(cli) > 0)
+ ptlrpcd_queue_work(cli->cl_lru_work);
goto out;
+ } else if (rc > 0) {
+ npages -= rc;
}
- CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld, busy: %ld.\n",
- cli->cl_import->imp_obd->obd_name, cli,
- atomic_long_read(&cli->cl_lru_in_list),
- atomic_long_read(&cli->cl_lru_busy));
+ CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld/%ld, want: %ld\n",
+ cli_name(cli), cli, atomic_long_read(&cli->cl_lru_in_list),
+ atomic_long_read(&cli->cl_lru_busy), npages);
/* Reclaim LRU slots from other client_obd as it can't free enough
* from its own. This should rarely happen.
@@ -706,7 +718,7 @@ long osc_lru_reclaim(struct client_obd *cli)
cl_lru_osc);
CDEBUG(D_CACHE, "%s: cli %p LRU pages: %ld, busy: %ld.\n",
- cli->cl_import->imp_obd->obd_name, cli,
+ cli_name(cli), cli,
atomic_long_read(&cli->cl_lru_in_list),
atomic_long_read(&cli->cl_lru_busy));
@@ -714,19 +726,20 @@ long osc_lru_reclaim(struct client_obd *cli)
if (osc_cache_too_much(cli) > 0) {
spin_unlock(&cache->ccc_lru_lock);
- rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli),
- true);
+ rc = osc_lru_shrink(env, cli, npages, true);
spin_lock(&cache->ccc_lru_lock);
- if (rc != 0)
+ if (rc >= npages)
break;
+ if (rc > 0)
+ npages -= rc;
}
}
spin_unlock(&cache->ccc_lru_lock);
out:
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
CDEBUG(D_CACHE, "%s: cli %p freed %ld pages.\n",
- cli->cl_import->imp_obd->obd_name, cli, rc);
+ cli_name(cli), cli, rc);
return rc;
}
@@ -756,7 +769,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
LASSERT(atomic_long_read(cli->cl_lru_left) >= 0);
while (!atomic_long_add_unless(cli->cl_lru_left, -1, 0)) {
/* run out of LRU spaces, try to drop some by itself */
- rc = osc_lru_reclaim(cli);
+ rc = osc_lru_reclaim(cli, 1);
if (rc < 0)
break;
if (rc > 0)
@@ -796,8 +809,10 @@ static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
int count = 0;
int i;
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+
for (i = 0; i < page_count; i++) {
- pg_data_t *pgdat = page_pgdat(desc->bd_iov[i].bv_page);
+ pg_data_t *pgdat = page_pgdat(BD_GET_KIOV(desc, i).bv_page);
if (likely(pgdat == last)) {
++count;
@@ -857,7 +872,7 @@ void osc_dec_unstable_pages(struct ptlrpc_request *req)
if (!unstable_count)
wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
- if (osc_cache_too_much(cli))
+ if (waitqueue_active(&osc_lru_waitq))
(void)ptlrpcd_queue_work(cli->cl_lru_work);
}
@@ -913,8 +928,7 @@ bool osc_over_unstable_soft_limit(struct client_obd *cli)
CDEBUG(D_CACHE,
"%s: cli: %p unstable pages: %lu, osc unstable pages: %lu\n",
- cli->cl_import->imp_obd->obd_name, cli,
- unstable_nr, osc_unstable_count);
+ cli_name(cli), cli, unstable_nr, osc_unstable_count);
/*
* If the LRU slots are in shortage - 25% remaining AND this OSC
diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c
index 194d8ede40a2..fed4da63ee45 100644
--- a/drivers/staging/lustre/lustre/osc/osc_quota.c
+++ b/drivers/staging/lustre/lustre/osc/osc_quota.c
@@ -106,7 +106,7 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
}
CDEBUG(D_QUOTA, "%s: setdq to insert for %s %d (%d)\n",
- cli->cl_import->imp_obd->obd_name,
+ cli_name(cli),
type == USRQUOTA ? "user" : "group",
qid[type], rc);
} else {
@@ -122,7 +122,7 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
kmem_cache_free(osc_quota_kmem, oqi);
CDEBUG(D_QUOTA, "%s: setdq to remove for %s %d (%p)\n",
- cli->cl_import->imp_obd->obd_name,
+ cli_name(cli),
type == USRQUOTA ? "user" : "group",
qid[type], oqi);
}
@@ -134,8 +134,8 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
/*
* Hash operations for uid/gid <-> osc_quota_info
*/
-static unsigned
-oqi_hashfn(struct cfs_hash *hs, const void *key, unsigned mask)
+static unsigned int
+oqi_hashfn(struct cfs_hash *hs, const void *key, unsigned int mask)
{
return cfs_hash_u32_hash(*((__u32 *)key), mask);
}
@@ -281,47 +281,3 @@ int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
return rc;
}
-
-int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- struct ptlrpc_request *req;
- struct obd_quotactl *body;
- int rc;
-
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_OST_QUOTACHECK, LUSTRE_OST_VERSION,
- OST_QUOTACHECK);
- if (!req)
- return -ENOMEM;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
- *body = *oqctl;
-
- ptlrpc_request_set_replen(req);
-
- /* the next poll will find -ENODATA, that means quotacheck is going on
- */
- cli->cl_qchk_stat = -ENODATA;
- rc = ptlrpc_queue_wait(req);
- if (rc)
- cli->cl_qchk_stat = rc;
- ptlrpc_req_finished(req);
- return rc;
-}
-
-int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- int rc;
-
- qchk->obd_uuid = cli->cl_target_uuid;
- memcpy(qchk->obd_type, LUSTRE_OST_NAME, strlen(LUSTRE_OST_NAME));
-
- rc = cli->cl_qchk_stat;
- /* the client is not the previous one */
- if (rc == CL_NOT_QUOTACHECKED)
- rc = -EINTR;
- return rc;
-}
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 749781f022e2..7143564ae7e7 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -68,7 +68,6 @@ struct osc_brw_async_args {
struct client_obd *aa_cli;
struct list_head aa_oaps;
struct list_head aa_exts;
- struct cl_req *aa_clerq;
};
struct osc_async_args {
@@ -82,7 +81,8 @@ struct osc_setattr_args {
};
struct osc_fsync_args {
- struct obd_info *fa_oi;
+ struct osc_object *fa_obj;
+ struct obdo *fa_oa;
obd_enqueue_update_f fa_upcall;
void *fa_cookie;
};
@@ -103,140 +103,19 @@ static void osc_release_ppga(struct brw_page **ppga, u32 count);
static int brw_interpret(const struct lu_env *env,
struct ptlrpc_request *req, void *data, int rc);
-/* Unpack OSC object metadata from disk storage (LE byte order). */
-static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
- struct lov_mds_md *lmm, int lmm_bytes)
-{
- int lsm_size;
- struct obd_import *imp = class_exp2cliimp(exp);
-
- if (lmm) {
- if (lmm_bytes < sizeof(*lmm)) {
- CERROR("%s: lov_mds_md too small: %d, need %d\n",
- exp->exp_obd->obd_name, lmm_bytes,
- (int)sizeof(*lmm));
- return -EINVAL;
- }
- /* XXX LOV_MAGIC etc check? */
-
- if (unlikely(ostid_id(&lmm->lmm_oi) == 0)) {
- CERROR("%s: zero lmm_object_id: rc = %d\n",
- exp->exp_obd->obd_name, -EINVAL);
- return -EINVAL;
- }
- }
-
- lsm_size = lov_stripe_md_size(1);
- if (!lsmp)
- return lsm_size;
-
- if (*lsmp && !lmm) {
- kfree((*lsmp)->lsm_oinfo[0]);
- kfree(*lsmp);
- *lsmp = NULL;
- return 0;
- }
-
- if (!*lsmp) {
- *lsmp = kzalloc(lsm_size, GFP_NOFS);
- if (unlikely(!*lsmp))
- return -ENOMEM;
- (*lsmp)->lsm_oinfo[0] = kzalloc(sizeof(struct lov_oinfo),
- GFP_NOFS);
- if (unlikely(!(*lsmp)->lsm_oinfo[0])) {
- kfree(*lsmp);
- return -ENOMEM;
- }
- loi_init((*lsmp)->lsm_oinfo[0]);
- } else if (unlikely(ostid_id(&(*lsmp)->lsm_oi) == 0)) {
- return -EBADF;
- }
-
- if (lmm)
- /* XXX zero *lsmp? */
- ostid_le_to_cpu(&lmm->lmm_oi, &(*lsmp)->lsm_oi);
-
- if (imp &&
- (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
- (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
- else
- (*lsmp)->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
-
- return lsm_size;
-}
-
static inline void osc_pack_req_body(struct ptlrpc_request *req,
- struct obd_info *oinfo)
+ struct obdo *oa)
{
struct ost_body *body;
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body);
- lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
- oinfo->oi_oa);
-}
-
-static int osc_getattr_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- struct osc_async_args *aa, int rc)
-{
- struct ost_body *body;
-
- if (rc != 0)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body) {
- CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
- lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
- aa->aa_oi->oi_oa, &body->oa);
-
- /* This should really be sent by the OST */
- aa->aa_oi->oi_oa->o_blksize = DT_MAX_BRW_SIZE;
- aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
- } else {
- CDEBUG(D_INFO, "can't unpack ost_body\n");
- rc = -EPROTO;
- aa->aa_oi->oi_oa->o_valid = 0;
- }
-out:
- rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
- return rc;
-}
-
-static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
- struct ptlrpc_request_set *set)
-{
- struct ptlrpc_request *req;
- struct osc_async_args *aa;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- osc_pack_req_body(req, oinfo);
-
- ptlrpc_request_set_replen(req);
- req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;
-
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- aa->aa_oi = oinfo;
-
- ptlrpc_set_add_req(set, req);
- return 0;
+ lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
}
static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
- struct obd_info *oinfo)
+ struct obdo *oa)
{
struct ptlrpc_request *req;
struct ost_body *body;
@@ -252,7 +131,7 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
return rc;
}
- osc_pack_req_body(req, oinfo);
+ osc_pack_req_body(req, oa);
ptlrpc_request_set_replen(req);
@@ -267,11 +146,11 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
}
CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
- lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
+ lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa,
&body->oa);
- oinfo->oi_oa->o_blksize = cli_brw_size(exp->exp_obd);
- oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
+ oa->o_blksize = cli_brw_size(exp->exp_obd);
+ oa->o_valid |= OBD_MD_FLBLKSZ;
out:
ptlrpc_req_finished(req);
@@ -279,13 +158,13 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
}
static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
- struct obd_info *oinfo, struct obd_trans_info *oti)
+ struct obdo *oa)
{
struct ptlrpc_request *req;
struct ost_body *body;
int rc;
- LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);
+ LASSERT(oa->o_valid & OBD_MD_FLGROUP);
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
if (!req)
@@ -297,7 +176,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
return rc;
}
- osc_pack_req_body(req, oinfo);
+ osc_pack_req_body(req, oa);
ptlrpc_request_set_replen(req);
@@ -311,7 +190,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
goto out;
}
- lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
+ lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa,
&body->oa);
out:
@@ -341,10 +220,9 @@ out:
return rc;
}
-int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- obd_enqueue_update_f upcall, void *cookie,
- struct ptlrpc_request_set *rqset)
+int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
+ obd_enqueue_update_f upcall, void *cookie,
+ struct ptlrpc_request_set *rqset)
{
struct ptlrpc_request *req;
struct osc_setattr_args *sa;
@@ -360,10 +238,7 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
return rc;
}
- if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
- oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;
-
- osc_pack_req_body(req, oinfo);
+ osc_pack_req_body(req, oa);
ptlrpc_request_set_replen(req);
@@ -377,7 +252,7 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
sa = ptlrpc_req_async_args(req);
- sa->sa_oa = oinfo->oi_oa;
+ sa->sa_oa = oa;
sa->sa_upcall = upcall;
sa->sa_cookie = cookie;
@@ -390,16 +265,8 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
return 0;
}
-static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct ptlrpc_request_set *rqset)
-{
- return osc_setattr_async_base(exp, oinfo, oti,
- oinfo->oi_cb_up, oinfo, rqset);
-}
-
static int osc_create(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa, struct obd_trans_info *oti)
+ struct obdo *oa)
{
struct ptlrpc_request *req;
struct ost_body *body;
@@ -428,15 +295,6 @@ static int osc_create(const struct lu_env *env, struct obd_export *exp,
ptlrpc_request_set_replen(req);
- if ((oa->o_valid & OBD_MD_FLFLAGS) &&
- oa->o_flags == OBD_FL_DELORPHAN) {
- DEBUG_REQ(D_HA, req,
- "delorphan from OST integration");
- /* Don't resend the delorphan req */
- req->rq_no_resend = 1;
- req->rq_no_delay = 1;
- }
-
rc = ptlrpc_queue_wait(req);
if (rc)
goto out_req;
@@ -453,12 +311,6 @@ static int osc_create(const struct lu_env *env, struct obd_export *exp,
oa->o_blksize = cli_brw_size(exp->exp_obd);
oa->o_valid |= OBD_MD_FLBLKSZ;
- if (oti && oa->o_valid & OBD_MD_FLCOOKIE) {
- if (!oti->oti_logcookies)
- oti->oti_logcookies = &oti->oti_onecookie;
- *oti->oti_logcookies = oa->o_lcookie;
- }
-
CDEBUG(D_HA, "transno: %lld\n",
lustre_msg_get_transno(req->rq_repmsg));
out_req:
@@ -467,7 +319,7 @@ out:
return rc;
}
-int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
+int osc_punch_base(struct obd_export *exp, struct obdo *oa,
obd_enqueue_update_f upcall, void *cookie,
struct ptlrpc_request_set *rqset)
{
@@ -491,14 +343,14 @@ int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body);
lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
- oinfo->oi_oa);
+ oa);
ptlrpc_request_set_replen(req);
req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
sa = ptlrpc_req_async_args(req);
- sa->sa_oa = oinfo->oi_oa;
+ sa->sa_oa = oa;
sa->sa_upcall = upcall;
sa->sa_cookie = cookie;
if (rqset == PTLRPCD_SET)
@@ -513,8 +365,11 @@ static int osc_sync_interpret(const struct lu_env *env,
struct ptlrpc_request *req,
void *arg, int rc)
{
+ struct cl_attr *attr = &osc_env_info(env)->oti_attr;
struct osc_fsync_args *fa = arg;
+ unsigned long valid = 0;
struct ost_body *body;
+ struct cl_object *obj;
if (rc)
goto out;
@@ -526,16 +381,30 @@ static int osc_sync_interpret(const struct lu_env *env,
goto out;
}
- *fa->fa_oi->oi_oa = body->oa;
+ *fa->fa_oa = body->oa;
+ obj = osc2cl(fa->fa_obj);
+
+ /* Update osc object's blocks attribute */
+ cl_object_attr_lock(obj);
+ if (body->oa.o_valid & OBD_MD_FLBLOCKS) {
+ attr->cat_blocks = body->oa.o_blocks;
+ valid |= CAT_BLOCKS;
+ }
+
+ if (valid)
+ cl_object_attr_update(env, obj, attr, valid);
+ cl_object_attr_unlock(obj);
+
out:
rc = fa->fa_upcall(fa->fa_cookie, rc);
return rc;
}
-int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
+int osc_sync_base(struct osc_object *obj, struct obdo *oa,
obd_enqueue_update_f upcall, void *cookie,
struct ptlrpc_request_set *rqset)
{
+ struct obd_export *exp = osc_export(obj);
struct ptlrpc_request *req;
struct ost_body *body;
struct osc_fsync_args *fa;
@@ -555,14 +424,15 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body);
lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
- oinfo->oi_oa);
+ oa);
ptlrpc_request_set_replen(req);
req->rq_interpret_reply = osc_sync_interpret;
CLASSERT(sizeof(*fa) <= sizeof(req->rq_async_args));
fa = ptlrpc_req_async_args(req);
- fa->fa_oi = oinfo;
+ fa->fa_obj = obj;
+ fa->fa_oa = oa;
fa->fa_upcall = upcall;
fa->fa_cookie = cookie;
@@ -639,19 +509,8 @@ static int osc_can_send_destroy(struct client_obd *cli)
return 0;
}
-/* Destroy requests can be async always on the client, and we don't even really
- * care about the return code since the client cannot do anything at all about
- * a destroy failure.
- * When the MDS is unlinking a filename, it saves the file objects into a
- * recovery llog, and these object records are cancelled when the OST reports
- * they were destroyed and sync'd to disk (i.e. transaction committed).
- * If the client dies, or the OST is down when the object should be destroyed,
- * the records are not cancelled, and when the OST reconnects to the MDS next,
- * it will retrieve the llog unlink logs and then sends the log cancellation
- * cookies to the MDS after committing destroy transactions.
- */
static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa, struct obd_trans_info *oti)
+ struct obdo *oa)
{
struct client_obd *cli = &exp->exp_obd->u.cli;
struct ptlrpc_request *req;
@@ -683,32 +542,22 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
ptlrpc_at_set_req_timeout(req);
- if (oti && oa->o_valid & OBD_MD_FLCOOKIE)
- oa->o_lcookie = *oti->oti_logcookies;
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body);
lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
ptlrpc_request_set_replen(req);
- /* If osc_destroy is for destroying the unlink orphan,
- * sent from MDT to OST, which should not be blocked here,
- * because the process might be triggered by ptlrpcd, and
- * it is not good to block ptlrpcd thread (b=16006
- **/
- if (!(oa->o_flags & OBD_FL_DELORPHAN)) {
- req->rq_interpret_reply = osc_destroy_interpret;
- if (!osc_can_send_destroy(cli)) {
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP,
- NULL);
-
- /*
- * Wait until the number of on-going destroy RPCs drops
- * under max_rpc_in_flight
- */
- l_wait_event_exclusive(cli->cl_destroy_waitq,
- osc_can_send_destroy(cli), &lwi);
- }
+ req->rq_interpret_reply = osc_destroy_interpret;
+ if (!osc_can_send_destroy(cli)) {
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+
+ /*
+ * Wait until the number of on-going destroy RPCs drops
+ * under max_rpc_in_flight
+ */
+ l_wait_event_exclusive(cli->cl_destroy_waitq,
+ osc_can_send_destroy(cli), &lwi);
}
/* Do not wait for response */
@@ -734,14 +583,13 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
oa->o_undirty = 0;
} else if (unlikely(atomic_long_read(&obd_dirty_pages) -
atomic_long_read(&obd_dirty_transit_pages) >
- (obd_max_dirty_pages + 1))) {
+ (long)(obd_max_dirty_pages + 1))) {
/* The atomic_read() allowing the atomic_inc() are
* not covered by a lock thus they may safely race and trip
* this CERROR() unless we add in a small fudge factor (+1).
*/
- CERROR("%s: dirty %ld + %ld > system dirty_max %lu\n",
- cli->cl_import->imp_obd->obd_name,
- atomic_long_read(&obd_dirty_pages),
+ CERROR("%s: dirty %ld + %ld > system dirty_max %ld\n",
+ cli_name(cli), atomic_long_read(&obd_dirty_pages),
atomic_long_read(&obd_dirty_transit_pages),
obd_max_dirty_pages);
oa->o_undirty = 0;
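The (long) cast added above matters because obd_max_dirty_pages is an unsigned long while the difference of the two atomic_long_read() counters is a signed long that can briefly go negative (the counters are not updated under a common lock). Without the cast, the usual arithmetic conversions make the comparison unsigned, so a transient negative difference reads as a huge value. A minimal stand-alone C sketch of that pitfall, with illustrative values rather than the Lustre globals:

#include <stdio.h>

int main(void)
{
	long dirty = -1;		/* transiently negative difference */
	unsigned long max_dirty = 1000;	/* stand-in for obd_max_dirty_pages */

	/* without the cast, dirty is converted to unsigned long (huge) */
	if (dirty > max_dirty + 1)
		printf("unsigned compare: spuriously over the limit\n");

	/* casting the limit to long keeps the comparison signed */
	if (dirty > (long)(max_dirty + 1))
		printf("never printed\n");
	else
		printf("signed compare: behaves as intended\n");
	return 0;
}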
@@ -936,12 +784,10 @@ static int osc_add_shrink_grant(struct client_obd *client)
osc_grant_shrink_grant_cb, NULL,
&client->cl_grant_shrink_list);
if (rc) {
- CERROR("add grant client %s error %d\n",
- client->cl_import->imp_obd->obd_name, rc);
+ CERROR("add grant client %s error %d\n", cli_name(client), rc);
return rc;
}
- CDEBUG(D_CACHE, "add grant client %s\n",
- client->cl_import->imp_obd->obd_name);
+ CDEBUG(D_CACHE, "add grant client %s\n", cli_name(client));
osc_update_next_shrink(client);
return 0;
}
@@ -970,23 +816,13 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
cli->cl_avail_grant = ocd->ocd_grant -
(cli->cl_dirty_pages << PAGE_SHIFT);
- if (cli->cl_avail_grant < 0) {
- CWARN("%s: available grant < 0: avail/ocd/dirty %ld/%u/%ld\n",
- cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant,
- ocd->ocd_grant, cli->cl_dirty_pages << PAGE_SHIFT);
- /* workaround for servers which do not have the patch from
- * LU-2679
- */
- cli->cl_avail_grant = ocd->ocd_grant;
- }
-
/* determine the appropriate chunk size used by osc_extent. */
cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
- cli->cl_import->imp_obd->obd_name,
- cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits);
+ cli_name(cli), cli->cl_avail_grant, cli->cl_lost_grant,
+ cli->cl_chunkbits);
if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
list_empty(&cli->cl_grant_shrink_list))
@@ -1072,9 +908,9 @@ static int check_write_rcs(struct ptlrpc_request *req,
static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
if (p1->flag != p2->flag) {
- unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
- OBD_BRW_SYNC | OBD_BRW_ASYNC |
- OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC);
+ unsigned int mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
+ OBD_BRW_SYNC | OBD_BRW_ASYNC |
+ OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC);
/* warn if we try to combine flags that we don't know to be
* safe to combine
@@ -1097,7 +933,6 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
int i = 0;
struct cfs_crypto_hash_desc *hdesc;
unsigned int bufsize;
- int err;
unsigned char cfs_alg = cksum_obd2cfs(cksum_type);
LASSERT(pg_count > 0);
@@ -1139,7 +974,7 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
}
bufsize = sizeof(cksum);
- err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
+ cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
/* For sending we only compute the wrong checksum instead
* of corrupting the data so it is still correct on a redo
@@ -1151,8 +986,7 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
}
static int osc_brw_prep_request(int cmd, struct client_obd *cli,
- struct obdo *oa,
- struct lov_stripe_md *lsm, u32 page_count,
+ struct obdo *oa, u32 page_count,
struct brw_page **pga,
struct ptlrpc_request **reqp,
int reserve,
@@ -1210,8 +1044,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
desc = ptlrpc_prep_bulk_imp(req, page_count,
cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
- opc == OST_WRITE ? BULK_GET_SOURCE : BULK_PUT_SINK,
- OST_BULK_PORTAL);
+ (opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
+ PTLRPC_BULK_PUT_SINK) | PTLRPC_BULK_BUF_KIOV, OST_BULK_PORTAL,
+ &ptlrpc_bulk_kiov_pin_ops);
if (!desc) {
rc = -ENOMEM;
@@ -1259,7 +1094,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
(pg->flag & OBD_BRW_SRVLOCK));
- ptlrpc_prep_bulk_page_pin(desc, pg->pg, poff, pg->count);
+ desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff, pg->count);
requested_nob += pg->count;
if (i > 0 && can_merge_pages(pg_prev, pg)) {
@@ -1569,7 +1404,6 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
aa->aa_cli, aa->aa_oa,
- NULL /* lsm unused by osc currently */,
aa->aa_page_count, aa->aa_ppga,
&new_req, 0, 1);
if (rc)
@@ -1764,8 +1598,6 @@ static int brw_interpret(const struct lu_env *env,
LASSERT(list_empty(&aa->aa_exts));
LASSERT(list_empty(&aa->aa_oaps));
- cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
- req->rq_bulk->bd_nob_transferred);
osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
@@ -1818,9 +1650,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
struct osc_brw_async_args *aa = NULL;
struct obdo *oa = NULL;
struct osc_async_page *oap;
- struct osc_async_page *tmp;
- struct cl_req *clerq = NULL;
- enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
+ struct osc_object *obj = NULL;
struct cl_req_attr *crattr = NULL;
u64 starting_offset = OBD_OBJECT_EOF;
u64 ending_offset = 0;
@@ -1828,6 +1658,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
int mem_tight = 0;
int page_count = 0;
bool soft_sync = false;
+ bool interrupted = false;
int i;
int rc;
struct ost_body *body;
@@ -1839,32 +1670,15 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
list_for_each_entry(ext, ext_list, oe_link) {
LASSERT(ext->oe_state == OES_RPC);
mem_tight |= ext->oe_memalloc;
- list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
- ++page_count;
- list_add_tail(&oap->oap_rpc_item, &rpc_list);
- if (starting_offset > oap->oap_obj_off)
- starting_offset = oap->oap_obj_off;
- else
- LASSERT(oap->oap_page_off == 0);
- if (ending_offset < oap->oap_obj_off + oap->oap_count)
- ending_offset = oap->oap_obj_off +
- oap->oap_count;
- else
- LASSERT(oap->oap_page_off + oap->oap_count ==
- PAGE_SIZE);
- }
+ page_count += ext->oe_nr_pages;
+ if (!obj)
+ obj = ext->oe_obj;
}
soft_sync = osc_over_unstable_soft_limit(cli);
if (mem_tight)
mpflag = cfs_memory_pressure_get_and_set();
- crattr = kzalloc(sizeof(*crattr), GFP_NOFS);
- if (!crattr) {
- rc = -ENOMEM;
- goto out;
- }
-
pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS);
if (!pga) {
rc = -ENOMEM;
@@ -1878,44 +1692,46 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
}
i = 0;
- list_for_each_entry(oap, &rpc_list, oap_rpc_item) {
- struct cl_page *page = oap2cl_page(oap);
-
- if (!clerq) {
- clerq = cl_req_alloc(env, page, crt,
- 1 /* only 1-object rpcs for now */);
- if (IS_ERR(clerq)) {
- rc = PTR_ERR(clerq);
- goto out;
- }
+ list_for_each_entry(ext, ext_list, oe_link) {
+ list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
+ if (mem_tight)
+ oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
+ if (soft_sync)
+ oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
+ pga[i] = &oap->oap_brw_page;
+ pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
+ i++;
+
+ list_add_tail(&oap->oap_rpc_item, &rpc_list);
+ if (starting_offset == OBD_OBJECT_EOF ||
+ starting_offset > oap->oap_obj_off)
+ starting_offset = oap->oap_obj_off;
+ else
+ LASSERT(!oap->oap_page_off);
+ if (ending_offset < oap->oap_obj_off + oap->oap_count)
+ ending_offset = oap->oap_obj_off +
+ oap->oap_count;
+ else
+ LASSERT(oap->oap_page_off + oap->oap_count ==
+ PAGE_SIZE);
+ if (oap->oap_interrupted)
+ interrupted = true;
}
- if (mem_tight)
- oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
- if (soft_sync)
- oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
- pga[i] = &oap->oap_brw_page;
- pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
- CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
- pga[i]->pg, oap->oap_page->index, oap,
- pga[i]->flag);
- i++;
- cl_req_page_add(env, clerq, page);
}
- /* always get the data for the obdo for the rpc */
- LASSERT(clerq);
- crattr->cra_oa = oa;
- cl_req_attr_set(env, clerq, crattr, ~0ULL);
+ /* first page in the list */
+ oap = list_entry(rpc_list.next, typeof(*oap), oap_rpc_item);
- rc = cl_req_prep(env, clerq);
- if (rc != 0) {
- CERROR("cl_req_prep failed: %d\n", rc);
- goto out;
- }
+ crattr = &osc_env_info(env)->oti_req_attr;
+ memset(crattr, 0, sizeof(*crattr));
+ crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
+ crattr->cra_flags = ~0ULL;
+ crattr->cra_page = oap2cl_page(oap);
+ crattr->cra_oa = oa;
+ cl_req_attr_set(env, osc2cl(obj), crattr);
sort_brw_pages(pga, page_count);
- rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
- pga, &req, 1, 0);
+ rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 1, 0);
if (rc != 0) {
CERROR("prep_req failed: %d\n", rc);
goto out;
@@ -1924,8 +1740,10 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
req->rq_commit_cb = brw_commit;
req->rq_interpret_reply = brw_interpret;
- if (mem_tight != 0)
- req->rq_memalloc = 1;
+ req->rq_memalloc = mem_tight != 0;
+ oap->oap_request = ptlrpc_request_addref(req);
+ if (interrupted && !req->rq_intr)
+ ptlrpc_mark_interrupted(req);
/* Need to update the timestamps after the request is built in case
* we race with setattr (locally or in queue at OST). If OST gets
@@ -1935,9 +1753,8 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
*/
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
crattr->cra_oa = &body->oa;
- cl_req_attr_set(env, clerq, crattr,
- OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME);
-
+ crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
+ cl_req_attr_set(env, osc2cl(obj), crattr);
lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
@@ -1946,24 +1763,6 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
list_splice_init(&rpc_list, &aa->aa_oaps);
INIT_LIST_HEAD(&aa->aa_exts);
list_splice_init(ext_list, &aa->aa_exts);
- aa->aa_clerq = clerq;
-
- /* queued sync pages can be torn down while the pages
- * were between the pending list and the rpc
- */
- tmp = NULL;
- list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
- /* only one oap gets a request reference */
- if (!tmp)
- tmp = oap;
- if (oap->oap_interrupted && !req->rq_intr) {
- CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
- oap, req);
- ptlrpc_mark_interrupted(req);
- }
- }
- if (tmp)
- tmp->oap_request = ptlrpc_request_addref(req);
spin_lock(&cli->cl_loi_list_lock);
starting_offset >>= PAGE_SHIFT;
@@ -1985,6 +1784,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %ur/%dw in flight",
page_count, aa, cli->cl_r_in_flight,
cli->cl_w_in_flight);
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
ptlrpcd_add_req(req);
rc = 0;
@@ -1993,8 +1793,6 @@ out:
if (mem_tight != 0)
cfs_memory_pressure_restore(mpflag);
- kfree(crattr);
-
if (rc != 0) {
LASSERT(!req);
@@ -2010,22 +1808,15 @@ out:
list_del_init(&ext->oe_link);
osc_extent_finish(env, ext, 0, rc);
}
- if (clerq && !IS_ERR(clerq))
- cl_req_completion(env, clerq, rc);
}
return rc;
}
-static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
- struct ldlm_enqueue_info *einfo)
+static int osc_set_lock_data(struct ldlm_lock *lock, void *data)
{
- void *data = einfo->ei_cbdata;
int set = 0;
- LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
- LASSERT(lock->l_resource->lr_type == einfo->ei_type);
- LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
- LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
+ LASSERT(lock);
lock_res_and_lock(lock);
@@ -2039,21 +1830,6 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
return set;
}
-static int osc_set_data_with_check(struct lustre_handle *lockh,
- struct ldlm_enqueue_info *einfo)
-{
- struct ldlm_lock *lock = ldlm_handle2lock(lockh);
- int set = 0;
-
- if (lock) {
- set = osc_set_lock_data_with_check(lock, einfo);
- LDLM_LOCK_PUT(lock);
- } else
- CERROR("lockh %p, data %p - client evicted?\n",
- lockh, einfo->ei_cbdata);
- return set;
-}
-
static int osc_enqueue_fini(struct ptlrpc_request *req,
osc_enqueue_upcall_f upcall, void *cookie,
struct lustre_handle *lockh, enum ldlm_mode mode,
@@ -2153,7 +1929,7 @@ struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
* release locks just after they are obtained.
*/
int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- __u64 *flags, ldlm_policy_data_t *policy,
+ __u64 *flags, union ldlm_policy_data *policy,
struct ost_lvb *lvb, int kms_valid,
osc_enqueue_upcall_f upcall, void *cookie,
struct ldlm_enqueue_info *einfo,
@@ -2219,7 +1995,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
ldlm_lock_decref(&lockh, mode);
LDLM_LOCK_PUT(matched);
return -ECANCELED;
- } else if (osc_set_lock_data_with_check(matched, einfo)) {
+ } else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
*flags |= LDLM_FL_LVB_READY;
/* We already have a lock, and it's referenced. */
(*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
@@ -2304,7 +2080,7 @@ no_match:
}
int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- __u32 type, ldlm_policy_data_t *policy, __u32 mode,
+ __u32 type, union ldlm_policy_data *policy, __u32 mode,
__u64 *flags, void *data, struct lustre_handle *lockh,
int unref)
{
@@ -2331,31 +2107,20 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
rc |= LCK_PW;
rc = ldlm_lock_match(obd->obd_namespace, lflags,
res_id, type, policy, rc, lockh, unref);
- if (rc) {
- if (data) {
- if (!osc_set_data_with_check(lockh, data)) {
- if (!(lflags & LDLM_FL_TEST_LOCK))
- ldlm_lock_decref(lockh, rc);
- return 0;
- }
- }
- if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
- ldlm_lock_addref(lockh, LCK_PR);
- ldlm_lock_decref(lockh, LCK_PW);
- }
+ if (!rc || lflags & LDLM_FL_TEST_LOCK)
return rc;
- }
- return rc;
-}
-int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
-{
- if (unlikely(mode == LCK_GROUP))
- ldlm_lock_decref_and_cancel(lockh, mode);
- else
- ldlm_lock_decref(lockh, mode);
+ if (data) {
+ struct ldlm_lock *lock = ldlm_handle2lock(lockh);
- return 0;
+ LASSERT(lock);
+ if (!osc_set_lock_data(lock, data)) {
+ ldlm_lock_decref(lockh, rc);
+ rc = 0;
+ }
+ LDLM_LOCK_PUT(lock);
+ }
+ return rc;
}
static int osc_statfs_interpret(const struct lu_env *env,
@@ -2526,9 +2291,6 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
err = ptlrpc_set_import_active(obd->u.cli.cl_import,
data->ioc_offset);
goto out;
- case OBD_IOC_POLL_QUOTACHECK:
- err = osc_quota_poll_check(exp, karg);
- goto out;
case OBD_IOC_PING_TARGET:
err = ptlrpc_obd_ping(obd);
goto out;
@@ -2543,103 +2305,6 @@ out:
return err;
}
-static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
- u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *lsm)
-{
- if (!vallen || !val)
- return -EFAULT;
-
- if (KEY_IS(KEY_FIEMAP)) {
- struct ll_fiemap_info_key *fm_key = key;
- struct ldlm_res_id res_id;
- ldlm_policy_data_t policy;
- struct lustre_handle lockh;
- enum ldlm_mode mode = 0;
- struct ptlrpc_request *req;
- struct ll_user_fiemap *reply;
- char *tmp;
- int rc;
-
- if (!(fm_key->fiemap.fm_flags & FIEMAP_FLAG_SYNC))
- goto skip_locking;
-
- policy.l_extent.start = fm_key->fiemap.fm_start &
- PAGE_MASK;
-
- if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
- fm_key->fiemap.fm_start + PAGE_SIZE - 1)
- policy.l_extent.end = OBD_OBJECT_EOF;
- else
- policy.l_extent.end = (fm_key->fiemap.fm_start +
- fm_key->fiemap.fm_length +
- PAGE_SIZE - 1) & PAGE_MASK;
-
- ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
- mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
- LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_LVB_READY,
- &res_id, LDLM_EXTENT, &policy,
- LCK_PR | LCK_PW, &lockh, 0);
- if (mode) { /* lock is cached on client */
- if (mode != LCK_PR) {
- ldlm_lock_addref(&lockh, LCK_PR);
- ldlm_lock_decref(&lockh, LCK_PW);
- }
- } else { /* no cached lock, needs acquire lock on server side */
- fm_key->oa.o_valid |= OBD_MD_FLFLAGS;
- fm_key->oa.o_flags |= OBD_FL_SRVLOCK;
- }
-
-skip_locking:
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_OST_GET_INFO_FIEMAP);
- if (!req) {
- rc = -ENOMEM;
- goto drop_lock;
- }
-
- req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY,
- RCL_CLIENT, keylen);
- req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
- RCL_CLIENT, *vallen);
- req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
- RCL_SERVER, *vallen);
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
- if (rc) {
- ptlrpc_request_free(req);
- goto drop_lock;
- }
-
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
- memcpy(tmp, key, keylen);
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
- memcpy(tmp, val, *vallen);
-
- ptlrpc_request_set_replen(req);
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto fini_req;
-
- reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
- if (!reply) {
- rc = -EPROTO;
- goto fini_req;
- }
-
- memcpy(val, reply, *vallen);
-fini_req:
- ptlrpc_req_finished(req);
-drop_lock:
- if (mode)
- ldlm_lock_decref(&lockh, LCK_PR);
- return rc;
- }
-
- return -EINVAL;
-}
-
static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
u32 keylen, void *key, u32 vallen,
void *val, struct ptlrpc_request_set *set)
@@ -2999,47 +2664,33 @@ out_ptlrpcd:
return rc;
}
-static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+static int osc_precleanup(struct obd_device *obd)
{
- switch (stage) {
- case OBD_CLEANUP_EARLY: {
- struct obd_import *imp;
-
- imp = obd->u.cli.cl_import;
- CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
- /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
- ptlrpc_deactivate_import(imp);
- spin_lock(&imp->imp_lock);
- imp->imp_pingable = 0;
- spin_unlock(&imp->imp_lock);
- break;
+ struct client_obd *cli = &obd->u.cli;
+
+ /* LU-464
+ * for echo client, export may be on zombie list, wait for
+ * zombie thread to cull it, because cli.cl_import will be
+ * cleared in client_disconnect_export():
+ * class_export_destroy() -> obd_cleanup() ->
+ * echo_device_free() -> echo_client_cleanup() ->
+ * obd_disconnect() -> osc_disconnect() ->
+ * client_disconnect_export()
+ */
+ obd_zombie_barrier();
+ if (cli->cl_writeback_work) {
+ ptlrpcd_destroy_work(cli->cl_writeback_work);
+ cli->cl_writeback_work = NULL;
}
- case OBD_CLEANUP_EXPORTS: {
- struct client_obd *cli = &obd->u.cli;
- /* LU-464
- * for echo client, export may be on zombie list, wait for
- * zombie thread to cull it, because cli.cl_import will be
- * cleared in client_disconnect_export():
- * class_export_destroy() -> obd_cleanup() ->
- * echo_device_free() -> echo_client_cleanup() ->
- * obd_disconnect() -> osc_disconnect() ->
- * client_disconnect_export()
- */
- obd_zombie_barrier();
- if (cli->cl_writeback_work) {
- ptlrpcd_destroy_work(cli->cl_writeback_work);
- cli->cl_writeback_work = NULL;
- }
- if (cli->cl_lru_work) {
- ptlrpcd_destroy_work(cli->cl_lru_work);
- cli->cl_lru_work = NULL;
- }
- obd_cleanup_client_import(obd);
- ptlrpc_lprocfs_unregister_obd(obd);
- lprocfs_obd_cleanup(obd);
- break;
- }
+
+ if (cli->cl_lru_work) {
+ ptlrpcd_destroy_work(cli->cl_lru_work);
+ cli->cl_lru_work = NULL;
}
+
+ obd_cleanup_client_import(obd);
+ ptlrpc_lprocfs_unregister_obd(obd);
+ lprocfs_obd_cleanup(obd);
return 0;
}
@@ -3104,24 +2755,18 @@ static struct obd_ops osc_obd_ops = {
.disconnect = osc_disconnect,
.statfs = osc_statfs,
.statfs_async = osc_statfs_async,
- .unpackmd = osc_unpackmd,
.create = osc_create,
.destroy = osc_destroy,
.getattr = osc_getattr,
- .getattr_async = osc_getattr_async,
.setattr = osc_setattr,
- .setattr_async = osc_setattr_async,
.iocontrol = osc_iocontrol,
- .get_info = osc_get_info,
.set_info_async = osc_set_info_async,
.import_event = osc_import_event,
.process_config = osc_process_config,
.quotactl = osc_quotactl,
- .quotacheck = osc_quotacheck,
};
extern struct lu_kmem_descr osc_caches[];
-extern struct lock_class_key osc_ast_guard_class;
static int __init osc_init(void)
{
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 8c51d51a678b..804741362bc0 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -43,6 +43,18 @@
#include "ptlrpc_internal.h"
+const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = {
+ .add_kiov_frag = ptlrpc_prep_bulk_page_pin,
+ .release_frags = ptlrpc_release_bulk_page_pin,
+};
+EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops);
+
+const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = {
+ .add_kiov_frag = ptlrpc_prep_bulk_page_nopin,
+ .release_frags = NULL,
+};
+EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
+
static int ptlrpc_send_new_req(struct ptlrpc_request *req);
static int ptlrpcd_check_work(struct ptlrpc_request *req);
static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
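The two ops tables just added let callers attach bulk fragments through the descriptor itself (desc->bd_frag_ops->add_kiov_frag, as used in osc_brw_prep_request earlier in this patch) instead of hard-coding the pin or nopin helper. A minimal stand-alone sketch of that dispatch pattern; the types and names here are invented for illustration, not the real PTLRPC structures:

#include <stdio.h>

struct bulk_desc;

struct bulk_frag_ops {
	void (*add_frag)(struct bulk_desc *desc, const char *what);
	void (*release_frags)(struct bulk_desc *desc);
};

struct bulk_desc {
	const struct bulk_frag_ops *ops;
	int nfrags;
};

static void add_frag_pinned(struct bulk_desc *d, const char *what)
{
	d->nfrags++;
	printf("pinned frag %d: %s\n", d->nfrags, what);
}

static void release_pinned(struct bulk_desc *d)
{
	printf("releasing %d pinned frags\n", d->nfrags);
}

static const struct bulk_frag_ops pin_ops = {
	.add_frag	= add_frag_pinned,
	.release_frags	= release_pinned,
};

int main(void)
{
	struct bulk_desc desc = { .ops = &pin_ops };

	desc.ops->add_frag(&desc, "page 0");
	desc.ops->add_frag(&desc, "page 1");
	if (desc.ops->release_frags)	/* nopin ops would leave this NULL */
		desc.ops->release_frags(&desc);
	return 0;
}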
@@ -95,24 +107,43 @@ struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
* Allocate and initialize new bulk descriptor on the sender.
* Returns pointer to the descriptor or NULL on error.
*/
-struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
- unsigned type, unsigned portal)
+struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
+ unsigned int max_brw,
+ enum ptlrpc_bulk_op_type type,
+ unsigned int portal,
+ const struct ptlrpc_bulk_frag_ops *ops)
{
struct ptlrpc_bulk_desc *desc;
int i;
- desc = kzalloc(offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]),
- GFP_NOFS);
+ /* ensure that only one of KIOV or IOVEC is set but not both */
+ LASSERT((ptlrpc_is_bulk_desc_kiov(type) && ops->add_kiov_frag) ||
+ (ptlrpc_is_bulk_desc_kvec(type) && ops->add_iov_frag));
+
+ desc = kzalloc(sizeof(*desc), GFP_NOFS);
if (!desc)
return NULL;
+ if (type & PTLRPC_BULK_BUF_KIOV) {
+ GET_KIOV(desc) = kcalloc(nfrags, sizeof(*GET_KIOV(desc)),
+ GFP_NOFS);
+ if (!GET_KIOV(desc))
+ goto free_desc;
+ } else {
+ GET_KVEC(desc) = kcalloc(nfrags, sizeof(*GET_KVEC(desc)),
+ GFP_NOFS);
+ if (!GET_KVEC(desc))
+ goto free_desc;
+ }
+
spin_lock_init(&desc->bd_lock);
init_waitqueue_head(&desc->bd_waitq);
- desc->bd_max_iov = npages;
+ desc->bd_max_iov = nfrags;
desc->bd_iov_count = 0;
desc->bd_portal = portal;
desc->bd_type = type;
desc->bd_md_count = 0;
+ desc->bd_frag_ops = (struct ptlrpc_bulk_frag_ops *)ops;
LASSERT(max_brw > 0);
desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
/*
@@ -123,24 +154,31 @@ struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
LNetInvalidateHandle(&desc->bd_mds[i]);
return desc;
+free_desc:
+ kfree(desc);
+ return NULL;
}
/**
* Prepare bulk descriptor for specified outgoing request \a req that
- * can fit \a npages * pages. \a type is bulk type. \a portal is where
+ * can fit \a nfrags * pages. \a type is bulk type. \a portal is where
* the bulk to be sent. Used on client-side.
* Returns pointer to newly allocated initialized bulk descriptor or NULL on
* error.
*/
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
- unsigned npages, unsigned max_brw,
- unsigned type, unsigned portal)
+ unsigned int nfrags,
+ unsigned int max_brw,
+ unsigned int type,
+ unsigned int portal,
+ const struct ptlrpc_bulk_frag_ops *ops)
{
struct obd_import *imp = req->rq_import;
struct ptlrpc_bulk_desc *desc;
- LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
- desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
+ LASSERT(ptlrpc_is_bulk_op_passive(type));
+
+ desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
if (!desc)
return NULL;
@@ -158,56 +196,82 @@ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
}
EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
-/**
- * Add a page \a page to the bulk descriptor \a desc.
- * Data to transfer in the page starts at offset \a pageoffset and
- * amount of data to transfer from the page is \a len
- */
void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
struct page *page, int pageoffset, int len, int pin)
{
+ struct bio_vec *kiov;
+
LASSERT(desc->bd_iov_count < desc->bd_max_iov);
LASSERT(page);
LASSERT(pageoffset >= 0);
LASSERT(len > 0);
LASSERT(pageoffset + len <= PAGE_SIZE);
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+
+ kiov = &BD_GET_KIOV(desc, desc->bd_iov_count);
desc->bd_nob += len;
if (pin)
get_page(page);
- ptlrpc_add_bulk_page(desc, page, pageoffset, len);
+ kiov->bv_page = page;
+ kiov->bv_offset = pageoffset;
+ kiov->bv_len = len;
+
+ desc->bd_iov_count++;
}
EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
-/**
- * Uninitialize and free bulk descriptor \a desc.
- * Works on bulk descriptors both from server and client side.
- */
-void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
+int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
+ void *frag, int len)
{
- int i;
+ struct kvec *iovec;
+
+ LASSERT(desc->bd_iov_count < desc->bd_max_iov);
+ LASSERT(frag);
+ LASSERT(len > 0);
+ LASSERT(ptlrpc_is_bulk_desc_kvec(desc->bd_type));
+ iovec = &BD_GET_KVEC(desc, desc->bd_iov_count);
+
+ desc->bd_nob += len;
+
+ iovec->iov_base = frag;
+ iovec->iov_len = len;
+
+ desc->bd_iov_count++;
+
+ return desc->bd_nob;
+}
+EXPORT_SYMBOL(ptlrpc_prep_bulk_frag);
+
+void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
+{
LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
LASSERT(desc->bd_md_count == 0); /* network hands off */
LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
+ LASSERT(desc->bd_frag_ops);
- sptlrpc_enc_pool_put_pages(desc);
+ if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
+ sptlrpc_enc_pool_put_pages(desc);
if (desc->bd_export)
class_export_put(desc->bd_export);
else
class_import_put(desc->bd_import);
- if (unpin) {
- for (i = 0; i < desc->bd_iov_count; i++)
- put_page(desc->bd_iov[i].bv_page);
- }
+ if (desc->bd_frag_ops->release_frags)
+ desc->bd_frag_ops->release_frags(desc);
+
+ if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
+ kfree(GET_KIOV(desc));
+ else
+ kfree(GET_KVEC(desc));
kfree(desc);
}
-EXPORT_SYMBOL(__ptlrpc_free_bulk);
+EXPORT_SYMBOL(ptlrpc_free_bulk);
/**
* Set server timelimit for this req, i.e. how long are we willing to wait
@@ -589,6 +653,42 @@ static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
spin_unlock(&pool->prp_lock);
}
+void ptlrpc_add_unreplied(struct ptlrpc_request *req)
+{
+ struct obd_import *imp = req->rq_import;
+ struct list_head *tmp;
+ struct ptlrpc_request *iter;
+
+ assert_spin_locked(&imp->imp_lock);
+ LASSERT(list_empty(&req->rq_unreplied_list));
+
+ /* unreplied list is sorted by xid in ascending order */
+ list_for_each_prev(tmp, &imp->imp_unreplied_list) {
+ iter = list_entry(tmp, struct ptlrpc_request,
+ rq_unreplied_list);
+
+ LASSERT(req->rq_xid != iter->rq_xid);
+ if (req->rq_xid < iter->rq_xid)
+ continue;
+ list_add(&req->rq_unreplied_list, &iter->rq_unreplied_list);
+ return;
+ }
+ list_add(&req->rq_unreplied_list, &imp->imp_unreplied_list);
+}
+
+void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req)
+{
+ req->rq_xid = ptlrpc_next_xid();
+ ptlrpc_add_unreplied(req);
+}
+
+static inline void ptlrpc_assign_next_xid(struct ptlrpc_request *req)
+{
+ spin_lock(&req->rq_import->imp_lock);
+ ptlrpc_assign_next_xid_nolock(req);
+ spin_unlock(&req->rq_import->imp_lock);
+}
+
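ptlrpc_add_unreplied() above keeps imp_unreplied_list sorted by XID in ascending order and walks it backwards with list_for_each_prev() because a freshly assigned XID is normally the largest, so the insertion point is usually found in one step. A stand-alone sketch of that backwards sorted insert using a simplified node type rather than the kernel list API:

#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned long long xid;
	struct node *prev, *next;
};

/* head is a dummy node; the list is kept sorted by xid, ascending */
static void add_sorted(struct node *head, struct node *req)
{
	struct node *iter;

	for (iter = head->prev; iter != head; iter = iter->prev) {
		if (req->xid < iter->xid)
			continue;
		break;			/* insert right after iter */
	}
	req->prev = iter;
	req->next = iter->next;
	iter->next->prev = req;
	iter->next = req;
}

int main(void)
{
	struct node head = { 0, &head, &head };
	unsigned long long xids[] = { 5, 7, 6, 9 };
	struct node *n;
	size_t i;

	for (i = 0; i < sizeof(xids) / sizeof(xids[0]); i++) {
		n = calloc(1, sizeof(*n));
		n->xid = xids[i];
		add_sorted(&head, n);
	}
	for (n = head.next; n != &head; n = n->next)
		printf("%llu ", n->xid);	/* prints: 5 6 7 9 */
	printf("\n");
	return 0;
}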
int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
__u32 version, int opcode, char **bufs,
struct ptlrpc_cli_ctx *ctx)
@@ -637,8 +737,8 @@ int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
ptlrpc_at_set_req_timeout(request);
- request->rq_xid = ptlrpc_next_xid();
lustre_msg_set_opc(request->rq_reqmsg, opcode);
+ ptlrpc_assign_next_xid(request);
/* Let's setup deadline for req/reply/bulk unlink for opcode. */
if (cfs_fail_val == opcode) {
@@ -1129,7 +1229,9 @@ static int ptlrpc_check_status(struct ptlrpc_request *req)
lnet_nid_t nid = imp->imp_connection->c_peer.nid;
__u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
- if (ptlrpc_console_allow(req))
+ /* -EAGAIN is normal when using POSIX flocks */
+ if (ptlrpc_console_allow(req) &&
+ !(opc == LDLM_ENQUEUE && err == -EAGAIN))
LCONSOLE_ERROR_MSG(0x011, "%s: operation %s to node %s failed: rc = %d\n",
imp->imp_obd->obd_name,
ll_opcode2str(opc),
@@ -1166,6 +1268,24 @@ static void ptlrpc_save_versions(struct ptlrpc_request *req)
versions[0], versions[1]);
}
+__u64 ptlrpc_known_replied_xid(struct obd_import *imp)
+{
+ struct ptlrpc_request *req;
+
+ assert_spin_locked(&imp->imp_lock);
+ if (list_empty(&imp->imp_unreplied_list))
+ return 0;
+
+ req = list_entry(imp->imp_unreplied_list.next, struct ptlrpc_request,
+ rq_unreplied_list);
+ LASSERTF(req->rq_xid >= 1, "XID:%llu\n", req->rq_xid);
+
+ if (imp->imp_known_replied_xid < req->rq_xid - 1)
+ imp->imp_known_replied_xid = req->rq_xid - 1;
+
+ return req->rq_xid - 1;
+}
+
/**
* Callback function called when client receives RPC reply for \a req.
* Returns 0 on success or error code.
@@ -1180,6 +1300,7 @@ static int after_reply(struct ptlrpc_request *req)
int rc;
struct timespec64 work_start;
long timediff;
+ u64 committed;
LASSERT(obd);
/* repbuf must be unlinked */
@@ -1206,6 +1327,10 @@ static int after_reply(struct ptlrpc_request *req)
return 0;
}
+ ktime_get_real_ts64(&work_start);
+ timediff = (work_start.tv_sec - req->rq_sent_tv.tv_sec) * USEC_PER_SEC +
+ (work_start.tv_nsec - req->rq_sent_tv.tv_nsec) /
+ NSEC_PER_USEC;
/*
* NB Until this point, the whole of the incoming message,
* including buflens, status etc is in the sender's byte order.
@@ -1235,13 +1360,6 @@ static int after_reply(struct ptlrpc_request *req)
spin_unlock(&req->rq_lock);
req->rq_nr_resend++;
- /* allocate new xid to avoid reply reconstruction */
- if (!req->rq_bulk) {
- /* new xid is already allocated for bulk in ptlrpc_check_set() */
- req->rq_xid = ptlrpc_next_xid();
- DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for resend on EINPROGRESS");
- }
-
/* Readjust the timeout for current conditions */
ptlrpc_at_set_req_timeout(req);
/*
@@ -1255,13 +1373,14 @@ static int after_reply(struct ptlrpc_request *req)
else
req->rq_sent = now + req->rq_nr_resend;
+ /* Resend for EINPROGRESS will use a new XID */
+ spin_lock(&imp->imp_lock);
+ list_del_init(&req->rq_unreplied_list);
+ spin_unlock(&imp->imp_lock);
+
return 0;
}
- ktime_get_real_ts64(&work_start);
- timediff = (work_start.tv_sec - req->rq_sent_tv.tv_sec) * USEC_PER_SEC +
- (work_start.tv_nsec - req->rq_sent_tv.tv_nsec) /
- NSEC_PER_USEC;
if (obd->obd_svc_stats) {
lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
timediff);
@@ -1338,10 +1457,9 @@ static int after_reply(struct ptlrpc_request *req)
}
/* Replay-enabled imports return commit-status information. */
- if (lustre_msg_get_last_committed(req->rq_repmsg)) {
- imp->imp_peer_committed_transno =
- lustre_msg_get_last_committed(req->rq_repmsg);
- }
+ committed = lustre_msg_get_last_committed(req->rq_repmsg);
+ if (likely(committed > imp->imp_peer_committed_transno))
+ imp->imp_peer_committed_transno = committed;
ptlrpc_free_committed(imp);
@@ -1373,9 +1491,17 @@ static int after_reply(struct ptlrpc_request *req)
static int ptlrpc_send_new_req(struct ptlrpc_request *req)
{
struct obd_import *imp = req->rq_import;
+ u64 min_xid = 0;
int rc;
LASSERT(req->rq_phase == RQ_PHASE_NEW);
+
+ /* do not try to go further if there is not enough memory in enc_pool */
+ if (req->rq_sent && req->rq_bulk)
+ if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() &&
+ pool_is_at_full_capacity())
+ return -ENOMEM;
+
if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
(!req->rq_generation_set ||
req->rq_import_generation == imp->imp_generation))
@@ -1385,6 +1511,9 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
spin_lock(&imp->imp_lock);
+ LASSERT(req->rq_xid);
+ LASSERT(!list_empty(&req->rq_unreplied_list));
+
if (!req->rq_generation_set)
req->rq_import_generation = imp->imp_generation;
@@ -1414,8 +1543,25 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
LASSERT(list_empty(&req->rq_list));
list_add_tail(&req->rq_list, &imp->imp_sending_list);
atomic_inc(&req->rq_import->imp_inflight);
+
+ /* Find the known replied XID from the unreplied list. CONNECT
+ * and DISCONNECT requests are skipped to keep the sanity check
+ * on the server side happy, see process_req_last_xid().
+ *
+ * For CONNECT: because replay requests have lower XIDs, it would
+ * break the sanity check if CONNECT bumped the exp_last_xid on
+ * the server.
+ *
+ * For DISCONNECT: since the client aborts inflight RPCs before
+ * sending DISCONNECT, DISCONNECT may carry an XID higher than
+ * the inflight RPCs.
+ */
+ if (!ptlrpc_req_is_connect(req) && !ptlrpc_req_is_disconnect(req))
+ min_xid = ptlrpc_known_replied_xid(imp);
spin_unlock(&imp->imp_lock);
+ lustre_msg_set_last_xid(req->rq_reqmsg, min_xid);
+
lustre_msg_set_status(req->rq_reqmsg, current_pid());
rc = sptlrpc_req_refresh_ctx(req, -1);
@@ -1438,6 +1584,16 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
lustre_msg_get_opc(req->rq_reqmsg));
rc = ptl_send_rpc(req, 0);
+ if (rc == -ENOMEM) {
+ spin_lock(&imp->imp_lock);
+ if (!list_empty(&req->rq_list)) {
+ list_del_init(&req->rq_list);
+ atomic_dec(&req->rq_import->imp_inflight);
+ }
+ spin_unlock(&imp->imp_lock);
+ ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
+ return rc;
+ }
if (rc) {
DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
spin_lock(&req->rq_lock);
@@ -1688,18 +1844,9 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
spin_lock(&req->rq_lock);
req->rq_resend = 1;
spin_unlock(&req->rq_lock);
- if (req->rq_bulk) {
- __u64 old_xid;
-
- if (!ptlrpc_unregister_bulk(req, 1))
- continue;
-
- /* ensure previous bulk fails */
- old_xid = req->rq_xid;
- req->rq_xid = ptlrpc_next_xid();
- CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
- old_xid, req->rq_xid);
- }
+ if (req->rq_bulk &&
+ !ptlrpc_unregister_bulk(req, 1))
+ continue;
}
/*
* rq_wait_ctx is only touched by ptlrpcd,
@@ -1727,6 +1874,14 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
}
rc = ptl_send_rpc(req, 0);
+ if (rc == -ENOMEM) {
+ spin_lock(&imp->imp_lock);
+ if (!list_empty(&req->rq_list))
+ list_del_init(&req->rq_list);
+ spin_unlock(&imp->imp_lock);
+ ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
+ continue;
+ }
if (rc) {
DEBUG_REQ(D_HA, req,
"send failed: rc = %d", rc);
@@ -1850,6 +2005,7 @@ interpret:
list_del_init(&req->rq_list);
atomic_dec(&imp->imp_inflight);
}
+ list_del_init(&req->rq_unreplied_list);
spin_unlock(&imp->imp_lock);
atomic_dec(&set->set_remaining);
@@ -2247,6 +2403,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
if (!locked)
spin_lock(&request->rq_import->imp_lock);
list_del_init(&request->rq_replay_list);
+ list_del_init(&request->rq_unreplied_list);
if (!locked)
spin_unlock(&request->rq_import->imp_lock);
}
@@ -2266,7 +2423,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
request->rq_import = NULL;
}
if (request->rq_bulk)
- ptlrpc_free_bulk_pin(request->rq_bulk);
+ ptlrpc_free_bulk(request->rq_bulk);
if (request->rq_reqbuf || request->rq_clrbuf)
sptlrpc_cli_free_reqbuf(request);
@@ -2542,14 +2699,6 @@ void ptlrpc_resend_req(struct ptlrpc_request *req)
req->rq_resend = 1;
req->rq_net_err = 0;
req->rq_timedout = 0;
- if (req->rq_bulk) {
- __u64 old_xid = req->rq_xid;
-
- /* ensure previous bulk fails */
- req->rq_xid = ptlrpc_next_xid();
- CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
- old_xid, req->rq_xid);
- }
ptlrpc_client_wake_req(req);
spin_unlock(&req->rq_lock);
}
@@ -2592,6 +2741,10 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
+ spin_lock(&req->rq_lock);
+ req->rq_resend = 0;
+ spin_unlock(&req->rq_lock);
+
LASSERT(imp->imp_replayable);
/* Balanced in ptlrpc_free_committed, usually. */
ptlrpc_request_addref(req);
@@ -2667,8 +2820,15 @@ static int ptlrpc_replay_interpret(const struct lu_env *env,
atomic_dec(&imp->imp_replay_inflight);
- if (!ptlrpc_client_replied(req)) {
- CERROR("request replay timed out, restarting recovery\n");
+ /*
+ * Note: for bulk replay (MDS-MDS replay), even if the server
+ * got the request but the bulk transfer timed out, replay the
+ * bulk request again
+ */
+ if (!ptlrpc_client_replied(req) ||
+ (req->rq_bulk &&
+ lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) {
+ DEBUG_REQ(D_ERROR, req, "request replay timed out.\n");
rc = -ETIMEDOUT;
goto out;
}
@@ -2939,6 +3099,48 @@ __u64 ptlrpc_next_xid(void)
}
/**
+ * If the request has a newly allocated XID (new request or EINPROGRESS
+ * resend), use this XID as the bulk matchbits; otherwise allocate new
+ * matchbits for the request so that the previous bulk fails and lost
+ * replies cannot make several transfers land in the same buffer from
+ * different sending attempts.
+ */
+void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req)
+{
+ struct ptlrpc_bulk_desc *bd = req->rq_bulk;
+
+ LASSERT(bd);
+
+ if (!req->rq_resend) {
+ /* this request has a new xid, just use it as bulk matchbits */
+ req->rq_mbits = req->rq_xid;
+
+ } else { /* need to generate new matchbits for resend */
+ u64 old_mbits = req->rq_mbits;
+
+ if ((bd->bd_import->imp_connect_data.ocd_connect_flags &
+ OBD_CONNECT_BULK_MBITS)) {
+ req->rq_mbits = ptlrpc_next_xid();
+ } else {
+ /* old version transfers rq_xid to peer as matchbits */
+ req->rq_mbits = ptlrpc_next_xid();
+ req->rq_xid = req->rq_mbits;
+ }
+
+ CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
+ old_mbits, req->rq_mbits);
+ }
+
+ /*
+ * For multi-bulk RPCs, rq_mbits holds the matchbits of the last bulk
+ * so that the server can infer the number of bulks that were prepared,
+ * see LU-1431
+ */
+ req->rq_mbits += ((bd->bd_iov_count + LNET_MAX_IOV - 1) /
+ LNET_MAX_IOV) - 1;
+}
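A small worked example of the matchbits arithmetic above, together with the inverse computation done in ptlrpc_register_bulk() later in this patch; the page count and XID are illustrative values only:

#include <stdio.h>

#define LNET_MAX_IOV 256	/* kernel value, used here for illustration */

int main(void)
{
	unsigned long long xid = 1000;	/* freshly assigned XID */
	int bd_iov_count = 600;		/* pages in this bulk */
	int total_md = (bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
	unsigned long long last_mbits, first_mbits;

	/* ptlrpc_set_bulk_mbits(): new request, so mbits start from the XID */
	last_mbits = xid + total_md - 1;	/* rq_mbits += total_md - 1 */

	/* ptlrpc_register_bulk(): recover the matchbits of the first MD */
	first_mbits = last_mbits - total_md + 1;

	printf("%d MDs, mbits x%llu..x%llu\n", total_md, first_mbits, last_mbits);
	/* prints: 3 MDs, mbits x1000..x1002 */
	return 0;
}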
+
+/**
* Get a glimpse at what next xid value might have been.
* Returns possible next xid.
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c
index 7b020d60c9e5..6c7c8b68a909 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/connection.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c
@@ -152,8 +152,8 @@ void ptlrpc_connection_fini(void)
/*
* Hash operations for net_peer<->connection
*/
-static unsigned
-conn_hashfn(struct cfs_hash *hs, const void *key, unsigned mask)
+static unsigned int
+conn_hashfn(struct cfs_hash *hs, const void *key, unsigned int mask)
{
return cfs_hash_djb2_hash(key, sizeof(lnet_process_id_t), mask);
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index 283dfb296d35..49f3e6368415 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -182,9 +182,9 @@ void client_bulk_callback(lnet_event_t *ev)
struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
struct ptlrpc_request *req;
- LASSERT((desc->bd_type == BULK_PUT_SINK &&
+ LASSERT((ptlrpc_is_bulk_put_sink(desc->bd_type) &&
ev->type == LNET_EVENT_PUT) ||
- (desc->bd_type == BULK_GET_SOURCE &&
+ (ptlrpc_is_bulk_get_source(desc->bd_type) &&
ev->type == LNET_EVENT_GET) ||
ev->type == LNET_EVENT_UNLINK);
LASSERT(ev->unlinked);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index a23d0a05b574..e8280194001c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -396,7 +396,7 @@ void ptlrpc_activate_import(struct obd_import *imp)
}
EXPORT_SYMBOL(ptlrpc_activate_import);
-static void ptlrpc_pinger_force(struct obd_import *imp)
+void ptlrpc_pinger_force(struct obd_import *imp)
{
CDEBUG(D_HA, "%s: waking up pinger s:%s\n", obd2cli_tgt(imp->imp_obd),
ptlrpc_import_state_name(imp->imp_state));
@@ -408,6 +408,7 @@ static void ptlrpc_pinger_force(struct obd_import *imp)
if (imp->imp_state != LUSTRE_IMP_CONNECTING)
ptlrpc_pinger_wake_up();
}
+EXPORT_SYMBOL(ptlrpc_pinger_force);
void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
{
@@ -621,7 +622,8 @@ int ptlrpc_connect_import(struct obd_import *imp)
spin_unlock(&imp->imp_lock);
CERROR("already connected\n");
return 0;
- } else if (imp->imp_state == LUSTRE_IMP_CONNECTING) {
+ } else if (imp->imp_state == LUSTRE_IMP_CONNECTING ||
+ imp->imp_connected) {
spin_unlock(&imp->imp_lock);
CERROR("already connecting\n");
return -EALREADY;
@@ -691,8 +693,6 @@ int ptlrpc_connect_import(struct obd_import *imp)
request->rq_timeout = INITIAL_CONNECT_TIMEOUT;
lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout);
- lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_NEXT_VER);
-
request->rq_no_resend = 1;
request->rq_no_delay = 1;
request->rq_send_state = LUSTRE_IMP_CONNECTING;
@@ -859,6 +859,17 @@ static int ptlrpc_connect_set_flags(struct obd_import *imp,
client_adjust_max_dirty(cli);
/*
+ * Update client max modify RPCs in flight with value returned
+ * by the server
+ */
+ if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
+ cli->cl_max_mod_rpcs_in_flight = min(
+ cli->cl_max_mod_rpcs_in_flight,
+ ocd->ocd_maxmodrpcs);
+ else
+ cli->cl_max_mod_rpcs_in_flight = 1;
+
+ /*
* Reset ns_connect_flags only for initial connect. It might be
* changed in while using FS and if we reset it in reconnect
* this leads to losing user settings done before such as
@@ -873,8 +884,7 @@ static int ptlrpc_connect_set_flags(struct obd_import *imp,
ocd->ocd_connect_flags;
}
- if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) &&
- (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
+ if (ocd->ocd_connect_flags & OBD_CONNECT_AT)
/*
* We need a per-message support flag, because
* a. we don't know if the incoming connect reply
@@ -889,16 +899,45 @@ static int ptlrpc_connect_set_flags(struct obd_import *imp,
else
imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
- if ((ocd->ocd_connect_flags & OBD_CONNECT_FULL20) &&
- (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
- imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
- else
- imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
+ imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
return 0;
}
/**
+ * Add all replay requests back to the unreplied list before starting
+ * replay, so that the known replied XID only ever increases, even
+ * while requests are being replayed.
+ */
+static void ptlrpc_prepare_replay(struct obd_import *imp)
+{
+ struct ptlrpc_request *req;
+
+ if (imp->imp_state != LUSTRE_IMP_REPLAY ||
+ imp->imp_resend_replay)
+ return;
+
+ /*
+ * If the server was restarted during replay, the requests may
+ * already have been added to the unreplied list by a former replay.
+ */
+ spin_lock(&imp->imp_lock);
+
+ list_for_each_entry(req, &imp->imp_committed_list, rq_replay_list) {
+ if (list_empty(&req->rq_unreplied_list))
+ ptlrpc_add_unreplied(req);
+ }
+
+ list_for_each_entry(req, &imp->imp_replay_list, rq_replay_list) {
+ if (list_empty(&req->rq_unreplied_list))
+ ptlrpc_add_unreplied(req);
+ }
+
+ imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp);
+ spin_unlock(&imp->imp_lock);
+}
+
+/**
* interpret_reply callback for connect RPCs.
* Looks into returned status of connect operation and decides
* what to do with the import - i.e enter recovery, promote it to
@@ -933,6 +972,13 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
ptlrpc_maybe_ping_import_soon(imp);
goto out;
}
+
+ /*
+ * LU-7558: indicate that we are interpreting the connect reply;
+ * ptlrpc_connect_import() will not try to reconnect until the
+ * interpretation has finished.
+ */
+ imp->imp_connected = 1;
spin_unlock(&imp->imp_lock);
LASSERT(imp->imp_conn_current);
@@ -967,6 +1013,16 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
spin_unlock(&imp->imp_lock);
+ if (!exp) {
+ /* This could happen if export is cleaned during the
+ * connect attempt
+ */
+ CERROR("%s: missing export after connect\n",
+ imp->imp_obd->obd_name);
+ rc = -ENODEV;
+ goto out;
+ }
+
/* check that server granted subset of flags we asked for. */
if ((ocd->ocd_connect_flags & imp->imp_connect_flags_orig) !=
ocd->ocd_connect_flags) {
@@ -977,15 +1033,6 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
goto out;
}
- if (!exp) {
- /* This could happen if export is cleaned during the
- * connect attempt
- */
- CERROR("%s: missing export after connect\n",
- imp->imp_obd->obd_name);
- rc = -ENODEV;
- goto out;
- }
old_connect_flags = exp_connect_flags(exp);
exp->exp_connect_data = *ocd;
imp->imp_obd->obd_self_export->exp_connect_data = *ocd;
@@ -1124,6 +1171,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
imp->imp_remote_handle =
*lustre_msg_get_handle(request->rq_repmsg);
imp->imp_last_replay_transno = 0;
+ imp->imp_replay_cursor = &imp->imp_committed_list;
IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
} else {
DEBUG_REQ(D_HA, request, "%s: evicting (reconnect/recover flags not set: %x)",
@@ -1147,18 +1195,25 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
}
finish:
+ ptlrpc_prepare_replay(imp);
rc = ptlrpc_import_recovery_state_machine(imp);
if (rc == -ENOTCONN) {
CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery; invalidating and reconnecting\n",
obd2cli_tgt(imp->imp_obd),
imp->imp_connection->c_remote_uuid.uuid);
ptlrpc_connect_import(imp);
+ spin_lock(&imp->imp_lock);
+ imp->imp_connected = 0;
imp->imp_connect_tried = 1;
+ spin_unlock(&imp->imp_lock);
return 0;
}
out:
+ spin_lock(&imp->imp_lock);
+ imp->imp_connected = 0;
imp->imp_connect_tried = 1;
+ spin_unlock(&imp->imp_lock);
if (rc != 0) {
IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index 839ef3e80c1a..99d7c667df28 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -48,14 +48,14 @@
#include <linux/module.h>
-/* LUSTRE_VERSION_CODE */
-#include "../include/lustre_ver.h"
-
-#include "../include/obd_support.h"
-/* lustre_swab_mdt_body */
#include "../include/lustre/lustre_idl.h"
-/* obd2cli_tgt() (required by DEBUG_REQ()) */
+
+#include "../include/llog_swab.h"
+#include "../include/lustre_debug.h"
+#include "../include/lustre_swab.h"
+#include "../include/lustre_ver.h"
#include "../include/obd.h"
+#include "../include/obd_support.h"
/* __REQ_LAYOUT_USER__ */
#endif
@@ -121,7 +121,7 @@ static const struct req_msg_field *mdt_close_client[] = {
&RMF_CAPA1
};
-static const struct req_msg_field *mdt_release_close_client[] = {
+static const struct req_msg_field *mdt_intent_close_client[] = {
&RMF_PTLRPC_BODY,
&RMF_MDT_EPOCH,
&RMF_REC_REINT,
@@ -257,6 +257,18 @@ static const struct req_msg_field *mds_reint_rename_client[] = {
&RMF_DLM_REQ
};
+static const struct req_msg_field *mds_reint_migrate_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_REC_REINT,
+ &RMF_CAPA1,
+ &RMF_CAPA2,
+ &RMF_NAME,
+ &RMF_SYMTGT,
+ &RMF_DLM_REQ,
+ &RMF_MDT_EPOCH,
+ &RMF_CLOSE_DATA
+};
+
static const struct req_msg_field *mds_last_unlink_server[] = {
&RMF_PTLRPC_BODY,
&RMF_MDT_BODY,
@@ -666,10 +678,9 @@ static struct req_format *req_formats[] = {
&RQF_MDS_GETXATTR,
&RQF_MDS_SYNC,
&RQF_MDS_CLOSE,
- &RQF_MDS_RELEASE_CLOSE,
+ &RQF_MDS_INTENT_CLOSE,
&RQF_MDS_READPAGE,
&RQF_MDS_WRITEPAGE,
- &RQF_MDS_DONE_WRITING,
&RQF_MDS_REINT,
&RQF_MDS_REINT_CREATE,
&RQF_MDS_REINT_CREATE_ACL,
@@ -679,9 +690,9 @@ static struct req_format *req_formats[] = {
&RQF_MDS_REINT_UNLINK,
&RQF_MDS_REINT_LINK,
&RQF_MDS_REINT_RENAME,
+ &RQF_MDS_REINT_MIGRATE,
&RQF_MDS_REINT_SETATTR,
&RQF_MDS_REINT_SETXATTR,
- &RQF_MDS_QUOTACHECK,
&RQF_MDS_QUOTACTL,
&RQF_MDS_HSM_PROGRESS,
&RQF_MDS_HSM_CT_REGISTER,
@@ -691,10 +702,8 @@ static struct req_format *req_formats[] = {
&RQF_MDS_HSM_ACTION,
&RQF_MDS_HSM_REQUEST,
&RQF_MDS_SWAP_LAYOUTS,
- &RQF_QC_CALLBACK,
&RQF_OST_CONNECT,
&RQF_OST_DISCONNECT,
- &RQF_OST_QUOTACHECK,
&RQF_OST_QUOTACTL,
&RQF_OST_GETATTR,
&RQF_OST_SETATTR,
@@ -1180,14 +1189,6 @@ struct req_format RQF_LOG_CANCEL =
DEFINE_REQ_FMT0("OBD_LOG_CANCEL", log_cancel_client, empty);
EXPORT_SYMBOL(RQF_LOG_CANCEL);
-struct req_format RQF_MDS_QUOTACHECK =
- DEFINE_REQ_FMT0("MDS_QUOTACHECK", quotactl_only, empty);
-EXPORT_SYMBOL(RQF_MDS_QUOTACHECK);
-
-struct req_format RQF_OST_QUOTACHECK =
- DEFINE_REQ_FMT0("OST_QUOTACHECK", quotactl_only, empty);
-EXPORT_SYMBOL(RQF_OST_QUOTACHECK);
-
struct req_format RQF_MDS_QUOTACTL =
DEFINE_REQ_FMT0("MDS_QUOTACTL", quotactl_only, quotactl_only);
EXPORT_SYMBOL(RQF_MDS_QUOTACTL);
@@ -1196,10 +1197,6 @@ struct req_format RQF_OST_QUOTACTL =
DEFINE_REQ_FMT0("OST_QUOTACTL", quotactl_only, quotactl_only);
EXPORT_SYMBOL(RQF_OST_QUOTACTL);
-struct req_format RQF_QC_CALLBACK =
- DEFINE_REQ_FMT0("QC_CALLBACK", quotactl_only, empty);
-EXPORT_SYMBOL(RQF_QC_CALLBACK);
-
struct req_format RQF_MDS_GETSTATUS =
DEFINE_REQ_FMT0("MDS_GETSTATUS", mdt_body_only, mdt_body_capa);
EXPORT_SYMBOL(RQF_MDS_GETSTATUS);
@@ -1270,6 +1267,11 @@ struct req_format RQF_MDS_REINT_RENAME =
mds_last_unlink_server);
EXPORT_SYMBOL(RQF_MDS_REINT_RENAME);
+struct req_format RQF_MDS_REINT_MIGRATE =
+ DEFINE_REQ_FMT0("MDS_REINT_MIGRATE", mds_reint_migrate_client,
+ mds_last_unlink_server);
+EXPORT_SYMBOL(RQF_MDS_REINT_MIGRATE);
+
struct req_format RQF_MDS_REINT_SETATTR =
DEFINE_REQ_FMT0("MDS_REINT_SETATTR",
mds_reint_setattr_client, mds_setattr_server);
@@ -1381,15 +1383,10 @@ struct req_format RQF_MDS_CLOSE =
mdt_close_client, mds_last_unlink_server);
EXPORT_SYMBOL(RQF_MDS_CLOSE);
-struct req_format RQF_MDS_RELEASE_CLOSE =
+struct req_format RQF_MDS_INTENT_CLOSE =
DEFINE_REQ_FMT0("MDS_CLOSE",
- mdt_release_close_client, mds_last_unlink_server);
-EXPORT_SYMBOL(RQF_MDS_RELEASE_CLOSE);
-
-struct req_format RQF_MDS_DONE_WRITING =
- DEFINE_REQ_FMT0("MDS_DONE_WRITING",
- mdt_close_client, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_DONE_WRITING);
+ mdt_intent_close_client, mds_last_unlink_server);
+EXPORT_SYMBOL(RQF_MDS_INTENT_CLOSE);
struct req_format RQF_MDS_READPAGE =
DEFINE_REQ_FMT0("MDS_READPAGE",
@@ -1874,13 +1871,14 @@ static void *__req_capsule_get(struct req_capsule *pill,
getter = (field->rmf_flags & RMF_F_STRING) ?
(typeof(getter))lustre_msg_string : lustre_msg_buf;
- if (field->rmf_flags & RMF_F_STRUCT_ARRAY) {
+ if (field->rmf_flags & (RMF_F_STRUCT_ARRAY | RMF_F_NO_SIZE_CHECK)) {
/*
* We've already asserted that field->rmf_size > 0 in
* req_layout_init().
*/
len = lustre_msg_buflen(msg, offset);
- if ((len % field->rmf_size) != 0) {
+ if (!(field->rmf_flags & RMF_F_NO_SIZE_CHECK) &&
+ (len % field->rmf_size)) {
CERROR("%s: array field size mismatch %d modulo %u != 0 (%d)\n",
field->rmf_name, len, field->rmf_size, loc);
return NULL;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
index 0f55c01feba8..110d9f505787 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
@@ -287,8 +287,13 @@ static int llog_client_read_header(const struct lu_env *env,
goto out;
}
- memcpy(handle->lgh_hdr, hdr, sizeof(*hdr));
- handle->lgh_last_idx = handle->lgh_hdr->llh_tail.lrt_index;
+ if (handle->lgh_hdr_size < hdr->llh_hdr.lrh_len) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ memcpy(handle->lgh_hdr, hdr, hdr->llh_hdr.lrh_len);
+ handle->lgh_last_idx = LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_index;
/* sanity checks */
llh_hdr = &handle->lgh_hdr->llh_hdr;
@@ -296,9 +301,14 @@ static int llog_client_read_header(const struct lu_env *env,
CERROR("bad log header magic: %#x (expecting %#x)\n",
llh_hdr->lrh_type, LLOG_HDR_MAGIC);
rc = -EIO;
- } else if (llh_hdr->lrh_len != LLOG_CHUNK_SIZE) {
- CERROR("incorrectly sized log header: %#x (expecting %#x)\n",
- llh_hdr->lrh_len, LLOG_CHUNK_SIZE);
+ } else if (llh_hdr->lrh_len !=
+ LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_len ||
+ (llh_hdr->lrh_len & (llh_hdr->lrh_len - 1)) ||
+ llh_hdr->lrh_len < LLOG_MIN_CHUNK_SIZE ||
+ llh_hdr->lrh_len > handle->lgh_hdr_size) {
+ CERROR("incorrectly sized log header: %#x (expecting %#x) (power of two > 8192)\n",
+ llh_hdr->lrh_len,
+ LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_len);
CERROR("you may need to re-run lconf --write_conf.\n");
rc = -EIO;
}
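The stricter header validation above uses the usual bit trick that a non-zero x is a power of two exactly when (x & (x - 1)) == 0. A quick stand-alone check of that test (not Lustre code):

#include <stdio.h>

int main(void)
{
	unsigned int sizes[] = { 4096, 8192, 12288, 32768 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%u: %s\n", sizes[i],
		       (sizes[i] & (sizes[i] - 1)) ? "rejected" : "power of two");
	return 0;
}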
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index 9bad57d65db4..f87478180013 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -479,8 +479,8 @@ static int ptlrpc_lprocfs_nrs_seq_show(struct seq_file *m, void *n)
struct ptlrpc_nrs_policy *policy;
struct ptlrpc_nrs_pol_info *infos;
struct ptlrpc_nrs_pol_info tmp;
- unsigned num_pols;
- unsigned pol_idx = 0;
+ unsigned int num_pols;
+ unsigned int pol_idx = 0;
bool hp = false;
int i;
int rc = 0;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
index 9c937398a085..da1209e40f03 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -114,7 +114,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
int rc2;
int posted_md;
int total_md;
- __u64 xid;
+ u64 mbits;
lnet_handle_me_t me_h;
lnet_md_t md;
@@ -127,8 +127,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
LASSERT(desc->bd_req);
- LASSERT(desc->bd_type == BULK_PUT_SINK ||
- desc->bd_type == BULK_GET_SOURCE);
+ LASSERT(ptlrpc_is_bulk_op_passive(desc->bd_type));
/* cleanup the state of the bulk for it will be reused */
if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)
@@ -143,40 +142,37 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback);
LASSERT(desc->bd_cbid.cbid_arg == desc);
- /* An XID is only used for a single request from the client.
- * For retried bulk transfers, a new XID will be allocated in
- * in ptlrpc_check_set() if it needs to be resent, so it is not
- * using the same RDMA match bits after an error.
- *
- * For multi-bulk RPCs, rq_xid is the last XID needed for bulks. The
- * first bulk XID is power-of-two aligned before rq_xid. LU-1431
- */
- xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1);
+ total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
+ /* rq_mbits is matchbits of the final bulk */
+ mbits = req->rq_mbits - total_md + 1;
+
+ LASSERTF(mbits == (req->rq_mbits & PTLRPC_BULK_OPS_MASK),
+ "first mbits = x%llu, last mbits = x%llu\n",
+ mbits, req->rq_mbits);
LASSERTF(!(desc->bd_registered &&
req->rq_send_state != LUSTRE_IMP_REPLAY) ||
- xid != desc->bd_last_xid,
- "registered: %d rq_xid: %llu bd_last_xid: %llu\n",
- desc->bd_registered, xid, desc->bd_last_xid);
+ mbits != desc->bd_last_mbits,
+ "registered: %d rq_mbits: %llu bd_last_mbits: %llu\n",
+ desc->bd_registered, mbits, desc->bd_last_mbits);
- total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
desc->bd_registered = 1;
- desc->bd_last_xid = xid;
+ desc->bd_last_mbits = mbits;
desc->bd_md_count = total_md;
md.user_ptr = &desc->bd_cbid;
md.eq_handle = ptlrpc_eq_h;
md.threshold = 1; /* PUT or GET */
- for (posted_md = 0; posted_md < total_md; posted_md++, xid++) {
+ for (posted_md = 0; posted_md < total_md; posted_md++, mbits++) {
md.options = PTLRPC_MD_OPTIONS |
- ((desc->bd_type == BULK_GET_SOURCE) ?
+ (ptlrpc_is_bulk_op_get(desc->bd_type) ?
LNET_MD_OP_GET : LNET_MD_OP_PUT);
ptlrpc_fill_bulk_md(&md, desc, posted_md);
- rc = LNetMEAttach(desc->bd_portal, peer, xid, 0,
+ rc = LNetMEAttach(desc->bd_portal, peer, mbits, 0,
LNET_UNLINK, LNET_INS_AFTER, &me_h);
if (rc != 0) {
CERROR("%s: LNetMEAttach failed x%llu/%d: rc = %d\n",
- desc->bd_import->imp_obd->obd_name, xid,
+ desc->bd_import->imp_obd->obd_name, mbits,
posted_md, rc);
break;
}
@@ -186,7 +182,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
&desc->bd_mds[posted_md]);
if (rc != 0) {
CERROR("%s: LNetMDAttach failed x%llu/%d: rc = %d\n",
- desc->bd_import->imp_obd->obd_name, xid,
+ desc->bd_import->imp_obd->obd_name, mbits,
posted_md, rc);
rc2 = LNetMEUnlink(me_h);
LASSERT(rc2 == 0);
@@ -205,27 +201,19 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
return -ENOMEM;
}
- /* Set rq_xid to matchbits of the final bulk so that server can
- * infer the number of bulks that were prepared
- */
- req->rq_xid = --xid;
- LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK),
- "bd_last_xid = x%llu, rq_xid = x%llu\n",
- desc->bd_last_xid, req->rq_xid);
-
spin_lock(&desc->bd_lock);
- /* Holler if peer manages to touch buffers before he knows the xid */
+ /* Holler if peer manages to touch buffers before he knows the mbits */
if (desc->bd_md_count != total_md)
CWARN("%s: Peer %s touched %d buffers while I registered\n",
desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer),
total_md - desc->bd_md_count);
spin_unlock(&desc->bd_lock);
- CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, xid x%#llx-%#llx, portal %u\n",
+ CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, mbits x%#llx-%#llx, portal %u\n",
desc->bd_md_count,
- desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
+ ptlrpc_is_bulk_op_get(desc->bd_type) ? "get-source" : "put-sink",
desc->bd_iov_count, desc->bd_nob,
- desc->bd_last_xid, req->rq_xid, desc->bd_portal);
+ desc->bd_last_mbits, req->rq_mbits, desc->bd_portal);
return 0;
}
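
For readers following the XID-to-match-bits conversion in ptlrpc_register_bulk() above: rq_mbits carries the match bits of the final bulk MD, the first MD uses rq_mbits - total_md + 1, and that first value must land on a PTLRPC_BULK_OPS_COUNT-aligned boundary. A minimal user-space sketch of that arithmetic, assuming LNET_MAX_IOV = 256 and PTLRPC_BULK_OPS_COUNT = 4 purely for illustration (the real values come from the Lustre headers):

#include <stdint.h>
#include <stdio.h>

/* Illustrative constants; the real ones are defined in the Lustre headers. */
#define LNET_MAX_IOV		256
#define PTLRPC_BULK_OPS_COUNT	4
#define PTLRPC_BULK_OPS_MASK	(~((uint64_t)PTLRPC_BULK_OPS_COUNT - 1))

int main(void)
{
	uint64_t rq_mbits = 0x1236;	/* match bits of the final bulk MD */
	int bd_iov_count = 700;		/* bulk fragments in the descriptor */

	/* one LNet MD per LNET_MAX_IOV fragments, rounded up */
	int total_md = (bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;

	/* first MD uses rq_mbits - total_md + 1, the last one uses rq_mbits */
	uint64_t first = rq_mbits - total_md + 1;

	printf("total_md=%d first=%#llx last=%#llx aligned=%d\n",
	       total_md, (unsigned long long)first,
	       (unsigned long long)rq_mbits,
	       first == (rq_mbits & PTLRPC_BULK_OPS_MASK));
	/* prints total_md=3 first=0x1234 last=0x1236 aligned=1 */
	return 0;
}
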
@@ -521,6 +509,39 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
lustre_msg_set_conn_cnt(request->rq_reqmsg, imp->imp_conn_cnt);
lustre_msghdr_set_flags(request->rq_reqmsg, imp->imp_msghdr_flags);
+ /*
+ * If this is the first time the request is resent due to EINPROGRESS,
+ * a new XID needs to be allocated (see after_reply()); this differs
+ * from a resend after a reply timeout.
+ */
+ if (request->rq_nr_resend && list_empty(&request->rq_unreplied_list)) {
+ __u64 min_xid = 0;
+ /*
+ * resend for EINPROGRESS, allocate new xid to avoid reply
+ * reconstruction
+ */
+ spin_lock(&imp->imp_lock);
+ ptlrpc_assign_next_xid_nolock(request);
+ request->rq_mbits = request->rq_xid;
+ min_xid = ptlrpc_known_replied_xid(imp);
+ spin_unlock(&imp->imp_lock);
+
+ lustre_msg_set_last_xid(request->rq_reqmsg, min_xid);
+ DEBUG_REQ(D_RPCTRACE, request, "Allocating new xid for resend on EINPROGRESS");
+ } else if (request->rq_bulk) {
+ ptlrpc_set_bulk_mbits(request);
+ lustre_msg_set_mbits(request->rq_reqmsg, request->rq_mbits);
+ }
+
+ if (list_empty(&request->rq_unreplied_list) ||
+ request->rq_xid <= imp->imp_known_replied_xid) {
+ DEBUG_REQ(D_ERROR, request,
+ "xid: %llu, replied: %llu, list_empty:%d\n",
+ request->rq_xid, imp->imp_known_replied_xid,
+ list_empty(&request->rq_unreplied_list));
+ LBUG();
+ }
+
/**
* For enabled AT all request should have AT_SUPPORT in the
* FULL import state when OBD_CONNECT_AT is set
@@ -537,8 +558,15 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
mpflag = cfs_memory_pressure_get_and_set();
rc = sptlrpc_cli_wrap_request(request);
- if (rc)
+ if (rc) {
+ /*
+ * set rq_sent so that this request is treated
+ * as a delayed send in the upper layers
+ */
+ if (rc == -ENOMEM)
+ request->rq_sent = ktime_get_seconds();
goto out;
+ }
/* bulk register should be done after wrap_request() */
if (request->rq_bulk) {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index d88faf61e740..7b6ffb195834 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -82,16 +82,9 @@ static int nrs_policy_ctl_locked(struct ptlrpc_nrs_policy *policy,
static void nrs_policy_stop0(struct ptlrpc_nrs_policy *policy)
{
- struct ptlrpc_nrs *nrs = policy->pol_nrs;
-
- if (policy->pol_desc->pd_ops->op_policy_stop) {
- spin_unlock(&nrs->nrs_lock);
-
+ if (policy->pol_desc->pd_ops->op_policy_stop)
policy->pol_desc->pd_ops->op_policy_stop(policy);
- spin_lock(&nrs->nrs_lock);
- }
-
LASSERT(list_empty(&policy->pol_list_queued));
LASSERT(policy->pol_req_queued == 0 &&
policy->pol_req_started == 0);
@@ -619,6 +612,12 @@ static int nrs_policy_ctl(struct ptlrpc_nrs *nrs, char *name,
goto out;
}
+ if (policy->pol_state != NRS_POL_STATE_STARTED &&
+ policy->pol_state != NRS_POL_STATE_STOPPED) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
switch (opc) {
/**
* Unknown opcode, pass it down to the policy-specific control
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index 871768511e8c..13f00b7cbbe5 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -42,11 +42,14 @@
#include "../../include/linux/libcfs/libcfs.h"
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
+#include "../include/lustre/ll_fiemap.h"
+
+#include "../include/llog_swab.h"
#include "../include/lustre_net.h"
+#include "../include/lustre_swab.h"
#include "../include/obd_cksum.h"
-#include "../include/lustre/ll_fiemap.h"
+#include "../include/obd_support.h"
+#include "../include/obd_class.h"
#include "ptlrpc_internal.h"
@@ -942,6 +945,25 @@ __u32 lustre_msg_get_opc(struct lustre_msg *msg)
}
EXPORT_SYMBOL(lustre_msg_get_opc);
+__u16 lustre_msg_get_tag(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return 0;
+ }
+ return pb->pb_tag;
+ }
+ default:
+ CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+ return 0;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_get_tag);
+
__u64 lustre_msg_get_last_committed(struct lustre_msg *msg)
{
switch (msg->lm_magic) {
@@ -1236,6 +1258,37 @@ void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc)
}
}
+void lustre_msg_set_last_xid(struct lustre_msg *msg, u64 last_xid)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_last_xid = last_xid;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+
+void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_tag = tag;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_msg_set_tag);
+
void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions)
{
switch (msg->lm_magic) {
@@ -1373,6 +1426,21 @@ void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum)
}
}
+void lustre_msg_set_mbits(struct lustre_msg *msg, __u64 mbits)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_mbits = mbits;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+
void ptlrpc_request_set_replen(struct ptlrpc_request *req)
{
int count = req_capsule_filled_sizes(&req->rq_pill, RCL_SERVER);
@@ -1442,7 +1510,7 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *b)
__swab32s(&b->pb_opc);
__swab32s(&b->pb_status);
__swab64s(&b->pb_last_xid);
- __swab64s(&b->pb_last_seen);
+ __swab16s(&b->pb_tag);
__swab64s(&b->pb_last_committed);
__swab64s(&b->pb_transno);
__swab32s(&b->pb_flags);
@@ -1456,7 +1524,12 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *b)
__swab64s(&b->pb_pre_versions[1]);
__swab64s(&b->pb_pre_versions[2]);
__swab64s(&b->pb_pre_versions[3]);
- CLASSERT(offsetof(typeof(*b), pb_padding) != 0);
+ __swab64s(&b->pb_mbits);
+ CLASSERT(offsetof(typeof(*b), pb_padding0) != 0);
+ CLASSERT(offsetof(typeof(*b), pb_padding1) != 0);
+ CLASSERT(offsetof(typeof(*b), pb_padding64_0) != 0);
+ CLASSERT(offsetof(typeof(*b), pb_padding64_1) != 0);
+ CLASSERT(offsetof(typeof(*b), pb_padding64_2) != 0);
/* While we need to maintain compatibility between
* clients and servers without ptlrpc_body_v2 (< 2.3)
* do not swab any fields beyond pb_jobid, as we are
@@ -1492,8 +1565,12 @@ void lustre_swab_connect(struct obd_connect_data *ocd)
__swab32s(&ocd->ocd_max_easize);
if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES)
__swab64s(&ocd->ocd_maxbytes);
+ if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
+ __swab16s(&ocd->ocd_maxmodrpcs);
+ CLASSERT(offsetof(typeof(*ocd), padding0));
CLASSERT(offsetof(typeof(*ocd), padding1) != 0);
- CLASSERT(offsetof(typeof(*ocd), padding2) != 0);
+ if (ocd->ocd_connect_flags & OBD_CONNECT_FLAGS2)
+ __swab64s(&ocd->ocd_connect_flags2);
CLASSERT(offsetof(typeof(*ocd), padding3) != 0);
CLASSERT(offsetof(typeof(*ocd), padding4) != 0);
CLASSERT(offsetof(typeof(*ocd), padding5) != 0);
@@ -1666,7 +1743,7 @@ void lustre_swab_mdt_body(struct mdt_body *b)
__swab32s(&b->mbo_eadatasize);
__swab32s(&b->mbo_aclsize);
__swab32s(&b->mbo_max_mdsize);
- __swab32s(&b->mbo_max_cookiesize);
+ CLASSERT(offsetof(typeof(*b), mbo_unused3));
__swab32s(&b->mbo_uid_h);
__swab32s(&b->mbo_gid_h);
CLASSERT(offsetof(typeof(*b), mbo_padding_5) != 0);
@@ -1675,9 +1752,10 @@ void lustre_swab_mdt_body(struct mdt_body *b)
void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b)
{
/* handle is opaque */
- __swab64s(&b->ioepoch);
- __swab32s(&b->flags);
- CLASSERT(offsetof(typeof(*b), padding) != 0);
+ /* mio_handle is opaque */
+ CLASSERT(offsetof(typeof(*b), mio_unused1));
+ CLASSERT(offsetof(typeof(*b), mio_unused2));
+ CLASSERT(offsetof(typeof(*b), mio_padding));
}
void lustre_swab_mgs_target_info(struct mgs_target_info *mti)
@@ -1772,7 +1850,7 @@ void lustre_swab_fid2path(struct getinfo_fid2path *gf)
}
EXPORT_SYMBOL(lustre_swab_fid2path);
-static void lustre_swab_fiemap_extent(struct ll_fiemap_extent *fm_extent)
+static void lustre_swab_fiemap_extent(struct fiemap_extent *fm_extent)
{
__swab64s(&fm_extent->fe_logical);
__swab64s(&fm_extent->fe_physical);
@@ -1781,7 +1859,7 @@ static void lustre_swab_fiemap_extent(struct ll_fiemap_extent *fm_extent)
__swab32s(&fm_extent->fe_device);
}
-void lustre_swab_fiemap(struct ll_user_fiemap *fiemap)
+void lustre_swab_fiemap(struct fiemap *fiemap)
{
__u32 i;
@@ -1938,7 +2016,7 @@ static void lustre_swab_ldlm_res_id(struct ldlm_res_id *id)
__swab64s(&id->name[i]);
}
-static void lustre_swab_ldlm_policy_data(ldlm_wire_policy_data_t *d)
+static void lustre_swab_ldlm_policy_data(union ldlm_wire_policy_data *d)
{
/* the lock data is a union and the first two fields are always an
* extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock
@@ -2062,8 +2140,6 @@ static void dump_obdo(struct obdo *oa)
if (valid & OBD_MD_FLHANDLE)
CDEBUG(D_RPCTRACE, "obdo: o_handle = %lld\n",
oa->o_handle.cookie);
- if (valid & OBD_MD_FLCOOKIE)
- CDEBUG(D_RPCTRACE, "obdo: o_lcookie = (llog_cookie dumping not yet implemented)\n");
}
void dump_ost_body(struct ost_body *ob)
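
The lustre_swab_ptlrpc_body() hunk above swaps the new 16-bit pb_tag where the removed 64-bit pb_last_seen used to be swabbed, and adds a 64-bit swab for pb_mbits. A small stand-alone illustration of what such in-place byte swaps do; swab16s()/swab64s() below are local stand-ins built on GCC's __builtin_bswap intrinsics, not the kernel helpers:

#include <stdint.h>
#include <stdio.h>

/* In-place byte swaps, mimicking the kernel's __swab16s()/__swab64s(). */
static void swab16s(uint16_t *v) { *v = __builtin_bswap16(*v); }
static void swab64s(uint64_t *v) { *v = __builtin_bswap64(*v); }

int main(void)
{
	uint16_t pb_tag = 0x0102;			/* new 16-bit slot tag */
	uint64_t pb_mbits = 0x0102030405060708ULL;	/* new bulk match bits */

	swab16s(&pb_tag);
	swab64s(&pb_mbits);
	printf("pb_tag=%#x pb_mbits=%#llx\n",
	       pb_tag, (unsigned long long)pb_mbits);
	/* prints pb_tag=0x201 pb_mbits=0x807060504030201 */
	return 0;
}
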
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pers.c b/drivers/staging/lustre/lustre/ptlrpc/pers.c
index 5b9fb11c0b6b..94e9fa85d774 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pers.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pers.c
@@ -43,6 +43,8 @@
void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
int mdidx)
{
+ int offset = mdidx * LNET_MAX_IOV;
+
CLASSERT(PTLRPC_MAX_BRW_PAGES < LI_POISON);
LASSERT(mdidx < desc->bd_md_max_brw);
@@ -50,23 +52,20 @@ void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV |
LNET_MD_PHYS)));
- md->options |= LNET_MD_KIOV;
md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV);
md->length = min_t(unsigned int, LNET_MAX_IOV, md->length);
- if (desc->bd_enc_iov)
- md->start = &desc->bd_enc_iov[mdidx * LNET_MAX_IOV];
- else
- md->start = &desc->bd_iov[mdidx * LNET_MAX_IOV];
-}
-
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
- int pageoffset, int len)
-{
- lnet_kiov_t *kiov = &desc->bd_iov[desc->bd_iov_count];
-
- kiov->bv_page = page;
- kiov->bv_offset = pageoffset;
- kiov->bv_len = len;
- desc->bd_iov_count++;
+ if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) {
+ md->options |= LNET_MD_KIOV;
+ if (GET_ENC_KIOV(desc))
+ md->start = &BD_GET_ENC_KIOV(desc, offset);
+ else
+ md->start = &BD_GET_KIOV(desc, offset);
+ } else {
+ md->options |= LNET_MD_IOVEC;
+ if (GET_ENC_KVEC(desc))
+ md->start = &BD_GET_ENC_KVEC(desc, offset);
+ else
+ md->start = &BD_GET_KVEC(desc, offset);
+ }
}
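
ptlrpc_fill_bulk_md() above carves the descriptor's fragment array into LNet MDs of at most LNET_MAX_IOV entries, the MD at index mdidx starting at mdidx * LNET_MAX_IOV. A stand-alone sketch of that slicing, assuming LNET_MAX_IOV = 256 for illustration only:

#include <stdio.h>

#define LNET_MAX_IOV	256	/* illustrative value */

int main(void)
{
	int bd_iov_count = 700;	/* fragments in the bulk descriptor */
	int total_md = (bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
	int mdidx;

	for (mdidx = 0; mdidx < total_md; mdidx++) {
		int offset = mdidx * LNET_MAX_IOV;
		int length = bd_iov_count - offset;

		if (length > LNET_MAX_IOV)
			length = LNET_MAX_IOV;
		/* MD 0 covers fragments [0,255], MD 1 [256,511], MD 2 [512,699] */
		printf("md %d: start=%d len=%d\n", mdidx, offset, length);
	}
	return 0;
}
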
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
index f14d193287da..e0f859ca6223 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
@@ -55,8 +55,11 @@ int ptlrpcd_start(struct ptlrpcd_ctl *pc);
/* client.c */
void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
unsigned int service_time);
-struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
- unsigned type, unsigned portal);
+struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
+ unsigned int max_brw,
+ enum ptlrpc_bulk_op_type type,
+ unsigned int portal,
+ const struct ptlrpc_bulk_frag_ops *ops);
int ptlrpc_request_cache_init(void);
void ptlrpc_request_cache_fini(void);
struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags);
@@ -67,6 +70,10 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
int ptlrpc_expired_set(void *data);
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
void ptlrpc_resend_req(struct ptlrpc_request *request);
+void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req);
+void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req);
+__u64 ptlrpc_known_replied_xid(struct obd_import *imp);
+void ptlrpc_add_unreplied(struct ptlrpc_request *req);
/* events.c */
int ptlrpc_init_portals(void);
@@ -226,8 +233,6 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink);
/* pers.c */
void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
int mdcnt);
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
- int pageoffset, int len);
/* pack_generic.c */
struct ptlrpc_reply_state *
@@ -322,6 +327,7 @@ static inline void ptlrpc_cli_req_init(struct ptlrpc_request *req)
INIT_LIST_HEAD(&cr->cr_set_chain);
INIT_LIST_HEAD(&cr->cr_ctx_chain);
+ INIT_LIST_HEAD(&cr->cr_unreplied_list);
init_waitqueue_head(&cr->cr_reply_waitq);
init_waitqueue_head(&cr->cr_set_waitq);
}
@@ -338,4 +344,24 @@ static inline void ptlrpc_srv_req_init(struct ptlrpc_request *req)
INIT_LIST_HEAD(&sr->sr_hist_list);
}
+static inline bool ptlrpc_req_is_connect(struct ptlrpc_request *req)
+{
+ if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_CONNECT ||
+ lustre_msg_get_opc(req->rq_reqmsg) == OST_CONNECT ||
+ lustre_msg_get_opc(req->rq_reqmsg) == MGS_CONNECT)
+ return true;
+ else
+ return false;
+}
+
+static inline bool ptlrpc_req_is_disconnect(struct ptlrpc_request *req)
+{
+ if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_DISCONNECT ||
+ lustre_msg_get_opc(req->rq_reqmsg) == OST_DISCONNECT ||
+ lustre_msg_get_opc(req->rq_reqmsg) == MGS_DISCONNECT)
+ return true;
+ else
+ return false;
+}
+
#endif /* PTLRPC_INTERNAL_H */
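
The cr_unreplied_list initialisation and the ptlrpc_add_unreplied()/ptlrpc_known_replied_xid() declarations above back the last-XID tracking used by ptl_send_rpc() earlier in this patch: with unreplied XIDs kept in ascending order, everything below the lowest unreplied XID is known to be replied. A hedged sketch of that idea with plain stand-in types (not the ptlrpc structures):

#include <stdint.h>
#include <stdio.h>

/* Unreplied XIDs kept in ascending order; a stand-in for the import-wide
 * list of unreplied requests. */
static uint64_t unreplied[8];
static int nr_unreplied;

static uint64_t known_replied_xid(void)
{
	/* every XID strictly below the lowest unreplied one has been replied */
	return nr_unreplied ? unreplied[0] - 1 : 0;
}

int main(void)
{
	unreplied[nr_unreplied++] = 17;
	unreplied[nr_unreplied++] = 18;
	unreplied[nr_unreplied++] = 21;
	printf("known replied xid = %llu\n",
	       (unsigned long long)known_replied_xid());	/* 16 */
	return 0;
}
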
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index 405faf0dc9fc..c00449036884 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -111,7 +111,9 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
* all of it's requests being replayed, it's safe to
* use a cursor to accelerate the search
*/
- imp->imp_replay_cursor = imp->imp_replay_cursor->next;
+ if (!imp->imp_resend_replay ||
+ imp->imp_replay_cursor == &imp->imp_committed_list)
+ imp->imp_replay_cursor = imp->imp_replay_cursor->next;
while (imp->imp_replay_cursor !=
&imp->imp_committed_list) {
@@ -155,10 +157,24 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
spin_lock(&imp->imp_lock);
+ /* The resend replay request may have been removed from the
+ * unreplied list.
+ */
+ if (req && imp->imp_resend_replay &&
+ list_empty(&req->rq_unreplied_list)) {
+ ptlrpc_add_unreplied(req);
+ imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp);
+ }
+
imp->imp_resend_replay = 0;
spin_unlock(&imp->imp_lock);
if (req) {
+ /* The request should have been added back to the unreplied list
+ * by ptlrpc_prepare_replay().
+ */
+ LASSERT(!list_empty(&req->rq_unreplied_list));
+
rc = ptlrpc_replay_req(req);
if (rc) {
CERROR("recovery replay error %d for req %llu\n",
@@ -194,7 +210,13 @@ int ptlrpc_resend(struct obd_import *imp)
LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
"req %p bad\n", req);
LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
- if (!ptlrpc_no_resend(req))
+
+ /*
+ * If the request is allowed to be sent during replay and it
+ * has not timed out yet, it does not need to be resent.
+ */
+ if (!ptlrpc_no_resend(req) &&
+ (req->rq_timedout || !req->rq_allow_replay))
ptlrpc_resend_req(req);
}
spin_unlock(&imp->imp_lock);
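
The ptlrpc_resend() condition above skips a request only when it is allowed to be sent during replay and has not yet timed out; every other combination is resent. A quick stand-alone check of that predicate, with rq_timedout/rq_allow_replay modelled as plain flags:

#include <stdbool.h>
#include <stdio.h>

/* Resend unless the request may be sent during replay AND is still within
 * its timeout, i.e. resend when (timedout || !allow_replay). */
static bool needs_resend(bool rq_timedout, bool rq_allow_replay)
{
	return rq_timedout || !rq_allow_replay;
}

int main(void)
{
	printf("timedout=0 allow_replay=1 -> resend=%d\n", needs_resend(false, true));	/* 0 */
	printf("timedout=1 allow_replay=1 -> resend=%d\n", needs_resend(true, true));	/* 1 */
	printf("timedout=0 allow_replay=0 -> resend=%d\n", needs_resend(false, false));	/* 1 */
	return 0;
}
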
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index a7416cd9ac71..e860df7c45a2 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -379,7 +379,7 @@ int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
if (!req->rq_cli_ctx) {
CERROR("req %p: fail to get context\n", req);
- return -ENOMEM;
+ return -ECONNREFUSED;
}
return 0;
@@ -515,6 +515,13 @@ static int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
+ } else if (unlikely(!test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags))) {
+ /*
+ * new ctx not up to date yet
+ */
+ CDEBUG(D_SEC,
+ "ctx (%p, fl %lx) doesn't switch, not up to date yet\n",
+ newctx, newctx->cc_flags);
} else {
/*
* it's possible newctx == oldctx if we're switching
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index b2cc5ea6cb93..2fe9085e2034 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -108,6 +108,7 @@ static struct ptlrpc_enc_page_pool {
unsigned long epp_st_lowfree; /* lowest free pages reached */
unsigned int epp_st_max_wqlen; /* highest waitqueue length */
unsigned long epp_st_max_wait; /* in jiffies */
+ unsigned long epp_st_outofmem; /* # of out of mem requests */
/*
* pointers to pools
*/
@@ -139,7 +140,8 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
"cache missing: %lu\n"
"low free mark: %lu\n"
"max waitqueue depth: %u\n"
- "max wait time: %ld/%lu\n",
+ "max wait time: %ld/%lu\n"
+ "out of mem: %lu\n",
totalram_pages,
PAGES_PER_POOL,
page_pools.epp_max_pages,
@@ -158,7 +160,8 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
page_pools.epp_st_lowfree,
page_pools.epp_st_max_wqlen,
page_pools.epp_st_max_wait,
- msecs_to_jiffies(MSEC_PER_SEC));
+ msecs_to_jiffies(MSEC_PER_SEC),
+ page_pools.epp_st_outofmem);
spin_unlock(&page_pools.epp_lock);
@@ -306,12 +309,30 @@ static inline void enc_pools_wakeup(void)
}
}
+/*
+ * Export the number of free pages in the pool
+ */
+int get_free_pages_in_pool(void)
+{
+ return page_pools.epp_free_pages;
+}
+
+/*
+ * Let the outside world know whether the enc_pool has reached full capacity
+ */
+int pool_is_at_full_capacity(void)
+{
+ return (page_pools.epp_total_pages == page_pools.epp_max_pages);
+}
+
void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
int p_idx, g_idx;
int i;
- if (!desc->bd_enc_iov)
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+
+ if (!GET_ENC_KIOV(desc))
return;
LASSERT(desc->bd_iov_count > 0);
@@ -326,12 +347,12 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
LASSERT(page_pools.epp_pools[p_idx]);
for (i = 0; i < desc->bd_iov_count; i++) {
- LASSERT(desc->bd_enc_iov[i].bv_page);
+ LASSERT(BD_GET_ENC_KIOV(desc, i).bv_page);
LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
LASSERT(!page_pools.epp_pools[p_idx][g_idx]);
page_pools.epp_pools[p_idx][g_idx] =
- desc->bd_enc_iov[i].bv_page;
+ BD_GET_ENC_KIOV(desc, i).bv_page;
if (++g_idx == PAGES_PER_POOL) {
p_idx++;
@@ -345,8 +366,8 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
spin_unlock(&page_pools.epp_lock);
- kfree(desc->bd_enc_iov);
- desc->bd_enc_iov = NULL;
+ kfree(GET_ENC_KIOV(desc));
+ GET_ENC_KIOV(desc) = NULL;
}
static inline void enc_pools_alloc(void)
@@ -404,6 +425,7 @@ int sptlrpc_enc_pool_init(void)
page_pools.epp_st_lowfree = 0;
page_pools.epp_st_max_wqlen = 0;
page_pools.epp_st_max_wait = 0;
+ page_pools.epp_st_outofmem = 0;
enc_pools_alloc();
if (!page_pools.epp_pools)
@@ -431,13 +453,14 @@ void sptlrpc_enc_pool_fini(void)
if (page_pools.epp_st_access > 0) {
CDEBUG(D_SEC,
- "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait %ld/%ld\n",
+ "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait %ld/%ld, out of mem %lu\n",
page_pools.epp_st_max_pages, page_pools.epp_st_grows,
page_pools.epp_st_grow_fails,
page_pools.epp_st_shrinks, page_pools.epp_st_access,
page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
page_pools.epp_st_max_wait,
- msecs_to_jiffies(MSEC_PER_SEC));
+ msecs_to_jiffies(MSEC_PER_SEC),
+ page_pools.epp_st_outofmem);
}
}
@@ -520,10 +543,11 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]);
for (i = 0; i < desc->bd_iov_count; i++) {
- cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].bv_page,
- desc->bd_iov[i].bv_offset &
+ cfs_crypto_hash_update_page(hdesc,
+ BD_GET_KIOV(desc, i).bv_page,
+ BD_GET_KIOV(desc, i).bv_offset &
~PAGE_MASK,
- desc->bd_iov[i].bv_len);
+ BD_GET_KIOV(desc, i).bv_len);
}
if (hashsize > buflen) {
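
get_free_pages_in_pool() and pool_is_at_full_capacity() above expose the encryption page pool's state, and the new epp_st_outofmem counter records allocations that had to give up. As a hedged sketch only, the caller below and its policy are hypothetical, not code from this patch: a user of such helpers might fail fast with -ENOMEM when the pool can no longer grow instead of sleeping for pages.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical pool state for illustration; the real state lives in page_pools. */
static int pool_free_pages;
static bool pool_full_capacity = true;
static unsigned long pool_outofmem_stat;

static int get_pages(int want)
{
	if (pool_free_pages >= want)
		return 0;		/* enough pages available right now */
	if (pool_full_capacity) {
		/* pool cannot grow any further: count it and bail out */
		pool_outofmem_stat++;
		return -ENOMEM;
	}
	return -EAGAIN;			/* caller could wait for the pool to grow */
}

int main(void)
{
	printf("rc=%d outofmem=%lu\n", get_pages(16), pool_outofmem_stat);
	return 0;
}
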
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index cd305bcb334a..c5e7a2309fce 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -153,14 +153,16 @@ static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
char *ptr;
unsigned int off, i;
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+
for (i = 0; i < desc->bd_iov_count; i++) {
- if (desc->bd_iov[i].bv_len == 0)
+ if (!BD_GET_KIOV(desc, i).bv_len)
continue;
- ptr = kmap(desc->bd_iov[i].bv_page);
- off = desc->bd_iov[i].bv_offset & ~PAGE_MASK;
+ ptr = kmap(BD_GET_KIOV(desc, i).bv_page);
+ off = BD_GET_KIOV(desc, i).bv_offset & ~PAGE_MASK;
ptr[off] ^= 0x1;
- kunmap(desc->bd_iov[i].bv_page);
+ kunmap(BD_GET_KIOV(desc, i).bv_page);
return;
}
}
@@ -352,11 +354,11 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
/* fix the actual data size */
for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
- if (desc->bd_iov[i].bv_len + nob > desc->bd_nob_transferred) {
- desc->bd_iov[i].bv_len =
- desc->bd_nob_transferred - nob;
- }
- nob += desc->bd_iov[i].bv_len;
+ struct bio_vec bv_desc = BD_GET_KIOV(desc, i);
+
+ if (bv_desc.bv_len + nob > desc->bd_nob_transferred)
+ bv_desc.bv_len = desc->bd_nob_transferred - nob;
+ nob += bv_desc.bv_len;
}
rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
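
The unwrap path above clamps the per-fragment lengths so their running sum never exceeds bd_nob_transferred before the bulk checksum is verified. A stand-alone illustration of that clamping on a plain array of fragment lengths (values chosen arbitrarily):

#include <stdio.h>

int main(void)
{
	unsigned int len[] = { 4096, 4096, 4096, 4096 };	/* fragment sizes */
	unsigned int nob_transferred = 10000;			/* bytes actually moved */
	unsigned int nob = 0;
	unsigned int i;

	for (i = 0; i < sizeof(len) / sizeof(len[0]); i++) {
		if (len[i] + nob > nob_transferred)
			len[i] = nob_transferred - nob;		/* trim the tail fragment */
		nob += len[i];
	}
	/* lengths become 4096, 4096, 1808, 0 and nob == 10000 */
	printf("nob=%u last=%u\n", nob, len[3]);
	return 0;
}
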
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 72f39308eebb..70c70558e177 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -343,9 +343,9 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
struct ptlrpc_service_conf *conf)
{
struct ptlrpc_service_thr_conf *tc = &conf->psc_thr;
- unsigned init;
- unsigned total;
- unsigned nthrs;
+ unsigned int init;
+ unsigned int total;
+ unsigned int nthrs;
int weight;
/*
@@ -2541,8 +2541,9 @@ int ptlrpc_hr_init(void)
hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i);
hrp->hrp_nthrs /= weight;
+ if (hrp->hrp_nthrs == 0)
+ hrp->hrp_nthrs = 1;
- LASSERT(hrp->hrp_nthrs > 0);
hrp->hrp_thrs =
kzalloc_node(hrp->hrp_nthrs * sizeof(*hrt), GFP_NOFS,
cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table,
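
The ptlrpc_hr_init() change above replaces an assertion with a fallback to a single handler thread, covering the case where integer division by the weight yields zero. A one-screen illustration of that rounding, with made-up numbers:

#include <stdio.h>

int main(void)
{
	int cpt_weight = 2;	/* CPUs in this partition (hypothetical) */
	int weight = 4;		/* divisor, e.g. HT siblings (hypothetical) */
	int nthrs = cpt_weight / weight;	/* integer division -> 0 */

	if (nthrs == 0)
		nthrs = 1;	/* always keep at least one handler thread */
	printf("nthrs=%d\n", nthrs);
	return 0;
}
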
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index b05b1f935e4c..a04e36cf6dd4 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -195,49 +195,29 @@ void lustre_assert_wire_constants(void)
LASSERTF(REINT_MAX == 10, "found %lld\n",
(long long)REINT_MAX);
LASSERTF(DISP_IT_EXECD == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)DISP_IT_EXECD);
+ (unsigned int)DISP_IT_EXECD);
LASSERTF(DISP_LOOKUP_EXECD == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)DISP_LOOKUP_EXECD);
+ (unsigned int)DISP_LOOKUP_EXECD);
LASSERTF(DISP_LOOKUP_NEG == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)DISP_LOOKUP_NEG);
+ (unsigned int)DISP_LOOKUP_NEG);
LASSERTF(DISP_LOOKUP_POS == 0x00000008UL, "found 0x%.8xUL\n",
- (unsigned)DISP_LOOKUP_POS);
+ (unsigned int)DISP_LOOKUP_POS);
LASSERTF(DISP_OPEN_CREATE == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned)DISP_OPEN_CREATE);
+ (unsigned int)DISP_OPEN_CREATE);
LASSERTF(DISP_OPEN_OPEN == 0x00000020UL, "found 0x%.8xUL\n",
- (unsigned)DISP_OPEN_OPEN);
+ (unsigned int)DISP_OPEN_OPEN);
LASSERTF(DISP_ENQ_COMPLETE == 0x00400000UL, "found 0x%.8xUL\n",
- (unsigned)DISP_ENQ_COMPLETE);
+ (unsigned int)DISP_ENQ_COMPLETE);
LASSERTF(DISP_ENQ_OPEN_REF == 0x00800000UL, "found 0x%.8xUL\n",
- (unsigned)DISP_ENQ_OPEN_REF);
+ (unsigned int)DISP_ENQ_OPEN_REF);
LASSERTF(DISP_ENQ_CREATE_REF == 0x01000000UL, "found 0x%.8xUL\n",
- (unsigned)DISP_ENQ_CREATE_REF);
+ (unsigned int)DISP_ENQ_CREATE_REF);
LASSERTF(DISP_OPEN_LOCK == 0x02000000UL, "found 0x%.8xUL\n",
- (unsigned)DISP_OPEN_LOCK);
+ (unsigned int)DISP_OPEN_LOCK);
LASSERTF(MDS_STATUS_CONN == 1, "found %lld\n",
(long long)MDS_STATUS_CONN);
LASSERTF(MDS_STATUS_LOV == 2, "found %lld\n",
(long long)MDS_STATUS_LOV);
- LASSERTF(LUSTRE_BFLAG_UNCOMMITTED_WRITES == 1, "found %lld\n",
- (long long)LUSTRE_BFLAG_UNCOMMITTED_WRITES);
- LASSERTF(MF_SOM_CHANGE == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)MF_SOM_CHANGE);
- LASSERTF(MF_EPOCH_OPEN == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)MF_EPOCH_OPEN);
- LASSERTF(MF_EPOCH_CLOSE == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)MF_EPOCH_CLOSE);
- LASSERTF(MF_MDC_CANCEL_FID1 == 0x00000008UL, "found 0x%.8xUL\n",
- (unsigned)MF_MDC_CANCEL_FID1);
- LASSERTF(MF_MDC_CANCEL_FID2 == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned)MF_MDC_CANCEL_FID2);
- LASSERTF(MF_MDC_CANCEL_FID3 == 0x00000020UL, "found 0x%.8xUL\n",
- (unsigned)MF_MDC_CANCEL_FID3);
- LASSERTF(MF_MDC_CANCEL_FID4 == 0x00000040UL, "found 0x%.8xUL\n",
- (unsigned)MF_MDC_CANCEL_FID4);
- LASSERTF(MF_SOM_AU == 0x00000080UL, "found 0x%.8xUL\n",
- (unsigned)MF_SOM_AU);
- LASSERTF(MF_GETATTR_LOCK == 0x00000100UL, "found 0x%.8xUL\n",
- (unsigned)MF_GETATTR_LOCK);
LASSERTF(MDS_ATTR_MODE == 0x0000000000000001ULL, "found 0x%.16llxULL\n",
(long long)MDS_ATTR_MODE);
LASSERTF(MDS_ATTR_UID == 0x0000000000000002ULL, "found 0x%.16llxULL\n",
@@ -420,15 +400,13 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid) == 16, "found %lld\n",
(long long)(int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid));
LASSERTF(LMAI_RELEASED == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)LMAI_RELEASED);
+ (unsigned int)LMAI_RELEASED);
LASSERTF(LMAC_HSM == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)LMAC_HSM);
- LASSERTF(LMAC_SOM == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)LMAC_SOM);
+ (unsigned int)LMAC_HSM);
LASSERTF(LMAC_NOT_IN_OI == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)LMAC_NOT_IN_OI);
+ (unsigned int)LMAC_NOT_IN_OI);
LASSERTF(LMAC_FID_ON_OST == 0x00000008UL, "found 0x%.8xUL\n",
- (unsigned)LMAC_FID_ON_OST);
+ (unsigned int)LMAC_FID_ON_OST);
/* Checks for struct ost_id */
LASSERTF((int)sizeof(struct ost_id) == 16, "found %lld\n",
@@ -478,11 +456,11 @@ void lustre_assert_wire_constants(void)
LASSERTF(FID_SEQ_LOV_DEFAULT == 0xffffffffffffffffULL, "found 0x%.16llxULL\n",
(long long)FID_SEQ_LOV_DEFAULT);
LASSERTF(FID_OID_SPECIAL_BFL == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)FID_OID_SPECIAL_BFL);
+ (unsigned int)FID_OID_SPECIAL_BFL);
LASSERTF(FID_OID_DOT_LUSTRE == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)FID_OID_DOT_LUSTRE);
+ (unsigned int)FID_OID_DOT_LUSTRE);
LASSERTF(FID_OID_DOT_LUSTRE_OBF == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)FID_OID_DOT_LUSTRE_OBF);
+ (unsigned int)FID_OID_DOT_LUSTRE_OBF);
/* Checks for struct lu_dirent */
LASSERTF((int)sizeof(struct lu_dirent) == 32, "found %lld\n",
@@ -512,11 +490,11 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_name[0]) == 1, "found %lld\n",
(long long)(int)sizeof(((struct lu_dirent *)0)->lde_name[0]));
LASSERTF(LUDA_FID == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)LUDA_FID);
+ (unsigned int)LUDA_FID);
LASSERTF(LUDA_TYPE == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)LUDA_TYPE);
+ (unsigned int)LUDA_TYPE);
LASSERTF(LUDA_64BITHASH == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)LUDA_64BITHASH);
+ (unsigned int)LUDA_64BITHASH);
/* Checks for struct luda_type */
LASSERTF((int)sizeof(struct luda_type) == 2, "found %lld\n",
@@ -635,10 +613,18 @@ void lustre_assert_wire_constants(void)
(long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_xid));
LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == 8, "found %lld\n",
(long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_seen) == 32, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_seen));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_tag) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_tag));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding0) == 34, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding0));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding1) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding1));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1));
LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == 40, "found %lld\n",
(long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_committed));
LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == 8, "found %lld\n",
@@ -680,10 +666,22 @@ void lustre_assert_wire_constants(void)
(long long)(int)offsetof(struct ptlrpc_body_v3, pb_pre_versions));
LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == 32, "found %lld\n",
(long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding) == 120, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_mbits) == 120, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_mbits));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_0) == 128, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_0));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_1) == 136, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_1));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_2) == 144, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_2));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2));
CLASSERT(LUSTRE_JOBID_SIZE == 32);
LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_jobid) == 152, "found %lld\n",
(long long)(int)offsetof(struct ptlrpc_body_v3, pb_jobid));
@@ -713,10 +711,18 @@ void lustre_assert_wire_constants(void)
(int)offsetof(struct ptlrpc_body_v3, pb_last_xid), (int)offsetof(struct ptlrpc_body_v2, pb_last_xid));
LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid), "%d != %d\n",
(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_seen) == (int)offsetof(struct ptlrpc_body_v2, pb_last_seen), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_last_seen), (int)offsetof(struct ptlrpc_body_v2, pb_last_seen));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_seen), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_seen));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_tag) == (int)offsetof(struct ptlrpc_body_v2, pb_tag), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_tag), (int)offsetof(struct ptlrpc_body_v2, pb_tag));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_tag), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_tag));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding0) == (int)offsetof(struct ptlrpc_body_v2, pb_padding0), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_padding0), (int)offsetof(struct ptlrpc_body_v2, pb_padding0));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding0), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding0));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding1) == (int)offsetof(struct ptlrpc_body_v2, pb_padding1), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_padding1), (int)offsetof(struct ptlrpc_body_v2, pb_padding1));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding1), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding1));
LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == (int)offsetof(struct ptlrpc_body_v2, pb_last_committed), "%d != %d\n",
(int)offsetof(struct ptlrpc_body_v3, pb_last_committed), (int)offsetof(struct ptlrpc_body_v2, pb_last_committed));
LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_committed), "%d != %d\n",
@@ -757,10 +763,22 @@ void lustre_assert_wire_constants(void)
(int)offsetof(struct ptlrpc_body_v3, pb_pre_versions), (int)offsetof(struct ptlrpc_body_v2, pb_pre_versions));
LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions), "%d != %d\n",
(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding) == (int)offsetof(struct ptlrpc_body_v2, pb_padding), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_padding), (int)offsetof(struct ptlrpc_body_v2, pb_padding));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_mbits) == (int)offsetof(struct ptlrpc_body_v2, pb_mbits), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_mbits), (int)offsetof(struct ptlrpc_body_v2, pb_mbits));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_mbits), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_mbits));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_0) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_0), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_padding64_0), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_0));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_0), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_0));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_1) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_1), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_padding64_1), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_1));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_1), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_1));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_2) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_2), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_padding64_2), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_2));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_2), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_2));
LASSERTF(MSG_PTLRPC_BODY_OFF == 0, "found %lld\n",
(long long)MSG_PTLRPC_BODY_OFF);
LASSERTF(REQ_REC_OFF == 1, "found %lld\n",
@@ -802,41 +820,41 @@ void lustre_assert_wire_constants(void)
LASSERTF(MSGHDR_CKSUM_INCOMPAT18 == 2, "found %lld\n",
(long long)MSGHDR_CKSUM_INCOMPAT18);
LASSERTF(MSG_OP_FLAG_MASK == 0xffff0000UL, "found 0x%.8xUL\n",
- (unsigned)MSG_OP_FLAG_MASK);
+ (unsigned int)MSG_OP_FLAG_MASK);
LASSERTF(MSG_OP_FLAG_SHIFT == 16, "found %lld\n",
(long long)MSG_OP_FLAG_SHIFT);
LASSERTF(MSG_GEN_FLAG_MASK == 0x0000ffffUL, "found 0x%.8xUL\n",
- (unsigned)MSG_GEN_FLAG_MASK);
+ (unsigned int)MSG_GEN_FLAG_MASK);
LASSERTF(MSG_LAST_REPLAY == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)MSG_LAST_REPLAY);
+ (unsigned int)MSG_LAST_REPLAY);
LASSERTF(MSG_RESENT == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)MSG_RESENT);
+ (unsigned int)MSG_RESENT);
LASSERTF(MSG_REPLAY == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)MSG_REPLAY);
+ (unsigned int)MSG_REPLAY);
LASSERTF(MSG_DELAY_REPLAY == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned)MSG_DELAY_REPLAY);
+ (unsigned int)MSG_DELAY_REPLAY);
LASSERTF(MSG_VERSION_REPLAY == 0x00000020UL, "found 0x%.8xUL\n",
- (unsigned)MSG_VERSION_REPLAY);
+ (unsigned int)MSG_VERSION_REPLAY);
LASSERTF(MSG_REQ_REPLAY_DONE == 0x00000040UL, "found 0x%.8xUL\n",
- (unsigned)MSG_REQ_REPLAY_DONE);
+ (unsigned int)MSG_REQ_REPLAY_DONE);
LASSERTF(MSG_LOCK_REPLAY_DONE == 0x00000080UL, "found 0x%.8xUL\n",
- (unsigned)MSG_LOCK_REPLAY_DONE);
+ (unsigned int)MSG_LOCK_REPLAY_DONE);
LASSERTF(MSG_CONNECT_RECOVERING == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_RECOVERING);
+ (unsigned int)MSG_CONNECT_RECOVERING);
LASSERTF(MSG_CONNECT_RECONNECT == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_RECONNECT);
+ (unsigned int)MSG_CONNECT_RECONNECT);
LASSERTF(MSG_CONNECT_REPLAYABLE == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_REPLAYABLE);
+ (unsigned int)MSG_CONNECT_REPLAYABLE);
LASSERTF(MSG_CONNECT_LIBCLIENT == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_LIBCLIENT);
+ (unsigned int)MSG_CONNECT_LIBCLIENT);
LASSERTF(MSG_CONNECT_INITIAL == 0x00000020UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_INITIAL);
+ (unsigned int)MSG_CONNECT_INITIAL);
LASSERTF(MSG_CONNECT_ASYNC == 0x00000040UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_ASYNC);
+ (unsigned int)MSG_CONNECT_ASYNC);
LASSERTF(MSG_CONNECT_NEXT_VER == 0x00000080UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_NEXT_VER);
+ (unsigned int)MSG_CONNECT_NEXT_VER);
LASSERTF(MSG_CONNECT_TRANSNO == 0x00000100UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_TRANSNO);
+ (unsigned int)MSG_CONNECT_TRANSNO);
/* Checks for struct obd_connect_data */
LASSERTF((int)sizeof(struct obd_connect_data) == 192, "found %lld\n",
@@ -905,14 +923,22 @@ void lustre_assert_wire_constants(void)
(long long)(int)offsetof(struct obd_connect_data, ocd_maxbytes));
LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes) == 8, "found %lld\n",
(long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes));
- LASSERTF((int)offsetof(struct obd_connect_data, padding1) == 72, "found %lld\n",
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_maxmodrpcs) == 72, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_maxmodrpcs));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_maxmodrpcs) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_maxmodrpcs));
+ LASSERTF((int)offsetof(struct obd_connect_data, padding0) == 74, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, padding0));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding0) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->padding0));
+ LASSERTF((int)offsetof(struct obd_connect_data, padding1) == 76, "found %lld\n",
(long long)(int)offsetof(struct obd_connect_data, padding1));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding1) == 8, "found %lld\n",
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding1) == 4, "found %lld\n",
(long long)(int)sizeof(((struct obd_connect_data *)0)->padding1));
- LASSERTF((int)offsetof(struct obd_connect_data, padding2) == 80, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding2));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding2));
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_connect_flags2) == 80, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_connect_flags2));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags2) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags2));
LASSERTF((int)offsetof(struct obd_connect_data, padding3) == 88, "found %lld\n",
(long long)(int)offsetof(struct obd_connect_data, padding3));
LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding3) == 8, "found %lld\n",
@@ -1075,14 +1101,24 @@ void lustre_assert_wire_constants(void)
OBD_CONNECT_LFSCK);
LASSERTF(OBD_CONNECT_UNLINK_CLOSE == 0x100000000000000ULL, "found 0x%.16llxULL\n",
OBD_CONNECT_UNLINK_CLOSE);
+ LASSERTF(OBD_CONNECT_MULTIMODRPCS == 0x200000000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_MULTIMODRPCS);
LASSERTF(OBD_CONNECT_DIR_STRIPE == 0x400000000000000ULL, "found 0x%.16llxULL\n",
OBD_CONNECT_DIR_STRIPE);
+ LASSERTF(OBD_CONNECT_SUBTREE == 0x800000000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_SUBTREE);
+ LASSERTF(OBD_CONNECT_LOCK_AHEAD == 0x1000000000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_LOCK_AHEAD);
+ LASSERTF(OBD_CONNECT_OBDOPACK == 0x4000000000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_OBDOPACK);
+ LASSERTF(OBD_CONNECT_FLAGS2 == 0x8000000000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_FLAGS2);
LASSERTF(OBD_CKSUM_CRC32 == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)OBD_CKSUM_CRC32);
+ (unsigned int)OBD_CKSUM_CRC32);
LASSERTF(OBD_CKSUM_ADLER == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)OBD_CKSUM_ADLER);
+ (unsigned int)OBD_CKSUM_ADLER);
LASSERTF(OBD_CKSUM_CRC32C == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)OBD_CKSUM_CRC32C);
+ (unsigned int)OBD_CKSUM_CRC32C);
/* Checks for struct obdo */
LASSERTF((int)sizeof(struct obdo) == 208, "found %lld\n",
@@ -1239,8 +1275,6 @@ void lustre_assert_wire_constants(void)
OBD_MD_FLCKSUM);
LASSERTF(OBD_MD_FLQOS == (0x00200000ULL), "found 0x%.16llxULL\n",
OBD_MD_FLQOS);
- LASSERTF(OBD_MD_FLCOOKIE == (0x00800000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLCOOKIE);
LASSERTF(OBD_MD_FLGROUP == (0x01000000ULL), "found 0x%.16llxULL\n",
OBD_MD_FLGROUP);
LASSERTF(OBD_MD_FLFID == (0x02000000ULL), "found 0x%.16llxULL\n",
@@ -1394,13 +1428,13 @@ void lustre_assert_wire_constants(void)
(long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_objects[0]));
CLASSERT(LOV_MAGIC_V3 == (0x0BD30000 | 0x0BD0));
LASSERTF(LOV_PATTERN_RAID0 == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)LOV_PATTERN_RAID0);
+ (unsigned int)LOV_PATTERN_RAID0);
LASSERTF(LOV_PATTERN_RAID1 == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)LOV_PATTERN_RAID1);
+ (unsigned int)LOV_PATTERN_RAID1);
LASSERTF(LOV_PATTERN_FIRST == 0x00000100UL, "found 0x%.8xUL\n",
- (unsigned)LOV_PATTERN_FIRST);
+ (unsigned int)LOV_PATTERN_FIRST);
LASSERTF(LOV_PATTERN_CMOBD == 0x00000200UL, "found 0x%.8xUL\n",
- (unsigned)LOV_PATTERN_CMOBD);
+ (unsigned int)LOV_PATTERN_CMOBD);
/* Checks for struct lmv_mds_md_v1 */
LASSERTF((int)sizeof(struct lmv_mds_md_v1) == 56, "found %lld\n",
@@ -1542,6 +1576,8 @@ void lustre_assert_wire_constants(void)
(long long)(int)offsetof(struct obd_ioobj, ioo_bufcnt));
LASSERTF((int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt) == 4, "found %lld\n",
(long long)(int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt));
+ LASSERTF(IOOBJ_MAX_BRW_BITS == 16, "found %lld\n",
+ (long long)IOOBJ_MAX_BRW_BITS);
/* Checks for union lquota_id */
LASSERTF((int)sizeof(union lquota_id) == 16, "found %lld\n",
@@ -1817,10 +1853,10 @@ void lustre_assert_wire_constants(void)
(long long)(int)offsetof(struct mdt_body, mbo_max_mdsize));
LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_max_mdsize) == 4, "found %lld\n",
(long long)(int)sizeof(((struct mdt_body *)0)->mbo_max_mdsize));
- LASSERTF((int)offsetof(struct mdt_body, mbo_max_cookiesize) == 160, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_max_cookiesize));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_max_cookiesize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_max_cookiesize));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_unused3) == 160, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_unused3));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_unused3) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_unused3));
LASSERTF((int)offsetof(struct mdt_body, mbo_uid_h) == 164, "found %lld\n",
(long long)(int)offsetof(struct mdt_body, mbo_uid_h));
LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_uid_h) == 4, "found %lld\n",
@@ -1857,12 +1893,6 @@ void lustre_assert_wire_constants(void)
MDS_FMODE_CLOSED);
LASSERTF(MDS_FMODE_EXEC == 000000000004UL, "found 0%.11oUL\n",
MDS_FMODE_EXEC);
- LASSERTF(MDS_FMODE_EPOCH == 000001000000UL, "found 0%.11oUL\n",
- MDS_FMODE_EPOCH);
- LASSERTF(MDS_FMODE_TRUNC == 000002000000UL, "found 0%.11oUL\n",
- MDS_FMODE_TRUNC);
- LASSERTF(MDS_FMODE_SOM == 000004000000UL, "found 0%.11oUL\n",
- MDS_FMODE_SOM);
LASSERTF(MDS_OPEN_CREATED == 000000000010UL, "found 0%.11oUL\n",
MDS_OPEN_CREATED);
LASSERTF(MDS_OPEN_CROSS == 000000000020UL, "found 0%.11oUL\n",
@@ -1905,10 +1935,20 @@ void lustre_assert_wire_constants(void)
LUSTRE_IMMUTABLE_FL);
LASSERTF(LUSTRE_APPEND_FL == 0x00000020, "found 0x%.8x\n",
LUSTRE_APPEND_FL);
+ LASSERTF(LUSTRE_NODUMP_FL == 0x00000040, "found 0x%.8x\n",
+ LUSTRE_NODUMP_FL);
LASSERTF(LUSTRE_NOATIME_FL == 0x00000080, "found 0x%.8x\n",
LUSTRE_NOATIME_FL);
+ LASSERTF(LUSTRE_INDEX_FL == 0x00001000, "found 0x%.8x\n",
+ LUSTRE_INDEX_FL);
LASSERTF(LUSTRE_DIRSYNC_FL == 0x00010000, "found 0x%.8x\n",
LUSTRE_DIRSYNC_FL);
+ LASSERTF(LUSTRE_TOPDIR_FL == 0x00020000, "found 0x%.8x\n",
+ LUSTRE_TOPDIR_FL);
+ LASSERTF(LUSTRE_DIRECTIO_FL == 0x00100000, "found 0x%.8x\n",
+ LUSTRE_DIRECTIO_FL);
+ LASSERTF(LUSTRE_INLINE_DATA_FL == 0x10000000, "found 0x%.8x\n",
+ LUSTRE_INLINE_DATA_FL);
LASSERTF(MDS_INODELOCK_LOOKUP == 0x000001, "found 0x%.8x\n",
MDS_INODELOCK_LOOKUP);
LASSERTF(MDS_INODELOCK_UPDATE == 0x000002, "found 0x%.8x\n",
@@ -1921,22 +1961,22 @@ void lustre_assert_wire_constants(void)
/* Checks for struct mdt_ioepoch */
LASSERTF((int)sizeof(struct mdt_ioepoch) == 24, "found %lld\n",
(long long)(int)sizeof(struct mdt_ioepoch));
- LASSERTF((int)offsetof(struct mdt_ioepoch, handle) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_ioepoch, handle));
- LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->handle) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_ioepoch *)0)->handle));
- LASSERTF((int)offsetof(struct mdt_ioepoch, ioepoch) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_ioepoch, ioepoch));
- LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->ioepoch) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_ioepoch *)0)->ioepoch));
- LASSERTF((int)offsetof(struct mdt_ioepoch, flags) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_ioepoch, flags));
- LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_ioepoch *)0)->flags));
- LASSERTF((int)offsetof(struct mdt_ioepoch, padding) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mdt_ioepoch, padding));
- LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_ioepoch *)0)->padding));
+ LASSERTF((int)offsetof(struct mdt_ioepoch, mio_handle) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_ioepoch, mio_handle));
+ LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_handle) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_handle));
+ LASSERTF((int)offsetof(struct mdt_ioepoch, mio_unused1) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_ioepoch, mio_unused1));
+ LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_unused1) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_unused1));
+ LASSERTF((int)offsetof(struct mdt_ioepoch, mio_unused2) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_ioepoch, mio_unused2));
+ LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_unused2) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_unused2));
+ LASSERTF((int)offsetof(struct mdt_ioepoch, mio_padding) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_ioepoch, mio_padding));
+ LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_padding) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_padding));
/* Checks for struct mdt_rec_setattr */
LASSERTF((int)sizeof(struct mdt_rec_setattr) == 136, "found %lld\n",
@@ -3520,21 +3560,21 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx) == 4, "found %lld\n",
(long long)(int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx));
- /* Checks for struct ll_fiemap_info_key */
+ /* Checks for struct fiemap_info_key */
LASSERTF((int)sizeof(struct ll_fiemap_info_key) == 248, "found %lld\n",
(long long)(int)sizeof(struct ll_fiemap_info_key));
- LASSERTF((int)offsetof(struct ll_fiemap_info_key, name[8]) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_info_key, name[8]));
- LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->name[8]) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->name[8]));
- LASSERTF((int)offsetof(struct ll_fiemap_info_key, oa) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_info_key, oa));
- LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->oa) == 208, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->oa));
- LASSERTF((int)offsetof(struct ll_fiemap_info_key, fiemap) == 216, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_info_key, fiemap));
- LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap));
+ LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_name[8]) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_name[8]));
+ LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_name[8]) == 1, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_name[8]));
+ LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_oa) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_oa));
+ LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_oa) == 208, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_oa));
+ LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_fiemap) == 216, "found %lld\n",
+ (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_fiemap));
+ LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_fiemap) == 32, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_fiemap));
/* Checks for struct mgs_target_info */
LASSERTF((int)sizeof(struct mgs_target_info) == 4544, "found %lld\n",
@@ -3670,64 +3710,64 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0]) == 1, "found %lld\n",
(long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0]));
- /* Checks for struct ll_user_fiemap */
- LASSERTF((int)sizeof(struct ll_user_fiemap) == 32, "found %lld\n",
- (long long)(int)sizeof(struct ll_user_fiemap));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_start) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_start));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_start) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_start));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_length) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_length));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_length) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_length));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_flags) == 16, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_flags));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_flags));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_mapped_extents) == 20, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_mapped_extents));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_mapped_extents) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_mapped_extents));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_extent_count) == 24, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_extent_count));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_extent_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_extent_count));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_reserved) == 28, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_reserved));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_reserved) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_reserved));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_extents) == 32, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_extents));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_extents) == 0, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_extents));
+ /* Checks for struct fiemap */
+ LASSERTF((int)sizeof(struct fiemap) == 32, "found %lld\n",
+ (long long)(int)sizeof(struct fiemap));
+ LASSERTF((int)offsetof(struct fiemap, fm_start) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap, fm_start));
+ LASSERTF((int)sizeof(((struct fiemap *)0)->fm_start) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap *)0)->fm_start));
+ LASSERTF((int)offsetof(struct fiemap, fm_length) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap, fm_length));
+ LASSERTF((int)sizeof(((struct fiemap *)0)->fm_length) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap *)0)->fm_length));
+ LASSERTF((int)offsetof(struct fiemap, fm_flags) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap, fm_flags));
+ LASSERTF((int)sizeof(((struct fiemap *)0)->fm_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap *)0)->fm_flags));
+ LASSERTF((int)offsetof(struct fiemap, fm_mapped_extents) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap, fm_mapped_extents));
+ LASSERTF((int)sizeof(((struct fiemap *)0)->fm_mapped_extents) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap *)0)->fm_mapped_extents));
+ LASSERTF((int)offsetof(struct fiemap, fm_extent_count) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap, fm_extent_count));
+ LASSERTF((int)sizeof(((struct fiemap *)0)->fm_extent_count) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap *)0)->fm_extent_count));
+ LASSERTF((int)offsetof(struct fiemap, fm_reserved) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap, fm_reserved));
+ LASSERTF((int)sizeof(((struct fiemap *)0)->fm_reserved) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap *)0)->fm_reserved));
+ LASSERTF((int)offsetof(struct fiemap, fm_extents) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap, fm_extents));
+ LASSERTF((int)sizeof(((struct fiemap *)0)->fm_extents) == 0, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap *)0)->fm_extents));
CLASSERT(FIEMAP_FLAG_SYNC == 0x00000001);
CLASSERT(FIEMAP_FLAG_XATTR == 0x00000002);
CLASSERT(FIEMAP_FLAG_DEVICE_ORDER == 0x40000000);
- /* Checks for struct ll_fiemap_extent */
- LASSERTF((int)sizeof(struct ll_fiemap_extent) == 56, "found %lld\n",
- (long long)(int)sizeof(struct ll_fiemap_extent));
- LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_logical) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_extent, fe_logical));
- LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_logical) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_logical));
- LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_physical) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_extent, fe_physical));
- LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_physical) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_physical));
- LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_length) == 16, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_extent, fe_length));
- LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_length) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_length));
- LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_flags) == 40, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_extent, fe_flags));
- LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_flags));
- LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_device) == 44, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_extent, fe_device));
- LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_device) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_device));
+ /* Checks for struct fiemap_extent */
+ LASSERTF((int)sizeof(struct fiemap_extent) == 56, "found %lld\n",
+ (long long)(int)sizeof(struct fiemap_extent));
+ LASSERTF((int)offsetof(struct fiemap_extent, fe_logical) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap_extent, fe_logical));
+ LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_logical) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_logical));
+ LASSERTF((int)offsetof(struct fiemap_extent, fe_physical) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap_extent, fe_physical));
+ LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_physical) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_physical));
+ LASSERTF((int)offsetof(struct fiemap_extent, fe_length) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap_extent, fe_length));
+ LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_length) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_length));
+ LASSERTF((int)offsetof(struct fiemap_extent, fe_flags) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap_extent, fe_flags));
+ LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_flags));
+ LASSERTF((int)offsetof(struct fiemap_extent, fe_reserved[0]) == 44, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap_extent, fe_reserved[0]));
+ LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_reserved[0]) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_reserved[0]));
CLASSERT(FIEMAP_EXTENT_LAST == 0x00000001);
CLASSERT(FIEMAP_EXTENT_UNKNOWN == 0x00000002);
CLASSERT(FIEMAP_EXTENT_DELALLOC == 0x00000004);
@@ -4093,9 +4133,9 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_data_len) == 4, "found %lld\n",
(long long)(int)sizeof(((struct hsm_request *)0)->hr_data_len));
LASSERTF(HSM_FORCE_ACTION == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)HSM_FORCE_ACTION);
+ (unsigned int)HSM_FORCE_ACTION);
LASSERTF(HSM_GHOST_COPY == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)HSM_GHOST_COPY);
+ (unsigned int)HSM_GHOST_COPY);
/* Checks for struct hsm_user_request */
LASSERTF((int)sizeof(struct hsm_user_request) == 24, "found %lld\n",
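A side note on the wire checks rewritten above: each LASSERTF pins the expected size and offset of a renamed field so an accidental layout change is noticed. A minimal sketch of the same idea in plain C11, using an illustrative struct and not the Lustre definitions:

    /* Compile-time layout pinning with _Static_assert; the offsets mirror
     * the fiemap checks above, but the struct itself is a stand-in.
     */
    #include <stddef.h>
    #include <stdint.h>

    struct demo_fiemap {
            uint64_t fm_start;          /* expected offset 0  */
            uint64_t fm_length;         /* expected offset 8  */
            uint32_t fm_flags;          /* expected offset 16 */
            uint32_t fm_mapped_extents; /* expected offset 20 */
            uint32_t fm_extent_count;   /* expected offset 24 */
            uint32_t fm_reserved;       /* expected offset 28 */
    };

    _Static_assert(sizeof(struct demo_fiemap) == 32, "wire size changed");
    _Static_assert(offsetof(struct demo_fiemap, fm_flags) == 16, "fm_flags moved");
    _Static_assert(offsetof(struct demo_fiemap, fm_extent_count) == 24, "fm_extent_count moved");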
diff --git a/drivers/staging/lustre/sysfs-fs-lustre b/drivers/staging/lustre/sysfs-fs-lustre
index 20206ba965af..8691c6543a9c 100644
--- a/drivers/staging/lustre/sysfs-fs-lustre
+++ b/drivers/staging/lustre/sysfs-fs-lustre
@@ -11,7 +11,7 @@ Description:
Shows if the lustre module has pinger support.
"on" means yes and "off" means no.
-What: /sys/fs/lustre/health
+What: /sys/fs/lustre/health_check
Date: May 2015
Contact: "Oleg Drokin" <oleg.drokin@intel.com>
Description:
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 4d9bd02ede47..c5116c058cea 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -2542,7 +2542,7 @@ static int bcm2048_vidioc_s_hw_freq_seek(struct file *file, void *priv,
return err;
}
-static struct v4l2_ioctl_ops bcm2048_ioctl_ops = {
+static const struct v4l2_ioctl_ops bcm2048_ioctl_ops = {
.vidioc_querycap = bcm2048_vidioc_querycap,
.vidioc_g_input = bcm2048_vidioc_g_input,
.vidioc_s_input = bcm2048_vidioc_s_input,
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
index fedeb3c3549e..c72c3f09f175 100644
--- a/drivers/staging/media/cxd2099/cxd2099.c
+++ b/drivers/staging/media/cxd2099/cxd2099.c
@@ -336,7 +336,8 @@ static int init(struct cxd *ci)
break;
#endif
/* TOSTRT = 8, Mode B (gated clock), falling Edge,
- * Serial, POL=HIGH, MSB */
+ * Serial, POL=HIGH, MSB
+ */
status = write_reg(ci, 0x0A, 0xA7);
if (status < 0)
break;
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 8be9f854510f..c34bf4621767 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -1143,8 +1143,8 @@ static int vpfe_buffer_prepare(struct vb2_buffer *vb)
/* Initialize buffer */
vb2_set_plane_payload(vb, 0, video->fmt.fmt.pix.sizeimage);
if (vb2_plane_vaddr(vb, 0) &&
- vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
- return -EINVAL;
+ vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
+ return -EINVAL;
addr = vb2_dma_contig_plane_dma_addr(vb, 0);
/* Make sure user addresses are aligned to 32 bytes */
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index 4678ae10b030..920c4a1290f4 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -103,7 +103,8 @@ struct sasem_context {
struct tx_t {
unsigned char data_buf[SASEM_DATA_BUF_SZ]; /* user data
- * buffer */
+ * buffer
+ */
struct completion finished; /* wait for write to finish */
atomic_t busy; /* write in progress */
int status; /* status of tx completion */
@@ -295,7 +296,8 @@ static int vfd_close(struct inode *inode, struct file *file)
if (!context->dev_present && !context->ir_isopen) {
/* Device disconnected before close and IR port is
* not open. If IR port is open, context will be
- * deleted by ir_close. */
+ * deleted by ir_close.
+ */
mutex_unlock(&context->ctx_lock);
delete_context(context);
return retval;
@@ -397,7 +399,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
/* Nine 8 byte packets to be sent */
/* NOTE: "\x07\x01\0\0\0\0\0\0" or "\x0c\0\0\0\0\0\0\0"
- * will clear the VFD */
+ * will clear the VFD
+ */
for (i = 0; i < 9; i++) {
switch (i) {
case 0:
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 3551aed589c0..34aac3e2eb87 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -1157,8 +1157,8 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n,
/* Send the code */
if (ret == 0) {
- ret = send_code(tx, (unsigned)command >> 16,
- (unsigned)command & 0xFFFF);
+ ret = send_code(tx, (unsigned int)command >> 16,
+ (unsigned int)command & 0xFFFF);
if (ret == -EPROTO) {
mutex_unlock(&ir->ir_lock);
mutex_unlock(&tx->client_lock);
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
index aaca39d751a5..f71d5f2f179f 100644
--- a/drivers/staging/media/omap4iss/iss_csi2.c
+++ b/drivers/staging/media/omap4iss/iss_csi2.c
@@ -224,7 +224,7 @@ static u16 csi2_ctx_map_format(struct iss_csi2_device *csi2)
fmtidx = 3;
break;
default:
- WARN(1, KERN_ERR "CSI2: pixel format %08x unsupported!\n",
+ WARN(1, "CSI2: pixel format %08x unsupported!\n",
fmt->code);
return 0;
}
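On the WARN() fix above: WARN() already logs at warning severity and dumps a backtrace, so a KERN_ERR prefix inside its format string would only appear as literal text. A hedged sketch of the corrected pattern, with illustrative names:

    #include <linux/bug.h>
    #include <linux/errno.h>

    /* WARN(cond, fmt, ...) evaluates to the condition, so it can gate the
     * error path directly; no KERN_* marker belongs in the format string.
     */
    static int demo_map_format(unsigned int code, unsigned int supported)
    {
            if (WARN(code != supported, "unsupported pixel format %08x\n", code))
                    return -EINVAL;
            return 0;
    }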
diff --git a/drivers/staging/media/s5p-cec/s5p_cec.c b/drivers/staging/media/s5p-cec/s5p_cec.c
index 1780a08b73c9..aef962b6af31 100644
--- a/drivers/staging/media/s5p-cec/s5p_cec.c
+++ b/drivers/staging/media/s5p-cec/s5p_cec.c
@@ -22,7 +22,6 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/timer.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <media/cec.h>
diff --git a/drivers/staging/media/st-cec/stih-cec.c b/drivers/staging/media/st-cec/stih-cec.c
index 214344866a6b..b22394ac4ec4 100644
--- a/drivers/staging/media/st-cec/stih-cec.c
+++ b/drivers/staging/media/st-cec/stih-cec.c
@@ -108,11 +108,11 @@
/* Constants for CEC_BIT_TOUT_THRESH register */
#define CEC_SBIT_TOUT_47MS BIT(1)
-#define CEC_SBIT_TOUT_48MS BIT(0) | BIT(1)
+#define CEC_SBIT_TOUT_48MS (BIT(0) | BIT(1))
#define CEC_SBIT_TOUT_50MS BIT(2)
#define CEC_DBIT_TOUT_27MS BIT(0)
#define CEC_DBIT_TOUT_28MS BIT(1)
-#define CEC_DBIT_TOUT_29MS BIT(0) | BIT(1)
+#define CEC_DBIT_TOUT_29MS (BIT(0) | BIT(1))
/* Constants for CEC_BIT_PULSE_THRESH register */
#define CEC_BIT_LPULSE_03MS BIT(1)
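The added parentheses above close a precedence hole: & binds tighter than |, so an unparenthesized BIT(0) | BIT(1) changes meaning once the macro sits inside a larger expression. An illustrative sketch (macro names are made up, BIT() as in the kernel):

    #define BIT(n)          (1UL << (n))
    #define TOUT_29MS_BAD   BIT(0) | BIT(1)    /* old, unparenthesized form */
    #define TOUT_29MS_GOOD  (BIT(0) | BIT(1))  /* form used by the patch */

    /* reg & TOUT_29MS_BAD expands to (reg & BIT(0)) | BIT(1), which is
     * non-zero even when both bits are clear; the parenthesized macro
     * tests the two-bit field as intended.
     */
    static unsigned long field_bad(unsigned long reg)  { return reg & TOUT_29MS_BAD;  }
    static unsigned long field_good(unsigned long reg) { return reg & TOUT_29MS_GOOD; }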
diff --git a/drivers/staging/most/aim-network/networking.c b/drivers/staging/most/aim-network/networking.c
index 4659a6450c04..ce1764cba5f0 100644
--- a/drivers/staging/most/aim-network/networking.c
+++ b/drivers/staging/most/aim-network/networking.c
@@ -67,10 +67,10 @@ struct net_dev_context {
struct most_interface *iface;
bool channels_opened;
bool is_mamac;
- unsigned char link_stat;
struct net_device *dev;
struct net_dev_channel rx;
struct net_dev_channel tx;
+ struct completion mac_compl;
struct list_head list;
};
@@ -181,6 +181,7 @@ static int most_nd_set_mac_address(struct net_device *dev, void *p)
static int most_nd_open(struct net_device *dev)
{
struct net_dev_context *nd = dev->ml_priv;
+ long ret;
netdev_info(dev, "open net device\n");
@@ -202,16 +203,30 @@ static int most_nd_open(struct net_device *dev)
return -EBUSY;
}
- nd->channels_opened = true;
-
- if (nd->is_mamac) {
- nd->link_stat = 1;
- netif_wake_queue(dev);
- } else {
+ if (!is_valid_ether_addr(dev->dev_addr)) {
nd->iface->request_netinfo(nd->iface, nd->tx.ch_id);
+ ret = wait_for_completion_interruptible_timeout(
+ &nd->mac_compl, msecs_to_jiffies(5000));
+ if (!ret) {
+ netdev_err(dev, "mac timeout\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ if (ret < 0) {
+ netdev_warn(dev, "mac waiting interrupted\n");
+ goto err;
+ }
}
+ nd->channels_opened = true;
+ netif_wake_queue(dev);
return 0;
+
+err:
+ most_stop_channel(nd->iface, nd->tx.ch_id, &aim);
+ most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
+ return ret;
}
static int most_nd_stop(struct net_device *dev)
@@ -277,7 +292,6 @@ static const struct net_device_ops most_nd_ops = {
static void most_nd_setup(struct net_device *dev)
{
- netdev_info(dev, "setup net device\n");
ether_setup(dev);
dev->netdev_ops = &most_nd_ops;
}
@@ -332,6 +346,7 @@ static int aim_probe_channel(struct most_interface *iface, int channel_idx,
if (!nd)
return -ENOMEM;
+ init_completion(&nd->mac_compl);
nd->iface = iface;
spin_lock_irqsave(&list_lock, flags);
@@ -548,8 +563,7 @@ void most_deliver_netinfo(struct most_interface *iface,
{
struct net_dev_context *nd;
struct net_device *dev;
-
- pr_info("Received netinfo from %s\n", iface->description);
+ const u8 *m = mac_addr;
nd = get_net_dev_context(iface);
if (!nd)
@@ -559,15 +573,16 @@ void most_deliver_netinfo(struct most_interface *iface,
if (!dev)
return;
- if (mac_addr)
- ether_addr_copy(dev->dev_addr, mac_addr);
-
- if (nd->link_stat != link_stat) {
- nd->link_stat = link_stat;
- if (nd->link_stat)
- netif_wake_queue(dev);
- else
- netif_stop_queue(dev);
+ if (m && is_valid_ether_addr(m)) {
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ netdev_info(dev, "set mac %02x-%02x-%02x-%02x-%02x-%02x\n",
+ m[0], m[1], m[2], m[3], m[4], m[5]);
+ ether_addr_copy(dev->dev_addr, m);
+ complete(&nd->mac_compl);
+ } else if (!ether_addr_equal(dev->dev_addr, m)) {
+ netdev_warn(dev, "reject mac %02x-%02x-%02x-%02x-%02x-%02x\n",
+ m[0], m[1], m[2], m[3], m[4], m[5]);
+ }
}
}
EXPORT_SYMBOL(most_deliver_netinfo);
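A short sketch of the completion hand-shake most_nd_open() now uses: open blocks until most_deliver_netinfo() delivers a valid MAC, a 5 s timeout expires, or a signal arrives. The helper name below is illustrative; the return-value convention is that of wait_for_completion_interruptible_timeout():

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    /* > 0: completed (remaining jiffies); == 0: timed out; < 0: interrupted */
    static int demo_wait_for_mac(struct completion *mac_compl)
    {
            long ret = wait_for_completion_interruptible_timeout(mac_compl,
                                            msecs_to_jiffies(5000));
            if (!ret)
                    return -EBUSY;  /* timeout, mirrors the "mac timeout" branch */
            if (ret < 0)
                    return ret;     /* interrupted by a signal */
            return 0;               /* MAC was set and complete() was called */
    }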
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.c b/drivers/staging/most/hdm-dim2/dim2_hdm.c
index 78b2c3dd9bb3..35aee9fbbf02 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hdm.c
+++ b/drivers/staging/most/hdm-dim2/dim2_hdm.c
@@ -306,14 +306,11 @@ static int deliver_netinfo_thread(void *data)
static void retrieve_netinfo(struct dim2_hdm *dev, struct mbo *mbo)
{
u8 *data = mbo->virt_address;
- u8 *mac = dev->mac_addrs;
pr_info("Node Address: 0x%03x\n", (u16)data[16] << 8 | data[17]);
dev->link_state = data[18];
pr_info("NIState: %d\n", dev->link_state);
- memcpy(mac, data + 19, 6);
- pr_info("MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n",
- mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ memcpy(dev->mac_addrs, data + 19, 6);
dev->deliver_netinfo++;
wake_up_interruptible(&dev->netinfo_waitq);
}
diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c
index 26c9adb29308..d6db0bd65be0 100644
--- a/drivers/staging/most/hdm-usb/hdm_usb.c
+++ b/drivers/staging/most/hdm-usb/hdm_usb.c
@@ -97,9 +97,7 @@ struct clear_hold_work {
* @cap: channel capabilities
* @conf: channel configuration
* @dci: direct communication interface of hardware
- * @hw_addr: MAC address of hardware
* @ep_address: endpoint address table
- * @link_stat: link status of hardware
* @description: device description
* @suffix: suffix for channel name
* @channel_lock: synchronize channel access
@@ -117,9 +115,7 @@ struct most_dev {
struct most_channel_capability *cap;
struct most_channel_config *conf;
struct most_dci_obj *dci;
- u8 hw_addr[6];
u8 *ep_address;
- u16 link_stat;
char description[MAX_STRING_LEN];
char suffix[MAX_NUM_ENDPOINTS][MAX_SUFFIX_LEN];
spinlock_t channel_lock[MAX_NUM_ENDPOINTS]; /* sync channel access */
@@ -186,28 +182,9 @@ static inline int drci_wr_reg(struct usb_device *dev, u16 reg, u16 data)
5 * HZ);
}
-/**
- * free_anchored_buffers - free device's anchored items
- * @mdev: the device
- * @channel: channel ID
- * @status: status of MBO termination
- */
-static void free_anchored_buffers(struct most_dev *mdev, unsigned int channel,
- enum mbo_status_flags status)
+static inline int start_sync_ep(struct usb_device *usb_dev, u16 ep)
{
- struct mbo *mbo;
- struct urb *urb;
-
- while ((urb = usb_get_from_anchor(&mdev->busy_urbs[channel]))) {
- mbo = urb->context;
- usb_kill_urb(urb);
- if (mbo && mbo->complete) {
- mbo->status = status;
- mbo->processed_length = 0;
- mbo->complete(mbo);
- }
- usb_free_urb(urb);
- }
+ return drci_wr_reg(usb_dev, DRCI_REG_BASE + DRCI_COMMAND + ep * 16, 1);
}
/**
@@ -278,7 +255,7 @@ static int hdm_poison_channel(struct most_interface *iface, int channel)
cancel_work_sync(&mdev->clear_work[channel].ws);
mutex_lock(&mdev->io_mutex);
- free_anchored_buffers(mdev, channel, MBO_E_CLOSE);
+ usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
if (mdev->padding_active[channel])
mdev->padding_active[channel] = false;
@@ -377,33 +354,27 @@ static void hdm_write_completion(struct urb *urb)
unsigned long flags;
spin_lock_irqsave(lock, flags);
- if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
- !mdev->is_channel_healthy[channel]) {
- spin_unlock_irqrestore(lock, flags);
- return;
- }
- if (unlikely(urb->status && urb->status != -ESHUTDOWN)) {
- mbo->processed_length = 0;
+ mbo->processed_length = 0;
+ mbo->status = MBO_E_INVAL;
+ if (likely(mdev->is_channel_healthy[channel])) {
switch (urb->status) {
+ case 0:
+ case -ESHUTDOWN:
+ mbo->processed_length = urb->actual_length;
+ mbo->status = MBO_SUCCESS;
+ break;
case -EPIPE:
dev_warn(dev, "Broken OUT pipe detected\n");
mdev->is_channel_healthy[channel] = false;
- spin_unlock_irqrestore(lock, flags);
mdev->clear_work[channel].pipe = urb->pipe;
schedule_work(&mdev->clear_work[channel].ws);
- return;
+ break;
case -ENODEV:
case -EPROTO:
mbo->status = MBO_E_CLOSE;
break;
- default:
- mbo->status = MBO_E_INVAL;
- break;
}
- } else {
- mbo->status = MBO_SUCCESS;
- mbo->processed_length = urb->actual_length;
}
spin_unlock_irqrestore(lock, flags);
@@ -531,40 +502,35 @@ static void hdm_read_completion(struct urb *urb)
unsigned long flags;
spin_lock_irqsave(lock, flags);
- if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
- !mdev->is_channel_healthy[channel]) {
- spin_unlock_irqrestore(lock, flags);
- return;
- }
- if (unlikely(urb->status && urb->status != -ESHUTDOWN)) {
- mbo->processed_length = 0;
+ mbo->processed_length = 0;
+ mbo->status = MBO_E_INVAL;
+ if (likely(mdev->is_channel_healthy[channel])) {
switch (urb->status) {
+ case 0:
+ case -ESHUTDOWN:
+ mbo->processed_length = urb->actual_length;
+ mbo->status = MBO_SUCCESS;
+ if (mdev->padding_active[channel] &&
+ hdm_remove_padding(mdev, channel, mbo)) {
+ mbo->processed_length = 0;
+ mbo->status = MBO_E_INVAL;
+ }
+ break;
case -EPIPE:
dev_warn(dev, "Broken IN pipe detected\n");
mdev->is_channel_healthy[channel] = false;
- spin_unlock_irqrestore(lock, flags);
mdev->clear_work[channel].pipe = urb->pipe;
schedule_work(&mdev->clear_work[channel].ws);
- return;
+ break;
case -ENODEV:
case -EPROTO:
mbo->status = MBO_E_CLOSE;
break;
case -EOVERFLOW:
dev_warn(dev, "Babble on IN pipe detected\n");
- default:
- mbo->status = MBO_E_INVAL;
break;
}
- } else {
- mbo->processed_length = urb->actual_length;
- mbo->status = MBO_SUCCESS;
- if (mdev->padding_active[channel] &&
- hdm_remove_padding(mdev, channel, mbo)) {
- mbo->processed_length = 0;
- mbo->status = MBO_E_INVAL;
- }
}
spin_unlock_irqrestore(lock, flags);
@@ -668,6 +634,15 @@ _error:
* @iface: interface
* @channel: channel ID
* @conf: structure that holds the configuration information
+ *
+ * The attached network interface controller (NIC) supports a padding mode
+ * to avoid short packets on USB, hence increasing the performance due to a
+ * lower interrupt load. This mode is the default for synchronous data and can
+ * be switched on for isochronous data. When padding is active, the
+ * driver needs to know the frame size of the payload in order to calculate
+ * the number of bytes it needs to pad when transmitting or to cut off when
+ * receiving data.
+ *
*/
static int hdm_configure_channel(struct most_interface *iface, int channel,
struct most_channel_config *conf)
@@ -701,6 +676,11 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
!(conf->data_type == MOST_CH_ISOC &&
conf->packets_per_xact != 0xFF)) {
mdev->padding_active[channel] = false;
+ /*
+ * Since the NIC's padding mode is not going to be
+ * used, we can skip the frame size calculations and
+ * move directly on to exit.
+ */
goto exit;
}
@@ -734,56 +714,12 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
- conf->buffer_size;
exit:
mdev->conf[channel] = *conf;
- return 0;
-}
+ if (conf->data_type == MOST_CH_ASYNC) {
+ u16 ep = mdev->ep_address[channel];
-/**
- * hdm_update_netinfo - retrieve latest networking information
- * @mdev: device interface
- *
- * This triggers the USB vendor requests to read the hardware address and
- * the current link status of the attached device.
- */
-static int hdm_update_netinfo(struct most_dev *mdev)
-{
- struct usb_device *usb_device = mdev->usb_device;
- struct device *dev = &usb_device->dev;
- u16 hi, mi, lo, link;
-
- if (!is_valid_ether_addr(mdev->hw_addr)) {
- if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) {
- dev_err(dev, "Vendor request \"hw_addr_hi\" failed\n");
- return -EFAULT;
- }
-
- if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) {
- dev_err(dev, "Vendor request \"hw_addr_mid\" failed\n");
- return -EFAULT;
- }
-
- if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) {
- dev_err(dev, "Vendor request \"hw_addr_low\" failed\n");
- return -EFAULT;
- }
-
- mutex_lock(&mdev->io_mutex);
- mdev->hw_addr[0] = hi >> 8;
- mdev->hw_addr[1] = hi;
- mdev->hw_addr[2] = mi >> 8;
- mdev->hw_addr[3] = mi;
- mdev->hw_addr[4] = lo >> 8;
- mdev->hw_addr[5] = lo;
- mutex_unlock(&mdev->io_mutex);
- }
-
- if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) {
- dev_err(dev, "Vendor request \"link status\" failed\n");
- return -EFAULT;
+ if (start_sync_ep(mdev->usb_device, ep) < 0)
+ dev_warn(dev, "sync for ep%02x failed", ep);
}
-
- mutex_lock(&mdev->io_mutex);
- mdev->link_stat = link;
- mutex_unlock(&mdev->io_mutex);
return 0;
}
@@ -807,7 +743,7 @@ static void hdm_request_netinfo(struct most_interface *iface, int channel)
}
/**
- * link_stat_timer_handler - add work to link_stat work queue
+ * link_stat_timer_handler - schedule work to obtain the MAC address and link status
* @data: pointer to USB device instance
*
* The handler runs in interrupt context. That's why we need to defer the
@@ -823,33 +759,47 @@ static void link_stat_timer_handler(unsigned long data)
}
/**
- * wq_netinfo - work queue function
+ * wq_netinfo - work queue function to deliver latest networking information
* @wq_obj: object that holds data for our deferred work to do
*
* This retrieves the network interface status of the USB INIC
- * and compares it with the current status. If the status has
- * changed, it updates the status of the core.
*/
static void wq_netinfo(struct work_struct *wq_obj)
{
struct most_dev *mdev = to_mdev_from_work(wq_obj);
- int i, prev_link_stat = mdev->link_stat;
- u8 prev_hw_addr[6];
+ struct usb_device *usb_device = mdev->usb_device;
+ struct device *dev = &usb_device->dev;
+ u16 hi, mi, lo, link;
+ u8 hw_addr[6];
- for (i = 0; i < 6; i++)
- prev_hw_addr[i] = mdev->hw_addr[i];
+ if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) {
+ dev_err(dev, "Vendor request 'hw_addr_hi' failed\n");
+ return;
+ }
+
+ if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) {
+ dev_err(dev, "Vendor request 'hw_addr_mid' failed\n");
+ return;
+ }
- if (hdm_update_netinfo(mdev) < 0)
+ if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) {
+ dev_err(dev, "Vendor request 'hw_addr_low' failed\n");
+ return;
+ }
+
+ if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) {
+ dev_err(dev, "Vendor request 'link status' failed\n");
return;
- if (prev_link_stat != mdev->link_stat ||
- prev_hw_addr[0] != mdev->hw_addr[0] ||
- prev_hw_addr[1] != mdev->hw_addr[1] ||
- prev_hw_addr[2] != mdev->hw_addr[2] ||
- prev_hw_addr[3] != mdev->hw_addr[3] ||
- prev_hw_addr[4] != mdev->hw_addr[4] ||
- prev_hw_addr[5] != mdev->hw_addr[5])
- most_deliver_netinfo(&mdev->iface, mdev->link_stat,
- &mdev->hw_addr[0]);
+ }
+
+ hw_addr[0] = hi >> 8;
+ hw_addr[1] = hi;
+ hw_addr[2] = mi >> 8;
+ hw_addr[3] = mi;
+ hw_addr[4] = lo >> 8;
+ hw_addr[5] = lo;
+
+ most_deliver_netinfo(&mdev->iface, link, hw_addr);
}
/**
@@ -867,7 +817,7 @@ static void wq_clear_halt(struct work_struct *wq_obj)
mutex_lock(&mdev->io_mutex);
most_stop_enqueue(&mdev->iface, channel);
- free_anchored_buffers(mdev, channel, MBO_E_INVAL);
+ usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
if (usb_clear_halt(mdev->usb_device, pipe))
dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");
@@ -1053,6 +1003,7 @@ static ssize_t store_value(struct most_dci_obj *dci_obj,
u16 val;
u16 reg_addr;
const char *name = attr->attr.name;
+ struct usb_device *usb_dev = dci_obj->usb_device;
int err = kstrtou16(buf, 16, &val);
if (err)
@@ -1063,18 +1014,15 @@ static ssize_t store_value(struct most_dci_obj *dci_obj,
return count;
}
- if (!strcmp(name, "arb_value")) {
- reg_addr = dci_obj->reg_addr;
- } else if (!strcmp(name, "sync_ep")) {
- u16 ep = val;
-
- reg_addr = DRCI_REG_BASE + DRCI_COMMAND + ep * 16;
- val = 1;
- } else if (get_static_reg_addr(ro_regs, name, &reg_addr)) {
+ if (!strcmp(name, "arb_value"))
+ err = drci_wr_reg(usb_dev, dci_obj->reg_addr, val);
+ else if (!strcmp(name, "sync_ep"))
+ err = start_sync_ep(usb_dev, val);
+ else if (!get_static_reg_addr(ro_regs, name, &reg_addr))
+ err = drci_wr_reg(usb_dev, reg_addr, val);
+ else
return -EFAULT;
- }
- err = drci_wr_reg(dci_obj->usb_device, reg_addr, val);
if (err < 0)
return err;
@@ -1186,7 +1134,6 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
struct most_channel_capability *tmp_cap;
struct usb_endpoint_descriptor *ep_desc;
int ret = 0;
- int err;
if (!mdev)
goto exit_ENOMEM;
@@ -1262,13 +1209,6 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
tmp_cap++;
init_usb_anchor(&mdev->busy_urbs[i]);
spin_lock_init(&mdev->channel_lock[i]);
- err = drci_wr_reg(usb_dev,
- DRCI_REG_BASE + DRCI_COMMAND +
- ep_desc->bEndpointAddress * 16,
- 1);
- if (err < 0)
- dev_warn(dev, "DCI Sync for EP %02x failed",
- ep_desc->bEndpointAddress);
}
dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
le16_to_cpu(usb_dev->descriptor.idVendor),
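For reference, the wq_netinfo() rewrite above builds the hardware address out of three 16-bit DRCI register reads; a minimal sketch of that byte packing (the helper name is illustrative):

    #include <linux/types.h>

    static void demo_words_to_mac(u16 hi, u16 mi, u16 lo, u8 hw_addr[6])
    {
            hw_addr[0] = hi >> 8;    /* DRCI_REG_HW_ADDR_HI, high byte */
            hw_addr[1] = hi & 0xff;
            hw_addr[2] = mi >> 8;    /* DRCI_REG_HW_ADDR_MI */
            hw_addr[3] = mi & 0xff;
            hw_addr[4] = lo >> 8;    /* DRCI_REG_HW_ADDR_LO */
            hw_addr[5] = lo & 0xff;
    }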
diff --git a/drivers/staging/most/mostcore/core.c b/drivers/staging/most/mostcore/core.c
index 329109c0024f..191404bc5906 100644
--- a/drivers/staging/most/mostcore/core.c
+++ b/drivers/staging/most/mostcore/core.c
@@ -342,7 +342,7 @@ static ssize_t show_channel_starving(struct most_c_obj *c,
}
#define create_show_channel_attribute(val) \
- static MOST_CHNL_ATTR(val, S_IRUGO, show_##val, NULL)
+ static MOST_CHNL_ATTR(val, 0444, show_##val, NULL)
create_show_channel_attribute(available_directions);
create_show_channel_attribute(available_datatypes);
@@ -494,9 +494,7 @@ static ssize_t store_set_packets_per_xact(struct most_c_obj *c,
}
#define create_channel_attribute(value) \
- static MOST_CHNL_ATTR(value, S_IRUGO | S_IWUSR, \
- show_##value, \
- store_##value)
+ static MOST_CHNL_ATTR(value, 0644, show_##value, store_##value)
create_channel_attribute(set_buffer_size);
create_channel_attribute(set_number_of_buffers);
@@ -690,7 +688,7 @@ static ssize_t show_interface(struct most_inst_obj *instance_obj,
}
#define create_inst_attribute(value) \
- static MOST_INST_ATTR(value, S_IRUGO, show_##value, NULL)
+ static MOST_INST_ATTR(value, 0444, show_##value, NULL)
create_inst_attribute(description);
create_inst_attribute(interface);
@@ -763,8 +761,6 @@ struct most_aim_obj {
struct kobject kobj;
struct list_head list;
struct most_aim *driver;
- char add_link[STRING_SIZE];
- char remove_link[STRING_SIZE];
};
#define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)
@@ -851,7 +847,7 @@ static void most_aim_release(struct kobject *kobj)
kfree(aim_obj);
}
-static ssize_t show_add_link(struct most_aim_obj *aim_obj,
+static ssize_t add_link_show(struct most_aim_obj *aim_obj,
struct most_aim_attribute *attr,
char *buf)
{
@@ -885,16 +881,16 @@ static ssize_t show_add_link(struct most_aim_obj *aim_obj,
*
* Examples:
*
- * Input: "mdev0:ch0@ep_81:my_channel\n" or
- * "mdev0:ch0@ep_81:my_channel"
+ * Input: "mdev0:ch6:my_channel\n" or
+ * "mdev0:ch6:my_channel"
*
- * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> "my_channel"
+ * Output: *a -> "mdev0", *b -> "ch6", *c -> "my_channel"
*
- * Input: "mdev0:ch0@ep_81\n"
- * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> ""
+ * Input: "mdev1:ep81\n"
+ * Output: *a -> "mdev1", *b -> "ep81", *c -> ""
*
- * Input: "mdev0:ch0@ep_81"
- * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c == NULL
+ * Input: "mdev1:ep81"
+ * Output: *a -> "mdev1", *b -> "ep81", *c == NULL
*/
static int split_string(char *buf, char **a, char **b, char **c)
{
@@ -962,13 +958,13 @@ most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
* Searches for a pair of device and channel and probes the AIM
*
* Example:
- * (1) echo -n -e "mdev0:ch0@ep_81:my_rxchannel\n" >add_link
- * (2) echo -n -e "mdev0:ch0@ep_81\n" >add_link
+ * (1) echo "mdev0:ch6:my_rxchannel" >add_link
+ * (2) echo "mdev1:ep81" >add_link
*
* (1) would create the device node /dev/my_rxchannel
- * (2) would create the device node /dev/mdev0-ch0@ep_81
+ * (2) would create the device node /dev/mdev1-ep81
*/
-static ssize_t store_add_link(struct most_aim_obj *aim_obj,
+static ssize_t add_link_store(struct most_aim_obj *aim_obj,
struct most_aim_attribute *attr,
const char *buf,
size_t len)
@@ -984,7 +980,6 @@ static ssize_t store_add_link(struct most_aim_obj *aim_obj,
size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
strlcpy(buffer, buf, max_len);
- strlcpy(aim_obj->add_link, buf, max_len);
ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
if (ret)
@@ -1019,14 +1014,7 @@ static ssize_t store_add_link(struct most_aim_obj *aim_obj,
}
static struct most_aim_attribute most_aim_attr_add_link =
- __ATTR(add_link, S_IRUGO | S_IWUSR, show_add_link, store_add_link);
-
-static ssize_t show_remove_link(struct most_aim_obj *aim_obj,
- struct most_aim_attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->remove_link);
-}
+ __ATTR_RW(add_link);
/**
* store_remove_link - store function for remove_link attribute
@@ -1036,9 +1024,9 @@ static ssize_t show_remove_link(struct most_aim_obj *aim_obj,
* @len: buffer length
*
* Example:
- * echo -n -e "mdev0:ch0@ep_81\n" >remove_link
+ * echo "mdev0:ep81" >remove_link
*/
-static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
+static ssize_t remove_link_store(struct most_aim_obj *aim_obj,
struct most_aim_attribute *attr,
const char *buf,
size_t len)
@@ -1051,7 +1039,6 @@ static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
strlcpy(buffer, buf, max_len);
- strlcpy(aim_obj->remove_link, buf, max_len);
ret = split_string(buffer, &mdev, &mdev_ch, NULL);
if (ret)
return ret;
@@ -1070,8 +1057,7 @@ static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
}
static struct most_aim_attribute most_aim_attr_remove_link =
- __ATTR(remove_link, S_IRUGO | S_IWUSR, show_remove_link,
- store_remove_link);
+ __ATTR_WO(remove_link);
static struct attribute *most_aim_def_attrs[] = {
&most_aim_attr_add_link.attr,
@@ -1761,9 +1747,6 @@ struct kobject *most_register_interface(struct most_interface *iface)
if (!name_suffix)
snprintf(channel_name, STRING_SIZE, "ch%d", i);
- else if (name_suffix[0] == '@')
- snprintf(channel_name, STRING_SIZE, "ch%d%s", i,
- name_suffix);
else
snprintf(channel_name, STRING_SIZE, "%s", name_suffix);
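The reworked add_link/remove_link examples above use a plain device:channel[:name] syntax. A small userspace sketch of parsing that shape, purely illustrative and not the kernel's split_string():

    #define _DEFAULT_SOURCE     /* for strsep() on glibc */
    #include <stdio.h>
    #include <string.h>

    static int demo_parse_link(char *buf, char **dev, char **ch, char **name)
    {
            *dev  = strsep(&buf, ":");
            *ch   = strsep(&buf, ":");
            *name = buf;            /* NULL when the name part is omitted */
            return (*dev && *ch) ? 0 : -1;
    }

    int main(void)
    {
            char s[] = "mdev0:ch6:my_channel";
            char *dev, *ch, *name;

            if (!demo_parse_link(s, &dev, &ch, &name))
                    printf("dev=%s ch=%s name=%s\n", dev, ch, name ? name : "");
            return 0;
    }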
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 552a7dcbf50b..fb0928a4fb97 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -172,29 +172,31 @@ static struct phy_device *xlr_get_phydev(struct xlr_net_priv *priv)
/*
* Ethtool operation
*/
-static int xlr_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+static int xlr_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *ecmd)
{
struct xlr_net_priv *priv = netdev_priv(ndev);
struct phy_device *phydev = xlr_get_phydev(priv);
if (!phydev)
return -ENODEV;
- return phy_ethtool_gset(phydev, ecmd);
+ return phy_ethtool_ksettings_get(phydev, ecmd);
}
-static int xlr_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+static int xlr_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *ecmd)
{
struct xlr_net_priv *priv = netdev_priv(ndev);
struct phy_device *phydev = xlr_get_phydev(priv);
if (!phydev)
return -ENODEV;
- return phy_ethtool_sset(phydev, ecmd);
+ return phy_ethtool_ksettings_set(phydev, ecmd);
}
static const struct ethtool_ops xlr_ethtool_ops = {
- .get_settings = xlr_get_settings,
- .set_settings = xlr_set_settings,
+ .get_link_ksettings = xlr_get_link_ksettings,
+ .set_link_ksettings = xlr_set_link_ksettings,
};
/*
@@ -1005,10 +1007,8 @@ static int xlr_net_probe(struct platform_device *pdev)
*/
adapter = (struct xlr_adapter *)
devm_kzalloc(&pdev->dev, sizeof(*adapter), GFP_KERNEL);
- if (!adapter) {
- err = -ENOMEM;
- return err;
- }
+ if (!adapter)
+ return -ENOMEM;
/*
* XLR and XLS have 1 and 2 NAE controller respectively
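For context on the ethtool migration above: the legacy get_settings/set_settings hooks are replaced by the link_ksettings pair, which simply forward to the PHY helpers. A hedged sketch mirroring the calls shown in the patch (the phydev lookup is illustrative):

    #include <linux/errno.h>
    #include <linux/ethtool.h>
    #include <linux/netdevice.h>
    #include <linux/phy.h>

    static int demo_get_link_ksettings(struct net_device *ndev,
                                       struct ethtool_link_ksettings *cmd)
    {
            struct phy_device *phydev = ndev->phydev;   /* illustrative lookup */

            if (!phydev)
                    return -ENODEV;
            return phy_ethtool_ksettings_get(phydev, cmd);
    }

    static const struct ethtool_ops demo_ethtool_ops = {
            .get_link_ksettings = demo_get_link_ksettings,
    };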
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index f1f4788dbd86..36109cec8706 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -308,7 +308,7 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
mod_timer(&pmlmepriv->scan_to_timer,
jiffies + msecs_to_jiffies(SCANNING_TIMEOUT));
- rtw_led_control(padapter, LED_CTL_SITE_SURVEY);
+ LedControl8188eu(padapter, LED_CTL_SITE_SURVEY);
pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
} else {
@@ -335,7 +335,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
- rtw_led_control(padapter, LED_CTL_START_TO_LINK);
+ LedControl8188eu(padapter, LED_CTL_START_TO_LINK);
if (pmlmepriv->assoc_ssid.SsidLength == 0)
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for Any SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
@@ -379,7 +379,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- rtw_led_control(padapter, LED_CTL_START_TO_LINK);
+ LedControl8188eu(padapter, LED_CTL_START_TO_LINK);
if (pmlmepriv->assoc_ssid.SsidLength == 0)
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("+Join cmd: Any SSid\n"));
diff --git a/drivers/staging/rtl8188eu/core/rtw_led.c b/drivers/staging/rtl8188eu/core/rtw_led.c
index 14461cf34037..c1478cff5854 100644
--- a/drivers/staging/rtl8188eu/core/rtw_led.c
+++ b/drivers/staging/rtl8188eu/core/rtw_led.c
@@ -30,7 +30,7 @@ void BlinkTimerCallback(unsigned long data)
if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
return;
- schedule_work(&(pLed->BlinkWorkItem));
+ schedule_work(&pLed->BlinkWorkItem);
}
/* */
@@ -60,7 +60,6 @@ void ResetLedStatus(struct LED_871x *pLed)
pLed->bLedNoLinkBlinkInProgress = false;
pLed->bLedLinkBlinkInProgress = false;
- pLed->bLedStartToLinkBlinkInProgress = false;
pLed->bLedScanBlinkInProgress = false;
}
@@ -72,10 +71,10 @@ void InitLed871x(struct adapter *padapter, struct LED_871x *pLed)
ResetLedStatus(pLed);
- setup_timer(&(pLed->BlinkTimer), BlinkTimerCallback,
+ setup_timer(&pLed->BlinkTimer, BlinkTimerCallback,
(unsigned long)pLed);
- INIT_WORK(&(pLed->BlinkWorkItem), BlinkWorkItemCallback);
+ INIT_WORK(&pLed->BlinkWorkItem, BlinkWorkItemCallback);
}
@@ -85,8 +84,8 @@ void InitLed871x(struct adapter *padapter, struct LED_871x *pLed)
/* */
void DeInitLed871x(struct LED_871x *pLed)
{
- cancel_work_sync(&(pLed->BlinkWorkItem));
- del_timer_sync(&(pLed->BlinkTimer));
+ cancel_work_sync(&pLed->BlinkWorkItem);
+ del_timer_sync(&pLed->BlinkTimer);
ResetLedStatus(pLed);
}
@@ -99,7 +98,7 @@ void DeInitLed871x(struct LED_871x *pLed)
static void SwLedBlink1(struct LED_871x *pLed)
{
struct adapter *padapter = pLed->padapter;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
u8 bStopBlinking = false;
/* Change LED according to BlinkingLedState specified. */
@@ -247,9 +246,9 @@ static void SwLedBlink1(struct LED_871x *pLed)
/* ALPHA, added by chiyoko, 20090106 */
static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAction)
{
- struct led_priv *ledpriv = &(padapter->ledpriv);
- struct LED_871x *pLed = &(ledpriv->SwLed0);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct led_priv *ledpriv = &padapter->ledpriv;
+ struct LED_871x *pLed = &ledpriv->SwLed0;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
switch (LedAction) {
case LED_CTL_POWER_ON:
@@ -259,11 +258,11 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
@@ -282,11 +281,11 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedNoLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
pLed->bLedLinkBlinkInProgress = true;
@@ -306,15 +305,15 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
if (IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedNoLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
pLed->bLedScanBlinkInProgress = true;
@@ -326,7 +325,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->BlinkingLedState = RTW_LED_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
- }
+ }
break;
case LED_CTL_TX:
case LED_CTL_RX:
@@ -334,11 +333,11 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedNoLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false;
}
pLed->bLedBlinkInProgress = true;
@@ -354,21 +353,21 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
break;
case LED_CTL_START_WPS: /* wait until xinpin finish */
case LED_CTL_START_WPS_BOTTON:
- if (!pLed->bLedWPSBlinkInProgress) {
+ if (!pLed->bLedWPSBlinkInProgress) {
if (pLed->bLedNoLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false;
}
pLed->bLedWPSBlinkInProgress = true;
@@ -379,27 +378,27 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->BlinkingLedState = RTW_LED_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
- }
+ }
break;
case LED_CTL_STOP_WPS:
if (pLed->bLedNoLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false;
}
if (pLed->bLedWPSBlinkInProgress)
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
else
pLed->bLedWPSBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_WPS_STOP;
@@ -415,7 +414,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
break;
case LED_CTL_STOP_WPS_FAIL:
if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false;
}
pLed->bLedNoLinkBlinkInProgress = true;
@@ -431,23 +430,23 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->CurrLedState = RTW_LED_OFF;
pLed->BlinkingLedState = RTW_LED_OFF;
if (pLed->bLedNoLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false;
}
SwLedOff(padapter, pLed);
@@ -475,15 +474,10 @@ void BlinkHandler(struct LED_871x *pLed)
void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction)
{
- struct led_priv *ledpriv = &(padapter->ledpriv);
-
if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped) ||
(!padapter->hw_init_completed))
return;
- if (!ledpriv->bRegUseLed)
- return;
-
if ((padapter->pwrctrlpriv.rf_pwrstate != rf_on &&
padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) &&
(LedAction == LED_CTL_TX || LedAction == LED_CTL_RX ||
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index ee2dcd05010f..032f783b0d83 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -801,7 +801,7 @@ void rtw_indicate_connect(struct adapter *padapter)
if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
set_fwstate(pmlmepriv, _FW_LINKED);
- rtw_led_control(padapter, LED_CTL_LINK);
+ LedControl8188eu(padapter, LED_CTL_LINK);
rtw_os_indicate_connect(padapter);
}
@@ -833,7 +833,7 @@ void rtw_indicate_disconnect(struct adapter *padapter)
rtw_os_indicate_disconnect(padapter);
_clr_fwstate_(pmlmepriv, _FW_LINKED);
- rtw_led_control(padapter, LED_CTL_NO_LINK);
+ LedControl8188eu(padapter, LED_CTL_NO_LINK);
rtw_clear_scan_deny(padapter);
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index fb13df586441..d9c114776cab 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -133,7 +133,9 @@ static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
{0x03}, /* 0x41, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G */
};
-static struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03}; /* use the combination for max channel numbers */
+static const struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {
+ 0x03
+}; /* use the combination for max channel numbers */
/*
* Search the @param channel_num in given @param channel_set
@@ -667,10 +669,10 @@ static int issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pss
get_rate_set(padapter, bssrate, &bssrate_len);
if (bssrate_len > 8) {
- pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , 8, bssrate, &(pattrib->pktlen));
- pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_ , (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, bssrate, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
} else {
- pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , bssrate_len , bssrate, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, bssrate_len, bssrate, &(pattrib->pktlen));
}
/* add wps_ie for wps2.0 */
@@ -999,7 +1001,7 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
}
if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6 , REALTEK_96B_IE, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6, REALTEK_96B_IE, &(pattrib->pktlen));
/* add WPS IE ie for wps 2.0 */
if (pmlmepriv->wps_assoc_resp_ie && pmlmepriv->wps_assoc_resp_ie_len > 0) {
@@ -1120,10 +1122,10 @@ static void issue_assocreq(struct adapter *padapter)
if (bssrate_len > 8) {
- pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , 8, bssrate, &(pattrib->pktlen));
- pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_ , (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, bssrate, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
} else {
- pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , bssrate_len , bssrate, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, bssrate_len, bssrate, &(pattrib->pktlen));
}
/* RSN */
@@ -1165,7 +1167,7 @@ static void issue_assocreq(struct adapter *padapter)
memcpy(&pmlmeinfo->HT_caps.mcs, MCS_rate_2R, 16);
break;
}
- pframe = rtw_set_ie(pframe, _HT_CAPABILITY_IE_, ie_len , (u8 *)(&(pmlmeinfo->HT_caps)), &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _HT_CAPABILITY_IE_, ie_len, (u8 *)(&(pmlmeinfo->HT_caps)), &(pattrib->pktlen));
}
}
@@ -1194,7 +1196,7 @@ static void issue_assocreq(struct adapter *padapter)
}
if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6 , REALTEK_96B_IE, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6, REALTEK_96B_IE, &(pattrib->pktlen));
pattrib->last_txcmdsz = pattrib->pktlen;
dump_mgntframe(padapter, pmgntframe);
@@ -2644,7 +2646,7 @@ static unsigned int OnBeacon(struct adapter *padapter,
ret = rtw_check_bcn_info(padapter, pframe, len);
if (!ret) {
DBG_88E_LEVEL(_drv_info_, "ap has changed, disconnect now\n ");
- receive_disconnect(padapter, pmlmeinfo->network.MacAddress , 65535);
+ receive_disconnect(padapter, pmlmeinfo->network.MacAddress, 65535);
return _SUCCESS;
}
/* update WMM, ERP in the beacon */
@@ -2802,7 +2804,7 @@ static unsigned int OnAuth(struct adapter *padapter,
/* checking for challenging txt... */
DBG_88E("checking for challenging txt...\n");
- p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_ , _CHLGETXT_IE_, (int *)&ie_len,
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, (int *)&ie_len,
len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_ - 4);
if ((p == NULL) || (ie_len <= 0)) {
@@ -3046,7 +3048,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
memcpy(supportRate, p+2, ie_len);
supportRateNum = ie_len;
- p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _EXT_SUPPORTEDRATES_IE_ , &ie_len,
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _EXT_SUPPORTEDRATES_IE_, &ie_len,
pkt_len - WLAN_HDR_A3_LEN - ie_offset);
if (p != NULL) {
if (supportRateNum <= sizeof(supportRate)) {
@@ -3146,7 +3148,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
if (pmlmepriv->wps_beacon_ie) {
u8 selected_registrar = 0;
- rtw_get_wps_attr_content(pmlmepriv->wps_beacon_ie, pmlmepriv->wps_beacon_ie_len, WPS_ATTR_SELECTED_REGISTRAR , &selected_registrar, NULL);
+ rtw_get_wps_attr_content(pmlmepriv->wps_beacon_ie, pmlmepriv->wps_beacon_ie_len, WPS_ATTR_SELECTED_REGISTRAR, &selected_registrar, NULL);
if (!selected_registrar) {
DBG_88E("selected_registrar is false , or AP is not ready to do WPS\n");
@@ -3511,7 +3513,7 @@ static unsigned int OnDeAuth(struct adapter *padapter,
DBG_88E_LEVEL(_drv_always_, "sta recv deauth reason code(%d) sta:%pM\n",
reason, GetAddr3Ptr(pframe));
- receive_disconnect(padapter, GetAddr3Ptr(pframe) , reason);
+ receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
}
pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
return _SUCCESS;
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 0b70fe7d3b72..4032121a06f3 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -56,7 +56,7 @@ static int rtw_hw_suspend(struct adapter *padapter)
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
_clr_fwstate_(pmlmepriv, _FW_LINKED);
- rtw_led_control(padapter, LED_CTL_NO_LINK);
+ LedControl8188eu(padapter, LED_CTL_NO_LINK);
rtw_os_indicate_disconnect(padapter);
@@ -94,7 +94,7 @@ static int rtw_hw_resume(struct adapter *padapter)
pwrpriv->bips_processing = true;
rtw_reset_drv_sw(padapter);
- if (pm_netdev_open(pnetdev, false) != 0) {
+ if (ips_netdrv_open((struct adapter *)rtw_netdev_priv(pnetdev)) != _SUCCESS) {
mutex_unlock(&pwrpriv->mutex_lock);
goto error_exit;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index b87cbbbee054..3e6edb63d36b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -66,16 +66,12 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
precvpriv->adapter = padapter;
- precvpriv->free_recvframe_cnt = NR_RECVFRAME;
-
precvpriv->pallocated_frame_buf = vzalloc(NR_RECVFRAME * sizeof(struct recv_frame) + RXFRAME_ALIGN_SZ);
if (!precvpriv->pallocated_frame_buf)
return _FAIL;
- precvpriv->precv_frame_buf = PTR_ALIGN(precvpriv->pallocated_frame_buf, RXFRAME_ALIGN_SZ);
-
- precvframe = (struct recv_frame *)precvpriv->precv_frame_buf;
+ precvframe = PTR_ALIGN(precvpriv->pallocated_frame_buf, RXFRAME_ALIGN_SZ);
for (i = 0; i < NR_RECVFRAME; i++) {
INIT_LIST_HEAD(&(precvframe->list));
@@ -83,15 +79,12 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
list_add_tail(&(precvframe->list),
&(precvpriv->free_recv_queue.queue));
- rtw_os_recv_resource_alloc(precvframe);
-
+ precvframe->pkt = NULL;
precvframe->len = 0;
precvframe->adapter = padapter;
precvframe++;
}
- precvpriv->rx_pending_cnt = 1;
-
res = rtw_hal_init_recv_priv(padapter);
setup_timer(&precvpriv->signal_stat_timer,
@@ -120,20 +113,11 @@ void _rtw_free_recv_priv(struct recv_priv *precvpriv)
struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
{
struct recv_frame *hdr;
- struct adapter *padapter;
- struct recv_priv *precvpriv;
hdr = list_first_entry_or_null(&pfree_recv_queue->queue,
struct recv_frame, list);
- if (hdr) {
+ if (hdr)
list_del_init(&hdr->list);
- padapter = hdr->adapter;
- if (padapter) {
- precvpriv = &padapter->recvpriv;
- if (pfree_recv_queue == &precvpriv->free_recv_queue)
- precvpriv->free_recvframe_cnt--;
- }
- }
return hdr;
}
@@ -154,13 +138,8 @@ struct recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
int rtw_free_recvframe(struct recv_frame *precvframe,
struct __queue *pfree_recv_queue)
{
- struct adapter *padapter;
- struct recv_priv *precvpriv;
-
if (!precvframe)
return _FAIL;
- padapter = precvframe->adapter;
- precvpriv = &padapter->recvpriv;
if (precvframe->pkt) {
dev_kfree_skb_any(precvframe->pkt);/* free skb by driver */
precvframe->pkt = NULL;
@@ -174,29 +153,16 @@ int rtw_free_recvframe(struct recv_frame *precvframe,
list_add_tail(&(precvframe->list), get_list_head(pfree_recv_queue));
- if (padapter != NULL) {
- if (pfree_recv_queue == &precvpriv->free_recv_queue)
- precvpriv->free_recvframe_cnt++;
- }
-
- spin_unlock_bh(&pfree_recv_queue->lock);
+ spin_unlock_bh(&pfree_recv_queue->lock);
return _SUCCESS;
}
int _rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue)
{
- struct adapter *padapter = precvframe->adapter;
- struct recv_priv *precvpriv = &padapter->recvpriv;
-
list_del_init(&(precvframe->list));
list_add_tail(&(precvframe->list), get_list_head(queue));
- if (padapter != NULL) {
- if (queue == &precvpriv->free_recv_queue)
- precvpriv->free_recvframe_cnt++;
- }
-
return _SUCCESS;
}
@@ -1294,7 +1260,7 @@ static int validate_recv_frame(struct adapter *adapter,
retval = _FAIL; /* only data frame return _SUCCESS */
break;
case WIFI_DATA_TYPE: /* data */
- rtw_led_control(adapter, LED_CTL_RX);
+ LedControl8188eu(adapter, LED_CTL_RX);
pattrib->qos = (subtype & BIT(7)) ? 1 : 0;
retval = validate_recv_data_frame(adapter, precv_frame);
if (retval == _FAIL) {
@@ -1989,7 +1955,7 @@ static int recv_func_posthandle(struct adapter *padapter,
struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
/* DATA FRAME */
- rtw_led_control(padapter, LED_CTL_RX);
+ LedControl8188eu(padapter, LED_CTL_RX);
prframe = decryptor(padapter, prframe);
if (prframe == NULL) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index a71e25294add..941d1a069d20 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -310,7 +310,6 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
/* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */
for (i = 0; i < 16; i++) {
struct list_head *phead, *plist;
- struct recv_frame *prhdr;
struct recv_frame *prframe;
struct __queue *ppending_recvframe_queue;
struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
@@ -327,8 +326,7 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
plist = phead->next;
while (!list_empty(phead)) {
- prhdr = container_of(plist, struct recv_frame, list);
- prframe = (struct recv_frame *)prhdr;
+ prframe = container_of(plist, struct recv_frame, list);
plist = plist->next;
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 0f8b8e0bffdf..b60b126b860e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -220,7 +220,6 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
struct adapter *padapter = pxmitpriv->adapter;
struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitpriv->pxmit_frame_buf;
struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
- u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
if (pxmitpriv->pxmit_frame_buf == NULL)
@@ -233,7 +232,7 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
}
for (i = 0; i < NR_XMITBUFF; i++) {
- rtw_os_xmit_resource_free(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
+ rtw_os_xmit_resource_free(pxmitbuf);
pxmitbuf++;
}
@@ -243,7 +242,7 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
/* free xmit extension buff */
pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
for (i = 0; i < num_xmit_extbuf; i++) {
- rtw_os_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ));
+ rtw_os_xmit_resource_free(pxmitbuf);
pxmitbuf++;
}
@@ -1064,7 +1063,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
frg_inx++;
- if (bmcst || rtw_endofpktfile(&pktfile)) {
+ if (bmcst || pktfile.pkt_len == 0) {
pattrib->nr_frags = frg_inx;
pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len + ((pattrib->nr_frags == 1) ? llc_sz : 0) +
@@ -1677,7 +1676,7 @@ s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
}
pxmitframe->pkt = *ppkt;
- rtw_led_control(padapter, LED_CTL_TX);
+ LedControl8188eu(padapter, LED_CTL_TX);
pxmitframe->attrib.qsel = pxmitframe->attrib.priority;
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index d983a8029f4c..16476e735011 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -991,7 +991,6 @@ void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm)
{
pDM_Odm->RFCalibrateInfo.bTXPowerTracking = true;
pDM_Odm->RFCalibrateInfo.TXPowercount = 0;
- pDM_Odm->RFCalibrateInfo.bTXPowerTrackingInit = false;
if (*(pDM_Odm->mp_mode) != 1)
pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true;
MSG_88E("pDM_Odm TxPowerTrackControl = %d\n", pDM_Odm->RFCalibrateInfo.TxPowerTrackControl);
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index 5192ef70bcfc..35c91e06cc47 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -40,12 +40,11 @@ static u32 cal_bit_shift(u32 bitmask)
u32 phy_query_bb_reg(struct adapter *adapt, u32 regaddr, u32 bitmask)
{
- u32 return_value = 0, original_value, bit_shift;
+ u32 original_value, bit_shift;
original_value = usb_read32(adapt, regaddr);
bit_shift = cal_bit_shift(bitmask);
- return_value = (original_value & bitmask) >> bit_shift;
- return return_value;
+ return (original_value & bitmask) >> bit_shift;
}
void phy_set_bb_reg(struct adapter *adapt, u32 regaddr, u32 bitmask, u32 data)
@@ -119,12 +118,11 @@ static void rf_serial_write(struct adapter *adapt,
u32 rtw_hal_read_rfreg(struct adapter *adapt, enum rf_radio_path rf_path,
u32 reg_addr, u32 bit_mask)
{
- u32 original_value, readback_value, bit_shift;
+ u32 original_value, bit_shift;
original_value = rf_serial_read(adapt, rf_path, reg_addr);
bit_shift = cal_bit_shift(bit_mask);
- readback_value = (original_value & bit_mask) >> bit_shift;
- return readback_value;
+ return (original_value & bit_mask) >> bit_shift;
}
void phy_set_rf_reg(struct adapter *adapt, enum rf_radio_path rf_path,
@@ -210,13 +208,6 @@ static void phy_set_bw_mode_callback(struct adapter *adapt)
u8 reg_bw_opmode;
u8 reg_prsr_rsc;
- if (hal_data->rf_chip == RF_PSEUDO_11N)
- return;
-
- /* There is no 40MHz mode in RF_8225. */
- if (hal_data->rf_chip == RF_8225)
- return;
-
if (adapt->bDriverStopped)
return;
@@ -265,8 +256,7 @@ static void phy_set_bw_mode_callback(struct adapter *adapt)
}
/* Set RF related register */
- if (hal_data->rf_chip == RF_6052)
- rtl88eu_phy_rf6052_set_bandwidth(adapt, hal_data->CurrentChannelBW);
+ rtl88eu_phy_rf6052_set_bandwidth(adapt, hal_data->CurrentChannelBW);
}
void rtw_hal_set_bwmode(struct adapter *adapt, enum ht_channel_width bandwidth,
@@ -286,7 +276,6 @@ void rtw_hal_set_bwmode(struct adapter *adapt, enum ht_channel_width bandwidth,
static void phy_sw_chnl_callback(struct adapter *adapt, u8 channel)
{
- u8 rf_path;
u32 param1, param2;
struct hal_data_8188e *hal_data = adapt->HalData;
@@ -294,12 +283,10 @@ static void phy_sw_chnl_callback(struct adapter *adapt, u8 channel)
param1 = RF_CHNLBW;
param2 = channel;
- for (rf_path = 0; rf_path < hal_data->NumTotalRFPath; rf_path++) {
- hal_data->RfRegChnlVal[rf_path] = (hal_data->RfRegChnlVal[rf_path] &
- 0xfffffc00) | param2;
- phy_set_rf_reg(adapt, (enum rf_radio_path)rf_path, param1,
- bRFRegOffsetMask, hal_data->RfRegChnlVal[rf_path]);
- }
+ hal_data->RfRegChnlVal[0] = (hal_data->RfRegChnlVal[0] &
+ 0xfffffc00) | param2;
+ phy_set_rf_reg(adapt, 0, param1,
+ bRFRegOffsetMask, hal_data->RfRegChnlVal[0]);
}
void rtw_hal_set_chan(struct adapter *adapt, u8 channel)
@@ -307,9 +294,6 @@ void rtw_hal_set_chan(struct adapter *adapt, u8 channel)
struct hal_data_8188e *hal_data = adapt->HalData;
u8 tmpchannel = hal_data->CurrentChannel;
- if (hal_data->rf_chip == RF_PSEUDO_11N)
- return;
-
if (channel == 0)
channel = 1;
@@ -407,9 +391,8 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
s8 ofdm_index[2], cck_index = 0;
s8 ofdm_index_old[2] = {0, 0}, cck_index_old = 0;
u32 i = 0, j = 0;
- bool is2t = false;
- u8 ofdm_min_index = 6, rf; /* OFDM BB Swing should be less than +3.0dB */
+ u8 ofdm_min_index = 6; /* OFDM BB Swing should be less than +3.0dB */
s8 ofdm_index_mapping[2][index_mapping_NUM_88E] = {
/* 2.4G, decrease power */
{0, 0, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11},
@@ -427,18 +410,12 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
dm_txpwr_track_setpwr(dm_odm);
dm_odm->RFCalibrateInfo.TXPowerTrackingCallbackCnt++;
- dm_odm->RFCalibrateInfo.bTXPowerTrackingInit = true;
dm_odm->RFCalibrateInfo.RegA24 = 0x090e1317;
thermal_val = (u8)rtw_hal_read_rfreg(adapt, RF_PATH_A,
RF_T_METER_88E, 0xfc00);
- if (is2t)
- rf = 2;
- else
- rf = 1;
-
if (thermal_val) {
/* Query OFDM path A default setting */
ele_d = phy_query_bb_reg(adapt, rOFDM0_XATxIQImbalance, bMaskDWord)&bMaskOFDM_D;
@@ -450,17 +427,6 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
}
}
- /* Query OFDM path B default setting */
- if (is2t) {
- ele_d = phy_query_bb_reg(adapt, rOFDM0_XBTxIQImbalance, bMaskDWord)&bMaskOFDM_D;
- for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
- if (ele_d == (OFDMSwingTable[i]&bMaskOFDM_D)) {
- ofdm_index_old[1] = (u8)i;
- break;
- }
- }
- }
-
/* Query CCK default setting From 0xa24 */
temp_cck = dm_odm->RFCalibrateInfo.RegA24;
@@ -479,8 +445,7 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
dm_odm->RFCalibrateInfo.ThermalValue_LCK = thermal_val;
dm_odm->RFCalibrateInfo.ThermalValue_IQK = thermal_val;
- for (i = 0; i < rf; i++)
- dm_odm->RFCalibrateInfo.OFDM_index[i] = ofdm_index_old[i];
+ dm_odm->RFCalibrateInfo.OFDM_index[0] = ofdm_index_old[0];
dm_odm->RFCalibrateInfo.CCK_index = cck_index_old;
}
@@ -539,13 +504,11 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
offset = index_mapping_NUM_88E-1;
/* Updating ofdm_index values with new OFDM / CCK offset */
- for (i = 0; i < rf; i++) {
- ofdm_index[i] = dm_odm->RFCalibrateInfo.OFDM_index[i] + ofdm_index_mapping[j][offset];
- if (ofdm_index[i] > OFDM_TABLE_SIZE_92D-1)
- ofdm_index[i] = OFDM_TABLE_SIZE_92D-1;
- else if (ofdm_index[i] < ofdm_min_index)
- ofdm_index[i] = ofdm_min_index;
- }
+ ofdm_index[0] = dm_odm->RFCalibrateInfo.OFDM_index[0] + ofdm_index_mapping[j][offset];
+ if (ofdm_index[0] > OFDM_TABLE_SIZE_92D-1)
+ ofdm_index[0] = OFDM_TABLE_SIZE_92D-1;
+ else if (ofdm_index[0] < ofdm_min_index)
+ ofdm_index[0] = ofdm_min_index;
cck_index = dm_odm->RFCalibrateInfo.CCK_index + ofdm_index_mapping[j][offset];
if (cck_index > CCK_TABLE_SIZE-1)
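The reworked phy.c read helpers above (phy_query_bb_reg, rtw_hal_read_rfreg) now mask the raw register value and shift it down in a single return statement, and the single-path 1T1R assumption lets the per-rf_path loops in this file collapse to index 0. A stand-alone sketch of the mask-then-shift pattern, with illustrative names only (nothing below is the driver's API):

#include <stdint.h>
#include <stdio.h>

/* Position of the lowest set bit in the mask -- the same idea as cal_bit_shift(). */
static uint32_t field_shift(uint32_t mask)
{
	uint32_t i;

	for (i = 0; i < 32; i++)
		if (mask & (1u << i))
			break;
	return i;
}

/* Extract one field from a register value: mask it, then shift it down. */
static uint32_t read_field(uint32_t regval, uint32_t mask)
{
	return (regval & mask) >> field_shift(mask);
}

int main(void)
{
	/* e.g. a field in bits 15:10, like the 0xfc00 thermal-meter mask used above */
	printf("%u\n", read_field(0x12345678, 0xfc00));	/* prints 21 */
	return 0;
}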
diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c
index 2f3edf0f850a..8f8c9de6a9bc 100644
--- a/drivers/staging/rtl8188eu/hal/rf.c
+++ b/drivers/staging/rtl8188eu/hal/rf.c
@@ -61,8 +61,6 @@ void rtl88eu_phy_rf6052_set_cck_txpower(struct adapter *adapt, u8 *powerlevel)
(powerlevel[idx1]<<8) |
(powerlevel[idx1]<<16) |
(powerlevel[idx1]<<24);
- if (tx_agc[idx1] > 0x20 && hal_data->ExternalPA)
- tx_agc[idx1] = 0x20;
}
} else {
if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1) {
@@ -139,17 +137,15 @@ static void getpowerbase88e(struct adapter *adapt, u8 *pwr_level_ofdm,
(powerbase0<<8) | powerbase0;
*(ofdmbase+i) = powerbase0;
}
- for (i = 0; i < adapt->HalData->NumTotalRFPath; i++) {
- /* Check HT20 to HT40 diff */
- if (adapt->HalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
- powerlevel[i] = pwr_level_bw20[i];
- else
- powerlevel[i] = pwr_level_bw40[i];
- powerbase1 = powerlevel[i];
- powerbase1 = (powerbase1<<24) | (powerbase1<<16) |
- (powerbase1<<8) | powerbase1;
- *(mcs_base+i) = powerbase1;
- }
+ /* Check HT20 to HT40 diff */
+ if (adapt->HalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
+ powerlevel[0] = pwr_level_bw20[0];
+ else
+ powerlevel[0] = pwr_level_bw40[0];
+ powerbase1 = powerlevel[0];
+ powerbase1 = (powerbase1<<24) | (powerbase1<<16) |
+ (powerbase1<<8) | powerbase1;
+ *mcs_base = powerbase1;
}
static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel,
u8 index, u32 *powerbase0, u32 *powerbase1,
diff --git a/drivers/staging/rtl8188eu/hal/rf_cfg.c b/drivers/staging/rtl8188eu/hal/rf_cfg.c
index dde64417e66a..9712d7b74345 100644
--- a/drivers/staging/rtl8188eu/hal/rf_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/rf_cfg.c
@@ -230,79 +230,33 @@ static bool rf6052_conf_para(struct adapter *adapt)
{
struct hal_data_8188e *hal_data = adapt->HalData;
u32 u4val = 0;
- u8 rfpath;
bool rtstatus = true;
struct bb_reg_def *pphyreg;
- for (rfpath = 0; rfpath < hal_data->NumTotalRFPath; rfpath++) {
- pphyreg = &hal_data->PHYRegDef[rfpath];
+ pphyreg = &hal_data->PHYRegDef[RF90_PATH_A];
+ u4val = phy_query_bb_reg(adapt, pphyreg->rfintfs, BRFSI_RFENV);
- switch (rfpath) {
- case RF90_PATH_A:
- case RF90_PATH_C:
- u4val = phy_query_bb_reg(adapt, pphyreg->rfintfs,
- BRFSI_RFENV);
- break;
- case RF90_PATH_B:
- case RF90_PATH_D:
- u4val = phy_query_bb_reg(adapt, pphyreg->rfintfs,
- BRFSI_RFENV << 16);
- break;
- }
+ phy_set_bb_reg(adapt, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
+ udelay(1);
- phy_set_bb_reg(adapt, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
- udelay(1);
+ phy_set_bb_reg(adapt, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
+ udelay(1);
- phy_set_bb_reg(adapt, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
- udelay(1);
+ phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2, B3WIREADDREAALENGTH, 0x0);
+ udelay(1);
- phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2,
- B3WIREADDREAALENGTH, 0x0);
- udelay(1);
+ phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2, B3WIREDATALENGTH, 0x0);
+ udelay(1);
- phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2,
- B3WIREDATALENGTH, 0x0);
- udelay(1);
+ rtstatus = rtl88e_phy_config_rf_with_headerfile(adapt);
- switch (rfpath) {
- case RF90_PATH_A:
- rtstatus = rtl88e_phy_config_rf_with_headerfile(adapt);
- break;
- case RF90_PATH_B:
- rtstatus = rtl88e_phy_config_rf_with_headerfile(adapt);
- break;
- case RF90_PATH_C:
- break;
- case RF90_PATH_D:
- break;
- }
-
- switch (rfpath) {
- case RF90_PATH_A:
- case RF90_PATH_C:
- phy_set_bb_reg(adapt, pphyreg->rfintfs,
- BRFSI_RFENV, u4val);
- break;
- case RF90_PATH_B:
- case RF90_PATH_D:
- phy_set_bb_reg(adapt, pphyreg->rfintfs,
- BRFSI_RFENV << 16, u4val);
- break;
- }
-
- if (!rtstatus)
- return false;
- }
+ phy_set_bb_reg(adapt, pphyreg->rfintfs, BRFSI_RFENV, u4val);
return rtstatus;
}
static bool rtl88e_phy_rf6052_config(struct adapter *adapt)
{
- struct hal_data_8188e *hal_data = adapt->HalData;
-
- hal_data->NumTotalRFPath = 1;
-
return rf6052_conf_para(adapt);
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 385bc2f56f2f..0ce7db723a5d 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -135,7 +135,6 @@ void rtw_hal_read_chip_version(struct adapter *padapter)
dump_chip_info(ChipVersion);
pHalData->VersionID = ChipVersion;
- pHalData->NumTotalRFPath = 1;
}
void rtw_hal_set_odm_var(struct adapter *Adapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet)
@@ -470,7 +469,7 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto
{
struct hal_data_8188e *pHalData = padapter->HalData;
struct txpowerinfo24g pwrInfo24G;
- u8 rfPath, ch, group;
+ u8 ch, group;
u8 bIn24G, TxCount;
Hal_ReadPowerValueFromPROM_8188E(&pwrInfo24G, PROMContent, AutoLoadFail);
@@ -478,34 +477,32 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto
if (!AutoLoadFail)
pHalData->bTXPowerDataReadFromEEPORM = true;
- for (rfPath = 0; rfPath < pHalData->NumTotalRFPath; rfPath++) {
- for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
- bIn24G = Hal_GetChnlGroup88E(ch, &group);
- if (bIn24G) {
- pHalData->Index24G_CCK_Base[rfPath][ch] = pwrInfo24G.IndexCCK_Base[rfPath][group];
- if (ch == 14)
- pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][4];
- else
- pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][group];
- }
- if (bIn24G) {
- DBG_88E("======= Path %d, Channel %d =======\n", rfPath, ch);
- DBG_88E("Index24G_CCK_Base[%d][%d] = 0x%x\n", rfPath, ch , pHalData->Index24G_CCK_Base[rfPath][ch]);
- DBG_88E("Index24G_BW40_Base[%d][%d] = 0x%x\n", rfPath, ch , pHalData->Index24G_BW40_Base[rfPath][ch]);
- }
+ for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
+ bIn24G = Hal_GetChnlGroup88E(ch, &group);
+ if (bIn24G) {
+ pHalData->Index24G_CCK_Base[0][ch] = pwrInfo24G.IndexCCK_Base[0][group];
+ if (ch == 14)
+ pHalData->Index24G_BW40_Base[0][ch] = pwrInfo24G.IndexBW40_Base[0][4];
+ else
+ pHalData->Index24G_BW40_Base[0][ch] = pwrInfo24G.IndexBW40_Base[0][group];
}
- for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
- pHalData->CCK_24G_Diff[rfPath][TxCount] = pwrInfo24G.CCK_Diff[rfPath][TxCount];
- pHalData->OFDM_24G_Diff[rfPath][TxCount] = pwrInfo24G.OFDM_Diff[rfPath][TxCount];
- pHalData->BW20_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW20_Diff[rfPath][TxCount];
- pHalData->BW40_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW40_Diff[rfPath][TxCount];
- DBG_88E("======= TxCount %d =======\n", TxCount);
- DBG_88E("CCK_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->CCK_24G_Diff[rfPath][TxCount]);
- DBG_88E("OFDM_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->OFDM_24G_Diff[rfPath][TxCount]);
- DBG_88E("BW20_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW20_24G_Diff[rfPath][TxCount]);
- DBG_88E("BW40_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW40_24G_Diff[rfPath][TxCount]);
+ if (bIn24G) {
+ DBG_88E("======= Path %d, Channel %d =======\n", 0, ch);
+ DBG_88E("Index24G_CCK_Base[%d][%d] = 0x%x\n", 0, ch, pHalData->Index24G_CCK_Base[0][ch]);
+ DBG_88E("Index24G_BW40_Base[%d][%d] = 0x%x\n", 0, ch, pHalData->Index24G_BW40_Base[0][ch]);
}
}
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ pHalData->CCK_24G_Diff[0][TxCount] = pwrInfo24G.CCK_Diff[0][TxCount];
+ pHalData->OFDM_24G_Diff[0][TxCount] = pwrInfo24G.OFDM_Diff[0][TxCount];
+ pHalData->BW20_24G_Diff[0][TxCount] = pwrInfo24G.BW20_Diff[0][TxCount];
+ pHalData->BW40_24G_Diff[0][TxCount] = pwrInfo24G.BW40_Diff[0][TxCount];
+ DBG_88E("======= TxCount %d =======\n", TxCount);
+ DBG_88E("CCK_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->CCK_24G_Diff[0][TxCount]);
+ DBG_88E("OFDM_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->OFDM_24G_Diff[0][TxCount]);
+ DBG_88E("BW20_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->BW20_24G_Diff[0][TxCount]);
+ DBG_88E("BW40_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->BW40_24G_Diff[0][TxCount]);
+ }
/* 2010/10/19 MH Add Regulator recognize for CU. */
if (!AutoLoadFail) {
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
index 780666a755ee..12879afb992e 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
@@ -46,16 +46,12 @@ void SwLedOff(struct adapter *padapter, struct LED_871x *pLed)
LedCfg = usb_read8(padapter, REG_LEDCFG2);/* 0x4E */
- if (padapter->HalData->bLedOpenDrain) {
- /* Open-drain arrangement for controlling the LED) */
- LedCfg &= 0x90; /* Set to software control. */
- usb_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3)));
- LedCfg = usb_read8(padapter, REG_MAC_PINMUX_CFG);
- LedCfg &= 0xFE;
- usb_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg);
- } else {
- usb_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3) | BIT(5) | BIT(6)));
- }
+ /* Open-drain arrangement for controlling the LED) */
+ LedCfg &= 0x90; /* Set to software control. */
+ usb_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3)));
+ LedCfg = usb_read8(padapter, REG_MAC_PINMUX_CFG);
+ LedCfg &= 0xFE;
+ usb_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg);
exit:
pLed->bLedOn = false;
}
@@ -69,10 +65,6 @@ void rtw_hal_sw_led_init(struct adapter *padapter)
{
struct led_priv *pledpriv = &(padapter->ledpriv);
- pledpriv->bRegUseLed = true;
- pledpriv->LedControlHandler = LedControl8188eu;
- padapter->HalData->bLedOpenDrain = true;
-
InitLed871x(padapter, &(pledpriv->SwLed0));
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
index d0495a16ff79..0fc093eb7a77 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -37,19 +37,15 @@ int rtw_hal_init_recv_priv(struct adapter *padapter)
/* init recv_buf */
_rtw_init_queue(&precvpriv->free_recv_buf_queue);
- precvpriv->pallocated_recv_buf =
+ precvpriv->precv_buf =
kcalloc(NR_RECVBUFF, sizeof(struct recv_buf), GFP_KERNEL);
- if (!precvpriv->pallocated_recv_buf) {
+ if (!precvpriv->precv_buf) {
res = _FAIL;
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
("alloc recv_buf fail!\n"));
goto exit;
}
-
- precvpriv->precv_buf = precvpriv->pallocated_recv_buf;
-
-
- precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+ precvbuf = precvpriv->precv_buf;
for (i = 0; i < NR_RECVBUFF; i++) {
res = rtw_os_recvbuf_resource_alloc(padapter, precvbuf);
@@ -58,27 +54,18 @@ int rtw_hal_init_recv_priv(struct adapter *padapter)
precvbuf->adapter = padapter;
precvbuf++;
}
- precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
skb_queue_head_init(&precvpriv->rx_skb_queue);
{
int i;
- size_t tmpaddr = 0;
- size_t alignm = 0;
struct sk_buff *pskb = NULL;
skb_queue_head_init(&precvpriv->free_recv_skb_queue);
for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) {
pskb = __netdev_alloc_skb(padapter->pnetdev,
- MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ,
- GFP_KERNEL);
+ MAX_RECVBUF_SZ, GFP_KERNEL);
if (pskb) {
kmemleak_not_leak(pskb);
- pskb->dev = padapter->pnetdev;
- tmpaddr = (size_t)pskb->data;
- alignm = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
- skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignm));
-
skb_queue_tail(&precvpriv->free_recv_skb_queue,
pskb);
}
@@ -95,14 +82,14 @@ void rtw_hal_free_recv_priv(struct adapter *padapter)
struct recv_buf *precvbuf;
struct recv_priv *precvpriv = &padapter->recvpriv;
- precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+ precvbuf = precvpriv->precv_buf;
for (i = 0; i < NR_RECVBUFF; i++) {
usb_free_urb(precvbuf->purb);
precvbuf++;
}
- kfree(precvpriv->pallocated_recv_buf);
+ kfree(precvpriv->precv_buf);
if (skb_queue_len(&precvpriv->rx_skb_queue))
DBG_88E(KERN_WARNING "rx_skb_queue not empty\n");
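The hunks above drop the RECVBUFF_ALIGN_SZ over-allocation and the manual skb_reserve() alignment of pskb->data; receive skbs are now allocated at MAX_RECVBUF_SZ directly. For reference, the removed lines implemented the usual align-a-pointer-upward trick, sketched here in plain user-space C (ALIGN_SZ is a stand-in, not a driver constant):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN_SZ 8	/* stand-in for the removed RECVBUFF_ALIGN_SZ */

int main(void)
{
	/* Over-allocate by ALIGN_SZ, then advance to the next boundary --
	 * what the removed skb_reserve() call did with pskb->data. */
	unsigned char *raw = malloc(1024 + ALIGN_SZ);
	unsigned char *aligned;

	if (!raw)
		return 1;
	aligned = raw + (ALIGN_SZ - ((uintptr_t)raw & (ALIGN_SZ - 1)));
	printf("raw=%p aligned=%p\n", (void *)raw, (void *)aligned);
	free(raw);
	return 0;
}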
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 7692ca495ee5..3675edb61942 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -562,9 +562,6 @@ static void InitUsbAggregationSetting(struct adapter *Adapter)
/* Rx aggregation setting */
usb_AggSettingRxUpdate(Adapter);
-
- /* 201/12/10 MH Add for USB agg mode dynamic switch. */
- Adapter->HalData->UsbRxHighSpeedMode = false;
}
static void _InitBeaconParameters(struct adapter *Adapter)
@@ -604,11 +601,6 @@ static void _BBTurnOnBlock(struct adapter *Adapter)
phy_set_bb_reg(Adapter, rFPGA0_RFMOD, bOFDMEn, 0x1);
}
-enum {
- Antenna_Lfet = 1,
- Antenna_Right = 2,
-};
-
static void _InitAntenna_Selection(struct adapter *Adapter)
{
struct hal_data_8188e *haldata = Adapter->HalData;
@@ -994,19 +986,16 @@ u32 rtw_hal_inirp_init(struct adapter *Adapter)
RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
("===> usb_inirp_init\n"));
- precvpriv->ff_hwaddr = RECV_BULK_IN_ADDR;
-
/* issue Rx irp to receive data */
- precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+ precvbuf = precvpriv->precv_buf;
for (i = 0; i < NR_RECVBUFF; i++) {
- if (usb_read_port(Adapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf) == false) {
+ if (usb_read_port(Adapter, RECV_BULK_IN_ADDR, precvbuf) == false) {
RT_TRACE(_module_hci_hal_init_c_, _drv_err_, ("usb_rx_init: usb_read_port error\n"));
status = _FAIL;
goto exit;
}
precvbuf++;
- precvpriv->free_recv_buf_queue_cnt--;
}
exit:
@@ -1107,18 +1096,12 @@ static void _ReadPROMContent(
readAdapterInfo_8188EU(Adapter);
}
-static void _ReadRFType(struct adapter *Adapter)
-{
- Adapter->HalData->rf_chip = RF_6052;
-}
-
void rtw_hal_read_chip_info(struct adapter *Adapter)
{
unsigned long start = jiffies;
MSG_88E("====> %s\n", __func__);
- _ReadRFType(Adapter);/* rf_chip -> _InitRFType() */
_ReadPROMContent(Adapter);
MSG_88E("<==== %s in %d ms\n", __func__,
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
index 0976a761b280..550ad62e7064 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
@@ -99,17 +99,6 @@ enum phy_rate_tx_offset_area {
RA_OFFSET_HT_CCK,
};
-/* BB/RF related */
-enum RF_TYPE_8190P {
- RF_TYPE_MIN, /* 0 */
- RF_8225 = 1, /* 1 11b/g RF for verification only */
- RF_8256 = 2, /* 2 11b/g/n */
- RF_8258 = 3, /* 3 11a/b/g/n RF */
- RF_6052 = 4, /* 4 11b/g/n RF */
- /* TODO: We should remove this psudo PHY RF after we get new RF. */
- RF_PSEUDO_11N = 5, /* 5, It is a temporality RF. */
-};
-
struct bb_reg_def {
u32 rfintfs; /* set software control: */
/* 0x870~0x877[8 bytes] */
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index 32326fd1dd24..e86419e525d8 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -156,8 +156,6 @@ struct adapter {
u8 hw_init_completed;
void *cmdThread;
- void (*intf_start)(struct adapter *adapter);
- void (*intf_stop)(struct adapter *adapter);
struct net_device *pnetdev;
struct net_device *pmondev;
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index fa032b0c12ff..e1114a95d442 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -190,6 +190,7 @@ void rtw_hal_set_odm_var(struct adapter *padapter,
u32 rtw_hal_inirp_init(struct adapter *padapter);
void rtw_hal_inirp_deinit(struct adapter *padapter);
+void usb_intf_stop(struct adapter *padapter);
s32 rtw_hal_xmit(struct adapter *padapter, struct xmit_frame *pxmitframe);
s32 rtw_hal_mgnt_xmit(struct adapter *padapter,
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index 805f52e108b2..4fb3bb07ceaa 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -80,11 +80,6 @@
#define DM_DIG_FA_TH2_LPS 30 /* 30 lps */
#define RSSI_OFFSET_DIG 0x05;
-/* ANT Test */
-#define ANTTESTALL 0x00 /* Ant A or B will be Testing */
-#define ANTTESTA 0x01 /* Ant A will be Testing */
-#define ANTTESTB 0x02 /* Ant B will be testing */
-
struct rtw_dig {
u8 Dig_Enable_Flag;
u8 Dig_Ext_Port_Stage;
@@ -590,7 +585,6 @@ struct odm_rf_cal {
s32 RegEBC;
u8 TXPowercount;
- bool bTXPowerTrackingInit;
bool bTXPowerTracking;
u8 TxPowerTrackControl; /* for mp mode, turn off txpwrtracking
* as default */
diff --git a/drivers/staging/rtl8188eu/include/osdep_intf.h b/drivers/staging/rtl8188eu/include/osdep_intf.h
index dbd7dc4f87dd..97d3d8504184 100644
--- a/drivers/staging/rtl8188eu/include/osdep_intf.h
+++ b/drivers/staging/rtl8188eu/include/osdep_intf.h
@@ -35,7 +35,8 @@ int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname);
struct net_device *rtw_init_netdev(struct adapter *padapter);
u16 rtw_recv_select_queue(struct sk_buff *skb);
-int pm_netdev_open(struct net_device *pnetdev, u8 bnormal);
+int netdev_open(struct net_device *pnetdev);
+int ips_netdrv_open(struct adapter *padapter);
void rtw_ips_dev_unload(struct adapter *padapter);
int rtw_ips_pwr_up(struct adapter *padapter);
void rtw_ips_pwr_down(struct adapter *padapter);
diff --git a/drivers/staging/rtl8188eu/include/recv_osdep.h b/drivers/staging/rtl8188eu/include/recv_osdep.h
index 7550d58f6b5b..9b43a1314bd5 100644
--- a/drivers/staging/rtl8188eu/include/recv_osdep.h
+++ b/drivers/staging/rtl8188eu/include/recv_osdep.h
@@ -29,8 +29,6 @@ int rtw_recv_indicatepkt(struct adapter *adapter,
void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup);
-void rtw_os_recv_resource_alloc(struct recv_frame *recvfr);
-
int rtw_os_recvbuf_resource_alloc(struct adapter *adapt, struct recv_buf *buf);
void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 7c81e3f3d12e..9330361da4ad 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -200,10 +200,6 @@ struct hal_data_8188e {
u16 BasicRateSet;
- /* rf_ctrl */
- u8 rf_chip;
- u8 NumTotalRFPath;
-
u8 BoardType;
/* EEPROM setting. */
@@ -265,14 +261,6 @@ struct hal_data_8188e {
u32 CCKTxPowerLevelOriginalOffset;
u8 CrystalCap;
- u32 AntennaTxPath; /* Antenna path Tx */
- u32 AntennaRxPath; /* Antenna path Rx */
- u8 BluetoothCoexist;
- u8 ExternalPA;
-
- u8 bLedOpenDrain; /* Open-drain support for controlling the LED.*/
-
- u8 b1x1RecvCombine; /* for 1T1R receive combining */
u32 AcParam_BE; /* Original parameter for BE, use for EDCA turbo. */
@@ -316,14 +304,6 @@ struct hal_data_8188e {
u8 OutEpQueueSel;
u8 OutEpNumber;
- /* Add for USB aggreation mode dynamic shceme. */
- bool UsbRxHighSpeedMode;
-
- /* 2010/11/22 MH Add for slim combo debug mode selective. */
- /* This is used for fix the drawback of CU TSMC-A/UMC-A cut.
- * HW auto suspend ability. Close BT clock. */
- bool SlimComboDbg;
-
u16 EfuseUsedBytes;
/* Auto FSM to Turn On, include clock, isolation, power control
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
index 80832a5f0732..0d8bf51c72a9 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -51,9 +51,7 @@ enum rx_packet_type {
};
#define INTERRUPT_MSG_FORMAT_LEN 60
-void rtl8188eu_recv_hdl(struct adapter *padapter, struct recv_buf *precvbuf);
void rtl8188eu_recv_tasklet(void *priv);
-void rtl8188e_query_rx_phy_status(struct recv_frame *fr, struct phy_stat *phy);
void rtl8188e_process_phy_info(struct adapter *padapter,
struct recv_frame *prframe);
void update_recvframe_phyinfo_88e(struct recv_frame *fra, struct phy_stat *phy);
diff --git a/drivers/staging/rtl8188eu/include/rtw_led.h b/drivers/staging/rtl8188eu/include/rtw_led.h
index f2054ef70358..607d1ba56a46 100644
--- a/drivers/staging/rtl8188eu/include/rtw_led.h
+++ b/drivers/staging/rtl8188eu/include/rtw_led.h
@@ -70,12 +70,9 @@ struct LED_871x {
struct timer_list BlinkTimer; /* Timer object for led blinking. */
- u8 bSWLedCtrl;
-
/* ALPHA, added by chiyoko, 20090106 */
u8 bLedNoLinkBlinkInProgress;
u8 bLedLinkBlinkInProgress;
- u8 bLedStartToLinkBlinkInProgress;
u8 bLedScanBlinkInProgress;
struct work_struct BlinkWorkItem; /* Workitem used by BlinkTimer to
* manipulate H/W to blink LED. */
@@ -91,18 +88,9 @@ void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction);
struct led_priv {
/* add for led control */
struct LED_871x SwLed0;
- u8 bRegUseLed;
- void (*LedControlHandler)(struct adapter *padapter,
- enum LED_CTL_MODE LedAction);
/* add for led control */
};
-#define rtw_led_control(adapt, action) \
- do { \
- if ((adapt)->ledpriv.LedControlHandler) \
- (adapt)->ledpriv.LedControlHandler((adapt), (action)); \
- } while (0)
-
void BlinkTimerCallback(unsigned long data);
void BlinkWorkItemCallback(struct work_struct *work);
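Throughout this series, rtw_led_control() call sites become direct LedControl8188eu() calls: the macro and the per-adapter LedControlHandler pointer removed above were the only users of that indirection, and this driver has a single LED implementation. A minimal sketch of the two call shapes, using stand-in types rather than the driver's structs:

#include <stdio.h>

enum led_ctl_mode { LED_CTL_NO_LINK, LED_CTL_TX, LED_CTL_RX, LED_CTL_POWER_OFF };

struct adapter { int id; };	/* trimmed stand-in */

static void LedControl8188eu_sketch(struct adapter *a, enum led_ctl_mode action)
{
	printf("adapter %d: LED action %d\n", a->id, action);
}

int main(void)
{
	struct adapter a = { .id = 0 };

	/* Old shape: call through a per-adapter handler pointer, as the removed macro did.
	 * New shape: only one LED variant exists, so call sites invoke it directly. */
	void (*handler)(struct adapter *, enum led_ctl_mode) = LedControl8188eu_sketch;

	handler(&a, LED_CTL_RX);			/* what rtw_led_control() used to expand to */
	LedControl8188eu_sketch(&a, LED_CTL_RX);	/* what call sites do now */
	return 0;
}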
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 9434b869c5e9..18fb7e7b2273 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -504,7 +504,7 @@ void rtw_scan_abort(struct adapter *adapter);
int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie,
uint in_len);
int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie,
- uint in_len, uint initial_out_len);
+ uint in_len, uint initial_out_len);
void rtw_init_registrypriv_dev_network(struct adapter *adapter);
void rtw_update_registrypriv_dev_network(struct adapter *adapter);
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
index 49d973881a04..052af7b891da 100644
--- a/drivers/staging/rtl8188eu/include/rtw_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -139,8 +139,6 @@ struct rx_pkt_attrib {
#define SN_EQUAL(a, b) (a == b)
#define REORDER_WAIT_TIME (50) /* (ms) */
-#define RECVBUFF_ALIGN_SZ 8
-
#define RXDESC_SIZE 24
#define RXDESC_OFFSET RXDESC_SIZE
@@ -166,9 +164,7 @@ struct recv_priv {
struct __queue free_recv_queue;
struct __queue recv_pending_queue;
struct __queue uc_swdec_pending_queue;
- u8 *pallocated_frame_buf;
- u8 *precv_frame_buf;
- uint free_recvframe_cnt;
+ void *pallocated_frame_buf;
struct adapter *adapter;
u32 bIsAnyNonBEPkts;
u64 rx_bytes;
@@ -176,17 +172,12 @@ struct recv_priv {
u64 rx_drop;
u64 last_rx_bytes;
- uint ff_hwaddr;
- u8 rx_pending_cnt;
-
struct tasklet_struct irq_prepare_beacon_tasklet;
struct tasklet_struct recv_tasklet;
struct sk_buff_head free_recv_skb_queue;
struct sk_buff_head rx_skb_queue;
- u8 *pallocated_recv_buf;
- u8 *precv_buf; /* 4 alignment */
+ struct recv_buf *precv_buf; /* 4 alignment */
struct __queue free_recv_buf_queue;
- u32 free_recv_buf_queue_cnt;
/* For display the phy informatiom */
u8 is_signal_dbg; /* for debug */
u8 signal_strength_dbg; /* for debug */
diff --git a/drivers/staging/rtl8188eu/include/usb_ops_linux.h b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
index 78d9b6e035bf..fb586365d2e5 100644
--- a/drivers/staging/rtl8188eu/include/usb_ops_linux.h
+++ b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
@@ -53,7 +53,7 @@ u8 usb_read8(struct adapter *adapter, u32 addr);
u16 usb_read16(struct adapter *adapter, u32 addr);
u32 usb_read32(struct adapter *adapter, u32 addr);
-u32 usb_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+u32 usb_read_port(struct adapter *adapter, u32 addr, struct recv_buf *precvbuf);
void usb_read_port_cancel(struct adapter *adapter);
int usb_write8(struct adapter *adapter, u32 addr, u8 val);
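The usb_read_port() prototype now takes the receive buffer as a typed struct recv_buf * instead of a generic u8 * plus an unused count, so the casts at the call sites (seen later in this diff in usb_halinit.c and usb_ops_linux.c) go away. A toy illustration of that signature shape, with placeholder values where the driver's constants are not shown here:

#include <stdio.h>

struct recv_buf { int id; };	/* trimmed stand-in for the driver's struct */

/* New-style call: the buffer keeps its real type end to end. */
static unsigned int read_port_sketch(unsigned int addr, struct recv_buf *precvbuf)
{
	printf("submit read URB: addr 0x%x, buf %d\n", addr, precvbuf->id);
	return 1;	/* _SUCCESS-style return; the value is illustrative */
}

int main(void)
{
	struct recv_buf buf = { .id = 0 };

	/* 0x80 is a placeholder, not RECV_BULK_IN_ADDR's real value */
	return read_port_sketch(0x80, &buf) ? 0 : 1;
}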
diff --git a/drivers/staging/rtl8188eu/include/xmit_osdep.h b/drivers/staging/rtl8188eu/include/xmit_osdep.h
index f96ca6af934d..959ef4b3066c 100644
--- a/drivers/staging/rtl8188eu/include/xmit_osdep.h
+++ b/drivers/staging/rtl8188eu/include/xmit_osdep.h
@@ -41,13 +41,11 @@ void rtw_os_xmit_schedule(struct adapter *padapter);
int rtw_os_xmit_resource_alloc(struct adapter *padapter,
struct xmit_buf *pxmitbuf, u32 alloc_sz);
-void rtw_os_xmit_resource_free(struct adapter *padapter,
- struct xmit_buf *pxmitbuf, u32 free_sz);
+void rtw_os_xmit_resource_free(struct xmit_buf *pxmitbuf);
uint rtw_remainder_len(struct pkt_file *pfile);
void _rtw_open_pktfile(struct sk_buff *pkt, struct pkt_file *pfile);
uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen);
-int rtw_endofpktfile(struct pkt_file *pfile);
void rtw_os_pkt_complete(struct adapter *padapter, struct sk_buff *pkt);
void rtw_os_xmit_complete(struct adapter *padapter,
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 40691f1ec507..8fc3fadf065f 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -144,7 +144,6 @@ static bool rtw_monitor_enable;
module_param_named(monitor_enable, rtw_monitor_enable, bool, 0444);
MODULE_PARM_DESC(monitor_enable, "Enable monitor inferface (default: false)");
-static int netdev_open(struct net_device *pnetdev);
static int netdev_close(struct net_device *pnetdev);
static void loadparam(struct adapter *padapter, struct net_device *pnetdev)
@@ -596,10 +595,9 @@ static int _netdev_open(struct net_device *pnetdev)
pr_info("can't init mlme_ext_priv\n");
goto netdev_open_error;
}
- if (padapter->intf_start)
- padapter->intf_start(padapter);
+ rtw_hal_inirp_init(padapter);
- rtw_led_control(padapter, LED_CTL_NO_LINK);
+ LedControl8188eu(padapter, LED_CTL_NO_LINK);
padapter->bup = true;
}
@@ -630,7 +628,7 @@ netdev_open_error:
return -1;
}
-static int netdev_open(struct net_device *pnetdev)
+int netdev_open(struct net_device *pnetdev)
{
int ret;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
@@ -642,7 +640,7 @@ static int netdev_open(struct net_device *pnetdev)
return ret;
}
-static int ips_netdrv_open(struct adapter *padapter)
+int ips_netdrv_open(struct adapter *padapter)
{
int status = _SUCCESS;
@@ -658,8 +656,7 @@ static int ips_netdrv_open(struct adapter *padapter)
goto netdev_open_error;
}
- if (padapter->intf_start)
- padapter->intf_start(padapter);
+ rtw_hal_inirp_init(padapter);
rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
mod_timer(&padapter->mlmepriv.dynamic_chk_timer,
@@ -684,7 +681,7 @@ int rtw_ips_pwr_up(struct adapter *padapter)
result = ips_netdrv_open(padapter);
- rtw_led_control(padapter, LED_CTL_NO_LINK);
+ LedControl8188eu(padapter, LED_CTL_NO_LINK);
DBG_88E("<=== rtw_ips_pwr_up.............. in %dms\n",
jiffies_to_msecs(jiffies - start_time));
@@ -699,7 +696,7 @@ void rtw_ips_pwr_down(struct adapter *padapter)
padapter->net_closed = true;
- rtw_led_control(padapter, LED_CTL_POWER_OFF);
+ LedControl8188eu(padapter, LED_CTL_POWER_OFF);
rtw_ips_dev_unload(padapter);
DBG_88E("<=== rtw_ips_pwr_down..................... in %dms\n",
@@ -712,25 +709,13 @@ void rtw_ips_dev_unload(struct adapter *padapter)
rtw_hal_set_hwreg(padapter, HW_VAR_FIFO_CLEARN_UP, NULL);
- if (padapter->intf_stop)
- padapter->intf_stop(padapter);
+ usb_intf_stop(padapter);
/* s5. */
if (!padapter->bSurpriseRemoved)
rtw_hal_deinit(padapter);
}
-int pm_netdev_open(struct net_device *pnetdev, u8 bnormal)
-{
- int status;
-
- if (bnormal)
- status = netdev_open(pnetdev);
- else
- status = (_SUCCESS == ips_netdrv_open((struct adapter *)rtw_netdev_priv(pnetdev))) ? (0) : (-1);
- return status;
-}
-
static int netdev_close(struct net_device *pnetdev)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
@@ -763,7 +748,7 @@ static int netdev_close(struct net_device *pnetdev)
/* s2-4. */
rtw_free_network_queue(padapter, true);
/* Close LED */
- rtw_led_control(padapter, LED_CTL_POWER_OFF);
+ LedControl8188eu(padapter, LED_CTL_POWER_OFF);
}
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - drv_close\n"));
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index 7cd2655f27fe..6ff836f481da 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -12,8 +12,6 @@
* more details.
*
******************************************************************************/
-
-
#define _OSDEP_SERVICE_C_
#include <osdep_service.h>
@@ -24,9 +22,10 @@
#include <rtw_ioctl_set.h>
/*
-* Translate the OS dependent @param error_code to OS independent RTW_STATUS_CODE
-* @return: one of RTW_STATUS_CODE
-*/
+ * Translate the OS dependent @param error_code to OS independent
+ * RTW_STATUS_CODE
+ * @return: one of RTW_STATUS_CODE
+ */
inline int RTW_STATUS_CODE(int error_code)
{
if (error_code >= 0)
@@ -43,22 +42,20 @@ void *rtw_malloc2d(int h, int w, int size)
{
int j;
- void **a = kzalloc(h*sizeof(void *) + h*w*size, GFP_KERNEL);
- if (!a) {
- pr_info("%s: alloc memory fail!\n", __func__);
- return NULL;
- }
+ void **a = kzalloc(h * sizeof(void *) + h * w * size, GFP_KERNEL);
+ if (!a)
+ goto out;
for (j = 0; j < h; j++)
- a[j] = ((char *)(a+h)) + j*w*size;
-
+ a[j] = ((char *)(a + h)) + j * w * size;
+out:
return a;
}
-void _rtw_init_queue(struct __queue *pqueue)
+void _rtw_init_queue(struct __queue *pqueue)
{
- INIT_LIST_HEAD(&(pqueue->queue));
- spin_lock_init(&(pqueue->lock));
+ INIT_LIST_HEAD(&pqueue->queue);
+ spin_lock_init(&pqueue->lock);
}
struct net_device *rtw_alloc_etherdev_with_old_priv(void *old_priv)
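The tidied rtw_malloc2d() above keeps its original layout: one allocation whose first h slots hold row pointers and whose remainder is the h*w*size data area those pointers index into. A user-space sketch of the same single-allocation layout, assuming nothing beyond the C library:

#include <stdio.h>
#include <stdlib.h>

/* Same layout as rtw_malloc2d(): h row pointers followed by the data area. */
static void *malloc2d(int h, int w, int size)
{
	int j;
	void **a = calloc(1, h * sizeof(void *) + (size_t)h * w * size);

	if (!a)
		return NULL;
	for (j = 0; j < h; j++)
		a[j] = (char *)(a + h) + (size_t)j * w * size;
	return a;
}

int main(void)
{
	int **m = malloc2d(3, 4, sizeof(int));

	if (!m)
		return 1;
	m[2][3] = 42;		/* rows are addressable like a normal 2-D array */
	printf("%d\n", m[2][3]);
	free(m);		/* one allocation, one free */
	return 0;
}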
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index 103cdb4ed073..b85824ec5354 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -21,12 +21,6 @@
#include <osdep_intf.h>
#include <usb_ops_linux.h>
-/* alloc os related resource in struct recv_frame */
-void rtw_os_recv_resource_alloc(struct recv_frame *precvframe)
-{
- precvframe->pkt = NULL;
-}
-
/* alloc os related resource in struct recv_buf */
int rtw_os_recvbuf_resource_alloc(struct adapter *padapter,
struct recv_buf *precvbuf)
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 68e1e6bbe87f..c6316ffa64d3 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -141,16 +141,7 @@ static void usb_dvobj_deinit(struct usb_interface *usb_intf)
}
-static void usb_intf_start(struct adapter *padapter)
-{
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+usb_intf_start\n"));
-
- rtw_hal_inirp_init(padapter);
-
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-usb_intf_start\n"));
-}
-
-static void usb_intf_stop(struct adapter *padapter)
+void usb_intf_stop(struct adapter *padapter)
{
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+usb_intf_stop\n"));
@@ -183,8 +174,7 @@ static void rtw_dev_unload(struct adapter *padapter)
if (padapter->xmitpriv.ack_tx)
rtw_ack_tx_done(&padapter->xmitpriv, RTW_SCTX_DONE_DRV_STOP);
/* s3. */
- if (padapter->intf_stop)
- padapter->intf_stop(padapter);
+ usb_intf_stop(padapter);
/* s4. */
if (!padapter->pwrctrlpriv.bInternalAutoSuspend)
rtw_stop_drv_threads(padapter);
@@ -294,7 +284,7 @@ static int rtw_resume_process(struct adapter *padapter)
pwrpriv->bkeepfwalive = false;
pr_debug("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
- if (pm_netdev_open(pnetdev, true) != 0) {
+ if (netdev_open(pnetdev) != 0) {
mutex_unlock(&pwrpriv->mutex_lock);
goto exit;
}
@@ -366,9 +356,6 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
if (!padapter->HalData)
DBG_88E("cant not alloc memory for HAL DATA\n");
- padapter->intf_start = &usb_intf_start;
- padapter->intf_stop = &usb_intf_stop;
-
/* step read_chip_version */
rtw_hal_read_chip_version(padapter);
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index d0d591501b73..e2dbe1b4afd3 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -167,27 +167,26 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
}
if (pattrib->pkt_rpt_type == NORMAL_RX) { /* Normal rx packet */
if (pattrib->physt)
- update_recvframe_phyinfo_88e(precvframe, (struct phy_stat *)pphy_status);
+ update_recvframe_phyinfo_88e(precvframe, pphy_status);
if (rtw_recv_entry(precvframe) != _SUCCESS) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
("recvbuf2recvframe: rtw_recv_entry(precvframe) != _SUCCESS\n"));
}
- } else {
- /* enqueue recvframe to txrtp queue */
- if (pattrib->pkt_rpt_type == TX_REPORT1) {
- /* CCX-TXRPT ack for xmit mgmt frames. */
- handle_txrpt_ccx_88e(adapt, precvframe->rx_data);
- } else if (pattrib->pkt_rpt_type == TX_REPORT2) {
- ODM_RA_TxRPT2Handle_8188E(
- &haldata->odmpriv,
- precvframe->rx_data,
- pattrib->pkt_len,
- pattrib->MacIDValidEntry[0],
- pattrib->MacIDValidEntry[1]
- );
- } else if (pattrib->pkt_rpt_type == HIS_REPORT) {
- interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->rx_data);
- }
+ } else if (pattrib->pkt_rpt_type == TX_REPORT1) {
+ /* CCX-TXRPT ack for xmit mgmt frames. */
+ handle_txrpt_ccx_88e(adapt, precvframe->rx_data);
+ rtw_free_recvframe(precvframe, pfree_recv_queue);
+ } else if (pattrib->pkt_rpt_type == TX_REPORT2) {
+ ODM_RA_TxRPT2Handle_8188E(
+ &haldata->odmpriv,
+ precvframe->rx_data,
+ pattrib->pkt_len,
+ pattrib->MacIDValidEntry[0],
+ pattrib->MacIDValidEntry[1]
+ );
+ rtw_free_recvframe(precvframe, pfree_recv_queue);
+ } else if (pattrib->pkt_rpt_type == HIS_REPORT) {
+ interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->rx_data);
rtw_free_recvframe(precvframe, pfree_recv_queue);
}
pkt_cnt--;
@@ -253,7 +252,7 @@ static int usbctrl_vendorreq(struct adapter *adapt, u8 request, u16 value, u16 i
/* Acquire IO memory for vendorreq */
pIo_buf = kmalloc(MAX_USB_IO_CTL_SIZE, GFP_ATOMIC);
- if (pIo_buf == NULL) {
+ if (!pIo_buf) {
DBG_88E("[%s] pIo_buf == NULL\n", __func__);
status = -ENOMEM;
goto release_mutex;
@@ -384,8 +383,6 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete!!!\n"));
- precvpriv->rx_pending_cnt--;
-
if (adapt->bSurpriseRemoved || adapt->bDriverStopped || adapt->bReadPortCancel) {
RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
("usb_read_port_complete:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n",
@@ -403,7 +400,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
("usb_read_port_complete: (purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)\n"));
precvbuf->reuse = true;
- usb_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+ usb_read_port(adapt, RECV_BULK_IN_ADDR, precvbuf);
DBG_88E("%s()-%d: RX Warning!\n", __func__, __LINE__);
} else {
skb_put(precvbuf->pskb, purb->actual_length);
@@ -414,7 +411,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
precvbuf->pskb = NULL;
precvbuf->reuse = false;
- usb_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+ usb_read_port(adapt, RECV_BULK_IN_ADDR, precvbuf);
}
} else {
RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete : purb->status(%d) != 0\n", purb->status));
@@ -437,7 +434,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
case -EOVERFLOW:
adapt->HalData->srestpriv.Wifi_Error_Status = USB_READ_PORT_FAIL;
precvbuf->reuse = true;
- usb_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+ usb_read_port(adapt, RECV_BULK_IN_ADDR, precvbuf);
break;
case -EINPROGRESS:
DBG_88E("ERROR: URB IS IN PROGRESS!\n");
@@ -448,17 +445,14 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
}
}
-u32 usb_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *rmem)
+u32 usb_read_port(struct adapter *adapter, u32 addr, struct recv_buf *precvbuf)
{
struct urb *purb = NULL;
- struct recv_buf *precvbuf = (struct recv_buf *)rmem;
struct dvobj_priv *pdvobj = adapter_to_dvobj(adapter);
struct recv_priv *precvpriv = &adapter->recvpriv;
struct usb_device *pusbd = pdvobj->pusbdev;
int err;
unsigned int pipe;
- size_t tmpaddr = 0;
- size_t alignment = 0;
u32 ret = _SUCCESS;
@@ -483,22 +477,16 @@ u32 usb_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *rmem)
/* re-assign for linux based on skb */
if ((!precvbuf->reuse) || (precvbuf->pskb == NULL)) {
- precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
+ precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ);
if (precvbuf->pskb == NULL) {
RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("init_recvbuf(): alloc_skb fail!\n"));
DBG_88E("#### usb_read_port() alloc_skb fail!#####\n");
return _FAIL;
}
-
- tmpaddr = (size_t)precvbuf->pskb->data;
- alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
- skb_reserve(precvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
} else { /* reuse skb */
precvbuf->reuse = false;
}
- precvpriv->rx_pending_cnt++;
-
purb = precvbuf->purb;
/* translate DMA FIFO addr to pipehandle */
@@ -528,7 +516,7 @@ void rtw_hal_inirp_deinit(struct adapter *padapter)
int i;
struct recv_buf *precvbuf;
- precvbuf = (struct recv_buf *)padapter->recvpriv.precv_buf;
+ precvbuf = padapter->recvpriv.precv_buf;
DBG_88E("%s\n", __func__);
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index 4b1b04e00715..e097c619ed1b 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -59,11 +59,6 @@ uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen)
return len;
}
-int rtw_endofpktfile(struct pkt_file *pfile)
-{
- return pfile->pkt_len == 0;
-}
-
int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz)
{
int i;
@@ -85,8 +80,7 @@ int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitb
return _SUCCESS;
}
-void rtw_os_xmit_resource_free(struct adapter *padapter,
- struct xmit_buf *pxmitbuf, u32 free_sz)
+void rtw_os_xmit_resource_free(struct xmit_buf *pxmitbuf)
{
int i;
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index 25725b158eca..017fe04ebe2d 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ ******************************************************************************/
#include "dot11d.h"
struct channel_list {
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index f9003a28cae2..757ffd4f2f89 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
@@ -49,7 +49,7 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
else
skb = dev_alloc_skb(frag_length + 4);
- if (skb == NULL) {
+ if (!skb) {
rt_status = false;
goto Failed;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
index 9aaa85526eb8..bbe399010be1 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
@@ -202,7 +202,5 @@ bool rtl92e_init_fw(struct net_device *dev)
download_firmware_fail:
netdev_err(dev, "%s: Failed to initialize firmware.\n", __func__);
- rt_status = false;
- return rt_status;
-
+ return false;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 5f53fbd565ef..8a9172aa8178 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -367,7 +367,7 @@ static void _rtl92e_update_cap(struct net_device *dev, u16 cap)
}
}
-static struct rtllib_qos_parameters def_qos_parameters = {
+static const struct rtllib_qos_parameters def_qos_parameters = {
{cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3)},
{cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7)},
{2, 2, 2, 2},
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index c7fd1b1653d6..20260af49ee7 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ ******************************************************************************/
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/etherdevice.h>
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index dd9c0c868361..cded0f43cd33 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ ******************************************************************************/
#include "rtllib.h"
#include "rtl819x_HT.h"
u8 MCS_FILTER_ALL[16] = {
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index a966a8e490ab..48bbd9e8a52f 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ ******************************************************************************/
#include "rtllib.h"
#include <linux/etherdevice.h>
#include "rtl819x_TS.h"
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index c743182b933e..e5ba7d1a809f 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -130,7 +130,7 @@ rtllib_frag_cache_get(struct rtllib_device *ieee,
ETH_ALEN /* WDS */ +
/* QOS Control */
(RTLLIB_QOS_HAS_SEQ(fc) ? 2 : 0));
- if (skb == NULL)
+ if (!skb)
return NULL;
entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]];
@@ -986,7 +986,7 @@ static void rtllib_rx_extract_addr(struct rtllib_device *ieee,
ether_addr_copy(src, hdr->addr4);
ether_addr_copy(bssid, ieee->current_network.bssid);
break;
- case 0:
+ default:
ether_addr_copy(dst, hdr->addr1);
ether_addr_copy(src, hdr->addr2);
ether_addr_copy(bssid, hdr->addr3);
@@ -1201,6 +1201,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
if (crypt && !(fc & RTLLIB_FCTL_WEP) &&
rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
struct eapol *eap = (struct eapol *)(skb->data + 24);
+
netdev_dbg(ieee->dev, "RX: IEEE 802.1X EAPOL frame: %s\n",
eap_get_type(eap->type));
}
@@ -1430,7 +1431,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
/* skb: hdr + (possible reassembled) full plaintext payload */
payload = skb->data + hdrlen;
rxb = kmalloc(sizeof(struct rtllib_rxb), GFP_ATOMIC);
- if (rxb == NULL)
+ if (!rxb)
goto rx_dropped;
/* to parse amsdu packets */
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index da74dc49b95e..1430ba27b049 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -1524,6 +1524,7 @@ static void rtllib_associate_complete_wq(void *data)
struct rtllib_device,
associate_complete_wq);
struct rt_pwr_save_ctrl *pPSC = &(ieee->PowerSaveControl);
+
netdev_info(ieee->dev, "Associated successfully\n");
if (!ieee->is_silent_reset) {
netdev_info(ieee->dev, "normal associate\n");
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 6fa96d57d316..e68850897adf 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -553,7 +553,7 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */
break;
- case 0:
+ default:
memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
break;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 89cbc077a48d..82f654305414 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -129,7 +129,7 @@ ieee80211_frag_cache_get(struct ieee80211_device *ieee,
8 /* WEP */ +
ETH_ALEN /* WDS */ +
(IEEE80211_QOS_HAS_SEQ(fc)?2:0) /* QOS Control */);
- if (skb == NULL)
+ if (!skb)
return NULL;
entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]];
@@ -1079,7 +1079,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
memcpy(src, hdr->addr4, ETH_ALEN);
memcpy(bssid, ieee->current_network.bssid, ETH_ALEN);
break;
- case 0:
+ default:
memcpy(dst, hdr->addr1, ETH_ALEN);
memcpy(src, hdr->addr2, ETH_ALEN);
memcpy(bssid, hdr->addr3, ETH_ALEN);
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index c9ea50daffff..b8a170978434 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -63,15 +63,6 @@ static inline u32 end_of_queue_search(struct list_head *head,
return (head == plist);
}
-static inline void sleep_schedulable(int ms)
-{
- u32 delta;
-
- delta = msecs_to_jiffies(ms);/*(ms)*/
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(delta);
-}
-
static inline void flush_signals_thread(void)
{
if (signal_pending(current))
diff --git a/drivers/staging/rtl8712/rtl8712_hal.h b/drivers/staging/rtl8712/rtl8712_hal.h
index 57d5d2db3c77..84456bb560ef 100644
--- a/drivers/staging/rtl8712/rtl8712_hal.h
+++ b/drivers/staging/rtl8712/rtl8712_hal.h
@@ -68,14 +68,14 @@ struct fw_priv { /*8-bytes alignment required*/
unsigned char signature_0; /*0x12: CE product, 0x92: IT product*/
unsigned char signature_1; /*0x87: CE product, 0x81: IT product*/
unsigned char hci_sel; /*0x81: PCI-AP, 01:PCIe, 02: 92S-U, 0x82: USB-AP,
- * 0x12: 72S-U, 03:SDIO
- */
+ * 0x12: 72S-U, 03:SDIO
+ */
unsigned char chip_version; /*the same value as register value*/
unsigned char customer_ID_0; /*customer ID low byte*/
unsigned char customer_ID_1; /*customer ID high byte*/
unsigned char rf_config; /*0x11: 1T1R, 0x12: 1T2R, 0x92: 1T2R turbo,
- * 0x22: 2T2R
- */
+ * 0x22: 2T2R
+ */
unsigned char usb_ep_num; /* 4: 4EP, 6: 6EP, 11: 11EP*/
/*--- long word 1 ----*/
unsigned char regulatory_class_0; /*regulatory class bit map 0*/
@@ -99,8 +99,8 @@ struct fw_priv { /*8-bytes alignment required*/
unsigned char qos_en; /*1: QoS enable*/
unsigned char bw_40MHz_en; /*1: 40MHz BW enable*/
unsigned char AMSDU2AMPDU_en; /*1: 4181 convert AMSDU to AMPDU,
- * 0: disable
- */
+ * 0: disable
+ */
unsigned char AMPDU_en; /*1: 11n AMPDU enable*/
unsigned char rate_control_offload; /*1: FW offloads,0: driver handles*/
unsigned char aggregation_offload; /*1: FW offloads,0: driver handles*/
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index a8e237e480c9..317aeeed38e8 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -355,7 +355,7 @@ static void SwLedBlink1(struct LED_871x *pLed)
}
pLed->bLedScanBlinkInProgress = false;
} else {
- if (pLed->bLedOn)
+ if (pLed->bLedOn)
pLed->BlinkingLedState = LED_STATE_OFF;
else
pLed->BlinkingLedState = LED_STATE_ON;
@@ -390,7 +390,7 @@ static void SwLedBlink1(struct LED_871x *pLed)
pLed->BlinkTimes = 0;
pLed->bLedBlinkInProgress = false;
} else {
- if (pLed->bLedOn)
+ if (pLed->bLedOn)
pLed->BlinkingLedState = LED_STATE_OFF;
else
pLed->BlinkingLedState = LED_STATE_ON;
@@ -460,7 +460,7 @@ static void SwLedBlink2(struct LED_871x *pLed)
}
pLed->bLedScanBlinkInProgress = false;
} else {
- if (pLed->bLedOn)
+ if (pLed->bLedOn)
pLed->BlinkingLedState = LED_STATE_OFF;
else
pLed->BlinkingLedState = LED_STATE_ON;
@@ -667,7 +667,7 @@ static void SwLedBlink4(struct LED_871x *pLed)
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
pLed->bLedBlinkInProgress = false;
} else {
- if (pLed->bLedOn)
+ if (pLed->bLedOn)
pLed->BlinkingLedState = LED_STATE_OFF;
else
pLed->BlinkingLedState = LED_STATE_ON;
@@ -764,7 +764,7 @@ static void SwLedBlink5(struct LED_871x *pLed)
msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
pLed->bLedBlinkInProgress = false;
} else {
- if (pLed->bLedOn)
+ if (pLed->bLedOn)
pLed->BlinkingLedState = LED_STATE_OFF;
else
pLed->BlinkingLedState = LED_STATE_ON;
@@ -946,7 +946,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
if (psitesurveyctrl->traffic_busy &&
check_fwstate(pmlmepriv, _FW_LINKED))
; /* dummy branch */
- else if (!pLed->bLedScanBlinkInProgress) {
+ else if (!pLed->bLedScanBlinkInProgress) {
if (IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedNoLinkBlinkInProgress) {
@@ -970,7 +970,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
- }
+ }
break;
case LED_CTL_TX:
case LED_CTL_RX:
@@ -1000,7 +1000,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
case LED_CTL_START_WPS: /*wait until xinpin finish */
case LED_CTL_START_WPS_BOTTON:
- if (!pLed->bLedWPSBlinkInProgress) {
+ if (!pLed->bLedWPSBlinkInProgress) {
if (pLed->bLedNoLinkBlinkInProgress) {
del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
@@ -1113,9 +1113,9 @@ static void SwLedControlMode2(struct _adapter *padapter,
switch (LedAction) {
case LED_CTL_SITE_SURVEY:
- if (pmlmepriv->sitesurveyctrl.traffic_busy)
+ if (pmlmepriv->sitesurveyctrl.traffic_busy)
; /* dummy branch */
- else if (!pLed->bLedScanBlinkInProgress) {
+ else if (!pLed->bLedScanBlinkInProgress) {
if (IS_LED_WPS_BLINKING(pLed))
return;
@@ -1132,7 +1132,7 @@ static void SwLedControlMode2(struct _adapter *padapter,
pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
- }
+ }
break;
case LED_CTL_TX:
@@ -1186,7 +1186,7 @@ static void SwLedControlMode2(struct _adapter *padapter,
pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer,
jiffies + msecs_to_jiffies(0));
- }
+ }
break;
case LED_CTL_STOP_WPS:
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index b7ee5e63af33..04638f1e4e88 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -72,8 +72,11 @@ static sint _init_cmd_priv(struct cmd_priv *pcmdpriv)
((addr_t)(pcmdpriv->cmd_allocated_buf) &
(CMDBUFF_ALIGN_SZ - 1));
pcmdpriv->rsp_allocated_buf = kmalloc(MAX_RSPSZ + 4, GFP_ATOMIC);
- if (!pcmdpriv->rsp_allocated_buf)
+ if (!pcmdpriv->rsp_allocated_buf) {
+ kfree(pcmdpriv->cmd_allocated_buf);
+ pcmdpriv->cmd_allocated_buf = NULL;
return _FAIL;
+ }
pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 -
((addr_t)(pcmdpriv->rsp_allocated_buf) & 3);
pcmdpriv->cmd_issued_cnt = 0;
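Note (not part of the patch): the _init_cmd_priv() hunk above plugs a leak by freeing the first buffer when the second allocation fails. A generic, self-contained sketch of that unwind-on-failure pattern follows; the struct and names (demo_priv, demo_init) are hypothetical and the error code is -ENOMEM rather than the driver's _FAIL.

#include <linux/slab.h>
#include <linux/types.h>

struct demo_priv {
	u8 *cmd_buf;
	u8 *rsp_buf;
};

static int demo_init(struct demo_priv *p)
{
	p->cmd_buf = kmalloc(128, GFP_ATOMIC);
	if (!p->cmd_buf)
		return -ENOMEM;

	p->rsp_buf = kmalloc(128, GFP_ATOMIC);
	if (!p->rsp_buf) {
		kfree(p->cmd_buf);	/* undo the earlier allocation */
		p->cmd_buf = NULL;
		return -ENOMEM;
	}
	return 0;
}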
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 475e7904fe45..590acb5aea3d 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -588,9 +588,9 @@ static int r871x_set_wpa_ie(struct _adapter *padapter, char *pie,
netdev_info(padapter->pnetdev, "r8712u: SET WPS_IE, wps_phase==true\n");
cnt += buf[cnt + 1] + 2;
break;
- } else {
- cnt += buf[cnt + 1] + 2;
}
+
+ cnt += buf[cnt + 1] + 2;
}
}
}
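Note (not part of the patch): the r871x_set_wpa_ie() hunk above drops an "else" that followed a "break", since the break already leaves the loop. A tiny standalone illustration of the same cleanup, with a hypothetical IE-walking loop, is sketched below.

static int demo_skip_ies(const unsigned char *buf, int len)
{
	int cnt = 0;

	while (cnt + 1 < len) {
		if (buf[cnt] == 0xDD) {
			cnt += buf[cnt + 1] + 2;
			break;	/* matched the element: stop scanning */
		}
		/* no "else" needed: the break has already left the loop */
		cnt += buf[cnt + 1] + 2;
	}
	return cnt;
}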
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
index 0aaf2aab6dd0..01a150446f5a 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
@@ -139,9 +139,10 @@ u8 r8712_set_802_11_bssid(struct _adapter *padapter, u8 *bssid)
if (!memcmp(&pmlmepriv->cur_network.network.MacAddress, bssid,
ETH_ALEN)) {
if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE))
- goto _Abort_Set_BSSID; /* driver is in
- * WIFI_ADHOC_MASTER_STATE
- */
+ /* driver is in
+ * WIFI_ADHOC_MASTER_STATE
+ */
+ goto _Abort_Set_BSSID;
} else {
r8712_disassoc_cmd(padapter);
if (check_fwstate(pmlmepriv, _FW_LINKED))
@@ -203,9 +204,10 @@ void r8712_set_802_11_ssid(struct _adapter *padapter,
WIFI_ADHOC_STATE);
}
} else {
- goto _Abort_Set_SSID; /* driver is in
- * WIFI_ADHOC_MASTER_STATE
- */
+ /* driver is in
+ * WIFI_ADHOC_MASTER_STATE
+ */
+ goto _Abort_Set_SSID;
}
}
} else {
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index c1feef3da26c..35cbdc71cad4 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -137,11 +137,10 @@ static void free_network_nolock(struct mlme_priv *pmlmepriv,
}
-/*
- return the wlan_network with the matching addr
- Shall be called under atomic context...
- to avoid possible racing condition...
-*/
+/* return the wlan_network with the matching addr
+ * Shall be called under atomic context...
+ * to avoid possible racing condition...
+ */
static struct wlan_network *_r8712_find_network(struct __queue *scanned_queue,
u8 *addr)
{
@@ -239,11 +238,10 @@ void r8712_free_network_queue(struct _adapter *dev)
}
/*
- return the wlan_network with the matching addr
-
- Shall be called under atomic context...
- to avoid possible racing condition...
-*/
+ * return the wlan_network with the matching addr
+ * Shall be called under atomic context...
+ * to avoid possible racing condition...
+ */
static struct wlan_network *r8712_find_network(struct __queue *scanned_queue,
u8 *addr)
{
@@ -369,9 +367,7 @@ static void update_current_network(struct _adapter *adapter,
}
}
-/*
-Caller must hold pmlmepriv->lock first.
-*/
+/* Caller must hold pmlmepriv->lock first */
static void update_scanned_network(struct _adapter *adapter,
struct wlan_bssid_ex *target)
{
@@ -651,8 +647,8 @@ void r8712_free_assoc_resources(struct _adapter *adapter)
}
/*
-*r8712_indicate_connect: the caller has to lock pmlmepriv->lock
-*/
+ * r8712_indicate_connect: the caller has to lock pmlmepriv->lock
+ */
void r8712_indicate_connect(struct _adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -668,8 +664,8 @@ void r8712_indicate_connect(struct _adapter *padapter)
/*
-*r8712_ind_disconnect: the caller has to lock pmlmepriv->lock
-*/
+ * r8712_ind_disconnect: the caller has to lock pmlmepriv->lock
+ */
void r8712_ind_disconnect(struct _adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1347,8 +1343,8 @@ static int SecIsInPMKIDList(struct _adapter *Adapter, u8 *bssid)
(!memcmp(psecuritypriv->PMKIDList[i].Bssid,
bssid, ETH_ALEN)))
break;
- else
- i++;
+ i++;
+
} while (i < NUM_PMKID_CACHE);
if (i == NUM_PMKID_CACHE) {
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.h b/drivers/staging/rtl8712/rtl871x_mlme.h
index ddaaab058b2f..53a23234c598 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.h
+++ b/drivers/staging/rtl8712/rtl871x_mlme.h
@@ -162,24 +162,6 @@ static inline void clr_fwstate(struct mlme_priv *pmlmepriv, sint state)
spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
}
-static inline void up_scanned_network(struct mlme_priv *pmlmepriv)
-{
- unsigned long irqL;
-
- spin_lock_irqsave(&pmlmepriv->lock, irqL);
- pmlmepriv->num_of_scanned++;
- spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
-}
-
-static inline void down_scanned_network(struct mlme_priv *pmlmepriv)
-{
- unsigned long irqL;
-
- spin_lock_irqsave(&pmlmepriv->lock, irqL);
- pmlmepriv->num_of_scanned--;
- spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
-}
-
static inline void set_scanned_network_val(struct mlme_priv *pmlmepriv,
sint val)
{
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
index d464c136dd98..e42fc1404c35 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
@@ -190,19 +190,15 @@ void r8712_init_pwrctrl_priv(struct _adapter *padapter)
}
/*
-Caller: r8712_cmd_thread
-
-Check if the fw_pwrstate is okay for issuing cmd.
-If not (cpwm should be is less than P2 state), then the sub-routine
-will raise the cpwm to be greater than or equal to P2.
-
-Calling Context: Passive
-
-Return Value:
-
-_SUCCESS: r8712_cmd_thread can issue cmds to firmware afterwards.
-_FAIL: r8712_cmd_thread can not do anything.
-*/
+ * Caller: r8712_cmd_thread
+ * Check if the fw_pwrstate is okay for issuing cmd.
+ * If not (cpwm should be is less than P2 state), then the sub-routine
+ * will raise the cpwm to be greater than or equal to P2.
+ * Calling Context: Passive
+ * Return Value:
+ * _SUCCESS: r8712_cmd_thread can issue cmds to firmware afterwards.
+ * _FAIL: r8712_cmd_thread can not do anything.
+ */
sint r8712_register_cmd_alive(struct _adapter *padapter)
{
uint res = _SUCCESS;
@@ -219,13 +215,11 @@ sint r8712_register_cmd_alive(struct _adapter *padapter)
}
/*
-Caller: ISR
-
-If ISR's txdone,
-No more pkts for TX,
-Then driver shall call this fun. to power down firmware again.
-*/
-
+ * Caller: ISR
+ * If ISR's txdone,
+ * No more pkts for TX,
+ * Then driver shall call this fun. to power down firmware again.
+ */
void r8712_unregister_cmd_alive(struct _adapter *padapter)
{
struct pwrctrl_priv *pwrctrl = &padapter->pwrctrlpriv;
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index cbd2e51ba42b..35c721a50598 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -125,13 +125,10 @@ union recv_frame *r8712_alloc_recvframe(struct __queue *pfree_recv_queue)
}
/*
-caller : defrag; recvframe_chk_defrag in recv_thread (passive)
-pframequeue: defrag_queue : will be accessed in recv_thread (passive)
-
-using spin_lock to protect
-
-*/
-
+ * caller : defrag; recvframe_chk_defrag in recv_thread (passive)
+ * pframequeue: defrag_queue : will be accessed in recv_thread (passive)
+ * using spin_lock to protect
+ */
void r8712_free_recvframe_queue(struct __queue *pframequeue,
struct __queue *pfree_recv_queue)
{
@@ -405,7 +402,7 @@ static sint ap2sta_data_frame(struct _adapter *adapter,
}
/* filter packets that SA is myself or multicast or broadcast */
- if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN))
+ if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN))
return _FAIL;
/* da should be for me */
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index 09242425dad4..a7f04a4b089d 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -159,8 +159,8 @@ static u32 getcrc32(u8 *buf, u32 len)
}
/*
- Need to consider the fragment situation
-*/
+ * Need to consider the fragment situation
+ */
void r8712_wep_encrypt(struct _adapter *padapter, u8 *pxmitframe)
{ /* exclude ICV */
unsigned char crc[4];
@@ -467,22 +467,22 @@ static const unsigned short Sbox1[2][256] = {/* Sbox for hash (can be in ROM) */
};
/*
-**********************************************************************
-* Routine: Phase 1 -- generate P1K, given TA, TK, IV32
-*
-* Inputs:
-* tk[] = temporal key [128 bits]
-* ta[] = transmitter's MAC address [ 48 bits]
-* iv32 = upper 32 bits of IV [ 32 bits]
-* Output:
-* p1k[] = Phase 1 key [ 80 bits]
-*
-* Note:
-* This function only needs to be called every 2**16 packets,
-* although in theory it could be called every packet.
-*
-**********************************************************************
-*/
+ **********************************************************************
+ * Routine: Phase 1 -- generate P1K, given TA, TK, IV32
+ *
+ * Inputs:
+ * tk[] = temporal key [128 bits]
+ * ta[] = transmitter's MAC address [ 48 bits]
+ * iv32 = upper 32 bits of IV [ 32 bits]
+ * Output:
+ * p1k[] = Phase 1 key [ 80 bits]
+ *
+ * Note:
+ * This function only needs to be called every 2**16 packets,
+ * although in theory it could be called every packet.
+ *
+ **********************************************************************
+ */
static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
{
sint i;
@@ -506,28 +506,28 @@ static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
}
/*
-**********************************************************************
-* Routine: Phase 2 -- generate RC4KEY, given TK, P1K, IV16
-*
-* Inputs:
-* tk[] = Temporal key [128 bits]
-* p1k[] = Phase 1 output key [ 80 bits]
-* iv16 = low 16 bits of IV counter [ 16 bits]
-* Output:
-* rc4key[] = the key used to encrypt the packet [128 bits]
-*
-* Note:
-* The value {TA,IV32,IV16} for Phase1/Phase2 must be unique
-* across all packets using the same key TK value. Then, for a
-* given value of TK[], this TKIP48 construction guarantees that
-* the final RC4KEY value is unique across all packets.
-*
-* Suggested implementation optimization: if PPK[] is "overlaid"
-* appropriately on RC4KEY[], there is no need for the final
-* for loop below that copies the PPK[] result into RC4KEY[].
-*
-**********************************************************************
-*/
+ **********************************************************************
+ * Routine: Phase 2 -- generate RC4KEY, given TK, P1K, IV16
+ *
+ * Inputs:
+ * tk[] = Temporal key [128 bits]
+ * p1k[] = Phase 1 output key [ 80 bits]
+ * iv16 = low 16 bits of IV counter [ 16 bits]
+ * Output:
+ * rc4key[] = the key used to encrypt the packet [128 bits]
+ *
+ * Note:
+ * The value {TA,IV32,IV16} for Phase1/Phase2 must be unique
+ * across all packets using the same key TK value. Then, for a
+ * given value of TK[], this TKIP48 construction guarantees that
+ * the final RC4KEY value is unique across all packets.
+ *
+ * Suggested implementation optimization: if PPK[] is "overlaid"
+ * appropriately on RC4KEY[], there is no need for the final
+ * for loop below that copies the PPK[] result into RC4KEY[].
+ *
+ **********************************************************************
+ */
static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
{
sint i;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index be38364c8a7c..4ab82ba9bb3f 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -71,8 +71,8 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv));
spin_lock_init(&pxmitpriv->lock);
/*
- Please insert all the queue initialization using _init_queue below
- */
+ *Please insert all the queue initialization using _init_queue below
+ */
pxmitpriv->adapter = padapter;
_init_queue(&pxmitpriv->be_pending);
_init_queue(&pxmitpriv->bk_pending);
@@ -83,10 +83,10 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
_init_queue(&pxmitpriv->apsd_queue);
_init_queue(&pxmitpriv->free_xmit_queue);
/*
- Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
- and initialize free_xmit_frame below.
- Please also apply free_txobj to link_up all the xmit_frames...
- */
+ * Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
+ * and initialize free_xmit_frame below.
+ * Please also apply free_txobj to link_up all the xmit_frames...
+ */
pxmitpriv->pallocated_frame_buf = kmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4,
GFP_ATOMIC);
if (!pxmitpriv->pallocated_frame_buf) {
@@ -109,8 +109,8 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
}
pxmitpriv->free_xmitframe_cnt = NR_XMITFRAME;
/*
- init xmit hw_txqueue
- */
+ * init xmit hw_txqueue
+ */
_r8712_init_hw_txqueue(&pxmitpriv->be_txqueue, BE_QUEUE_INX);
_r8712_init_hw_txqueue(&pxmitpriv->bk_txqueue, BK_QUEUE_INX);
_r8712_init_hw_txqueue(&pxmitpriv->vi_txqueue, VI_QUEUE_INX);
@@ -128,8 +128,11 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
_init_queue(&pxmitpriv->pending_xmitbuf_queue);
pxmitpriv->pallocated_xmitbuf = kmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4,
GFP_ATOMIC);
- if (!pxmitpriv->pallocated_xmitbuf)
+ if (!pxmitpriv->pallocated_xmitbuf) {
+ kfree(pxmitpriv->pallocated_frame_buf);
+ pxmitpriv->pallocated_frame_buf = NULL;
return _FAIL;
+ }
pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 -
((addr_t)(pxmitpriv->pallocated_xmitbuf) & 3);
pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
@@ -777,24 +780,23 @@ int r8712_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
}
/*
-Calling context:
-1. OS_TXENTRY
-2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
-
-If we turn on USE_RXTHREAD, then, no need for critical section.
-Otherwise, we must use _enter/_exit critical to protect free_xmit_queue...
-
-Must be very very cautious...
-
-*/
-
+ * Calling context:
+ * 1. OS_TXENTRY
+ * 2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
+ *
+ * If we turn on USE_RXTHREAD, then, no need for critical section.
+ * Otherwise, we must use _enter/_exit critical to protect free_xmit_queue...
+ *
+ * Must be very very cautious...
+ *
+ */
struct xmit_frame *r8712_alloc_xmitframe(struct xmit_priv *pxmitpriv)
{
/*
- Please remember to use all the osdep_service api,
- and lock/unlock or _enter/_exit critical to protect
- pfree_xmit_queue
- */
+ * Please remember to use all the osdep_service api,
+ * and lock/unlock or _enter/_exit critical to protect
+ * pfree_xmit_queue
+ */
unsigned long irqL;
struct xmit_frame *pxframe;
struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h
index d899d0c6d3a6..40927277f498 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.h
+++ b/drivers/staging/rtl8712/rtl871x_xmit.h
@@ -261,12 +261,6 @@ struct xmit_priv {
uint free_xmitbuf_cnt;
};
-static inline struct __queue *get_free_xmit_queue(
- struct xmit_priv *pxmitpriv)
-{
- return &(pxmitpriv->free_xmit_queue);
-}
-
int r8712_free_xmitbuf(struct xmit_priv *pxmitpriv,
struct xmit_buf *pxmitbuf);
struct xmit_buf *r8712_alloc_xmitbuf(struct xmit_priv *pxmitpriv);
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index f27df0b4cb44..28d56c5d1449 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -432,31 +432,36 @@ static int ms_pull_ctl_disable(struct rtsx_chip *chip)
if (CHECK_PID(chip, 0x5208)) {
retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF,
- MS_D1_PD | MS_D2_PD | MS_CLK_PD | MS_D6_PD);
+ MS_D1_PD | MS_D2_PD | MS_CLK_PD |
+ MS_D6_PD);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF,
- MS_D3_PD | MS_D0_PD | MS_BS_PD | XD_D4_PD);
+ MS_D3_PD | MS_D0_PD | MS_BS_PD |
+ XD_D4_PD);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF,
- MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ MS_D7_PD | XD_CE_PD | XD_CLE_PD |
+ XD_CD_PU);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF,
- XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
+ XD_RDY_PD | SD_D3_PD | SD_D2_PD |
+ XD_ALE_PD);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF,
- MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ MS_INS_PU | SD_WP_PD | SD_CD_PU |
+ SD_CMD_PD);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -507,17 +512,17 @@ static int ms_pull_ctl_enable(struct rtsx_chip *chip)
if (CHECK_PID(chip, 0x5208)) {
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
- MS_D1_PD | MS_D2_PD | MS_CLK_NP | MS_D6_PD);
+ MS_D1_PD | MS_D2_PD | MS_CLK_NP | MS_D6_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
- MS_D3_PD | MS_D0_PD | MS_BS_NP | XD_D4_PD);
+ MS_D3_PD | MS_D0_PD | MS_BS_NP | XD_D4_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
- MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
- XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
+ XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
- MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
- MS_D5_PD | MS_D4_PD);
+ MS_D5_PD | MS_D4_PD);
} else if (CHECK_PID(chip, 0x5288)) {
if (CHECK_BARO_PKG(chip, QFN)) {
rtsx_add_cmd(chip, WRITE_REG_CMD,
@@ -616,14 +621,20 @@ static int ms_prepare_reset(struct rtsx_chip *chip)
if (chip->asic_code) {
retval = rtsx_write_register(chip, MS_CFG, 0xFF,
- SAMPLE_TIME_RISING | PUSH_TIME_DEFAULT | NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1);
+ SAMPLE_TIME_RISING |
+ PUSH_TIME_DEFAULT |
+ NO_EXTEND_TOGGLE |
+ MS_BUS_WIDTH_1);
if (retval) {
rtsx_trace(chip);
return retval;
}
} else {
retval = rtsx_write_register(chip, MS_CFG, 0xFF,
- SAMPLE_TIME_FALLING | PUSH_TIME_DEFAULT | NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1);
+ SAMPLE_TIME_FALLING |
+ PUSH_TIME_DEFAULT |
+ NO_EXTEND_TOGGLE |
+ MS_BUS_WIDTH_1);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -665,7 +676,7 @@ static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus)
for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, READ_REG,
- 6, NO_WAIT_INT);
+ 6, NO_WAIT_INT);
if (retval == STATUS_SUCCESS)
break;
}
@@ -765,7 +776,7 @@ static int ms_confirm_cpu_startup(struct rtsx_chip *chip)
for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
retval = ms_read_bytes(chip, GET_INT, 1,
- NO_WAIT_INT, &val, 1);
+ NO_WAIT_INT, &val, 1);
if (retval == STATUS_SUCCESS)
break;
}
@@ -794,9 +805,9 @@ static int ms_confirm_cpu_startup(struct rtsx_chip *chip)
}
if (val & INT_REG_ERR) {
- if (val & INT_REG_CMDNK)
+ if (val & INT_REG_CMDNK) {
chip->card_wp |= (MS_CARD);
- else {
+ } else {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -861,7 +872,7 @@ static int ms_switch_8bit_bus(struct rtsx_chip *chip)
for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT,
- 1, NO_WAIT_INT);
+ 1, NO_WAIT_INT);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1061,8 +1072,8 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
return STATUS_FAIL;
}
retval = ms_transfer_data(chip, MS_TM_AUTO_READ,
- PRO_READ_LONG_DATA, 0x40, WAIT_INT,
- 0, 0, buf, 64 * 512);
+ PRO_READ_LONG_DATA, 0x40, WAIT_INT,
+ 0, 0, buf, 64 * 512);
if (retval == STATUS_SUCCESS)
break;
@@ -1087,7 +1098,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
break;
retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ,
- PRO_READ_LONG_DATA, 0, WAIT_INT);
+ PRO_READ_LONG_DATA, 0, WAIT_INT);
if (retval != STATUS_SUCCESS) {
kfree(buf);
rtsx_trace(chip);
@@ -1121,7 +1132,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
#ifdef SUPPORT_MSXC
if ((buf[cur_addr_off + 8] == 0x10) ||
- (buf[cur_addr_off + 8] == 0x13)) {
+ (buf[cur_addr_off + 8] == 0x13)) {
#else
if (buf[cur_addr_off + 8] == 0x10) {
#endif
@@ -1264,7 +1275,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
if (device_type != 0x00) {
if ((device_type == 0x01) || (device_type == 0x02) ||
- (device_type == 0x03)) {
+ (device_type == 0x03)) {
chip->card_wp |= MS_CARD;
} else {
rtsx_trace(chip);
@@ -1298,7 +1309,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
#ifdef SUPPORT_MAGIC_GATE
static int mg_set_tpc_para_sub(struct rtsx_chip *chip,
- int type, u8 mg_entry_num);
+ int type, u8 mg_entry_num);
#endif
static int reset_ms_pro(struct rtsx_chip *chip)
@@ -1317,7 +1328,7 @@ static int reset_ms_pro(struct rtsx_chip *chip)
#endif
#ifdef XC_POWERCLASS
-Retry:
+retry:
#endif
retval = ms_pro_reset_flow(chip, 1);
if (retval != STATUS_SUCCESS) {
@@ -1365,10 +1376,10 @@ Retry:
change_power_class = power_class_mode;
if (change_power_class) {
retval = msxc_change_power(chip,
- change_power_class);
+ change_power_class);
if (retval != STATUS_SUCCESS) {
change_power_class--;
- goto Retry;
+ goto retry;
}
}
}
@@ -1418,14 +1429,14 @@ static int ms_read_status_reg(struct rtsx_chip *chip)
}
static int ms_read_extra_data(struct rtsx_chip *chip,
- u16 block_addr, u8 page_num, u8 *buf, int buf_len)
+ u16 block_addr, u8 page_num, u8 *buf, int buf_len)
{
struct ms_info *ms_card = &chip->ms_card;
int retval, i;
u8 val, data[10];
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ SystemParm, 6);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1488,7 +1499,8 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
}
retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
- MS_EXTRA_SIZE, SystemParm, 6);
+ MS_EXTRA_SIZE, SystemParm,
+ 6);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1497,7 +1509,7 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
}
retval = ms_read_bytes(chip, READ_REG, MS_EXTRA_SIZE, NO_WAIT_INT,
- data, MS_EXTRA_SIZE);
+ data, MS_EXTRA_SIZE);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1512,8 +1524,8 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
return STATUS_SUCCESS;
}
-static int ms_write_extra_data(struct rtsx_chip *chip,
- u16 block_addr, u8 page_num, u8 *buf, int buf_len)
+static int ms_write_extra_data(struct rtsx_chip *chip, u16 block_addr,
+ u8 page_num, u8 *buf, int buf_len)
{
struct ms_info *ms_card = &chip->ms_card;
int retval, i;
@@ -1525,7 +1537,7 @@ static int ms_write_extra_data(struct rtsx_chip *chip,
}
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6 + MS_EXTRA_SIZE);
+ SystemParm, 6 + MS_EXTRA_SIZE);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1588,7 +1600,7 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
u8 val, data[6];
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ SystemParm, 6);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1651,7 +1663,7 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
}
retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ, READ_PAGE_DATA,
- 0, NO_WAIT_INT);
+ 0, NO_WAIT_INT);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1678,7 +1690,7 @@ static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk)
}
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 7);
+ SystemParm, 7);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1742,7 +1754,7 @@ static int ms_erase_block(struct rtsx_chip *chip, u16 phy_blk)
u8 val, data[6];
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ SystemParm, 6);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1844,7 +1856,7 @@ static int ms_init_page(struct rtsx_chip *chip, u16 phy_blk, u16 log_blk,
}
retval = ms_write_extra_data(chip, phy_blk, i,
- extra, MS_EXTRA_SIZE);
+ extra, MS_EXTRA_SIZE);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1855,7 +1867,7 @@ static int ms_init_page(struct rtsx_chip *chip, u16 phy_blk, u16 log_blk,
}
static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
- u16 log_blk, u8 start_page, u8 end_page)
+ u16 log_blk, u8 start_page, u8 end_page)
{
struct ms_info *ms_card = &chip->ms_card;
bool uncorrect_flag = false;
@@ -1915,7 +1927,7 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
ms_read_extra_data(chip, old_blk, i, extra, MS_EXTRA_SIZE);
retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
- MS_EXTRA_SIZE, SystemParm, 6);
+ MS_EXTRA_SIZE, SystemParm, 6);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1971,9 +1983,9 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
}
retval = ms_transfer_tpc(chip,
- MS_TM_NORMAL_READ,
- READ_PAGE_DATA,
- 0, NO_WAIT_INT);
+ MS_TM_NORMAL_READ,
+ READ_PAGE_DATA,
+ 0, NO_WAIT_INT);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1981,20 +1993,24 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
if (uncorrect_flag) {
ms_set_page_status(log_blk, setPS_NG,
- extra, MS_EXTRA_SIZE);
+ extra,
+ MS_EXTRA_SIZE);
if (i == 0)
extra[0] &= 0xEF;
ms_write_extra_data(chip, old_blk, i,
- extra, MS_EXTRA_SIZE);
+ extra,
+ MS_EXTRA_SIZE);
dev_dbg(rtsx_dev(chip), "page %d : extra[0] = 0x%x\n",
i, extra[0]);
MS_SET_BAD_BLOCK_FLG(ms_card);
ms_set_page_status(log_blk, setPS_Error,
- extra, MS_EXTRA_SIZE);
+ extra,
+ MS_EXTRA_SIZE);
ms_write_extra_data(chip, new_blk, i,
- extra, MS_EXTRA_SIZE);
+ extra,
+ MS_EXTRA_SIZE);
continue;
}
@@ -2021,8 +2037,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
}
}
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
- MS_EXTRA_SIZE, SystemParm, (6 + MS_EXTRA_SIZE));
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, (6 + MS_EXTRA_SIZE));
ms_set_err_code(chip, MS_NO_ERROR);
@@ -2085,7 +2101,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
if (i == 0) {
retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
- MS_EXTRA_SIZE, SystemParm, 7);
+ MS_EXTRA_SIZE, SystemParm,
+ 7);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -2121,7 +2138,7 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
ms_set_err_code(chip, MS_NO_ERROR);
retval = ms_read_bytes(chip, GET_INT, 1,
- NO_WAIT_INT, &val, 1);
+ NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -2361,7 +2378,7 @@ RE_SEARCH:
}
retval = ms_transfer_tpc(chip, MS_TM_WRITE_BYTES, WRITE_REG, 1,
- NO_WAIT_INT);
+ NO_WAIT_INT);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -2369,7 +2386,9 @@ RE_SEARCH:
retval = rtsx_write_register(chip, MS_CFG,
0x58 | MS_NO_CHECK_INT,
- MS_BUS_WIDTH_4 | PUSH_TIME_ODD | MS_NO_CHECK_INT);
+ MS_BUS_WIDTH_4 |
+ PUSH_TIME_ODD |
+ MS_NO_CHECK_INT);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -2474,7 +2493,7 @@ static u16 ms_get_l2p_tbl(struct rtsx_chip *chip, int seg_no, u16 log_off)
}
static void ms_set_l2p_tbl(struct rtsx_chip *chip,
- int seg_no, u16 log_off, u16 phy_blk)
+ int seg_no, u16 log_off, u16 phy_blk)
{
struct ms_info *ms_card = &chip->ms_card;
struct zone_entry *segment;
@@ -2530,7 +2549,7 @@ static const unsigned short ms_start_idx[] = {0, 494, 990, 1486, 1982, 2478,
7934};
static int ms_arbitrate_l2p(struct rtsx_chip *chip, u16 phy_blk,
- u16 log_off, u8 us1, u8 us2)
+ u16 log_off, u8 us1, u8 us2)
{
struct ms_info *ms_card = &chip->ms_card;
struct zone_entry *segment;
@@ -2627,7 +2646,8 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
disable_cnt = segment->disable_count;
- segment->get_index = segment->set_index = 0;
+ segment->get_index = 0;
+ segment->set_index = 0;
segment->unused_blk_cnt = 0;
for (phy_blk = start; phy_blk < end; phy_blk++) {
@@ -2646,7 +2666,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
}
retval = ms_read_extra_data(chip, phy_blk, 0,
- extra, MS_EXTRA_SIZE);
+ extra, MS_EXTRA_SIZE);
if (retval != STATUS_SUCCESS) {
dev_dbg(rtsx_dev(chip), "read extra data fail\n");
ms_set_bad_block(chip, phy_blk);
@@ -2685,7 +2705,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
}
if ((log_blk < ms_start_idx[seg_no]) ||
- (log_blk >= ms_start_idx[seg_no + 1])) {
+ (log_blk >= ms_start_idx[seg_no + 1])) {
if (!(chip->card_wp & MS_CARD)) {
retval = ms_erase_block(chip, phy_blk);
if (retval != STATUS_SUCCESS)
@@ -2705,7 +2725,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
us1 = extra[0] & 0x10;
tmp_blk = segment->l2p_table[idx];
retval = ms_read_extra_data(chip, tmp_blk, 0,
- extra, MS_EXTRA_SIZE);
+ extra, MS_EXTRA_SIZE);
if (retval != STATUS_SUCCESS)
continue;
us2 = extra[0] & 0x10;
@@ -2774,7 +2794,8 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
phy_blk = ms_get_unused_block(chip, 0);
retval = ms_copy_page(chip, tmp_blk, phy_blk,
- log_blk, 0, ms_card->page_off + 1);
+ log_blk, 0,
+ ms_card->page_off + 1);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -2861,7 +2882,7 @@ int reset_ms_card(struct rtsx_chip *chip)
}
static int mspro_set_rw_cmd(struct rtsx_chip *chip,
- u32 start_sec, u16 sec_cnt, u8 cmd)
+ u32 start_sec, u16 sec_cnt, u8 cmd)
{
int retval, i;
u8 data[8];
@@ -2932,8 +2953,8 @@ static inline int ms_auto_tune_clock(struct rtsx_chip *chip)
}
static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
- struct rtsx_chip *chip, u32 start_sector,
- u16 sector_cnt)
+ struct rtsx_chip *chip, u32 start_sector,
+ u16 sector_cnt)
{
struct ms_info *ms_card = &chip->ms_card;
bool mode_2k = false;
@@ -2992,12 +3013,13 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
}
if (ms_card->seq_mode) {
- if ((ms_card->pre_dir != srb->sc_data_direction)
- || ((ms_card->pre_sec_addr + ms_card->pre_sec_cnt) != start_sector)
- || (mode_2k && (ms_card->seq_mode & MODE_512_SEQ))
- || (!mode_2k && (ms_card->seq_mode & MODE_2K_SEQ))
- || !(val & MS_INT_BREQ)
- || ((ms_card->total_sec_cnt + sector_cnt) > 0xFE00)) {
+ if ((ms_card->pre_dir != srb->sc_data_direction) ||
+ ((ms_card->pre_sec_addr + ms_card->pre_sec_cnt) !=
+ start_sector) ||
+ (mode_2k && (ms_card->seq_mode & MODE_512_SEQ)) ||
+ (!mode_2k && (ms_card->seq_mode & MODE_2K_SEQ)) ||
+ !(val & MS_INT_BREQ) ||
+ ((ms_card->total_sec_cnt + sector_cnt) > 0xFE00)) {
ms_card->seq_mode = 0;
ms_card->total_sec_cnt = 0;
if (val & MS_INT_BREQ) {
@@ -3007,7 +3029,8 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
return STATUS_FAIL;
}
- rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
+ rtsx_write_register(chip, RBCTL, RB_FLUSH,
+ RB_FLUSH);
}
}
}
@@ -3038,8 +3061,8 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
}
retval = ms_transfer_data(chip, trans_mode, rw_tpc, sector_cnt,
- WAIT_INT, mode_2k, scsi_sg_count(srb),
- scsi_sglist(srb), scsi_bufflen(srb));
+ WAIT_INT, mode_2k, scsi_sg_count(srb),
+ scsi_sglist(srb), scsi_bufflen(srb));
if (retval != STATUS_SUCCESS) {
ms_card->seq_mode = 0;
rtsx_read_register(chip, MS_TRANS_CFG, &val);
@@ -3076,7 +3099,7 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
}
static int mspro_read_format_progress(struct rtsx_chip *chip,
- const int short_data_len)
+ const int short_data_len)
{
struct ms_info *ms_card = &chip->ms_card;
int retval, i;
@@ -3102,7 +3125,8 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
}
if (!(tmp & MS_INT_BREQ)) {
- if ((tmp & (MS_INT_CED | MS_INT_BREQ | MS_INT_CMDNK | MS_INT_ERR)) == MS_INT_CED) {
+ if ((tmp & (MS_INT_CED | MS_INT_BREQ | MS_INT_CMDNK |
+ MS_INT_ERR)) == MS_INT_CED) {
ms_card->format_status = FORMAT_SUCCESS;
return STATUS_SUCCESS;
}
@@ -3117,7 +3141,7 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
cnt = (u8)short_data_len;
retval = rtsx_write_register(chip, MS_CFG, MS_NO_CHECK_INT,
- MS_NO_CHECK_INT);
+ MS_NO_CHECK_INT);
if (retval != STATUS_SUCCESS) {
ms_card->format_status = FORMAT_FAIL;
rtsx_trace(chip);
@@ -3125,7 +3149,7 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
}
retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, cnt, WAIT_INT,
- data, 8);
+ data, 8);
if (retval != STATUS_SUCCESS) {
ms_card->format_status = FORMAT_FAIL;
rtsx_trace(chip);
@@ -3204,7 +3228,7 @@ void mspro_polling_format_status(struct rtsx_chip *chip)
int i;
if (ms_card->pro_under_formatting &&
- (rtsx_get_stat(chip) != RTSX_STAT_SS)) {
+ (rtsx_get_stat(chip) != RTSX_STAT_SS)) {
rtsx_set_stat(chip, RTSX_STAT_RUN);
for (i = 0; i < 65535; i++) {
@@ -3216,7 +3240,7 @@ void mspro_polling_format_status(struct rtsx_chip *chip)
}
int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- int short_data_len, bool quick_format)
+ int short_data_len, bool quick_format)
{
struct ms_info *ms_card = &chip->ms_card;
int retval, i;
@@ -3305,9 +3329,9 @@ int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
}
static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
- u16 log_blk, u8 start_page, u8 end_page,
- u8 *buf, unsigned int *index,
- unsigned int *offset)
+ u16 log_blk, u8 start_page, u8 end_page,
+ u8 *buf, unsigned int *index,
+ unsigned int *offset)
{
struct ms_info *ms_card = &chip->ms_card;
int retval, i;
@@ -3315,7 +3339,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
u8 *ptr;
retval = ms_read_extra_data(chip, phy_blk, start_page,
- extra, MS_EXTRA_SIZE);
+ extra, MS_EXTRA_SIZE);
if (retval == STATUS_SUCCESS) {
if ((extra[1] & 0x30) != 0x30) {
ms_set_err_code(chip, MS_FLASH_READ_ERROR);
@@ -3325,7 +3349,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
}
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ SystemParm, 6);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3389,11 +3413,17 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
if (retval != STATUS_SUCCESS) {
if (!(chip->card_wp & MS_CARD)) {
reset_ms(chip);
- ms_set_page_status(log_blk, setPS_NG, extra, MS_EXTRA_SIZE);
- ms_write_extra_data(chip, phy_blk,
- page_addr, extra, MS_EXTRA_SIZE);
+ ms_set_page_status
+ (log_blk, setPS_NG,
+ extra,
+ MS_EXTRA_SIZE);
+ ms_write_extra_data
+ (chip, phy_blk,
+ page_addr, extra,
+ MS_EXTRA_SIZE);
}
- ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ ms_set_err_code(chip,
+ MS_FLASH_READ_ERROR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3420,7 +3450,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
}
retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT,
- &val, 1);
+ &val, 1);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3441,23 +3471,24 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, READ_PAGE_DATA);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG,
- 0xFF, trans_cfg);
+ 0xFF, trans_cfg);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, RING_BUFFER);
+ 0x01, RING_BUFFER);
trans_dma_enable(DMA_FROM_DEVICE, chip, 512, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
- MS_TRANSFER_START | MS_TM_NORMAL_READ);
+ MS_TRANSFER_START | MS_TM_NORMAL_READ);
rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
- MS_TRANSFER_END, MS_TRANSFER_END);
+ MS_TRANSFER_END, MS_TRANSFER_END);
rtsx_send_cmd_no_wait(chip);
- retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr,
- 512, scsi_sg_count(chip->srb),
- index, offset, DMA_FROM_DEVICE,
- chip->ms_timeout);
+ retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr, 512,
+ scsi_sg_count(chip->srb),
+ index, offset,
+ DMA_FROM_DEVICE,
+ chip->ms_timeout);
if (retval < 0) {
if (retval == -ETIMEDOUT) {
ms_set_err_code(chip, MS_TO_ERROR);
@@ -3489,7 +3520,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
}
static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
- u16 new_blk, u16 log_blk, u8 start_page,
+ u16 new_blk, u16 log_blk, u8 start_page,
u8 end_page, u8 *buf, unsigned int *index,
unsigned int *offset)
{
@@ -3500,7 +3531,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
if (!start_page) {
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 7);
+ SystemParm, 7);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3534,7 +3565,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
ms_set_err_code(chip, MS_NO_ERROR);
retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT, 1,
- NO_WAIT_INT);
+ NO_WAIT_INT);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3542,7 +3573,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
}
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, (6 + MS_EXTRA_SIZE));
+ SystemParm, (6 + MS_EXTRA_SIZE));
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3630,25 +3661,26 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC,
- 0xFF, WRITE_PAGE_DATA);
+ 0xFF, WRITE_PAGE_DATA);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG,
- 0xFF, WAIT_INT);
+ 0xFF, WAIT_INT);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, RING_BUFFER);
+ 0x01, RING_BUFFER);
trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
- MS_TRANSFER_START | MS_TM_NORMAL_WRITE);
+ MS_TRANSFER_START | MS_TM_NORMAL_WRITE);
rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
- MS_TRANSFER_END, MS_TRANSFER_END);
+ MS_TRANSFER_END, MS_TRANSFER_END);
rtsx_send_cmd_no_wait(chip);
- retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr,
- 512, scsi_sg_count(chip->srb),
- index, offset, DMA_TO_DEVICE,
- chip->ms_timeout);
+ retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr, 512,
+ scsi_sg_count(chip->srb),
+ index, offset,
+ DMA_TO_DEVICE,
+ chip->ms_timeout);
if (retval < 0) {
ms_set_err_code(chip, MS_TO_ERROR);
rtsx_clear_ms_error(chip);
@@ -3677,7 +3709,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
if (page_addr == (end_page - 1)) {
if (!(val & INT_REG_CED)) {
retval = ms_send_cmd(chip, BLOCK_END,
- WAIT_INT);
+ WAIT_INT);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3685,7 +3717,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
}
retval = ms_read_bytes(chip, GET_INT, 1,
- NO_WAIT_INT, &val, 1);
+ NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3693,7 +3725,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
}
if ((page_addr == (end_page - 1)) ||
- (page_addr == ms_card->page_off)) {
+ (page_addr == ms_card->page_off)) {
if (!(val & INT_REG_CED)) {
ms_set_err_code(chip,
MS_FLASH_WRITE_ERROR);
@@ -3711,13 +3743,13 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
}
static int ms_finish_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
- u16 log_blk, u8 page_off)
+ u16 log_blk, u8 page_off)
{
struct ms_info *ms_card = &chip->ms_card;
int retval, seg_no;
retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
- page_off, ms_card->page_off + 1);
+ page_off, ms_card->page_off + 1);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3740,13 +3772,13 @@ static int ms_finish_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
}
static int ms_prepare_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
- u16 log_blk, u8 start_page)
+ u16 log_blk, u8 start_page)
{
int retval;
if (start_page) {
retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
- 0, start_page);
+ 0, start_page);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3772,7 +3804,7 @@ int ms_delay_write(struct rtsx_chip *chip)
delay_write->delay_write_flag = 0;
retval = ms_finish_write(chip,
- delay_write->old_phyblock,
+ delay_write->old_phyblock,
delay_write->new_phyblock,
delay_write->logblock,
delay_write->pageoff);
@@ -3790,13 +3822,13 @@ static inline void ms_rw_fail(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
if (srb->sc_data_direction == DMA_FROM_DEVICE)
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
else
set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
}
static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- u32 start_sector, u16 sector_cnt)
+ u32 start_sector, u16 sector_cnt)
{
struct ms_info *ms_card = &chip->ms_card;
unsigned int lun = SCSI_LUN(srb);
@@ -3843,16 +3875,17 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (srb->sc_data_direction == DMA_TO_DEVICE) {
#ifdef MS_DELAY_WRITE
if (delay_write->delay_write_flag &&
- (delay_write->logblock == log_blk) &&
- (start_page > delay_write->pageoff)) {
+ (delay_write->logblock == log_blk) &&
+ (start_page > delay_write->pageoff)) {
delay_write->delay_write_flag = 0;
retval = ms_copy_page(chip,
- delay_write->old_phyblock,
- delay_write->new_phyblock, log_blk,
- delay_write->pageoff, start_page);
+ delay_write->old_phyblock,
+ delay_write->new_phyblock,
+ log_blk,
+ delay_write->pageoff, start_page);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3868,32 +3901,35 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
retval = ms_delay_write(chip);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
#endif
- old_blk = ms_get_l2p_tbl(chip, seg_no,
- log_blk - ms_start_idx[seg_no]);
+ old_blk = ms_get_l2p_tbl
+ (chip, seg_no,
+ log_blk - ms_start_idx[seg_no]);
new_blk = ms_get_unused_block(chip, seg_no);
if ((old_blk == 0xFFFF) || (new_blk == 0xFFFF)) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_prepare_write(chip, old_blk, new_blk,
- log_blk, start_page);
+ log_blk, start_page);
if (retval != STATUS_SUCCESS) {
- if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
- set_sense_type(chip, lun,
+ if (detect_card_cd(chip, MS_CARD) !=
+ STATUS_SUCCESS) {
+ set_sense_type
+ (chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return STATUS_FAIL;
}
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3906,21 +3942,21 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS) {
if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_NOT_PRESENT);
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return STATUS_FAIL;
}
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
#endif
old_blk = ms_get_l2p_tbl(chip, seg_no,
- log_blk - ms_start_idx[seg_no]);
+ log_blk - ms_start_idx[seg_no]);
if (old_blk == 0xFFFF) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3942,19 +3978,21 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (srb->sc_data_direction == DMA_FROM_DEVICE) {
retval = ms_read_multiple_pages(chip,
- old_blk, log_blk, start_page, end_page,
- ptr, &index, &offset);
+ old_blk, log_blk,
+ start_page, end_page,
+ ptr, &index, &offset);
} else {
- retval = ms_write_multiple_pages(chip, old_blk,
- new_blk, log_blk, start_page, end_page,
- ptr, &index, &offset);
+ retval = ms_write_multiple_pages(chip, old_blk, new_blk,
+ log_blk, start_page,
+ end_page, ptr, &index,
+ &offset);
}
if (retval != STATUS_SUCCESS) {
toggle_gpio(chip, 1);
if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_NOT_PRESENT);
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3970,8 +4008,8 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
ms_set_unused_block(chip, old_blk);
ms_set_l2p_tbl(chip, seg_no,
- log_blk - ms_start_idx[seg_no],
- new_blk);
+ log_blk - ms_start_idx[seg_no],
+ new_blk);
}
}
@@ -3995,14 +4033,14 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS) {
chip->card_fail |= MS_CARD;
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_NOT_PRESENT);
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return STATUS_FAIL;
}
}
old_blk = ms_get_l2p_tbl(chip, seg_no,
- log_blk - ms_start_idx[seg_no]);
+ log_blk - ms_start_idx[seg_no]);
if (old_blk == 0xFFFF) {
ms_rw_fail(srb, chip);
rtsx_trace(chip);
@@ -4034,10 +4072,12 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
delay_write->pageoff = end_page;
#else
retval = ms_finish_write(chip, old_blk, new_blk,
- log_blk, end_page);
+ log_blk, end_page);
if (retval != STATUS_SUCCESS) {
- if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
- set_sense_type(chip, lun,
+ if (detect_card_cd(chip, MS_CARD) !=
+ STATUS_SUCCESS) {
+ set_sense_type
+ (chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return STATUS_FAIL;
@@ -4057,17 +4097,17 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
}
int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- u32 start_sector, u16 sector_cnt)
+ u32 start_sector, u16 sector_cnt)
{
struct ms_info *ms_card = &chip->ms_card;
int retval;
if (CHK_MSPRO(ms_card))
retval = mspro_rw_multi_sector(srb, chip, start_sector,
- sector_cnt);
+ sector_cnt);
else
retval = ms_rw_multi_sector(srb, chip, start_sector,
- sector_cnt);
+ sector_cnt);
return retval;
}
@@ -4189,7 +4229,7 @@ static int mg_send_ex_cmd(struct rtsx_chip *chip, u8 cmd, u8 entry_num)
}
static int mg_set_tpc_para_sub(struct rtsx_chip *chip, int type,
- u8 mg_entry_num)
+ u8 mg_entry_num)
{
int retval;
u8 buf[6];
@@ -4306,7 +4346,7 @@ int mg_get_local_EKB(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA,
- 3, WAIT_INT, 0, 0, buf + 4, 1536);
+ 3, WAIT_INT, 0, 0, buf + 4, 1536);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
rtsx_clear_ms_error(chip);
@@ -4354,7 +4394,7 @@ int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT,
- buf, 32);
+ buf, 32);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
rtsx_trace(chip);
@@ -4437,7 +4477,7 @@ int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT,
- buf1, 32);
+ buf1, 32);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
rtsx_trace(chip);
@@ -4560,7 +4600,7 @@ int mg_get_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA,
- 2, WAIT_INT, 0, 0, buf + 4, 1024);
+ 2, WAIT_INT, 0, 0, buf + 4, 1024);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_clear_ms_error(chip);
@@ -4615,11 +4655,12 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
if (ms_card->mg_auth == 0) {
if ((buf[5] & 0xC0) != 0)
- set_sense_type(chip, lun,
+ set_sense_type
+ (chip, lun,
SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
else
set_sense_type(chip, lun,
- SENSE_TYPE_MG_WRITE_ERR);
+ SENSE_TYPE_MG_WRITE_ERR);
} else {
set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
}
@@ -4634,17 +4675,17 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC,
- 0xFF, PRO_WRITE_LONG_DATA);
+ 0xFF, PRO_WRITE_LONG_DATA);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, WAIT_INT);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, RING_BUFFER);
+ 0x01, RING_BUFFER);
trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
- MS_TRANSFER_START | MS_TM_NORMAL_WRITE);
+ MS_TRANSFER_START | MS_TM_NORMAL_WRITE);
rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
- MS_TRANSFER_END, MS_TRANSFER_END);
+ MS_TRANSFER_END, MS_TRANSFER_END);
rtsx_send_cmd_no_wait(chip);
@@ -4654,13 +4695,15 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_clear_ms_error(chip);
if (ms_card->mg_auth == 0) {
if ((buf[5] & 0xC0) != 0)
- set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ set_sense_type
+ (chip, lun,
+ SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
else
set_sense_type(chip, lun,
- SENSE_TYPE_MG_WRITE_ERR);
+ SENSE_TYPE_MG_WRITE_ERR);
} else {
set_sense_type(chip, lun,
- SENSE_TYPE_MG_WRITE_ERR);
+ SENSE_TYPE_MG_WRITE_ERR);
}
retval = STATUS_FAIL;
rtsx_trace(chip);
@@ -4669,16 +4712,17 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
#else
retval = ms_transfer_data(chip, MS_TM_AUTO_WRITE, PRO_WRITE_LONG_DATA,
- 2, WAIT_INT, 0, 0, buf + 4, 1024);
+ 2, WAIT_INT, 0, 0, buf + 4, 1024);
if ((retval != STATUS_SUCCESS) || check_ms_err(chip)) {
rtsx_clear_ms_error(chip);
if (ms_card->mg_auth == 0) {
if ((buf[5] & 0xC0) != 0)
- set_sense_type(chip, lun,
- SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ set_sense_type
+ (chip, lun,
+ SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
else
set_sense_type(chip, lun,
- SENSE_TYPE_MG_WRITE_ERR);
+ SENSE_TYPE_MG_WRITE_ERR);
} else {
set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
}
@@ -4706,11 +4750,12 @@ void ms_cleanup_work(struct rtsx_chip *chip)
}
if (CHK_MSHG(ms_card)) {
rtsx_write_register(chip, MS_CFG,
- MS_2K_SECTOR_MODE, 0x00);
+ MS_2K_SECTOR_MODE, 0x00);
}
}
#ifdef MS_DELAY_WRITE
- else if ((!CHK_MSPRO(ms_card)) && ms_card->delay_write.delay_write_flag) {
+ else if ((!CHK_MSPRO(ms_card)) &&
+ ms_card->delay_write.delay_write_flag) {
dev_dbg(rtsx_dev(chip), "MS: delay write\n");
ms_delay_write(chip);
ms_card->cleanup_counter = 0;
diff --git a/drivers/staging/rts5208/ms.h b/drivers/staging/rts5208/ms.h
index d7686399df97..71f98cc03eed 100644
--- a/drivers/staging/rts5208/ms.h
+++ b/drivers/staging/rts5208/ms.h
@@ -202,9 +202,9 @@ void mspro_polling_format_status(struct rtsx_chip *chip);
void mspro_stop_seq_mode(struct rtsx_chip *chip);
int reset_ms_card(struct rtsx_chip *chip);
int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- u32 start_sector, u16 sector_cnt);
+ u32 start_sector, u16 sector_cnt);
int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- int short_data_len, bool quick_format);
+ int short_data_len, bool quick_format);
void ms_free_l2p_tbl(struct rtsx_chip *chip);
void ms_cleanup_work(struct rtsx_chip *chip);
int ms_power_off_card3v3(struct rtsx_chip *chip);
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 5d65a5cdc748..68d75d0d5efd 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -107,8 +107,10 @@ static int slave_configure(struct scsi_device *sdev)
* the actual value or the modified one, depending on where the
* data comes from.
*/
- if (sdev->scsi_level < SCSI_2)
- sdev->scsi_level = sdev->sdev_target->scsi_level = SCSI_2;
+ if (sdev->scsi_level < SCSI_2) {
+ sdev->scsi_level = SCSI_2;
+ sdev->sdev_target->scsi_level = SCSI_2;
+ }
return 0;
}
@@ -120,12 +122,15 @@ static int slave_configure(struct scsi_device *sdev)
/* we use this macro to help us write into the buffer */
#undef SPRINTF
#define SPRINTF(args...) \
- do { if (pos < buffer+length) pos += sprintf(pos, ## args); } while (0)
+ do { \
+ if (pos < buffer + length) \
+ pos += sprintf(pos, ## args); \
+ } while (0)
/* queue a command */
/* This is always called with scsi_lock(host) held */
static int queuecommand_lck(struct scsi_cmnd *srb,
- void (*done)(struct scsi_cmnd *))
+ void (*done)(struct scsi_cmnd *))
{
struct rtsx_dev *dev = host_to_rtsx(srb->device->host);
struct rtsx_chip *chip = dev->chip;
@@ -313,7 +318,7 @@ static int rtsx_suspend(struct pci_dev *pci, pm_message_t state)
return 0;
/* lock the device pointers */
- mutex_lock(&(dev->dev_mutex));
+ mutex_lock(&dev->dev_mutex);
chip = dev->chip;
@@ -349,7 +354,7 @@ static int rtsx_resume(struct pci_dev *pci)
chip = dev->chip;
/* lock the device pointers */
- mutex_lock(&(dev->dev_mutex));
+ mutex_lock(&dev->dev_mutex);
pci_set_power_state(pci, PCI_D0);
pci_restore_state(pci);
@@ -418,7 +423,7 @@ static int rtsx_control_thread(void *__dev)
break;
/* lock the device pointers */
- mutex_lock(&(dev->dev_mutex));
+ mutex_lock(&dev->dev_mutex);
/* if the device has disconnected, we are free to exit */
if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
@@ -433,7 +438,7 @@ static int rtsx_control_thread(void *__dev)
/* has the command aborted ? */
if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
chip->srb->result = DID_ABORT << 16;
- goto SkipForAbort;
+ goto skip_for_abort;
}
scsi_unlock(host);
@@ -480,12 +485,12 @@ static int rtsx_control_thread(void *__dev)
else if (chip->srb->result != DID_ABORT << 16) {
chip->srb->scsi_done(chip->srb);
} else {
-SkipForAbort:
+skip_for_abort:
dev_err(&dev->pci->dev, "scsi command aborted\n");
}
if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
- complete(&(dev->notify));
+ complete(&dev->notify);
rtsx_set_stat(chip, RTSX_STAT_IDLE);
}
@@ -519,9 +524,9 @@ static int rtsx_polling_thread(void *__dev)
{
struct rtsx_dev *dev = __dev;
struct rtsx_chip *chip = dev->chip;
- struct sd_info *sd_card = &(chip->sd_card);
- struct xd_info *xd_card = &(chip->xd_card);
- struct ms_info *ms_card = &(chip->ms_card);
+ struct sd_info *sd_card = &chip->sd_card;
+ struct xd_info *xd_card = &chip->xd_card;
+ struct ms_info *ms_card = &chip->ms_card;
sd_card->cleanup_counter = 0;
xd_card->cleanup_counter = 0;
@@ -531,12 +536,11 @@ static int rtsx_polling_thread(void *__dev)
wait_timeout((delay_use + 5) * 1000);
for (;;) {
-
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));
/* lock the device pointers */
- mutex_lock(&(dev->dev_mutex));
+ mutex_lock(&dev->dev_mutex);
/* if the device has disconnected, we are free to exit */
if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
@@ -550,7 +554,7 @@ static int rtsx_polling_thread(void *__dev)
mspro_polling_format_status(chip);
/* lock the device pointers */
- mutex_lock(&(dev->dev_mutex));
+ mutex_lock(&dev->dev_mutex);
rtsx_polling_func(chip);
@@ -597,7 +601,7 @@ static irqreturn_t rtsx_interrupt(int irq, void *dev_id)
dev->trans_result = TRANS_RESULT_FAIL;
if (dev->done)
complete(dev->done);
- goto Exit;
+ goto exit;
}
}
@@ -619,7 +623,7 @@ static irqreturn_t rtsx_interrupt(int irq, void *dev_id)
}
}
-Exit:
+exit:
spin_unlock(&dev->reg_lock);
return IRQ_HANDLED;
}
@@ -724,9 +728,10 @@ static int rtsx_scan_thread(void *__dev)
dev_info(&dev->pci->dev,
"%s: waiting for device to settle before scanning\n",
CR_DRIVER_NAME);
- wait_event_interruptible_timeout(dev->delay_wait,
- rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT),
- delay_use * HZ);
+ wait_event_interruptible_timeout
+ (dev->delay_wait,
+ rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT),
+ delay_use * HZ);
}
/* If the device is still connected, perform the scanning */
@@ -844,7 +849,7 @@ static void rtsx_init_options(struct rtsx_chip *chip)
}
static int rtsx_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+ const struct pci_device_id *pci_id)
{
struct Scsi_Host *host;
struct rtsx_dev *dev;
@@ -879,18 +884,18 @@ static int rtsx_probe(struct pci_dev *pci,
dev = host_to_rtsx(host);
memset(dev, 0, sizeof(struct rtsx_dev));
- dev->chip = kzalloc(sizeof(struct rtsx_chip), GFP_KERNEL);
+ dev->chip = kzalloc(sizeof(*dev->chip), GFP_KERNEL);
if (!dev->chip) {
err = -ENOMEM;
goto errout;
}
spin_lock_init(&dev->reg_lock);
- mutex_init(&(dev->dev_mutex));
+ mutex_init(&dev->dev_mutex);
init_completion(&dev->cmnd_ready);
init_completion(&dev->control_exit);
init_completion(&dev->polling_exit);
- init_completion(&(dev->notify));
+ init_completion(&dev->notify);
init_completion(&dev->scanning_done);
init_waitqueue_head(&dev->delay_wait);
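
The rtsx.c hunks above are the kind of cleanup checkpatch flags: redundant parentheses around address-of arguments are dropped (mutex_lock(&(dev->dev_mutex)) becomes mutex_lock(&dev->dev_mutex)), goto labels move to lower case, and kzalloc takes sizeof(*ptr) instead of sizeof(struct type). A minimal stand-alone sketch of the same pattern, using a hypothetical demo_dev structure and a userspace pthread mutex rather than the driver's real types:

/* Illustrative only: hypothetical names, not taken from the rts5208 driver. */
#include <pthread.h>
#include <stddef.h>

struct demo_dev {
	pthread_mutex_t dev_mutex;
	int failed;
};

static int demo_touch(struct demo_dev *dev)
{
	int ret = 0;

	/* preferred: no extra parentheses around the address-of expression */
	pthread_mutex_lock(&dev->dev_mutex);

	if (dev->failed) {
		ret = -1;
		goto exit_unlock;	/* lower-case label, as in the patch */
	}

exit_unlock:
	pthread_mutex_unlock(&dev->dev_mutex);
	return ret;
}

int main(void)
{
	struct demo_dev dev = { .failed = 0 };

	pthread_mutex_init(&dev.dev_mutex, NULL);
	return demo_touch(&dev);
}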
diff --git a/drivers/staging/rts5208/rtsx.h b/drivers/staging/rts5208/rtsx.h
index e725b10ed087..575e5734f2a5 100644
--- a/drivers/staging/rts5208/rtsx.h
+++ b/drivers/staging/rts5208/rtsx.h
@@ -149,7 +149,7 @@ static inline void get_current_time(u8 *timeval_buf, int buf_len)
getnstimeofday64(&ts64);
- tv_usec = ts64.tv_nsec/NSEC_PER_USEC;
+ tv_usec = ts64.tv_nsec / NSEC_PER_USEC;
timeval_buf[0] = (u8)(ts64.tv_sec >> 24);
timeval_buf[1] = (u8)(ts64.tv_sec >> 16);
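
The single rtsx.h hunk only adds spaces around a binary operator (tv_nsec/NSEC_PER_USEC becomes tv_nsec / NSEC_PER_USEC), another coding-style rule. A tiny runnable sketch, with NSEC_PER_USEC re-declared locally for the example:

/* Illustrative only: constant re-declared here so the sketch is self-contained. */
#include <stdio.h>

#define NSEC_PER_USEC	1000L

int main(void)
{
	long tv_nsec = 123456789L;
	long tv_usec = tv_nsec / NSEC_PER_USEC;	/* spaces around '/' */

	printf("tv_usec = %ld\n", tv_usec);
	return 0;
}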
diff --git a/drivers/staging/rts5208/rtsx_card.c b/drivers/staging/rts5208/rtsx_card.c
index 97717744962d..a6b7bffc6714 100644
--- a/drivers/staging/rts5208/rtsx_card.c
+++ b/drivers/staging/rts5208/rtsx_card.c
@@ -33,11 +33,11 @@
void do_remaining_work(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
#ifdef XD_DELAY_WRITE
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
#endif
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
if (chip->card_ready & SD_CARD) {
if (sd_card->seq_mode) {
@@ -100,9 +100,9 @@ void try_to_switch_sdio_ctrl(struct rtsx_chip *chip)
if ((reg1 & 0xC0) && (reg2 & 0xC0)) {
chip->sd_int = 1;
rtsx_write_register(chip, SDIO_CTRL, 0xFF,
- SDIO_BUS_CTRL | SDIO_CD_CTRL);
+ SDIO_BUS_CTRL | SDIO_CD_CTRL);
rtsx_write_register(chip, PWR_GATE_CTRL,
- LDO3318_PWR_MASK, LDO_ON);
+ LDO3318_PWR_MASK, LDO_ON);
}
}
@@ -133,7 +133,7 @@ void dynamic_configure_sdio_aspm(struct rtsx_chip *chip)
if (!chip->sdio_aspm) {
dev_dbg(rtsx_dev(chip), "SDIO enter ASPM!\n");
rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFC,
- 0x30 | (chip->aspm_level[1] << 2));
+ 0x30 | (chip->aspm_level[1] << 2));
chip->sdio_aspm = 1;
}
} else {
@@ -154,7 +154,7 @@ void do_reset_sd_card(struct rtsx_chip *chip)
chip->sd_reset_counter, chip->card2lun[SD_CARD]);
if (chip->card2lun[SD_CARD] >= MAX_ALLOWED_LUN_CNT) {
- clear_bit(SD_NR, &(chip->need_reset));
+ clear_bit(SD_NR, &chip->need_reset);
chip->sd_reset_counter = 0;
chip->sd_show_cnt = 0;
return;
@@ -169,7 +169,7 @@ void do_reset_sd_card(struct rtsx_chip *chip)
if (chip->need_release & SD_CARD)
return;
if (retval == STATUS_SUCCESS) {
- clear_bit(SD_NR, &(chip->need_reset));
+ clear_bit(SD_NR, &chip->need_reset);
chip->sd_reset_counter = 0;
chip->sd_show_cnt = 0;
chip->card_ready |= SD_CARD;
@@ -177,7 +177,7 @@ void do_reset_sd_card(struct rtsx_chip *chip)
chip->rw_card[chip->card2lun[SD_CARD]] = sd_rw;
} else {
if (chip->sd_io || (chip->sd_reset_counter >= MAX_RESET_CNT)) {
- clear_bit(SD_NR, &(chip->need_reset));
+ clear_bit(SD_NR, &chip->need_reset);
chip->sd_reset_counter = 0;
chip->sd_show_cnt = 0;
} else {
@@ -208,7 +208,7 @@ void do_reset_xd_card(struct rtsx_chip *chip)
chip->xd_reset_counter, chip->card2lun[XD_CARD]);
if (chip->card2lun[XD_CARD] >= MAX_ALLOWED_LUN_CNT) {
- clear_bit(XD_NR, &(chip->need_reset));
+ clear_bit(XD_NR, &chip->need_reset);
chip->xd_reset_counter = 0;
chip->xd_show_cnt = 0;
return;
@@ -223,14 +223,14 @@ void do_reset_xd_card(struct rtsx_chip *chip)
if (chip->need_release & XD_CARD)
return;
if (retval == STATUS_SUCCESS) {
- clear_bit(XD_NR, &(chip->need_reset));
+ clear_bit(XD_NR, &chip->need_reset);
chip->xd_reset_counter = 0;
chip->card_ready |= XD_CARD;
chip->card_fail &= ~XD_CARD;
chip->rw_card[chip->card2lun[XD_CARD]] = xd_rw;
} else {
if (chip->xd_reset_counter >= MAX_RESET_CNT) {
- clear_bit(XD_NR, &(chip->need_reset));
+ clear_bit(XD_NR, &chip->need_reset);
chip->xd_reset_counter = 0;
chip->xd_show_cnt = 0;
} else {
@@ -256,7 +256,7 @@ void do_reset_ms_card(struct rtsx_chip *chip)
chip->ms_reset_counter, chip->card2lun[MS_CARD]);
if (chip->card2lun[MS_CARD] >= MAX_ALLOWED_LUN_CNT) {
- clear_bit(MS_NR, &(chip->need_reset));
+ clear_bit(MS_NR, &chip->need_reset);
chip->ms_reset_counter = 0;
chip->ms_show_cnt = 0;
return;
@@ -271,14 +271,14 @@ void do_reset_ms_card(struct rtsx_chip *chip)
if (chip->need_release & MS_CARD)
return;
if (retval == STATUS_SUCCESS) {
- clear_bit(MS_NR, &(chip->need_reset));
+ clear_bit(MS_NR, &chip->need_reset);
chip->ms_reset_counter = 0;
chip->card_ready |= MS_CARD;
chip->card_fail &= ~MS_CARD;
chip->rw_card[chip->card2lun[MS_CARD]] = ms_rw;
} else {
if (chip->ms_reset_counter >= MAX_RESET_CNT) {
- clear_bit(MS_NR, &(chip->need_reset));
+ clear_bit(MS_NR, &chip->need_reset);
chip->ms_reset_counter = 0;
chip->ms_show_cnt = 0;
} else {
@@ -300,7 +300,7 @@ static void release_sdio(struct rtsx_chip *chip)
{
if (chip->sd_io) {
rtsx_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR,
- SD_STOP | SD_CLR_ERR);
+ SD_STOP | SD_CLR_ERR);
if (chip->chip_insert_with_sdio) {
chip->chip_insert_with_sdio = 0;
@@ -369,7 +369,7 @@ void rtsx_reset_cards(struct rtsx_chip *chip)
rtsx_disable_aspm(chip);
if ((chip->need_reset & SD_CARD) && chip->chip_insert_with_sdio)
- clear_bit(SD_NR, &(chip->need_reset));
+ clear_bit(SD_NR, &chip->need_reset);
if (chip->need_reset & XD_CARD) {
chip->card_exist |= XD_CARD;
@@ -381,8 +381,8 @@ void rtsx_reset_cards(struct rtsx_chip *chip)
}
if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN)) {
if (chip->card_exist & XD_CARD) {
- clear_bit(SD_NR, &(chip->need_reset));
- clear_bit(MS_NR, &(chip->need_reset));
+ clear_bit(SD_NR, &chip->need_reset);
+ clear_bit(MS_NR, &chip->need_reset);
}
}
if (chip->need_reset & SD_CARD) {
@@ -449,7 +449,7 @@ void rtsx_reinit_cards(struct rtsx_chip *chip, int reset_chip)
#ifdef DISABLE_CARD_INT
void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset,
- unsigned long *need_release)
+ unsigned long *need_release)
{
u8 release_map = 0, reset_map = 0;
@@ -502,13 +502,13 @@ void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset,
reset_map = 0;
if (!(chip->card_exist & XD_CARD) &&
- (xd_cnt > (DEBOUNCE_CNT-1)))
+ (xd_cnt > (DEBOUNCE_CNT - 1)))
reset_map |= XD_CARD;
if (!(chip->card_exist & SD_CARD) &&
- (sd_cnt > (DEBOUNCE_CNT-1)))
+ (sd_cnt > (DEBOUNCE_CNT - 1)))
reset_map |= SD_CARD;
if (!(chip->card_exist & MS_CARD) &&
- (ms_cnt > (DEBOUNCE_CNT-1)))
+ (ms_cnt > (DEBOUNCE_CNT - 1)))
reset_map |= MS_CARD;
}
@@ -531,23 +531,23 @@ void rtsx_init_cards(struct rtsx_chip *chip)
}
#ifdef DISABLE_CARD_INT
- card_cd_debounce(chip, &(chip->need_reset), &(chip->need_release));
+ card_cd_debounce(chip, &chip->need_reset, &chip->need_release);
#endif
if (chip->need_release) {
if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN)) {
if (chip->int_reg & XD_EXIST) {
- clear_bit(SD_NR, &(chip->need_release));
- clear_bit(MS_NR, &(chip->need_release));
+ clear_bit(SD_NR, &chip->need_release);
+ clear_bit(MS_NR, &chip->need_release);
}
}
if (!(chip->card_exist & SD_CARD) && !chip->sd_io)
- clear_bit(SD_NR, &(chip->need_release));
+ clear_bit(SD_NR, &chip->need_release);
if (!(chip->card_exist & XD_CARD))
- clear_bit(XD_NR, &(chip->need_release));
+ clear_bit(XD_NR, &chip->need_release);
if (!(chip->card_exist & MS_CARD))
- clear_bit(MS_NR, &(chip->need_release));
+ clear_bit(MS_NR, &chip->need_release);
dev_dbg(rtsx_dev(chip), "chip->need_release = 0x%x\n",
(unsigned int)(chip->need_release));
@@ -556,8 +556,10 @@ void rtsx_init_cards(struct rtsx_chip *chip)
if (chip->need_release) {
if (chip->ocp_stat & (CARD_OC_NOW | CARD_OC_EVER))
rtsx_write_register(chip, OCPCLR,
- CARD_OC_INT_CLR | CARD_OC_CLR,
- CARD_OC_INT_CLR | CARD_OC_CLR);
+ CARD_OC_INT_CLR |
+ CARD_OC_CLR,
+ CARD_OC_INT_CLR |
+ CARD_OC_CLR);
chip->ocp_stat = 0;
}
#endif
@@ -567,7 +569,7 @@ void rtsx_init_cards(struct rtsx_chip *chip)
}
if (chip->need_release & SD_CARD) {
- clear_bit(SD_NR, &(chip->need_release));
+ clear_bit(SD_NR, &chip->need_release);
chip->card_exist &= ~SD_CARD;
chip->card_ejected &= ~SD_CARD;
chip->card_fail &= ~SD_CARD;
@@ -580,7 +582,7 @@ void rtsx_init_cards(struct rtsx_chip *chip)
}
if (chip->need_release & XD_CARD) {
- clear_bit(XD_NR, &(chip->need_release));
+ clear_bit(XD_NR, &chip->need_release);
chip->card_exist &= ~XD_CARD;
chip->card_ejected &= ~XD_CARD;
chip->card_fail &= ~XD_CARD;
@@ -590,13 +592,13 @@ void rtsx_init_cards(struct rtsx_chip *chip)
release_xd_card(chip);
if (CHECK_PID(chip, 0x5288) &&
- CHECK_BARO_PKG(chip, QFN))
+ CHECK_BARO_PKG(chip, QFN))
rtsx_write_register(chip, HOST_SLEEP_STATE,
- 0xC0, 0xC0);
+ 0xC0, 0xC0);
}
if (chip->need_release & MS_CARD) {
- clear_bit(MS_NR, &(chip->need_release));
+ clear_bit(MS_NR, &chip->need_release);
chip->card_exist &= ~MS_CARD;
chip->card_ejected &= ~MS_CARD;
chip->card_fail &= ~MS_CARD;
@@ -650,7 +652,7 @@ int switch_ssc_clock(struct rtsx_chip *chip, int clk)
return STATUS_FAIL;
}
- mcu_cnt = (u8)(125/clk + 3);
+ mcu_cnt = (u8)(125 / clk + 3);
if (mcu_cnt > 7)
mcu_cnt = 7;
@@ -681,9 +683,9 @@ int switch_ssc_clock(struct rtsx_chip *chip, int clk)
rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
if (sd_vpclk_phase_reset) {
rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL,
- PHASE_NOT_RESET, 0);
+ PHASE_NOT_RESET, 0);
rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL,
- PHASE_NOT_RESET, PHASE_NOT_RESET);
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
}
retval = rtsx_send_cmd(chip, 0, WAIT_TIME);
@@ -850,7 +852,7 @@ int switch_normal_clock(struct rtsx_chip *chip, int clk)
}
void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip,
- u32 byte_cnt, u8 pack_size)
+ u32 byte_cnt, u8 pack_size)
{
if (pack_size > DMA_1024)
pack_size = DMA_512;
@@ -864,11 +866,11 @@ void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip,
if (dir == DMA_FROM_DEVICE) {
rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL,
- 0x03 | DMA_PACK_SIZE_MASK,
+ 0x03 | DMA_PACK_SIZE_MASK,
DMA_DIR_FROM_CARD | DMA_EN | pack_size);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL,
- 0x03 | DMA_PACK_SIZE_MASK,
+ 0x03 | DMA_PACK_SIZE_MASK,
DMA_DIR_TO_CARD | DMA_EN | pack_size);
}
@@ -978,13 +980,13 @@ int card_power_off(struct rtsx_chip *chip, u8 card)
}
int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- u32 sec_addr, u16 sec_cnt)
+ u32 sec_addr, u16 sec_cnt)
{
int retval;
unsigned int lun = SCSI_LUN(srb);
int i;
- if (chip->rw_card[lun] == NULL) {
+ if (!chip->rw_card[lun]) {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1115,7 +1117,7 @@ void turn_on_led(struct rtsx_chip *chip, u8 gpio)
{
if (CHECK_PID(chip, 0x5288))
rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio),
- (u8)(1 << gpio));
+ (u8)(1 << gpio));
else
rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0);
}
@@ -1126,7 +1128,7 @@ void turn_off_led(struct rtsx_chip *chip, u8 gpio)
rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0);
else
rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio),
- (u8)(1 << gpio));
+ (u8)(1 << gpio));
}
int detect_card_cd(struct rtsx_chip *chip, int card)
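
Most of the rtsx_card.c changes above do not alter behaviour at all; they re-indent wrapped argument lists so the continuation lines start under the opening parenthesis, which is what checkpatch's "Alignment should match open parenthesis" check asks for. A small illustration with a hypothetical helper:

/* Illustrative only: demo_write_reg() is a hypothetical stand-in. */
#include <stdio.h>

static void demo_write_reg(unsigned int reg, unsigned int mask,
			   unsigned int val)	/* aligned under the '(' above */
{
	printf("reg 0x%02x: mask 0x%02x val 0x%02x\n", reg, mask, val);
}

int main(void)
{
	/* wrapped call: the second line starts under the opening parenthesis */
	demo_write_reg(0x10, 0xff,
		       0x30);
	return 0;
}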
diff --git a/drivers/staging/rts5208/rtsx_card.h b/drivers/staging/rts5208/rtsx_card.h
index 56df9a431d6d..aa37705bae39 100644
--- a/drivers/staging/rts5208/rtsx_card.h
+++ b/drivers/staging/rts5208/rtsx_card.h
@@ -1011,9 +1011,9 @@ int switch_normal_clock(struct rtsx_chip *chip, int clk);
int enable_card_clock(struct rtsx_chip *chip, u8 card);
int disable_card_clock(struct rtsx_chip *chip, u8 card);
int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- u32 sec_addr, u16 sec_cnt);
+ u32 sec_addr, u16 sec_cnt);
void trans_dma_enable(enum dma_data_direction dir,
- struct rtsx_chip *chip, u32 byte_cnt, u8 pack_size);
+ struct rtsx_chip *chip, u32 byte_cnt, u8 pack_size);
void toggle_gpio(struct rtsx_chip *chip, u8 gpio);
void turn_on_led(struct rtsx_chip *chip, u8 gpio);
void turn_off_led(struct rtsx_chip *chip, u8 gpio);
@@ -1030,10 +1030,10 @@ u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun);
static inline u32 get_card_size(struct rtsx_chip *chip, unsigned int lun)
{
#ifdef SUPPORT_SD_LOCK
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
if ((get_lun_card(chip, lun) == SD_CARD) &&
- (sd_card->sd_lock_status & SD_LOCKED))
+ (sd_card->sd_lock_status & SD_LOCKED))
return 0;
return chip->capacity[lun];
@@ -1073,25 +1073,25 @@ static inline int card_power_off_all(struct rtsx_chip *chip)
static inline void rtsx_clear_xd_error(struct rtsx_chip *chip)
{
rtsx_write_register(chip, CARD_STOP, XD_STOP | XD_CLR_ERR,
- XD_STOP | XD_CLR_ERR);
+ XD_STOP | XD_CLR_ERR);
}
static inline void rtsx_clear_sd_error(struct rtsx_chip *chip)
{
rtsx_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR,
- SD_STOP | SD_CLR_ERR);
+ SD_STOP | SD_CLR_ERR);
}
static inline void rtsx_clear_ms_error(struct rtsx_chip *chip)
{
rtsx_write_register(chip, CARD_STOP, MS_STOP | MS_CLR_ERR,
- MS_STOP | MS_CLR_ERR);
+ MS_STOP | MS_CLR_ERR);
}
static inline void rtsx_clear_spi_error(struct rtsx_chip *chip)
{
rtsx_write_register(chip, CARD_STOP, SPI_STOP | SPI_CLR_ERR,
- SPI_STOP | SPI_CLR_ERR);
+ SPI_STOP | SPI_CLR_ERR);
}
#ifdef SUPPORT_SDIO_ASPM
diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c
index a10dd6220a7b..3511157a2c78 100644
--- a/drivers/staging/rts5208/rtsx_chip.c
+++ b/drivers/staging/rts5208/rtsx_chip.c
@@ -114,7 +114,8 @@ static int rtsx_pre_handle_sdio_old(struct rtsx_chip *chip)
if (chip->asic_code) {
retval = rtsx_write_register(chip, CARD_PULL_CTL5,
0xFF,
- MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU);
+ MS_INS_PU | SD_WP_PU |
+ SD_CD_PU | SD_CMD_PU);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -240,10 +241,10 @@ static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
return STATUS_FAIL;
}
} else {
- retval = rtsx_write_register(chip,
- FPGA_PULL_CTL,
- FPGA_SD_PULL_CTL_BIT | 0x20,
- 0);
+ retval = rtsx_write_register
+ (chip, FPGA_PULL_CTL,
+ FPGA_SD_PULL_CTL_BIT | 0x20,
+ 0);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -713,7 +714,8 @@ nextcard:
if (chip->ft2_fast_mode) {
retval = rtsx_write_register(chip, CARD_PWR_CTL, 0xFF,
- MS_PARTIAL_POWER_ON | SD_PARTIAL_POWER_ON);
+ MS_PARTIAL_POWER_ON |
+ SD_PARTIAL_POWER_ON);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -1567,7 +1569,8 @@ int rtsx_write_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 mask,
}
retval = rtsx_write_register(chip, CFGRWCTL, 0xFF,
- 0x80 | mode | ((func_no & 0x03) << 4));
+ 0x80 | mode |
+ ((func_no & 0x03) << 4));
if (retval) {
rtsx_trace(chip);
return retval;
diff --git a/drivers/staging/rts5208/rtsx_chip.h b/drivers/staging/rts5208/rtsx_chip.h
index f36642817c6e..4f6e3c1c4621 100644
--- a/drivers/staging/rts5208/rtsx_chip.h
+++ b/drivers/staging/rts5208/rtsx_chip.h
@@ -130,16 +130,20 @@
#define PRDCT_REV_LEN 4 /* Product LOT Length */
/* Dynamic flag definitions: used in set_bit() etc. */
-#define RTSX_FLIDX_TRANS_ACTIVE 18 /* 0x00040000 transfer is active */
-#define RTSX_FLIDX_ABORTING 20 /* 0x00100000 abort is in progress */
-#define RTSX_FLIDX_DISCONNECTING 21 /* 0x00200000 disconnect in progress */
+/* 0x00040000 transfer is active */
+#define RTSX_FLIDX_TRANS_ACTIVE 18
+/* 0x00100000 abort is in progress */
+#define RTSX_FLIDX_ABORTING 20
+/* 0x00200000 disconnect in progress */
+#define RTSX_FLIDX_DISCONNECTING 21
#define ABORTING_OR_DISCONNECTING ((1UL << US_FLIDX_ABORTING) | \
(1UL << US_FLIDX_DISCONNECTING))
-#define RTSX_FLIDX_RESETTING 22 /* 0x00400000 device reset in progress */
-#define RTSX_FLIDX_TIMED_OUT 23 /* 0x00800000 SCSI midlayer timed out */
-
+/* 0x00400000 device reset in progress */
+#define RTSX_FLIDX_RESETTING 22
+/* 0x00800000 SCSI midlayer timed out */
+#define RTSX_FLIDX_TIMED_OUT 23
#define DRCT_ACCESS_DEV 0x00 /* Direct Access Device */
#define RMB_DISC 0x80 /* The Device is Removable */
#define ANSI_SCSI2 0x02 /* Based on ANSI-SCSI2 */
@@ -285,23 +289,24 @@ struct sense_data_t {
#define CARD_INT (XD_INT | MS_INT | SD_INT)
#define NEED_COMPLETE_INT (DATA_DONE_INT | TRANS_OK_INT | TRANS_FAIL_INT)
-#define RTSX_INT (CMD_DONE_INT | NEED_COMPLETE_INT | CARD_INT | GPIO0_INT | OC_INT)
+#define RTSX_INT (CMD_DONE_INT | NEED_COMPLETE_INT | CARD_INT | \
+ GPIO0_INT | OC_INT)
#define CARD_EXIST (XD_EXIST | MS_EXIST | SD_EXIST)
/* Bus interrupt enable register */
-#define CMD_DONE_INT_EN (1 << 31)
-#define DATA_DONE_INT_EN (1 << 30)
-#define TRANS_OK_INT_EN (1 << 29)
-#define TRANS_FAIL_INT_EN (1 << 28)
-#define XD_INT_EN (1 << 27)
-#define MS_INT_EN (1 << 26)
-#define SD_INT_EN (1 << 25)
-#define GPIO0_INT_EN (1 << 24)
-#define OC_INT_EN (1 << 23)
+#define CMD_DONE_INT_EN BIT(31)
+#define DATA_DONE_INT_EN BIT(30)
+#define TRANS_OK_INT_EN BIT(29)
+#define TRANS_FAIL_INT_EN BIT(28)
+#define XD_INT_EN BIT(27)
+#define MS_INT_EN BIT(26)
+#define SD_INT_EN BIT(25)
+#define GPIO0_INT_EN BIT(24)
+#define OC_INT_EN BIT(23)
#define DELINK_INT_EN GPIO0_INT_EN
-#define MS_OC_INT_EN (1 << 23)
-#define SD_OC_INT_EN (1 << 22)
+#define MS_OC_INT_EN BIT(23)
+#define SD_OC_INT_EN BIT(22)
#define READ_REG_CMD 0
#define WRITE_REG_CMD 1
@@ -318,10 +323,10 @@ struct sense_data_t {
#define MS_NR 3
#define XD_NR 4
#define SPI_NR 7
-#define SD_CARD (1 << SD_NR)
-#define MS_CARD (1 << MS_NR)
-#define XD_CARD (1 << XD_NR)
-#define SPI_CARD (1 << SPI_NR)
+#define SD_CARD BIT(SD_NR)
+#define MS_CARD BIT(MS_NR)
+#define XD_CARD BIT(XD_NR)
+#define SPI_CARD BIT(SPI_NR)
#define MAX_ALLOWED_LUN_CNT 8
@@ -393,14 +398,23 @@ struct zone_entry {
/* SD card */
#define CHK_SD(sd_card) (((sd_card)->sd_type & 0xFF) == TYPE_SD)
-#define CHK_SD_HS(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_HS))
-#define CHK_SD_SDR50(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_SDR50))
-#define CHK_SD_DDR50(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_DDR50))
-#define CHK_SD_SDR104(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_SDR104))
-#define CHK_SD_HCXC(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_HCXC))
-#define CHK_SD_HC(sd_card) (CHK_SD_HCXC(sd_card) && ((sd_card)->capacity <= 0x4000000))
-#define CHK_SD_XC(sd_card) (CHK_SD_HCXC(sd_card) && ((sd_card)->capacity > 0x4000000))
-#define CHK_SD30_SPEED(sd_card) (CHK_SD_SDR50(sd_card) || CHK_SD_DDR50(sd_card) || CHK_SD_SDR104(sd_card))
+#define CHK_SD_HS(sd_card) (CHK_SD(sd_card) && \
+ ((sd_card)->sd_type & SD_HS))
+#define CHK_SD_SDR50(sd_card) (CHK_SD(sd_card) && \
+ ((sd_card)->sd_type & SD_SDR50))
+#define CHK_SD_DDR50(sd_card) (CHK_SD(sd_card) && \
+ ((sd_card)->sd_type & SD_DDR50))
+#define CHK_SD_SDR104(sd_card) (CHK_SD(sd_card) && \
+ ((sd_card)->sd_type & SD_SDR104))
+#define CHK_SD_HCXC(sd_card) (CHK_SD(sd_card) && \
+ ((sd_card)->sd_type & SD_HCXC))
+#define CHK_SD_HC(sd_card) (CHK_SD_HCXC(sd_card) && \
+ ((sd_card)->capacity <= 0x4000000))
+#define CHK_SD_XC(sd_card) (CHK_SD_HCXC(sd_card) && \
+ ((sd_card)->capacity > 0x4000000))
+#define CHK_SD30_SPEED(sd_card) (CHK_SD_SDR50(sd_card) || \
+ CHK_SD_DDR50(sd_card) || \
+ CHK_SD_SDR104(sd_card))
#define SET_SD(sd_card) ((sd_card)->sd_type = TYPE_SD)
#define SET_SD_HS(sd_card) ((sd_card)->sd_type |= SD_HS)
@@ -416,13 +430,20 @@ struct zone_entry {
#define CLR_SD_HCXC(sd_card) ((sd_card)->sd_type &= ~SD_HCXC)
/* MMC card */
-#define CHK_MMC(sd_card) (((sd_card)->sd_type & 0xFF) == TYPE_MMC)
-#define CHK_MMC_26M(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_26M))
-#define CHK_MMC_52M(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_52M))
-#define CHK_MMC_4BIT(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_4BIT))
-#define CHK_MMC_8BIT(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_8BIT))
-#define CHK_MMC_SECTOR_MODE(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_SECTOR_MODE))
-#define CHK_MMC_DDR52(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_DDR52))
+#define CHK_MMC(sd_card) (((sd_card)->sd_type & 0xFF) == \
+ TYPE_MMC)
+#define CHK_MMC_26M(sd_card) (CHK_MMC(sd_card) && \
+ ((sd_card)->sd_type & MMC_26M))
+#define CHK_MMC_52M(sd_card) (CHK_MMC(sd_card) && \
+ ((sd_card)->sd_type & MMC_52M))
+#define CHK_MMC_4BIT(sd_card) (CHK_MMC(sd_card) && \
+ ((sd_card)->sd_type & MMC_4BIT))
+#define CHK_MMC_8BIT(sd_card) (CHK_MMC(sd_card) && \
+ ((sd_card)->sd_type & MMC_8BIT))
+#define CHK_MMC_SECTOR_MODE(sd_card) (CHK_MMC(sd_card) && \
+ ((sd_card)->sd_type & MMC_SECTOR_MODE))
+#define CHK_MMC_DDR52(sd_card) (CHK_MMC(sd_card) && \
+ ((sd_card)->sd_type & MMC_DDR52))
#define SET_MMC(sd_card) ((sd_card)->sd_type = TYPE_MMC)
#define SET_MMC_26M(sd_card) ((sd_card)->sd_type |= MMC_26M)
@@ -439,7 +460,8 @@ struct zone_entry {
#define CLR_MMC_SECTOR_MODE(sd_card) ((sd_card)->sd_type &= ~MMC_SECTOR_MODE)
#define CLR_MMC_DDR52(sd_card) ((sd_card)->sd_type &= ~MMC_DDR52)
-#define CHK_MMC_HS(sd_card) (CHK_MMC_52M(sd_card) && CHK_MMC_26M(sd_card))
+#define CHK_MMC_HS(sd_card) (CHK_MMC_52M(sd_card) && \
+ CHK_MMC_26M(sd_card))
#define CLR_MMC_HS(sd_card) \
do { \
CLR_MMC_DDR52(sd_card); \
@@ -450,12 +472,18 @@ do { \
#define SD_SUPPORT_CLASS_TEN 0x01
#define SD_SUPPORT_1V8 0x02
-#define SD_SET_CLASS_TEN(sd_card) ((sd_card)->sd_setting |= SD_SUPPORT_CLASS_TEN)
-#define SD_CHK_CLASS_TEN(sd_card) ((sd_card)->sd_setting & SD_SUPPORT_CLASS_TEN)
-#define SD_CLR_CLASS_TEN(sd_card) ((sd_card)->sd_setting &= ~SD_SUPPORT_CLASS_TEN)
-#define SD_SET_1V8(sd_card) ((sd_card)->sd_setting |= SD_SUPPORT_1V8)
-#define SD_CHK_1V8(sd_card) ((sd_card)->sd_setting & SD_SUPPORT_1V8)
-#define SD_CLR_1V8(sd_card) ((sd_card)->sd_setting &= ~SD_SUPPORT_1V8)
+#define SD_SET_CLASS_TEN(sd_card) ((sd_card)->sd_setting |= \
+ SD_SUPPORT_CLASS_TEN)
+#define SD_CHK_CLASS_TEN(sd_card) ((sd_card)->sd_setting & \
+ SD_SUPPORT_CLASS_TEN)
+#define SD_CLR_CLASS_TEN(sd_card) ((sd_card)->sd_setting &= \
+ ~SD_SUPPORT_CLASS_TEN)
+#define SD_SET_1V8(sd_card) ((sd_card)->sd_setting |= \
+ SD_SUPPORT_1V8)
+#define SD_CHK_1V8(sd_card) ((sd_card)->sd_setting & \
+ SD_SUPPORT_1V8)
+#define SD_CLR_1V8(sd_card) ((sd_card)->sd_setting &= \
+ ~SD_SUPPORT_1V8)
struct sd_info {
u16 sd_type;
@@ -544,9 +572,12 @@ struct xd_info {
#define HG8BIT (MS_HG | MS_8BIT)
#define CHK_MSPRO(ms_card) (((ms_card)->ms_type & 0xFF) == TYPE_MSPRO)
-#define CHK_HG8BIT(ms_card) (CHK_MSPRO(ms_card) && (((ms_card)->ms_type & HG8BIT) == HG8BIT))
-#define CHK_MSXC(ms_card) (CHK_MSPRO(ms_card) && ((ms_card)->ms_type & MS_XC))
-#define CHK_MSHG(ms_card) (CHK_MSPRO(ms_card) && ((ms_card)->ms_type & MS_HG))
+#define CHK_HG8BIT(ms_card) (CHK_MSPRO(ms_card) && \
+ (((ms_card)->ms_type & HG8BIT) == HG8BIT))
+#define CHK_MSXC(ms_card) (CHK_MSPRO(ms_card) && \
+ ((ms_card)->ms_type & MS_XC))
+#define CHK_MSHG(ms_card) (CHK_MSPRO(ms_card) && \
+ ((ms_card)->ms_type & MS_HG))
#define CHK_MS8BIT(ms_card) (((ms_card)->ms_type & MS_8BIT))
#define CHK_MS4BIT(ms_card) (((ms_card)->ms_type & MS_4BIT))
@@ -679,8 +710,10 @@ struct trace_msg_t {
#define CLR_SDIO_EXIST(chip) ((chip)->sdio_func_exist &= ~SDIO_EXIST)
#define CHK_SDIO_IGNORED(chip) ((chip)->sdio_func_exist & SDIO_IGNORED)
-#define SET_SDIO_IGNORED(chip) ((chip)->sdio_func_exist |= SDIO_IGNORED)
-#define CLR_SDIO_IGNORED(chip) ((chip)->sdio_func_exist &= ~SDIO_IGNORED)
+#define SET_SDIO_IGNORED(chip) ((chip)->sdio_func_exist |= \
+ SDIO_IGNORED)
+#define CLR_SDIO_IGNORED(chip) ((chip)->sdio_func_exist &= \
+ ~SDIO_IGNORED)
struct rtsx_chip {
struct rtsx_dev *rtsx;
@@ -957,12 +990,12 @@ void rtsx_stop_cmd(struct rtsx_chip *chip, int card);
int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data);
int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data);
int rtsx_write_cfg_dw(struct rtsx_chip *chip,
- u8 func_no, u16 addr, u32 mask, u32 val);
+ u8 func_no, u16 addr, u32 mask, u32 val);
int rtsx_read_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 *val);
int rtsx_write_cfg_seq(struct rtsx_chip *chip,
- u8 func, u16 addr, u8 *buf, int len);
+ u8 func, u16 addr, u8 *buf, int len);
int rtsx_read_cfg_seq(struct rtsx_chip *chip,
- u8 func, u16 addr, u8 *buf, int len);
+ u8 func, u16 addr, u8 *buf, int len);
int rtsx_write_phy_register(struct rtsx_chip *chip, u8 addr, u16 val);
int rtsx_read_phy_register(struct rtsx_chip *chip, u8 addr, u16 *val);
int rtsx_read_efuse(struct rtsx_chip *chip, u8 addr, u8 *val);
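
In rtsx_chip.h the interrupt-enable and card-type masks move from open-coded (1 << n) shifts to the BIT() helper, the form checkpatch prefers; it also sidesteps the signed-overflow trap of shifting a plain int by 31. A stand-alone sketch of the idea, with BIT() defined locally the way the kernel defines it conceptually (see include/linux/bits.h) and hypothetical DEMO_* names:

/* Illustrative only: local BIT() definition and made-up mask names. */
#include <stdio.h>

#define BIT(nr)			(1UL << (nr))

#define DEMO_CMD_DONE_INT_EN	BIT(31)	/* instead of (1 << 31) */
#define DEMO_SD_INT_EN		BIT(25)

int main(void)
{
	unsigned long irq_en = DEMO_CMD_DONE_INT_EN | DEMO_SD_INT_EN;

	printf("irq_en = 0x%lx\n", irq_en);
	return 0;
}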
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index becb4bba166c..a95c5de1aa00 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -354,7 +354,7 @@ void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type)
case SENSE_TYPE_MEDIA_INVALID_CMD_FIELD:
set_sense_data(chip, lun, CUR_ERR, ILGAL_REQ, 0,
- ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1);
+ ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1);
break;
case SENSE_TYPE_FORMAT_IN_PROGRESS:
@@ -397,10 +397,10 @@ void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type)
}
void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
- u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0,
+ u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0,
u16 sns_key_info1)
{
- struct sense_data_t *sense = &(chip->sense_buffer[lun]);
+ struct sense_data_t *sense = &chip->sense_buffer[lun];
sense->err_code = err_code;
sense->sense_key = sense_key;
@@ -436,7 +436,7 @@ static int test_unit_ready(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#ifdef SUPPORT_SD_LOCK
if (get_lun_card(chip, SCSI_LUN(srb)) == SD_CARD) {
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
if (sd_card->sd_lock_notify) {
sd_card->sd_lock_notify = 0;
@@ -444,7 +444,7 @@ static int test_unit_ready(struct scsi_cmnd *srb, struct rtsx_chip *chip)
return TRANSPORT_FAILED;
} else if (sd_card->sd_lock_status & SD_LOCKED) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+ SENSE_TYPE_MEDIA_READ_FORBIDDEN);
return TRANSPORT_FAILED;
}
}
@@ -514,7 +514,7 @@ static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#ifdef SUPPORT_MAGIC_GATE
if ((chip->mspro_formatter_enable) &&
- (chip->lun2card[lun] & MS_CARD))
+ (chip->lun2card[lun] & MS_CARD))
#else
if (chip->mspro_formatter_enable)
#endif
@@ -603,7 +603,7 @@ static int allow_medium_removal(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (prevent) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -615,13 +615,13 @@ static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
struct sense_data_t *sense;
unsigned int lun = SCSI_LUN(srb);
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
unsigned char *tmp, *buf;
- sense = &(chip->sense_buffer[lun]);
+ sense = &chip->sense_buffer[lun];
if ((get_lun_card(chip, lun) == MS_CARD) &&
- ms_card->pro_under_formatting) {
+ ms_card->pro_under_formatting) {
if (ms_card->format_status == FORMAT_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
ms_card->pro_under_formatting = 0;
@@ -629,7 +629,7 @@ static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else if (ms_card->format_status == FORMAT_IN_PROGRESS) {
/* Logical Unit Not Ready Format in Progress */
set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
- 0, (u16)(ms_card->progress));
+ 0, (u16)(ms_card->progress));
} else {
/* Format Command Failed */
set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED);
@@ -659,9 +659,9 @@ static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
static void ms_mode_sense(struct rtsx_chip *chip, u8 cmd,
- int lun, u8 *buf, int buf_len)
+ int lun, u8 *buf, int buf_len)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int sys_info_offset;
int data_size = buf_len;
bool support_format = false;
@@ -754,10 +754,10 @@ static void ms_mode_sense(struct rtsx_chip *chip, u8 cmd,
static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
unsigned int lun = SCSI_LUN(srb);
- unsigned int dataSize;
+ unsigned int data_size;
int status;
bool pro_formatter_flag;
- unsigned char pageCode, *buf;
+ unsigned char page_code, *buf;
u8 card = get_lun_card(chip, lun);
#ifndef SUPPORT_MAGIC_GATE
@@ -770,11 +770,11 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#endif
pro_formatter_flag = false;
- dataSize = 8;
+ data_size = 8;
#ifdef SUPPORT_MAGIC_GATE
if ((chip->lun2card[lun] & MS_CARD)) {
if (!card || (card == MS_CARD)) {
- dataSize = 108;
+ data_size = 108;
if (chip->mspro_formatter_enable)
pro_formatter_flag = true;
}
@@ -783,28 +783,28 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (card == MS_CARD) {
if (chip->mspro_formatter_enable) {
pro_formatter_flag = true;
- dataSize = 108;
+ data_size = 108;
}
}
#endif
- buf = kmalloc(dataSize, GFP_KERNEL);
+ buf = kmalloc(data_size, GFP_KERNEL);
if (!buf) {
rtsx_trace(chip);
return TRANSPORT_ERROR;
}
- pageCode = srb->cmnd[2] & 0x3f;
+ page_code = srb->cmnd[2] & 0x3f;
- if ((pageCode == 0x3F) || (pageCode == 0x1C) ||
- (pageCode == 0x00) ||
- (pro_formatter_flag && (pageCode == 0x20))) {
+ if ((page_code == 0x3F) || (page_code == 0x1C) ||
+ (page_code == 0x00) ||
+ (pro_formatter_flag && (page_code == 0x20))) {
if (srb->cmnd[0] == MODE_SENSE) {
- if ((pageCode == 0x3F) || (pageCode == 0x20)) {
+ if ((page_code == 0x3F) || (page_code == 0x20)) {
ms_mode_sense(chip, srb->cmnd[0],
- lun, buf, dataSize);
+ lun, buf, data_size);
} else {
- dataSize = 4;
+ data_size = 4;
buf[0] = 0x03;
buf[1] = 0x00;
if (check_card_wp(chip, lun))
@@ -815,11 +815,11 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf[3] = 0x00;
}
} else {
- if ((pageCode == 0x3F) || (pageCode == 0x20)) {
+ if ((page_code == 0x3F) || (page_code == 0x20)) {
ms_mode_sense(chip, srb->cmnd[0],
- lun, buf, dataSize);
+ lun, buf, data_size);
} else {
- dataSize = 8;
+ data_size = 8;
buf[0] = 0x00;
buf[1] = 0x06;
buf[2] = 0x00;
@@ -842,7 +842,7 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (status == TRANSPORT_GOOD) {
unsigned int len = min_t(unsigned int, scsi_bufflen(srb),
- dataSize);
+ data_size);
rtsx_stor_set_xfer_buf(buf, len, srb);
scsi_set_resid(srb, scsi_bufflen(srb) - len);
}
@@ -854,7 +854,7 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
#ifdef SUPPORT_SD_LOCK
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
#endif
unsigned int lun = SCSI_LUN(srb);
int retval;
@@ -896,7 +896,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (sd_card->sd_lock_status & SD_LOCKED) {
dev_dbg(rtsx_dev(chip), "SD card locked!\n");
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+ SENSE_TYPE_MEDIA_READ_FORBIDDEN);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -932,7 +932,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
* need to judge start_sec at first
*/
if ((start_sec > get_card_size(chip, lun)) ||
- ((start_sec + sec_cnt) > get_card_size(chip, lun))) {
+ ((start_sec + sec_cnt) > get_card_size(chip, lun))) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LBA_OVER_RANGE);
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -947,7 +947,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
dev_dbg(rtsx_dev(chip), "read/write fail three times in succession\n");
if (srb->sc_data_direction == DMA_FROM_DEVICE)
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
else
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
@@ -959,7 +959,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (check_card_wp(chip, lun)) {
dev_dbg(rtsx_dev(chip), "Write protected card!\n");
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_PROTECT);
+ SENSE_TYPE_MEDIA_WRITE_PROTECT);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -973,15 +973,16 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
chip->rw_fail_cnt[lun]++;
if (srb->sc_data_direction == DMA_FROM_DEVICE)
- set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ set_sense_type
+ (chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
else
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
}
retval = TRANSPORT_FAILED;
rtsx_trace(chip);
- goto Exit;
+ goto exit;
} else {
chip->rw_fail_cnt[lun] = 0;
retval = TRANSPORT_GOOD;
@@ -989,7 +990,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
scsi_set_resid(srb, 0);
-Exit:
+exit:
return retval;
}
@@ -1025,8 +1026,8 @@ static int read_format_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip)
/* Capacity List Length */
if ((buf_len > 12) && chip->mspro_formatter_enable &&
- (chip->lun2card[lun] & MS_CARD) &&
- (!card || (card == MS_CARD))) {
+ (chip->lun2card[lun] & MS_CARD) &&
+ (!card || (card == MS_CARD))) {
buf[i++] = 0x10;
desc_cnt = 2;
} else {
@@ -1143,7 +1144,7 @@ static int read_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1153,7 +1154,7 @@ static int read_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1195,7 +1196,7 @@ static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = spi_erase_eeprom_chip(chip);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1216,7 +1217,7 @@ static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1247,7 +1248,7 @@ static int read_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (addr < 0xFC00) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1271,7 +1272,7 @@ static int read_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1305,7 +1306,7 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (addr < 0xFC00) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1333,7 +1334,7 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1346,7 +1347,7 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
static int get_sd_csd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
unsigned int lun = SCSI_LUN(srb);
if (!check_card_ready(chip, lun)) {
@@ -1399,7 +1400,7 @@ static int trace_msg_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if ((scsi_bufflen(srb) < buf_len) || !scsi_sglist(srb)) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1522,9 +1523,9 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (srb->cmnd[3] == 1) {
/* Variable Clock */
- struct xd_info *xd_card = &(chip->xd_card);
- struct sd_info *sd_card = &(chip->sd_card);
- struct ms_info *ms_card = &(chip->ms_card);
+ struct xd_info *xd_card = &chip->xd_card;
+ struct sd_info *sd_card = &chip->sd_card;
+ struct ms_info *ms_card = &chip->ms_card;
switch (srb->cmnd[4]) {
case XD_CARD:
@@ -1541,7 +1542,7 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
default:
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1556,7 +1557,7 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_disable_aspm(chip);
if (chip->ss_en &&
- (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
rtsx_exit_ss(chip);
wait_timeout(100);
}
@@ -1565,7 +1566,7 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_force_power_on(chip, SSC_PDCTL);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1586,9 +1587,9 @@ static int get_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
unsigned int lun = SCSI_LUN(srb);
if (srb->cmnd[3] == 1) {
- struct xd_info *xd_card = &(chip->xd_card);
- struct sd_info *sd_card = &(chip->sd_card);
- struct ms_info *ms_card = &(chip->ms_card);
+ struct xd_info *xd_card = &chip->xd_card;
+ struct sd_info *sd_card = &chip->sd_card;
+ struct ms_info *ms_card = &chip->ms_card;
u8 tmp;
switch (srb->cmnd[4]) {
@@ -1606,7 +1607,7 @@ static int get_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
default:
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1648,14 +1649,15 @@ static int dma_access_ring_buffer(struct scsi_cmnd *srb, struct rtsx_chip *chip)
dev_dbg(rtsx_dev(chip), "Write to device\n");
retval = rtsx_transfer_data(chip, 0, scsi_sglist(srb), len,
- scsi_sg_count(srb), srb->sc_data_direction, 1000);
+ scsi_sg_count(srb), srb->sc_data_direction,
+ 1000);
if (retval < 0) {
if (srb->sc_data_direction == DMA_FROM_DEVICE)
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
else
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -1667,8 +1669,8 @@ static int dma_access_ring_buffer(struct scsi_cmnd *srb, struct rtsx_chip *chip)
static int get_dev_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
- struct ms_info *ms_card = &(chip->ms_card);
+ struct sd_info *sd_card = &chip->sd_card;
+ struct ms_info *ms_card = &chip->ms_card;
int buf_len;
unsigned int lun = SCSI_LUN(srb);
u8 card = get_lun_card(chip, lun);
@@ -1699,8 +1701,8 @@ static int get_dev_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#ifdef SUPPORT_OCP
status[8] = 0;
- if (CHECK_LUN_MODE(chip,
- SD_MS_2LUN) && (chip->lun2card[lun] == MS_CARD)) {
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN) &&
+ (chip->lun2card[lun] == MS_CARD)) {
oc_now_mask = MS_OC_NOW;
oc_ever_mask = MS_OC_EVER;
} else {
@@ -1804,7 +1806,7 @@ static int set_chip_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!CHECK_PID(chip, 0x5208)) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1884,7 +1886,7 @@ static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rtsx_chip *chip)
cmd_type = srb->cmnd[4];
if (cmd_type > 2) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1903,7 +1905,7 @@ static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rtsx_chip *chip)
value = *(rtsx_get_cmd_data(chip) + idx);
if (scsi_bufflen(srb) < 1) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1971,7 +1973,7 @@ static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1980,8 +1982,9 @@ static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_read_phy_register(chip, addr + i, &val);
if (retval != STATUS_SUCCESS) {
vfree(buf);
- set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ set_sense_type
+ (chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2039,7 +2042,7 @@ static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2050,7 +2053,7 @@ static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2090,7 +2093,7 @@ static int erase_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = spi_erase_eeprom_chip(chip);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2098,13 +2101,13 @@ static int erase_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = spi_erase_eeprom_byte(chip, addr);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
} else {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2139,7 +2142,7 @@ static int read_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2149,7 +2152,7 @@ static int read_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2204,7 +2207,7 @@ static int write_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2242,7 +2245,7 @@ static int read_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2252,7 +2255,7 @@ static int read_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2311,7 +2314,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
retval = rtsx_write_register(chip, PWR_GATE_CTRL,
- LDO3318_PWR_MASK, LDO_OFF);
+ LDO3318_PWR_MASK, LDO_OFF);
if (retval != STATUS_SUCCESS) {
vfree(buf);
rtsx_trace(chip);
@@ -2321,7 +2324,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
wait_timeout(600);
retval = rtsx_write_phy_register(chip, 0x08,
- 0x4C00 | chip->phy_voltage);
+ 0x4C00 | chip->phy_voltage);
if (retval != STATUS_SUCCESS) {
vfree(buf);
rtsx_trace(chip);
@@ -2329,7 +2332,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
retval = rtsx_write_register(chip, PWR_GATE_CTRL,
- LDO3318_PWR_MASK, LDO_ON);
+ LDO3318_PWR_MASK, LDO_ON);
if (retval != STATUS_SUCCESS) {
vfree(buf);
rtsx_trace(chip);
@@ -2352,14 +2355,14 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_write_efuse(chip, addr + i, buf[i]);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
result = TRANSPORT_FAILED;
rtsx_trace(chip);
- goto Exit;
+ goto exit;
}
}
-Exit:
+exit:
vfree(buf);
retval = card_power_off(chip, SPI_CARD);
@@ -2370,7 +2373,7 @@ Exit:
if (chip->asic_code) {
retval = rtsx_write_register(chip, PWR_GATE_CTRL,
- LDO3318_PWR_MASK, LDO_OFF);
+ LDO3318_PWR_MASK, LDO_OFF);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return TRANSPORT_ERROR;
@@ -2385,7 +2388,7 @@ Exit:
}
retval = rtsx_write_register(chip, PWR_GATE_CTRL,
- LDO3318_PWR_MASK, LDO_ON);
+ LDO3318_PWR_MASK, LDO_ON);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return TRANSPORT_ERROR;
@@ -2425,7 +2428,7 @@ static int read_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (func > func_max) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2439,7 +2442,7 @@ static int read_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_read_cfg_seq(chip, func, addr, buf, len);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
vfree(buf);
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -2484,7 +2487,7 @@ static int write_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (func > func_max) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2593,7 +2596,7 @@ static int app_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
default:
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2670,7 +2673,7 @@ static int read_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (get_lun_card(chip, lun) == XD_CARD) {
rtsx_status[13] = 0x40;
} else if (get_lun_card(chip, lun) == SD_CARD) {
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
rtsx_status[13] = 0x20;
if (CHK_SD(sd_card)) {
@@ -2686,7 +2689,7 @@ static int read_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_status[13] |= 0x04;
}
} else if (get_lun_card(chip, lun) == MS_CARD) {
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
if (CHK_MSPRO(ms_card)) {
rtsx_status[13] = 0x38;
@@ -2881,7 +2884,7 @@ static int vendor_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
default:
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2895,14 +2898,15 @@ void led_shine(struct scsi_cmnd *srb, struct rtsx_chip *chip)
unsigned int lun = SCSI_LUN(srb);
u16 sec_cnt;
- if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10))
+ if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10)) {
sec_cnt = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
- else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) {
+ } else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) {
sec_cnt = srb->cmnd[4];
if (sec_cnt == 0)
sec_cnt = 256;
- } else
+ } else {
return;
+ }
if (chip->rw_cap[lun] >= GPIO_TOGGLE_THRESHOLD) {
toggle_gpio(chip, LED_GPIO);
@@ -2915,7 +2919,7 @@ void led_shine(struct scsi_cmnd *srb, struct rtsx_chip *chip)
static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
unsigned int lun = SCSI_LUN(srb);
bool quick_format;
int retval;
@@ -2927,7 +2931,7 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
if ((srb->cmnd[3] != 0x4D) || (srb->cmnd[4] != 0x47) ||
- (srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D) ||
+ (srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D) ||
(srb->cmnd[7] != 0x74)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
@@ -2941,7 +2945,7 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
wait_timeout(100);
if (!check_card_ready(chip, lun) ||
- (get_card_size(chip, lun) == 0)) {
+ (get_card_size(chip, lun) == 0)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -2986,7 +2990,7 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#ifdef SUPPORT_PCGL_1P18
static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
unsigned int lun = SCSI_LUN(srb);
u8 dev_info_id, data_len;
u8 *buf;
@@ -3005,8 +3009,8 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
if ((srb->cmnd[2] != 0xB0) || (srb->cmnd[4] != 0x4D) ||
- (srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) ||
- (srb->cmnd[7] != 0x44)) {
+ (srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) ||
+ (srb->cmnd[7] != 0x44)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -3014,17 +3018,20 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
dev_info_id = srb->cmnd[3];
if ((CHK_MSXC(ms_card) && (dev_info_id == 0x10)) ||
- (!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) ||
- !CHK_MSPRO(ms_card)) {
+ (!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) ||
+ !CHK_MSPRO(ms_card)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
- if (dev_info_id == 0x15)
- buf_len = data_len = 0x3A;
- else
- buf_len = data_len = 0x6A;
+ if (dev_info_id == 0x15) {
+ buf_len = 0x3A;
+ data_len = 0x3A;
+ } else {
+ buf_len = 0x6A;
+ data_len = 0x6A;
+ }
buf = kmalloc(buf_len, GFP_KERNEL);
if (!buf) {
@@ -3100,7 +3107,7 @@ static int ms_sp_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
#ifdef SUPPORT_CPRM
-static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+static int sd_extension_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
unsigned int lun = SCSI_LUN(srb);
int result;
@@ -3164,7 +3171,7 @@ static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#ifdef SUPPORT_MAGIC_GATE
static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
unsigned int lun = SCSI_LUN(srb);
int retval;
u8 key_format;
@@ -3208,8 +3215,8 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
switch (key_format) {
case KF_GET_LOC_EKB:
if ((scsi_bufflen(srb) == 0x41C) &&
- (srb->cmnd[8] == 0x04) &&
- (srb->cmnd[9] == 0x1C)) {
+ (srb->cmnd[8] == 0x04) &&
+ (srb->cmnd[9] == 0x1C)) {
retval = mg_get_local_EKB(srb, chip);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
@@ -3218,7 +3225,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3226,8 +3233,8 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
case KF_RSP_CHG:
if ((scsi_bufflen(srb) == 0x24) &&
- (srb->cmnd[8] == 0x00) &&
- (srb->cmnd[9] == 0x24)) {
+ (srb->cmnd[8] == 0x00) &&
+ (srb->cmnd[9] == 0x24)) {
retval = mg_get_rsp_chg(srb, chip);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
@@ -3236,7 +3243,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3245,12 +3252,12 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
case KF_GET_ICV:
ms_card->mg_entry_num = srb->cmnd[5];
if ((scsi_bufflen(srb) == 0x404) &&
- (srb->cmnd[8] == 0x04) &&
- (srb->cmnd[9] == 0x04) &&
- (srb->cmnd[2] == 0x00) &&
- (srb->cmnd[3] == 0x00) &&
- (srb->cmnd[4] == 0x00) &&
- (srb->cmnd[5] < 32)) {
+ (srb->cmnd[8] == 0x04) &&
+ (srb->cmnd[9] == 0x04) &&
+ (srb->cmnd[2] == 0x00) &&
+ (srb->cmnd[3] == 0x00) &&
+ (srb->cmnd[4] == 0x00) &&
+ (srb->cmnd[5] < 32)) {
retval = mg_get_ICV(srb, chip);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
@@ -3259,7 +3266,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3277,7 +3284,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
unsigned int lun = SCSI_LUN(srb);
int retval;
u8 key_format;
@@ -3326,8 +3333,8 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
switch (key_format) {
case KF_SET_LEAF_ID:
if ((scsi_bufflen(srb) == 0x0C) &&
- (srb->cmnd[8] == 0x00) &&
- (srb->cmnd[9] == 0x0C)) {
+ (srb->cmnd[8] == 0x00) &&
+ (srb->cmnd[9] == 0x0C)) {
retval = mg_set_leaf_id(srb, chip);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
@@ -3336,7 +3343,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3344,8 +3351,8 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
case KF_CHG_HOST:
if ((scsi_bufflen(srb) == 0x0C) &&
- (srb->cmnd[8] == 0x00) &&
- (srb->cmnd[9] == 0x0C)) {
+ (srb->cmnd[8] == 0x00) &&
+ (srb->cmnd[9] == 0x0C)) {
retval = mg_chg(srb, chip);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
@@ -3354,7 +3361,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3362,8 +3369,8 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
case KF_RSP_HOST:
if ((scsi_bufflen(srb) == 0x0C) &&
- (srb->cmnd[8] == 0x00) &&
- (srb->cmnd[9] == 0x0C)) {
+ (srb->cmnd[8] == 0x00) &&
+ (srb->cmnd[9] == 0x0C)) {
retval = mg_rsp(srb, chip);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
@@ -3372,7 +3379,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3381,12 +3388,12 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
case KF_SET_ICV:
ms_card->mg_entry_num = srb->cmnd[5];
if ((scsi_bufflen(srb) == 0x404) &&
- (srb->cmnd[8] == 0x04) &&
- (srb->cmnd[9] == 0x04) &&
- (srb->cmnd[2] == 0x00) &&
- (srb->cmnd[3] == 0x00) &&
- (srb->cmnd[4] == 0x00) &&
- (srb->cmnd[5] < 32)) {
+ (srb->cmnd[8] == 0x04) &&
+ (srb->cmnd[9] == 0x04) &&
+ (srb->cmnd[2] == 0x00) &&
+ (srb->cmnd[3] == 0x00) &&
+ (srb->cmnd[4] == 0x00) &&
+ (srb->cmnd[5] < 32)) {
retval = mg_set_ICV(srb, chip);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
@@ -3395,7 +3402,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3415,9 +3422,9 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
#ifdef SUPPORT_SD_LOCK
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
#endif
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
unsigned int lun = SCSI_LUN(srb);
int result;
@@ -3427,9 +3434,9 @@ int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
* REQUEST_SENSE and rs_ppstatus
*/
if (!((srb->cmnd[0] == VENDOR_CMND) &&
- (srb->cmnd[1] == SCSI_APP_CMD) &&
- (srb->cmnd[2] == GET_DEV_STATUS)) &&
- (srb->cmnd[0] != REQUEST_SENSE)) {
+ (srb->cmnd[1] == SCSI_APP_CMD) &&
+ (srb->cmnd[2] == GET_DEV_STATUS)) &&
+ (srb->cmnd[0] != REQUEST_SENSE)) {
/* Logical Unit Not Ready Format in Progress */
set_sense_data(chip, lun, CUR_ERR,
0x02, 0, 0x04, 0x04, 0, 0);
@@ -3440,12 +3447,12 @@ int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#endif
if ((get_lun_card(chip, lun) == MS_CARD) &&
- (ms_card->format_status == FORMAT_IN_PROGRESS)) {
+ (ms_card->format_status == FORMAT_IN_PROGRESS)) {
if ((srb->cmnd[0] != REQUEST_SENSE) &&
- (srb->cmnd[0] != INQUIRY)) {
+ (srb->cmnd[0] != INQUIRY)) {
/* Logical Unit Not Ready Format in Progress */
set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
- 0, (u16)(ms_card->progress));
+ 0, (u16)(ms_card->progress));
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3510,7 +3517,7 @@ int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
case SD_EXECUTE_WRITE:
case SD_GET_RSP:
case SD_HW_RST:
- result = sd_extention_cmnd(srb, chip);
+ result = sd_extension_cmnd(srb, chip);
break;
#endif
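
The rtsx_scsi.c hunks above are mechanical coding-style cleanups rather than functional changes: wrapped condition and argument lines are re-indented to start under the opening parenthesis, the redundant parentheses in expressions such as &(chip->ms_card) are dropped, and the sd_extention_cmnd spelling is corrected to sd_extension_cmnd. A minimal standalone sketch of the address-of cleanup follows; the demo_chip and demo_card types are invented for illustration and are not part of the driver.

#include <stdio.h>

/* Hypothetical stand-in types; only the style of the address-of
 * expression mirrors the driver code touched above.
 */
struct demo_card {
	int err_code;
};

struct demo_chip {
	struct demo_card ms_card;
};

static void demo_set_err(struct demo_chip *chip, int err)
{
	/* Before: struct demo_card *card = &(chip->ms_card);
	 * After:  the parentheses are redundant because -> binds
	 *         tighter than the unary & operator.
	 */
	struct demo_card *card = &chip->ms_card;

	card->err_code |= err;
}

int main(void)
{
	struct demo_chip chip = { .ms_card = { 0 } };

	demo_set_err(&chip, 0x10);
	printf("err_code = 0x%x\n", chip.ms_card.err_code);
	return 0;
}
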
diff --git a/drivers/staging/rts5208/rtsx_scsi.h b/drivers/staging/rts5208/rtsx_scsi.h
index 03dd76d6c859..30f3724848fe 100644
--- a/drivers/staging/rts5208/rtsx_scsi.h
+++ b/drivers/staging/rts5208/rtsx_scsi.h
@@ -136,8 +136,8 @@
void scsi_show_command(struct rtsx_chip *chip);
void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type);
void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
- u8 sense_key, u32 info, u8 asc, u8 ascq,
- u8 sns_key_info0, u16 sns_key_info1);
+ u8 sense_key, u32 info, u8 asc, u8 ascq,
+ u8 sns_key_info0, u16 sns_key_info1);
int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip);
#endif /* __REALTEK_RTSX_SCSI_H */
diff --git a/drivers/staging/rts5208/rtsx_sys.h b/drivers/staging/rts5208/rtsx_sys.h
index f49bed9ec76a..817700c0d794 100644
--- a/drivers/staging/rts5208/rtsx_sys.h
+++ b/drivers/staging/rts5208/rtsx_sys.h
@@ -32,9 +32,9 @@ static inline void rtsx_exclusive_enter_ss(struct rtsx_chip *chip)
{
struct rtsx_dev *dev = chip->rtsx;
- spin_lock(&(dev->reg_lock));
+ spin_lock(&dev->reg_lock);
rtsx_enter_ss(chip);
- spin_unlock(&(dev->reg_lock));
+ spin_unlock(&dev->reg_lock);
}
static inline void rtsx_reset_detected_cards(struct rtsx_chip *chip, int flag)
diff --git a/drivers/staging/rts5208/rtsx_transport.h b/drivers/staging/rts5208/rtsx_transport.h
index 479137398c3d..99740c33f2fb 100644
--- a/drivers/staging/rts5208/rtsx_transport.h
+++ b/drivers/staging/rts5208/rtsx_transport.h
@@ -30,18 +30,21 @@
#define WAIT_TIME 2000
unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
- unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
- unsigned int *offset, enum xfer_buf_dir dir);
-void rtsx_stor_set_xfer_buf(unsigned char *buffer,
- unsigned int buflen, struct scsi_cmnd *srb);
-void rtsx_stor_get_xfer_buf(unsigned char *buffer,
- unsigned int buflen, struct scsi_cmnd *srb);
+ unsigned int buflen,
+ struct scsi_cmnd *srb,
+ unsigned int *index,
+ unsigned int *offset,
+ enum xfer_buf_dir dir);
+void rtsx_stor_set_xfer_buf(unsigned char *buffer, unsigned int buflen,
+ struct scsi_cmnd *srb);
+void rtsx_stor_get_xfer_buf(unsigned char *buffer, unsigned int buflen,
+ struct scsi_cmnd *srb);
void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip);
#define rtsx_init_cmd(chip) ((chip)->ci = 0)
-void rtsx_add_cmd(struct rtsx_chip *chip,
- u8 cmd_type, u16 reg_addr, u8 mask, u8 data);
+void rtsx_add_cmd(struct rtsx_chip *chip, u8 cmd_type, u16 reg_addr, u8 mask,
+ u8 data);
void rtsx_send_cmd_no_wait(struct rtsx_chip *chip);
int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout);
@@ -55,11 +58,12 @@ static inline u8 *rtsx_get_cmd_data(struct rtsx_chip *chip)
}
int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
- int use_sg, enum dma_data_direction dma_dir, int timeout);
+ int use_sg, enum dma_data_direction dma_dir,
+ int timeout);
-int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
- void *buf, size_t len,
- int use_sg, unsigned int *index, unsigned int *offset,
- enum dma_data_direction dma_dir, int timeout);
+int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card, void *buf,
+ size_t len, int use_sg, unsigned int *index,
+ unsigned int *offset,
+ enum dma_data_direction dma_dir, int timeout);
#endif /* __REALTEK_RTSX_TRANSPORT_H */
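
The rtsx_transport.h hunk above only re-wraps long prototypes so that every continuation line starts under the opening parenthesis, the alignment checkpatch.pl points out for kernel code. A short compilable sketch with made-up names (demo_transfer is not a real driver symbol):

#include <stddef.h>

/* Before the cleanup, continuation lines used an arbitrary indent:
 *
 * int demo_transfer(void *chip, unsigned char card, void *buf, size_t len,
 *	int use_sg, int dma_dir, int timeout);
 *
 * After the cleanup they are aligned with the opening parenthesis:
 */
int demo_transfer(void *chip, unsigned char card, void *buf, size_t len,
		  int use_sg, int dma_dir, int timeout);

int demo_transfer(void *chip, unsigned char card, void *buf, size_t len,
		  int use_sg, int dma_dir, int timeout)
{
	/* The body is irrelevant to the style point; silence unused warnings. */
	(void)chip; (void)card; (void)buf; (void)len;
	(void)use_sg; (void)dma_dir; (void)timeout;
	return 0;
}
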
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index b0bbb36f8988..bdd35b611f27 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -56,21 +56,21 @@ static u16 REG_SD_DCMPS1_CTL;
static inline void sd_set_err_code(struct rtsx_chip *chip, u8 err_code)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
sd_card->err_code |= err_code;
}
static inline void sd_clr_err_code(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
sd_card->err_code = 0;
}
static inline int sd_check_err_code(struct rtsx_chip *chip, u8 err_code)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
return sd_card->err_code & err_code;
}
@@ -124,9 +124,9 @@ static int sd_check_data0_status(struct rtsx_chip *chip)
}
static int sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
- u32 arg, u8 rsp_type, u8 *rsp, int rsp_len)
+ u32 arg, u8 rsp_type, u8 *rsp, int rsp_len)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
int timeout = 100;
u16 reg_addr;
@@ -153,11 +153,12 @@ RTY_SEND_CMD:
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, PINGPONG_BUFFER);
+ 0x01, PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
- 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
+ 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
- SD_TRANSFER_END | SD_STAT_IDLE, SD_TRANSFER_END | SD_STAT_IDLE);
+ SD_TRANSFER_END | SD_STAT_IDLE, SD_TRANSFER_END |
+ SD_STAT_IDLE);
if (rsp_type == SD_RSP_TYPE_R2) {
for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16;
@@ -238,7 +239,7 @@ RTY_SEND_CMD:
if ((rsp_type == SD_RSP_TYPE_R1) || (rsp_type == SD_RSP_TYPE_R1b)) {
if ((cmd_idx != SEND_RELATIVE_ADDR) &&
- (cmd_idx != SEND_IF_COND)) {
+ (cmd_idx != SEND_IF_COND)) {
if (cmd_idx != STOP_TRANSMISSION) {
if (ptr[1] & 0x80) {
rtsx_trace(chip);
@@ -285,7 +286,7 @@ static int sd_read_data(struct rtsx_chip *chip,
u16 blk_cnt, u8 bus_width, u8 *buf, int buf_len,
int timeout)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
int i;
@@ -308,27 +309,27 @@ static int sd_read_data(struct rtsx_chip *chip,
0xFF, cmd[i]);
}
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
- (u8)byte_cnt);
+ (u8)byte_cnt);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
- (u8)(byte_cnt >> 8));
+ (u8)(byte_cnt >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
- (u8)blk_cnt);
+ (u8)blk_cnt);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
- (u8)(blk_cnt >> 8));
+ (u8)(blk_cnt >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
- SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END|
- SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
+ SD_CHECK_CRC7 | SD_RSP_LEN_6);
if (trans_mode != SD_TM_AUTO_TUNING)
rtsx_add_cmd(chip, WRITE_REG_CMD,
- CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
+ CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
- trans_mode | SD_TRANSFER_START);
+ trans_mode | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
- SD_TRANSFER_END);
+ SD_TRANSFER_END);
retval = rtsx_send_cmd(chip, SD_CARD, timeout);
if (retval < 0) {
@@ -353,10 +354,10 @@ static int sd_read_data(struct rtsx_chip *chip,
}
static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode,
- u8 *cmd, int cmd_len, u16 byte_cnt, u16 blk_cnt, u8 bus_width,
- u8 *buf, int buf_len, int timeout)
+ u8 *cmd, int cmd_len, u16 byte_cnt, u16 blk_cnt,
+ u8 bus_width, u8 *buf, int buf_len, int timeout)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
int i;
@@ -389,30 +390,30 @@ static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode,
}
}
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
- (u8)byte_cnt);
+ (u8)byte_cnt);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
- (u8)(byte_cnt >> 8));
+ (u8)(byte_cnt >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
- (u8)blk_cnt);
+ (u8)blk_cnt);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
- (u8)(blk_cnt >> 8));
+ (u8)(blk_cnt >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
- SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
- SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
+ SD_CHECK_CRC7 | SD_RSP_LEN_6);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
- trans_mode | SD_TRANSFER_START);
+ trans_mode | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
- SD_TRANSFER_END);
+ SD_TRANSFER_END);
retval = rtsx_send_cmd(chip, SD_CARD, timeout);
if (retval < 0) {
if (retval == -ETIMEDOUT) {
- sd_send_cmd_get_rsp(chip, SEND_STATUS,
- sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
}
rtsx_trace(chip);
@@ -424,7 +425,7 @@ static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode,
static int sd_check_csd(struct rtsx_chip *chip, char check_wp)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
int i;
u8 csd_ver, trans_speed;
@@ -438,7 +439,7 @@ static int sd_check_csd(struct rtsx_chip *chip, char check_wp)
}
retval = sd_send_cmd_get_rsp(chip, SEND_CSD, sd_card->sd_addr,
- SD_RSP_TYPE_R2, rsp, 16);
+ SD_RSP_TYPE_R2, rsp, 16);
if (retval == STATUS_SUCCESS)
break;
}
@@ -534,7 +535,7 @@ static int sd_check_csd(struct rtsx_chip *chip, char check_wp)
static int sd_set_sample_push_timing(struct rtsx_chip *chip)
{
int retval;
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
u8 val = 0;
if ((chip->sd_ctl & SD_PUSH_POINT_CTL_MASK) == SD_PUSH_POINT_DELAY)
@@ -573,7 +574,7 @@ static int sd_set_sample_push_timing(struct rtsx_chip *chip)
static void sd_choose_proper_clock(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
if (CHK_SD_SDR104(sd_card)) {
if (chip->asic_code)
@@ -637,7 +638,7 @@ static int sd_set_clock_divider(struct rtsx_chip *chip, u8 clk_div)
static int sd_set_init_para(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
retval = sd_set_sample_push_timing(chip);
@@ -659,7 +660,7 @@ static int sd_set_init_para(struct rtsx_chip *chip)
int sd_select_card(struct rtsx_chip *chip, int select)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 cmd_idx, cmd_type;
u32 addr;
@@ -686,12 +687,12 @@ int sd_select_card(struct rtsx_chip *chip, int select)
#ifdef SUPPORT_SD_LOCK
static int sd_update_lock_status(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 rsp[5];
retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
- SD_RSP_TYPE_R1, rsp, 5);
+ SD_RSP_TYPE_R1, rsp, 5);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -715,23 +716,23 @@ static int sd_update_lock_status(struct rtsx_chip *chip)
#endif
static int sd_wait_state_data_ready(struct rtsx_chip *chip, u8 state,
- u8 data_ready, int polling_cnt)
+ u8 data_ready, int polling_cnt)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval, i;
u8 rsp[5];
for (i = 0; i < polling_cnt; i++) {
retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
- sd_card->sd_addr, SD_RSP_TYPE_R1, rsp,
- 5);
+ sd_card->sd_addr, SD_RSP_TYPE_R1,
+ rsp, 5);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
}
if (((rsp[3] & 0x1E) == state) &&
- ((rsp[3] & 0x01) == data_ready))
+ ((rsp[3] & 0x01) == data_ready))
return STATUS_SUCCESS;
}
@@ -746,8 +747,8 @@ static int sd_change_bank_voltage(struct rtsx_chip *chip, u8 voltage)
if (voltage == SD_IO_3V3) {
if (chip->asic_code) {
retval = rtsx_write_phy_register(chip, 0x08,
- 0x4FC0 |
- chip->phy_voltage);
+ 0x4FC0 |
+ chip->phy_voltage);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -763,8 +764,8 @@ static int sd_change_bank_voltage(struct rtsx_chip *chip, u8 voltage)
} else if (voltage == SD_IO_1V8) {
if (chip->asic_code) {
retval = rtsx_write_phy_register(chip, 0x08,
- 0x4C40 |
- chip->phy_voltage);
+ 0x4C40 |
+ chip->phy_voltage);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -800,7 +801,7 @@ static int sd_voltage_switch(struct rtsx_chip *chip)
}
retval = sd_send_cmd_get_rsp(chip, VOLTAGE_SWITCH, 0, SD_RSP_TYPE_R1,
- NULL, 0);
+ NULL, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -851,8 +852,8 @@ static int sd_voltage_switch(struct rtsx_chip *chip)
(SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
SD_DAT1_STATUS | SD_DAT0_STATUS)) {
dev_dbg(rtsx_dev(chip), "SD_BUS_STAT: 0x%x\n", stat);
- rtsx_write_register(chip, SD_BUS_STAT,
- SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
+ rtsx_write_register(chip, SD_BUS_STAT, SD_CLK_TOGGLE_EN |
+ SD_CLK_FORCE_STOP, 0);
rtsx_write_register(chip, CARD_CLK_EN, 0xFF, 0);
rtsx_trace(chip);
return STATUS_FAIL;
@@ -903,7 +904,7 @@ static int sd_reset_dcm(struct rtsx_chip *chip, u8 tune_dir)
static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
u16 SD_VP_CTL, SD_DCMPS_CTL;
u8 val;
int retval;
@@ -968,7 +969,9 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
}
udelay(50);
retval = rtsx_write_register(chip, SD_VP_CTL, 0xFF,
- PHASE_CHANGE | PHASE_NOT_RESET | sample_point);
+ PHASE_CHANGE |
+ PHASE_NOT_RESET |
+ sample_point);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -982,7 +985,8 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
}
udelay(50);
retval = rtsx_write_register(chip, SD_VP_CTL, 0xFF,
- PHASE_NOT_RESET | sample_point);
+ PHASE_NOT_RESET |
+ sample_point);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -992,24 +996,24 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, SD_DCMPS_CTL, DCMPS_CHANGE,
- DCMPS_CHANGE);
+ DCMPS_CHANGE);
rtsx_add_cmd(chip, CHECK_REG_CMD, SD_DCMPS_CTL,
- DCMPS_CHANGE_DONE, DCMPS_CHANGE_DONE);
+ DCMPS_CHANGE_DONE, DCMPS_CHANGE_DONE);
retval = rtsx_send_cmd(chip, SD_CARD, 100);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto Fail;
+ goto fail;
}
val = *rtsx_get_cmd_data(chip);
if (val & DCMPS_ERROR) {
rtsx_trace(chip);
- goto Fail;
+ goto fail;
}
if ((val & DCMPS_CURRENT_PHASE) != sample_point) {
rtsx_trace(chip);
- goto Fail;
+ goto fail;
}
retval = rtsx_write_register(chip, SD_DCMPS_CTL,
@@ -1045,7 +1049,7 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
return STATUS_SUCCESS;
-Fail:
+fail:
rtsx_read_register(chip, SD_VP_CTL, &val);
dev_dbg(rtsx_dev(chip), "SD_VP_CTL: 0x%x\n", val);
rtsx_read_register(chip, SD_DCMPS_CTL, &val);
@@ -1060,12 +1064,12 @@ Fail:
static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 cmd[5], buf[8];
retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1078,7 +1082,7 @@ static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width)
cmd[4] = 0;
retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 8, 1, bus_width,
- buf, 8, 250);
+ buf, 8, 250);
if (retval != STATUS_SUCCESS) {
rtsx_clear_sd_error(chip);
rtsx_trace(chip);
@@ -1096,7 +1100,7 @@ static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width)
}
static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
- u8 func_to_switch, u8 *buf, int buf_len)
+ u8 func_to_switch, u8 *buf, int buf_len)
{
u8 support_mask = 0, query_switch = 0, switch_busy = 0;
int support_offset = 0, query_switch_offset = 0, check_busy_offset = 0;
@@ -1198,7 +1202,7 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
if (func_group == SD_FUNC_GROUP_1) {
if (!(buf[support_offset] & support_mask) ||
- ((buf[query_switch_offset] & 0x0F) != query_switch)) {
+ ((buf[query_switch_offset] & 0x0F) != query_switch)) {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1206,7 +1210,7 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
/* Check 'Busy Status' */
if ((buf[DATA_STRUCTURE_VER_OFFSET] == 0x01) &&
- ((buf[check_busy_offset] & switch_busy) == switch_busy)) {
+ ((buf[check_busy_offset] & switch_busy) == switch_busy)) {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1214,10 +1218,10 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
return STATUS_SUCCESS;
}
-static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode,
- u8 func_group, u8 func_to_switch, u8 bus_width)
+static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode, u8 func_group,
+ u8 func_to_switch, u8 bus_width)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 cmd[5], buf[64];
@@ -1247,7 +1251,7 @@ static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode,
}
retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1, bus_width,
- buf, 64, 250);
+ buf, 64, 250);
if (retval != STATUS_SUCCESS) {
rtsx_clear_sd_error(chip);
rtsx_trace(chip);
@@ -1326,7 +1330,7 @@ static u8 downgrade_switch_mode(u8 func_group, u8 func_to_switch)
}
static int sd_check_switch(struct rtsx_chip *chip,
- u8 func_group, u8 func_to_switch, u8 bus_width)
+ u8 func_group, u8 func_to_switch, u8 bus_width)
{
int retval;
int i;
@@ -1340,12 +1344,14 @@ static int sd_check_switch(struct rtsx_chip *chip,
}
retval = sd_check_switch_mode(chip, SD_CHECK_MODE, func_group,
- func_to_switch, bus_width);
+ func_to_switch, bus_width);
if (retval == STATUS_SUCCESS) {
u8 stat;
retval = sd_check_switch_mode(chip, SD_SWITCH_MODE,
- func_group, func_to_switch, bus_width);
+ func_group,
+ func_to_switch,
+ bus_width);
if (retval == STATUS_SUCCESS) {
switch_good = true;
break;
@@ -1364,7 +1370,7 @@ static int sd_check_switch(struct rtsx_chip *chip,
}
func_to_switch = downgrade_switch_mode(func_group,
- func_to_switch);
+ func_to_switch);
wait_timeout(20);
}
@@ -1379,14 +1385,14 @@ static int sd_check_switch(struct rtsx_chip *chip,
static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
int i;
u8 func_to_switch = 0;
/* Get supported functions */
- retval = sd_check_switch_mode(chip, SD_CHECK_MODE,
- NO_ARGUMENT, NO_ARGUMENT, bus_width);
+ retval = sd_check_switch_mode(chip, SD_CHECK_MODE, NO_ARGUMENT,
+ NO_ARGUMENT, bus_width);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1396,24 +1402,24 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
/* Function Group 1: Access Mode */
for (i = 0; i < 4; i++) {
- switch ((u8)(chip->sd_speed_prior >> (i*8))) {
+ switch ((u8)(chip->sd_speed_prior >> (i * 8))) {
case SDR104_SUPPORT:
- if ((sd_card->func_group1_mask & SDR104_SUPPORT_MASK)
- && chip->sdr104_en) {
+ if ((sd_card->func_group1_mask & SDR104_SUPPORT_MASK) &&
+ chip->sdr104_en) {
func_to_switch = SDR104_SUPPORT;
}
break;
case DDR50_SUPPORT:
- if ((sd_card->func_group1_mask & DDR50_SUPPORT_MASK)
- && chip->ddr50_en) {
+ if ((sd_card->func_group1_mask & DDR50_SUPPORT_MASK) &&
+ chip->ddr50_en) {
func_to_switch = DDR50_SUPPORT;
}
break;
case SDR50_SUPPORT:
- if ((sd_card->func_group1_mask & SDR50_SUPPORT_MASK)
- && chip->sdr50_en) {
+ if ((sd_card->func_group1_mask & SDR50_SUPPORT_MASK) &&
+ chip->sdr50_en) {
func_to_switch = SDR50_SUPPORT;
}
break;
@@ -1430,7 +1436,6 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
if (func_to_switch)
break;
-
}
dev_dbg(rtsx_dev(chip), "SD_FUNC_GROUP_1: func_to_switch = 0x%02x",
func_to_switch);
@@ -1446,7 +1451,7 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
if (func_to_switch) {
retval = sd_check_switch(chip, SD_FUNC_GROUP_1, func_to_switch,
- bus_width);
+ bus_width);
if (retval != STATUS_SUCCESS) {
if (func_to_switch == SDR104_SUPPORT) {
sd_card->sd_switch_fail = SDR104_SUPPORT_MASK;
@@ -1496,7 +1501,7 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
func_to_switch = 0xFF;
for (i = 0; i < 4; i++) {
- switch ((u8)(chip->sd_current_prior >> (i*8))) {
+ switch ((u8)(chip->sd_current_prior >> (i * 8))) {
case CURRENT_LIMIT_800:
if (sd_card->func_group4_mask & CURRENT_LIMIT_800_MASK)
func_to_switch = CURRENT_LIMIT_800;
@@ -1534,7 +1539,7 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
if (func_to_switch <= CURRENT_LIMIT_800) {
retval = sd_check_switch(chip, SD_FUNC_GROUP_4, func_to_switch,
- bus_width);
+ bus_width);
if (retval != STATUS_SUCCESS) {
if (sd_check_err_code(chip, SD_NO_CARD)) {
rtsx_trace(chip);
@@ -1596,8 +1601,8 @@ static int sd_sdr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
cmd[3] = 0;
cmd[4] = 0;
- retval = sd_read_data(chip, SD_TM_AUTO_TUNING,
- cmd, 5, 0x40, 1, SD_BUS_WIDTH_4, NULL, 0, 100);
+ retval = sd_read_data(chip, SD_TM_AUTO_TUNING, cmd, 5, 0x40, 1,
+ SD_BUS_WIDTH_4, NULL, 0, 100);
if (retval != STATUS_SUCCESS) {
(void)sd_wait_data_idle(chip);
@@ -1611,7 +1616,7 @@ static int sd_sdr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 cmd[5];
@@ -1624,7 +1629,7 @@ static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
dev_dbg(rtsx_dev(chip), "sd ddr tuning rx\n");
retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1636,8 +1641,8 @@ static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
cmd[3] = 0;
cmd[4] = 0;
- retval = sd_read_data(chip, SD_TM_NORMAL_READ,
- cmd, 5, 64, 1, SD_BUS_WIDTH_4, NULL, 0, 100);
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1,
+ SD_BUS_WIDTH_4, NULL, 0, 100);
if (retval != STATUS_SUCCESS) {
(void)sd_wait_data_idle(chip);
@@ -1651,7 +1656,7 @@ static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
static int mmc_ddr_tunning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 cmd[5], bus_width;
@@ -1676,8 +1681,8 @@ static int mmc_ddr_tunning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
cmd[3] = 0;
cmd[4] = 0;
- retval = sd_read_data(chip, SD_TM_NORMAL_READ,
- cmd, 5, 0x200, 1, bus_width, NULL, 0, 100);
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 0x200, 1,
+ bus_width, NULL, 0, 100);
if (retval != STATUS_SUCCESS) {
(void)sd_wait_data_idle(chip);
@@ -1691,7 +1696,7 @@ static int mmc_ddr_tunning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
retval = sd_change_phase(chip, sample_point, TUNE_TX);
@@ -1708,11 +1713,11 @@ static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
}
retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS) {
if (sd_check_err_code(chip, SD_RSP_TIMEOUT)) {
rtsx_write_register(chip, SD_CFG3,
- SD_RSP_80CLK_TIMEOUT_EN, 0);
+ SD_RSP_80CLK_TIMEOUT_EN, 0);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1730,7 +1735,7 @@ static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 cmd[5], bus_width;
@@ -1770,8 +1775,8 @@ static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
cmd[3] = 0;
cmd[4] = 0;
- retval = sd_write_data(chip, SD_TM_AUTO_WRITE_2,
- cmd, 5, 16, 1, bus_width, sd_card->raw_csd, 16, 100);
+ retval = sd_write_data(chip, SD_TM_AUTO_WRITE_2, cmd, 5, 16, 1,
+ bus_width, sd_card->raw_csd, 16, 100);
if (retval != STATUS_SUCCESS) {
rtsx_clear_sd_error(chip);
rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
@@ -1787,7 +1792,7 @@ static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
}
sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1,
- NULL, 0);
+ NULL, 0);
return STATUS_SUCCESS;
}
@@ -1795,7 +1800,7 @@ static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
u8 tune_dir)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
struct timing_phase_path path[MAX_PHASE + 1];
int i, j, cont_path_cnt;
bool new_block;
@@ -1808,7 +1813,7 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
else
final_phase = (u8)chip->sd_default_tx_phase;
- goto Search_Finish;
+ goto search_finish;
}
cont_path_cnt = 0;
@@ -1839,7 +1844,7 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
if (cont_path_cnt == 0) {
dev_dbg(rtsx_dev(chip), "No continuous phase path\n");
- goto Search_Finish;
+ goto search_finish;
} else {
int idx = cont_path_cnt - 1;
@@ -1848,7 +1853,7 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
}
if ((path[0].start == 0) &&
- (path[cont_path_cnt - 1].end == MAX_PHASE)) {
+ (path[cont_path_cnt - 1].end == MAX_PHASE)) {
path[0].start = path[cont_path_cnt - 1].start - MAX_PHASE - 1;
path[0].len += path[cont_path_cnt - 1].len;
path[0].mid = path[0].start + path[0].len / 2;
@@ -1906,14 +1911,14 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
}
}
-Search_Finish:
+search_finish:
dev_dbg(rtsx_dev(chip), "Final chosen phase: %d\n", final_phase);
return final_phase;
}
static int sd_tuning_rx(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
int i, j;
u32 raw_phase_map[3], phase_map;
@@ -1974,7 +1979,7 @@ static int sd_tuning_rx(struct rtsx_chip *chip)
static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
int i;
u32 phase_map;
@@ -1992,7 +1997,7 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_NO_CARD);
rtsx_write_register(chip, SD_CFG3,
- SD_RSP_80CLK_TIMEOUT_EN, 0);
+ SD_RSP_80CLK_TIMEOUT_EN, 0);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2002,10 +2007,10 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
continue;
retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
- sd_card->sd_addr, SD_RSP_TYPE_R1, NULL,
- 0);
+ sd_card->sd_addr, SD_RSP_TYPE_R1,
+ NULL, 0);
if ((retval == STATUS_SUCCESS) ||
- !sd_check_err_code(chip, SD_RSP_TIMEOUT))
+ !sd_check_err_code(chip, SD_RSP_TIMEOUT))
phase_map |= 1 << i;
}
@@ -2039,7 +2044,7 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
static int sd_tuning_tx(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
int i, j;
u32 raw_phase_map[3], phase_map;
@@ -2131,7 +2136,7 @@ static int sd_ddr_tuning(struct rtsx_chip *chip)
}
} else {
retval = sd_change_phase(chip, (u8)chip->sd_ddr_tx_phase,
- TUNE_TX);
+ TUNE_TX);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -2167,7 +2172,7 @@ static int mmc_ddr_tuning(struct rtsx_chip *chip)
}
} else {
retval = sd_change_phase(chip, (u8)chip->mmc_ddr_tx_phase,
- TUNE_TX);
+ TUNE_TX);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -2193,7 +2198,7 @@ static int mmc_ddr_tuning(struct rtsx_chip *chip)
int sd_switch_clock(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
int re_tuning = 0;
@@ -2231,7 +2236,7 @@ int sd_switch_clock(struct rtsx_chip *chip)
static int sd_prepare_reset(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
if (chip->asic_code)
@@ -2286,31 +2291,36 @@ static int sd_pull_ctl_disable(struct rtsx_chip *chip)
if (CHECK_PID(chip, 0x5208)) {
retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF,
- XD_D3_PD | SD_D7_PD | SD_CLK_PD | SD_D5_PD);
+ XD_D3_PD | SD_D7_PD | SD_CLK_PD |
+ SD_D5_PD);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF,
- SD_D6_PD | SD_D0_PD | SD_D1_PD | XD_D5_PD);
+ SD_D6_PD | SD_D0_PD | SD_D1_PD |
+ XD_D5_PD);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF,
- SD_D4_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ SD_D4_PD | XD_CE_PD | XD_CLE_PD |
+ XD_CD_PU);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF,
- XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
+ XD_RDY_PD | SD_D3_PD | SD_D2_PD |
+ XD_ALE_PD);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF,
- MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ MS_INS_PU | SD_WP_PD | SD_CD_PU |
+ SD_CMD_PD);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -2361,27 +2371,27 @@ int sd_pull_ctl_enable(struct rtsx_chip *chip)
if (CHECK_PID(chip, 0x5208)) {
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
- XD_D3_PD | SD_DAT7_PU | SD_CLK_NP | SD_D5_PU);
+ XD_D3_PD | SD_DAT7_PU | SD_CLK_NP | SD_D5_PU);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
- SD_D6_PU | SD_D0_PU | SD_D1_PU | XD_D5_PD);
+ SD_D6_PU | SD_D0_PU | SD_D1_PU | XD_D5_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
- SD_D4_PU | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ SD_D4_PU | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
- XD_RDY_PD | SD_D3_PU | SD_D2_PU | XD_ALE_PD);
+ XD_RDY_PD | SD_D3_PU | SD_D2_PU | XD_ALE_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
- MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU);
+ MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
- MS_D5_PD | MS_D4_PD);
+ MS_D5_PD | MS_D4_PD);
} else if (CHECK_PID(chip, 0x5288)) {
if (CHECK_BARO_PKG(chip, QFN)) {
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
- 0xA8);
+ 0xA8);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
- 0x5A);
+ 0x5A);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
- 0x95);
+ 0x95);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
- 0xAA);
+ 0xAA);
}
}
@@ -2478,7 +2488,7 @@ static int sd_dummy_clock(struct rtsx_chip *chip)
static int sd_read_lba0(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 cmd[5], bus_width;
@@ -2499,8 +2509,8 @@ static int sd_read_lba0(struct rtsx_chip *chip)
bus_width = SD_BUS_WIDTH_1;
}
- retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd,
- 5, 512, 1, bus_width, NULL, 0, 100);
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 512, 1,
+ bus_width, NULL, 0, 100);
if (retval != STATUS_SUCCESS) {
rtsx_clear_sd_error(chip);
rtsx_trace(chip);
@@ -2512,14 +2522,14 @@ static int sd_read_lba0(struct rtsx_chip *chip)
static int sd_check_wp_state(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u32 val;
u16 sd_card_type;
u8 cmd[5], buf[64];
- retval = sd_send_cmd_get_rsp(chip, APP_CMD,
- sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
+ retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -2532,12 +2542,12 @@ static int sd_check_wp_state(struct rtsx_chip *chip)
cmd[4] = 0;
retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1,
- SD_BUS_WIDTH_4, buf, 64, 250);
+ SD_BUS_WIDTH_4, buf, 64, 250);
if (retval != STATUS_SUCCESS) {
rtsx_clear_sd_error(chip);
sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2562,7 +2572,7 @@ static int sd_check_wp_state(struct rtsx_chip *chip)
static int reset_sd(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
bool hi_cap_flow = false;
int retval, i = 0, j = 0, k = 0;
bool sd_dont_switch = false;
@@ -2575,7 +2585,7 @@ static int reset_sd(struct rtsx_chip *chip)
SET_SD(sd_card);
-Switch_Fail:
+switch_fail:
i = 0;
j = 0;
@@ -2589,11 +2599,11 @@ Switch_Fail:
retval = sd_prepare_reset(chip);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
retval = sd_dummy_clock(chip);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip) && try_sdio) {
int rty_cnt = 0;
@@ -2601,11 +2611,11 @@ Switch_Fail:
for (; rty_cnt < chip->sdio_retry_cnt; rty_cnt++) {
if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_NO_CARD);
- goto Status_Fail;
+ goto status_fail;
}
retval = sd_send_cmd_get_rsp(chip, IO_SEND_OP_COND, 0,
- SD_RSP_TYPE_R4, rsp, 5);
+ SD_RSP_TYPE_R4, rsp, 5);
if (retval == STATUS_SUCCESS) {
int func_num = (rsp[1] >> 4) & 0x07;
@@ -2613,7 +2623,7 @@ Switch_Fail:
dev_dbg(rtsx_dev(chip), "SD_IO card (Function number: %d)!\n",
func_num);
chip->sd_io = 1;
- goto Status_Fail;
+ goto status_fail;
}
break;
@@ -2630,14 +2640,14 @@ Switch_Fail:
/* Start Initialization Process of SD Card */
RTY_SD_RST:
retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0,
- NULL, 0);
+ NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
wait_timeout(20);
retval = sd_send_cmd_get_rsp(chip, SEND_IF_COND, 0x000001AA,
- SD_RSP_TYPE_R7, rsp, 5);
+ SD_RSP_TYPE_R7, rsp, 5);
if (retval == STATUS_SUCCESS) {
if ((rsp[4] == 0xAA) && ((rsp[3] & 0x0f) == 0x01)) {
hi_cap_flow = true;
@@ -2649,37 +2659,37 @@ RTY_SD_RST:
voltage = SUPPORT_VOLTAGE;
retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0,
- SD_RSP_TYPE_R0, NULL, 0);
+ SD_RSP_TYPE_R0, NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
wait_timeout(20);
}
do {
retval = sd_send_cmd_get_rsp(chip, APP_CMD, 0, SD_RSP_TYPE_R1,
- NULL, 0);
+ NULL, 0);
if (retval != STATUS_SUCCESS) {
if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_NO_CARD);
- goto Status_Fail;
+ goto status_fail;
}
j++;
if (j < 3)
goto RTY_SD_RST;
else
- goto Status_Fail;
+ goto status_fail;
}
retval = sd_send_cmd_get_rsp(chip, SD_APP_OP_COND, voltage,
- SD_RSP_TYPE_R3, rsp, 5);
+ SD_RSP_TYPE_R3, rsp, 5);
if (retval != STATUS_SUCCESS) {
k++;
if (k < 3)
goto RTY_SD_RST;
else
- goto Status_Fail;
+ goto status_fail;
}
i++;
@@ -2687,7 +2697,7 @@ RTY_SD_RST:
} while (!(rsp[1] & 0x80) && (i < 255));
if (i == 255)
- goto Status_Fail;
+ goto status_fail;
if (hi_cap_flow) {
if (rsp[1] & 0x40)
@@ -2705,19 +2715,19 @@ RTY_SD_RST:
if (support_1v8) {
retval = sd_voltage_switch(chip);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
}
retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2,
- NULL, 0);
+ NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
for (i = 0; i < 3; i++) {
retval = sd_send_cmd_get_rsp(chip, SEND_RELATIVE_ADDR, 0,
- SD_RSP_TYPE_R6, rsp, 5);
+ SD_RSP_TYPE_R6, rsp, 5);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
sd_card->sd_addr = (u32)rsp[1] << 24;
sd_card->sd_addr += (u32)rsp[2] << 16;
@@ -2728,17 +2738,17 @@ RTY_SD_RST:
retval = sd_check_csd(chip, 1);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
retval = sd_select_card(chip, 1);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
#ifdef SUPPORT_SD_LOCK
SD_UNLOCK_ENTRY:
retval = sd_update_lock_status(chip);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
if (sd_card->sd_lock_status & SD_LOCKED) {
sd_card->sd_lock_status |= (SD_LOCK_1BIT_MODE | SD_PWD_EXIST);
@@ -2749,25 +2759,25 @@ SD_UNLOCK_ENTRY:
#endif
retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
retval = sd_send_cmd_get_rsp(chip, SET_CLR_CARD_DETECT, 0,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
if (support_1v8) {
retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
switch_bus_width = SD_BUS_WIDTH_4;
} else {
@@ -2775,13 +2785,13 @@ SD_UNLOCK_ENTRY:
}
retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1,
- NULL, 0);
+ NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
if (!(sd_card->raw_csd[4] & 0x40))
sd_dont_switch = true;
@@ -2804,7 +2814,7 @@ SD_UNLOCK_ENTRY:
sd_dont_switch = true;
try_sdio = false;
- goto Switch_Fail;
+ goto switch_fail;
}
} else {
if (support_1v8) {
@@ -2812,21 +2822,21 @@ SD_UNLOCK_ENTRY:
sd_dont_switch = true;
try_sdio = false;
- goto Switch_Fail;
+ goto switch_fail;
}
}
}
if (!support_1v8) {
retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
}
#ifdef SUPPORT_SD_LOCK
@@ -2845,7 +2855,7 @@ SD_UNLOCK_ENTRY:
retval = sd_set_init_para(chip);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
if (CHK_SD_DDR50(sd_card))
retval = sd_ddr_tuning(chip);
@@ -2854,20 +2864,20 @@ SD_UNLOCK_ENTRY:
if (retval != STATUS_SUCCESS) {
if (sd20_mode) {
- goto Status_Fail;
+ goto status_fail;
} else {
retval = sd_init_power(chip);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
try_sdio = false;
sd20_mode = true;
- goto Switch_Fail;
+ goto switch_fail;
}
}
sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (CHK_SD_DDR50(sd_card)) {
retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
@@ -2879,15 +2889,15 @@ SD_UNLOCK_ENTRY:
retval = sd_read_lba0(chip);
if (retval != STATUS_SUCCESS) {
if (sd20_mode) {
- goto Status_Fail;
+ goto status_fail;
} else {
retval = sd_init_power(chip);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
try_sdio = false;
sd20_mode = true;
- goto Switch_Fail;
+ goto switch_fail;
}
}
}
@@ -2895,7 +2905,7 @@ SD_UNLOCK_ENTRY:
retval = sd_check_wp_state(chip);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
chip->card_bus_width[chip->card2lun[SD_CARD]] = 4;
@@ -2918,21 +2928,21 @@ SD_UNLOCK_ENTRY:
return STATUS_SUCCESS;
-Status_Fail:
+status_fail:
rtsx_trace(chip);
return STATUS_FAIL;
}
static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 buf[8] = {0}, bus_width, *ptr;
u16 byte_cnt;
int len;
retval = sd_send_cmd_get_rsp(chip, BUSTEST_W, 0, SD_RSP_TYPE_R1, NULL,
- 0);
+ 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return SWITCH_FAIL;
@@ -2957,8 +2967,8 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
return SWITCH_ERR;
}
- retval = sd_write_data(chip, SD_TM_AUTO_WRITE_3,
- NULL, 0, byte_cnt, 1, bus_width, buf, len, 100);
+ retval = sd_write_data(chip, SD_TM_AUTO_WRITE_3, NULL, 0, byte_cnt, 1,
+ bus_width, buf, len, 100);
if (retval != STATUS_SUCCESS) {
rtsx_clear_sd_error(chip);
rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0);
@@ -2980,23 +2990,23 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
if (width == MMC_8BIT_BUS)
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L,
- 0xFF, 0x08);
+ 0xFF, 0x08);
else
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L,
- 0xFF, 0x04);
+ 0xFF, 0x04);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, 1);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0);
- rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
- SD_CALCULATE_CRC7 | SD_NO_CHECK_CRC16 | SD_NO_WAIT_BUSY_END|
- SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, SD_CALCULATE_CRC7 |
+ SD_NO_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
+ SD_CHECK_CRC7 | SD_RSP_LEN_6);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
- PINGPONG_BUFFER);
+ PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
- SD_TM_NORMAL_READ | SD_TRANSFER_START);
+ SD_TM_NORMAL_READ | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
- SD_TRANSFER_END);
+ SD_TRANSFER_END);
rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2, 0, 0);
if (width == MMC_8BIT_BUS)
@@ -3024,9 +3034,9 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
arg = 0x03B70200;
retval = sd_send_cmd_get_rsp(chip, SWITCH, arg,
- SD_RSP_TYPE_R1b, rsp, 5);
+ SD_RSP_TYPE_R1b, rsp, 5);
if ((retval == STATUS_SUCCESS) &&
- !(rsp[4] & MMC_SWITCH_ERR))
+ !(rsp[4] & MMC_SWITCH_ERR))
return SWITCH_SUCCESS;
}
} else {
@@ -3041,9 +3051,9 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
arg = 0x03B70100;
retval = sd_send_cmd_get_rsp(chip, SWITCH, arg,
- SD_RSP_TYPE_R1b, rsp, 5);
+ SD_RSP_TYPE_R1b, rsp, 5);
if ((retval == STATUS_SUCCESS) &&
- !(rsp[4] & MMC_SWITCH_ERR))
+ !(rsp[4] & MMC_SWITCH_ERR))
return SWITCH_SUCCESS;
}
}
@@ -3054,7 +3064,7 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 *ptr, card_type, card_type_mask = 0;
@@ -3065,7 +3075,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
- 0x40 | SEND_EXT_CSD);
+ 0x40 | SEND_EXT_CSD);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, 0);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, 0);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, 0);
@@ -3077,14 +3087,14 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
- SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END|
- SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
+ SD_CHECK_CRC7 | SD_RSP_LEN_6);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
- PINGPONG_BUFFER);
+ PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
- SD_TM_NORMAL_READ | SD_TRANSFER_START);
+ SD_TM_NORMAL_READ | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
- SD_TRANSFER_END);
+ SD_TRANSFER_END);
rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 196, 0xFF, 0);
rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 212, 0xFF, 0);
@@ -3097,7 +3107,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
if (retval == -ETIMEDOUT) {
rtsx_clear_sd_error(chip);
sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
}
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3106,7 +3116,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
ptr = rtsx_get_cmd_data(chip);
if (ptr[0] & SD_TRANSFER_ERR) {
sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3132,8 +3142,8 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
SET_MMC_26M(sd_card);
}
- retval = sd_send_cmd_get_rsp(chip, SWITCH,
- 0x03B90100, SD_RSP_TYPE_R1b, rsp, 5);
+ retval = sd_send_cmd_get_rsp(chip, SWITCH, 0x03B90100,
+ SD_RSP_TYPE_R1b, rsp, 5);
if ((retval != STATUS_SUCCESS) || (rsp[4] & MMC_SWITCH_ERR))
CLR_MMC_HS(sd_card);
}
@@ -3178,7 +3188,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
static int reset_mmc(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval, i = 0, j = 0, k = 0;
bool switch_ddr = true;
u8 rsp[16];
@@ -3190,7 +3200,7 @@ static int reset_mmc(struct rtsx_chip *chip)
goto MMC_UNLOCK_ENTRY;
#endif
-Switch_Fail:
+switch_fail:
retval = sd_prepare_reset(chip);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
@@ -3201,7 +3211,7 @@ Switch_Fail:
RTY_MMC_RST:
retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0,
- NULL, 0);
+ NULL, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3215,11 +3225,11 @@ RTY_MMC_RST:
}
retval = sd_send_cmd_get_rsp(chip, SEND_OP_COND,
- (SUPPORT_VOLTAGE | 0x40000000),
- SD_RSP_TYPE_R3, rsp, 5);
+ (SUPPORT_VOLTAGE | 0x40000000),
+ SD_RSP_TYPE_R3, rsp, 5);
if (retval != STATUS_SUCCESS) {
if (sd_check_err_code(chip, SD_BUSY) ||
- sd_check_err_code(chip, SD_TO_ERR)) {
+ sd_check_err_code(chip, SD_TO_ERR)) {
k++;
if (k < 20) {
sd_clr_err_code(chip);
@@ -3255,7 +3265,7 @@ RTY_MMC_RST:
CLR_MMC_SECTOR_MODE(sd_card);
retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2,
- NULL, 0);
+ NULL, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3263,7 +3273,7 @@ RTY_MMC_RST:
sd_card->sd_addr = 0x00100000;
retval = sd_send_cmd_get_rsp(chip, SET_RELATIVE_ADDR, sd_card->sd_addr,
- SD_RSP_TYPE_R6, rsp, 5);
+ SD_RSP_TYPE_R6, rsp, 5);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3284,7 +3294,7 @@ RTY_MMC_RST:
}
retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1,
- NULL, 0);
+ NULL, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3319,7 +3329,7 @@ MMC_UNLOCK_ENTRY:
}
sd_card->mmc_dont_switch_bus = 1;
rtsx_trace(chip);
- goto Switch_Fail;
+ goto switch_fail;
}
}
@@ -3345,7 +3355,7 @@ MMC_UNLOCK_ENTRY:
switch_ddr = false;
rtsx_trace(chip);
- goto Switch_Fail;
+ goto switch_fail;
}
retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
@@ -3360,7 +3370,7 @@ MMC_UNLOCK_ENTRY:
switch_ddr = false;
rtsx_trace(chip);
- goto Switch_Fail;
+ goto switch_fail;
}
}
}
@@ -3392,7 +3402,7 @@ MMC_UNLOCK_ENTRY:
int reset_sd_card(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
sd_init_reg_addr(chip);
@@ -3407,7 +3417,7 @@ int reset_sd_card(struct rtsx_chip *chip)
}
if (chip->ignore_sd && CHK_SDIO_EXIST(chip) &&
- !CHK_SDIO_IGNORED(chip)) {
+ !CHK_SDIO_IGNORED(chip)) {
if (chip->asic_code) {
retval = sd_pull_ctl_enable(chip);
if (retval != STATUS_SUCCESS) {
@@ -3416,7 +3426,8 @@ int reset_sd_card(struct rtsx_chip *chip)
}
} else {
retval = rtsx_write_register(chip, FPGA_PULL_CTL,
- FPGA_SD_PULL_CTL_BIT | 0x20, 0);
+ FPGA_SD_PULL_CTL_BIT |
+ 0x20, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3505,7 +3516,7 @@ int reset_sd_card(struct rtsx_chip *chip)
static int reset_mmc_only(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
sd_card->sd_type = 0;
@@ -3574,7 +3585,7 @@ static int reset_mmc_only(struct rtsx_chip *chip)
static int wait_data_buf_ready(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int i, retval;
for (i = 0; i < WAIT_DATA_READY_RTY_CNT; i++) {
@@ -3587,7 +3598,8 @@ static int wait_data_buf_ready(struct rtsx_chip *chip)
sd_card->sd_data_buf_ready = 0;
retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
- sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
+ sd_card->sd_addr, SD_RSP_TYPE_R1,
+ NULL, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3607,7 +3619,7 @@ static int wait_data_buf_ready(struct rtsx_chip *chip)
void sd_stop_seq_mode(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
if (sd_card->seq_mode) {
@@ -3616,7 +3628,7 @@ void sd_stop_seq_mode(struct rtsx_chip *chip)
return;
retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
- SD_RSP_TYPE_R1b, NULL, 0);
+ SD_RSP_TYPE_R1b, NULL, 0);
if (retval != STATUS_SUCCESS)
sd_set_err_code(chip, SD_STS_ERR);
@@ -3632,7 +3644,7 @@ void sd_stop_seq_mode(struct rtsx_chip *chip)
static inline int sd_auto_tune_clock(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
if (chip->asic_code) {
@@ -3679,9 +3691,9 @@ static inline int sd_auto_tune_clock(struct rtsx_chip *chip)
}
int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
- u16 sector_cnt)
+ u16 sector_cnt)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
u32 data_addr;
u8 cfg2;
int retval;
@@ -3730,20 +3742,20 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
}
if (sd_card->seq_mode &&
- ((sd_card->pre_dir != srb->sc_data_direction) ||
- ((sd_card->pre_sec_addr + sd_card->pre_sec_cnt) !=
- start_sector))) {
- if ((sd_card->pre_sec_cnt < 0x80)
- && (sd_card->pre_dir == DMA_FROM_DEVICE)
- && !CHK_SD30_SPEED(sd_card)
- && !CHK_SD_HS(sd_card)
- && !CHK_MMC_HS(sd_card)) {
+ ((sd_card->pre_dir != srb->sc_data_direction) ||
+ ((sd_card->pre_sec_addr + sd_card->pre_sec_cnt) !=
+ start_sector))) {
+ if ((sd_card->pre_sec_cnt < 0x80) &&
+ (sd_card->pre_dir == DMA_FROM_DEVICE) &&
+ !CHK_SD30_SPEED(sd_card) &&
+ !CHK_SD_HS(sd_card) &&
+ !CHK_MMC_HS(sd_card)) {
sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
}
- retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION,
- 0, SD_RSP_TYPE_R1b, NULL, 0);
+ retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
+ SD_RSP_TYPE_R1b, NULL, 0);
if (retval != STATUS_SUCCESS) {
chip->rw_need_retry = 1;
sd_set_err_code(chip, SD_STS_ERR);
@@ -3760,12 +3772,12 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
goto RW_FAIL;
}
- if ((sd_card->pre_sec_cnt < 0x80)
- && !CHK_SD30_SPEED(sd_card)
- && !CHK_SD_HS(sd_card)
- && !CHK_MMC_HS(sd_card)) {
+ if ((sd_card->pre_sec_cnt < 0x80) &&
+ !CHK_SD30_SPEED(sd_card) &&
+ !CHK_SD_HS(sd_card) &&
+ !CHK_MMC_HS(sd_card)) {
sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
}
}
@@ -3774,30 +3786,30 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 0x00);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 0x02);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
- (u8)sector_cnt);
+ (u8)sector_cnt);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
- (u8)(sector_cnt >> 8));
+ (u8)(sector_cnt >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
if (CHK_MMC_8BIT(sd_card))
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
- 0x03, SD_BUS_WIDTH_8);
+ 0x03, SD_BUS_WIDTH_8);
else if (CHK_MMC_4BIT(sd_card) || CHK_SD(sd_card))
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
- 0x03, SD_BUS_WIDTH_4);
+ 0x03, SD_BUS_WIDTH_4);
else
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
- 0x03, SD_BUS_WIDTH_1);
+ 0x03, SD_BUS_WIDTH_1);
if (sd_card->seq_mode) {
- cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16|
+ cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 |
SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 |
SD_RSP_LEN_0;
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, cfg2);
trans_dma_enable(srb->sc_data_direction, chip, sector_cnt * 512,
- DMA_512);
+ DMA_512);
if (srb->sc_data_direction == DMA_FROM_DEVICE) {
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
@@ -3808,7 +3820,7 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
}
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
- SD_TRANSFER_END, SD_TRANSFER_END);
+ SD_TRANSFER_END, SD_TRANSFER_END);
rtsx_send_cmd_no_wait(chip);
} else {
@@ -3818,22 +3830,22 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
0x40 | READ_MULTIPLE_BLOCK);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF,
- (u8)(data_addr >> 24));
+ (u8)(data_addr >> 24));
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF,
- (u8)(data_addr >> 16));
+ (u8)(data_addr >> 16));
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF,
- (u8)(data_addr >> 8));
+ (u8)(data_addr >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF,
- (u8)data_addr);
+ (u8)data_addr);
cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 |
SD_RSP_LEN_6;
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
- cfg2);
+ cfg2);
trans_dma_enable(srb->sc_data_direction, chip,
- sector_cnt * 512, DMA_512);
+ sector_cnt * 512, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
SD_TM_AUTO_READ_2 | SD_TRANSFER_START);
@@ -3861,7 +3873,8 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
}
retval = sd_send_cmd_get_rsp(chip, WRITE_MULTIPLE_BLOCK,
- data_addr, SD_RSP_TYPE_R1, NULL, 0);
+ data_addr, SD_RSP_TYPE_R1,
+ NULL, 0);
if (retval != STATUS_SUCCESS) {
chip->rw_need_retry = 1;
rtsx_trace(chip);
@@ -3874,10 +3887,10 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
SD_NO_WAIT_BUSY_END |
SD_NO_CHECK_CRC7 | SD_RSP_LEN_0;
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
- cfg2);
+ cfg2);
trans_dma_enable(srb->sc_data_direction, chip,
- sector_cnt * 512, DMA_512);
+ sector_cnt * 512, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
@@ -3891,7 +3904,7 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
}
retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
- scsi_bufflen(srb), scsi_sg_count(srb),
+ scsi_bufflen(srb), scsi_sg_count(srb),
srb->sc_data_direction, chip->sd_timeout);
if (retval < 0) {
u8 stat = 0;
@@ -3916,7 +3929,7 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
chip->rw_need_retry = 1;
retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
- SD_RSP_TYPE_R1b, NULL, 0);
+ SD_RSP_TYPE_R1b, NULL, 0);
if (retval != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_STS_ERR);
rtsx_trace(chip);
@@ -3984,8 +3997,9 @@ int soft_reset_sd_card(struct rtsx_chip *chip)
return reset_sd(chip);
}
-int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
- u32 arg, u8 rsp_type, u8 *rsp, int rsp_len, bool special_check)
+int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx, u32 arg,
+ u8 rsp_type, u8 *rsp, int rsp_len,
+ bool special_check)
{
int retval;
int timeout = 100;
@@ -4011,11 +4025,11 @@ RTY_SEND_CMD:
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, PINGPONG_BUFFER);
+ 0x01, PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
- 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
+ 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
- SD_TRANSFER_END);
+ SD_TRANSFER_END);
if (rsp_type == SD_RSP_TYPE_R2) {
for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16;
@@ -4084,7 +4098,7 @@ RTY_SEND_CMD:
}
if ((cmd_idx == SELECT_CARD) || (cmd_idx == APP_CMD) ||
- (cmd_idx == SEND_STATUS) || (cmd_idx == STOP_TRANSMISSION)) {
+ (cmd_idx == SEND_STATUS) || (cmd_idx == STOP_TRANSMISSION)) {
if ((cmd_idx != STOP_TRANSMISSION) && !special_check) {
if (ptr[1] & 0x80) {
rtsx_trace(chip);
@@ -4172,7 +4186,7 @@ int ext_sd_get_rsp(struct rtsx_chip *chip, int len, u8 *rsp, u8 rsp_type)
int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
unsigned int lun = SCSI_LUN(srb);
int len;
u8 buf[18] = {
@@ -4206,9 +4220,9 @@ int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) ||
- (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
- (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
- (srb->cmnd[8] != 0x64)) {
+ (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
+ (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
+ (srb->cmnd[8] != 0x64)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -4245,7 +4259,7 @@ int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
static inline int get_rsp_type(struct scsi_cmnd *srb, u8 *rsp_type,
- int *rsp_len)
+ int *rsp_len)
{
if (!rsp_type || !rsp_len)
return STATUS_FAIL;
@@ -4285,7 +4299,7 @@ static inline int get_rsp_type(struct scsi_cmnd *srb, u8 *rsp_type,
int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
unsigned int lun = SCSI_LUN(srb);
int retval, rsp_len;
u8 cmd_idx, rsp_type;
@@ -4339,7 +4353,7 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
if (CHK_MMC_8BIT(sd_card)) {
retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
- SD_BUS_WIDTH_8);
+ SD_BUS_WIDTH_8);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -4347,7 +4361,7 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) {
retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
- SD_BUS_WIDTH_4);
+ SD_BUS_WIDTH_4);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -4366,32 +4380,33 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = sd_select_card(chip, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Cmd_Failed;
+ goto sd_execute_cmd_failed;
}
}
if (acmd) {
retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
- sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0, false);
+ sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0,
+ false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Cmd_Failed;
+ goto sd_execute_cmd_failed;
}
}
retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type,
- sd_card->rsp, rsp_len, false);
+ sd_card->rsp, rsp_len, false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Cmd_Failed;
+ goto sd_execute_cmd_failed;
}
if (standby) {
retval = sd_select_card(chip, 1);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Cmd_Failed;
+ goto sd_execute_cmd_failed;
}
}
@@ -4399,14 +4414,14 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = sd_update_lock_status(chip);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Cmd_Failed;
+ goto sd_execute_cmd_failed;
}
#endif
scsi_set_resid(srb, 0);
return TRANSPORT_GOOD;
-SD_Execute_Cmd_Failed:
+sd_execute_cmd_failed:
sd_card->pre_cmd_err = 1;
set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
release_sd_card(chip);
@@ -4420,7 +4435,7 @@ SD_Execute_Cmd_Failed:
int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
unsigned int lun = SCSI_LUN(srb);
int retval, rsp_len, i;
bool read_err = false, cmd13_checkbit = false;
@@ -4492,10 +4507,11 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (data_len < 512) {
retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len,
- SD_RSP_TYPE_R1, NULL, 0, false);
+ SD_RSP_TYPE_R1, NULL, 0,
+ false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
}
@@ -4503,17 +4519,18 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = sd_select_card(chip, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
}
if (acmd) {
retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
- sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0, false);
+ sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0,
+ false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
}
@@ -4539,13 +4556,13 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, byte_cnt,
- blk_cnt, bus_width, buf, data_len, 2000);
+ blk_cnt, bus_width, buf, data_len, 2000);
if (retval != STATUS_SUCCESS) {
read_err = true;
kfree(buf);
rtsx_clear_sd_error(chip);
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
min_len = min(data_len, scsi_bufflen(srb));
@@ -4558,24 +4575,24 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
trans_dma_enable(DMA_FROM_DEVICE, chip, data_len, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
- 0x02);
+ 0x02);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
- 0x00);
+ 0x00);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H,
- 0xFF, (srb->cmnd[7] & 0xFE) >> 1);
+ 0xFF, (srb->cmnd[7] & 0xFE) >> 1);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L,
- 0xFF, (u8)((data_len & 0x0001FE00) >> 9));
+ 0xFF, (u8)((data_len & 0x0001FE00) >> 9));
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
- 0x40 | cmd_idx);
+ 0x40 | cmd_idx);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF,
- srb->cmnd[3]);
+ srb->cmnd[3]);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF,
- srb->cmnd[4]);
+ srb->cmnd[4]);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF,
- srb->cmnd[5]);
+ srb->cmnd[5]);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF,
- srb->cmnd[6]);
+ srb->cmnd[6]);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
@@ -4583,66 +4600,69 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
0xFF, SD_TM_AUTO_READ_2 | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
- SD_TRANSFER_END, SD_TRANSFER_END);
+ SD_TRANSFER_END, SD_TRANSFER_END);
rtsx_send_cmd_no_wait(chip);
retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
- scsi_bufflen(srb), scsi_sg_count(srb),
- DMA_FROM_DEVICE, 10000);
+ scsi_bufflen(srb),
+ scsi_sg_count(srb),
+ DMA_FROM_DEVICE, 10000);
if (retval < 0) {
read_err = true;
rtsx_clear_sd_error(chip);
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
} else {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
retval = ext_sd_get_rsp(chip, rsp_len, sd_card->rsp, rsp_type);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
if (standby) {
retval = sd_select_card(chip, 1);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
}
if (send_cmd12) {
- retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION,
- 0, SD_RSP_TYPE_R1b, NULL, 0, false);
+ retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
+ SD_RSP_TYPE_R1b, NULL, 0,
+ false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
}
if (data_len < 512) {
retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200,
- SD_RSP_TYPE_R1, NULL, 0, false);
+ SD_RSP_TYPE_R1, NULL, 0,
+ false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
}
@@ -4651,7 +4671,7 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
for (i = 0; i < 3; i++) {
retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS,
- sd_card->sd_addr,
+ sd_card->sd_addr,
SD_RSP_TYPE_R1, NULL, 0,
cmd13_checkbit);
if (retval == STATUS_SUCCESS)
@@ -4659,13 +4679,13 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
scsi_set_resid(srb, 0);
return TRANSPORT_GOOD;
-SD_Execute_Read_Cmd_Failed:
+sd_execute_read_cmd_failed:
sd_card->pre_cmd_err = 1;
set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
if (read_err)
@@ -4682,7 +4702,7 @@ SD_Execute_Read_Cmd_Failed:
int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
unsigned int lun = SCSI_LUN(srb);
int retval, rsp_len, i;
bool write_err = false, cmd13_checkbit = false;
@@ -4754,7 +4774,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
if (CHK_MMC_8BIT(sd_card)) {
retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
- SD_BUS_WIDTH_8);
+ SD_BUS_WIDTH_8);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -4762,7 +4782,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) {
retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
- SD_BUS_WIDTH_4);
+ SD_BUS_WIDTH_4);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -4779,10 +4799,11 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (data_len < 512) {
retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len,
- SD_RSP_TYPE_R1, NULL, 0, false);
+ SD_RSP_TYPE_R1, NULL, 0,
+ false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
}
@@ -4790,25 +4811,26 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = sd_select_card(chip, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
}
if (acmd) {
retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
- sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0, false);
+ sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0,
+ false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
}
retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type,
- sd_card->rsp, rsp_len, false);
+ sd_card->rsp, rsp_len, false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
if (data_len <= 512) {
@@ -4832,37 +4854,37 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_init_cmd(chip);
for (i = 0; i < 256; i++) {
rtsx_add_cmd(chip, WRITE_REG_CMD,
- PPBUF_BASE2 + i, 0xFF, buf[i]);
+ PPBUF_BASE2 + i, 0xFF, buf[i]);
}
retval = rtsx_send_cmd(chip, 0, 250);
if (retval != STATUS_SUCCESS) {
kfree(buf);
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
rtsx_init_cmd(chip);
for (i = 256; i < data_len; i++) {
rtsx_add_cmd(chip, WRITE_REG_CMD,
- PPBUF_BASE2 + i, 0xFF, buf[i]);
+ PPBUF_BASE2 + i, 0xFF, buf[i]);
}
retval = rtsx_send_cmd(chip, 0, 250);
if (retval != STATUS_SUCCESS) {
kfree(buf);
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
} else {
rtsx_init_cmd(chip);
for (i = 0; i < data_len; i++) {
rtsx_add_cmd(chip, WRITE_REG_CMD,
- PPBUF_BASE2 + i, 0xFF, buf[i]);
+ PPBUF_BASE2 + i, 0xFF, buf[i]);
}
retval = rtsx_send_cmd(chip, 0, 250);
if (retval != STATUS_SUCCESS) {
kfree(buf);
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
}
@@ -4871,20 +4893,20 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
- srb->cmnd[8] & 0x03);
+ srb->cmnd[8] & 0x03);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
- srb->cmnd[9]);
+ srb->cmnd[9]);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
- 0x00);
+ 0x00);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
- 0x01);
+ 0x01);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
- PINGPONG_BUFFER);
+ PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
- SD_TRANSFER_END, SD_TRANSFER_END);
+ SD_TRANSFER_END, SD_TRANSFER_END);
retval = rtsx_send_cmd(chip, SD_CARD, 250);
} else if (!(data_len & 0x1FF)) {
@@ -4893,35 +4915,36 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
trans_dma_enable(DMA_TO_DEVICE, chip, data_len, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
- 0x02);
+ 0x02);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
- 0x00);
+ 0x00);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H,
- 0xFF, (srb->cmnd[7] & 0xFE) >> 1);
+ 0xFF, (srb->cmnd[7] & 0xFE) >> 1);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L,
- 0xFF, (u8)((data_len & 0x0001FE00) >> 9));
+ 0xFF, (u8)((data_len & 0x0001FE00) >> 9));
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
- SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
+ SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
- SD_TRANSFER_END, SD_TRANSFER_END);
+ SD_TRANSFER_END, SD_TRANSFER_END);
rtsx_send_cmd_no_wait(chip);
retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
- scsi_bufflen(srb), scsi_sg_count(srb),
- DMA_TO_DEVICE, 10000);
+ scsi_bufflen(srb),
+ scsi_sg_count(srb),
+ DMA_TO_DEVICE, 10000);
} else {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
if (retval < 0) {
write_err = true;
rtsx_clear_sd_error(chip);
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
#ifdef SUPPORT_SD_LOCK
@@ -4949,37 +4972,39 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = sd_select_card(chip, 1);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
}
if (send_cmd12) {
- retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION,
- 0, SD_RSP_TYPE_R1b, NULL, 0, false);
+ retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
+ SD_RSP_TYPE_R1b, NULL, 0,
+ false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
}
if (data_len < 512) {
retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200,
- SD_RSP_TYPE_R1, NULL, 0, false);
+ SD_RSP_TYPE_R1, NULL, 0,
+ false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
}
@@ -4988,15 +5013,15 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
for (i = 0; i < 3; i++) {
retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS,
- sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0,
- cmd13_checkbit);
+ sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0,
+ cmd13_checkbit);
if (retval == STATUS_SUCCESS)
break;
}
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
#ifdef SUPPORT_SD_LOCK
@@ -5024,7 +5049,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
sd_card->sd_lock_status &= ~(SD_UNLOCK_POW_ON | SD_SDR_RST);
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
}
@@ -5045,7 +5070,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
scsi_set_resid(srb, 0);
return TRANSPORT_GOOD;
-SD_Execute_Write_Cmd_Failed:
+sd_execute_write_cmd_failed:
sd_card->pre_cmd_err = 1;
set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
if (write_err)
@@ -5062,7 +5087,7 @@ SD_Execute_Write_Cmd_Failed:
int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
unsigned int lun = SCSI_LUN(srb);
int count;
u16 data_len;
@@ -5104,7 +5129,7 @@ int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
unsigned int lun = SCSI_LUN(srb);
int retval;
@@ -5122,9 +5147,9 @@ int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) ||
- (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
- (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
- (srb->cmnd[8] != 0x64)) {
+ (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
+ (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
+ (srb->cmnd[8] != 0x64)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -5174,7 +5199,7 @@ int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
void sd_cleanup_work(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
if (sd_card->seq_mode) {
dev_dbg(rtsx_dev(chip), "SD: stop transmission\n");
@@ -5230,7 +5255,7 @@ int sd_power_off_card3v3(struct rtsx_chip *chip)
int release_sd_card(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
chip->card_ready &= ~SD_CARD;
diff --git a/drivers/staging/rts5208/sd.h b/drivers/staging/rts5208/sd.h
index 60b79280fb5f..55764e16b93a 100644
--- a/drivers/staging/rts5208/sd.h
+++ b/drivers/staging/rts5208/sd.h
@@ -280,14 +280,15 @@ int reset_sd_card(struct rtsx_chip *chip);
int sd_switch_clock(struct rtsx_chip *chip);
void sd_stop_seq_mode(struct rtsx_chip *chip);
int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- u32 start_sector, u16 sector_cnt);
+ u32 start_sector, u16 sector_cnt);
void sd_cleanup_work(struct rtsx_chip *chip);
int sd_power_off_card3v3(struct rtsx_chip *chip);
int release_sd_card(struct rtsx_chip *chip);
#ifdef SUPPORT_CPRM
int soft_reset_sd_card(struct rtsx_chip *chip);
int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
- u32 arg, u8 rsp_type, u8 *rsp, int rsp_len, bool special_check);
+ u32 arg, u8 rsp_type, u8 *rsp, int rsp_len,
+ bool special_check);
int ext_sd_get_rsp(struct rtsx_chip *chip, int len, u8 *rsp, u8 rsp_type);
int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip);
diff --git a/drivers/staging/rts5208/spi.c b/drivers/staging/rts5208/spi.c
index 13c539c83838..8b8cd955dfeb 100644
--- a/drivers/staging/rts5208/spi.c
+++ b/drivers/staging/rts5208/spi.c
@@ -29,7 +29,7 @@
static inline void spi_set_err_code(struct rtsx_chip *chip, u8 err_code)
{
- struct spi_info *spi = &(chip->spi);
+ struct spi_info *spi = &chip->spi;
spi->err_code = err_code;
}
@@ -57,7 +57,7 @@ static int spi_init(struct rtsx_chip *chip)
static int spi_set_init_para(struct rtsx_chip *chip)
{
- struct spi_info *spi = &(chip->spi);
+ struct spi_info *spi = &chip->spi;
int retval;
retval = rtsx_write_register(chip, SPI_CLK_DIVIDER1, 0xFF,
@@ -117,9 +117,9 @@ static int sf_polling_status(struct rtsx_chip *chip, int msec)
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, SPI_RDSR);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_POLLING_MODE0);
+ SPI_TRANSFER0_START | SPI_POLLING_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, msec);
if (retval < 0) {
@@ -134,7 +134,7 @@ static int sf_polling_status(struct rtsx_chip *chip, int msec)
static int sf_enable_write(struct rtsx_chip *chip, u8 ins)
{
- struct spi_info *spi = &(chip->spi);
+ struct spi_info *spi = &chip->spi;
int retval;
if (!spi->write_en)
@@ -144,11 +144,11 @@ static int sf_enable_write(struct rtsx_chip *chip, u8 ins)
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
- SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_C_MODE0);
+ SPI_TRANSFER0_START | SPI_C_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
@@ -163,7 +163,7 @@ static int sf_enable_write(struct rtsx_chip *chip, u8 ins)
static int sf_disable_write(struct rtsx_chip *chip, u8 ins)
{
- struct spi_info *spi = &(chip->spi);
+ struct spi_info *spi = &chip->spi;
int retval;
if (!spi->write_en)
@@ -173,11 +173,11 @@ static int sf_disable_write(struct rtsx_chip *chip, u8 ins)
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
- SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_C_MODE0);
+ SPI_TRANSFER0_START | SPI_C_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
@@ -191,27 +191,27 @@ static int sf_disable_write(struct rtsx_chip *chip, u8 ins)
}
static void sf_program(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr,
- u16 len)
+ u16 len)
{
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
- SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, (u8)len);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, (u8)(len >> 8));
if (addr_mode) {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
- (u8)(addr >> 8));
+ (u8)(addr >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
- (u8)(addr >> 16));
+ (u8)(addr >> 16));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CADO_MODE0);
+ SPI_TRANSFER0_START | SPI_CADO_MODE0);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CDO_MODE0);
+ SPI_TRANSFER0_START | SPI_CDO_MODE0);
}
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
}
static int sf_erase(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr)
@@ -222,21 +222,21 @@ static int sf_erase(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr)
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
- SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
if (addr_mode) {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
- (u8)(addr >> 8));
+ (u8)(addr >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
- (u8)(addr >> 16));
+ (u8)(addr >> 16));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CA_MODE0);
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_C_MODE0);
+ SPI_TRANSFER0_START | SPI_C_MODE0);
}
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
@@ -322,9 +322,9 @@ static int spi_eeprom_program_enable(struct rtsx_chip *chip)
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x86);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x13);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CA_MODE0);
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
@@ -358,9 +358,9 @@ int spi_erase_eeprom_chip(struct rtsx_chip *chip)
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x12);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x84);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CA_MODE0);
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
@@ -402,9 +402,9 @@ int spi_erase_eeprom_byte(struct rtsx_chip *chip, u16 addr)
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, (u8)(addr >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x46);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CA_MODE0);
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
@@ -442,9 +442,9 @@ int spi_read_eeprom(struct rtsx_chip *chip, u16 addr, u8 *val)
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x46);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, 1);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CADI_MODE0);
+ SPI_TRANSFER0_START | SPI_CADI_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
@@ -497,9 +497,9 @@ int spi_write_eeprom(struct rtsx_chip *chip, u16 addr, u8 val)
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, (u8)(addr >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x4E);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CA_MODE0);
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
@@ -518,12 +518,12 @@ int spi_write_eeprom(struct rtsx_chip *chip, u16 addr, u8 val)
int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct spi_info *spi = &(chip->spi);
+ struct spi_info *spi = &chip->spi;
dev_dbg(rtsx_dev(chip), "spi_get_status: err_code = 0x%x\n",
spi->err_code);
- rtsx_stor_set_xfer_buf(&(spi->err_code),
- min_t(int, scsi_bufflen(srb), 1), srb);
+ rtsx_stor_set_xfer_buf(&spi->err_code,
+ min_t(int, scsi_bufflen(srb), 1), srb);
scsi_set_resid(srb, scsi_bufflen(srb) - 1);
return STATUS_SUCCESS;
@@ -531,7 +531,7 @@ int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
int spi_set_parameter(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct spi_info *spi = &(chip->spi);
+ struct spi_info *spi = &chip->spi;
spi_set_err_code(chip, SPI_NO_ERR);
@@ -574,37 +574,37 @@ int spi_read_flash_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
- PINGPONG_BUFFER);
+ PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, srb->cmnd[3]);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, srb->cmnd[4]);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, srb->cmnd[5]);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, srb->cmnd[6]);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
- SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, srb->cmnd[7]);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, srb->cmnd[8]);
if (len == 0) {
if (srb->cmnd[9]) {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0,
- 0xFF, SPI_TRANSFER0_START | SPI_CA_MODE0);
+ 0xFF, SPI_TRANSFER0_START | SPI_CA_MODE0);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0,
- 0xFF, SPI_TRANSFER0_START | SPI_C_MODE0);
+ 0xFF, SPI_TRANSFER0_START | SPI_C_MODE0);
}
} else {
if (srb->cmnd[9]) {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CADI_MODE0);
+ SPI_TRANSFER0_START | SPI_CADI_MODE0);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CDI_MODE0);
+ SPI_TRANSFER0_START | SPI_CDI_MODE0);
}
}
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
@@ -682,38 +682,38 @@ int spi_read_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (slow_read) {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF,
- (u8)addr);
+ (u8)addr);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
- (u8)(addr >> 8));
+ (u8)(addr >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
- (u8)(addr >> 16));
+ (u8)(addr >> 16));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
- SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
- (u8)addr);
+ (u8)addr);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
- (u8)(addr >> 8));
+ (u8)(addr >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR3, 0xFF,
- (u8)(addr >> 16));
+ (u8)(addr >> 16));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
- SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_32);
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_32);
}
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF,
- (u8)(pagelen >> 8));
+ (u8)(pagelen >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF,
- (u8)pagelen);
+ (u8)pagelen);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CADI_MODE0);
+ SPI_TRANSFER0_START | SPI_CADI_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0,
- SPI_TRANSFER0_END, SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END, SPI_TRANSFER0_END);
rtsx_send_cmd_no_wait(chip);
retval = rtsx_transfer_data(chip, 0, buf, pagelen, 0,
- DMA_FROM_DEVICE, 10000);
+ DMA_FROM_DEVICE, 10000);
if (retval < 0) {
kfree(buf);
rtsx_clear_spi_error(chip);
@@ -723,7 +723,7 @@ int spi_read_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
rtsx_stor_access_xfer_buf(buf, pagelen, srb, &index, &offset,
- TO_XFER_BUF);
+ TO_XFER_BUF);
addr += pagelen;
len -= pagelen;
@@ -775,14 +775,14 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
rtsx_stor_access_xfer_buf(buf, 1, srb, &index, &offset,
- FROM_XFER_BUF);
+ FROM_XFER_BUF);
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, PINGPONG_BUFFER);
+ 0x01, PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF,
- buf[0]);
+ buf[0]);
sf_program(chip, ins, 1, addr, 1);
retval = rtsx_send_cmd(chip, 0, 100);
@@ -824,14 +824,14 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
while (len) {
rtsx_stor_access_xfer_buf(buf, 1, srb, &index, &offset,
- FROM_XFER_BUF);
+ FROM_XFER_BUF);
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, PINGPONG_BUFFER);
+ 0x01, PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF,
- buf[0]);
+ buf[0]);
if (first_byte) {
sf_program(chip, ins, 1, addr, 1);
first_byte = 0;
@@ -899,10 +899,10 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_send_cmd_no_wait(chip);
rtsx_stor_access_xfer_buf(buf, pagelen, srb, &index,
- &offset, FROM_XFER_BUF);
+ &offset, FROM_XFER_BUF);
retval = rtsx_transfer_data(chip, 0, buf, pagelen, 0,
- DMA_TO_DEVICE, 100);
+ DMA_TO_DEVICE, 100);
if (retval < 0) {
kfree(buf);
rtsx_clear_spi_error(chip);
@@ -1010,18 +1010,18 @@ int spi_write_flash_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
- PINGPONG_BUFFER);
+ PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
- SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, 0);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, 1);
rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF, status);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CDO_MODE0);
+ SPI_TRANSFER0_START | SPI_CDO_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval != STATUS_SUCCESS) {
diff --git a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c
index 1de02bb98839..85aba05acbc1 100644
--- a/drivers/staging/rts5208/xd.c
+++ b/drivers/staging/rts5208/xd.c
@@ -37,21 +37,21 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk, u16 logoff,
static inline void xd_set_err_code(struct rtsx_chip *chip, u8 err_code)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
xd_card->err_code = err_code;
}
static inline int xd_check_err_code(struct rtsx_chip *chip, u8 err_code)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
return (xd_card->err_code == err_code);
}
static int xd_set_init_para(struct rtsx_chip *chip)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int retval;
if (chip->asic_code)
@@ -70,7 +70,7 @@ static int xd_set_init_para(struct rtsx_chip *chip)
static int xd_switch_clock(struct rtsx_chip *chip)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int retval;
retval = select_card(chip, XD_CARD);
@@ -97,9 +97,9 @@ static int xd_read_id(struct rtsx_chip *chip, u8 id_cmd, u8 *id_buf, u8 buf_len)
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, id_cmd);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
- XD_TRANSFER_START | XD_READ_ID);
+ XD_TRANSFER_START | XD_READ_ID);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
- XD_TRANSFER_END);
+ XD_TRANSFER_END);
for (i = 0; i < 4; i++)
rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_ADDRESS1 + i), 0, 0);
@@ -122,28 +122,30 @@ static int xd_read_id(struct rtsx_chip *chip, u8 id_cmd, u8 *id_buf, u8 buf_len)
static void xd_assign_phy_addr(struct rtsx_chip *chip, u32 addr, u8 mode)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
switch (mode) {
case XD_RW_ADDR:
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, 0);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1, 0xFF, (u8)addr);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2,
- 0xFF, (u8)(addr >> 8));
+ 0xFF, (u8)(addr >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS3,
- 0xFF, (u8)(addr >> 16));
+ 0xFF, (u8)(addr >> 16));
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
- xd_card->addr_cycle | XD_CALC_ECC | XD_BA_NO_TRANSFORM);
+ xd_card->addr_cycle |
+ XD_CALC_ECC |
+ XD_BA_NO_TRANSFORM);
break;
case XD_ERASE_ADDR:
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, (u8)addr);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1,
- 0xFF, (u8)(addr >> 8));
+ 0xFF, (u8)(addr >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2,
- 0xFF, (u8)(addr >> 16));
+ 0xFF, (u8)(addr >> 16));
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
- (xd_card->addr_cycle - 1) | XD_CALC_ECC |
+ (xd_card->addr_cycle - 1) | XD_CALC_ECC |
XD_BA_NO_TRANSFORM);
break;
@@ -153,7 +155,7 @@ static void xd_assign_phy_addr(struct rtsx_chip *chip, u32 addr, u8 mode)
}
static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr,
- u8 *buf, int buf_len)
+ u8 *buf, int buf_len)
{
int retval, i;
@@ -162,16 +164,16 @@ static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr,
xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
- 0xFF, XD_TRANSFER_START | XD_READ_REDUNDANT);
+ 0xFF, XD_TRANSFER_START | XD_READ_REDUNDANT);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
for (i = 0; i < 6; i++)
rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_PAGE_STATUS + i),
- 0, 0);
+ 0, 0);
for (i = 0; i < 4; i++)
rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_RESERVED0 + i),
- 0, 0);
+ 0, 0);
rtsx_add_cmd(chip, READ_REG_CMD, XD_PARITY, 0, 0);
retval = rtsx_send_cmd(chip, XD_CARD, 500);
@@ -192,7 +194,7 @@ static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr,
}
static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset,
- u8 *buf, int buf_len)
+ u8 *buf, int buf_len)
{
int retval, i;
@@ -205,7 +207,7 @@ static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset,
for (i = 0; i < buf_len; i++)
rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + offset + i,
- 0, 0);
+ 0, 0);
retval = rtsx_send_cmd(chip, 0, 250);
if (retval < 0) {
@@ -220,7 +222,7 @@ static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset,
}
static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf,
- int buf_len)
+ int buf_len)
{
int retval;
u8 reg;
@@ -235,15 +237,15 @@ static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf,
xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, PINGPONG_BUFFER);
+ 0x01, PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
- XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
+ XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
- XD_TRANSFER_START | XD_READ_PAGES);
+ XD_TRANSFER_START | XD_READ_PAGES);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
- XD_TRANSFER_END);
+ XD_TRANSFER_END);
retval = rtsx_send_cmd(chip, XD_CARD, 250);
if (retval == -ETIMEDOUT) {
@@ -347,27 +349,27 @@ static void xd_fill_pull_ctl_disable(struct rtsx_chip *chip)
{
if (CHECK_PID(chip, 0x5208)) {
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
- XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
+ XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
- XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
+ XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
- XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
- XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD);
+ XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
- MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
- MS_D5_PD | MS_D4_PD);
+ MS_D5_PD | MS_D4_PD);
} else if (CHECK_PID(chip, 0x5288)) {
if (CHECK_BARO_PKG(chip, QFN)) {
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1,
- 0xFF, 0x55);
+ 0xFF, 0x55);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2,
- 0xFF, 0x55);
+ 0xFF, 0x55);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3,
- 0xFF, 0x4B);
+ 0xFF, 0x4B);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4,
- 0xFF, 0x69);
+ 0xFF, 0x69);
}
}
}
@@ -386,27 +388,27 @@ static void xd_fill_pull_ctl_enable(struct rtsx_chip *chip)
{
if (CHECK_PID(chip, 0x5208)) {
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
- XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
+ XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
- XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
+ XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
- XD_WP_PD | XD_CE_PU | XD_CLE_PD | XD_CD_PU);
+ XD_WP_PD | XD_CE_PU | XD_CLE_PD | XD_CD_PU);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
- XD_RDY_PU | XD_WE_PU | XD_RE_PU | XD_ALE_PD);
+ XD_RDY_PU | XD_WE_PU | XD_RE_PU | XD_ALE_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
- MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
- MS_D5_PD | MS_D4_PD);
+ MS_D5_PD | MS_D4_PD);
} else if (CHECK_PID(chip, 0x5288)) {
if (CHECK_BARO_PKG(chip, QFN)) {
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1,
- 0xFF, 0x55);
+ 0xFF, 0x55);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2,
- 0xFF, 0x55);
+ 0xFF, 0x55);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3,
- 0xFF, 0x53);
+ 0xFF, 0x53);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4,
- 0xFF, 0xA9);
+ 0xFF, 0xA9);
}
}
}
@@ -417,31 +419,46 @@ static int xd_pull_ctl_disable(struct rtsx_chip *chip)
if (CHECK_PID(chip, 0x5208)) {
retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF,
- XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
+ XD_D3_PD |
+ XD_D2_PD |
+ XD_D1_PD |
+ XD_D0_PD);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF,
- XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
+ XD_D7_PD |
+ XD_D6_PD |
+ XD_D5_PD |
+ XD_D4_PD);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF,
- XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ XD_WP_PD |
+ XD_CE_PD |
+ XD_CLE_PD |
+ XD_CD_PU);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF,
- XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD);
+ XD_RDY_PD |
+ XD_WE_PD |
+ XD_RE_PD |
+ XD_ALE_PD);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF,
- MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ MS_INS_PU |
+ SD_WP_PD |
+ SD_CD_PU |
+ SD_CMD_PD);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -486,7 +503,7 @@ static int xd_pull_ctl_disable(struct rtsx_chip *chip)
static int reset_xd(struct rtsx_chip *chip)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int retval, i, j;
u8 *ptr, id_buf[4], redunt[11];
@@ -499,7 +516,7 @@ static int reset_xd(struct rtsx_chip *chip)
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS, 0xFF,
- XD_PGSTS_NOT_FF);
+ XD_PGSTS_NOT_FF);
if (chip->asic_code) {
if (!CHECK_PID(chip, 0x5288))
xd_fill_pull_ctl_disable(chip);
@@ -507,12 +524,13 @@ static int reset_xd(struct rtsx_chip *chip)
xd_fill_pull_ctl_stage1_barossa(chip);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
- (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN3) | 0x20);
+ (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN3) |
+ 0x20);
}
if (!chip->ft2_fast_mode)
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_INIT,
- XD_NO_AUTO_PWR_OFF, 0);
+ XD_NO_AUTO_PWR_OFF, 0);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN, 0);
@@ -537,8 +555,9 @@ static int reset_xd(struct rtsx_chip *chip)
xd_fill_pull_ctl_enable(chip);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
- (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN2) |
- 0x20);
+ (FPGA_XD_PULL_CTL_EN1 &
+ FPGA_XD_PULL_CTL_EN2) |
+ 0x20);
}
retval = rtsx_send_cmd(chip, XD_CARD, 100);
@@ -571,8 +590,9 @@ static int reset_xd(struct rtsx_chip *chip)
xd_fill_pull_ctl_enable(chip);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
- (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN2) |
- 0x20);
+ (FPGA_XD_PULL_CTL_EN1 &
+ FPGA_XD_PULL_CTL_EN2) |
+ 0x20);
}
}
@@ -599,16 +619,17 @@ static int reset_xd(struct rtsx_chip *chip)
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DTCTL, 0xFF,
- XD_TIME_SETUP_STEP * 3 +
- XD_TIME_RW_STEP * (2 + i) + XD_TIME_RWN_STEP * i);
+ XD_TIME_SETUP_STEP * 3 +
+ XD_TIME_RW_STEP * (2 + i) + XD_TIME_RWN_STEP * i);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CATCTL, 0xFF,
- XD_TIME_SETUP_STEP * 3 + XD_TIME_RW_STEP * (4 + i) +
- XD_TIME_RWN_STEP * (3 + i));
+ XD_TIME_SETUP_STEP * 3 +
+ XD_TIME_RW_STEP * (4 + i) +
+ XD_TIME_RWN_STEP * (3 + i));
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
- XD_TRANSFER_START | XD_RESET);
+ XD_TRANSFER_START | XD_RESET);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);
@@ -625,7 +646,7 @@ static int reset_xd(struct rtsx_chip *chip)
ptr[0], ptr[1]);
if (((ptr[0] & READY_FLAG) != READY_STATE) ||
- !(ptr[1] & XD_RDY))
+ !(ptr[1] & XD_RDY))
continue;
retval = xd_read_id(chip, READ_ID, id_buf, 4);
@@ -773,7 +794,7 @@ static int reset_xd(struct rtsx_chip *chip)
if (redunt[PAGE_STATUS] != XD_GPG) {
for (j = 1; j <= 8; j++) {
retval = xd_read_redundant(chip, page_addr + j,
- redunt, 11);
+ redunt, 11);
if (retval == STATUS_SUCCESS) {
if (redunt[PAGE_STATUS] == XD_GPG)
break;
@@ -786,7 +807,7 @@ static int reset_xd(struct rtsx_chip *chip)
/* Check CIS data */
if ((redunt[BLOCK_STATUS] == XD_GBLK) &&
- (redunt[PARITY] & XD_BA1_ALL0)) {
+ (redunt[PARITY] & XD_BA1_ALL0)) {
u8 buf[10];
page_addr += j;
@@ -798,11 +819,11 @@ static int reset_xd(struct rtsx_chip *chip)
}
if ((buf[0] == 0x01) && (buf[1] == 0x03) &&
- (buf[2] == 0xD9)
- && (buf[3] == 0x01) && (buf[4] == 0xFF)
- && (buf[5] == 0x18) && (buf[6] == 0x02)
- && (buf[7] == 0xDF) && (buf[8] == 0x01)
- && (buf[9] == 0x20)) {
+ (buf[2] == 0xD9) &&
+ (buf[3] == 0x01) && (buf[4] == 0xFF) &&
+ (buf[5] == 0x18) && (buf[6] == 0x02) &&
+ (buf[7] == 0xDF) && (buf[8] == 0x01) &&
+ (buf[9] == 0x20)) {
xd_card->cis_block = (u16)i;
}
}
@@ -861,7 +882,7 @@ static u16 xd_load_log_block_addr(u8 *redunt)
static int xd_init_l2p_tbl(struct rtsx_chip *chip)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int size, i;
dev_dbg(rtsx_dev(chip), "xd_init_l2p_tbl: zone_cnt = %d\n",
@@ -910,7 +931,7 @@ static inline void free_zone(struct zone_entry *zone)
static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
struct zone_entry *zone;
int zone_no;
@@ -920,15 +941,15 @@ static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk)
zone_no, xd_card->zone_cnt);
return;
}
- zone = &(xd_card->zone[zone_no]);
+ zone = &xd_card->zone[zone_no];
- if (zone->free_table == NULL) {
+ if (!zone->free_table) {
if (xd_build_l2p_tbl(chip, zone_no) != STATUS_SUCCESS)
return;
}
- if ((zone->set_index >= XD_FREE_TABLE_CNT)
- || (zone->set_index < 0)) {
+ if ((zone->set_index >= XD_FREE_TABLE_CNT) ||
+ (zone->set_index < 0)) {
free_zone(zone);
dev_dbg(rtsx_dev(chip), "Set unused block fail, invalid set_index\n");
return;
@@ -945,7 +966,7 @@ static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk)
static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
struct zone_entry *zone;
u32 phy_blk;
@@ -954,10 +975,10 @@ static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no)
zone_no, xd_card->zone_cnt);
return BLK_NOT_FOUND;
}
- zone = &(xd_card->zone[zone_no]);
+ zone = &xd_card->zone[zone_no];
if ((zone->unused_blk_cnt == 0) ||
- (zone->set_index == zone->get_index)) {
+ (zone->set_index == zone->get_index)) {
free_zone(zone);
dev_dbg(rtsx_dev(chip), "Get unused block fail, no unused block available\n");
return BLK_NOT_FOUND;
@@ -982,22 +1003,22 @@ static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no)
}
static void xd_set_l2p_tbl(struct rtsx_chip *chip,
- int zone_no, u16 log_off, u16 phy_off)
+ int zone_no, u16 log_off, u16 phy_off)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
struct zone_entry *zone;
- zone = &(xd_card->zone[zone_no]);
+ zone = &xd_card->zone[zone_no];
zone->l2p_table[log_off] = phy_off;
}
static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
struct zone_entry *zone;
int retval;
- zone = &(xd_card->zone[zone_no]);
+ zone = &xd_card->zone[zone_no];
if (zone->l2p_table[log_off] == 0xFFFF) {
u32 phy_blk = 0;
int i;
@@ -1023,7 +1044,7 @@ static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off)
}
retval = xd_init_page(chip, phy_blk, log_off,
- 0, xd_card->page_off + 1);
+ 0, xd_card->page_off + 1);
if (retval == STATUS_SUCCESS)
break;
}
@@ -1041,7 +1062,7 @@ static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off)
int reset_xd_card(struct rtsx_chip *chip)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int retval;
memset(xd_card, 0, sizeof(struct xd_info));
@@ -1077,7 +1098,7 @@ int reset_xd_card(struct rtsx_chip *chip)
static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int retval;
u32 page_addr;
u8 reg = 0;
@@ -1107,12 +1128,12 @@ static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk)
xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF,
- xd_card->page_off + 1);
+ xd_card->page_off + 1);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
- XD_TRANSFER_START | XD_WRITE_REDUNDANT);
+ XD_TRANSFER_START | XD_WRITE_REDUNDANT);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
retval = rtsx_send_cmd(chip, XD_CARD, 500);
if (retval < 0) {
@@ -1132,7 +1153,7 @@ static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk)
static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
u16 logoff, u8 start_page, u8 end_page)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int retval;
u32 page_addr;
u8 reg = 0;
@@ -1153,7 +1174,7 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, 0xFF);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, 0xFF);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H,
- 0xFF, (u8)(logoff >> 8));
+ 0xFF, (u8)(logoff >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)logoff);
page_addr = (phy_blk << xd_card->block_shift) + start_page;
@@ -1161,15 +1182,15 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG,
- XD_BA_TRANSFORM, XD_BA_TRANSFORM);
+ XD_BA_TRANSFORM, XD_BA_TRANSFORM);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT,
- 0xFF, (end_page - start_page));
+ 0xFF, (end_page - start_page));
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
- 0xFF, XD_TRANSFER_START | XD_WRITE_REDUNDANT);
+ 0xFF, XD_TRANSFER_START | XD_WRITE_REDUNDANT);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
retval = rtsx_send_cmd(chip, XD_CARD, 500);
if (retval < 0) {
@@ -1191,7 +1212,7 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
u8 start_page, u8 end_page)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
u32 old_page, new_page;
u8 i, reg = 0;
int retval;
@@ -1235,11 +1256,11 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
- XD_AUTO_CHK_DATA_STATUS, 0);
+ XD_AUTO_CHK_DATA_STATUS, 0);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
- XD_TRANSFER_START | XD_READ_PAGES);
+ XD_TRANSFER_START | XD_READ_PAGES);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
retval = rtsx_send_cmd(chip, XD_CARD, 500);
if (retval < 0) {
@@ -1250,22 +1271,24 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
wait_timeout(100);
if (detect_card_cd(chip,
- XD_CARD) != STATUS_SUCCESS) {
+ XD_CARD) != STATUS_SUCCESS) {
xd_set_err_code(chip, XD_NO_CARD);
rtsx_trace(chip);
return STATUS_FAIL;
}
if (((reg & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ==
- (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
- || ((reg & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) ==
+ (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ||
+ ((reg & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) ==
(XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
rtsx_write_register(chip,
- XD_PAGE_STATUS, 0xFF,
- XD_BPG);
+ XD_PAGE_STATUS,
+ 0xFF,
+ XD_BPG);
rtsx_write_register(chip,
- XD_BLOCK_STATUS, 0xFF,
- XD_GBLK);
+ XD_BLOCK_STATUS,
+ 0xFF,
+ XD_GBLK);
XD_SET_BAD_OLDBLK(xd_card);
dev_dbg(rtsx_dev(chip), "old block 0x%x ecc error\n",
old_blk);
@@ -1287,7 +1310,7 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
XD_TRANSFER_START | XD_WRITE_PAGES);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
retval = rtsx_send_cmd(chip, XD_CARD, 300);
if (retval < 0) {
@@ -1320,9 +1343,9 @@ static int xd_reset_cmd(struct rtsx_chip *chip)
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
- 0xFF, XD_TRANSFER_START | XD_RESET);
+ 0xFF, XD_TRANSFER_START | XD_RESET);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);
@@ -1342,7 +1365,7 @@ static int xd_reset_cmd(struct rtsx_chip *chip)
static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
u32 page_addr;
u8 reg = 0, *ptr;
int i, retval;
@@ -1360,9 +1383,9 @@ static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
xd_assign_phy_addr(chip, page_addr, XD_ERASE_ADDR);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
- XD_TRANSFER_START | XD_ERASE);
+ XD_TRANSFER_START | XD_ERASE);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
retval = rtsx_send_cmd(chip, XD_CARD, 250);
@@ -1403,7 +1426,7 @@ static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
struct zone_entry *zone;
int retval;
u32 start, end, i;
@@ -1413,7 +1436,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
dev_dbg(rtsx_dev(chip), "xd_build_l2p_tbl: %d\n", zone_no);
- if (xd_card->zone == NULL) {
+ if (!xd_card->zone) {
retval = xd_init_l2p_tbl(chip);
if (retval != STATUS_SUCCESS)
return retval;
@@ -1425,22 +1448,22 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
return STATUS_SUCCESS;
}
- zone = &(xd_card->zone[zone_no]);
+ zone = &xd_card->zone[zone_no];
- if (zone->l2p_table == NULL) {
+ if (!zone->l2p_table) {
zone->l2p_table = vmalloc(2000);
if (!zone->l2p_table) {
rtsx_trace(chip);
- goto Build_Fail;
+ goto build_fail;
}
}
memset((u8 *)(zone->l2p_table), 0xff, 2000);
- if (zone->free_table == NULL) {
+ if (!zone->free_table) {
zone->free_table = vmalloc(XD_FREE_TABLE_CNT * 2);
if (!zone->free_table) {
rtsx_trace(chip);
- goto Build_Fail;
+ goto build_fail;
}
}
memset((u8 *)(zone->free_table), 0xff, XD_FREE_TABLE_CNT * 2);
@@ -1466,7 +1489,8 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
dev_dbg(rtsx_dev(chip), "start block 0x%x, end block 0x%x\n",
start, end);
- zone->set_index = zone->get_index = 0;
+ zone->set_index = 0;
+ zone->get_index = 0;
zone->unused_blk_cnt = 0;
for (i = start; i < end; i++) {
@@ -1490,7 +1514,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
cur_fst_page_logoff = xd_load_log_block_addr(redunt);
if ((cur_fst_page_logoff == 0xFFFF) ||
- (cur_fst_page_logoff > max_logoff)) {
+ (cur_fst_page_logoff > max_logoff)) {
retval = xd_erase_block(chip, i);
if (retval == STATUS_SUCCESS)
xd_set_unused_block(chip, i);
@@ -1498,7 +1522,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
}
if ((zone_no == 0) && (cur_fst_page_logoff == 0) &&
- (redunt[PAGE_STATUS] != XD_GPG))
+ (redunt[PAGE_STATUS] != XD_GPG))
XD_SET_MBR_FAIL(xd_card);
if (zone->l2p_table[cur_fst_page_logoff] == 0xFFFF) {
@@ -1524,7 +1548,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
for (m = 0; m < 3; m++) {
retval = xd_read_redundant(chip, page_addr,
- redunt, 11);
+ redunt, 11);
if (retval == STATUS_SUCCESS)
break;
}
@@ -1581,7 +1605,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
return STATUS_SUCCESS;
-Build_Fail:
+build_fail:
vfree(zone->l2p_table);
zone->l2p_table = NULL;
vfree(zone->free_table);
@@ -1598,9 +1622,9 @@ static int xd_send_cmd(struct rtsx_chip *chip, u8 cmd)
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, cmd);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
- XD_TRANSFER_START | XD_SET_CMD);
+ XD_TRANSFER_START | XD_SET_CMD);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
retval = rtsx_send_cmd(chip, XD_CARD, 200);
if (retval < 0) {
@@ -1612,18 +1636,18 @@ static int xd_send_cmd(struct rtsx_chip *chip, u8 cmd)
}
static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
- u32 log_blk, u8 start_page, u8 end_page,
- u8 *buf, unsigned int *index,
- unsigned int *offset)
+ u32 log_blk, u8 start_page, u8 end_page,
+ u8 *buf, unsigned int *index,
+ unsigned int *offset)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
u32 page_addr, new_blk;
u16 log_off;
u8 reg_val, page_cnt;
int zone_no, retval, i;
if (start_page > end_page)
- goto Status_Fail;
+ goto status_fail;
page_cnt = end_page - start_page;
zone_no = (int)(log_blk / 1000);
@@ -1639,7 +1663,7 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
xd_set_err_code(chip, XD_NO_CARD);
- goto Status_Fail;
+ goto status_fail;
}
}
}
@@ -1653,37 +1677,38 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
- XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
+ XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
trans_dma_enable(chip->srb->sc_data_direction, chip,
- page_cnt * 512, DMA_512);
+ page_cnt * 512, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
- XD_TRANSFER_START | XD_READ_PAGES);
+ XD_TRANSFER_START | XD_READ_PAGES);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END | XD_PPB_EMPTY, XD_TRANSFER_END | XD_PPB_EMPTY);
+ XD_TRANSFER_END | XD_PPB_EMPTY,
+ XD_TRANSFER_END | XD_PPB_EMPTY);
rtsx_send_cmd_no_wait(chip);
retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512,
- scsi_sg_count(chip->srb),
- index, offset, DMA_FROM_DEVICE,
- chip->xd_timeout);
+ scsi_sg_count(chip->srb),
+ index, offset, DMA_FROM_DEVICE,
+ chip->xd_timeout);
if (retval < 0) {
rtsx_clear_xd_error(chip);
if (retval == -ETIMEDOUT) {
xd_set_err_code(chip, XD_TO_ERROR);
- goto Status_Fail;
+ goto status_fail;
} else {
rtsx_trace(chip);
- goto Fail;
+ goto fail;
}
}
return STATUS_SUCCESS;
-Fail:
+fail:
retval = rtsx_read_register(chip, XD_PAGE_STATUS, &reg_val);
if (retval) {
rtsx_trace(chip);
@@ -1699,15 +1724,15 @@ Fail:
return retval;
}
- if (((reg_val & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
- == (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
- || ((reg_val & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))
- == (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
+ if (((reg_val & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ==
+ (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ||
+ ((reg_val & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) ==
+ (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
wait_timeout(100);
if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
xd_set_err_code(chip, XD_NO_CARD);
- goto Status_Fail;
+ goto status_fail;
}
xd_set_err_code(chip, XD_ECC_ERROR);
@@ -1715,11 +1740,11 @@ Fail:
new_blk = xd_get_unused_block(chip, zone_no);
if (new_blk == NO_NEW_BLK) {
XD_CLR_BAD_OLDBLK(xd_card);
- goto Status_Fail;
+ goto status_fail;
}
retval = xd_copy_page(chip, phy_blk, new_blk, 0,
- xd_card->page_off + 1);
+ xd_card->page_off + 1);
if (retval != STATUS_SUCCESS) {
if (!XD_CHK_BAD_NEWBLK(xd_card)) {
retval = xd_erase_block(chip, new_blk);
@@ -1729,7 +1754,7 @@ Fail:
XD_CLR_BAD_NEWBLK(xd_card);
}
XD_CLR_BAD_OLDBLK(xd_card);
- goto Status_Fail;
+ goto status_fail;
}
xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(new_blk & 0x3FF));
xd_erase_block(chip, phy_blk);
@@ -1737,15 +1762,15 @@ Fail:
XD_CLR_BAD_OLDBLK(xd_card);
}
-Status_Fail:
+status_fail:
rtsx_trace(chip);
return STATUS_FAIL;
}
static int xd_finish_write(struct rtsx_chip *chip,
- u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
+ u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int retval, zone_no;
u16 log_off;
@@ -1762,7 +1787,7 @@ static int xd_finish_write(struct rtsx_chip *chip,
if (old_blk == BLK_NOT_FOUND) {
retval = xd_init_page(chip, new_blk, log_off,
- page_off, xd_card->page_off + 1);
+ page_off, xd_card->page_off + 1);
if (retval != STATUS_SUCCESS) {
retval = xd_erase_block(chip, new_blk);
if (retval == STATUS_SUCCESS)
@@ -1772,7 +1797,7 @@ static int xd_finish_write(struct rtsx_chip *chip,
}
} else {
retval = xd_copy_page(chip, old_blk, new_blk,
- page_off, xd_card->page_off + 1);
+ page_off, xd_card->page_off + 1);
if (retval != STATUS_SUCCESS) {
if (!XD_CHK_BAD_NEWBLK(xd_card)) {
retval = xd_erase_block(chip, new_blk);
@@ -1804,7 +1829,7 @@ static int xd_finish_write(struct rtsx_chip *chip,
}
static int xd_prepare_write(struct rtsx_chip *chip,
- u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
+ u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
{
int retval;
@@ -1823,11 +1848,11 @@ static int xd_prepare_write(struct rtsx_chip *chip,
}
static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
- u32 new_blk, u32 log_blk, u8 start_page,
- u8 end_page, u8 *buf, unsigned int *index,
- unsigned int *offset)
+ u32 new_blk, u32 log_blk, u8 start_page,
+ u8 end_page, u8 *buf, unsigned int *index,
+ unsigned int *offset)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
u32 page_addr;
int zone_no, retval;
u16 log_off;
@@ -1837,7 +1862,7 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
__func__, old_blk, new_blk, log_blk);
if (start_page > end_page)
- goto Status_Fail;
+ goto status_fail;
page_cnt = end_page - start_page;
zone_no = (int)(log_blk / 1000);
@@ -1847,12 +1872,12 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
retval = xd_send_cmd(chip, READ1_1);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H,
- 0xFF, (u8)(log_off >> 8));
+ 0xFF, (u8)(log_off >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)log_off);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, XD_GBLK);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, XD_GPG);
@@ -1860,32 +1885,32 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_BA_TRANSFORM,
- XD_BA_TRANSFORM);
+ XD_BA_TRANSFORM);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
trans_dma_enable(chip->srb->sc_data_direction, chip,
- page_cnt * 512, DMA_512);
+ page_cnt * 512, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
- 0xFF, XD_TRANSFER_START | XD_WRITE_PAGES);
+ 0xFF, XD_TRANSFER_START | XD_WRITE_PAGES);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
rtsx_send_cmd_no_wait(chip);
retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512,
- scsi_sg_count(chip->srb),
- index, offset, DMA_TO_DEVICE, chip->xd_timeout);
+ scsi_sg_count(chip->srb),
+ index, offset, DMA_TO_DEVICE, chip->xd_timeout);
if (retval < 0) {
rtsx_clear_xd_error(chip);
if (retval == -ETIMEDOUT) {
xd_set_err_code(chip, XD_TO_ERROR);
- goto Status_Fail;
+ goto status_fail;
} else {
rtsx_trace(chip);
- goto Fail;
+ goto fail;
}
}
@@ -1911,7 +1936,7 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
return STATUS_SUCCESS;
-Fail:
+fail:
retval = rtsx_read_register(chip, XD_DAT, &reg_val);
if (retval) {
rtsx_trace(chip);
@@ -1922,7 +1947,7 @@ Fail:
xd_mark_bad_block(chip, new_blk);
}
-Status_Fail:
+status_fail:
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1930,8 +1955,8 @@ Status_Fail:
#ifdef XD_DELAY_WRITE
int xd_delay_write(struct rtsx_chip *chip)
{
- struct xd_info *xd_card = &(chip->xd_card);
- struct xd_delay_write_tag *delay_write = &(xd_card->delay_write);
+ struct xd_info *xd_card = &chip->xd_card;
+ struct xd_delay_write_tag *delay_write = &xd_card->delay_write;
int retval;
if (delay_write->delay_write_flag) {
@@ -1944,9 +1969,10 @@ int xd_delay_write(struct rtsx_chip *chip)
delay_write->delay_write_flag = 0;
retval = xd_finish_write(chip,
- delay_write->old_phyblock,
- delay_write->new_phyblock,
- delay_write->logblock, delay_write->pageoff);
+ delay_write->old_phyblock,
+ delay_write->new_phyblock,
+ delay_write->logblock,
+ delay_write->pageoff);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1958,12 +1984,12 @@ int xd_delay_write(struct rtsx_chip *chip)
#endif
int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- u32 start_sector, u16 sector_cnt)
+ u32 start_sector, u16 sector_cnt)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
unsigned int lun = SCSI_LUN(srb);
#ifdef XD_DELAY_WRITE
- struct xd_delay_write_tag *delay_write = &(xd_card->delay_write);
+ struct xd_delay_write_tag *delay_write = &xd_card->delay_write;
#endif
int retval, zone_no;
unsigned int index = 0, offset = 0;
@@ -2012,17 +2038,18 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (srb->sc_data_direction == DMA_TO_DEVICE) {
#ifdef XD_DELAY_WRITE
if (delay_write->delay_write_flag &&
- (delay_write->logblock == log_blk) &&
- (start_page > delay_write->pageoff)) {
+ (delay_write->logblock == log_blk) &&
+ (start_page > delay_write->pageoff)) {
delay_write->delay_write_flag = 0;
if (delay_write->old_phyblock != BLK_NOT_FOUND) {
retval = xd_copy_page(chip,
- delay_write->old_phyblock,
- delay_write->new_phyblock,
- delay_write->pageoff, start_page);
+ delay_write->old_phyblock,
+ delay_write->new_phyblock,
+ delay_write->pageoff,
+ start_page);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2039,7 +2066,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
retval = xd_delay_write(chip);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2047,25 +2074,25 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
new_blk = xd_get_unused_block(chip, zone_no);
if ((old_blk == BLK_NOT_FOUND) ||
- (new_blk == BLK_NOT_FOUND)) {
+ (new_blk == BLK_NOT_FOUND)) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
retval = xd_prepare_write(chip, old_blk, new_blk,
- log_blk, start_page);
+ log_blk, start_page);
if (retval != STATUS_SUCCESS) {
if (detect_card_cd(chip, XD_CARD) !=
STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_NOT_PRESENT);
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return STATUS_FAIL;
}
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2078,12 +2105,12 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS) {
if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_NOT_PRESENT);
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return STATUS_FAIL;
}
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2092,7 +2119,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
if (old_blk == BLK_NOT_FOUND) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2116,22 +2143,22 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
page_cnt = end_page - start_page;
if (srb->sc_data_direction == DMA_FROM_DEVICE) {
retval = xd_read_multiple_pages(chip, old_blk, log_blk,
- start_page, end_page, ptr,
- &index, &offset);
+ start_page, end_page,
+ ptr, &index, &offset);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
retval = xd_write_multiple_pages(chip, old_blk,
- new_blk, log_blk,
- start_page, end_page, ptr,
- &index, &offset);
+ new_blk, log_blk,
+ start_page, end_page,
+ ptr, &index, &offset);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2153,7 +2180,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS) {
chip->card_fail |= XD_CARD;
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_NOT_PRESENT);
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2163,10 +2190,10 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (old_blk == BLK_NOT_FOUND) {
if (srb->sc_data_direction == DMA_FROM_DEVICE)
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
else
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
@@ -2176,7 +2203,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
new_blk = xd_get_unused_block(chip, zone_no);
if (new_blk == BLK_NOT_FOUND) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2186,7 +2213,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
}
if ((srb->sc_data_direction == DMA_TO_DEVICE) &&
- (end_page != (xd_card->page_off + 1))) {
+ (end_page != (xd_card->page_off + 1))) {
#ifdef XD_DELAY_WRITE
delay_write->delay_write_flag = 1;
delay_write->old_phyblock = old_blk;
@@ -2202,11 +2229,11 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
}
retval = xd_finish_write(chip, old_blk, new_blk,
- log_blk, end_page);
+ log_blk, end_page);
if (retval != STATUS_SUCCESS) {
if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_NOT_PRESENT);
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2224,10 +2251,10 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
void xd_free_l2p_tbl(struct rtsx_chip *chip)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int i = 0;
- if (xd_card->zone != NULL) {
+ if (xd_card->zone) {
for (i = 0; i < xd_card->zone_cnt; i++) {
vfree(xd_card->zone[i].l2p_table);
xd_card->zone[i].l2p_table = NULL;
@@ -2242,7 +2269,7 @@ void xd_free_l2p_tbl(struct rtsx_chip *chip)
void xd_cleanup_work(struct rtsx_chip *chip)
{
#ifdef XD_DELAY_WRITE
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
if (xd_card->delay_write.delay_write_flag) {
dev_dbg(rtsx_dev(chip), "xD: delay write\n");
@@ -2297,7 +2324,7 @@ int xd_power_off_card3v3(struct rtsx_chip *chip)
int release_xd_card(struct rtsx_chip *chip)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int retval;
chip->card_ready &= ~XD_CARD;
diff --git a/drivers/staging/rts5208/xd.h b/drivers/staging/rts5208/xd.h
index 938138c50bb5..d5f10880efb7 100644
--- a/drivers/staging/rts5208/xd.h
+++ b/drivers/staging/rts5208/xd.h
@@ -179,7 +179,7 @@ int reset_xd_card(struct rtsx_chip *chip);
int xd_delay_write(struct rtsx_chip *chip);
#endif
int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- u32 start_sector, u16 sector_cnt);
+ u32 start_sector, u16 sector_cnt);
void xd_free_l2p_tbl(struct rtsx_chip *chip);
void xd_cleanup_work(struct rtsx_chip *chip);
int xd_power_off_card3v3(struct rtsx_chip *chip);
diff --git a/drivers/staging/skein/skein_api.c b/drivers/staging/skein/skein_api.c
index cab26e736111..c6526b6fbfb4 100644
--- a/drivers/staging/skein/skein_api.c
+++ b/drivers/staging/skein/skein_api.c
@@ -98,19 +98,16 @@ int skein_mac_init(struct skein_ctx *ctx, const u8 *key, size_t key_len,
switch (ctx->skein_size) {
case SKEIN_256:
ret = skein_256_init_ext(&ctx->m.s256, hash_bit_len,
- tree_info,
- (const u8 *)key, key_len);
+ tree_info, key, key_len);
break;
case SKEIN_512:
ret = skein_512_init_ext(&ctx->m.s512, hash_bit_len,
- tree_info,
- (const u8 *)key, key_len);
+ tree_info, key, key_len);
break;
case SKEIN_1024:
ret = skein_1024_init_ext(&ctx->m.s1024, hash_bit_len,
- tree_info,
- (const u8 *)key, key_len);
+ tree_info, key, key_len);
break;
}
@@ -152,16 +149,13 @@ int skein_update(struct skein_ctx *ctx, const u8 *msg,
switch (ctx->skein_size) {
case SKEIN_256:
- ret = skein_256_update(&ctx->m.s256, (const u8 *)msg,
- msg_byte_cnt);
+ ret = skein_256_update(&ctx->m.s256, msg, msg_byte_cnt);
break;
case SKEIN_512:
- ret = skein_512_update(&ctx->m.s512, (const u8 *)msg,
- msg_byte_cnt);
+ ret = skein_512_update(&ctx->m.s512, msg, msg_byte_cnt);
break;
case SKEIN_1024:
- ret = skein_1024_update(&ctx->m.s1024, (const u8 *)msg,
- msg_byte_cnt);
+ ret = skein_1024_update(&ctx->m.s1024, msg, msg_byte_cnt);
break;
}
return ret;
@@ -211,7 +205,7 @@ int skein_update_bits(struct skein_ctx *ctx, const u8 *msg,
/* partial byte bit mask */
mask = (u8)(1u << (7 - (msg_bit_cnt & 7)));
/* apply bit padding on final byte (in the buffer) */
- up[length - 1] = (u8)((up[length - 1] & (0 - mask)) | mask);
+ up[length - 1] = (up[length - 1] & (0 - mask)) | mask;
return SKEIN_SUCCESS;
}
@@ -224,13 +218,13 @@ int skein_final(struct skein_ctx *ctx, u8 *hash)
switch (ctx->skein_size) {
case SKEIN_256:
- ret = skein_256_final(&ctx->m.s256, (u8 *)hash);
+ ret = skein_256_final(&ctx->m.s256, hash);
break;
case SKEIN_512:
- ret = skein_512_final(&ctx->m.s512, (u8 *)hash);
+ ret = skein_512_final(&ctx->m.s512, hash);
break;
case SKEIN_1024:
- ret = skein_1024_final(&ctx->m.s1024, (u8 *)hash);
+ ret = skein_1024_final(&ctx->m.s1024, hash);
break;
}
return ret;
diff --git a/drivers/staging/skein/threefish_block.c b/drivers/staging/skein/threefish_block.c
index a95563fad071..50640656c10d 100644
--- a/drivers/staging/skein/threefish_block.c
+++ b/drivers/staging/skein/threefish_block.c
@@ -64,7 +64,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 += b1;
b1 = rol64(b1, 32) ^ b2;
-
b1 += k3 + t2;
b0 += b1 + k2;
b1 = rol64(b1, 14) ^ b0;
@@ -117,7 +116,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 += b1;
b1 = rol64(b1, 32) ^ b2;
-
b1 += k0 + t1;
b0 += b1 + k4;
b1 = rol64(b1, 14) ^ b0;
@@ -170,7 +168,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 += b1;
b1 = rol64(b1, 32) ^ b2;
-
b1 += k2 + t0;
b0 += b1 + k1;
b1 = rol64(b1, 14) ^ b0;
@@ -223,7 +220,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 += b1;
b1 = rol64(b1, 32) ^ b2;
-
b1 += k4 + t2;
b0 += b1 + k3;
b1 = rol64(b1, 14) ^ b0;
@@ -276,7 +272,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 += b1;
b1 = rol64(b1, 32) ^ b2;
-
b1 += k1 + t1;
b0 += b1 + k0;
b1 = rol64(b1, 14) ^ b0;
@@ -329,7 +324,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 += b1;
b1 = rol64(b1, 32) ^ b2;
-
b1 += k3 + t0;
b0 += b1 + k2;
b1 = rol64(b1, 14) ^ b0;
@@ -382,7 +376,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 += b1;
b1 = rol64(b1, 32) ^ b2;
-
b1 += k0 + t2;
b0 += b1 + k4;
b1 = rol64(b1, 14) ^ b0;
@@ -435,7 +428,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 += b1;
b1 = rol64(b1, 32) ^ b2;
-
b1 += k2 + t1;
b0 += b1 + k1;
b1 = rol64(b1, 14) ^ b0;
@@ -579,7 +571,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 -= b3 + k3 + t2;
b3 -= k4 + 16;
-
tmp = b3 ^ b0;
b3 = ror64(tmp, 32);
b0 -= b3;
@@ -648,7 +639,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 -= b3 + k1 + t0;
b3 -= k2 + 14;
-
tmp = b3 ^ b0;
b3 = ror64(tmp, 32);
b0 -= b3;
@@ -717,7 +707,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 -= b3 + k4 + t1;
b3 -= k0 + 12;
-
tmp = b3 ^ b0;
b3 = ror64(tmp, 32);
b0 -= b3;
@@ -786,7 +775,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 -= b3 + k2 + t2;
b3 -= k3 + 10;
-
tmp = b3 ^ b0;
b3 = ror64(tmp, 32);
b0 -= b3;
@@ -855,7 +843,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 -= b3 + k0 + t0;
b3 -= k1 + 8;
-
tmp = b3 ^ b0;
b3 = ror64(tmp, 32);
b0 -= b3;
@@ -924,7 +911,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 -= b3 + k3 + t1;
b3 -= k4 + 6;
-
tmp = b3 ^ b0;
b3 = ror64(tmp, 32);
b0 -= b3;
@@ -993,7 +979,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 -= b3 + k1 + t2;
b3 -= k2 + 4;
-
tmp = b3 ^ b0;
b3 = ror64(tmp, 32);
b0 -= b3;
@@ -1062,7 +1047,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 -= b3 + k4 + t0;
b3 -= k0 + 2;
-
tmp = b3 ^ b0;
b3 = ror64(tmp, 32);
b0 -= b3;
diff --git a/drivers/staging/slicoss/Kconfig b/drivers/staging/slicoss/Kconfig
deleted file mode 100644
index 5c2a15b42dfe..000000000000
--- a/drivers/staging/slicoss/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-config SLICOSS
- tristate "Alacritech Gigabit IS-NIC support"
- depends on PCI && X86 && NET
- default n
- help
- This driver supports Alacritech's IS-NIC gigabit ethernet cards.
-
- This includes the following devices:
- Mojave cards (single port PCI Gigabit) both copper and fiber
- Oasis cards (single and dual port PCI-x Gigabit) copper and fiber
- Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber
-
- To compile this driver as a module, choose M here: the module
- will be called slicoss.
diff --git a/drivers/staging/slicoss/Makefile b/drivers/staging/slicoss/Makefile
deleted file mode 100644
index 7bc9e9b9d3ab..000000000000
--- a/drivers/staging/slicoss/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_SLICOSS) += slicoss.o
diff --git a/drivers/staging/slicoss/README b/drivers/staging/slicoss/README
deleted file mode 100644
index 4fa50e73ce86..000000000000
--- a/drivers/staging/slicoss/README
+++ /dev/null
@@ -1,7 +0,0 @@
-This driver is supposed to support:
-
- Mojave cards (single port PCI Gigabit) both copper and fiber
- Oasis cards (single and dual port PCI-x Gigabit) copper and fiber
- Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber
-
-The driver was actually tested on Oasis and Kalahari cards.
diff --git a/drivers/staging/slicoss/TODO b/drivers/staging/slicoss/TODO
deleted file mode 100644
index 9019729b7be6..000000000000
--- a/drivers/staging/slicoss/TODO
+++ /dev/null
@@ -1,36 +0,0 @@
-TODO:
- - move firmware loading to request_firmware()
- - remove direct memory access of structures
- - any remaining sparse and checkpatch.pl warnings
-
- - use net_device_ops
- - use dev->stats rather than adapter->stats
- - don't cast netdev_priv it is already void
- - GET RID OF MACROS
- - work on all architectures
- - without CONFIG_X86_64 confusion
- - do 64 bit correctly
- - don't depend on order of union
- - get rid of ASSERT(), use BUG() instead but only where necessary
- looks like most aren't really useful
- - no new SIOCDEVPRIVATE ioctl allowed
- - don't use module_param for configuring interrupt mitigation
- use ethtool instead
- - reorder code to elminate use of forward declarations
- - don't keep private linked list of drivers.
- - use PCI_DEVICE()
- - do ethtool correctly using ethtool_ops
- - NAPI?
- - wasted overhead of extra stats
- - state variables for things that are
- easily available and shouldn't be kept in card structure, cardnum, ...
- slotnumber, events, ...
- - volatile == bad design => bad code
- - locking too fine grained, not designed just throw more locks
- at problem
-
-Please send patches to:
- Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-and Cc: Lior Dotan <liodot@gmail.com> and Christopher Harrer
-<charrer@alacritech.com> as well as they are also able to test out any
-changes.
diff --git a/drivers/staging/slicoss/slic.h b/drivers/staging/slicoss/slic.h
deleted file mode 100644
index 420546d43002..000000000000
--- a/drivers/staging/slicoss/slic.h
+++ /dev/null
@@ -1,573 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2000-2002 Alacritech, Inc. All rights reserved.
- *
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation
- * are those of the authors and should not be interpreted as representing
- * official policies, either expressed or implied, of Alacritech, Inc.
- *
- **************************************************************************/
-
-/*
- * FILENAME: slic.h
- *
- * This is the base set of header definitions for the SLICOSS driver.
- */
-#ifndef __SLIC_DRIVER_H__
-#define __SLIC_DRIVER_H__
-
-/* firmware stuff */
-#define OASIS_UCODE_VERS_STRING "1.2"
-#define OASIS_UCODE_VERS_DATE "2006/03/27 15:10:37"
-#define OASIS_UCODE_HOSTIF_ID 3
-
-#define MOJAVE_UCODE_VERS_STRING "1.2"
-#define MOJAVE_UCODE_VERS_DATE "2006/03/27 15:12:22"
-#define MOJAVE_UCODE_HOSTIF_ID 3
-
-#define GB_RCVUCODE_VERS_STRING "1.2"
-#define GB_RCVUCODE_VERS_DATE "2006/03/27 15:12:15"
-static u32 OasisRcvUCodeLen = 512;
-static u32 GBRcvUCodeLen = 512;
-#define SECTION_SIZE 65536
-
-#define SLIC_RSPQ_PAGES_GB 10
-#define SLIC_RSPQ_BUFSINPAGE (PAGE_SIZE / SLIC_RSPBUF_SIZE)
-
-struct slic_rspqueue {
- u32 offset;
- u32 pageindex;
- u32 num_pages;
- struct slic_rspbuf *rspbuf;
- u32 *vaddr[SLIC_RSPQ_PAGES_GB];
- dma_addr_t paddr[SLIC_RSPQ_PAGES_GB];
-};
-
-#define SLIC_RCVQ_EXPANSION 1
-#define SLIC_RCVQ_ENTRIES (256 * SLIC_RCVQ_EXPANSION)
-#define SLIC_RCVQ_MINENTRIES (SLIC_RCVQ_ENTRIES / 2)
-#define SLIC_RCVQ_MAX_PROCESS_ISR ((SLIC_RCVQ_ENTRIES * 4))
-#define SLIC_RCVQ_RCVBUFSIZE 2048
-#define SLIC_RCVQ_FILLENTRIES (16 * SLIC_RCVQ_EXPANSION)
-#define SLIC_RCVQ_FILLTHRESH (SLIC_RCVQ_ENTRIES - SLIC_RCVQ_FILLENTRIES)
-
-struct slic_rcvqueue {
- struct sk_buff *head;
- struct sk_buff *tail;
- u32 count;
- u32 size;
- u32 errors;
-};
-
-struct slic_rcvbuf_info {
- u32 id;
- u32 starttime;
- u32 stoptime;
- u32 slicworld;
- u32 lasttime;
- u32 lastid;
-};
-
-/*
- * SLIC Handle structure. Used to restrict handle values to
- * 32 bits by using an index rather than an address.
- * Simplifies ucode in 64-bit systems
- */
-struct slic_handle_word {
- union {
- struct {
- ushort index;
- ushort bottombits; /* to denote num bufs to card */
- } parts;
- u32 whole;
- } handle;
-};
-
-struct slic_handle {
- struct slic_handle_word token; /* token passed between host and card*/
- ushort type;
- void *address; /* actual address of the object*/
- ushort offset;
- struct slic_handle *other_handle;
- struct slic_handle *next;
-};
-
-#define SLIC_HANDLE_FREE 0x0000
-#define SLIC_HANDLE_DATA 0x0001
-#define SLIC_HANDLE_CMD 0x0002
-#define SLIC_HANDLE_CONTEXT 0x0003
-#define SLIC_HANDLE_TEAM 0x0004
-
-#define handle_index handle.parts.index
-#define handle_bottom handle.parts.bottombits
-#define handle_token handle.whole
-
-#define SLIC_HOSTCMD_SIZE 512
-
-struct slic_hostcmd {
- struct slic_host64_cmd cmd64;
- u32 type;
- struct sk_buff *skb;
- u32 paddrl;
- u32 paddrh;
- u32 busy;
- u32 cmdsize;
- ushort numbufs;
- struct slic_handle *pslic_handle;/* handle associated with command */
- struct slic_hostcmd *next;
- struct slic_hostcmd *next_all;
-};
-
-#define SLIC_CMDQ_CMDSINPAGE (PAGE_SIZE / SLIC_HOSTCMD_SIZE)
-#define SLIC_CMD_DUMB 3
-#define SLIC_CMDQ_INITCMDS 256
-#define SLIC_CMDQ_MAXCMDS 256
-#define SLIC_CMDQ_MAXOUTSTAND SLIC_CMDQ_MAXCMDS
-#define SLIC_CMDQ_MAXPAGES (SLIC_CMDQ_MAXCMDS / SLIC_CMDQ_CMDSINPAGE)
-#define SLIC_CMDQ_INITPAGES (SLIC_CMDQ_INITCMDS / SLIC_CMDQ_CMDSINPAGE)
-
-struct slic_cmdqmem {
- int pagecnt;
- u32 *pages[SLIC_CMDQ_MAXPAGES];
- dma_addr_t dma_pages[SLIC_CMDQ_MAXPAGES];
-};
-
-struct slic_cmdqueue {
- struct slic_hostcmd *head;
- struct slic_hostcmd *tail;
- int count;
- spinlock_t lock;
-};
-
-#define SLIC_MAX_CARDS 32
-#define SLIC_MAX_PORTS 4 /* Max # of ports per card */
-
-struct mcast_address {
- unsigned char address[6];
- struct mcast_address *next;
-};
-
-#define CARD_DOWN 0x00000000
-#define CARD_UP 0x00000001
-#define CARD_FAIL 0x00000002
-#define CARD_DIAG 0x00000003
-#define CARD_SLEEP 0x00000004
-
-#define ADAPT_DOWN 0x00
-#define ADAPT_UP 0x01
-#define ADAPT_FAIL 0x02
-#define ADAPT_RESET 0x03
-#define ADAPT_SLEEP 0x04
-
-#define ADAPT_FLAGS_BOOTTIME 0x0001
-#define ADAPT_FLAGS_IS64BIT 0x0002
-#define ADAPT_FLAGS_PENDINGLINKDOWN 0x0004
-#define ADAPT_FLAGS_FIBERMEDIA 0x0008
-#define ADAPT_FLAGS_LOCKS_ALLOCED 0x0010
-#define ADAPT_FLAGS_INT_REGISTERED 0x0020
-#define ADAPT_FLAGS_LOAD_TIMER_SET 0x0040
-#define ADAPT_FLAGS_STATS_TIMER_SET 0x0080
-#define ADAPT_FLAGS_RESET_TIMER_SET 0x0100
-
-#define LINK_DOWN 0x00
-#define LINK_CONFIG 0x01
-#define LINK_UP 0x02
-
-#define LINK_10MB 0x00
-#define LINK_100MB 0x01
-#define LINK_AUTOSPEED 0x02
-#define LINK_1000MB 0x03
-#define LINK_10000MB 0x04
-
-#define LINK_HALFD 0x00
-#define LINK_FULLD 0x01
-#define LINK_AUTOD 0x02
-
-#define MAC_DIRECTED 0x00000001
-#define MAC_BCAST 0x00000002
-#define MAC_MCAST 0x00000004
-#define MAC_PROMISC 0x00000008
-#define MAC_LOOPBACK 0x00000010
-#define MAC_ALLMCAST 0x00000020
-
-#define SLIC_DUPLEX(x) ((x == LINK_FULLD) ? "FDX" : "HDX")
-#define SLIC_SPEED(x) ((x == LINK_100MB) ? "100Mb" : ((x == LINK_1000MB) ?\
- "1000Mb" : " 10Mb"))
-#define SLIC_LINKSTATE(x) ((x == LINK_DOWN) ? "Down" : "Up ")
-#define SLIC_ADAPTER_STATE(x) ((x == ADAPT_UP) ? "UP" : "Down")
-#define SLIC_CARD_STATE(x) ((x == CARD_UP) ? "UP" : "Down")
-
-struct slic_iface_stats {
- /*
- * Stats
- */
- u64 xmt_bytes;
- u64 xmt_ucast;
- u64 xmt_mcast;
- u64 xmt_bcast;
- u64 xmt_errors;
- u64 xmt_discards;
- u64 xmit_collisions;
- u64 xmit_excess_xmit_collisions;
- u64 rcv_bytes;
- u64 rcv_ucast;
- u64 rcv_mcast;
- u64 rcv_bcast;
- u64 rcv_errors;
- u64 rcv_discards;
-};
-
-struct sliccp_stats {
- u64 xmit_tcp_segs;
- u64 xmit_tcp_bytes;
- u64 rcv_tcp_segs;
- u64 rcv_tcp_bytes;
-};
-
-struct slicnet_stats {
- struct sliccp_stats tcp;
- struct slic_iface_stats iface;
-};
-
-#define SLIC_LOADTIMER_PERIOD 1
-#define SLIC_INTAGG_DEFAULT 200
-#define SLIC_LOAD_0 0
-#define SLIC_INTAGG_0 0
-#define SLIC_LOAD_1 8000
-#define SLIC_LOAD_2 10000
-#define SLIC_LOAD_3 12000
-#define SLIC_LOAD_4 14000
-#define SLIC_LOAD_5 16000
-#define SLIC_INTAGG_1 50
-#define SLIC_INTAGG_2 100
-#define SLIC_INTAGG_3 150
-#define SLIC_INTAGG_4 200
-#define SLIC_INTAGG_5 250
-#define SLIC_LOAD_1GB 3000
-#define SLIC_LOAD_2GB 6000
-#define SLIC_LOAD_3GB 12000
-#define SLIC_LOAD_4GB 24000
-#define SLIC_LOAD_5GB 48000
-#define SLIC_INTAGG_1GB 50
-#define SLIC_INTAGG_2GB 75
-#define SLIC_INTAGG_3GB 100
-#define SLIC_INTAGG_4GB 100
-#define SLIC_INTAGG_5GB 100
-
-struct ether_header {
- unsigned char ether_dhost[6];
- unsigned char ether_shost[6];
- ushort ether_type;
-};
-
-struct sliccard {
- uint busnumber;
- uint slotnumber;
- uint state;
- uint cardnum;
- uint card_size;
- uint adapters_activated;
- uint adapters_allocated;
- uint adapters_sleeping;
- uint gennumber;
- u32 events;
- u32 loadlevel_current;
- u32 load;
- uint reset_in_progress;
- u32 pingstatus;
- u32 bad_pingstatus;
- struct timer_list loadtimer;
- u32 loadtimerset;
- uint config_set;
- struct slic_config config;
- struct adapter *master;
- struct adapter *adapter[SLIC_MAX_PORTS];
- struct sliccard *next;
- u32 error_interrupts;
- u32 error_rmiss_interrupts;
- u32 rcv_interrupts;
- u32 xmit_interrupts;
- u32 num_isrs;
- u32 false_interrupts;
- u32 max_isr_rcvs;
- u32 max_isr_xmits;
- u32 rcv_interrupt_yields;
- u32 tx_packets;
- u32 debug_ix;
- ushort reg_type[32];
- ushort reg_offset[32];
- u32 reg_value[32];
- u32 reg_valueh[32];
-};
-
-#define NUM_CFG_SPACES 2
-#define NUM_CFG_REGS 64
-#define NUM_CFG_REG_ULONGS (NUM_CFG_REGS / sizeof(u32))
-
-struct physcard {
- struct adapter *adapter[SLIC_MAX_PORTS];
- struct physcard *next;
- uint adapters_allocd;
-
-/*
- * the following is not currently needed
- * u32 bridge_busnum;
- * u32 bridge_cfg[NUM_CFG_SPACES][NUM_CFG_REG_ULONGS];
- */
-};
-
-struct base_driver {
- spinlock_t driver_lock;
- u32 num_slic_cards;
- u32 num_slic_ports;
- u32 num_slic_ports_active;
- u32 dynamic_intagg;
- struct sliccard *slic_card;
- struct physcard *phys_card;
- uint cardnuminuse[SLIC_MAX_CARDS];
-};
-
-struct slic_stats {
- /* xmit stats */
- u64 xmit_tcp_bytes;
- u64 xmit_tcp_segs;
- u64 xmit_bytes;
- u64 xmit_collisions;
- u64 xmit_unicasts;
- u64 xmit_other_error;
- u64 xmit_excess_collisions;
- /* rcv stats */
- u64 rcv_tcp_bytes;
- u64 rcv_tcp_segs;
- u64 rcv_bytes;
- u64 rcv_unicasts;
- u64 rcv_other_error;
- u64 rcv_drops;
-};
-
-struct slic_shmem_data {
- u32 isr;
- u32 lnkstatus;
- struct slic_stats stats;
-};
-
-struct slic_shmemory {
- dma_addr_t isr_phaddr;
- dma_addr_t lnkstatus_phaddr;
- dma_addr_t stats_phaddr;
- struct slic_shmem_data __iomem *shmem_data;
-};
-
-struct slic_upr {
- uint adapter;
- u32 upr_request;
- u32 upr_data;
- u32 upr_data_h;
- u32 upr_buffer;
- u32 upr_buffer_h;
- struct slic_upr *next;
-};
-
-struct slic_ifevents {
- uint oflow802;
- uint uflow802;
- uint Tprtoflow;
- uint rcvearly;
- uint Bufov;
- uint Carre;
- uint Longe;
- uint Invp;
- uint Crc;
- uint Drbl;
- uint Code;
- uint IpHlen;
- uint IpLen;
- uint IpCsum;
- uint TpCsum;
- uint TpHlen;
-};
-
-struct adapter {
- void *ifp;
- struct sliccard *card;
- uint port;
- struct physcard *physcard;
- uint physport;
- uint cardindex;
- uint card_size;
- uint chipid;
- struct net_device *netdev;
- spinlock_t adapter_lock;
- spinlock_t reset_lock;
- struct pci_dev *pcidev;
- uint busnumber;
- uint slotnumber;
- uint functionnumber;
- ushort vendid;
- ushort devid;
- ushort subsysid;
- u32 irq;
- u32 drambase;
- u32 dramlength;
- uint queues_initialized;
- uint allocated;
- uint activated;
- u32 intrregistered;
- uint isp_initialized;
- uint gennumber;
- struct slic_shmemory shmem;
- dma_addr_t phys_shmem;
- void __iomem *regs;
- unsigned char state;
- unsigned char linkstate;
- unsigned char linkspeed;
- unsigned char linkduplex;
- uint flags;
- unsigned char macaddr[6];
- unsigned char currmacaddr[6];
- u32 macopts;
- ushort devflags_prev;
- u64 mcastmask;
- struct mcast_address *mcastaddrs;
- struct slic_upr *upr_list;
- uint upr_busy;
- struct timer_list pingtimer;
- u32 pingtimerset;
- struct timer_list loadtimer;
- u32 loadtimerset;
- spinlock_t upr_lock;
- spinlock_t bit64reglock;
- struct slic_rspqueue rspqueue;
- struct slic_rcvqueue rcvqueue;
- struct slic_cmdqueue cmdq_free;
- struct slic_cmdqueue cmdq_done;
- struct slic_cmdqueue cmdq_all;
- struct slic_cmdqmem cmdqmem;
- /*
- * SLIC Handles
- */
- /* Object handles*/
- struct slic_handle slic_handles[SLIC_CMDQ_MAXCMDS + 1];
- /* Free object handles*/
- struct slic_handle *pfree_slic_handles;
- /* Object handle list lock*/
- spinlock_t handle_lock;
- ushort slic_handle_ix;
-
- u32 xmitq_full;
- u32 all_reg_writes;
- u32 icr_reg_writes;
- u32 isr_reg_writes;
- u32 error_interrupts;
- u32 error_rmiss_interrupts;
- u32 rx_errors;
- u32 rcv_drops;
- u32 rcv_interrupts;
- u32 xmit_interrupts;
- u32 linkevent_interrupts;
- u32 upr_interrupts;
- u32 num_isrs;
- u32 false_interrupts;
- u32 tx_packets;
- u32 xmit_completes;
- u32 tx_drops;
- u32 rcv_broadcasts;
- u32 rcv_multicasts;
- u32 rcv_unicasts;
- u32 max_isr_rcvs;
- u32 max_isr_xmits;
- u32 rcv_interrupt_yields;
- u32 intagg_period;
- u32 intagg_delay;
- u32 dynamic_intagg;
- struct inicpm_state *inicpm_info;
- void *pinicpm_info;
- struct slic_ifevents if_events;
- struct slic_stats inicstats_prev;
- struct slicnet_stats slic_stats;
-};
-
-static inline u32 slic_read32(struct adapter *adapter, unsigned int reg)
-{
- return ioread32(adapter->regs + reg);
-}
-
-static inline void slic_write32(struct adapter *adapter, unsigned int reg,
- u32 val)
-{
- iowrite32(val, adapter->regs + reg);
-}
-
-static inline void slic_write64(struct adapter *adapter, unsigned int reg,
- u32 val, u32 hiaddr)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->bit64reglock, flags);
- slic_write32(adapter, SLIC_REG_ADDR_UPPER, hiaddr);
- slic_write32(adapter, reg, val);
- mmiowb();
- spin_unlock_irqrestore(&adapter->bit64reglock, flags);
-}
-
-static inline void slic_flush_write(struct adapter *adapter)
-{
- ioread32(adapter->regs + SLIC_REG_HOSTID);
-}
-
-#define UPDATE_STATS(largestat, newstat, oldstat) \
-{ \
- if ((newstat) < (oldstat)) \
- (largestat) += ((newstat) + (0xFFFFFFFF - oldstat + 1)); \
- else \
- (largestat) += ((newstat) - (oldstat)); \
-}
-
-#define UPDATE_STATS_GB(largestat, newstat, oldstat) \
-{ \
- (largestat) += ((newstat) - (oldstat)); \
-}
-
-#if BITS_PER_LONG == 64
-#define SLIC_GET_ADDR_LOW(_addr) (u32)((u64)(_addr) & \
- 0x00000000FFFFFFFF)
-#define SLIC_GET_ADDR_HIGH(_addr) (u32)(((u64)(_addr) >> 32) & \
- 0x00000000FFFFFFFF)
-#elif BITS_PER_LONG == 32
-#define SLIC_GET_ADDR_LOW(_addr) (u32)(_addr)
-#define SLIC_GET_ADDR_HIGH(_addr) (u32)0
-#else
-#error BITS_PER_LONG must be 32 or 64
-#endif
-
-#define FLUSH true
-#define DONT_FLUSH false
-
-#define SIOCSLICSETINTAGG (SIOCDEVPRIVATE + 10)
-
-#endif /* __SLIC_DRIVER_H__ */
diff --git a/drivers/staging/slicoss/slichw.h b/drivers/staging/slicoss/slichw.h
deleted file mode 100644
index 49cb91aa02bb..000000000000
--- a/drivers/staging/slicoss/slichw.h
+++ /dev/null
@@ -1,652 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2000-2002 Alacritech, Inc. All rights reserved.
- *
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation
- * are those of the authors and should not be interpreted as representing
- * official policies, either expressed or implied, of Alacritech, Inc.
- *
- **************************************************************************/
-
-/*
- * FILENAME: slichw.h
- *
- * This header file contains definitions that are common to our hardware.
- */
-#ifndef __SLICHW_H__
-#define __SLICHW_H__
-
-#define PCI_VENDOR_ID_ALACRITECH 0x139A
-#define SLIC_1GB_DEVICE_ID 0x0005
-#define SLIC_2GB_DEVICE_ID 0x0007 /* Oasis Device ID */
-
-#define SLIC_1GB_CICADA_SUBSYS_ID 0x0008
-
-#define SLIC_NBR_MACS 4
-
-#define SLIC_RCVBUF_SIZE 2048
-#define SLIC_RCVBUF_HEADSIZE 34
-#define SLIC_RCVBUF_TAILSIZE 0
-#define SLIC_RCVBUF_DATASIZE (SLIC_RCVBUF_SIZE - \
- (SLIC_RCVBUF_HEADSIZE + \
- SLIC_RCVBUF_TAILSIZE))
-
-#define VGBSTAT_XPERR 0x40000000
-#define VGBSTAT_XERRSHFT 25
-#define VGBSTAT_XCSERR 0x23
-#define VGBSTAT_XUFLOW 0x22
-#define VGBSTAT_XHLEN 0x20
-#define VGBSTAT_NETERR 0x01000000
-#define VGBSTAT_NERRSHFT 16
-#define VGBSTAT_NERRMSK 0x1ff
-#define VGBSTAT_NCSERR 0x103
-#define VGBSTAT_NUFLOW 0x102
-#define VGBSTAT_NHLEN 0x100
-#define VGBSTAT_LNKERR 0x00000080
-#define VGBSTAT_LERRMSK 0xff
-#define VGBSTAT_LDEARLY 0x86
-#define VGBSTAT_LBOFLO 0x85
-#define VGBSTAT_LCODERR 0x84
-#define VGBSTAT_LDBLNBL 0x83
-#define VGBSTAT_LCRCERR 0x82
-#define VGBSTAT_LOFLO 0x81
-#define VGBSTAT_LUFLO 0x80
-#define IRHDDR_FLEN_MSK 0x0000ffff
-#define IRHDDR_SVALID 0x80000000
-#define IRHDDR_ERR 0x10000000
-#define VRHSTAT_802OE 0x80000000
-#define VRHSTAT_TPOFLO 0x10000000
-#define VRHSTATB_802UE 0x80000000
-#define VRHSTATB_RCVE 0x40000000
-#define VRHSTATB_BUFF 0x20000000
-#define VRHSTATB_CARRE 0x08000000
-#define VRHSTATB_LONGE 0x02000000
-#define VRHSTATB_PREA 0x01000000
-#define VRHSTATB_CRC 0x00800000
-#define VRHSTATB_DRBL 0x00400000
-#define VRHSTATB_CODE 0x00200000
-#define VRHSTATB_TPCSUM 0x00100000
-#define VRHSTATB_TPHLEN 0x00080000
-#define VRHSTATB_IPCSUM 0x00040000
-#define VRHSTATB_IPLERR 0x00020000
-#define VRHSTATB_IPHERR 0x00010000
-#define SLIC_MAX64_BCNT 23
-#define SLIC_MAX32_BCNT 26
-#define IHCMD_XMT_REQ 0x01
-#define IHFLG_IFSHFT 2
-#define SLIC_RSPBUF_SIZE 32
-
-#define SLIC_RESET_MAGIC 0xDEAD
-#define ICR_INT_OFF 0
-#define ICR_INT_ON 1
-#define ICR_INT_MASK 2
-
-#define ISR_ERR 0x80000000
-#define ISR_RCV 0x40000000
-#define ISR_CMD 0x20000000
-#define ISR_IO 0x60000000
-#define ISR_UPC 0x10000000
-#define ISR_LEVENT 0x08000000
-#define ISR_RMISS 0x02000000
-#define ISR_UPCERR 0x01000000
-#define ISR_XDROP 0x00800000
-#define ISR_UPCBSY 0x00020000
-#define ISR_EVMSK 0xffff0000
-#define ISR_PINGMASK 0x00700000
-#define ISR_PINGDSMASK 0x00710000
-#define ISR_UPCMASK 0x11000000
-#define SLIC_WCS_START 0x80000000
-#define SLIC_WCS_COMPARE 0x40000000
-#define SLIC_RCVWCS_BEGIN 0x40000000
-#define SLIC_RCVWCS_FINISH 0x80000000
-#define SLIC_PM_MAXPATTERNS 6
-#define SLIC_PM_PATTERNSIZE 128
-#define SLIC_PMCAPS_WAKEONLAN 0x00000001
-#define MIICR_REG_PCR 0x00000000
-#define MIICR_REG_4 0x00040000
-#define MIICR_REG_9 0x00090000
-#define MIICR_REG_16 0x00100000
-#define PCR_RESET 0x8000
-#define PCR_POWERDOWN 0x0800
-#define PCR_SPEED_100 0x2000
-#define PCR_SPEED_1000 0x0040
-#define PCR_AUTONEG 0x1000
-#define PCR_AUTONEG_RST 0x0200
-#define PCR_DUPLEX_FULL 0x0100
-#define PSR_LINKUP 0x0004
-
-#define PAR_ADV100FD 0x0100
-#define PAR_ADV100HD 0x0080
-#define PAR_ADV10FD 0x0040
-#define PAR_ADV10HD 0x0020
-#define PAR_ASYMPAUSE 0x0C00
-#define PAR_802_3 0x0001
-
-#define PAR_ADV1000XFD 0x0020
-#define PAR_ADV1000XHD 0x0040
-#define PAR_ASYMPAUSE_FIBER 0x0180
-
-#define PGC_ADV1000FD 0x0200
-#define PGC_ADV1000HD 0x0100
-#define SEEQ_LINKFAIL 0x4000
-#define SEEQ_SPEED 0x0080
-#define SEEQ_DUPLEX 0x0040
-#define TDK_DUPLEX 0x0800
-#define TDK_SPEED 0x0400
-#define MRV_REG16_XOVERON 0x0068
-#define MRV_REG16_XOVEROFF 0x0008
-#define MRV_SPEED_1000 0x8000
-#define MRV_SPEED_100 0x4000
-#define MRV_SPEED_10 0x0000
-#define MRV_FULLDUPLEX 0x2000
-#define MRV_LINKUP 0x0400
-
-#define GIG_LINKUP 0x0001
-#define GIG_FULLDUPLEX 0x0002
-#define GIG_SPEED_MASK 0x000C
-#define GIG_SPEED_1000 0x0008
-#define GIG_SPEED_100 0x0004
-#define GIG_SPEED_10 0x0000
-
-#define MCR_RESET 0x80000000
-#define MCR_CRCEN 0x40000000
-#define MCR_FULLD 0x10000000
-#define MCR_PAD 0x02000000
-#define MCR_RETRYLATE 0x01000000
-#define MCR_BOL_SHIFT 21
-#define MCR_IPG1_SHIFT 14
-#define MCR_IPG2_SHIFT 7
-#define MCR_IPG3_SHIFT 0
-#define GMCR_RESET 0x80000000
-#define GMCR_GBIT 0x20000000
-#define GMCR_FULLD 0x10000000
-#define GMCR_GAPBB_SHIFT 14
-#define GMCR_GAPR1_SHIFT 7
-#define GMCR_GAPR2_SHIFT 0
-#define GMCR_GAPBB_1000 0x60
-#define GMCR_GAPR1_1000 0x2C
-#define GMCR_GAPR2_1000 0x40
-#define GMCR_GAPBB_100 0x70
-#define GMCR_GAPR1_100 0x2C
-#define GMCR_GAPR2_100 0x40
-#define XCR_RESET 0x80000000
-#define XCR_XMTEN 0x40000000
-#define XCR_PAUSEEN 0x20000000
-#define XCR_LOADRNG 0x10000000
-#define RCR_RESET 0x80000000
-#define RCR_RCVEN 0x40000000
-#define RCR_RCVALL 0x20000000
-#define RCR_RCVBAD 0x10000000
-#define RCR_CTLEN 0x08000000
-#define RCR_ADDRAEN 0x02000000
-#define GXCR_RESET 0x80000000
-#define GXCR_XMTEN 0x40000000
-#define GXCR_PAUSEEN 0x20000000
-#define GRCR_RESET 0x80000000
-#define GRCR_RCVEN 0x40000000
-#define GRCR_RCVALL 0x20000000
-#define GRCR_RCVBAD 0x10000000
-#define GRCR_CTLEN 0x08000000
-#define GRCR_ADDRAEN 0x02000000
-#define GRCR_HASHSIZE_SHIFT 17
-#define GRCR_HASHSIZE 14
-
-#define SLIC_EEPROM_ID 0xA5A5
-#define SLIC_SRAM_SIZE2GB (64 * 1024)
-#define SLIC_SRAM_SIZE1GB (32 * 1024)
-#define SLIC_HOSTID_DEFAULT 0xFFFF /* uninitialized hostid */
-#define SLIC_NBR_MACS 4
-
-struct slic_rcvbuf {
- u8 pad1[6];
- u16 pad2;
- u32 pad3;
- u32 pad4;
- u32 buffer;
- u32 length;
- u32 status;
- u32 pad5;
- u16 pad6;
- u8 data[SLIC_RCVBUF_DATASIZE];
-};
-
-struct slic_hddr_wds {
- union {
- struct {
- u32 frame_status;
- u32 frame_status_b;
- u32 time_stamp;
- u32 checksum;
- } hdrs_14port;
- struct {
- u32 frame_status;
- u16 ByteCnt;
- u16 TpChksum;
- u16 CtxHash;
- u16 MacHash;
- u32 BufLnk;
- } hdrs_gbit;
- } u0;
-};
-
-#define frame_status14 u0.hdrs_14port.frame_status
-#define frame_status_b14 u0.hdrs_14port.frame_status_b
-#define frame_statusGB u0.hdrs_gbit.frame_status
-
-struct slic_host64sg {
- u32 paddrl;
- u32 paddrh;
- u32 length;
-};
-
-struct slic_host64_cmd {
- u32 hosthandle;
- u32 RSVD;
- u8 command;
- u8 flags;
- union {
- u16 rsv1;
- u16 rsv2;
- } u0;
- union {
- struct {
- u32 totlen;
- struct slic_host64sg bufs[SLIC_MAX64_BCNT];
- } slic_buffers;
- } u;
-};
-
-struct slic_rspbuf {
- u32 hosthandle;
- u32 pad0;
- u32 pad1;
- u32 status;
- u32 pad2[4];
-};
-
-/* Reset Register */
-#define SLIC_REG_RESET 0x0000
-/* Interrupt Control Register */
-#define SLIC_REG_ICR 0x0008
-/* Interrupt status pointer */
-#define SLIC_REG_ISP 0x0010
-/* Interrupt status */
-#define SLIC_REG_ISR 0x0018
-/*
- * Header buffer address reg
- * 31-8 - phy addr of set of contiguous hdr buffers
- * 7-0 - number of buffers passed
- * Buffers are 256 bytes long on 256-byte boundaries.
- */
-#define SLIC_REG_HBAR 0x0020
-/*
- * Data buffer handle & address reg
- * 4 sets of registers; Buffers are 2K bytes long 2 per 4K page.
- */
-#define SLIC_REG_DBAR 0x0028
-/*
- * Xmt Cmd buf addr regs.
- * 1 per XMT interface
- * 31-5 - phy addr of host command buffer
- * 4-0 - length of cmd in multiples of 32 bytes
- * Buffers are 32 bytes up to 512 bytes long
- */
-#define SLIC_REG_CBAR 0x0030
-/* Write control store */
-#define SLIC_REG_WCS 0x0034
-/*
- * Response buffer address reg.
- * 31-8 - phy addr of set of contiguous response buffers
- * 7-0 - number of buffers passed
- * Buffers are 32 bytes long on 32-byte boundaries.
- */
-#define SLIC_REG_RBAR 0x0038
-/* Read statistics (UPR) */
-#define SLIC_REG_RSTAT 0x0040
-/* Read link status */
-#define SLIC_REG_LSTAT 0x0048
-/* Write Mac Config */
-#define SLIC_REG_WMCFG 0x0050
-/* Write phy register */
-#define SLIC_REG_WPHY 0x0058
-/* Rcv Cmd buf addr reg */
-#define SLIC_REG_RCBAR 0x0060
-/* Read SLIC Config*/
-#define SLIC_REG_RCONFIG 0x0068
-/* Interrupt aggregation time */
-#define SLIC_REG_INTAGG 0x0070
-/* Write XMIT config reg */
-#define SLIC_REG_WXCFG 0x0078
-/* Write RCV config reg */
-#define SLIC_REG_WRCFG 0x0080
-/* Write rcv addr a low */
-#define SLIC_REG_WRADDRAL 0x0088
-/* Write rcv addr a high */
-#define SLIC_REG_WRADDRAH 0x0090
-/* Write rcv addr b low */
-#define SLIC_REG_WRADDRBL 0x0098
-/* Write rcv addr b high */
-#define SLIC_REG_WRADDRBH 0x00a0
-/* Low bits of mcast mask */
-#define SLIC_REG_MCASTLOW 0x00a8
-/* High bits of mcast mask */
-#define SLIC_REG_MCASTHIGH 0x00b0
-/* Ping the card */
-#define SLIC_REG_PING 0x00b8
-/* Dump command */
-#define SLIC_REG_DUMP_CMD 0x00c0
-/* Dump data pointer */
-#define SLIC_REG_DUMP_DATA 0x00c8
-/* Read card's pci_status register */
-#define SLIC_REG_PCISTATUS 0x00d0
-/* Write hostid field */
-#define SLIC_REG_WRHOSTID 0x00d8
-/* Put card in a low power state */
-#define SLIC_REG_LOW_POWER 0x00e0
-/* Force slic into quiescent state before soft reset */
-#define SLIC_REG_QUIESCE 0x00e8
-/* Reset interface queues */
-#define SLIC_REG_RESET_IFACE 0x00f0
-/*
- * Register is only written when it has changed.
- * Bits 63-32 for host i/f addrs.
- */
-#define SLIC_REG_ADDR_UPPER 0x00f8
-/* 64 bit Header buffer address reg */
-#define SLIC_REG_HBAR64 0x0100
-/* 64 bit Data buffer handle & address reg */
-#define SLIC_REG_DBAR64 0x0108
-/* 64 bit Xmt Cmd buf addr regs. */
-#define SLIC_REG_CBAR64 0x0110
-/* 64 bit Response buffer address reg.*/
-#define SLIC_REG_RBAR64 0x0118
-/* 64 bit Rcv Cmd buf addr reg*/
-#define SLIC_REG_RCBAR64 0x0120
-/* Read statistics (64 bit UPR) */
-#define SLIC_REG_RSTAT64 0x0128
-/* Download Gigabit RCV sequencer ucode */
-#define SLIC_REG_RCV_WCS 0x0130
-/* Write VlanId field */
-#define SLIC_REG_WRVLANID 0x0138
-/* Read Transformer info */
-#define SLIC_REG_READ_XF_INFO 0x0140
-/* Write Transformer info */
-#define SLIC_REG_WRITE_XF_INFO 0x0148
-/* Write card ticks per second */
-#define SLIC_REG_TICKS_PER_SEC 0x0170
-
-#define SLIC_REG_HOSTID 0x1554
-
-enum UPR_REQUEST {
- SLIC_UPR_STATS,
- SLIC_UPR_RLSR,
- SLIC_UPR_WCFG,
- SLIC_UPR_RCONFIG,
- SLIC_UPR_RPHY,
- SLIC_UPR_ENLB,
- SLIC_UPR_ENCT,
- SLIC_UPR_PDWN,
- SLIC_UPR_PING,
- SLIC_UPR_DUMP,
-};
-
-struct inicpm_wakepattern {
- u32 patternlength;
- u8 pattern[SLIC_PM_PATTERNSIZE];
- u8 mask[SLIC_PM_PATTERNSIZE];
-};
-
-struct inicpm_state {
- u32 powercaps;
- u32 powerstate;
- u32 wake_linkstatus;
- u32 wake_magicpacket;
- u32 wake_framepattern;
- struct inicpm_wakepattern wakepattern[SLIC_PM_MAXPATTERNS];
-};
-
-struct slicpm_packet_pattern {
- u32 priority;
- u32 reserved;
- u32 masksize;
- u32 patternoffset;
- u32 patternsize;
- u32 patternflags;
-};
-
-enum slicpm_power_state {
- slicpm_state_unspecified = 0,
- slicpm_state_d0,
- slicpm_state_d1,
- slicpm_state_d2,
- slicpm_state_d3,
- slicpm_state_maximum
-};
-
-struct slicpm_wakeup_capabilities {
- enum slicpm_power_state min_magic_packet_wakeup;
- enum slicpm_power_state min_pattern_wakeup;
- enum slicpm_power_state min_link_change_wakeup;
-};
-
-struct slic_pnp_capabilities {
- u32 flags;
- struct slicpm_wakeup_capabilities wakeup_capabilities;
-};
-
-struct slic_config_mac {
- u8 macaddrA[6];
-};
-
-#define ATK_FRU_FORMAT 0x00
-#define VENDOR1_FRU_FORMAT 0x01
-#define VENDOR2_FRU_FORMAT 0x02
-#define VENDOR3_FRU_FORMAT 0x03
-#define VENDOR4_FRU_FORMAT 0x04
-#define NO_FRU_FORMAT 0xFF
-
-struct atk_fru {
- u8 assembly[6];
- u8 revision[2];
- u8 serial[14];
- u8 pad[3];
-};
-
-struct vendor1_fru {
- u8 commodity;
- u8 assembly[4];
- u8 revision[2];
- u8 supplier[2];
- u8 date[2];
- u8 sequence[3];
- u8 pad[13];
-};
-
-struct vendor2_fru {
- u8 part[8];
- u8 supplier[5];
- u8 date[3];
- u8 sequence[4];
- u8 pad[7];
-};
-
-struct vendor3_fru {
- u8 assembly[6];
- u8 revision[2];
- u8 serial[14];
- u8 pad[3];
-};
-
-struct vendor4_fru {
- u8 number[8];
- u8 part[8];
- u8 version[8];
- u8 pad[3];
-};
-
-union oemfru {
- struct vendor1_fru vendor1_fru;
- struct vendor2_fru vendor2_fru;
- struct vendor3_fru vendor3_fru;
- struct vendor4_fru vendor4_fru;
-};
-
-/*
- * SLIC EEPROM structure for Mojave
- */
-struct slic_eeprom {
- u16 Id; /* 00 EEPROM/FLASH Magic code 'A5A5'*/
- u16 EecodeSize; /* 01 Size of EEPROM Codes (bytes * 4)*/
- u16 FlashSize; /* 02 Flash size */
- u16 EepromSize; /* 03 EEPROM Size */
- u16 VendorId; /* 04 Vendor ID */
- u16 DeviceId; /* 05 Device ID */
- u8 RevisionId; /* 06 Revision ID */
- u8 ClassCode[3]; /* 07 Class Code */
- u8 DbgIntPin; /* 08 Debug Interrupt pin */
- u8 NetIntPin0; /* Network Interrupt Pin */
- u8 MinGrant; /* 09 Minimum grant */
- u8 MaxLat; /* Maximum Latency */
- u16 PciStatus; /* 10 PCI Status */
- u16 SubSysVId; /* 11 Subsystem Vendor Id */
- u16 SubSysId; /* 12 Subsystem ID */
- u16 DbgDevId; /* 13 Debug Device Id */
- u16 DramRomFn; /* 14 Dram/Rom function */
- u16 DSize2Pci; /* 15 DRAM size to PCI (bytes * 64K) */
- u16 RSize2Pci; /* 16 ROM extension size to PCI (bytes * 4k) */
- u8 NetIntPin1; /* 17 Network Interface Pin 1
- * (simba/leone only)
- */
- u8 NetIntPin2; /* Network Interface Pin 2 (simba/leone only)*/
- union {
- u8 NetIntPin3; /* 18 Network Interface Pin 3 (simba only) */
- u8 FreeTime; /* FreeTime setting (leone/mojave only) */
- } u1;
- u8 TBIctl; /* 10-bit interface control (Mojave only) */
- u16 DramSize; /* 19 DRAM size (bytes * 64k) */
- union {
- struct {
- /* Mac Interface Specific portions */
- struct slic_config_mac MacInfo[SLIC_NBR_MACS];
- } mac; /* MAC access for all boards */
- struct {
- /* use above struct for MAC access */
- struct slic_config_mac pad[SLIC_NBR_MACS - 1];
- u16 DeviceId2; /* Device ID for 2nd PCI function */
- u8 IntPin2; /* Interrupt pin for 2nd PCI function */
- u8 ClassCode2[3]; /* Class Code for 2nd PCI function */
- } mojave; /* 2nd function access for gigabit board */
- } u2;
- u16 CfgByte6; /* Config Byte 6 */
- u16 PMECapab; /* Power Mgmt capabilities */
- u16 NwClkCtrls; /* NetworkClockControls */
- u8 FruFormat; /* Alacritech FRU format type */
- struct atk_fru AtkFru; /* Alacritech FRU information */
- u8 OemFruFormat; /* optional OEM FRU format type */
- union oemfru OemFru; /* optional OEM FRU information */
- u8 Pad[4]; /* Pad to 128 bytes - includes 2 cksum bytes
- * (if OEM FRU info exists) and two unusable
- * bytes at the end
- */
-};
-
-/* SLIC EEPROM structure for Oasis */
-struct oslic_eeprom {
- u16 Id; /* 00 EEPROM/FLASH Magic code 'A5A5' */
- u16 EecodeSize; /* 01 Size of EEPROM Codes (bytes * 4)*/
- u16 FlashConfig0; /* 02 Flash Config for SPI device 0 */
- u16 FlashConfig1; /* 03 Flash Config for SPI device 1 */
- u16 VendorId; /* 04 Vendor ID */
- u16 DeviceId; /* 05 Device ID (function 0) */
- u8 RevisionId; /* 06 Revision ID */
- u8 ClassCode[3]; /* 07 Class Code for PCI function 0 */
- u8 IntPin1; /* 08 Interrupt pin for PCI function 1*/
- u8 ClassCode2[3]; /* 09 Class Code for PCI function 1 */
- u8 IntPin2; /* 10 Interrupt pin for PCI function 2*/
- u8 IntPin0; /* Interrupt pin for PCI function 0*/
- u8 MinGrant; /* 11 Minimum grant */
- u8 MaxLat; /* Maximum Latency */
- u16 SubSysVId; /* 12 Subsystem Vendor Id */
- u16 SubSysId; /* 13 Subsystem ID */
- u16 FlashSize; /* 14 Flash size (bytes / 4K) */
- u16 DSize2Pci; /* 15 DRAM size to PCI (bytes / 64K) */
- u16 RSize2Pci; /* 16 Flash (ROM extension) size to PCI
- * (bytes / 4K)
- */
- u16 DeviceId1; /* 17 Device Id (function 1) */
- u16 DeviceId2; /* 18 Device Id (function 2) */
- u16 CfgByte6; /* 19 Device Status Config Bytes 6-7 */
- u16 PMECapab; /* 20 Power Mgmt capabilities */
- u8 MSICapab; /* 21 MSI capabilities */
- u8 ClockDivider; /* Clock divider */
- u16 PciStatusLow; /* 22 PCI Status bits 15:0 */
- u16 PciStatusHigh; /* 23 PCI Status bits 31:16 */
- u16 DramConfigLow; /* 24 DRAM Configuration bits 15:0 */
- u16 DramConfigHigh; /* 25 DRAM Configuration bits 31:16 */
- u16 DramSize; /* 26 DRAM size (bytes / 64K) */
- u16 GpioTbiCtl; /* 27 GPIO/TBI controls for functions 1/0 */
- u16 EepromSize; /* 28 EEPROM Size */
- struct slic_config_mac MacInfo[2]; /* 29 MAC addresses (2 ports) */
- u8 FruFormat; /* 35 Alacritech FRU format type */
- struct atk_fru AtkFru; /* Alacritech FRU information */
- u8 OemFruFormat; /* optional OEM FRU format type */
- union oemfru OemFru; /* optional OEM FRU information */
- u8 Pad[4]; /* Pad to 128 bytes - includes 2 checksum bytes
- * (if OEM FRU info exists) and two unusable
- * bytes at the end
- */
-};
-
-#define MAX_EECODE_SIZE sizeof(struct slic_eeprom)
-#define MIN_EECODE_SIZE 0x62 /* code size without optional OEM FRU stuff */
-
-/*
- * SLIC CONFIG structure
- *
- * This structure lives in the CARD structure and is valid for all board types.
- * It is filled in from the appropriate EEPROM structure by
- * SlicGetConfigData()
- */
-struct slic_config {
- bool EepromValid; /* Valid EEPROM flag (checksum good?) */
- u16 DramSize; /* DRAM size (bytes / 64K) */
- struct slic_config_mac MacInfo[SLIC_NBR_MACS]; /* MAC addresses */
- u8 FruFormat; /* Alacritech FRU format type */
- struct atk_fru AtkFru; /* Alacritech FRU information */
- u8 OemFruFormat; /* optional OEM FRU format type */
- union {
- struct vendor1_fru vendor1_fru;
- struct vendor2_fru vendor2_fru;
- struct vendor3_fru vendor3_fru;
- struct vendor4_fru vendor4_fru;
- } OemFru;
-};
-
-#pragma pack()
-
-#endif
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
deleted file mode 100644
index 2802b900f8ee..000000000000
--- a/drivers/staging/slicoss/slicoss.c
+++ /dev/null
@@ -1,3131 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2000-2006 Alacritech, Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation
- * are those of the authors and should not be interpreted as representing
- * official policies, either expressed or implied, of Alacritech, Inc.
- *
- **************************************************************************/
-
-/*
- * FILENAME: slicoss.c
- *
- * The SLICOSS driver for Alacritech's IS-NIC products.
- *
- * This driver is supposed to support:
- *
- * Mojave cards (single port PCI Gigabit) both copper and fiber
- * Oasis cards (single and dual port PCI-x Gigabit) copper and fiber
- * Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber
- *
- * The driver was actually tested on Oasis and Kalahari cards.
- *
- *
- * NOTE: This is the standard, non-accelerated version of Alacritech's
- * IS-NIC driver.
- */
-
-#define KLUDGE_FOR_4GB_BOUNDARY 1
-#define DEBUG_MICROCODE 1
-#define DBG 1
-#define SLIC_INTERRUPT_PROCESS_LIMIT 1
-#define SLIC_OFFLOAD_IP_CHECKSUM 1
-#define STATS_TIMER_INTERVAL 2
-#define PING_TIMER_INTERVAL 1
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/timer.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/io.h>
-#include <linux/netdevice.h>
-#include <linux/crc32.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/seq_file.h>
-#include <linux/kthread.h>
-#include <linux/module.h>
-
-#include <linux/firmware.h>
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <asm/unaligned.h>
-
-#include <linux/ethtool.h>
-#include <linux/uaccess.h>
-#include "slichw.h"
-#include "slic.h"
-
-static uint slic_first_init = 1;
-static char *slic_banner = "Alacritech SLIC Technology(tm) Server and Storage Accelerator (Non-Accelerated)";
-
-static char *slic_proc_version = "2.0.351 2006/07/14 12:26:00";
-
-static struct base_driver slic_global = { {}, 0, 0, 0, 1, NULL, NULL };
-#define DEFAULT_INTAGG_DELAY 100
-static unsigned int rcv_count;
-
-#define DRV_NAME "slicoss"
-#define DRV_VERSION "2.0.1"
-#define DRV_AUTHOR "Alacritech, Inc. Engineering"
-#define DRV_DESCRIPTION "Alacritech SLIC Technology(tm) "\
- "Non-Accelerated Driver"
-#define DRV_COPYRIGHT "Copyright 2000-2006 Alacritech, Inc. "\
- "All rights reserved."
-#define PFX DRV_NAME " "
-
-MODULE_AUTHOR(DRV_AUTHOR);
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_LICENSE("Dual BSD/GPL");
-
-static const struct pci_device_id slic_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_1GB_DEVICE_ID) },
- { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_2GB_DEVICE_ID) },
- { 0 }
-};
-
-static const struct ethtool_ops slic_ethtool_ops;
-
-MODULE_DEVICE_TABLE(pci, slic_pci_tbl);
-
-static void slic_mcast_set_bit(struct adapter *adapter, char *address)
-{
- unsigned char crcpoly;
-
- /* Get the CRC polynomial for the mac address */
- /*
- * we use bits 1-8 (lsb), bitwise reversed,
- * msb (= lsb bit 0 before bitrev) is automatically discarded
- */
- crcpoly = ether_crc(ETH_ALEN, address) >> 23;
-
- /*
- * We only have space on the SLIC for 64 entries. Lop
- * off the top two bits. (2^6 = 64)
- */
- crcpoly &= 0x3F;
-
- /* OR in the new bit into our 64 bit mask. */
- adapter->mcastmask |= (u64)1 << crcpoly;
-}
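
The routine above maps each multicast MAC address to one of 64 hash-filter bits: a CRC-32 is taken over the 6-byte address and six high-order bits of the result select the bit to set in the 64-bit mask. A minimal, self-contained sketch of that mapping follows; the standalone CRC routine is written to mirror how the kernel's ether_crc() is commonly implemented (MSB-first, polynomial 0x04C11DB7), so treat it, and the function names, as illustrative assumptions rather than driver code.

#include <stdint.h>

/* CRC-32, MSB-first, polynomial 0x04C11DB7 (assumed equivalent of ether_crc()) */
static uint32_t crc32_msb(const uint8_t *data, int len)
{
	uint32_t crc = 0xFFFFFFFF;
	int bit;

	while (len-- > 0) {
		uint8_t octet = *data++;

		for (bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      (((crc >> 31) ^ (octet & 1)) ? 0x04C11DB7 : 0);
	}
	return crc;
}

/* Bit index (0..63) to set in the 64-bit multicast mask, as in slic_mcast_set_bit() */
static unsigned int slic_mcast_hash_bit(const uint8_t addr[6])
{
	/* keep six bits of the shifted CRC: 2^6 = 64 filter entries */
	return (crc32_msb(addr, 6) >> 23) & 0x3F;
}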
-
-static void slic_mcast_set_mask(struct adapter *adapter)
-{
- if (adapter->macopts & (MAC_ALLMCAST | MAC_PROMISC)) {
- /*
- * Turn on all multicast addresses. We have to do this for
- * promiscuous mode as well as ALLMCAST mode. It saves the
- * Microcode from having to keep state about the MAC
- * configuration.
- */
- slic_write32(adapter, SLIC_REG_MCASTLOW, 0xFFFFFFFF);
- slic_write32(adapter, SLIC_REG_MCASTHIGH, 0xFFFFFFFF);
- } else {
- /*
- * Commit our multicast mask to the SLIC by writing to the
- * multicast address mask registers
- */
- slic_write32(adapter, SLIC_REG_MCASTLOW,
- (u32)(adapter->mcastmask & 0xFFFFFFFF));
- slic_write32(adapter, SLIC_REG_MCASTHIGH,
- (u32)((adapter->mcastmask >> 32) & 0xFFFFFFFF));
- }
-}
-
-static void slic_timer_ping(ulong dev)
-{
- struct adapter *adapter;
- struct sliccard *card;
-
- adapter = netdev_priv((struct net_device *)dev);
- card = adapter->card;
-
- adapter->pingtimer.expires = jiffies + (PING_TIMER_INTERVAL * HZ);
- add_timer(&adapter->pingtimer);
-}
-
-/*
- * slic_link_config
- *
- * Write phy control to configure link duplex/speed
- *
- */
-static void slic_link_config(struct adapter *adapter,
- u32 linkspeed, u32 linkduplex)
-{
- u32 speed;
- u32 duplex;
- u32 phy_config;
- u32 phy_advreg;
- u32 phy_gctlreg;
-
- if (adapter->state != ADAPT_UP)
- return;
-
- if (linkspeed > LINK_1000MB)
- linkspeed = LINK_AUTOSPEED;
- if (linkduplex > LINK_AUTOD)
- linkduplex = LINK_AUTOD;
-
- if ((linkspeed == LINK_AUTOSPEED) || (linkspeed == LINK_1000MB)) {
- if (adapter->flags & ADAPT_FLAGS_FIBERMEDIA) {
- /*
- * We've got a fiber gigabit interface, and register
- * 4 is different in fiber mode than in copper mode
- */
-
- /* advertise FD only @1000 Mb */
- phy_advreg = (MIICR_REG_4 | (PAR_ADV1000XFD));
- /* enable PAUSE frames */
- phy_advreg |= PAR_ASYMPAUSE_FIBER;
- slic_write32(adapter, SLIC_REG_WPHY, phy_advreg);
-
- if (linkspeed == LINK_AUTOSPEED) {
- /* reset phy, enable auto-neg */
- phy_config =
- (MIICR_REG_PCR |
- (PCR_RESET | PCR_AUTONEG |
- PCR_AUTONEG_RST));
- slic_write32(adapter, SLIC_REG_WPHY,
- phy_config);
- } else { /* forced 1000 Mb FD*/
- /*
- * power down phy to break link
- * (this may not work)
- */
- phy_config = (MIICR_REG_PCR | PCR_POWERDOWN);
- slic_write32(adapter, SLIC_REG_WPHY,
- phy_config);
- slic_flush_write(adapter);
- /*
- * wait, Marvell says 1 sec,
- * try to get away with 10 ms
- */
- mdelay(10);
-
- /*
- * disable auto-neg, set speed/duplex,
- * soft reset phy, powerup
- */
- phy_config =
- (MIICR_REG_PCR |
- (PCR_RESET | PCR_SPEED_1000 |
- PCR_DUPLEX_FULL));
- slic_write32(adapter, SLIC_REG_WPHY,
- phy_config);
- }
- } else { /* copper gigabit */
-
- /*
- * Auto-Negotiate or 1000 Mb must be auto-negotiated.
- * We've got a copper gigabit interface, and
- * register 4 is different in copper mode than
- * in fiber mode
- */
- if (linkspeed == LINK_AUTOSPEED) {
- /* advertise 10/100 Mb modes */
- phy_advreg =
- (MIICR_REG_4 |
- (PAR_ADV100FD | PAR_ADV100HD | PAR_ADV10FD
- | PAR_ADV10HD));
- } else {
- /*
- * linkspeed == LINK_1000MB -
- * don't advertise 10/100 Mb modes
- */
- phy_advreg = MIICR_REG_4;
- }
- /* enable PAUSE frames */
- phy_advreg |= PAR_ASYMPAUSE;
- /* required by the Cicada PHY */
- phy_advreg |= PAR_802_3;
- slic_write32(adapter, SLIC_REG_WPHY, phy_advreg);
- /* advertise FD only @1000 Mb */
- phy_gctlreg = (MIICR_REG_9 | (PGC_ADV1000FD));
- slic_write32(adapter, SLIC_REG_WPHY, phy_gctlreg);
-
- if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) {
- /*
- * if a Marvell PHY
- * enable auto crossover
- */
- phy_config =
- (MIICR_REG_16 | (MRV_REG16_XOVERON));
- slic_write32(adapter, SLIC_REG_WPHY,
- phy_config);
-
- /* reset phy, enable auto-neg */
- phy_config =
- (MIICR_REG_PCR |
- (PCR_RESET | PCR_AUTONEG |
- PCR_AUTONEG_RST));
- slic_write32(adapter, SLIC_REG_WPHY,
- phy_config);
- } else { /* it's a Cicada PHY */
- /* enable and restart auto-neg (don't reset) */
- phy_config =
- (MIICR_REG_PCR |
- (PCR_AUTONEG | PCR_AUTONEG_RST));
- slic_write32(adapter, SLIC_REG_WPHY,
- phy_config);
- }
- }
- } else {
- /* Forced 10/100 */
- if (linkspeed == LINK_10MB)
- speed = 0;
- else
- speed = PCR_SPEED_100;
- if (linkduplex == LINK_HALFD)
- duplex = 0;
- else
- duplex = PCR_DUPLEX_FULL;
-
- if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) {
- /*
- * if a Marvell PHY
- * disable auto crossover
- */
- phy_config = (MIICR_REG_16 | (MRV_REG16_XOVEROFF));
- slic_write32(adapter, SLIC_REG_WPHY, phy_config);
- }
-
- /* power down phy to break link (this may not work) */
- phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN | speed | duplex));
- slic_write32(adapter, SLIC_REG_WPHY, phy_config);
- slic_flush_write(adapter);
- /* wait, Marvell says 1 sec, try to get away with 10 ms */
- mdelay(10);
-
- if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) {
- /*
- * if a Marvell PHY
- * disable auto-neg, set speed,
- * soft reset phy, powerup
- */
- phy_config =
- (MIICR_REG_PCR | (PCR_RESET | speed | duplex));
- slic_write32(adapter, SLIC_REG_WPHY, phy_config);
- } else { /* it's a Cicada PHY */
- /* disable auto-neg, set speed, powerup */
- phy_config = (MIICR_REG_PCR | (speed | duplex));
- slic_write32(adapter, SLIC_REG_WPHY, phy_config);
- }
- }
-}
-
-static int slic_card_download_gbrcv(struct adapter *adapter)
-{
- const struct firmware *fw;
- const char *file = "";
- int ret;
- u32 codeaddr;
- u32 instruction;
- int index = 0;
- u32 rcvucodelen = 0;
-
- switch (adapter->devid) {
- case SLIC_2GB_DEVICE_ID:
- file = "slicoss/oasisrcvucode.sys";
- break;
- case SLIC_1GB_DEVICE_ID:
- file = "slicoss/gbrcvucode.sys";
- break;
- default:
- return -ENOENT;
- }
-
- ret = request_firmware(&fw, file, &adapter->pcidev->dev);
- if (ret) {
- dev_err(&adapter->pcidev->dev,
- "Failed to load firmware %s\n", file);
- return ret;
- }
-
- rcvucodelen = *(u32 *)(fw->data + index);
- index += 4;
- switch (adapter->devid) {
- case SLIC_2GB_DEVICE_ID:
- if (rcvucodelen != OasisRcvUCodeLen) {
- release_firmware(fw);
- return -EINVAL;
- }
- break;
- case SLIC_1GB_DEVICE_ID:
- if (rcvucodelen != GBRcvUCodeLen) {
- release_firmware(fw);
- return -EINVAL;
- }
- break;
- }
- /* start download */
- slic_write32(adapter, SLIC_REG_RCV_WCS, SLIC_RCVWCS_BEGIN);
- /* download the rcv sequencer ucode */
- for (codeaddr = 0; codeaddr < rcvucodelen; codeaddr++) {
- /* write out instruction address */
- slic_write32(adapter, SLIC_REG_RCV_WCS, codeaddr);
-
- instruction = *(u32 *)(fw->data + index);
- index += 4;
- /* write out the instruction data low addr */
- slic_write32(adapter, SLIC_REG_RCV_WCS, instruction);
-
- instruction = *(u8 *)(fw->data + index);
- index++;
- /* write out the instruction data high addr */
- slic_write32(adapter, SLIC_REG_RCV_WCS, instruction);
- }
-
- /* download finished */
- release_firmware(fw);
- slic_write32(adapter, SLIC_REG_RCV_WCS, SLIC_RCVWCS_FINISH);
- slic_flush_write(adapter);
-
- return 0;
-}
-
-MODULE_FIRMWARE("slicoss/oasisrcvucode.sys");
-MODULE_FIRMWARE("slicoss/gbrcvucode.sys");
-
-static int slic_card_download(struct adapter *adapter)
-{
- const struct firmware *fw;
- const char *file = "";
- int ret;
- u32 section;
- int thissectionsize;
- int codeaddr;
- u32 instruction;
- u32 baseaddress;
- u32 i;
- u32 numsects = 0;
- u32 sectsize[3];
- u32 sectstart[3];
- int ucode_start, index = 0;
-
- switch (adapter->devid) {
- case SLIC_2GB_DEVICE_ID:
- file = "slicoss/oasisdownload.sys";
- break;
- case SLIC_1GB_DEVICE_ID:
- file = "slicoss/gbdownload.sys";
- break;
- default:
- return -ENOENT;
- }
- ret = request_firmware(&fw, file, &adapter->pcidev->dev);
- if (ret) {
- dev_err(&adapter->pcidev->dev,
- "Failed to load firmware %s\n", file);
- return ret;
- }
- numsects = *(u32 *)(fw->data + index);
- index += 4;
- for (i = 0; i < numsects; i++) {
- sectsize[i] = *(u32 *)(fw->data + index);
- index += 4;
- }
- for (i = 0; i < numsects; i++) {
- sectstart[i] = *(u32 *)(fw->data + index);
- index += 4;
- }
- ucode_start = index;
- instruction = *(u32 *)(fw->data + index);
- index += 4;
- for (section = 0; section < numsects; section++) {
- baseaddress = sectstart[section];
- thissectionsize = sectsize[section] >> 3;
-
- for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) {
- /* Write out instruction address */
- slic_write32(adapter, SLIC_REG_WCS,
- baseaddress + codeaddr);
- /* Write out instruction to low addr */
- slic_write32(adapter, SLIC_REG_WCS,
- instruction);
- instruction = *(u32 *)(fw->data + index);
- index += 4;
-
- /* Write out instruction to high addr */
- slic_write32(adapter, SLIC_REG_WCS,
- instruction);
- instruction = *(u32 *)(fw->data + index);
- index += 4;
- }
- }
- index = ucode_start;
- for (section = 0; section < numsects; section++) {
- instruction = *(u32 *)(fw->data + index);
- baseaddress = sectstart[section];
- if (baseaddress < 0x8000)
- continue;
- thissectionsize = sectsize[section] >> 3;
-
- for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) {
- /* Write out instruction address */
- slic_write32(adapter, SLIC_REG_WCS,
- SLIC_WCS_COMPARE | (baseaddress +
- codeaddr));
- /* Write out instruction to low addr */
- slic_write32(adapter, SLIC_REG_WCS, instruction);
- instruction = *(u32 *)(fw->data + index);
- index += 4;
- /* Write out instruction to high addr */
- slic_write32(adapter, SLIC_REG_WCS, instruction);
- instruction = *(u32 *)(fw->data + index);
- index += 4;
- }
- }
- release_firmware(fw);
- /* Everything OK, kick off the card */
- mdelay(10);
-
- slic_write32(adapter, SLIC_REG_WCS, SLIC_WCS_START);
- slic_flush_write(adapter);
- /*
- * stall for 20 ms, long enough for ucode to init card
- * and reach mainloop
- */
- mdelay(20);
-
- return 0;
-}
-
-MODULE_FIRMWARE("slicoss/oasisdownload.sys");
-MODULE_FIRMWARE("slicoss/gbdownload.sys");
-
-static void slic_adapter_set_hwaddr(struct adapter *adapter)
-{
- struct sliccard *card = adapter->card;
-
- if ((adapter->card) && (card->config_set)) {
- memcpy(adapter->macaddr,
- card->config.MacInfo[adapter->functionnumber].macaddrA,
- sizeof(struct slic_config_mac));
- if (is_zero_ether_addr(adapter->currmacaddr))
- memcpy(adapter->currmacaddr, adapter->macaddr,
- ETH_ALEN);
- if (adapter->netdev)
- memcpy(adapter->netdev->dev_addr, adapter->currmacaddr,
- ETH_ALEN);
- }
-}
-
-static void slic_intagg_set(struct adapter *adapter, u32 value)
-{
- slic_write32(adapter, SLIC_REG_INTAGG, value);
- adapter->card->loadlevel_current = value;
-}
-
-static void slic_soft_reset(struct adapter *adapter)
-{
- if (adapter->card->state == CARD_UP) {
- slic_write32(adapter, SLIC_REG_QUIESCE, 0);
- slic_flush_write(adapter);
- mdelay(1);
- }
-
- slic_write32(adapter, SLIC_REG_RESET, SLIC_RESET_MAGIC);
- slic_flush_write(adapter);
-
- mdelay(1);
-}
-
-static void slic_mac_address_config(struct adapter *adapter)
-{
- u32 value;
- u32 value2;
-
- value = ntohl(*(__be32 *)&adapter->currmacaddr[2]);
- slic_write32(adapter, SLIC_REG_WRADDRAL, value);
- slic_write32(adapter, SLIC_REG_WRADDRBL, value);
-
- value2 = (u32)((adapter->currmacaddr[0] << 8 |
- adapter->currmacaddr[1]) & 0xFFFF);
-
- slic_write32(adapter, SLIC_REG_WRADDRAH, value2);
- slic_write32(adapter, SLIC_REG_WRADDRBH, value2);
-
- /*
- * Write our multicast mask out to the card. This is done
- * here in addition to the slic_mcast_set_list routine
- * because ALL_MCAST may have been enabled or disabled
- */
- slic_mcast_set_mask(adapter);
-}
-
-static void slic_mac_config(struct adapter *adapter)
-{
- u32 value;
-
- /* Setup GMAC gaps */
- if (adapter->linkspeed == LINK_1000MB) {
- value = ((GMCR_GAPBB_1000 << GMCR_GAPBB_SHIFT) |
- (GMCR_GAPR1_1000 << GMCR_GAPR1_SHIFT) |
- (GMCR_GAPR2_1000 << GMCR_GAPR2_SHIFT));
- } else {
- value = ((GMCR_GAPBB_100 << GMCR_GAPBB_SHIFT) |
- (GMCR_GAPR1_100 << GMCR_GAPR1_SHIFT) |
- (GMCR_GAPR2_100 << GMCR_GAPR2_SHIFT));
- }
-
- /* enable GMII */
- if (adapter->linkspeed == LINK_1000MB)
- value |= GMCR_GBIT;
-
- /* enable fullduplex */
- if ((adapter->linkduplex == LINK_FULLD)
- || (adapter->macopts & MAC_LOOPBACK)) {
- value |= GMCR_FULLD;
- }
-
- /* write mac config */
- slic_write32(adapter, SLIC_REG_WMCFG, value);
-
- /* setup mac addresses */
- slic_mac_address_config(adapter);
-}
-
-static void slic_config_set(struct adapter *adapter, bool linkchange)
-{
- u32 value;
- u32 RcrReset;
-
- if (linkchange) {
- /* Setup MAC */
- slic_mac_config(adapter);
- RcrReset = GRCR_RESET;
- } else {
- slic_mac_address_config(adapter);
- RcrReset = 0;
- }
-
- if (adapter->linkduplex == LINK_FULLD) {
- /* setup xmtcfg */
- value = (GXCR_RESET | /* Always reset */
- GXCR_XMTEN | /* Enable transmit */
- GXCR_PAUSEEN); /* Enable pause */
-
- slic_write32(adapter, SLIC_REG_WXCFG, value);
-
- /* Setup rcvcfg last */
- value = (RcrReset | /* Reset, if linkchange */
- GRCR_CTLEN | /* Enable CTL frames */
- GRCR_ADDRAEN | /* Address A enable */
- GRCR_RCVBAD | /* Rcv bad frames */
- (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT));
- } else {
- /* setup xmtcfg */
- value = (GXCR_RESET | /* Always reset */
- GXCR_XMTEN); /* Enable transmit */
-
- slic_write32(adapter, SLIC_REG_WXCFG, value);
-
- /* Setup rcvcfg last */
- value = (RcrReset | /* Reset, if linkchange */
- GRCR_ADDRAEN | /* Address A enable */
- GRCR_RCVBAD | /* Rcv bad frames */
- (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT));
- }
-
- if (adapter->state != ADAPT_DOWN) {
- /* Only enable receive if we are restarting or running */
- value |= GRCR_RCVEN;
- }
-
- if (adapter->macopts & MAC_PROMISC)
- value |= GRCR_RCVALL;
-
- slic_write32(adapter, SLIC_REG_WRCFG, value);
-}
-
-/*
- * Turn off RCV and XMT, power down PHY
- */
-static void slic_config_clear(struct adapter *adapter)
-{
- u32 value;
- u32 phy_config;
-
- /* Setup xmtcfg */
- value = (GXCR_RESET | /* Always reset */
- GXCR_PAUSEEN); /* Enable pause */
-
- slic_write32(adapter, SLIC_REG_WXCFG, value);
-
- value = (GRCR_RESET | /* Always reset */
- GRCR_CTLEN | /* Enable CTL frames */
- GRCR_ADDRAEN | /* Address A enable */
- (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT));
-
- slic_write32(adapter, SLIC_REG_WRCFG, value);
-
- /* power down phy */
- phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN));
- slic_write32(adapter, SLIC_REG_WPHY, phy_config);
-}
-
-static bool slic_mac_filter(struct adapter *adapter,
- struct ether_header *ether_frame)
-{
- struct net_device *netdev = adapter->netdev;
- u32 opts = adapter->macopts;
-
- if (opts & MAC_PROMISC)
- return true;
-
- if (is_broadcast_ether_addr(ether_frame->ether_dhost)) {
- if (opts & MAC_BCAST) {
- adapter->rcv_broadcasts++;
- return true;
- }
-
- return false;
- }
-
- if (is_multicast_ether_addr(ether_frame->ether_dhost)) {
- if (opts & MAC_ALLMCAST) {
- adapter->rcv_multicasts++;
- netdev->stats.multicast++;
- return true;
- }
- if (opts & MAC_MCAST) {
- struct mcast_address *mcaddr = adapter->mcastaddrs;
-
- while (mcaddr) {
- if (ether_addr_equal(mcaddr->address,
- ether_frame->ether_dhost)) {
- adapter->rcv_multicasts++;
- netdev->stats.multicast++;
- return true;
- }
- mcaddr = mcaddr->next;
- }
-
- return false;
- }
-
- return false;
- }
- if (opts & MAC_DIRECTED) {
- adapter->rcv_unicasts++;
- return true;
- }
- return false;
-}
-
-static int slic_mac_set_address(struct net_device *dev, void *ptr)
-{
- struct adapter *adapter = netdev_priv(dev);
- struct sockaddr *addr = ptr;
-
- if (netif_running(dev))
- return -EBUSY;
- if (!adapter)
- return -EBUSY;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
-
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
-
- slic_config_set(adapter, true);
- return 0;
-}
-
-static void slic_timer_load_check(ulong cardaddr)
-{
- struct sliccard *card = (struct sliccard *)cardaddr;
- struct adapter *adapter = card->master;
- u32 load = card->events;
- u32 level = 0;
-
- if ((adapter) && (adapter->state == ADAPT_UP) &&
- (card->state == CARD_UP) && (slic_global.dynamic_intagg)) {
- if (adapter->devid == SLIC_1GB_DEVICE_ID) {
- if (adapter->linkspeed == LINK_1000MB)
- level = 100;
- else {
- if (load > SLIC_LOAD_5)
- level = SLIC_INTAGG_5;
- else if (load > SLIC_LOAD_4)
- level = SLIC_INTAGG_4;
- else if (load > SLIC_LOAD_3)
- level = SLIC_INTAGG_3;
- else if (load > SLIC_LOAD_2)
- level = SLIC_INTAGG_2;
- else if (load > SLIC_LOAD_1)
- level = SLIC_INTAGG_1;
- else
- level = SLIC_INTAGG_0;
- }
- if (card->loadlevel_current != level) {
- card->loadlevel_current = level;
- slic_write32(adapter, SLIC_REG_INTAGG, level);
- }
- } else {
- if (load > SLIC_LOAD_5)
- level = SLIC_INTAGG_5;
- else if (load > SLIC_LOAD_4)
- level = SLIC_INTAGG_4;
- else if (load > SLIC_LOAD_3)
- level = SLIC_INTAGG_3;
- else if (load > SLIC_LOAD_2)
- level = SLIC_INTAGG_2;
- else if (load > SLIC_LOAD_1)
- level = SLIC_INTAGG_1;
- else
- level = SLIC_INTAGG_0;
- if (card->loadlevel_current != level) {
- card->loadlevel_current = level;
- slic_write32(adapter, SLIC_REG_INTAGG, level);
- }
- }
- }
- card->events = 0;
- card->loadtimer.expires = jiffies + (SLIC_LOADTIMER_PERIOD * HZ);
- add_timer(&card->loadtimer);
-}
-
-static int slic_upr_queue_request(struct adapter *adapter,
- u32 upr_request,
- u32 upr_data,
- u32 upr_data_h,
- u32 upr_buffer, u32 upr_buffer_h)
-{
- struct slic_upr *upr;
- struct slic_upr *uprqueue;
-
- upr = kmalloc(sizeof(*upr), GFP_ATOMIC);
- if (!upr)
- return -ENOMEM;
-
- upr->adapter = adapter->port;
- upr->upr_request = upr_request;
- upr->upr_data = upr_data;
- upr->upr_buffer = upr_buffer;
- upr->upr_data_h = upr_data_h;
- upr->upr_buffer_h = upr_buffer_h;
- upr->next = NULL;
- if (adapter->upr_list) {
- uprqueue = adapter->upr_list;
-
- while (uprqueue->next)
- uprqueue = uprqueue->next;
- uprqueue->next = upr;
- } else {
- adapter->upr_list = upr;
- }
- return 0;
-}
-
-static void slic_upr_start(struct adapter *adapter)
-{
- struct slic_upr *upr;
-
- upr = adapter->upr_list;
- if (!upr)
- return;
- if (adapter->upr_busy)
- return;
- adapter->upr_busy = 1;
-
- switch (upr->upr_request) {
- case SLIC_UPR_STATS:
- if (upr->upr_data_h == 0) {
- slic_write32(adapter, SLIC_REG_RSTAT, upr->upr_data);
- } else {
- slic_write64(adapter, SLIC_REG_RSTAT64, upr->upr_data,
- upr->upr_data_h);
- }
- break;
-
- case SLIC_UPR_RLSR:
- slic_write64(adapter, SLIC_REG_LSTAT, upr->upr_data,
- upr->upr_data_h);
- break;
-
- case SLIC_UPR_RCONFIG:
- slic_write64(adapter, SLIC_REG_RCONFIG, upr->upr_data,
- upr->upr_data_h);
- break;
- case SLIC_UPR_PING:
- slic_write32(adapter, SLIC_REG_PING, 1);
- break;
- }
- slic_flush_write(adapter);
-}
-
-static int slic_upr_request(struct adapter *adapter,
- u32 upr_request,
- u32 upr_data,
- u32 upr_data_h,
- u32 upr_buffer, u32 upr_buffer_h)
-{
- unsigned long flags;
- int rc;
-
- spin_lock_irqsave(&adapter->upr_lock, flags);
- rc = slic_upr_queue_request(adapter,
- upr_request,
- upr_data,
- upr_data_h, upr_buffer, upr_buffer_h);
- if (rc)
- goto err_unlock_irq;
-
- slic_upr_start(adapter);
-err_unlock_irq:
- spin_unlock_irqrestore(&adapter->upr_lock, flags);
- return rc;
-}
-
-static void slic_link_upr_complete(struct adapter *adapter, u32 isr)
-{
- struct slic_shmemory *sm = &adapter->shmem;
- struct slic_shmem_data *sm_data = sm->shmem_data;
- u32 lst = sm_data->lnkstatus;
- uint linkup;
- unsigned char linkspeed;
- unsigned char linkduplex;
-
- if ((isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
- dma_addr_t phaddr = sm->lnkstatus_phaddr;
-
- slic_upr_queue_request(adapter, SLIC_UPR_RLSR,
- cpu_to_le32(lower_32_bits(phaddr)),
- cpu_to_le32(upper_32_bits(phaddr)),
- 0, 0);
- return;
- }
- if (adapter->state != ADAPT_UP)
- return;
-
- linkup = lst & GIG_LINKUP ? LINK_UP : LINK_DOWN;
- if (lst & GIG_SPEED_1000)
- linkspeed = LINK_1000MB;
- else if (lst & GIG_SPEED_100)
- linkspeed = LINK_100MB;
- else
- linkspeed = LINK_10MB;
-
- if (lst & GIG_FULLDUPLEX)
- linkduplex = LINK_FULLD;
- else
- linkduplex = LINK_HALFD;
-
- if ((adapter->linkstate == LINK_DOWN) && (linkup == LINK_DOWN))
- return;
-
- /* link up event, but nothing has changed */
- if ((adapter->linkstate == LINK_UP) &&
- (linkup == LINK_UP) &&
- (adapter->linkspeed == linkspeed) &&
- (adapter->linkduplex == linkduplex))
- return;
-
- /* link has changed at this point */
-
- /* link has gone from up to down */
- if (linkup == LINK_DOWN) {
- adapter->linkstate = LINK_DOWN;
- netif_carrier_off(adapter->netdev);
- return;
- }
-
- /* link has gone from down to up */
- adapter->linkspeed = linkspeed;
- adapter->linkduplex = linkduplex;
-
- if (adapter->linkstate != LINK_UP) {
- /* setup the mac */
- slic_config_set(adapter, true);
- adapter->linkstate = LINK_UP;
- netif_carrier_on(adapter->netdev);
- }
-}
-
-static void slic_upr_request_complete(struct adapter *adapter, u32 isr)
-{
- struct sliccard *card = adapter->card;
- struct slic_upr *upr;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->upr_lock, flags);
- upr = adapter->upr_list;
- if (!upr) {
- spin_unlock_irqrestore(&adapter->upr_lock, flags);
- return;
- }
- adapter->upr_list = upr->next;
- upr->next = NULL;
- adapter->upr_busy = 0;
- switch (upr->upr_request) {
- case SLIC_UPR_STATS: {
- struct slic_shmemory *sm = &adapter->shmem;
- struct slic_shmem_data *sm_data = sm->shmem_data;
- struct slic_stats *stats = &sm_data->stats;
- struct slic_stats *old = &adapter->inicstats_prev;
- struct slicnet_stats *stst = &adapter->slic_stats;
-
- if (isr & ISR_UPCERR) {
- dev_err(&adapter->netdev->dev,
- "SLIC_UPR_STATS command failed isr[%x]\n", isr);
- break;
- }
-
- UPDATE_STATS_GB(stst->tcp.xmit_tcp_segs, stats->xmit_tcp_segs,
- old->xmit_tcp_segs);
-
- UPDATE_STATS_GB(stst->tcp.xmit_tcp_bytes, stats->xmit_tcp_bytes,
- old->xmit_tcp_bytes);
-
- UPDATE_STATS_GB(stst->tcp.rcv_tcp_segs, stats->rcv_tcp_segs,
- old->rcv_tcp_segs);
-
- UPDATE_STATS_GB(stst->tcp.rcv_tcp_bytes, stats->rcv_tcp_bytes,
- old->rcv_tcp_bytes);
-
- UPDATE_STATS_GB(stst->iface.xmt_bytes, stats->xmit_bytes,
- old->xmit_bytes);
-
- UPDATE_STATS_GB(stst->iface.xmt_ucast, stats->xmit_unicasts,
- old->xmit_unicasts);
-
- UPDATE_STATS_GB(stst->iface.rcv_bytes, stats->rcv_bytes,
- old->rcv_bytes);
-
- UPDATE_STATS_GB(stst->iface.rcv_ucast, stats->rcv_unicasts,
- old->rcv_unicasts);
-
- UPDATE_STATS_GB(stst->iface.xmt_errors, stats->xmit_collisions,
- old->xmit_collisions);
-
- UPDATE_STATS_GB(stst->iface.xmt_errors,
- stats->xmit_excess_collisions,
- old->xmit_excess_collisions);
-
- UPDATE_STATS_GB(stst->iface.xmt_errors, stats->xmit_other_error,
- old->xmit_other_error);
-
- UPDATE_STATS_GB(stst->iface.rcv_errors, stats->rcv_other_error,
- old->rcv_other_error);
-
- UPDATE_STATS_GB(stst->iface.rcv_discards, stats->rcv_drops,
- old->rcv_drops);
-
- if (stats->rcv_drops > old->rcv_drops)
- adapter->rcv_drops += (stats->rcv_drops -
- old->rcv_drops);
- memcpy_fromio(old, stats, sizeof(*stats));
- break;
- }
- case SLIC_UPR_RLSR:
- slic_link_upr_complete(adapter, isr);
- break;
- case SLIC_UPR_RCONFIG:
- break;
- case SLIC_UPR_PING:
- card->pingstatus |= (isr & ISR_PINGDSMASK);
- break;
- }
- kfree(upr);
- slic_upr_start(adapter);
- spin_unlock_irqrestore(&adapter->upr_lock, flags);
-}
-
-static int slic_config_get(struct adapter *adapter, u32 config, u32 config_h)
-{
- return slic_upr_request(adapter, SLIC_UPR_RCONFIG, config, config_h,
- 0, 0);
-}
-
-/*
- * Compute a checksum of the EEPROM according to RFC 1071.
- */
-static u16 slic_eeprom_cksum(void *eeprom, unsigned int len)
-{
- u16 *wp = eeprom;
- u32 checksum = 0;
-
- while (len > 1) {
- checksum += *(wp++);
- len -= 2;
- }
-
- if (len > 0)
- checksum += *(u8 *)wp;
-
- while (checksum >> 16)
- checksum = (checksum & 0xFFFF) + ((checksum >> 16) & 0xFFFF);
-
- return ~checksum;
-}
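
slic_eeprom_cksum() above is the standard RFC 1071 one's-complement checksum: 16-bit words are summed into a 32-bit accumulator, an odd trailing byte is added, carries are folded back into the low 16 bits, and the folded sum is complemented. A small self-contained userspace sketch (illustrative names, not driver code) showing the same folding, together with the usual verification property that re-summing the data plus its stored checksum yields zero:

#include <stdint.h>
#include <stdio.h>

static uint16_t rfc1071_cksum(const void *buf, unsigned int len)
{
	const uint16_t *wp = buf;
	uint32_t sum = 0;

	while (len > 1) {		/* sum 16-bit words */
		sum += *wp++;
		len -= 2;
	}
	if (len)			/* odd trailing byte */
		sum += *(const uint8_t *)wp;
	while (sum >> 16)		/* fold carries back into the low 16 bits */
		sum = (sum & 0xFFFF) + (sum >> 16);
	return ~sum;			/* one's complement of the folded sum */
}

int main(void)
{
	uint16_t words[3] = { 0x1234, 0xF00D, 0x0000 };

	words[2] = rfc1071_cksum(words, 4);	/* checksum of the first two words */
	/* summing the data together with its checksum must now fold to zero */
	printf("stored 0x%04x, verify 0x%04x\n",
	       words[2], rfc1071_cksum(words, sizeof(words)));
	return 0;
}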
-
-static void slic_rspqueue_free(struct adapter *adapter)
-{
- int i;
- struct slic_rspqueue *rspq = &adapter->rspqueue;
-
- for (i = 0; i < rspq->num_pages; i++) {
- if (rspq->vaddr[i]) {
- pci_free_consistent(adapter->pcidev, PAGE_SIZE,
- rspq->vaddr[i], rspq->paddr[i]);
- }
- rspq->vaddr[i] = NULL;
- rspq->paddr[i] = 0;
- }
- rspq->offset = 0;
- rspq->pageindex = 0;
- rspq->rspbuf = NULL;
-}
-
-static int slic_rspqueue_init(struct adapter *adapter)
-{
- int i;
- struct slic_rspqueue *rspq = &adapter->rspqueue;
- u32 paddrh = 0;
-
- memset(rspq, 0, sizeof(struct slic_rspqueue));
-
- rspq->num_pages = SLIC_RSPQ_PAGES_GB;
-
- for (i = 0; i < rspq->num_pages; i++) {
- rspq->vaddr[i] = pci_zalloc_consistent(adapter->pcidev,
- PAGE_SIZE,
- &rspq->paddr[i]);
- if (!rspq->vaddr[i]) {
- dev_err(&adapter->pcidev->dev,
- "pci_alloc_consistent failed\n");
- slic_rspqueue_free(adapter);
- return -ENOMEM;
- }
-
- if (paddrh == 0) {
- slic_write32(adapter, SLIC_REG_RBAR,
- rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE);
- } else {
- slic_write64(adapter, SLIC_REG_RBAR64,
- rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE,
- paddrh);
- }
- }
- rspq->offset = 0;
- rspq->pageindex = 0;
- rspq->rspbuf = (struct slic_rspbuf *)rspq->vaddr[0];
- return 0;
-}
-
-static struct slic_rspbuf *slic_rspqueue_getnext(struct adapter *adapter)
-{
- struct slic_rspqueue *rspq = &adapter->rspqueue;
- struct slic_rspbuf *buf;
-
- if (!(rspq->rspbuf->status))
- return NULL;
-
- buf = rspq->rspbuf;
- if (++rspq->offset < SLIC_RSPQ_BUFSINPAGE) {
- rspq->rspbuf++;
- } else {
- slic_write64(adapter, SLIC_REG_RBAR64,
- rspq->paddr[rspq->pageindex] |
- SLIC_RSPQ_BUFSINPAGE, 0);
- rspq->pageindex = (rspq->pageindex + 1) % rspq->num_pages;
- rspq->offset = 0;
- rspq->rspbuf = (struct slic_rspbuf *)
- rspq->vaddr[rspq->pageindex];
- }
-
- return buf;
-}
-
-static void slic_cmdqmem_free(struct adapter *adapter)
-{
- struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem;
- int i;
-
- for (i = 0; i < SLIC_CMDQ_MAXPAGES; i++) {
- if (cmdqmem->pages[i]) {
- pci_free_consistent(adapter->pcidev,
- PAGE_SIZE,
- (void *)cmdqmem->pages[i],
- cmdqmem->dma_pages[i]);
- }
- }
- memset(cmdqmem, 0, sizeof(struct slic_cmdqmem));
-}
-
-static u32 *slic_cmdqmem_addpage(struct adapter *adapter)
-{
- struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem;
- u32 *pageaddr;
-
- if (cmdqmem->pagecnt >= SLIC_CMDQ_MAXPAGES)
- return NULL;
- pageaddr = pci_alloc_consistent(adapter->pcidev,
- PAGE_SIZE,
- &cmdqmem->dma_pages[cmdqmem->pagecnt]);
- if (!pageaddr)
- return NULL;
-
- cmdqmem->pages[cmdqmem->pagecnt] = pageaddr;
- cmdqmem->pagecnt++;
- return pageaddr;
-}
-
-static void slic_cmdq_free(struct adapter *adapter)
-{
- struct slic_hostcmd *cmd;
-
- cmd = adapter->cmdq_all.head;
- while (cmd) {
- if (cmd->busy) {
- struct sk_buff *tempskb;
-
- tempskb = cmd->skb;
- if (tempskb) {
- cmd->skb = NULL;
- dev_kfree_skb_irq(tempskb);
- }
- }
- cmd = cmd->next_all;
- }
- memset(&adapter->cmdq_all, 0, sizeof(struct slic_cmdqueue));
- memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue));
- memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue));
- slic_cmdqmem_free(adapter);
-}
-
-static void slic_cmdq_addcmdpage(struct adapter *adapter, u32 *page)
-{
- struct slic_hostcmd *cmd;
- struct slic_hostcmd *prev;
- struct slic_hostcmd *tail;
- struct slic_cmdqueue *cmdq;
- int cmdcnt;
- void *cmdaddr;
- ulong phys_addr;
- u32 phys_addrl;
- u32 phys_addrh;
- struct slic_handle *pslic_handle;
- unsigned long flags;
-
- cmdaddr = page;
- cmd = cmdaddr;
- cmdcnt = 0;
-
- phys_addr = virt_to_bus((void *)page);
- phys_addrl = SLIC_GET_ADDR_LOW(phys_addr);
- phys_addrh = SLIC_GET_ADDR_HIGH(phys_addr);
-
- prev = NULL;
- tail = cmd;
- while ((cmdcnt < SLIC_CMDQ_CMDSINPAGE) &&
- (adapter->slic_handle_ix < 256)) {
- /* Allocate and initialize a SLIC_HANDLE for this command */
- spin_lock_irqsave(&adapter->handle_lock, flags);
- pslic_handle = adapter->pfree_slic_handles;
- adapter->pfree_slic_handles = pslic_handle->next;
- spin_unlock_irqrestore(&adapter->handle_lock, flags);
- pslic_handle->type = SLIC_HANDLE_CMD;
- pslic_handle->address = (void *)cmd;
- pslic_handle->offset = (ushort)adapter->slic_handle_ix++;
- pslic_handle->other_handle = NULL;
- pslic_handle->next = NULL;
-
- cmd->pslic_handle = pslic_handle;
- cmd->cmd64.hosthandle = pslic_handle->token.handle_token;
- cmd->busy = false;
- cmd->paddrl = phys_addrl;
- cmd->paddrh = phys_addrh;
- cmd->next_all = prev;
- cmd->next = prev;
- prev = cmd;
- phys_addrl += SLIC_HOSTCMD_SIZE;
- cmdaddr += SLIC_HOSTCMD_SIZE;
-
- cmd = cmdaddr;
- cmdcnt++;
- }
-
- cmdq = &adapter->cmdq_all;
- cmdq->count += cmdcnt; /* SLIC_CMDQ_CMDSINPAGE; mooktodo */
- tail->next_all = cmdq->head;
- cmdq->head = prev;
- cmdq = &adapter->cmdq_free;
- spin_lock_irqsave(&cmdq->lock, flags);
- cmdq->count += cmdcnt; /* SLIC_CMDQ_CMDSINPAGE; mooktodo */
- tail->next = cmdq->head;
- cmdq->head = prev;
- spin_unlock_irqrestore(&cmdq->lock, flags);
-}
-
-static int slic_cmdq_init(struct adapter *adapter)
-{
- int i;
- u32 *pageaddr;
-
- memset(&adapter->cmdq_all, 0, sizeof(struct slic_cmdqueue));
- memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue));
- memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue));
- spin_lock_init(&adapter->cmdq_all.lock);
- spin_lock_init(&adapter->cmdq_free.lock);
- spin_lock_init(&adapter->cmdq_done.lock);
- memset(&adapter->cmdqmem, 0, sizeof(struct slic_cmdqmem));
- adapter->slic_handle_ix = 1;
- for (i = 0; i < SLIC_CMDQ_INITPAGES; i++) {
- pageaddr = slic_cmdqmem_addpage(adapter);
- if (!pageaddr) {
- slic_cmdq_free(adapter);
- return -ENOMEM;
- }
- slic_cmdq_addcmdpage(adapter, pageaddr);
- }
- adapter->slic_handle_ix = 1;
-
- return 0;
-}
-
-static void slic_cmdq_reset(struct adapter *adapter)
-{
- struct slic_hostcmd *hcmd;
- struct sk_buff *skb;
- u32 outstanding;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->cmdq_free.lock, flags);
- spin_lock(&adapter->cmdq_done.lock);
- outstanding = adapter->cmdq_all.count - adapter->cmdq_done.count;
- outstanding -= adapter->cmdq_free.count;
- hcmd = adapter->cmdq_all.head;
- while (hcmd) {
- if (hcmd->busy) {
- skb = hcmd->skb;
- hcmd->busy = 0;
- hcmd->skb = NULL;
- dev_kfree_skb_irq(skb);
- }
- hcmd = hcmd->next_all;
- }
- adapter->cmdq_free.count = 0;
- adapter->cmdq_free.head = NULL;
- adapter->cmdq_free.tail = NULL;
- adapter->cmdq_done.count = 0;
- adapter->cmdq_done.head = NULL;
- adapter->cmdq_done.tail = NULL;
- adapter->cmdq_free.head = adapter->cmdq_all.head;
- hcmd = adapter->cmdq_all.head;
- while (hcmd) {
- adapter->cmdq_free.count++;
- hcmd->next = hcmd->next_all;
- hcmd = hcmd->next_all;
- }
- if (adapter->cmdq_free.count != adapter->cmdq_all.count) {
- dev_err(&adapter->netdev->dev,
- "free_count %d != all count %d\n",
- adapter->cmdq_free.count, adapter->cmdq_all.count);
- }
- spin_unlock(&adapter->cmdq_done.lock);
- spin_unlock_irqrestore(&adapter->cmdq_free.lock, flags);
-}
-
-static void slic_cmdq_getdone(struct adapter *adapter)
-{
- struct slic_cmdqueue *done_cmdq = &adapter->cmdq_done;
- struct slic_cmdqueue *free_cmdq = &adapter->cmdq_free;
- unsigned long flags;
-
- spin_lock_irqsave(&done_cmdq->lock, flags);
-
- free_cmdq->head = done_cmdq->head;
- free_cmdq->count = done_cmdq->count;
- done_cmdq->head = NULL;
- done_cmdq->tail = NULL;
- done_cmdq->count = 0;
- spin_unlock_irqrestore(&done_cmdq->lock, flags);
-}
-
-static struct slic_hostcmd *slic_cmdq_getfree(struct adapter *adapter)
-{
- struct slic_cmdqueue *cmdq = &adapter->cmdq_free;
- struct slic_hostcmd *cmd = NULL;
- unsigned long flags;
-
-lock_and_retry:
- spin_lock_irqsave(&cmdq->lock, flags);
-retry:
- cmd = cmdq->head;
- if (cmd) {
- cmdq->head = cmd->next;
- cmdq->count--;
- spin_unlock_irqrestore(&cmdq->lock, flags);
- } else {
- slic_cmdq_getdone(adapter);
- cmd = cmdq->head;
- if (cmd) {
- goto retry;
- } else {
- u32 *pageaddr;
-
- spin_unlock_irqrestore(&cmdq->lock, flags);
- pageaddr = slic_cmdqmem_addpage(adapter);
- if (pageaddr) {
- slic_cmdq_addcmdpage(adapter, pageaddr);
- goto lock_and_retry;
- }
- }
- }
- return cmd;
-}
-
-static void slic_cmdq_putdone_irq(struct adapter *adapter,
- struct slic_hostcmd *cmd)
-{
- struct slic_cmdqueue *cmdq = &adapter->cmdq_done;
-
- spin_lock(&cmdq->lock);
- cmd->busy = 0;
- cmd->next = cmdq->head;
- cmdq->head = cmd;
- cmdq->count++;
- if ((adapter->xmitq_full) && (cmdq->count > 10))
- netif_wake_queue(adapter->netdev);
- spin_unlock(&cmdq->lock);
-}
-
-static int slic_rcvqueue_fill(struct adapter *adapter)
-{
- void *paddr;
- u32 paddrl;
- u32 paddrh;
- struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
- int i = 0;
- struct device *dev = &adapter->netdev->dev;
-
- while (i < SLIC_RCVQ_FILLENTRIES) {
- struct slic_rcvbuf *rcvbuf;
- struct sk_buff *skb;
-#ifdef KLUDGE_FOR_4GB_BOUNDARY
-retry_rcvqfill:
-#endif
- skb = alloc_skb(SLIC_RCVQ_RCVBUFSIZE, GFP_ATOMIC);
- if (skb) {
- paddr = (void *)(unsigned long)
- pci_map_single(adapter->pcidev,
- skb->data,
- SLIC_RCVQ_RCVBUFSIZE,
- PCI_DMA_FROMDEVICE);
- paddrl = SLIC_GET_ADDR_LOW(paddr);
- paddrh = SLIC_GET_ADDR_HIGH(paddr);
-
- skb->len = SLIC_RCVBUF_HEADSIZE;
- rcvbuf = (struct slic_rcvbuf *)skb->head;
- rcvbuf->status = 0;
- skb->next = NULL;
-#ifdef KLUDGE_FOR_4GB_BOUNDARY
- if (paddrl == 0) {
- dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
- __func__);
- dev_err(dev, "skb[%p] PROBLEM\n", skb);
- dev_err(dev, " skbdata[%p]\n",
- skb->data);
- dev_err(dev, " skblen[%x]\n", skb->len);
- dev_err(dev, " paddr[%p]\n", paddr);
- dev_err(dev, " paddrl[%x]\n", paddrl);
- dev_err(dev, " paddrh[%x]\n", paddrh);
- dev_err(dev, " rcvq->head[%p]\n",
- rcvq->head);
- dev_err(dev, " rcvq->tail[%p]\n",
- rcvq->tail);
- dev_err(dev, " rcvq->count[%x]\n",
- rcvq->count);
- dev_err(dev, "SKIP THIS SKB!!!!!!!!\n");
- goto retry_rcvqfill;
- }
-#else
- if (paddrl == 0) {
- dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
- __func__);
- dev_err(dev, "skb[%p] PROBLEM\n", skb);
- dev_err(dev, " skbdata[%p]\n",
- skb->data);
- dev_err(dev, " skblen[%x]\n", skb->len);
- dev_err(dev, " paddr[%p]\n", paddr);
- dev_err(dev, " paddrl[%x]\n", paddrl);
- dev_err(dev, " paddrh[%x]\n", paddrh);
- dev_err(dev, " rcvq->head[%p]\n",
- rcvq->head);
- dev_err(dev, " rcvq->tail[%p]\n",
- rcvq->tail);
- dev_err(dev, " rcvq->count[%x]\n",
- rcvq->count);
- dev_err(dev, "GIVE TO CARD ANYWAY\n");
- }
-#endif
- if (paddrh == 0) {
- slic_write32(adapter, SLIC_REG_HBAR,
- (u32)paddrl);
- } else {
- slic_write64(adapter, SLIC_REG_HBAR64, paddrl,
- paddrh);
- }
- if (rcvq->head)
- rcvq->tail->next = skb;
- else
- rcvq->head = skb;
- rcvq->tail = skb;
- rcvq->count++;
- i++;
- } else {
- dev_err(&adapter->netdev->dev,
- "slic_rcvqueue_fill could only get [%d] skbuffs\n",
- i);
- break;
- }
- }
- return i;
-}
-
-static void slic_rcvqueue_free(struct adapter *adapter)
-{
- struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
- struct sk_buff *skb;
-
- while (rcvq->head) {
- skb = rcvq->head;
- rcvq->head = rcvq->head->next;
- dev_kfree_skb(skb);
- }
- rcvq->tail = NULL;
- rcvq->head = NULL;
- rcvq->count = 0;
-}
-
-static int slic_rcvqueue_init(struct adapter *adapter)
-{
- int i, count;
- struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
-
- rcvq->tail = NULL;
- rcvq->head = NULL;
- rcvq->size = SLIC_RCVQ_ENTRIES;
- rcvq->errors = 0;
- rcvq->count = 0;
- i = SLIC_RCVQ_ENTRIES / SLIC_RCVQ_FILLENTRIES;
- count = 0;
- while (i) {
- count += slic_rcvqueue_fill(adapter);
- i--;
- }
- if (rcvq->count < SLIC_RCVQ_MINENTRIES) {
- slic_rcvqueue_free(adapter);
- return -ENOMEM;
- }
- return 0;
-}
-
-static struct sk_buff *slic_rcvqueue_getnext(struct adapter *adapter)
-{
- struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
- struct sk_buff *skb;
- struct slic_rcvbuf *rcvbuf;
- int count;
-
- if (rcvq->count) {
- skb = rcvq->head;
- rcvbuf = (struct slic_rcvbuf *)skb->head;
-
- if (rcvbuf->status & IRHDDR_SVALID) {
- rcvq->head = rcvq->head->next;
- skb->next = NULL;
- rcvq->count--;
- } else {
- skb = NULL;
- }
- } else {
- dev_err(&adapter->netdev->dev,
- "RcvQ Empty!! rcvq[%p] count[%x]\n", rcvq, rcvq->count);
- skb = NULL;
- }
- while (rcvq->count < SLIC_RCVQ_FILLTHRESH) {
- count = slic_rcvqueue_fill(adapter);
- if (!count)
- break;
- }
- if (skb)
- rcvq->errors = 0;
- return skb;
-}
-
-static u32 slic_rcvqueue_reinsert(struct adapter *adapter, struct sk_buff *skb)
-{
- struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
- void *paddr;
- u32 paddrl;
- u32 paddrh;
- struct slic_rcvbuf *rcvbuf = (struct slic_rcvbuf *)skb->head;
- struct device *dev;
-
- paddr = (void *)(unsigned long)
- pci_map_single(adapter->pcidev, skb->head,
- SLIC_RCVQ_RCVBUFSIZE, PCI_DMA_FROMDEVICE);
- rcvbuf->status = 0;
- skb->next = NULL;
-
- paddrl = SLIC_GET_ADDR_LOW(paddr);
- paddrh = SLIC_GET_ADDR_HIGH(paddr);
-
- if (paddrl == 0) {
- dev = &adapter->netdev->dev;
- dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
- __func__);
- dev_err(dev, "skb[%p] PROBLEM\n", skb);
- dev_err(dev, " skbdata[%p]\n", skb->data);
- dev_err(dev, " skblen[%x]\n", skb->len);
- dev_err(dev, " paddr[%p]\n", paddr);
- dev_err(dev, " paddrl[%x]\n", paddrl);
- dev_err(dev, " paddrh[%x]\n", paddrh);
- dev_err(dev, " rcvq->head[%p]\n", rcvq->head);
- dev_err(dev, " rcvq->tail[%p]\n", rcvq->tail);
- dev_err(dev, " rcvq->count[%x]\n", rcvq->count);
- }
- if (paddrh == 0)
- slic_write32(adapter, SLIC_REG_HBAR, (u32)paddrl);
- else
- slic_write64(adapter, SLIC_REG_HBAR64, paddrl, paddrh);
- if (rcvq->head)
- rcvq->tail->next = skb;
- else
- rcvq->head = skb;
- rcvq->tail = skb;
- rcvq->count++;
- return rcvq->count;
-}
-
-/*
- * slic_link_event_handler -
- *
- * Initiate a link configuration sequence. The link configuration begins
- * by issuing a READ_LINK_STATUS command to the Utility Processor on the
- * SLIC. Since the command finishes asynchronously, the slic_upr_complete
- * routine will follow it up with a UP configuration write command, which
- * will also complete asynchronously.
- *
- */
-static int slic_link_event_handler(struct adapter *adapter)
-{
- int status;
- struct slic_shmemory *sm = &adapter->shmem;
- dma_addr_t phaddr = sm->lnkstatus_phaddr;
-
- if (adapter->state != ADAPT_UP) {
- /* Adapter is not operational. Ignore. */
- return -ENODEV;
- }
- /* no 4GB wrap guaranteed */
- status = slic_upr_request(adapter, SLIC_UPR_RLSR,
- cpu_to_le32(lower_32_bits(phaddr)),
- cpu_to_le32(upper_32_bits(phaddr)), 0, 0);
- return status;
-}
-
-static void slic_init_cleanup(struct adapter *adapter)
-{
- if (adapter->intrregistered) {
- adapter->intrregistered = 0;
- free_irq(adapter->netdev->irq, adapter->netdev);
- }
-
- if (adapter->shmem.shmem_data) {
- struct slic_shmemory *sm = &adapter->shmem;
- struct slic_shmem_data *sm_data = sm->shmem_data;
-
- pci_free_consistent(adapter->pcidev, sizeof(*sm_data), sm_data,
- sm->isr_phaddr);
- }
-
- if (adapter->pingtimerset) {
- adapter->pingtimerset = 0;
- del_timer(&adapter->pingtimer);
- }
-
- slic_rspqueue_free(adapter);
- slic_cmdq_free(adapter);
- slic_rcvqueue_free(adapter);
-}
-
-/*
- * Allocate a mcast_address structure to hold the multicast address.
- * Link it in.
- */
-static int slic_mcast_add_list(struct adapter *adapter, char *address)
-{
- struct mcast_address *mcaddr, *mlist;
-
- /* Check to see if it already exists */
- mlist = adapter->mcastaddrs;
- while (mlist) {
- if (ether_addr_equal(mlist->address, address))
- return 0;
- mlist = mlist->next;
- }
-
- /* Doesn't already exist. Allocate a structure to hold it */
- mcaddr = kmalloc(sizeof(*mcaddr), GFP_ATOMIC);
- if (!mcaddr)
- return 1;
-
- ether_addr_copy(mcaddr->address, address);
-
- mcaddr->next = adapter->mcastaddrs;
- adapter->mcastaddrs = mcaddr;
-
- return 0;
-}
-
-static void slic_mcast_set_list(struct net_device *dev)
-{
- struct adapter *adapter = netdev_priv(dev);
- int status = 0;
- char *addresses;
- struct netdev_hw_addr *ha;
-
- netdev_for_each_mc_addr(ha, dev) {
- addresses = (char *)&ha->addr;
- status = slic_mcast_add_list(adapter, addresses);
- if (status != 0)
- break;
- slic_mcast_set_bit(adapter, addresses);
- }
-
- if (adapter->devflags_prev != dev->flags) {
- adapter->macopts = MAC_DIRECTED;
- if (dev->flags) {
- if (dev->flags & IFF_BROADCAST)
- adapter->macopts |= MAC_BCAST;
- if (dev->flags & IFF_PROMISC)
- adapter->macopts |= MAC_PROMISC;
- if (dev->flags & IFF_ALLMULTI)
- adapter->macopts |= MAC_ALLMCAST;
- if (dev->flags & IFF_MULTICAST)
- adapter->macopts |= MAC_MCAST;
- }
- adapter->devflags_prev = dev->flags;
- slic_config_set(adapter, true);
- } else {
- if (status == 0)
- slic_mcast_set_mask(adapter);
- }
-}
-
-#define XMIT_FAIL_LINK_STATE 1
-#define XMIT_FAIL_ZERO_LENGTH 2
-#define XMIT_FAIL_HOSTCMD_FAIL 3
-
-static void slic_xmit_build_request(struct adapter *adapter,
- struct slic_hostcmd *hcmd, struct sk_buff *skb)
-{
- struct slic_host64_cmd *ihcmd;
- ulong phys_addr;
-
- ihcmd = &hcmd->cmd64;
-
- ihcmd->flags = adapter->port << IHFLG_IFSHFT;
- ihcmd->command = IHCMD_XMT_REQ;
- ihcmd->u.slic_buffers.totlen = skb->len;
- phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(adapter->pcidev, phys_addr)) {
- kfree_skb(skb);
- dev_err(&adapter->pcidev->dev, "DMA mapping error\n");
- return;
- }
- ihcmd->u.slic_buffers.bufs[0].paddrl = SLIC_GET_ADDR_LOW(phys_addr);
- ihcmd->u.slic_buffers.bufs[0].paddrh = SLIC_GET_ADDR_HIGH(phys_addr);
- ihcmd->u.slic_buffers.bufs[0].length = skb->len;
-#if BITS_PER_LONG == 64
- hcmd->cmdsize = (u32)((((u64)&ihcmd->u.slic_buffers.bufs[1] -
- (u64)hcmd) + 31) >> 5);
-#else
- hcmd->cmdsize = (((u32)&ihcmd->u.slic_buffers.bufs[1] -
- (u32)hcmd) + 31) >> 5;
-#endif
-}
-
-static void slic_xmit_fail(struct adapter *adapter,
- struct sk_buff *skb,
- void *cmd, u32 skbtype, u32 status)
-{
- if (adapter->xmitq_full)
- netif_stop_queue(adapter->netdev);
- if ((!cmd) && (status <= XMIT_FAIL_HOSTCMD_FAIL)) {
- switch (status) {
- case XMIT_FAIL_LINK_STATE:
- dev_err(&adapter->netdev->dev,
- "reject xmit skb[%p: %x] linkstate[%s] adapter[%s:%d] card[%s:%d]\n",
- skb, skb->pkt_type,
- SLIC_LINKSTATE(adapter->linkstate),
- SLIC_ADAPTER_STATE(adapter->state),
- adapter->state,
- SLIC_CARD_STATE(adapter->card->state),
- adapter->card->state);
- break;
- case XMIT_FAIL_ZERO_LENGTH:
- dev_err(&adapter->netdev->dev,
- "xmit_start skb->len == 0 skb[%p] type[%x]\n",
- skb, skb->pkt_type);
- break;
- case XMIT_FAIL_HOSTCMD_FAIL:
- dev_err(&adapter->netdev->dev,
- "xmit_start skb[%p] type[%x] No host commands available\n",
- skb, skb->pkt_type);
- break;
- }
- }
- dev_kfree_skb(skb);
- adapter->netdev->stats.tx_dropped++;
-}
-
-static void slic_rcv_handle_error(struct adapter *adapter,
- struct slic_rcvbuf *rcvbuf)
-{
- struct slic_hddr_wds *hdr = (struct slic_hddr_wds *)rcvbuf->data;
- struct net_device *netdev = adapter->netdev;
-
- if (adapter->devid != SLIC_1GB_DEVICE_ID) {
- if (hdr->frame_status14 & VRHSTAT_802OE)
- adapter->if_events.oflow802++;
- if (hdr->frame_status14 & VRHSTAT_TPOFLO)
- adapter->if_events.Tprtoflow++;
- if (hdr->frame_status_b14 & VRHSTATB_802UE)
- adapter->if_events.uflow802++;
- if (hdr->frame_status_b14 & VRHSTATB_RCVE) {
- adapter->if_events.rcvearly++;
- netdev->stats.rx_fifo_errors++;
- }
- if (hdr->frame_status_b14 & VRHSTATB_BUFF) {
- adapter->if_events.Bufov++;
- netdev->stats.rx_over_errors++;
- }
- if (hdr->frame_status_b14 & VRHSTATB_CARRE) {
- adapter->if_events.Carre++;
- netdev->stats.tx_carrier_errors++;
- }
- if (hdr->frame_status_b14 & VRHSTATB_LONGE)
- adapter->if_events.Longe++;
- if (hdr->frame_status_b14 & VRHSTATB_PREA)
- adapter->if_events.Invp++;
- if (hdr->frame_status_b14 & VRHSTATB_CRC) {
- adapter->if_events.Crc++;
- netdev->stats.rx_crc_errors++;
- }
- if (hdr->frame_status_b14 & VRHSTATB_DRBL)
- adapter->if_events.Drbl++;
- if (hdr->frame_status_b14 & VRHSTATB_CODE)
- adapter->if_events.Code++;
- if (hdr->frame_status_b14 & VRHSTATB_TPCSUM)
- adapter->if_events.TpCsum++;
- if (hdr->frame_status_b14 & VRHSTATB_TPHLEN)
- adapter->if_events.TpHlen++;
- if (hdr->frame_status_b14 & VRHSTATB_IPCSUM)
- adapter->if_events.IpCsum++;
- if (hdr->frame_status_b14 & VRHSTATB_IPLERR)
- adapter->if_events.IpLen++;
- if (hdr->frame_status_b14 & VRHSTATB_IPHERR)
- adapter->if_events.IpHlen++;
- } else {
- if (hdr->frame_statusGB & VGBSTAT_XPERR) {
- u32 xerr = hdr->frame_statusGB >> VGBSTAT_XERRSHFT;
-
- if (xerr == VGBSTAT_XCSERR)
- adapter->if_events.TpCsum++;
- if (xerr == VGBSTAT_XUFLOW)
- adapter->if_events.Tprtoflow++;
- if (xerr == VGBSTAT_XHLEN)
- adapter->if_events.TpHlen++;
- }
- if (hdr->frame_statusGB & VGBSTAT_NETERR) {
- u32 nerr = (hdr->frame_statusGB >> VGBSTAT_NERRSHFT) &
- VGBSTAT_NERRMSK;
- if (nerr == VGBSTAT_NCSERR)
- adapter->if_events.IpCsum++;
- if (nerr == VGBSTAT_NUFLOW)
- adapter->if_events.IpLen++;
- if (nerr == VGBSTAT_NHLEN)
- adapter->if_events.IpHlen++;
- }
- if (hdr->frame_statusGB & VGBSTAT_LNKERR) {
- u32 lerr = hdr->frame_statusGB & VGBSTAT_LERRMSK;
-
- if (lerr == VGBSTAT_LDEARLY)
- adapter->if_events.rcvearly++;
- if (lerr == VGBSTAT_LBOFLO)
- adapter->if_events.Bufov++;
- if (lerr == VGBSTAT_LCODERR)
- adapter->if_events.Code++;
- if (lerr == VGBSTAT_LDBLNBL)
- adapter->if_events.Drbl++;
- if (lerr == VGBSTAT_LCRCERR)
- adapter->if_events.Crc++;
- if (lerr == VGBSTAT_LOFLO)
- adapter->if_events.oflow802++;
- if (lerr == VGBSTAT_LUFLO)
- adapter->if_events.uflow802++;
- }
- }
-}
-
-#define TCP_OFFLOAD_FRAME_PUSHFLAG 0x10000000
-#define M_FAST_PATH 0x0040
-
-static void slic_rcv_handler(struct adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct sk_buff *skb;
- struct slic_rcvbuf *rcvbuf;
- u32 frames = 0;
-
- while ((skb = slic_rcvqueue_getnext(adapter))) {
- u32 rx_bytes;
-
- rcvbuf = (struct slic_rcvbuf *)skb->head;
- adapter->card->events++;
- if (rcvbuf->status & IRHDDR_ERR) {
- adapter->rx_errors++;
- slic_rcv_handle_error(adapter, rcvbuf);
- slic_rcvqueue_reinsert(adapter, skb);
- continue;
- }
-
- if (!slic_mac_filter(adapter, (struct ether_header *)
- rcvbuf->data)) {
- slic_rcvqueue_reinsert(adapter, skb);
- continue;
- }
- skb_pull(skb, SLIC_RCVBUF_HEADSIZE);
- rx_bytes = (rcvbuf->length & IRHDDR_FLEN_MSK);
- skb_put(skb, rx_bytes);
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += rx_bytes;
-#if SLIC_OFFLOAD_IP_CHECKSUM
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-#endif
-
- skb->dev = adapter->netdev;
- skb->protocol = eth_type_trans(skb, skb->dev);
- netif_rx(skb);
-
- ++frames;
-#if SLIC_INTERRUPT_PROCESS_LIMIT
- if (frames >= SLIC_RCVQ_MAX_PROCESS_ISR) {
- adapter->rcv_interrupt_yields++;
- break;
- }
-#endif
- }
- adapter->max_isr_rcvs = max(adapter->max_isr_rcvs, frames);
-}
-
-static void slic_xmit_complete(struct adapter *adapter)
-{
- struct slic_hostcmd *hcmd;
- struct slic_rspbuf *rspbuf;
- u32 frames = 0;
- struct slic_handle_word slic_handle_word;
-
- do {
- rspbuf = slic_rspqueue_getnext(adapter);
- if (!rspbuf)
- break;
- adapter->xmit_completes++;
- adapter->card->events++;
- /*
- * Get the complete host command buffer
- */
- slic_handle_word.handle_token = rspbuf->hosthandle;
- hcmd =
- adapter->slic_handles[slic_handle_word.handle_index].
- address;
-/* hcmd = (struct slic_hostcmd *) rspbuf->hosthandle; */
- if (hcmd->type == SLIC_CMD_DUMB) {
- if (hcmd->skb)
- dev_kfree_skb_irq(hcmd->skb);
- slic_cmdq_putdone_irq(adapter, hcmd);
- }
- rspbuf->status = 0;
- rspbuf->hosthandle = 0;
- frames++;
- } while (1);
- adapter->max_isr_xmits = max(adapter->max_isr_xmits, frames);
-}
-
-static void slic_interrupt_card_up(u32 isr, struct adapter *adapter,
- struct net_device *dev)
-{
- if (isr & ~ISR_IO) {
- if (isr & ISR_ERR) {
- adapter->error_interrupts++;
- if (isr & ISR_RMISS) {
- int count;
- int pre_count;
- int errors;
-
- struct slic_rcvqueue *rcvq =
- &adapter->rcvqueue;
-
- adapter->error_rmiss_interrupts++;
-
- if (!rcvq->errors)
- rcv_count = rcvq->count;
- pre_count = rcvq->count;
- errors = rcvq->errors;
-
- while (rcvq->count < SLIC_RCVQ_FILLTHRESH) {
- count = slic_rcvqueue_fill(adapter);
- if (!count)
- break;
- }
- } else if (isr & ISR_XDROP) {
- dev_err(&dev->dev,
- "isr & ISR_ERR [%x] ISR_XDROP\n",
- isr);
- } else {
- dev_err(&dev->dev,
- "isr & ISR_ERR [%x]\n",
- isr);
- }
- }
-
- if (isr & ISR_LEVENT) {
- adapter->linkevent_interrupts++;
- if (slic_link_event_handler(adapter))
- adapter->linkevent_interrupts--;
- }
-
- if ((isr & ISR_UPC) || (isr & ISR_UPCERR) ||
- (isr & ISR_UPCBSY)) {
- adapter->upr_interrupts++;
- slic_upr_request_complete(adapter, isr);
- }
- }
-
- if (isr & ISR_RCV) {
- adapter->rcv_interrupts++;
- slic_rcv_handler(adapter);
- }
-
- if (isr & ISR_CMD) {
- adapter->xmit_interrupts++;
- slic_xmit_complete(adapter);
- }
-}
-
-static irqreturn_t slic_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct adapter *adapter = netdev_priv(dev);
- struct slic_shmemory *sm = &adapter->shmem;
- struct slic_shmem_data *sm_data = sm->shmem_data;
- u32 isr;
-
- if (sm_data->isr) {
- slic_write32(adapter, SLIC_REG_ICR, ICR_INT_MASK);
- slic_flush_write(adapter);
-
- isr = sm_data->isr;
- sm_data->isr = 0;
- adapter->num_isrs++;
- switch (adapter->card->state) {
- case CARD_UP:
- slic_interrupt_card_up(isr, adapter, dev);
- break;
-
- case CARD_DOWN:
- if ((isr & ISR_UPC) ||
- (isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
- adapter->upr_interrupts++;
- slic_upr_request_complete(adapter, isr);
- }
- break;
- }
-
- adapter->all_reg_writes += 2;
- adapter->isr_reg_writes++;
- slic_write32(adapter, SLIC_REG_ISR, 0);
- } else {
- adapter->false_interrupts++;
- }
- return IRQ_HANDLED;
-}
-
-#define NORMAL_ETHFRAME 0
-
-static netdev_tx_t slic_xmit_start(struct sk_buff *skb, struct net_device *dev)
-{
- struct sliccard *card;
- struct adapter *adapter = netdev_priv(dev);
- struct slic_hostcmd *hcmd = NULL;
- u32 status = 0;
- void *offloadcmd = NULL;
-
- card = adapter->card;
- if ((adapter->linkstate != LINK_UP) ||
- (adapter->state != ADAPT_UP) || (card->state != CARD_UP)) {
- status = XMIT_FAIL_LINK_STATE;
- goto xmit_fail;
-
- } else if (skb->len == 0) {
- status = XMIT_FAIL_ZERO_LENGTH;
- goto xmit_fail;
- }
-
- hcmd = slic_cmdq_getfree(adapter);
- if (!hcmd) {
- adapter->xmitq_full = 1;
- status = XMIT_FAIL_HOSTCMD_FAIL;
- goto xmit_fail;
- }
- hcmd->skb = skb;
- hcmd->busy = 1;
- hcmd->type = SLIC_CMD_DUMB;
- slic_xmit_build_request(adapter, hcmd, skb);
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
-#ifdef DEBUG_DUMP
- if (adapter->kill_card) {
- struct slic_host64_cmd ihcmd;
-
- ihcmd = &hcmd->cmd64;
-
- ihcmd->flags |= 0x40;
- adapter->kill_card = 0; /* only do this once */
- }
-#endif
- if (hcmd->paddrh == 0) {
- slic_write32(adapter, SLIC_REG_CBAR, (hcmd->paddrl |
- hcmd->cmdsize));
- } else {
- slic_write64(adapter, SLIC_REG_CBAR64,
- hcmd->paddrl | hcmd->cmdsize, hcmd->paddrh);
- }
-xmit_done:
- return NETDEV_TX_OK;
-xmit_fail:
- slic_xmit_fail(adapter, skb, offloadcmd, NORMAL_ETHFRAME, status);
- goto xmit_done;
-}
-
-static void slic_adapter_freeresources(struct adapter *adapter)
-{
- slic_init_cleanup(adapter);
- adapter->error_interrupts = 0;
- adapter->rcv_interrupts = 0;
- adapter->xmit_interrupts = 0;
- adapter->linkevent_interrupts = 0;
- adapter->upr_interrupts = 0;
- adapter->num_isrs = 0;
- adapter->xmit_completes = 0;
- adapter->rcv_broadcasts = 0;
- adapter->rcv_multicasts = 0;
- adapter->rcv_unicasts = 0;
-}
-
-static int slic_adapter_allocresources(struct adapter *adapter,
- unsigned long *flags)
-{
- if (!adapter->intrregistered) {
- int retval;
-
- spin_unlock_irqrestore(&slic_global.driver_lock, *flags);
-
- retval = request_irq(adapter->netdev->irq,
- &slic_interrupt,
- IRQF_SHARED,
- adapter->netdev->name, adapter->netdev);
-
- spin_lock_irqsave(&slic_global.driver_lock, *flags);
-
- if (retval) {
- dev_err(&adapter->netdev->dev,
- "request_irq (%s) FAILED [%x]\n",
- adapter->netdev->name, retval);
- return retval;
- }
- adapter->intrregistered = 1;
- }
- return 0;
-}
-
-/*
- * slic_if_init
- *
- * Perform initialization of our slic interface.
- *
- */
-static int slic_if_init(struct adapter *adapter, unsigned long *flags)
-{
- struct sliccard *card = adapter->card;
- struct net_device *dev = adapter->netdev;
- struct slic_shmemory *sm = &adapter->shmem;
- struct slic_shmem_data *sm_data = sm->shmem_data;
- int rc;
-
- /* adapter should be down at this point */
- if (adapter->state != ADAPT_DOWN) {
- dev_err(&dev->dev, "%s: adapter->state != ADAPT_DOWN\n",
- __func__);
- rc = -EIO;
- goto err;
- }
-
- adapter->devflags_prev = dev->flags;
- adapter->macopts = MAC_DIRECTED;
- if (dev->flags) {
- if (dev->flags & IFF_BROADCAST)
- adapter->macopts |= MAC_BCAST;
- if (dev->flags & IFF_PROMISC)
- adapter->macopts |= MAC_PROMISC;
- if (dev->flags & IFF_ALLMULTI)
- adapter->macopts |= MAC_ALLMCAST;
- if (dev->flags & IFF_MULTICAST)
- adapter->macopts |= MAC_MCAST;
- }
- rc = slic_adapter_allocresources(adapter, flags);
- if (rc) {
- dev_err(&dev->dev, "slic_adapter_allocresources FAILED %x\n",
- rc);
- slic_adapter_freeresources(adapter);
- goto err;
- }
-
- if (!adapter->queues_initialized) {
- rc = slic_rspqueue_init(adapter);
- if (rc)
- goto err;
- rc = slic_cmdq_init(adapter);
- if (rc)
- goto err;
- rc = slic_rcvqueue_init(adapter);
- if (rc)
- goto err;
- adapter->queues_initialized = 1;
- }
-
- slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
- slic_flush_write(adapter);
- mdelay(1);
-
- if (!adapter->isp_initialized) {
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->bit64reglock, flags);
- slic_write32(adapter, SLIC_REG_ADDR_UPPER,
- cpu_to_le32(upper_32_bits(sm->isr_phaddr)));
- slic_write32(adapter, SLIC_REG_ISP,
- cpu_to_le32(lower_32_bits(sm->isr_phaddr)));
- spin_unlock_irqrestore(&adapter->bit64reglock, flags);
-
- adapter->isp_initialized = 1;
- }
-
- adapter->state = ADAPT_UP;
- if (!card->loadtimerset) {
- setup_timer(&card->loadtimer, &slic_timer_load_check,
- (ulong)card);
- card->loadtimer.expires =
- jiffies + (SLIC_LOADTIMER_PERIOD * HZ);
- add_timer(&card->loadtimer);
-
- card->loadtimerset = 1;
- }
-
- if (!adapter->pingtimerset) {
- setup_timer(&adapter->pingtimer, &slic_timer_ping, (ulong)dev);
- adapter->pingtimer.expires =
- jiffies + (PING_TIMER_INTERVAL * HZ);
- add_timer(&adapter->pingtimer);
- adapter->pingtimerset = 1;
- adapter->card->pingstatus = ISR_PINGMASK;
- }
-
- /*
- * clear any pending events, then enable interrupts
- */
- sm_data->isr = 0;
- slic_write32(adapter, SLIC_REG_ISR, 0);
- slic_write32(adapter, SLIC_REG_ICR, ICR_INT_ON);
-
- slic_link_config(adapter, LINK_AUTOSPEED, LINK_AUTOD);
- slic_flush_write(adapter);
-
- rc = slic_link_event_handler(adapter);
- if (rc) {
- /* disable interrupts then clear pending events */
- slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
- slic_write32(adapter, SLIC_REG_ISR, 0);
- slic_flush_write(adapter);
-
- if (adapter->pingtimerset) {
- del_timer(&adapter->pingtimer);
- adapter->pingtimerset = 0;
- }
- if (card->loadtimerset) {
- del_timer(&card->loadtimer);
- card->loadtimerset = 0;
- }
- adapter->state = ADAPT_DOWN;
- slic_adapter_freeresources(adapter);
- }
-
-err:
- return rc;
-}
-
-static int slic_entry_open(struct net_device *dev)
-{
- struct adapter *adapter = netdev_priv(dev);
- struct sliccard *card = adapter->card;
- unsigned long flags;
- int status;
-
- netif_carrier_off(dev);
-
- spin_lock_irqsave(&slic_global.driver_lock, flags);
- if (!adapter->activated) {
- card->adapters_activated++;
- slic_global.num_slic_ports_active++;
- adapter->activated = 1;
- }
- status = slic_if_init(adapter, &flags);
-
- if (status != 0) {
- if (adapter->activated) {
- card->adapters_activated--;
- slic_global.num_slic_ports_active--;
- adapter->activated = 0;
- }
- goto spin_unlock;
- }
- if (!card->master)
- card->master = adapter;
-
-spin_unlock:
- spin_unlock_irqrestore(&slic_global.driver_lock, flags);
-
- netif_start_queue(adapter->netdev);
-
- return status;
-}
-
-static void slic_card_cleanup(struct sliccard *card)
-{
- if (card->loadtimerset) {
- card->loadtimerset = 0;
- del_timer_sync(&card->loadtimer);
- }
-
- kfree(card);
-}
-
-static void slic_entry_remove(struct pci_dev *pcidev)
-{
- struct net_device *dev = pci_get_drvdata(pcidev);
- struct adapter *adapter = netdev_priv(dev);
- struct sliccard *card;
- struct mcast_address *mcaddr, *mlist;
-
- unregister_netdev(dev);
-
- slic_adapter_freeresources(adapter);
- iounmap(adapter->regs);
-
- /* free multicast addresses */
- mlist = adapter->mcastaddrs;
- while (mlist) {
- mcaddr = mlist;
- mlist = mlist->next;
- kfree(mcaddr);
- }
- card = adapter->card;
- card->adapters_allocated--;
- adapter->allocated = 0;
- if (!card->adapters_allocated) {
- struct sliccard *curr_card = slic_global.slic_card;
-
- if (curr_card == card) {
- slic_global.slic_card = card->next;
- } else {
- while (curr_card->next != card)
- curr_card = curr_card->next;
- curr_card->next = card->next;
- }
- slic_global.num_slic_cards--;
- slic_card_cleanup(card);
- }
- free_netdev(dev);
- pci_release_regions(pcidev);
- pci_disable_device(pcidev);
-}
-
-static int slic_entry_halt(struct net_device *dev)
-{
- struct adapter *adapter = netdev_priv(dev);
- struct sliccard *card = adapter->card;
- unsigned long flags;
-
- spin_lock_irqsave(&slic_global.driver_lock, flags);
- netif_stop_queue(adapter->netdev);
- adapter->state = ADAPT_DOWN;
- adapter->linkstate = LINK_DOWN;
- adapter->upr_list = NULL;
- adapter->upr_busy = 0;
- adapter->devflags_prev = 0;
- slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
- adapter->all_reg_writes++;
- adapter->icr_reg_writes++;
- slic_config_clear(adapter);
- if (adapter->activated) {
- card->adapters_activated--;
- slic_global.num_slic_ports_active--;
- adapter->activated = 0;
- }
-#ifdef AUTOMATIC_RESET
- slic_write32(adapter, SLIC_REG_RESET_IFACE, 0);
-#endif
- slic_flush_write(adapter);
-
- /*
- * Reset the adapter's cmd queues
- */
- slic_cmdq_reset(adapter);
-
-#ifdef AUTOMATIC_RESET
- if (!card->adapters_activated)
- slic_card_init(card, adapter);
-#endif
-
- spin_unlock_irqrestore(&slic_global.driver_lock, flags);
-
- netif_carrier_off(dev);
-
- return 0;
-}
-
-static struct net_device_stats *slic_get_stats(struct net_device *dev)
-{
- struct adapter *adapter = netdev_priv(dev);
-
- dev->stats.collisions = adapter->slic_stats.iface.xmit_collisions;
- dev->stats.rx_errors = adapter->slic_stats.iface.rcv_errors;
- dev->stats.tx_errors = adapter->slic_stats.iface.xmt_errors;
- dev->stats.rx_missed_errors = adapter->slic_stats.iface.rcv_discards;
- dev->stats.tx_heartbeat_errors = 0;
- dev->stats.tx_aborted_errors = 0;
- dev->stats.tx_window_errors = 0;
- dev->stats.tx_fifo_errors = 0;
- dev->stats.rx_frame_errors = 0;
- dev->stats.rx_length_errors = 0;
-
- return &dev->stats;
-}
-
-static int slic_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct adapter *adapter = netdev_priv(dev);
- struct ethtool_cmd edata;
- struct ethtool_cmd ecmd;
- u32 data[7];
- u32 intagg;
-
- switch (cmd) {
- case SIOCSLICSETINTAGG:
- if (copy_from_user(data, rq->ifr_data, 28))
- return -EFAULT;
- intagg = data[0];
- dev_err(&dev->dev, "set interrupt aggregation to %d\n",
- intagg);
- slic_intagg_set(adapter, intagg);
- return 0;
-
- case SIOCETHTOOL:
- if (copy_from_user(&ecmd, rq->ifr_data, sizeof(ecmd)))
- return -EFAULT;
-
- if (ecmd.cmd == ETHTOOL_GSET) {
- memset(&edata, 0, sizeof(edata));
- edata.supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_Autoneg | SUPPORTED_MII);
- edata.port = PORT_MII;
- edata.transceiver = XCVR_INTERNAL;
- edata.phy_address = 0;
- if (adapter->linkspeed == LINK_100MB)
- edata.speed = SPEED_100;
- else if (adapter->linkspeed == LINK_10MB)
- edata.speed = SPEED_10;
- else
- edata.speed = 0;
-
- if (adapter->linkduplex == LINK_FULLD)
- edata.duplex = DUPLEX_FULL;
- else
- edata.duplex = DUPLEX_HALF;
-
- edata.autoneg = AUTONEG_ENABLE;
- edata.maxtxpkt = 1;
- edata.maxrxpkt = 1;
- if (copy_to_user(rq->ifr_data, &edata, sizeof(edata)))
- return -EFAULT;
-
- } else if (ecmd.cmd == ETHTOOL_SSET) {
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (adapter->linkspeed == LINK_100MB)
- edata.speed = SPEED_100;
- else if (adapter->linkspeed == LINK_10MB)
- edata.speed = SPEED_10;
- else
- edata.speed = 0;
-
- if (adapter->linkduplex == LINK_FULLD)
- edata.duplex = DUPLEX_FULL;
- else
- edata.duplex = DUPLEX_HALF;
-
- edata.autoneg = AUTONEG_ENABLE;
- edata.maxtxpkt = 1;
- edata.maxrxpkt = 1;
- if ((ecmd.speed != edata.speed) ||
- (ecmd.duplex != edata.duplex)) {
- u32 speed;
- u32 duplex;
-
- if (ecmd.speed == SPEED_10)
- speed = 0;
- else
- speed = PCR_SPEED_100;
- if (ecmd.duplex == DUPLEX_FULL)
- duplex = PCR_DUPLEX_FULL;
- else
- duplex = 0;
- slic_link_config(adapter, speed, duplex);
- if (slic_link_event_handler(adapter))
- return -EFAULT;
- }
- }
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void slic_config_pci(struct pci_dev *pcidev)
-{
- u16 pci_command;
- u16 new_command;
-
- pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
-
- new_command = pci_command | PCI_COMMAND_MASTER
- | PCI_COMMAND_MEMORY
- | PCI_COMMAND_INVALIDATE
- | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK;
- if (pci_command != new_command)
- pci_write_config_word(pcidev, PCI_COMMAND, new_command);
-}
-
-static int slic_card_init(struct sliccard *card, struct adapter *adapter)
-{
- struct slic_shmemory *sm = &adapter->shmem;
- struct slic_shmem_data *sm_data = sm->shmem_data;
- struct slic_eeprom *peeprom;
- struct oslic_eeprom *pOeeprom;
- dma_addr_t phys_config;
- u32 phys_configh;
- u32 phys_configl;
- u32 i = 0;
- int status;
- uint macaddrs = card->card_size;
- ushort eecodesize;
- ushort dramsize;
- ushort ee_chksum;
- ushort calc_chksum;
- struct slic_config_mac *pmac;
- unsigned char fruformat;
- unsigned char oemfruformat;
- struct atk_fru *patkfru;
- union oemfru *poemfru;
- unsigned long flags;
-
- /* Reset everything except PCI configuration space */
- slic_soft_reset(adapter);
-
- /* Download the microcode */
- status = slic_card_download(adapter);
- if (status)
- return status;
-
- if (!card->config_set) {
- peeprom = pci_alloc_consistent(adapter->pcidev,
- sizeof(struct slic_eeprom),
- &phys_config);
-
- if (!peeprom) {
- dev_err(&adapter->pcidev->dev,
- "Failed to allocate DMA memory for EEPROM.\n");
- return -ENOMEM;
- }
-
- phys_configl = SLIC_GET_ADDR_LOW(phys_config);
- phys_configh = SLIC_GET_ADDR_HIGH(phys_config);
-
- memset(peeprom, 0, sizeof(struct slic_eeprom));
-
- slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
- slic_flush_write(adapter);
- mdelay(1);
-
- spin_lock_irqsave(&adapter->bit64reglock, flags);
- slic_write32(adapter, SLIC_REG_ADDR_UPPER,
- cpu_to_le32(upper_32_bits(sm->isr_phaddr)));
- slic_write32(adapter, SLIC_REG_ISP,
- cpu_to_le32(lower_32_bits(sm->isr_phaddr)));
- spin_unlock_irqrestore(&adapter->bit64reglock, flags);
-
- status = slic_config_get(adapter, phys_configl, phys_configh);
- if (status) {
- dev_err(&adapter->pcidev->dev,
- "Failed to fetch config data from device.\n");
- goto card_init_err;
- }
-
- for (;;) {
- if (sm_data->isr) {
- if (sm_data->isr & ISR_UPC) {
- sm_data->isr = 0;
- slic_write64(adapter, SLIC_REG_ISP, 0,
- 0);
- slic_write32(adapter, SLIC_REG_ISR, 0);
- slic_flush_write(adapter);
-
- slic_upr_request_complete(adapter, 0);
- break;
- }
-
- sm_data->isr = 0;
- slic_write32(adapter, SLIC_REG_ISR, 0);
- slic_flush_write(adapter);
- } else {
- mdelay(1);
- i++;
- if (i > 5000) {
- dev_err(&adapter->pcidev->dev,
- "Fetch of config data timed out.\n");
- slic_write64(adapter, SLIC_REG_ISP,
- 0, 0);
- slic_flush_write(adapter);
-
- status = -EINVAL;
- goto card_init_err;
- }
- }
- }
-
- switch (adapter->devid) {
- /* Oasis card */
- case SLIC_2GB_DEVICE_ID:
- /* extract EEPROM data and pointers to EEPROM data */
- pOeeprom = (struct oslic_eeprom *)peeprom;
- eecodesize = pOeeprom->EecodeSize;
- dramsize = pOeeprom->DramSize;
- pmac = pOeeprom->MacInfo;
- fruformat = pOeeprom->FruFormat;
- patkfru = &pOeeprom->AtkFru;
- oemfruformat = pOeeprom->OemFruFormat;
- poemfru = &pOeeprom->OemFru;
- macaddrs = 2;
- /*
- * Minor kludge for Oasis card
- * get 2 MAC addresses from the
- * EEPROM to ensure that function 1
- * gets the Port 1 MAC address
- */
- break;
- default:
- /* extract EEPROM data and pointers to EEPROM data */
- eecodesize = peeprom->EecodeSize;
- dramsize = peeprom->DramSize;
- pmac = peeprom->u2.mac.MacInfo;
- fruformat = peeprom->FruFormat;
- patkfru = &peeprom->AtkFru;
- oemfruformat = peeprom->OemFruFormat;
- poemfru = &peeprom->OemFru;
- break;
- }
-
- card->config.EepromValid = false;
-
- /* see if the EEPROM is valid by checking its checksum */
- if ((eecodesize <= MAX_EECODE_SIZE) &&
- (eecodesize >= MIN_EECODE_SIZE)) {
- ee_chksum =
- *(u16 *)((char *)peeprom + (eecodesize - 2));
- /*
- * calculate the EEPROM checksum
- */
- calc_chksum = slic_eeprom_cksum(peeprom,
- eecodesize - 2);
- /*
- * if the ucode chksum flag bit worked,
- * we wouldn't need this
- */
- if (ee_chksum == calc_chksum)
- card->config.EepromValid = true;
- }
- /* copy in the DRAM size */
- card->config.DramSize = dramsize;
-
- /* copy in the MAC address(es) */
- for (i = 0; i < macaddrs; i++) {
- memcpy(&card->config.MacInfo[i],
- &pmac[i], sizeof(struct slic_config_mac));
- }
-
- /* copy the Alacritech FRU information */
- card->config.FruFormat = fruformat;
- memcpy(&card->config.AtkFru, patkfru,
- sizeof(struct atk_fru));
-
- pci_free_consistent(adapter->pcidev,
- sizeof(struct slic_eeprom),
- peeprom, phys_config);
-
- if (!card->config.EepromValid) {
- slic_write64(adapter, SLIC_REG_ISP, 0, 0);
- slic_flush_write(adapter);
- dev_err(&adapter->pcidev->dev, "EEPROM invalid.\n");
- return -EINVAL;
- }
-
- card->config_set = 1;
- }
-
- status = slic_card_download_gbrcv(adapter);
- if (status)
- return status;
-
- if (slic_global.dynamic_intagg)
- slic_intagg_set(adapter, 0);
- else
- slic_intagg_set(adapter, adapter->intagg_delay);
-
- /*
- * Initialize ping status to "ok"
- */
- card->pingstatus = ISR_PINGMASK;
-
- /*
- * Lastly, mark our card state as up and return success
- */
- card->state = CARD_UP;
- card->reset_in_progress = 0;
-
- return 0;
-
-card_init_err:
- pci_free_consistent(adapter->pcidev, sizeof(struct slic_eeprom),
- peeprom, phys_config);
- return status;
-}
-
-static int slic_get_coalesce(struct net_device *dev,
- struct ethtool_coalesce *coalesce)
-{
- struct adapter *adapter = netdev_priv(dev);
-
- coalesce->rx_coalesce_usecs = adapter->intagg_delay;
- coalesce->use_adaptive_rx_coalesce = adapter->dynamic_intagg;
- return 0;
-}
-
-static int slic_set_coalesce(struct net_device *dev,
- struct ethtool_coalesce *coalesce)
-{
- struct adapter *adapter = netdev_priv(dev);
-
- adapter->intagg_delay = coalesce->rx_coalesce_usecs;
- adapter->dynamic_intagg = coalesce->use_adaptive_rx_coalesce;
- return 0;
-}
-
-static void slic_init_driver(void)
-{
- if (slic_first_init) {
- slic_first_init = 0;
- spin_lock_init(&slic_global.driver_lock);
- }
-}
-
-static int slic_init_adapter(struct net_device *netdev,
- struct pci_dev *pcidev,
- const struct pci_device_id *pci_tbl_entry,
- void __iomem *memaddr, int chip_idx)
-{
- ushort index;
- struct slic_handle *pslic_handle;
- struct adapter *adapter = netdev_priv(netdev);
- struct slic_shmemory *sm = &adapter->shmem;
- struct slic_shmem_data *sm_data;
- dma_addr_t phaddr;
-
-/* adapter->pcidev = pcidev;*/
- adapter->vendid = pci_tbl_entry->vendor;
- adapter->devid = pci_tbl_entry->device;
- adapter->subsysid = pci_tbl_entry->subdevice;
- adapter->busnumber = pcidev->bus->number;
- adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
- adapter->functionnumber = (pcidev->devfn & 0x7);
- adapter->regs = memaddr;
- adapter->irq = pcidev->irq;
- adapter->chipid = chip_idx;
- adapter->port = 0;
- adapter->cardindex = adapter->port;
- spin_lock_init(&adapter->upr_lock);
- spin_lock_init(&adapter->bit64reglock);
- spin_lock_init(&adapter->adapter_lock);
- spin_lock_init(&adapter->reset_lock);
- spin_lock_init(&adapter->handle_lock);
-
- adapter->card_size = 1;
- /*
- * Initialize slic_handle array
- */
- /*
- * Start with 1. 0 is an invalid host handle.
- */
- for (index = 1, pslic_handle = &adapter->slic_handles[1];
- index < SLIC_CMDQ_MAXCMDS; index++, pslic_handle++) {
- pslic_handle->token.handle_index = index;
- pslic_handle->type = SLIC_HANDLE_FREE;
- pslic_handle->next = adapter->pfree_slic_handles;
- adapter->pfree_slic_handles = pslic_handle;
- }
- sm_data = pci_zalloc_consistent(adapter->pcidev, sizeof(*sm_data),
- &phaddr);
- if (!sm_data)
- return -ENOMEM;
-
- sm->shmem_data = sm_data;
- sm->isr_phaddr = phaddr;
- sm->lnkstatus_phaddr = phaddr + offsetof(struct slic_shmem_data,
- lnkstatus);
- sm->stats_phaddr = phaddr + offsetof(struct slic_shmem_data, stats);
-
- return 0;
-}
-
-static const struct net_device_ops slic_netdev_ops = {
- .ndo_open = slic_entry_open,
- .ndo_stop = slic_entry_halt,
- .ndo_start_xmit = slic_xmit_start,
- .ndo_do_ioctl = slic_ioctl,
- .ndo_set_mac_address = slic_mac_set_address,
- .ndo_get_stats = slic_get_stats,
- .ndo_set_rx_mode = slic_mcast_set_list,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static u32 slic_card_locate(struct adapter *adapter)
-{
- struct sliccard *card = slic_global.slic_card;
- struct physcard *physcard = slic_global.phys_card;
- ushort card_hostid;
- uint i;
-
- card_hostid = slic_read32(adapter, SLIC_REG_HOSTID);
-
- /* Initialize a new card structure if need be */
- if (card_hostid == SLIC_HOSTID_DEFAULT) {
- card = kzalloc(sizeof(*card), GFP_KERNEL);
- if (!card)
- return -ENOMEM;
-
- card->next = slic_global.slic_card;
- slic_global.slic_card = card;
- card->busnumber = adapter->busnumber;
- card->slotnumber = adapter->slotnumber;
-
- /* Find an available cardnum */
- for (i = 0; i < SLIC_MAX_CARDS; i++) {
- if (slic_global.cardnuminuse[i] == 0) {
- slic_global.cardnuminuse[i] = 1;
- card->cardnum = i;
- break;
- }
- }
- slic_global.num_slic_cards++;
- } else {
- /* Card exists, find the card this adapter belongs to */
- while (card) {
- if (card->cardnum == card_hostid)
- break;
- card = card->next;
- }
- }
-
- if (!card)
- return -ENXIO;
- /* Put the adapter in the card's adapter list */
- if (!card->adapter[adapter->port]) {
- card->adapter[adapter->port] = adapter;
- adapter->card = card;
- }
-
- card->card_size = 1; /* one port per *logical* card */
-
- while (physcard) {
- for (i = 0; i < SLIC_MAX_PORTS; i++) {
- if (physcard->adapter[i])
- break;
- }
- if (i == SLIC_MAX_PORTS)
- break;
-
- if (physcard->adapter[i]->slotnumber == adapter->slotnumber)
- break;
- physcard = physcard->next;
- }
- if (!physcard) {
- /* no structure allocated for this physical card yet */
- physcard = kzalloc(sizeof(*physcard), GFP_ATOMIC);
- if (!physcard) {
- if (card_hostid == SLIC_HOSTID_DEFAULT)
- kfree(card);
- return -ENOMEM;
- }
-
- physcard->next = slic_global.phys_card;
- slic_global.phys_card = physcard;
- physcard->adapters_allocd = 1;
- } else {
- physcard->adapters_allocd++;
- }
- /* Note - this is ZERO relative */
- adapter->physport = physcard->adapters_allocd - 1;
-
- physcard->adapter[adapter->physport] = adapter;
- adapter->physcard = physcard;
-
- return 0;
-}
-
-static int slic_entry_probe(struct pci_dev *pcidev,
- const struct pci_device_id *pci_tbl_entry)
-{
- static int cards_found;
- static int did_version;
- int err = -ENODEV;
- struct net_device *netdev;
- struct adapter *adapter;
- void __iomem *memmapped_ioaddr = NULL;
- ulong mmio_start = 0;
- ulong mmio_len = 0;
- struct sliccard *card = NULL;
- int pci_using_dac = 0;
-
- err = pci_enable_device(pcidev);
-
- if (err)
- return err;
-
- if (did_version++ == 0) {
- dev_info(&pcidev->dev, "%s\n", slic_banner);
- dev_info(&pcidev->dev, "%s\n", slic_proc_version);
- }
-
- if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- err = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pcidev->dev, "unable to obtain 64-bit DMA for consistent allocations\n");
- goto err_out_disable_pci;
- }
- } else {
- err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pcidev->dev, "no usable DMA configuration\n");
- goto err_out_disable_pci;
- }
- pci_using_dac = 0;
- pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
- }
-
- err = pci_request_regions(pcidev, DRV_NAME);
- if (err) {
- dev_err(&pcidev->dev, "can't obtain PCI resources\n");
- goto err_out_disable_pci;
- }
-
- pci_set_master(pcidev);
-
- netdev = alloc_etherdev(sizeof(struct adapter));
- if (!netdev) {
- err = -ENOMEM;
- goto err_out_exit_slic_probe;
- }
-
- netdev->ethtool_ops = &slic_ethtool_ops;
- SET_NETDEV_DEV(netdev, &pcidev->dev);
-
- pci_set_drvdata(pcidev, netdev);
- adapter = netdev_priv(netdev);
- adapter->netdev = netdev;
- adapter->pcidev = pcidev;
- slic_global.dynamic_intagg = adapter->dynamic_intagg;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
-
- mmio_start = pci_resource_start(pcidev, 0);
- mmio_len = pci_resource_len(pcidev, 0);
-
- memmapped_ioaddr = ioremap_nocache(mmio_start, mmio_len);
- if (!memmapped_ioaddr) {
- dev_err(&pcidev->dev, "cannot remap MMIO region %lx @ %lx\n",
- mmio_len, mmio_start);
- err = -ENOMEM;
- goto err_out_free_netdev;
- }
-
- slic_config_pci(pcidev);
-
- slic_init_driver();
-
- err = slic_init_adapter(netdev, pcidev, pci_tbl_entry, memmapped_ioaddr,
- cards_found);
- if (err) {
- dev_err(&pcidev->dev, "failed to init adapter: %i\n", err);
- goto err_out_unmap;
- }
-
- err = slic_card_locate(adapter);
- if (err) {
- dev_err(&pcidev->dev, "cannot locate card\n");
- goto err_clean_init;
- }
-
- card = adapter->card;
-
- if (!adapter->allocated) {
- card->adapters_allocated++;
- adapter->allocated = 1;
- }
-
- err = slic_card_init(card, adapter);
- if (err)
- goto err_clean_init;
-
- slic_adapter_set_hwaddr(adapter);
-
- netdev->base_addr = (unsigned long)memmapped_ioaddr;
- netdev->irq = adapter->irq;
- netdev->netdev_ops = &slic_netdev_ops;
-
- netif_carrier_off(netdev);
-
- strcpy(netdev->name, "eth%d");
- err = register_netdev(netdev);
- if (err) {
- dev_err(&pcidev->dev, "Cannot register net device, aborting.\n");
- goto err_clean_init;
- }
-
- cards_found++;
-
- return 0;
-
-err_clean_init:
- slic_init_cleanup(adapter);
-err_out_unmap:
- iounmap(memmapped_ioaddr);
-err_out_free_netdev:
- free_netdev(netdev);
-err_out_exit_slic_probe:
- pci_release_regions(pcidev);
-err_out_disable_pci:
- pci_disable_device(pcidev);
- return err;
-}
-
-static struct pci_driver slic_driver = {
- .name = DRV_NAME,
- .id_table = slic_pci_tbl,
- .probe = slic_entry_probe,
- .remove = slic_entry_remove,
-};
-
-static int __init slic_module_init(void)
-{
- slic_init_driver();
-
- return pci_register_driver(&slic_driver);
-}
-
-static void __exit slic_module_cleanup(void)
-{
- pci_unregister_driver(&slic_driver);
-}
-
-static const struct ethtool_ops slic_ethtool_ops = {
- .get_coalesce = slic_get_coalesce,
- .set_coalesce = slic_set_coalesce
-};
-
-module_init(slic_module_init);
-module_exit(slic_module_cleanup);
diff --git a/drivers/staging/sm750fb/Makefile b/drivers/staging/sm750fb/Makefile
index dcce3f487ed5..4d781f78b95c 100644
--- a/drivers/staging/sm750fb/Makefile
+++ b/drivers/staging/sm750fb/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_FB_SM750) += sm750fb.o
sm750fb-objs := sm750.o sm750_hw.o sm750_accel.o sm750_cursor.o ddk750_chip.o ddk750_power.o ddk750_mode.o
-sm750fb-objs += ddk750_display.o ddk750_help.o ddk750_swi2c.o ddk750_sii164.o ddk750_dvi.o ddk750_hwi2c.o
+sm750fb-objs += ddk750_display.o ddk750_swi2c.o ddk750_sii164.o ddk750_dvi.o ddk750_hwi2c.o
diff --git a/drivers/staging/sm750fb/ddk750.h b/drivers/staging/sm750fb/ddk750.h
index 2c10a08ed964..734010324a8f 100644
--- a/drivers/staging/sm750fb/ddk750.h
+++ b/drivers/staging/sm750fb/ddk750.h
@@ -1,22 +1,21 @@
+/*
+ * Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
+ *
+ * All rights are reserved. Reproduction or in part is prohibited
+ * without the written consent of the copyright owner.
+ *
+ * RegSC.h --- SM718 SDK
+ * This file contains the definitions for the System Configuration registers.
+ */
+
#ifndef DDK750_H__
#define DDK750_H__
-/*******************************************************************
-*
-* Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
-*
-* All rights are reserved. Reproduction or in part is prohibited
-* without the written consent of the copyright owner.
-*
-* RegSC.h --- SM718 SDK
-* This file contains the definitions for the System Configuration registers.
-*
-*******************************************************************/
+
#include "ddk750_reg.h"
#include "ddk750_mode.h"
#include "ddk750_chip.h"
#include "ddk750_display.h"
#include "ddk750_power.h"
-#include "ddk750_help.h"
#ifdef USE_HW_I2C
#include "ddk750_hwi2c.h"
#endif
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 839d6730bde9..f59ce5c0867d 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -1,33 +1,32 @@
#include <linux/kernel.h>
#include <linux/sizes.h>
-#include "ddk750_help.h"
#include "ddk750_reg.h"
#include "ddk750_chip.h"
#include "ddk750_power.h"
#define MHz(x) ((x) * 1000000)
+static logical_chip_type_t chip;
+
logical_chip_type_t sm750_get_chip_type(void)
{
- unsigned short physicalID;
- char physicalRev;
- logical_chip_type_t chip;
-
- physicalID = devId750; /* either 0x718 or 0x750 */
- physicalRev = revId750;
+ return chip;
+}
- if (physicalID == 0x718)
+void sm750_set_chip_type(unsigned short devId, u8 revId)
+{
+ if (devId == 0x718)
chip = SM718;
- else if (physicalID == 0x750) {
+ else if (devId == 0x750) {
chip = SM750;
/* SM750 and SM750LE are different in their revision ID only. */
- if (physicalRev == SM750LE_REVISION_ID)
+ if (revId == SM750LE_REVISION_ID) {
chip = SM750LE;
+ pr_info("found sm750le\n");
+ }
} else
chip = SM_UNKNOWN;
-
- return chip;
}
static unsigned int get_mxclk_freq(void)
@@ -52,9 +51,9 @@ static unsigned int get_mxclk_freq(void)
*
* Input: Frequency to be set.
*/
-static void setChipClock(unsigned int frequency)
+static void set_chip_clock(unsigned int frequency)
{
- pll_value_t pll;
+ struct pll_value pll;
unsigned int ulActualMxClk;
/* Cheok_0509: For SM750LE, the chip clock is fixed. Nothing to set. */
@@ -63,29 +62,31 @@ static void setChipClock(unsigned int frequency)
if (frequency) {
/*
- * Set up PLL, a structure to hold the value to be set in clocks.
- */
+ * Set up PLL structure to hold the value to be set in clocks.
+ */
pll.inputFreq = DEFAULT_INPUT_CLOCK; /* Defined in CLOCK.H */
pll.clockType = MXCLK_PLL;
/*
- * Call calcPllValue() to fill the other fields of PLL structure.
- * Sometime, the chip cannot set up the exact clock
- * required by the User.
- * Return value of calcPllValue gives the actual possible clock.
- */
- ulActualMxClk = calcPllValue(frequency, &pll);
+ * Call sm750_calc_pll_value() to fill the other fields of the PLL
+ * structure. Sometimes, the chip cannot set up the exact
+ * clock required by the User.
+ * Return value of sm750_calc_pll_value gives the actual possible
+ * clock.
+ */
+ ulActualMxClk = sm750_calc_pll_value(frequency, &pll);
/* Master Clock Control: MXCLK_PLL */
- POKE32(MXCLK_PLL_CTRL, formatPllReg(&pll));
+ POKE32(MXCLK_PLL_CTRL, sm750_format_pll_reg(&pll));
}
}
-static void setMemoryClock(unsigned int frequency)
+static void set_memory_clock(unsigned int frequency)
{
unsigned int reg, divisor;
- /* Cheok_0509: For SM750LE, the memory clock is fixed.
+ /*
+ * Cheok_0509: For SM750LE, the memory clock is fixed.
* Nothing to set.
*/
if (sm750_get_chip_type() == SM750LE)
@@ -120,7 +121,7 @@ static void setMemoryClock(unsigned int frequency)
break;
}
- setCurrentGate(reg);
+ sm750_set_current_gate(reg);
}
}
@@ -132,18 +133,20 @@ static void setMemoryClock(unsigned int frequency)
* NOTE:
* The maximum frequency the engine can run is 168MHz.
*/
-static void setMasterClock(unsigned int frequency)
+static void set_master_clock(unsigned int frequency)
{
unsigned int reg, divisor;
- /* Cheok_0509: For SM750LE, the memory clock is fixed.
+ /*
+ * Cheok_0509: For SM750LE, the master clock is fixed.
* Nothing to set.
*/
if (sm750_get_chip_type() == SM750LE)
return;
if (frequency) {
- /* Set the frequency to the maximum frequency
+ /*
+ * Set the frequency to the maximum frequency
* that the SM750 engine can run, which is about 190 MHz.
*/
if (frequency > MHz(190))
@@ -170,11 +173,11 @@ static void setMasterClock(unsigned int frequency)
break;
}
- setCurrentGate(reg);
+ sm750_set_current_gate(reg);
}
}
-unsigned int ddk750_getVMSize(void)
+unsigned int ddk750_get_vm_size(void)
{
unsigned int reg;
unsigned int data;
@@ -206,18 +209,18 @@ unsigned int ddk750_getVMSize(void)
return data;
}
-int ddk750_initHw(initchip_param_t *pInitParam)
+int ddk750_init_hw(struct initchip_param *pInitParam)
{
unsigned int reg;
if (pInitParam->powerMode != 0)
pInitParam->powerMode = 0;
- setPowerMode(pInitParam->powerMode);
+ sm750_set_power_mode(pInitParam->powerMode);
/* Enable display power gate & LOCALMEM power gate*/
reg = PEEK32(CURRENT_GATE);
reg |= (CURRENT_GATE_DISPLAY | CURRENT_GATE_LOCALMEM);
- setCurrentGate(reg);
+ sm750_set_current_gate(reg);
if (sm750_get_chip_type() != SM750LE) {
/* set panel pll and graphic mode via mmio_88 */
@@ -233,16 +236,17 @@ int ddk750_initHw(initchip_param_t *pInitParam)
}
/* Set the Main Chip Clock */
- setChipClock(MHz((unsigned int)pInitParam->chipClock));
+ set_chip_clock(MHz((unsigned int)pInitParam->chipClock));
/* Set up memory clock. */
- setMemoryClock(MHz(pInitParam->memClock));
+ set_memory_clock(MHz(pInitParam->memClock));
/* Set up master clock */
- setMasterClock(MHz(pInitParam->masterClock));
+ set_master_clock(MHz(pInitParam->masterClock));
- /* Reset the memory controller.
+ /*
+ * Reset the memory controller.
* If the memory controller is not reset in SM750,
* the system might hang when sw accesses the memory.
* The memory should be resetted after changing the MXCLK.
@@ -257,7 +261,7 @@ int ddk750_initHw(initchip_param_t *pInitParam)
}
if (pInitParam->setAllEngOff == 1) {
- enable2DEngine(0);
+ sm750_enable_2d_engine(0);
/* Disable Overlay, if a former application left it on */
reg = PEEK32(VIDEO_DISPLAY_CTRL);
@@ -280,7 +284,7 @@ int ddk750_initHw(initchip_param_t *pInitParam)
POKE32(DMA_ABORT_INTERRUPT, reg);
/* Disable DMA Power, if a former application left it on */
- enableDMA(0);
+ sm750_enable_dma(0);
}
/* We can add more initialization as needed. */
@@ -305,9 +309,10 @@ int ddk750_initHw(initchip_param_t *pInitParam)
* M = {1,...,255}
* N = {2,...,15}
*/
-unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
+unsigned int sm750_calc_pll_value(unsigned int request_orig, struct pll_value *pll)
{
- /* as sm750 register definition,
+ /*
+ * as sm750 register definition,
* N located in 2,15 and M located in 1,255
*/
int N, M, X, d;
@@ -319,7 +324,8 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
int max_d = 6;
if (sm750_get_chip_type() == SM750LE) {
- /* SM750LE don't have
+ /*
+ * The SM750LE doesn't have a programmable PLL
+ * or M/N values to work with.
* Just return the requested clock.
*/
@@ -331,14 +337,16 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
request = request_orig / 1000;
input = pll->inputFreq / 1000;
- /* for MXCLK register,
+ /*
+ * for MXCLK register,
* no POD provided, so need be treated differently
*/
if (pll->clockType == MXCLK_PLL)
max_d = 3;
for (N = 15; N > 1; N--) {
- /* RN will not exceed maximum long
+ /*
+ * RN will not exceed maximum long
* if @request <= 285 MHZ (for 32bit cpu)
*/
RN = N * request;
@@ -373,7 +381,7 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
return ret;
}
-unsigned int formatPllReg(pll_value_t *pPLL)
+unsigned int sm750_format_pll_reg(struct pll_value *pPLL)
{
#ifndef VALIDATION_CHIP
unsigned int POD = pPLL->POD;
diff --git a/drivers/staging/sm750fb/ddk750_chip.h b/drivers/staging/sm750fb/ddk750_chip.h
index 14357fd1cc6b..e63b8b293816 100644
--- a/drivers/staging/sm750fb/ddk750_chip.h
+++ b/drivers/staging/sm750fb/ddk750_chip.h
@@ -6,6 +6,14 @@
#endif
#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/uaccess.h>
+
+/* software control endianness */
+#define PEEK32(addr) readl(addr + mmio750)
+#define POKE32(addr, data) writel(data, addr + mmio750)
+
+extern void __iomem *mmio750;
/* This is all the chips recognized by this library */
typedef enum _logical_chip_type_t {
@@ -25,7 +33,7 @@ typedef enum _clock_type_t {
}
clock_type_t;
-typedef struct _pll_value_t {
+struct pll_value {
clock_type_t clockType;
unsigned long inputFreq; /* Input clock frequency to the PLL */
@@ -34,46 +42,55 @@ typedef struct _pll_value_t {
unsigned long N;
unsigned long OD;
unsigned long POD;
-}
-pll_value_t;
+};
/* input struct to initChipParam() function */
-typedef struct _initchip_param_t {
- unsigned short powerMode; /* Use power mode 0 or 1 */
- unsigned short chipClock; /**
- * Speed of main chip clock in MHz unit
- * 0 = keep the current clock setting
- * Others = the new main chip clock
- */
- unsigned short memClock; /**
- * Speed of memory clock in MHz unit
- * 0 = keep the current clock setting
- * Others = the new memory clock
- */
- unsigned short masterClock; /**
- * Speed of master clock in MHz unit
- * 0 = keep the current clock setting
- * Others = the new master clock
- */
- unsigned short setAllEngOff; /**
- * 0 = leave all engine state untouched.
- * 1 = make sure they are off: 2D, Overlay,
- * video alpha, alpha, hardware cursors
- */
- unsigned char resetMemory; /**
- * 0 = Do not reset the memory controller
- * 1 = Reset the memory controller
- */
+struct initchip_param {
+ /* Use power mode 0 or 1 */
+ unsigned short powerMode;
+
+ /*
+ * Speed of main chip clock in MHz unit
+ * 0 = keep the current clock setting
+ * Others = the new main chip clock
+ */
+ unsigned short chipClock;
+
+ /*
+ * Speed of memory clock in MHz unit
+ * 0 = keep the current clock setting
+ * Others = the new memory clock
+ */
+ unsigned short memClock;
+
+ /*
+ * Speed of master clock in MHz unit
+ * 0 = keep the current clock setting
+ * Others = the new master clock
+ */
+ unsigned short masterClock;
+
+ /*
+ * 0 = leave all engine state untouched.
+ * 1 = make sure they are off: 2D, Overlay,
+ * video alpha, alpha, hardware cursors
+ */
+ unsigned short setAllEngOff;
+
+ /*
+ * 0 = Do not reset the memory controller
+ * 1 = Reset the memory controller
+ */
+ unsigned char resetMemory;
/* More initialization parameter can be added if needed */
-}
-initchip_param_t;
+};
logical_chip_type_t sm750_get_chip_type(void);
-unsigned int calcPllValue(unsigned int request, pll_value_t *pll);
-unsigned int formatPllReg(pll_value_t *pPLL);
-void ddk750_set_mmio(void __iomem *, unsigned short, char);
-unsigned int ddk750_getVMSize(void);
-int ddk750_initHw(initchip_param_t *);
+void sm750_set_chip_type(unsigned short devId, u8 revId);
+unsigned int sm750_calc_pll_value(unsigned int request, struct pll_value *pll);
+unsigned int sm750_format_pll_reg(struct pll_value *pPLL);
+unsigned int ddk750_get_vm_size(void);
+int ddk750_init_hw(struct initchip_param *);
#endif
diff --git a/drivers/staging/sm750fb/ddk750_display.c b/drivers/staging/sm750fb/ddk750_display.c
index 4023c476b9e4..c347803f7e19 100644
--- a/drivers/staging/sm750fb/ddk750_display.c
+++ b/drivers/staging/sm750fb/ddk750_display.c
@@ -1,11 +1,9 @@
#include "ddk750_reg.h"
-#include "ddk750_help.h"
+#include "ddk750_chip.h"
#include "ddk750_display.h"
#include "ddk750_power.h"
#include "ddk750_dvi.h"
-#define primaryWaitVerticalSync(delay) waitNextVerticalSync(0, delay)
-
static void setDisplayControl(int ctrl, int disp_state)
{
/* state != 0 means turn on both timing & plane en_bit */
@@ -61,55 +59,28 @@ static void setDisplayControl(int ctrl, int disp_state)
}
}
-static void waitNextVerticalSync(int ctrl, int delay)
+static void primary_wait_vertical_sync(int delay)
{
unsigned int status;
- if (!ctrl) {
- /* primary controller */
+ /*
+ * Do not wait when the Primary PLL is off or display control is
+ * already off. This prevents the software from waiting forever.
+ */
+ if (!(PEEK32(PANEL_PLL_CTRL) & PLL_CTRL_POWER) ||
+ !(PEEK32(PANEL_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING))
+ return;
- /*
- * Do not wait when the Primary PLL is off or display control is
- * already off. This will prevent the software to wait forever.
- */
- if (!(PEEK32(PANEL_PLL_CTRL) & PLL_CTRL_POWER) ||
- !(PEEK32(PANEL_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING)) {
- return;
- }
-
- while (delay-- > 0) {
- /* Wait for end of vsync. */
- do {
- status = PEEK32(SYSTEM_CTRL);
- } while (status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
-
- /* Wait for start of vsync. */
- do {
- status = PEEK32(SYSTEM_CTRL);
- } while (!(status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE));
- }
+ while (delay-- > 0) {
+ /* Wait for end of vsync. */
+ do {
+ status = PEEK32(SYSTEM_CTRL);
+ } while (status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
- } else {
- /*
- * Do not wait when the Primary PLL is off or display control is
- * already off. This will prevent the software to wait forever.
- */
- if (!(PEEK32(CRT_PLL_CTRL) & PLL_CTRL_POWER) ||
- !(PEEK32(CRT_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING)) {
- return;
- }
-
- while (delay-- > 0) {
- /* Wait for end of vsync. */
- do {
- status = PEEK32(SYSTEM_CTRL);
- } while (status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
-
- /* Wait for start of vsync. */
- do {
- status = PEEK32(SYSTEM_CTRL);
- } while (!(status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE));
- }
+ /* Wait for start of vsync. */
+ do {
+ status = PEEK32(SYSTEM_CTRL);
+ } while (!(status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE));
}
}
@@ -121,22 +92,22 @@ static void swPanelPowerSequence(int disp, int delay)
reg = PEEK32(PANEL_DISPLAY_CTRL);
reg |= (disp ? PANEL_DISPLAY_CTRL_FPEN : 0);
POKE32(PANEL_DISPLAY_CTRL, reg);
- primaryWaitVerticalSync(delay);
+ primary_wait_vertical_sync(delay);
reg = PEEK32(PANEL_DISPLAY_CTRL);
reg |= (disp ? PANEL_DISPLAY_CTRL_DATA : 0);
POKE32(PANEL_DISPLAY_CTRL, reg);
- primaryWaitVerticalSync(delay);
+ primary_wait_vertical_sync(delay);
reg = PEEK32(PANEL_DISPLAY_CTRL);
reg |= (disp ? PANEL_DISPLAY_CTRL_VBIASEN : 0);
POKE32(PANEL_DISPLAY_CTRL, reg);
- primaryWaitVerticalSync(delay);
+ primary_wait_vertical_sync(delay);
reg = PEEK32(PANEL_DISPLAY_CTRL);
reg |= (disp ? PANEL_DISPLAY_CTRL_FPEN : 0);
POKE32(PANEL_DISPLAY_CTRL, reg);
- primaryWaitVerticalSync(delay);
+ primary_wait_vertical_sync(delay);
}
void ddk750_setLogicalDispOut(disp_output_t output)
@@ -182,5 +153,5 @@ void ddk750_setLogicalDispOut(disp_output_t output)
setDAC((output & DAC_MASK) >> DAC_OFFSET);
if (output & DPMS_USAGE)
- ddk750_setDPMS((output & DPMS_MASK) >> DPMS_OFFSET);
+ ddk750_set_dpms((output & DPMS_MASK) >> DPMS_OFFSET);
}
diff --git a/drivers/staging/sm750fb/ddk750_display.h b/drivers/staging/sm750fb/ddk750_display.h
index e3fde428c52b..8abca88f089e 100644
--- a/drivers/staging/sm750fb/ddk750_display.h
+++ b/drivers/staging/sm750fb/ddk750_display.h
@@ -1,7 +1,8 @@
#ifndef DDK750_DISPLAY_H__
#define DDK750_DISPLAY_H__
-/* panel path select
+/*
+ * panel path select
* 80000[29:28]
*/
@@ -12,7 +13,8 @@
#define PNL_2_SEC ((2 << PNL_2_OFFSET) | PNL_2_USAGE)
-/* primary timing & plane enable bit
+/*
+ * primary timing & plane enable bit
* 1: 80000[8] & 80000[2] on
* 0: both off
*/
@@ -23,7 +25,8 @@
#define PRI_TP_OFF ((0x0 << PRI_TP_OFFSET) | PRI_TP_USAGE)
-/* panel sequency status
+/*
+ * panel sequence status
* 80000[27:24]
*/
#define PNL_SEQ_OFFSET 6
@@ -32,7 +35,8 @@
#define PNL_SEQ_ON (BIT(PNL_SEQ_OFFSET) | PNL_SEQ_USAGE)
#define PNL_SEQ_OFF ((0 << PNL_SEQ_OFFSET) | PNL_SEQ_USAGE)
-/* dual digital output
+/*
+ * dual digital output
* 80000[19]
*/
#define DUAL_TFT_OFFSET 8
@@ -41,7 +45,8 @@
#define DUAL_TFT_ON (BIT(DUAL_TFT_OFFSET) | DUAL_TFT_USAGE)
#define DUAL_TFT_OFF ((0 << DUAL_TFT_OFFSET) | DUAL_TFT_USAGE)
-/* secondary timing & plane enable bit
+/*
+ * secondary timing & plane enable bit
* 1:80200[8] & 80200[2] on
* 0: both off
*/
@@ -51,7 +56,8 @@
#define SEC_TP_ON ((0x1 << SEC_TP_OFFSET) | SEC_TP_USAGE)
#define SEC_TP_OFF ((0x0 << SEC_TP_OFFSET) | SEC_TP_USAGE)
-/* crt path select
+/*
+ * crt path select
* 80200[19:18]
*/
#define CRT_2_OFFSET 2
@@ -61,7 +67,8 @@
#define CRT_2_SEC ((0x2 << CRT_2_OFFSET) | CRT_2_USAGE)
-/* DAC affect both DVI and DSUB
+/*
+ * DAC affect both DVI and DSUB
* 4[20]
*/
#define DAC_OFFSET 7
@@ -70,7 +77,8 @@
#define DAC_ON ((0x0 << DAC_OFFSET) | DAC_USAGE)
#define DAC_OFF ((0x1 << DAC_OFFSET) | DAC_USAGE)
-/* DPMS only affect D-SUB head
+/*
+ * DPMS only affect D-SUB head
* 0[31:30]
*/
#define DPMS_OFFSET 9
@@ -81,7 +89,8 @@
-/* LCD1 means panel path TFT1 & panel path DVI (so enable DAC)
+/*
+ * LCD1 means panel path TFT1 & panel path DVI (so enable DAC)
* CRT means crt path DSUB
*/
typedef enum _disp_output_t {
@@ -89,7 +98,8 @@ typedef enum _disp_output_t {
do_LCD1_SEC = PNL_2_SEC | SEC_TP_ON | PNL_SEQ_ON | DAC_ON,
do_LCD2_PRI = CRT_2_PRI | PRI_TP_ON | DUAL_TFT_ON,
do_LCD2_SEC = CRT_2_SEC | SEC_TP_ON | DUAL_TFT_ON,
- /* do_DSUB_PRI = CRT_2_PRI | PRI_TP_ON | DPMS_ON|DAC_ON,
+ /*
+ * do_DSUB_PRI = CRT_2_PRI | PRI_TP_ON | DPMS_ON|DAC_ON,
* do_DSUB_SEC = CRT_2_SEC | SEC_TP_ON | DPMS_ON|DAC_ON,
*/
do_CRT_PRI = CRT_2_PRI | PRI_TP_ON | DPMS_ON | DAC_ON,
diff --git a/drivers/staging/sm750fb/ddk750_dvi.c b/drivers/staging/sm750fb/ddk750_dvi.c
index 8252f771ef9e..250c2f478778 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.c
+++ b/drivers/staging/sm750fb/ddk750_dvi.c
@@ -1,6 +1,6 @@
#define USE_DVICHIP
#ifdef USE_DVICHIP
-#include "ddk750_help.h"
+#include "ddk750_chip.h"
#include "ddk750_reg.h"
#include "ddk750_dvi.h"
#include "ddk750_sii164.h"
diff --git a/drivers/staging/sm750fb/ddk750_help.c b/drivers/staging/sm750fb/ddk750_help.c
deleted file mode 100644
index 9637dd30d037..000000000000
--- a/drivers/staging/sm750fb/ddk750_help.c
+++ /dev/null
@@ -1,17 +0,0 @@
-#include "ddk750_help.h"
-
-void __iomem *mmio750;
-char revId750;
-unsigned short devId750;
-
-/* after driver mapped io registers, use this function first */
-void ddk750_set_mmio(void __iomem *addr, unsigned short devId, char revId)
-{
- mmio750 = addr;
- devId750 = devId;
- revId750 = revId;
- if (revId == 0xfe)
- printk("found sm750le\n");
-}
-
-
diff --git a/drivers/staging/sm750fb/ddk750_help.h b/drivers/staging/sm750fb/ddk750_help.h
deleted file mode 100644
index 009db9213a73..000000000000
--- a/drivers/staging/sm750fb/ddk750_help.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef DDK750_HELP_H__
-#define DDK750_HELP_H__
-#include "ddk750_chip.h"
-#ifndef USE_INTERNAL_REGISTER_ACCESS
-
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-
-/* software control endianness */
-#define PEEK32(addr) readl(addr + mmio750)
-#define POKE32(addr, data) writel(data, addr + mmio750)
-
-extern void __iomem *mmio750;
-extern char revId750;
-extern unsigned short devId750;
-#else
-/* implement if you want use it*/
-#endif
-
-#endif
diff --git a/drivers/staging/sm750fb/ddk750_hwi2c.c b/drivers/staging/sm750fb/ddk750_hwi2c.c
index d391c127ead7..05d4a73aa1d4 100644
--- a/drivers/staging/sm750fb/ddk750_hwi2c.c
+++ b/drivers/staging/sm750fb/ddk750_hwi2c.c
@@ -1,6 +1,6 @@
#define USE_HW_I2C
#ifdef USE_HW_I2C
-#include "ddk750_help.h"
+#include "ddk750_chip.h"
#include "ddk750_reg.h"
#include "ddk750_hwi2c.h"
#include "ddk750_power.h"
@@ -20,10 +20,11 @@ unsigned char bus_speed_mode
value |= (GPIO_MUX_30 | GPIO_MUX_31);
POKE32(GPIO_MUX, value);
- /* Enable Hardware I2C power.
+ /*
+ * Enable Hardware I2C power.
* TODO: Check if we need to enable GPIO power?
*/
- enableI2C(1);
+ sm750_enable_i2c(1);
/* Enable the I2C Controller and set the bus speed mode */
value = PEEK32(I2C_CTRL) & ~(I2C_CTRL_MODE | I2C_CTRL_EN);
@@ -44,7 +45,7 @@ void sm750_hw_i2c_close(void)
POKE32(I2C_CTRL, value);
/* Disable I2C Power */
- enableI2C(0);
+ sm750_enable_i2c(0);
/* Set GPIO 30 & 31 back as GPIO pins */
value = PEEK32(GPIO_MUX);
@@ -92,7 +93,8 @@ static unsigned int hw_i2c_write_data(
/* Set the Device Address */
POKE32(I2C_SLAVE_ADDRESS, addr & ~0x01);
- /* Write data.
+ /*
+ * Write data.
* Note:
* Only 16 byte can be accessed per i2c start instruction.
*/
@@ -158,7 +160,8 @@ static unsigned int hw_i2c_read_data(
/* Set the Device Address */
POKE32(I2C_SLAVE_ADDRESS, addr | 0x01);
- /* Read data and save them to the buffer.
+ /*
+ * Read data and save them to the buffer.
* Note:
* Only 16 byte can be accessed per i2c start instruction.
*/
diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c
index 05b83646c2d5..4a4b1de97a87 100644
--- a/drivers/staging/sm750fb/ddk750_mode.c
+++ b/drivers/staging/sm750fb/ddk750_mode.c
@@ -1,10 +1,10 @@
-#include "ddk750_help.h"
#include "ddk750_reg.h"
#include "ddk750_mode.h"
#include "ddk750_chip.h"
-/* SM750LE only:
+/*
+ * SM750LE only:
* This function takes care extra registers and bit fields required to set
* up a mode in SM750LE
*
@@ -19,7 +19,8 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
x = pModeParam->horizontal_display_end;
y = pModeParam->vertical_display_end;
- /* SM750LE has to set up the top-left and bottom-right
+ /*
+ * SM750LE has to set up the top-left and bottom-right
* registers as well.
* Note that normal SM750/SM718 only use those two register for
* auto-centering mode.
@@ -31,7 +32,8 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
CRT_AUTO_CENTERING_BR_BOTTOM_MASK) |
((x - 1) & CRT_AUTO_CENTERING_BR_RIGHT_MASK));
- /* Assume common fields in dispControl have been properly set before
+ /*
+ * Assume common fields in dispControl have been properly set before
* calling this function.
* This function only sets the extra fields in dispControl.
*/
@@ -72,7 +74,8 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
/* only timing related registers will be programed */
-static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
+static int programModeRegisters(mode_parameter_t *pModeParam,
+ struct pll_value *pll)
{
int ret = 0;
int cnt = 0;
@@ -80,7 +83,7 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
if (pll->clockType == SECONDARY_PLL) {
/* programe secondary pixel clock */
- POKE32(CRT_PLL_CTRL, formatPllReg(pll));
+ POKE32(CRT_PLL_CTRL, sm750_format_pll_reg(pll));
POKE32(CRT_HORIZONTAL_TOTAL,
(((pModeParam->horizontal_total - 1) <<
CRT_HORIZONTAL_TOTAL_TOTAL_SHIFT) &
@@ -130,7 +133,7 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
} else if (pll->clockType == PRIMARY_PLL) {
unsigned int reserved;
- POKE32(PANEL_PLL_CTRL, formatPllReg(pll));
+ POKE32(PANEL_PLL_CTRL, sm750_format_pll_reg(pll));
reg = ((pModeParam->horizontal_total - 1) <<
PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT) &
@@ -176,14 +179,14 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
DISPLAY_CTRL_HSYNC_PHASE | DISPLAY_CTRL_TIMING |
DISPLAY_CTRL_PLANE);
- /* May a hardware bug or just my test chip (not confirmed).
- * PANEL_DISPLAY_CTRL register seems requiring few writes
- * before a value can be successfully written in.
- * Added some masks to mask out the reserved bits.
- * Note: This problem happens by design. The hardware will wait for the
- * next vertical sync to turn on/off the plane.
- */
-
+ /*
+ * Maybe a hardware bug or just my test chip (not confirmed).
+ * The PANEL_DISPLAY_CTRL register seems to require a few writes
+ * before a value can be successfully written in.
+ * Added some masks to mask out the reserved bits.
+ * Note: This problem happens by design. The hardware will wait
+ * for the next vertical sync to turn on/off the plane.
+ */
POKE32(PANEL_DISPLAY_CTRL, tmp | reg);
while ((PEEK32(PANEL_DISPLAY_CTRL) & ~reserved) !=
@@ -201,13 +204,13 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
int ddk750_setModeTiming(mode_parameter_t *parm, clock_type_t clock)
{
- pll_value_t pll;
+ struct pll_value pll;
unsigned int uiActualPixelClk;
pll.inputFreq = DEFAULT_INPUT_CLOCK;
pll.clockType = clock;
- uiActualPixelClk = calcPllValue(parm->pixel_clock, &pll);
+ uiActualPixelClk = sm750_calc_pll_value(parm->pixel_clock, &pll);
if (sm750_get_chip_type() == SM750LE) {
/* set graphic mode via IO method */
outb_p(0x88, 0x3d4);
diff --git a/drivers/staging/sm750fb/ddk750_power.c b/drivers/staging/sm750fb/ddk750_power.c
index 7cc6169f884e..6167e30e8e01 100644
--- a/drivers/staging/sm750fb/ddk750_power.c
+++ b/drivers/staging/sm750fb/ddk750_power.c
@@ -1,8 +1,8 @@
-#include "ddk750_help.h"
+#include "ddk750_chip.h"
#include "ddk750_reg.h"
#include "ddk750_power.h"
-void ddk750_setDPMS(DPMS_t state)
+void ddk750_set_dpms(DPMS_t state)
{
unsigned int value;
@@ -17,7 +17,7 @@ void ddk750_setDPMS(DPMS_t state)
}
}
-static unsigned int getPowerMode(void)
+static unsigned int get_power_mode(void)
{
if (sm750_get_chip_type() == SM750LE)
return 0;
@@ -29,26 +29,26 @@ static unsigned int getPowerMode(void)
* SM50x can operate in one of three modes: 0, 1 or Sleep.
* On hardware reset, power mode 0 is default.
*/
-void setPowerMode(unsigned int powerMode)
+void sm750_set_power_mode(unsigned int mode)
{
- unsigned int control_value = 0;
+ unsigned int ctrl = 0;
- control_value = PEEK32(POWER_MODE_CTRL) & ~POWER_MODE_CTRL_MODE_MASK;
+ ctrl = PEEK32(POWER_MODE_CTRL) & ~POWER_MODE_CTRL_MODE_MASK;
if (sm750_get_chip_type() == SM750LE)
return;
- switch (powerMode) {
+ switch (mode) {
case POWER_MODE_CTRL_MODE_MODE0:
- control_value |= POWER_MODE_CTRL_MODE_MODE0;
+ ctrl |= POWER_MODE_CTRL_MODE_MODE0;
break;
case POWER_MODE_CTRL_MODE_MODE1:
- control_value |= POWER_MODE_CTRL_MODE_MODE1;
+ ctrl |= POWER_MODE_CTRL_MODE_MODE1;
break;
case POWER_MODE_CTRL_MODE_SLEEP:
- control_value |= POWER_MODE_CTRL_MODE_SLEEP;
+ ctrl |= POWER_MODE_CTRL_MODE_SLEEP;
break;
default:
@@ -56,44 +56,28 @@ void setPowerMode(unsigned int powerMode)
}
/* Set up other fields in Power Control Register */
- if (powerMode == POWER_MODE_CTRL_MODE_SLEEP) {
- control_value &= ~POWER_MODE_CTRL_OSC_INPUT;
+ if (mode == POWER_MODE_CTRL_MODE_SLEEP) {
+ ctrl &= ~POWER_MODE_CTRL_OSC_INPUT;
#ifdef VALIDATION_CHIP
- control_value &= ~POWER_MODE_CTRL_336CLK;
+ ctrl &= ~POWER_MODE_CTRL_336CLK;
#endif
} else {
- control_value |= POWER_MODE_CTRL_OSC_INPUT;
+ ctrl |= POWER_MODE_CTRL_OSC_INPUT;
#ifdef VALIDATION_CHIP
- control_value |= POWER_MODE_CTRL_336CLK;
+ ctrl |= POWER_MODE_CTRL_336CLK;
#endif
}
/* Program new power mode. */
- POKE32(POWER_MODE_CTRL, control_value);
+ POKE32(POWER_MODE_CTRL, ctrl);
}
-void setCurrentGate(unsigned int gate)
+void sm750_set_current_gate(unsigned int gate)
{
- unsigned int gate_reg;
- unsigned int mode;
-
- /* Get current power mode. */
- mode = getPowerMode();
-
- switch (mode) {
- case POWER_MODE_CTRL_MODE_MODE0:
- gate_reg = MODE0_GATE;
- break;
-
- case POWER_MODE_CTRL_MODE_MODE1:
- gate_reg = MODE1_GATE;
- break;
-
- default:
- gate_reg = MODE0_GATE;
- break;
- }
- POKE32(gate_reg, gate);
+ if (get_power_mode() == POWER_MODE_CTRL_MODE_MODE1)
+ POKE32(MODE1_GATE, gate);
+ else
+ POKE32(MODE0_GATE, gate);
}
@@ -101,7 +85,7 @@ void setCurrentGate(unsigned int gate)
/*
* This function enable/disable the 2D engine.
*/
-void enable2DEngine(unsigned int enable)
+void sm750_enable_2d_engine(unsigned int enable)
{
u32 gate;
@@ -111,10 +95,10 @@ void enable2DEngine(unsigned int enable)
else
gate &= ~(CURRENT_GATE_DE | CURRENT_GATE_CSC);
- setCurrentGate(gate);
+ sm750_set_current_gate(gate);
}
-void enableDMA(unsigned int enable)
+void sm750_enable_dma(unsigned int enable)
{
u32 gate;
@@ -125,13 +109,13 @@ void enableDMA(unsigned int enable)
else
gate &= ~CURRENT_GATE_DMA;
- setCurrentGate(gate);
+ sm750_set_current_gate(gate);
}
/*
* This function enable/disable the GPIO Engine
*/
-void enableGPIO(unsigned int enable)
+void sm750_enable_gpio(unsigned int enable)
{
u32 gate;
@@ -142,13 +126,13 @@ void enableGPIO(unsigned int enable)
else
gate &= ~CURRENT_GATE_GPIO;
- setCurrentGate(gate);
+ sm750_set_current_gate(gate);
}
/*
* This function enable/disable the I2C Engine
*/
-void enableI2C(unsigned int enable)
+void sm750_enable_i2c(unsigned int enable)
{
u32 gate;
@@ -159,7 +143,7 @@ void enableI2C(unsigned int enable)
else
gate &= ~CURRENT_GATE_I2C;
- setCurrentGate(gate);
+ sm750_set_current_gate(gate);
}
diff --git a/drivers/staging/sm750fb/ddk750_power.h b/drivers/staging/sm750fb/ddk750_power.h
index 5963691f9a68..eb088b0d805f 100644
--- a/drivers/staging/sm750fb/ddk750_power.h
+++ b/drivers/staging/sm750fb/ddk750_power.h
@@ -14,37 +14,29 @@ DPMS_t;
(PEEK32(MISC_CTRL) & ~MISC_CTRL_DAC_POWER_OFF) | (off)); \
}
-void ddk750_setDPMS(DPMS_t);
-
-/*
- * This function sets the current power mode
- */
-void setPowerMode(unsigned int powerMode);
-
-/*
- * This function sets current gate
- */
-void setCurrentGate(unsigned int gate);
+void ddk750_set_dpms(DPMS_t);
+void sm750_set_power_mode(unsigned int powerMode);
+void sm750_set_current_gate(unsigned int gate);
/*
* This function enable/disable the 2D engine.
*/
-void enable2DEngine(unsigned int enable);
+void sm750_enable_2d_engine(unsigned int enable);
/*
* This function enable/disable the DMA Engine
*/
-void enableDMA(unsigned int enable);
+void sm750_enable_dma(unsigned int enable);
/*
* This function enable/disable the GPIO Engine
*/
-void enableGPIO(unsigned int enable);
+void sm750_enable_gpio(unsigned int enable);
/*
* This function enable/disable the I2C Engine
*/
-void enableI2C(unsigned int enable);
+void sm750_enable_i2c(unsigned int enable);
#endif
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
index 99a8683e6383..259006ace219 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ b/drivers/staging/sm750fb/ddk750_sii164.c
@@ -173,7 +173,8 @@ long sii164InitChip(
i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
- /* De-skew enabled with default 111b value.
+ /*
+ * De-skew enabled with default 111b value.
* This fixes some artifacts problem in some mode on board 2.2.
* Somehow this fix does not affect board 2.1.
*/
diff --git a/drivers/staging/sm750fb/ddk750_swi2c.c b/drivers/staging/sm750fb/ddk750_swi2c.c
index 72a42330e7a1..b8a4e44359af 100644
--- a/drivers/staging/sm750fb/ddk750_swi2c.c
+++ b/drivers/staging/sm750fb/ddk750_swi2c.c
@@ -1,21 +1,20 @@
-/*******************************************************************
-*
-* Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
-*
-* All rights are reserved. Reproduction or in part is prohibited
-* without the written consent of the copyright owner.
-*
-* swi2c.c --- SM750/SM718 DDK
-* This file contains the source code for I2C using software
-* implementation.
-*
-*******************************************************************/
-#include "ddk750_help.h"
+/*
+ * Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
+ *
+ * All rights are reserved. Reproduction or in part is prohibited
+ * without the written consent of the copyright owner.
+ *
+ * swi2c.c --- SM750/SM718 DDK
+ * This file contains the source code for I2C using software
+ * implementation.
+ */
+
+#include "ddk750_chip.h"
#include "ddk750_reg.h"
#include "ddk750_swi2c.h"
#include "ddk750_power.h"
-/*******************************************************************
+/*
* I2C Software Master Driver:
* ===========================
* Each i2c cycle is split into 4 sections. Each of these section marks
@@ -51,7 +50,7 @@
* SCL | L | | H | |
* ---------------+---+---+---+---+
*
- ******************************************************************/
+ */
/* GPIO pins used for this I2C. It ranges from 0 to 63. */
static unsigned char sw_i2c_clk_gpio = DEFAULT_I2C_SCL;
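
The comment block above splits every software i2c cycle into four sections, with SDA only allowed to change while SCL is low. A minimal sketch of a single data bit under that scheme; sw_i2c_scl() and sw_i2c_sda() stand in for the driver's internal GPIO helpers and are assumptions here, not quotes from the file:

    static void sw_i2c_write_bit(unsigned char bit)
    {
            sw_i2c_scl(0);          /* section 1: clock low, data line may change */
            sw_i2c_sda(bit);        /* drive SDA while SCL is still low */
            sw_i2c_scl(1);          /* sections 2-3: clock high, slave samples SDA */
            sw_i2c_scl(0);          /* section 4: clock low again for the next bit */
    }
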
@@ -429,7 +428,7 @@ long sm750_sw_i2c_init(
PEEK32(sw_i2c_data_gpio_mux_reg) & ~(1 << sw_i2c_data_gpio));
/* Enable GPIO power */
- enableGPIO(1);
+ sm750_enable_gpio(1);
/* Clear the i2c lines. */
for (i = 0; i < 9; i++)
diff --git a/drivers/staging/sm750fb/ddk750_swi2c.h b/drivers/staging/sm750fb/ddk750_swi2c.h
index b53629cda095..5a9466efc7bd 100644
--- a/drivers/staging/sm750fb/ddk750_swi2c.h
+++ b/drivers/staging/sm750fb/ddk750_swi2c.h
@@ -1,15 +1,15 @@
-/*******************************************************************
-*
-* Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
-*
-* All rights are reserved. Reproduction or in part is prohibited
-* without the written consent of the copyright owner.
-*
-* swi2c.h --- SM750/SM718 DDK
-* This file contains the definitions for i2c using software
-* implementation.
-*
-*******************************************************************/
+/*
+ * Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
+ *
+ * All rights are reserved. Reproduction or in part is prohibited
+ * without the written consent of the copyright owner.
+ *
+ * swi2c.h --- SM750/SM718 DDK
+ * This file contains the definitions for i2c using software
+ * implementation.
+ *
+ */
+
#ifndef _SWI2C_H_
#define _SWI2C_H_
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 7d90e250142c..e9632f162f99 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -118,14 +118,14 @@ static int lynxfb_ops_cursor(struct fb_info *info, struct fb_cursor *fbcursor)
return -ENXIO;
}
- hw_cursor_disable(cursor);
+ sm750_hw_cursor_disable(cursor);
if (fbcursor->set & FB_CUR_SETSIZE)
- hw_cursor_setSize(cursor,
+ sm750_hw_cursor_setSize(cursor,
fbcursor->image.width,
fbcursor->image.height);
if (fbcursor->set & FB_CUR_SETPOS)
- hw_cursor_setPos(cursor,
+ sm750_hw_cursor_setPos(cursor,
fbcursor->image.dx - info->var.xoffset,
fbcursor->image.dy - info->var.yoffset);
@@ -141,18 +141,18 @@ static int lynxfb_ops_cursor(struct fb_info *info, struct fb_cursor *fbcursor)
((info->cmap.green[fbcursor->image.bg_color] & 0xfc00) >> 5) |
((info->cmap.blue[fbcursor->image.bg_color] & 0xf800) >> 11);
- hw_cursor_setColor(cursor, fg, bg);
+ sm750_hw_cursor_setColor(cursor, fg, bg);
}
if (fbcursor->set & (FB_CUR_SETSHAPE | FB_CUR_SETIMAGE)) {
- hw_cursor_setData(cursor,
+ sm750_hw_cursor_setData(cursor,
fbcursor->rop,
fbcursor->image.data,
fbcursor->mask);
}
if (fbcursor->enable)
- hw_cursor_enable(cursor);
+ sm750_hw_cursor_enable(cursor);
return 0;
}
@@ -575,11 +575,11 @@ static int lynxfb_ops_check_var(struct fb_var_screeninfo *var,
return hw_sm750_crtc_checkMode(crtc, var);
}
-static int lynxfb_ops_setcolreg(unsigned regno,
- unsigned red,
- unsigned green,
- unsigned blue,
- unsigned transp,
+static int lynxfb_ops_setcolreg(unsigned int regno,
+ unsigned int red,
+ unsigned int green,
+ unsigned int blue,
+ unsigned int transp,
struct fb_info *info)
{
struct lynxfb_par *par;
@@ -788,7 +788,7 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
memset_io(crtc->cursor.vstart, 0, crtc->cursor.size);
if (!g_hwcursor) {
lynxfb_ops.fb_cursor = NULL;
- hw_cursor_disable(&crtc->cursor);
+ sm750_hw_cursor_disable(&crtc->cursor);
}
/* set info->fbops, must be set before fb_find_mode */
@@ -947,13 +947,13 @@ static void sm750fb_setup(struct sm750_dev *sm750_dev, char *src)
g_hwcursor = 3;
if (!src || !*src) {
- pr_warn("no specific g_option.\n");
+ dev_warn(&sm750_dev->pdev->dev, "no specific g_option.\n");
goto NO_PARAM;
}
while ((opt = strsep(&src, ":")) != NULL && *opt != 0) {
- pr_info("opt=%s\n", opt);
- pr_info("src=%s\n", src);
+ dev_info(&sm750_dev->pdev->dev, "opt=%s\n", opt);
+ dev_info(&sm750_dev->pdev->dev, "src=%s\n", src);
if (!strncmp(opt, "swap", strlen("swap")))
swap = 1;
@@ -974,12 +974,12 @@ static void sm750fb_setup(struct sm750_dev *sm750_dev, char *src)
else {
if (!g_fbmode[0]) {
g_fbmode[0] = opt;
- pr_info("find fbmode0 : %s\n", g_fbmode[0]);
+ dev_info(&sm750_dev->pdev->dev, "find fbmode0 : %s\n", g_fbmode[0]);
} else if (!g_fbmode[1]) {
g_fbmode[1] = opt;
- pr_info("find fbmode1 : %s\n", g_fbmode[1]);
+ dev_info(&sm750_dev->pdev->dev, "find fbmode1 : %s\n", g_fbmode[1]);
} else {
- pr_warn("How many view you wann set?\n");
+ dev_warn(&sm750_dev->pdev->dev, "How many view you wann set?\n");
}
}
}
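
For reference, an illustrative g_option string matching what the parser above accepts: colon-separated keywords such as "swap", with any unrecognized token treated as the video mode for the first or second head. The mode strings are assumptions for illustration, not taken from the patch:

    static const char example_option[] = "swap:1024x768-16@60:800x600-16@60";

Passed via the module parameter, this would request swapped heads and one mode per head, assuming those mode strings can be resolved by fb_find_mode().
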
@@ -1083,10 +1083,10 @@ static int lynxfb_pci_probe(struct pci_dev *pdev,
* if some chip need specific function,
* please hook it in smXXX_set_drv routine
*/
- sm750_dev->accel.de_init = hw_de_init;
- sm750_dev->accel.de_fillrect = hw_fillrect;
- sm750_dev->accel.de_copyarea = hw_copyarea;
- sm750_dev->accel.de_imageblit = hw_imageblit;
+ sm750_dev->accel.de_init = sm750_hw_de_init;
+ sm750_dev->accel.de_fillrect = sm750_hw_fillrect;
+ sm750_dev->accel.de_copyarea = sm750_hw_copyarea;
+ sm750_dev->accel.de_imageblit = sm750_hw_imageblit;
}
/* call chip specific setup routine */
@@ -1188,7 +1188,7 @@ static int __init lynxfb_setup(char *options)
return 0;
}
-static struct pci_device_id smi_pci_table[] = {
+static const struct pci_device_id smi_pci_table[] = {
{ PCI_DEVICE(0x126f, 0x0750), },
{0,}
};
@@ -1209,7 +1209,6 @@ static struct pci_driver lynxfb_driver = {
static int __init lynxfb_init(void)
{
char *option;
- int ret;
#ifdef MODULE
option = g_option;
@@ -1219,8 +1218,7 @@ static int __init lynxfb_init(void)
#endif
lynxfb_setup(option);
- ret = pci_register_driver(&lynxfb_driver);
- return ret;
+ return pci_register_driver(&lynxfb_driver);
}
module_init(lynxfb_init);
@@ -1245,4 +1243,4 @@ MODULE_PARM_DESC(g_option,
MODULE_AUTHOR("monk liu <monk.liu@siliconmotion.com>");
MODULE_AUTHOR("Sudip Mukherjee <sudip@vectorindia.org>");
MODULE_DESCRIPTION("Frame buffer driver for SM750 chipset");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/staging/sm750fb/sm750.h b/drivers/staging/sm750fb/sm750.h
index ff31c5c9cc6f..28f4b9b4f95f 100644
--- a/drivers/staging/sm750fb/sm750.h
+++ b/drivers/staging/sm750fb/sm750.h
@@ -146,14 +146,16 @@ struct lynxfb_crtc {
struct lynxfb_output {
int dpms;
int paths;
- /* which paths(s) this output stands for,for sm750:
+ /*
+ * which paths(s) this output stands for,for sm750:
* paths=1:means output for panel paths
* paths=2:means output for crt paths
* paths=3:means output for both panel and crt paths
*/
int *channel;
- /* which channel these outputs linked with,for sm750:
+ /*
+ * which channel these outputs linked with,for sm750:
* *channel=0 means primary channel
* *channel=1 means secondary channel
* output->channel ==> &crtc->channel
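
As documented above, paths is effectively a two-bit mask (1 = panel, 2 = CRT, 3 = both). A hedged sketch of how a consumer could test it; the setup helpers named here are purely illustrative and do not exist in the driver:

    static void example_route_output(struct lynxfb_output *output)
    {
            if (output->paths & 1)                  /* bit 0: panel path */
                    setup_panel_path(output);       /* hypothetical helper */
            if (output->paths & 2)                  /* bit 1: CRT path */
                    setup_crt_path(output);         /* hypothetical helper */
    }
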
diff --git a/drivers/staging/sm750fb/sm750_accel.c b/drivers/staging/sm750fb/sm750_accel.c
index 38adae6b5d83..af0db5789c53 100644
--- a/drivers/staging/sm750fb/sm750_accel.c
+++ b/drivers/staging/sm750fb/sm750_accel.c
@@ -32,7 +32,7 @@ static inline void write_dpPort(struct lynx_accel *accel, u32 data)
writel(data, accel->dpPortBase);
}
-void hw_de_init(struct lynx_accel *accel)
+void sm750_hw_de_init(struct lynx_accel *accel)
{
/* setup 2d engine registers */
u32 reg, clr;
@@ -65,12 +65,13 @@ void hw_de_init(struct lynx_accel *accel)
write_dpr(accel, DE_CONTROL, read_dpr(accel, DE_CONTROL) & ~clr);
}
-/* set2dformat only be called from setmode functions
+/*
+ * set2dformat only be called from setmode functions
* but if you need dual framebuffer driver,need call set2dformat
* every time you use 2d function
*/
-void hw_set2dformat(struct lynx_accel *accel, int fmt)
+void sm750_hw_set2dformat(struct lynx_accel *accel, int fmt)
{
u32 reg;
@@ -82,7 +83,7 @@ void hw_set2dformat(struct lynx_accel *accel, int fmt)
write_dpr(accel, DE_STRETCH_FORMAT, reg);
}
-int hw_fillrect(struct lynx_accel *accel,
+int sm750_hw_fillrect(struct lynx_accel *accel,
u32 base, u32 pitch, u32 Bpp,
u32 x, u32 y, u32 width, u32 height,
u32 color, u32 rop)
@@ -90,7 +91,8 @@ int hw_fillrect(struct lynx_accel *accel,
u32 deCtrl;
if (accel->de_wait() != 0) {
- /* int time wait and always busy,seems hardware
+ /*
+ * int time wait and always busy,seems hardware
* got something error
*/
pr_debug("De engine always busy\n");
@@ -126,7 +128,7 @@ int hw_fillrect(struct lynx_accel *accel,
return 0;
}
-int hw_copyarea(
+int sm750_hw_copyarea(
struct lynx_accel *accel,
unsigned int sBase, /* Address of source: offset in frame buffer */
unsigned int sPitch, /* Pitch value of source surface in BYTE */
@@ -213,25 +215,29 @@ unsigned int rop2) /* ROP value */
opSign = (-1);
}
- /* Note:
+ /*
+ * Note:
* DE_FOREGROUND are DE_BACKGROUND are don't care.
* DE_COLOR_COMPARE and DE_COLOR_COMPARE_MAKS
* are set by set deSetTransparency().
*/
- /* 2D Source Base.
+ /*
+ * 2D Source Base.
* It is an address offset (128 bit aligned)
* from the beginning of frame buffer.
*/
write_dpr(accel, DE_WINDOW_SOURCE_BASE, sBase); /* dpr40 */
- /* 2D Destination Base.
+ /*
+ * 2D Destination Base.
* It is an address offset (128 bit aligned)
* from the beginning of frame buffer.
*/
write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase); /* dpr44 */
- /* Program pitch (distance between the 1st points of two adjacent lines).
+ /*
+ * Program pitch (distance between the 1st points of two adjacent lines).
* Note that input pitch is BYTE value, but the 2D Pitch register uses
* pixel values. Need Byte to pixel conversion.
*/
@@ -240,7 +246,8 @@ unsigned int rop2) /* ROP value */
DE_PITCH_DESTINATION_MASK) |
(sPitch / Bpp & DE_PITCH_SOURCE_MASK)); /* dpr10 */
- /* Screen Window width in Pixels.
+ /*
+ * Screen Window width in Pixels.
* 2D engine uses this value to calculate the linear address in frame buffer
* for a given point.
*/
@@ -286,7 +293,7 @@ static unsigned int deGetTransparency(struct lynx_accel *accel)
return de_ctrl;
}
-int hw_imageblit(struct lynx_accel *accel,
+int sm750_hw_imageblit(struct lynx_accel *accel,
const char *pSrcbuf, /* pointer to start of source buffer in system memory */
u32 srcDelta, /* Pitch value (in bytes) of the source buffer, +ive means top down and -ive mean button up */
u32 startBit, /* Mono data can start at any bit in a byte, this value should be 0 to 7 */
@@ -316,7 +323,8 @@ int hw_imageblit(struct lynx_accel *accel,
if (accel->de_wait() != 0)
return -1;
- /* 2D Source Base.
+ /*
+ * 2D Source Base.
* Use 0 for HOST Blt.
*/
write_dpr(accel, DE_WINDOW_SOURCE_BASE, 0);
@@ -326,16 +334,19 @@ int hw_imageblit(struct lynx_accel *accel,
* from the beginning of frame buffer.
*/
write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase);
- /* Program pitch (distance between the 1st points of two adjacent lines).
- * Note that input pitch is BYTE value, but the 2D Pitch register uses
- * pixel values. Need Byte to pixel conversion.
- */
+
+ /*
+ * Program pitch (distance between the 1st points of two adjacent
+ * lines). Note that input pitch is BYTE value, but the 2D Pitch
+ * register uses pixel values. Need Byte to pixel conversion.
+ */
write_dpr(accel, DE_PITCH,
((dPitch / bytePerPixel << DE_PITCH_DESTINATION_SHIFT) &
DE_PITCH_DESTINATION_MASK) |
(dPitch / bytePerPixel & DE_PITCH_SOURCE_MASK)); /* dpr10 */
- /* Screen Window width in Pixels.
+ /*
+ * Screen Window width in Pixels.
* 2D engine uses this value to calculate the linear address
* in frame buffer for a given point.
*/
@@ -344,7 +355,8 @@ int hw_imageblit(struct lynx_accel *accel,
DE_WINDOW_WIDTH_DST_MASK) |
(dPitch / bytePerPixel & DE_WINDOW_WIDTH_SRC_MASK));
- /* Note: For 2D Source in Host Write, only X_K1_MONO field is needed,
+ /*
+ * Note: For 2D Source in Host Write, only X_K1_MONO field is needed,
* and Y_K2 field is not used.
* For mono bitmap, use startBit for X_K1.
*/
@@ -383,6 +395,6 @@ int hw_imageblit(struct lynx_accel *accel,
pSrcbuf += srcDelta;
}
- return 0;
+ return 0;
}
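
The DE_PITCH and DE_WINDOW_WIDTH writes above convert byte pitches into pixel counts. A short worked example with illustrative values, not taken from the driver:

    u32 Bpp = 2;                            /* 16 bpp surface                     */
    u32 dPitch = 1024 * Bpp;                /* 2048 bytes between adjacent lines  */
    u32 pitch_in_pixels = dPitch / Bpp;     /* 1024, the value the register wants */
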
diff --git a/drivers/staging/sm750fb/sm750_accel.h b/drivers/staging/sm750fb/sm750_accel.h
index d59d005e0add..4b0ff8feb9a0 100644
--- a/drivers/staging/sm750fb/sm750_accel.h
+++ b/drivers/staging/sm750fb/sm750_accel.h
@@ -184,16 +184,16 @@
#define BOTTOM_TO_TOP 1
#define RIGHT_TO_LEFT 1
-void hw_set2dformat(struct lynx_accel *accel, int fmt);
+void sm750_hw_set2dformat(struct lynx_accel *accel, int fmt);
-void hw_de_init(struct lynx_accel *accel);
+void sm750_hw_de_init(struct lynx_accel *accel);
-int hw_fillrect(struct lynx_accel *accel,
+int sm750_hw_fillrect(struct lynx_accel *accel,
u32 base, u32 pitch, u32 Bpp,
u32 x, u32 y, u32 width, u32 height,
u32 color, u32 rop);
-int hw_copyarea(
+int sm750_hw_copyarea(
struct lynx_accel *accel,
unsigned int sBase, /* Address of source: offset in frame buffer */
unsigned int sPitch, /* Pitch value of source surface in BYTE */
@@ -208,7 +208,7 @@ unsigned int width,
unsigned int height, /* width and height of rectangle in pixel value */
unsigned int rop2);
-int hw_imageblit(struct lynx_accel *accel,
+int sm750_hw_imageblit(struct lynx_accel *accel,
const char *pSrcbuf, /* pointer to start of source buffer in system memory */
u32 srcDelta, /* Pitch value (in bytes) of the source buffer, +ive means top down and -ive mean button up */
u32 startBit, /* Mono data can start at any bit in a byte, this value should be 0 to 7 */
diff --git a/drivers/staging/sm750fb/sm750_cursor.c b/drivers/staging/sm750fb/sm750_cursor.c
index d622d65b6cee..2a13353fc492 100644
--- a/drivers/staging/sm750fb/sm750_cursor.c
+++ b/drivers/staging/sm750fb/sm750_cursor.c
@@ -47,25 +47,25 @@ writel((data), cursor->mmio + (addr))
/* hw_cursor_xxx works for voyager,718 and 750 */
-void hw_cursor_enable(struct lynx_cursor *cursor)
+void sm750_hw_cursor_enable(struct lynx_cursor *cursor)
{
u32 reg;
reg = (cursor->offset & HWC_ADDRESS_ADDRESS_MASK) | HWC_ADDRESS_ENABLE;
POKE32(HWC_ADDRESS, reg);
}
-void hw_cursor_disable(struct lynx_cursor *cursor)
+void sm750_hw_cursor_disable(struct lynx_cursor *cursor)
{
POKE32(HWC_ADDRESS, 0);
}
-void hw_cursor_setSize(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setSize(struct lynx_cursor *cursor,
int w, int h)
{
cursor->w = w;
cursor->h = h;
}
-void hw_cursor_setPos(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setPos(struct lynx_cursor *cursor,
int x, int y)
{
u32 reg;
@@ -74,7 +74,7 @@ void hw_cursor_setPos(struct lynx_cursor *cursor,
(x & HWC_LOCATION_X_MASK));
POKE32(HWC_LOCATION, reg);
}
-void hw_cursor_setColor(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setColor(struct lynx_cursor *cursor,
u32 fg, u32 bg)
{
u32 reg = (fg << HWC_COLOR_12_2_RGB565_SHIFT) &
@@ -84,7 +84,7 @@ void hw_cursor_setColor(struct lynx_cursor *cursor,
POKE32(HWC_COLOR_3, 0xffe0);
}
-void hw_cursor_setData(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setData(struct lynx_cursor *cursor,
u16 rop, const u8 *pcol, const u8 *pmsk)
{
int i, j, count, pitch, offset;
@@ -138,7 +138,7 @@ void hw_cursor_setData(struct lynx_cursor *cursor,
}
-void hw_cursor_setData2(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setData2(struct lynx_cursor *cursor,
u16 rop, const u8 *pcol, const u8 *pmsk)
{
int i, j, count, pitch, offset;
diff --git a/drivers/staging/sm750fb/sm750_cursor.h b/drivers/staging/sm750fb/sm750_cursor.h
index 6c4fc9b73489..c7b86ae235b4 100644
--- a/drivers/staging/sm750fb/sm750_cursor.h
+++ b/drivers/staging/sm750fb/sm750_cursor.h
@@ -2,16 +2,16 @@
#define LYNX_CURSOR_H__
/* hw_cursor_xxx works for voyager,718 and 750 */
-void hw_cursor_enable(struct lynx_cursor *cursor);
-void hw_cursor_disable(struct lynx_cursor *cursor);
-void hw_cursor_setSize(struct lynx_cursor *cursor,
+void sm750_hw_cursor_enable(struct lynx_cursor *cursor);
+void sm750_hw_cursor_disable(struct lynx_cursor *cursor);
+void sm750_hw_cursor_setSize(struct lynx_cursor *cursor,
int w, int h);
-void hw_cursor_setPos(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setPos(struct lynx_cursor *cursor,
int x, int y);
-void hw_cursor_setColor(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setColor(struct lynx_cursor *cursor,
u32 fg, u32 bg);
-void hw_cursor_setData(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setData(struct lynx_cursor *cursor,
u16 rop, const u8 *data, const u8 *mask);
-void hw_cursor_setData2(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setData2(struct lynx_cursor *cursor,
u16 rop, const u8 *data, const u8 *mask);
#endif
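
Taken together with the fb_cursor handler earlier in this patch, the renamed cursor API is used as a disable, configure, enable sequence. A condensed sketch; the size, colors and rop values are purely illustrative:

    static void example_cursor_update(struct lynx_cursor *cursor, u16 rop,
                                      const u8 *data, const u8 *mask)
    {
            sm750_hw_cursor_disable(cursor);
            sm750_hw_cursor_setSize(cursor, 32, 32);
            sm750_hw_cursor_setPos(cursor, 0, 0);
            sm750_hw_cursor_setColor(cursor, 0xffff, 0x0000);  /* RGB565 fg/bg */
            sm750_hw_cursor_setData(cursor, rop, data, mask);
            sm750_hw_cursor_enable(cursor);
    }
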
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index 7dd208caa5eb..b6af3b53076b 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -23,6 +23,8 @@
#include "ddk750.h"
#include "sm750_accel.h"
+void __iomem *mmio750;
+
int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
{
int ret;
@@ -34,7 +36,8 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
pr_info("mmio phyAddr = %lx\n", sm750_dev->vidreg_start);
- /* reserve the vidreg space of smi adaptor
+ /*
+ * reserve the vidreg space of smi adaptor
* if you do this, you need to add release region code
* in lynxfb_remove, or memory will not be mapped again
* successfully
@@ -59,15 +62,17 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
sm750_dev->accel.dprBase = sm750_dev->pvReg + DE_BASE_ADDR_TYPE1;
sm750_dev->accel.dpPortBase = sm750_dev->pvReg + DE_PORT_ADDR_TYPE1;
- ddk750_set_mmio(sm750_dev->pvReg, sm750_dev->devid, sm750_dev->revid);
+ mmio750 = sm750_dev->pvReg;
+ sm750_set_chip_type(sm750_dev->devid, sm750_dev->revid);
sm750_dev->vidmem_start = pci_resource_start(pdev, 0);
- /* don't use pdev_resource[x].end - resource[x].start to
+ /*
+ * don't use pdev_resource[x].end - resource[x].start to
* calculate the resource size, it's only the maximum available
* size but not the actual size, using
- * @ddk750_getVMSize function can be safe.
+ * @ddk750_get_vm_size function can be safe.
*/
- sm750_dev->vidmem_size = ddk750_getVMSize();
+ sm750_dev->vidmem_size = ddk750_get_vm_size();
pr_info("video memory phyAddr = %lx, size = %u bytes\n",
sm750_dev->vidmem_start, sm750_dev->vidmem_size);
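
With ddk750_set_mmio() gone, the register accessors are presumably resolved against the new mmio750 global exported above. A hedged sketch of what PEEK32/POKE32 are assumed to expand to after this change; the real definitions live in the ddk750 headers and are not shown in this diff:

    extern void __iomem *mmio750;

    #define PEEK32(addr)            readl(mmio750 + (addr))
    #define POKE32(addr, data)      writel((data), mmio750 + (addr))
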
@@ -100,7 +105,7 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
if (parm->master_clk == 0)
parm->master_clk = parm->chip_clk / 3;
- ddk750_initHw((initchip_param_t *)&sm750_dev->initParm);
+ ddk750_init_hw((struct initchip_param *)&sm750_dev->initParm);
/* for sm718, open pci burst */
if (sm750_dev->devid == 0x718) {
POKE32(SYSTEM_CTRL,
@@ -141,7 +146,8 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
}
POKE32(PANEL_DISPLAY_CTRL, val);
} else {
- /* for 750LE, no DVI chip initialization
+ /*
+ * for 750LE, no DVI chip initialization
* makes Monitor no signal
*
* Set up GPIO for software I2C to program DVI chip in the
@@ -149,13 +155,15 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
*/
sm750_sw_i2c_init(0, 1);
- /* Customer may NOT use CH7301 DVI chip, which has to be
+ /*
+ * Customer may NOT use CH7301 DVI chip, which has to be
* initialized differently.
*/
if (sm750_sw_i2c_read_reg(0xec, 0x4a) == 0x95) {
- /* The following register values for CH7301 are from
- * Chrontel app note and our experiment.
- */
+ /*
+ * The following register values for CH7301 are from
+ * Chrontel app note and our experiment.
+ */
pr_info("yes,CH7301 DVI chip found\n");
sm750_sw_i2c_write_reg(0xec, 0x1d, 0x16);
sm750_sw_i2c_write_reg(0xec, 0x21, 0x9);
@@ -267,7 +275,7 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
fmt = 2;
break;
}
- hw_set2dformat(&sm750_dev->accel, fmt);
+ sm750_hw_set2dformat(&sm750_dev->accel, fmt);
}
/* set timing */
@@ -308,7 +316,8 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
crtc->oScreen & PANEL_FB_ADDRESS_ADDRESS_MASK);
reg = var->xres * (var->bits_per_pixel >> 3);
- /* crtc->channel is not equal to par->index on numeric,
+ /*
+ * crtc->channel is not equal to par->index on numeric,
* be aware of that
*/
reg = ALIGN(reg, crtc->line_pad);
@@ -342,7 +351,8 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
/* not implemented now */
POKE32(CRT_FB_ADDRESS, crtc->oScreen);
reg = var->xres * (var->bits_per_pixel >> 3);
- /* crtc->channel is not equal to par->index on numeric,
+ /*
+ * crtc->channel is not equal to par->index on numeric,
* be aware of that
*/
reg = ALIGN(reg, crtc->line_pad) << CRT_FB_WIDTH_WIDTH_SHIFT;
@@ -469,7 +479,7 @@ void hw_sm750_initAccel(struct sm750_dev *sm750_dev)
{
u32 reg;
- enable2DEngine(1);
+ sm750_enable_2d_engine(1);
if (sm750_get_chip_type() == SM750LE) {
reg = PEEK32(DE_STATE1);
diff --git a/drivers/staging/speakup/TODO b/drivers/staging/speakup/TODO
index 3094799cf6a0..993410c3e531 100644
--- a/drivers/staging/speakup/TODO
+++ b/drivers/staging/speakup/TODO
@@ -42,6 +42,6 @@ We prefer that you contact us on the mailing list; however, if you do
not want to subscribe to a mailing list, send your email to all of the
following:
-w.d.hubbs@gmail.com, chris@the-brannons.com, kirk@braille.uwo.ca and
+w.d.hubbs@gmail.com, chris@the-brannons.com, kirk@reisers.ca and
samuel.thibault@ens-lyon.org.
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 97ca4ecca8a9..5c192042eeac 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -351,14 +351,14 @@ static void speakup_cut(struct vc_data *vc)
if (!mark_cut_flag) {
mark_cut_flag = 1;
- spk_xs = (u_short) spk_x;
- spk_ys = (u_short) spk_y;
+ spk_xs = (u_short)spk_x;
+ spk_ys = (u_short)spk_y;
spk_sel_cons = vc;
synth_printf("%s\n", spk_msg_get(MSG_MARK));
return;
}
- spk_xe = (u_short) spk_x;
- spk_ye = (u_short) spk_y;
+ spk_xe = (u_short)spk_x;
+ spk_ye = (u_short)spk_y;
mark_cut_flag = 0;
synth_printf("%s\n", spk_msg_get(MSG_CUT));
@@ -489,7 +489,7 @@ static void say_char(struct vc_data *vc)
u_short ch;
spk_old_attr = spk_attr;
- ch = get_char(vc, (u_short *) spk_pos, &spk_attr);
+ ch = get_char(vc, (u_short *)spk_pos, &spk_attr);
if (spk_attr != spk_old_attr) {
if (spk_attrib_bleep & 1)
bleep(spk_y);
@@ -504,7 +504,7 @@ static void say_phonetic_char(struct vc_data *vc)
u_short ch;
spk_old_attr = spk_attr;
- ch = get_char(vc, (u_short *) spk_pos, &spk_attr);
+ ch = get_char(vc, (u_short *)spk_pos, &spk_attr);
if (isascii(ch) && isalpha(ch)) {
ch &= 0x1f;
synth_printf("%s\n", phonetic[--ch]);
@@ -556,7 +556,7 @@ static u_long get_word(struct vc_data *vc)
u_char temp;
spk_old_attr = spk_attr;
- ch = (char)get_char(vc, (u_short *) tmp_pos, &temp);
+ ch = (char)get_char(vc, (u_short *)tmp_pos, &temp);
/* decided to take out the sayword if on a space (mis-information */
if (spk_say_word_ctl && ch == SPACE) {
@@ -565,26 +565,26 @@ static u_long get_word(struct vc_data *vc)
return 0;
} else if ((tmpx < vc->vc_cols - 2)
&& (ch == SPACE || ch == 0 || IS_WDLM(ch))
- && ((char)get_char(vc, (u_short *) &tmp_pos + 1, &temp) >
+ && ((char)get_char(vc, (u_short *)&tmp_pos + 1, &temp) >
SPACE)) {
tmp_pos += 2;
tmpx++;
} else
while (tmpx > 0) {
- ch = (char)get_char(vc, (u_short *) tmp_pos - 1, &temp);
+ ch = (char)get_char(vc, (u_short *)tmp_pos - 1, &temp);
if ((ch == SPACE || ch == 0 || IS_WDLM(ch))
- && ((char)get_char(vc, (u_short *) tmp_pos, &temp) >
+ && ((char)get_char(vc, (u_short *)tmp_pos, &temp) >
SPACE))
break;
tmp_pos -= 2;
tmpx--;
}
- attr_ch = get_char(vc, (u_short *) tmp_pos, &spk_attr);
+ attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
buf[cnt++] = attr_ch & 0xff;
while (tmpx < vc->vc_cols - 1) {
tmp_pos += 2;
tmpx++;
- ch = (char)get_char(vc, (u_short *) tmp_pos, &temp);
+ ch = (char)get_char(vc, (u_short *)tmp_pos, &temp);
if ((ch == SPACE) || ch == 0
|| (IS_WDLM(buf[cnt - 1]) && (ch > SPACE)))
break;
@@ -639,7 +639,7 @@ static void say_prev_word(struct vc_data *vc)
} else
spk_x--;
spk_pos -= 2;
- ch = (char)get_char(vc, (u_short *) spk_pos, &temp);
+ ch = (char)get_char(vc, (u_short *)spk_pos, &temp);
if (ch == SPACE || ch == 0)
state = 0;
else if (IS_WDLM(ch))
@@ -672,7 +672,7 @@ static void say_next_word(struct vc_data *vc)
return;
}
while (1) {
- ch = (char)get_char(vc, (u_short *) spk_pos, &temp);
+ ch = (char)get_char(vc, (u_short *)spk_pos, &temp);
if (ch == SPACE || ch == 0)
state = 0;
else if (IS_WDLM(ch))
@@ -709,7 +709,7 @@ static void spell_word(struct vc_data *vc)
if (!get_word(vc))
return;
- while ((ch = (u_char) *cp)) {
+ while ((ch = (u_char)*cp)) {
if (cp != buf)
synth_printf(" %s ", delay_str[spk_spell_delay]);
if (IS_CHAR(ch, B_CAP)) {
@@ -751,7 +751,7 @@ static int get_line(struct vc_data *vc)
spk_old_attr = spk_attr;
spk_attr = get_attributes(vc, (u_short *)spk_pos);
for (i = 0; i < vc->vc_cols; i++) {
- buf[i] = (u_char) get_char(vc, (u_short *) tmp, &tmp2);
+ buf[i] = (u_char)get_char(vc, (u_short *)tmp, &tmp2);
tmp += 2;
}
for (--i; i >= 0; i--)
@@ -816,7 +816,7 @@ static int say_from_to(struct vc_data *vc, u_long from, u_long to,
spk_old_attr = spk_attr;
spk_attr = get_attributes(vc, (u_short *)from);
while (from < to) {
- buf[i++] = (char)get_char(vc, (u_short *) from, &tmp);
+ buf[i++] = (char)get_char(vc, (u_short *)from, &tmp);
from += 2;
if (i >= vc->vc_size_row)
break;
@@ -892,7 +892,7 @@ static int get_sentence_buf(struct vc_data *vc, int read_punc)
spk_attr = get_attributes(vc, (u_short *)start);
while (start < end) {
- sentbuf[bn][i] = (char)get_char(vc, (u_short *) start, &tmp);
+ sentbuf[bn][i] = (char)get_char(vc, (u_short *)start, &tmp);
if (i > 0) {
if (sentbuf[bn][i] == SPACE && sentbuf[bn][i - 1] == '.'
&& numsentences[bn] < 9) {
@@ -1040,7 +1040,7 @@ static void say_position(struct vc_data *vc)
static void say_char_num(struct vc_data *vc)
{
u_char tmp;
- u_short ch = get_char(vc, (u_short *) spk_pos, &tmp);
+ u_short ch = get_char(vc, (u_short *)spk_pos, &tmp);
ch &= 0xff;
synth_printf(spk_msg_get(MSG_CHAR_INFO), ch, ch);
@@ -1085,7 +1085,7 @@ static void spkup_write(const char *in_buf, int count)
(currsentence <= numsentences[bn]))
synth_insert_next_index(currsentence++);
}
- ch = (u_char) *in_buf++;
+ ch = (u_char)*in_buf++;
char_type = spk_chartab[ch];
if (ch == old_ch && !(char_type & B_NUM)) {
if (++rep_count > 2)
@@ -1579,7 +1579,7 @@ static int count_highlight_color(struct vc_data *vc)
int cc;
int vc_num = vc->vc_num;
u16 ch;
- u16 *start = (u16 *) vc->vc_origin;
+ u16 *start = (u16 *)vc->vc_origin;
for (i = 0; i < 8; i++)
speakup_console[vc_num]->ht.bgcount[i] = 0;
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index 0149edc1e0ae..aeb2b865615a 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -137,7 +137,7 @@ static void __speakup_paste_selection(struct work_struct *work)
struct speakup_paste_work *spw =
container_of(work, struct speakup_paste_work, work);
struct tty_struct *tty = xchg(&spw->tty, NULL);
- struct vc_data *vc = (struct vc_data *) tty->driver_data;
+ struct vc_data *vc = (struct vc_data *)tty->driver_data;
int pasted = 0, count;
struct tty_ldisc *ld;
DECLARE_WAITQUEUE(wait, current);
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index c2c435cc3d63..ef89dc1c21c8 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -99,7 +99,7 @@ static irqreturn_t synth_readbuf_handler(int irq, void *dev_id)
while (inb_p(speakup_info.port_tts + UART_LSR) & UART_LSR_DR) {
c = inb_p(speakup_info.port_tts+UART_RX);
- synth->read_buff_add((u_char) c);
+ synth->read_buff_add((u_char)c);
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return IRQ_HANDLED;
@@ -113,7 +113,7 @@ static void start_serial_interrupt(int irq)
return;
rv = request_irq(irq, synth_readbuf_handler, IRQF_SHARED,
- "serial", (void *) synth_readbuf_handler);
+ "serial", (void *)synth_readbuf_handler);
if (rv)
pr_err("Unable to request Speakup serial I R Q\n");
@@ -141,7 +141,7 @@ void spk_stop_serial_interrupt(void)
/* Turn off interrupts */
outb(0, speakup_info.port_tts+UART_IER);
/* Free IRQ */
- free_irq(serstate->irq, (void *) synth_readbuf_handler);
+ free_irq(serstate->irq, (void *)synth_readbuf_handler);
}
int spk_wait_for_xmitr(void)
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index 6b1d0f538bbd..ed3e4282f41c 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -20,8 +20,8 @@
*/
#include <linux/unistd.h>
-#include <linux/miscdevice.h> /* for misc_register, and SYNTH_MINOR */
-#include <linux/poll.h> /* for poll_wait() */
+#include <linux/miscdevice.h> /* for misc_register, and SYNTH_MINOR */
+#include <linux/poll.h> /* for poll_wait() */
#include <linux/sched.h> /* schedule(), signal_pending(), TASK_INTERRUPTIBLE */
#include "spk_priv.h"
@@ -55,27 +55,26 @@ static struct var_t vars[] = {
V_LAST_VAR
};
-/*
- * These attributes will appear in /sys/accessibility/speakup/soft.
- */
+/* These attributes will appear in /sys/accessibility/speakup/soft. */
+
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute freq_attribute =
- __ATTR(freq, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(freq, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
- __ATTR(punct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(punct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
- __ATTR(tone, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(tone, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
- __ATTR(voice, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(voice, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
/*
* We should uncomment the following definition, when we agree on a
@@ -85,15 +84,15 @@ static struct kobj_attribute vol_attribute =
*/
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
@@ -162,8 +161,8 @@ static char *get_initstring(void)
cp = buf;
var = synth_soft.vars;
while (var->var_id != MAXVARS) {
- if (var->var_id != CAPS_START && var->var_id != CAPS_STOP
- && var->var_id != DIRECT)
+ if (var->var_id != CAPS_START && var->var_id != CAPS_STOP &&
+ var->var_id != DIRECT)
cp = cp + sprintf(cp, var->u.n.synth_fmt,
var->u.n.value);
var++;
@@ -277,8 +276,7 @@ static ssize_t softsynth_write(struct file *fp, const char __user *buf,
return count;
}
-static unsigned int softsynth_poll(struct file *fp,
- struct poll_table_struct *wait)
+static unsigned int softsynth_poll(struct file *fp, struct poll_table_struct *wait)
{
unsigned long flags;
int ret = 0;
@@ -310,10 +308,8 @@ static const struct file_operations softsynth_fops = {
.release = softsynth_close,
};
-
static int softsynth_probe(struct spk_synth *synth)
{
-
if (misc_registered != 0)
return 0;
memset(&synth_device, 0, sizeof(synth_device));
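
The speakup attribute declarations in this file (and in the spkout and txprt drivers below) all use S_IWUSR | S_IRUGO. For reference, a hedged equivalent written with an octal mode, which is what newer checkpatch prefers; the driver itself keeps the symbolic constants:

    /* S_IWUSR | S_IRUGO == 0200 | 0444 == 0644: owner may write, everyone may read. */
    static struct kobj_attribute rate_attribute =
            __ATTR(rate, 0644, spk_var_show, spk_var_store);
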
diff --git a/drivers/staging/speakup/speakup_spkout.c b/drivers/staging/speakup/speakup_spkout.c
index e449f2770c1f..586890908826 100644
--- a/drivers/staging/speakup/speakup_spkout.c
+++ b/drivers/staging/speakup/speakup_spkout.c
@@ -1,6 +1,6 @@
/*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
-* this version considerably modified by David Borowski, david575@rogers.com
+ * this version considerably modified by David Borowski, david575@rogers.com
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
@@ -40,34 +40,33 @@ static struct var_t vars[] = {
V_LAST_VAR
};
-/*
- * These attributes will appear in /sys/accessibility/speakup/spkout.
- */
+/* These attributes will appear in /sys/accessibility/speakup/spkout. */
+
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
- __ATTR(punct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(punct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
- __ATTR(tone, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(tone, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/speakup_txprt.c b/drivers/staging/speakup/speakup_txprt.c
index fd98d4ffcb3e..b3d2cfd20ac8 100644
--- a/drivers/staging/speakup/speakup_txprt.c
+++ b/drivers/staging/speakup/speakup_txprt.c
@@ -1,6 +1,6 @@
/*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
-* this version considerably modified by David Borowski, david575@rogers.com
+ * this version considerably modified by David Borowski, david575@rogers.com
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
@@ -36,32 +36,31 @@ static struct var_t vars[] = {
V_LAST_VAR
};
-/*
- * These attributes will appear in /sys/accessibility/speakup/txprt.
- */
+/* These attributes will appear in /sys/accessibility/speakup/txprt. */
+
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
- __ATTR(tone, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(tone, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/spk_priv_keyinfo.h b/drivers/staging/speakup/spk_priv_keyinfo.h
index 130e9cb0118b..c95b68ebd8e7 100644
--- a/drivers/staging/speakup/spk_priv_keyinfo.h
+++ b/drivers/staging/speakup/spk_priv_keyinfo.h
@@ -23,84 +23,82 @@
#define FIRST_SYNTH_VAR RATE
/* 0 is reserved for no remap */
-#define SPEAKUP_GOTO 0x01
-#define SPEECH_KILL 0x02
-#define SPEAKUP_QUIET 0x03
-#define SPEAKUP_CUT 0x04
-#define SPEAKUP_PASTE 0x05
-#define SAY_FIRST_CHAR 0x06
-#define SAY_LAST_CHAR 0x07
-#define SAY_CHAR 0x08
-#define SAY_PREV_CHAR 0x09
-#define SAY_NEXT_CHAR 0x0a
-#define SAY_WORD 0x0b
-#define SAY_PREV_WORD 0x0c
-#define SAY_NEXT_WORD 0x0d
-#define SAY_LINE 0x0e
-#define SAY_PREV_LINE 0x0f
-#define SAY_NEXT_LINE 0x10
-#define TOP_EDGE 0x11
-#define BOTTOM_EDGE 0x12
-#define LEFT_EDGE 0x13
-#define RIGHT_EDGE 0x14
-#define SPELL_PHONETIC 0x15
-#define SPELL_WORD 0x16
-#define SAY_SCREEN 0x17
-#define SAY_POSITION 0x18
-#define SAY_ATTRIBUTES 0x19
-#define SPEAKUP_OFF 0x1a
-#define SPEAKUP_PARKED 0x1b
-#define SAY_LINE_INDENT 0x1c
-#define SAY_FROM_TOP 0x1d
-#define SAY_TO_BOTTOM 0x1e
-#define SAY_FROM_LEFT 0x1f
-#define SAY_TO_RIGHT 0x20
-#define SAY_CHAR_NUM 0x21
-#define EDIT_SOME 0x22
-#define EDIT_MOST 0x23
-#define SAY_PHONETIC_CHAR 0x24
-#define EDIT_DELIM 0x25
-#define EDIT_REPEAT 0x26
-#define EDIT_EXNUM 0x27
-#define SET_WIN 0x28
-#define CLEAR_WIN 0x29
-#define ENABLE_WIN 0x2a
-#define SAY_WIN 0x2b
-#define SPK_LOCK 0x2c
-#define SPEAKUP_HELP 0x2d
-#define TOGGLE_CURSORING 0x2e
-#define READ_ALL_DOC 0x2f
-#define SPKUP_MAX_FUNC 0x30 /* one greater than the last func handler */
-
-#define SPK_KEY 0x80
-#define FIRST_EDIT_BITS 0x22
-
+#define SPEAKUP_GOTO 0x01
+#define SPEECH_KILL 0x02
+#define SPEAKUP_QUIET 0x03
+#define SPEAKUP_CUT 0x04
+#define SPEAKUP_PASTE 0x05
+#define SAY_FIRST_CHAR 0x06
+#define SAY_LAST_CHAR 0x07
+#define SAY_CHAR 0x08
+#define SAY_PREV_CHAR 0x09
+#define SAY_NEXT_CHAR 0x0a
+#define SAY_WORD 0x0b
+#define SAY_PREV_WORD 0x0c
+#define SAY_NEXT_WORD 0x0d
+#define SAY_LINE 0x0e
+#define SAY_PREV_LINE 0x0f
+#define SAY_NEXT_LINE 0x10
+#define TOP_EDGE 0x11
+#define BOTTOM_EDGE 0x12
+#define LEFT_EDGE 0x13
+#define RIGHT_EDGE 0x14
+#define SPELL_PHONETIC 0x15
+#define SPELL_WORD 0x16
+#define SAY_SCREEN 0x17
+#define SAY_POSITION 0x18
+#define SAY_ATTRIBUTES 0x19
+#define SPEAKUP_OFF 0x1a
+#define SPEAKUP_PARKED 0x1b
+#define SAY_LINE_INDENT 0x1c
+#define SAY_FROM_TOP 0x1d
+#define SAY_TO_BOTTOM 0x1e
+#define SAY_FROM_LEFT 0x1f
+#define SAY_TO_RIGHT 0x20
+#define SAY_CHAR_NUM 0x21
+#define EDIT_SOME 0x22
+#define EDIT_MOST 0x23
+#define SAY_PHONETIC_CHAR 0x24
+#define EDIT_DELIM 0x25
+#define EDIT_REPEAT 0x26
+#define EDIT_EXNUM 0x27
+#define SET_WIN 0x28
+#define CLEAR_WIN 0x29
+#define ENABLE_WIN 0x2a
+#define SAY_WIN 0x2b
+#define SPK_LOCK 0x2c
+#define SPEAKUP_HELP 0x2d
+#define TOGGLE_CURSORING 0x2e
+#define READ_ALL_DOC 0x2f
+#define SPKUP_MAX_FUNC 0x30 /* one greater than the last func handler */
+#define SPK_KEY 0x80
+#define FIRST_EDIT_BITS 0x22
#define FIRST_SET_VAR SPELL_DELAY
-#define VAR_START 0x40 /* increase if adding more than 0x3f functions */
+#define VAR_START 0x40 /* increase if adding more than 0x3f functions */
/* keys for setting variables, must be ordered same as the enum for var_ids */
/* with dec being even and inc being 1 greater */
-#define SPELL_DELAY_DEC (VAR_START+0)
-#define SPELL_DELAY_INC (SPELL_DELAY_DEC+1)
-#define PUNC_LEVEL_DEC (SPELL_DELAY_DEC+2)
-#define PUNC_LEVEL_INC (PUNC_LEVEL_DEC+1)
-#define READING_PUNC_DEC (PUNC_LEVEL_DEC+2)
-#define READING_PUNC_INC (READING_PUNC_DEC+1)
-#define ATTRIB_BLEEP_DEC (READING_PUNC_DEC+2)
-#define ATTRIB_BLEEP_INC (ATTRIB_BLEEP_DEC+1)
-#define BLEEPS_DEC (ATTRIB_BLEEP_DEC+2)
-#define BLEEPS_INC (BLEEPS_DEC+1)
-#define RATE_DEC (BLEEPS_DEC+2)
-#define RATE_INC (RATE_DEC+1)
-#define PITCH_DEC (RATE_DEC+2)
-#define PITCH_INC (PITCH_DEC+1)
-#define VOL_DEC (PITCH_DEC+2)
-#define VOL_INC (VOL_DEC+1)
-#define TONE_DEC (VOL_DEC+2)
-#define TONE_INC (TONE_DEC+1)
-#define PUNCT_DEC (TONE_DEC+2)
-#define PUNCT_INC (PUNCT_DEC+1)
-#define VOICE_DEC (PUNCT_DEC+2)
-#define VOICE_INC (VOICE_DEC+1)
+#define SPELL_DELAY_DEC (VAR_START + 0)
+#define SPELL_DELAY_INC (SPELL_DELAY_DEC + 1)
+#define PUNC_LEVEL_DEC (SPELL_DELAY_DEC + 2)
+#define PUNC_LEVEL_INC (PUNC_LEVEL_DEC + 1)
+#define READING_PUNC_DEC (PUNC_LEVEL_DEC + 2)
+#define READING_PUNC_INC (READING_PUNC_DEC + 1)
+#define ATTRIB_BLEEP_DEC (READING_PUNC_DEC + 2)
+#define ATTRIB_BLEEP_INC (ATTRIB_BLEEP_DEC + 1)
+#define BLEEPS_DEC (ATTRIB_BLEEP_DEC + 2)
+#define BLEEPS_INC (BLEEPS_DEC + 1)
+#define RATE_DEC (BLEEPS_DEC + 2)
+#define RATE_INC (RATE_DEC + 1)
+#define PITCH_DEC (RATE_DEC + 2)
+#define PITCH_INC (PITCH_DEC + 1)
+#define VOL_DEC (PITCH_DEC + 2)
+#define VOL_INC (VOL_DEC + 1)
+#define TONE_DEC (VOL_DEC + 2)
+#define TONE_INC (TONE_DEC + 1)
+#define PUNCT_DEC (TONE_DEC + 2)
+#define PUNCT_INC (PUNCT_DEC + 1)
+#define VOICE_DEC (PUNCT_DEC + 2)
+#define VOICE_INC (VOICE_DEC + 1)
#endif
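
The defines above encode each settable variable as a decrement/increment key pair starting at VAR_START, with the decrement key even and the increment key one greater. A minimal sketch of how such a key could be decoded back into a variable id and direction; the helper is hypothetical and relies on the var_ids enum being ordered as the comment requires:

    static inline void decode_var_key(int key, int *var_id, int *is_inc)
    {
            *var_id = FIRST_SET_VAR + (key - VAR_START) / 2;
            *is_inc = (key - VAR_START) & 1;        /* 0 = decrement, 1 = increment */
    }
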
diff --git a/drivers/staging/speakup/spk_types.h b/drivers/staging/speakup/spk_types.h
index e8ff5d7d6419..b07f6cc4f284 100644
--- a/drivers/staging/speakup/spk_types.h
+++ b/drivers/staging/speakup/spk_types.h
@@ -1,16 +1,14 @@
#ifndef SPEAKUP_TYPES_H
#define SPEAKUP_TYPES_H
-/*
- * This file includes all of the typedefs and structs used in speakup.
- */
+/* This file includes all of the typedefs and structs used in speakup. */
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/wait.h> /* for wait_queue */
-#include <linux/init.h> /* for __init */
+#include <linux/init.h> /* for __init */
#include <linux/module.h>
#include <linux/vt_kern.h>
#include <linux/spinlock.h>
@@ -105,7 +103,7 @@ struct st_var_header {
enum var_id_t var_id;
enum var_type_t var_type;
void *p_val; /* ptr to programs variable to store value */
- void *data; /* ptr to the vars data */
+ void *data; /* ptr to the vars data */
};
struct num_var_t {
@@ -114,8 +112,8 @@ struct num_var_t {
int low;
int high;
short offset, multiplier; /* for fiddling rates etc. */
- char *out_str; /* if synth needs char representation of number */
- int value; /* current value */
+ char *out_str; /* if synth needs char representation of number */
+ int value; /* current value */
};
struct punc_var_t {
@@ -169,7 +167,7 @@ struct spk_synth {
int (*probe)(struct spk_synth *synth);
void (*release)(void);
const char *(*synth_immediate)(struct spk_synth *synth,
- const char *buff);
+ const char *buff);
void (*catch_up)(struct spk_synth *synth);
void (*flush)(struct spk_synth *synth);
int (*is_alive)(struct spk_synth *synth);
@@ -181,7 +179,7 @@ struct spk_synth {
struct attribute_group attributes;
};
-/**
+/*
* module_spk_synth() - Helper macro for registering a speakup driver
* @__spk_synth: spk_synth struct
* Helper macro for speakup drivers which do not do anything special in module
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 54b2f3918628..a61c02ba06da 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -8,7 +8,7 @@
#include <linux/delay.h> /* for loops_per_sec */
#include <linux/kmod.h>
#include <linux/jiffies.h>
-#include <linux/uaccess.h> /* for copy_from_user */
+#include <linux/uaccess.h> /* for copy_from_user */
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kthread.h>
@@ -67,13 +67,14 @@ int spk_serial_synth_probe(struct spk_synth *synth)
return -ENODEV;
}
pr_info("%s: ttyS%i, Driver Version %s\n",
- synth->long_name, synth->ser, synth->version);
+ synth->long_name, synth->ser, synth->version);
synth->alive = 1;
return 0;
}
EXPORT_SYMBOL_GPL(spk_serial_synth_probe);
-/* Main loop of the progression thread: keep eating from the buffer
+/*
+ * Main loop of the progression thread: keep eating from the buffer
* and push to the serial port, waiting as needed
*
* For devices that have a "full" notification mechanism, the driver can
@@ -303,12 +304,11 @@ void spk_get_index_count(int *linecount, int *sentcount)
sentence_count = ind % 10;
if ((ind / 10) <= synth->indexing.currindex)
- index_count = synth->indexing.currindex-(ind/10);
+ index_count = synth->indexing.currindex - (ind / 10);
else
index_count = synth->indexing.currindex
- -synth->indexing.lowindex
- + synth->indexing.highindex-(ind/10)+1;
-
+ - synth->indexing.lowindex
+ + synth->indexing.highindex - (ind / 10) + 1;
}
*sentcount = sentence_count;
*linecount = index_count;
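
The arithmetic above counts how far the synthesizer's reported index lags behind currindex, wrapping from highindex back to lowindex when needed. A short worked example with illustrative values:

    int lowindex = 1, highindex = 9;        /* index range used by the synth          */
    int currindex = 2, ind = 85;            /* ind / 10 == 8, which is > currindex    */
    int index_count = currindex - lowindex + highindex - ind / 10 + 1;
    /* 2 - 1 + 9 - 8 + 1 == 3: the counter wrapped 8 -> 9 -> 1 -> 2, three steps */
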
@@ -406,8 +406,8 @@ static int do_synth_init(struct spk_synth *in_synth)
speakup_register_var(var);
if (!spk_quiet_boot)
synth_printf("%s found\n", synth->long_name);
- if (synth->attributes.name
- && sysfs_create_group(speakup_kobj, &synth->attributes) < 0)
+ if (synth->attributes.name && sysfs_create_group(speakup_kobj,
+ &synth->attributes) < 0)
return -ENOMEM;
synth_flags = synth->flags;
wake_up_interruptible_all(&speakup_event);
@@ -476,10 +476,10 @@ void synth_remove(struct spk_synth *in_synth)
break;
}
for ( ; synths[i] != NULL; i++) /* compress table */
- synths[i] = synths[i+1];
+ synths[i] = synths[i + 1];
module_status = 0;
mutex_unlock(&spk_mutex);
}
EXPORT_SYMBOL_GPL(synth_remove);
-short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC|B_SYM };
+short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM };
diff --git a/drivers/staging/speakup/thread.c b/drivers/staging/speakup/thread.c
index 90c383ee7c3f..8c64f1ada6e0 100644
--- a/drivers/staging/speakup/thread.c
+++ b/drivers/staging/speakup/thread.c
@@ -27,7 +27,7 @@ int speakup_thread(void *data)
our_sound = spk_unprocessed_sound;
spk_unprocessed_sound.active = 0;
prepare_to_wait(&speakup_event, &wait,
- TASK_INTERRUPTIBLE);
+ TASK_INTERRUPTIBLE);
should_break = kthread_should_stop() ||
our_sound.active ||
(synth && synth->catch_up && synth->alive &&
@@ -47,7 +47,8 @@ int speakup_thread(void *data)
if (our_sound.active)
kd_mksound(our_sound.freq, our_sound.jiffies);
if (synth && synth->catch_up && synth->alive) {
- /* It is up to the callee to take the lock, so that it
+ /*
+ * It is up to the callee to take the lock, so that it
* can sleep whenever it likes
*/
synth->catch_up(synth);
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index 21186e3dc7ad..cc984196020f 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -237,8 +237,7 @@ int spk_set_num_var(int input, struct st_var_header *var, int how)
if (!var_data->u.n.out_str)
l = sprintf(cp, var_data->u.n.synth_fmt, (int)val);
else
- l = sprintf(cp,
- var_data->u.n.synth_fmt, var_data->u.n.out_str[val]);
+ l = sprintf(cp, var_data->u.n.synth_fmt, var_data->u.n.out_str[val]);
synth_printf("%s", cp);
return 0;
}
@@ -266,7 +265,8 @@ int spk_set_string_var(const char *page, struct st_var_header *var, int len)
return 0;
}
-/* spk_set_mask_bits sets or clears the punc/delim/repeat bits,
+/*
+ * spk_set_mask_bits sets or clears the punc/delim/repeat bits,
* if input is null uses the defaults.
* values for how: 0 clears bits of chars supplied,
* 1 clears allk, 2 sets bits for chars
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
index 9081b3f8779c..54f490090a59 100644
--- a/drivers/staging/unisys/include/iochannel.h
+++ b/drivers/staging/unisys/include/iochannel.h
@@ -1,20 +1,20 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION */
+/* Copyright (C) 2010 - 2016 UNISYS CORPORATION */
/* All rights reserved. */
#ifndef __IOCHANNEL_H__
#define __IOCHANNEL_H__
/*
* Everything needed for IOPart-GuestPart communication is define in
- * this file. Note: Everything is OS-independent because this file is
+ * this file. Note: Everything is OS-independent because this file is
* used by Windows, Linux and possible EFI drivers.
*/
/*
* Communication flow between the IOPart and GuestPart uses the channel headers
- * channel state. The following states are currently being used:
+ * channel state. The following states are currently being used:
* UNINIT(All Zeroes), CHANNEL_ATTACHING, CHANNEL_ATTACHED, CHANNEL_OPENED
*
- * additional states will be used later. No locking is needed to switch between
+ * Additional states will be used later. No locking is needed to switch between
* states due to the following rules:
*
* 1. IOPart is only the only partition allowed to change from UNIT
@@ -39,10 +39,11 @@
#define ULTRA_VSWITCH_CHANNEL_PROTOCOL_SIGNATURE \
ULTRA_CHANNEL_PROTOCOL_SIGNATURE
-/* Must increment these whenever you insert or delete fields within this channel
- * struct. Also increment whenever you change the meaning of fields within this
- * channel struct so as to break pre-existing software. Note that you can
- * usually add fields to the END of the channel struct withOUT needing to
+/*
+ * Must increment these whenever you insert or delete fields within this channel
+ * struct. Also increment whenever you change the meaning of fields within this
+ * channel struct so as to break pre-existing software. Note that you can
+ * usually add fields to the END of the channel struct without needing to
* increment this.
*/
#define ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID 2
@@ -70,59 +71,62 @@
#define MINNUM(a, b) (((a) < (b)) ? (a) : (b))
#define MAXNUM(a, b) (((a) > (b)) ? (a) : (b))
-/* define the two queues per data channel between iopart and ioguestparts */
-/* used by ioguestpart to 'insert' signals to iopart */
+/* Define the two queues per data channel between iopart and ioguestparts. */
+/* Used by ioguestpart to 'insert' signals to iopart. */
#define IOCHAN_TO_IOPART 0
-/* used by ioguestpart to 'remove' signals from iopart, same previous queue */
+/* Used by ioguestpart to 'remove' signals from iopart, same previous queue. */
#define IOCHAN_FROM_IOPART 1
-/* size of cdb - i.e., scsi cmnd */
+/* Size of cdb - i.e., SCSI cmnd */
#define MAX_CMND_SIZE 16
#define MAX_SENSE_SIZE 64
#define MAX_PHYS_INFO 64
-/* various types of network packets that can be sent in cmdrsp */
+/* Various types of network packets that can be sent in cmdrsp. */
enum net_types {
- NET_RCV_POST = 0, /* submit buffer to hold receiving
+ NET_RCV_POST = 0, /*
+ * Submit buffer to hold receiving
* incoming packet
*/
- /* virtnic -> uisnic */
+ /* visornic -> uisnic */
NET_RCV, /* incoming packet received */
/* uisnic -> virtpci */
NET_XMIT, /* for outgoing net packets */
- /* virtnic -> uisnic */
+ /* visornic -> uisnic */
NET_XMIT_DONE, /* outgoing packet xmitted */
/* uisnic -> virtpci */
NET_RCV_ENBDIS, /* enable/disable packet reception */
- /* virtnic -> uisnic */
+ /* visornic -> uisnic */
NET_RCV_ENBDIS_ACK, /* acknowledge enable/disable packet */
/* reception */
- /* uisnic -> virtnic */
+ /* uisnic -> visornic */
NET_RCV_PROMISC, /* enable/disable promiscuous mode */
- /* virtnic -> uisnic */
- NET_CONNECT_STATUS, /* indicate the loss or restoration of a network
+ /* visornic -> uisnic */
+ NET_CONNECT_STATUS, /*
+ * indicate the loss or restoration of a network
* connection
*/
- /* uisnic -> virtnic */
- NET_MACADDR, /* indicates the client has requested to update
- * its MAC addr
+ /* uisnic -> visornic */
+ NET_MACADDR, /*
+ * Indicates the client has requested to update
+ * its MAC address
*/
- NET_MACADDR_ACK, /* MAC address */
+ NET_MACADDR_ACK, /* MAC address acknowledge */
};
-#define ETH_MIN_DATA_SIZE 46 /* minimum eth data size */
-#define ETH_MIN_PACKET_SIZE (ETH_HLEN + ETH_MIN_DATA_SIZE)
+#define ETH_MIN_DATA_SIZE 46 /* minimum eth data size */
+#define ETH_MIN_PACKET_SIZE (ETH_HLEN + ETH_MIN_DATA_SIZE)
-#define VISOR_ETH_MAX_MTU 16384 /* maximum data size */
+#define VISOR_ETH_MAX_MTU 16384 /* maximum data size */
#ifndef MAX_MACADDR_LEN
#define MAX_MACADDR_LEN 6 /* number of bytes in MAC address */
-#endif /* MAX_MACADDR_LEN */
+#endif
-/* various types of scsi task mgmt commands */
+/* Various types of SCSI task mgmt commands. */
enum task_mgmt_types {
TASK_MGMT_ABORT_TASK = 1,
TASK_MGMT_BUS_RESET,
@@ -130,7 +134,7 @@ enum task_mgmt_types {
TASK_MGMT_TARGET_RESET,
};
-/* various types of vdisk mgmt commands */
+/* Various types of vdisk mgmt commands. */
enum vdisk_mgmt_types {
VDISK_MGMT_ACQUIRE = 1,
VDISK_MGMT_RELEASE,
@@ -144,7 +148,7 @@ struct phys_info {
#define MIN_NUMSIGNALS 64
-/* structs with pragma pack */
+/* Structs with pragma pack. */
struct guest_phys_info {
u64 address;
@@ -154,9 +158,9 @@ struct guest_phys_info {
#define GPI_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct guest_phys_info))
struct uisscsi_dest {
- u32 channel; /* channel == bus number */
- u32 id; /* id == target number */
- u32 lun; /* lun == logical unit number */
+ u32 channel; /* channel == bus number */
+ u32 id; /* id == target number */
+ u32 lun; /* lun == logical unit number */
} __packed;
struct vhba_wwnn {
@@ -164,7 +168,8 @@ struct vhba_wwnn {
u32 wwnn2;
} __packed;
-/* WARNING: Values stired in this structure must contain maximum counts (not
+/*
+ * WARNING: Values stored in this structure must contain maximum counts (not
* maximum values).
*/
struct vhba_config_max {/* 20 bytes */
@@ -187,23 +192,24 @@ struct uiscmdrsp_scsi {
* information for each
* fragment
*/
- enum dma_data_direction data_dir; /* direction of the data, if any */
+ enum dma_data_direction data_dir; /* direction of the data, if any */
struct uisscsi_dest vdest; /* identifies the virtual hba, id, */
/* channel, lun to which cmd was sent */
- /* Needed to queue the rsp back to cmd originator */
- int linuxstat; /* original Linux status used by linux vdisk */
+ /* Needed to queue the rsp back to cmd originator. */
+ int linuxstat; /* original Linux status used by Linux vdisk */
u8 scsistat; /* the scsi status */
u8 addlstat; /* non-scsi status */
#define ADDL_SEL_TIMEOUT 4
- /* the following fields are need to determine the result of command */
+ /* The following fields are needed to determine the result of the command. */
u8 sensebuf[MAX_SENSE_SIZE]; /* sense info in case cmd failed; */
- /* it holds the sense_data struct; */
- /* see that struct for details. */
- void *vdisk; /* pointer to the vdisk to clean up when IO completes. */
+ /* sensebuf holds the sense_data struct; */
+ /* See sense_data struct for more details. */
+ void *vdisk; /* Pointer to the vdisk to clean up when IO completes. */
int no_disk_result;
- /* used to return no disk inquiry result
+ /*
+ * Used to return no disk inquiry result
* when no_disk_result is set to 1,
* scsi.scsistat is SAM_STAT_GOOD
* scsi.addlstat is 0
@@ -212,35 +218,44 @@ struct uiscmdrsp_scsi {
*/
} __packed;
-/* Defines to support sending correct inquiry result when no disk is
+/*
+ * Defines to support sending correct inquiry result when no disk is
* configured.
*/
-/* From SCSI SPC2 -
+/*
+ * From SCSI SPC2 -
*
* If the target is not capable of supporting a device on this logical unit, the
* device server shall set this field to 7Fh (PERIPHERAL QUALIFIER set to 011b
* and PERIPHERAL DEVICE TYPE set to 1Fh).
*
- *The device server is capable of supporting the specified peripheral device
- *type on this logical unit. However, the physical device is not currently
- *connected to this logical unit.
+ * The device server is capable of supporting the specified peripheral device
+ * type on this logical unit. However, the physical device is not currently
+ * connected to this logical unit.
*/
-#define DEV_NOT_CAPABLE 0x7f /* peripheral qualifier of 0x3 */
- /* peripheral type of 0x1f */
- /* specifies no device but target present */
+#define DEV_NOT_CAPABLE 0x7f /*
+ * peripheral qualifier of 0x3
+ * peripheral type of 0x1f
+ * specifies no device but target present
+ */
-#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20 /* peripheral qualifier of 0x1 */
- /* peripheral type of 0 - disk */
- /* specifies device capable, but not present */
+#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20 /* peripheral qualifier of 0x1
+ * peripheral type of 0 - disk
+ * Specifies device capable, but
+ * not present
+ */
-#define DEV_HISUPPORT 0x10 /* HiSup = 1; shows support for report luns */
- /* must be returned for lun 0. */
+#define DEV_HISUPPORT 0x10 /*
+ * HiSup = 1; shows support for report luns
+ * must be returned for lun 0.
+ */
-/* NOTE: Linux code assumes inquiry contains 36 bytes. Without checking length
- * in buf[4] some linux code accesses bytes beyond 5 to retrieve vendor, product
- * & revision. Yikes! So let us always send back 36 bytes, the minimum for
+/*
+ * NOTE: Linux code assumes inquiry contains 36 bytes. Without checking length
+ * in buf[4] some Linux code accesses bytes beyond 5 to retrieve vendor, product
+ * and revision. Yikes! So let us always send back 36 bytes, the minimum for
* inquiry result.
*/
#define NO_DISK_INQUIRY_RESULT_LEN 36
@@ -248,11 +263,12 @@ struct uiscmdrsp_scsi {
#define MIN_INQUIRY_RESULT_LEN 5 /* 5 bytes minimum for inquiry result */
/* SCSI device version for no disk inquiry result */
-#define SCSI_SPC2_VER 4 /* indicates SCSI SPC2 (SPC3 is 5) */
+#define SCSI_SPC2_VER 4 /* indicates SCSI SPC2 (SPC3 is 5) */
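For orientation only, these defines line up with the standard SPC-2 INQUIRY data layout roughly as in the sketch below. This is a user-space illustration under that assumption; the driver fills in the real 36-byte buffer elsewhere, outside this hunk, and may differ in detail.

#include <stdio.h>
#include <string.h>

#define NO_DISK_INQUIRY_RESULT_LEN   36
#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20 /* qualifier 001b, type 00h (disk) */
#define DEV_HISUPPORT                0x10 /* HiSup bit lives in byte 3 */
#define SCSI_SPC2_VER                4

int main(void)
{
        unsigned char buf[NO_DISK_INQUIRY_RESULT_LEN];

        memset(buf, 0, sizeof(buf));
        buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;   /* capable, but no disk present */
        buf[2] = SCSI_SPC2_VER;                  /* claims SPC-2 */
        buf[3] |= DEV_HISUPPORT;                 /* hierarchical LUN support */
        buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5; /* additional length = 31 */
        /* bytes 8..35 would carry the vendor, product and revision strings */
        printf("%02x %02x %02x %02x %02x\n",
               buf[0], buf[1], buf[2], buf[3], buf[4]);
        return 0;
}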
-/* Struct & Defines to support sense information. */
+/* Struct and defines to support sense information. */
-/* The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is
+/*
+ * The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is
* initialized in exactly the manner that is recommended in Windows (hence the
* odd values).
* When set, these fields will have the following values:
@@ -288,7 +304,7 @@ struct net_pkt_xmt {
struct phys_info frags[MAX_PHYS_INFO]; /* physical page information */
char ethhdr[ETH_HLEN]; /* the ethernet header */
struct {
- /* these are needed for csum at uisnic end */
+ /* These are needed for csum at uisnic end */
u8 valid; /* 1 = struct is valid - else ignore */
u8 hrawoffv; /* 1 = hwrafoff is valid */
u8 nhrawoffv; /* 1 = nhwrafoff is valid */
@@ -300,7 +316,8 @@ struct net_pkt_xmt {
/* nhrawoff points to the start of the NETWORK LAYER HEADER */
} lincsum;
- /* **** NOTE ****
+ /*
+ * NOTE:
* The full packet is described in frags but the ethernet header is
* separately kept in ethhdr so that uisnic doesn't have "MAP" the
* guest memory to get to the header. uisnic needs ethhdr to
@@ -309,14 +326,15 @@ struct net_pkt_xmt {
} __packed;
struct net_pkt_xmtdone {
- u32 xmt_done_result; /* result of NET_XMIT */
+ u32 xmt_done_result; /* result of NET_XMIT */
} __packed;
-/* RCVPOST_BUF_SIZe must be at most page_size(4096) - cache_line_size (64) The
+/*
+ * RCVPOST_BUF_SIZE must be at most page_size(4096) - cache_line_size(64). The
* reason is because dev_skb_alloc which is used to generate RCV_POST skbs in
- * virtnic requires that there is "overhead" in the buffer, and pads 16 bytes. I
- * prefer to use 1 full cache line size for "overhead" so that transfers are
- * better. IOVM requires that a buffer be represented by 1 phys_info structure
+ * visornic requires that there is "overhead" in the buffer, and pads 16 bytes.
+ * Use 1 full cache line size for "overhead" so that transfers are optimized.
+ * IOVM requires that a buffer be represented by 1 phys_info structure
* which can only cover page_size.
*/
#define RCVPOST_BUF_SIZE 4032
@@ -324,26 +342,38 @@ struct net_pkt_xmtdone {
((VISOR_ETH_MAX_MTU + ETH_HLEN + RCVPOST_BUF_SIZE - 1) \
/ RCVPOST_BUF_SIZE)
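A quick sanity check of the chain-length arithmetic above: for a maximum-size frame the macro works out to five 4032-byte receive buffers. A stand-alone user-space restatement (ETH_HLEN assumed to be the usual 14 bytes; illustration only, not driver code):

#include <stdio.h>

#define ETH_HLEN          14    /* standard Ethernet header length */
#define VISOR_ETH_MAX_MTU 16384 /* maximum data size */
#define RCVPOST_BUF_SIZE  4032  /* 4096-byte page minus 64-byte cache line */
#define MAX_NET_RCV_CHAIN \
        ((VISOR_ETH_MAX_MTU + ETH_HLEN + RCVPOST_BUF_SIZE - 1) / \
         RCVPOST_BUF_SIZE)

int main(void)
{
        /* ceil((16384 + 14) / 4032) == 5 buffers must be chained per frame */
        printf("MAX_NET_RCV_CHAIN = %d\n", MAX_NET_RCV_CHAIN);
        return 0;
}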
+/*
+ * The rcv buf size must be large enough to include the ethernet data len +
+ * ethernet header len; we choose 2K because it is guaranteed to be describable.
+ */
struct net_pkt_rcvpost {
- /* rcv buf size must be large enough to include ethernet data len +
- * ethernet header len - we are choosing 2K because it is guaranteed
- * to be describable
- */
- struct phys_info frag; /* physical page information for the */
- /* single fragment 2K rcv buf */
- u64 unique_num;
- /* unique_num ensure that receive posts are returned to */
- /* the Adapter which we sent them originally. */
+ /* Physical page information for the single fragment 2K rcv buf */
+ struct phys_info frag;
+
+ /*
+ * Ensures that receive posts are returned to the adapter which we sent
+ * them from originally.
+ */
+ u64 unique_num;
+
} __packed;
+/*
+ * The number of rcvbufs that can be chained is based on max mtu and size of each
+ * rcvbuf.
+ */
struct net_pkt_rcv {
- /* the number of receive buffers that can be chained */
- /* is based on max mtu and size of each rcv buf */
- u32 rcv_done_len; /* length of received data */
- u8 numrcvbufs; /* number of receive buffers that contain the */
- /* incoming data; guest end MUST chain these together. */
- void *rcvbuf[MAX_NET_RCV_CHAIN]; /* list of chained rcvbufs */
- /* each entry is a receive buffer provided by NET_RCV_POST. */
+ u32 rcv_done_len; /* length of received data */
+
+ /*
+ * numrcvbufs: number of receive buffers that contain the incoming data;
+ * the guest side MUST chain these together.
+ */
+ u8 numrcvbufs;
+
+ void *rcvbuf[MAX_NET_RCV_CHAIN]; /* list of chained rcvbufs */
+
+ /* Each entry is a receive buffer provided by NET_RCV_POST. */
/* NOTE: first rcvbuf in the chain will also be provided in net.buf. */
u64 unique_num;
u32 rcvs_dropped_delta;
@@ -351,12 +381,12 @@ struct net_pkt_rcv {
struct net_pkt_enbdis {
void *context;
- u16 enable; /* 1 = enable, 0 = disable */
+ u16 enable; /* 1 = enable, 0 = disable */
} __packed;
struct net_pkt_macaddr {
void *context;
- u8 macaddr[MAX_MACADDR_LEN]; /* 6 bytes */
+ u8 macaddr[MAX_MACADDR_LEN]; /* 6 bytes */
} __packed;
/* cmd rsp packet used for VNIC network traffic */
@@ -377,41 +407,44 @@ struct uiscmdrsp_net {
} __packed;
struct uiscmdrsp_scsitaskmgmt {
+ /* The type of task. */
enum task_mgmt_types tasktype;
- /* the type of task */
+ /* The vdisk for which this task mgmt is generated. */
struct uisscsi_dest vdest;
- /* the vdisk for which this task mgmt is generated */
+ /*
+ * This is a handle that the guest has saved off for its own use.
+ * The handle value is preserved by iopart and returned as in task
+ * mgmt rsp.
+ */
u64 handle;
- /* This is a handle that the guest has saved off for its own use.
- * Its value is preserved by iopart & returned as is in the task
- * mgmt rsp.
- */
+ /*
+ * For Linux guests, this is a pointer to wait_queue_head that a
+ * thread is waiting on to see if the taskmgmt command has completed.
+ * When the rsp is received by guest, the thread receiving the
+ * response uses this to notify the thread waiting for taskmgmt
+ * command completion. Its value is preserved by iopart and returned
+ * as is in the task mgmt rsp.
+ */
u64 notify_handle;
- /* For linux guests, this is a pointer to wait_queue_head that a
- * thread is waiting on to see if the taskmgmt command has completed.
- * When the rsp is received by guest, the thread receiving the
- * response uses this to notify the thread waiting for taskmgmt
- * command completion. Its value is preserved by iopart & returned
- * as is in the task mgmt rsp.
- */
+ /*
+ * This is a handle to the location in the guest where the result of
+ * the taskmgmt command (result field) is saved to when the response
+ * is handled. Its value is preserved by iopart and returned as is in
+ * the task mgmt rsp.
+ */
u64 notifyresult_handle;
- /* this is a handle to location in guest where the result of the
- * taskmgmt command (result field) is to saved off when the response
- * is handled. Its value is preserved by iopart & returned as is in
- * the task mgmt rsp.
- */
+ /* Result of taskmgmt command - set by IOPart - values are: */
char result;
- /* result of taskmgmt command - set by IOPart - values are: */
#define TASK_MGMT_FAILED 0
} __packed;
-/* Used by uissd to send disk add/remove notifications to Guest */
+/* Used by uissd to send disk add/remove notifications to Guest. */
/* Note that the vHba pointer is not used by the Client/Guest side. */
struct uiscmdrsp_disknotify {
u8 add; /* 0-remove, 1-add */
@@ -419,49 +452,50 @@ struct uiscmdrsp_disknotify {
u32 channel, id, lun; /* SCSI Path of Disk to added or removed */
} __packed;
-/* The following is used by virthba/vSCSI to send the Acquire/Release commands
+/*
+ * The following is used by virthba/vSCSI to send the Acquire/Release commands
* to the IOVM.
*/
struct uiscmdrsp_vdiskmgmt {
+ /* The type of task */
enum vdisk_mgmt_types vdisktype;
- /* the type of task */
+ /* The vdisk for which this task mgmt is generated */
struct uisscsi_dest vdest;
- /* the vdisk for which this task mgmt is generated */
+ /*
+ * This is a handle that the guest has saved off for its own use. Its
+ * value is preserved by iopart and returned as is in the task mgmt rsp.
+ */
u64 handle;
- /* This is a handle that the guest has saved off for its own use.
- * Its value is preserved by iopart & returned as is in the task
- * mgmt rsp.
- */
+ /*
+ * For Linux guests, this is a pointer to wait_queue_head that a
+ * thread is waiting on to see if the taskmgmt command has completed.
+ * When the rsp is received by guest, the thread receiving the
+ * response uses this to notify the thread waiting for taskmgmt
+ * command completion. Its value is preserved by iopart and returned
+ * as is in the task mgmt rsp.
+ */
u64 notify_handle;
- /* For linux guests, this is a pointer to wait_queue_head that a
- * thread is waiting on to see if the tskmgmt command has completed.
- * When the rsp is received by guest, the thread receiving the
- * response uses this to notify the thread waiting for taskmgmt
- * command completion. Its value is preserved by iopart & returned
- * as is in the task mgmt rsp.
- */
+ /*
+ * Handle to the location in guest where the result of the
+ * taskmgmt command (result field) is saved to when the response
+ * is handled. Its value is preserved by iopart and returned as is in
+ * the task mgmt rsp.
+ */
u64 notifyresult_handle;
- /* this is a handle to location in guest where the result of the
- * taskmgmt command (result field) is to saved off when the response
- * is handled. Its value is preserved by iopart & returned as is in
- * the task mgmt rsp.
- */
+ /* Result of taskmgmt command - set by IOPart - values are: */
char result;
-
- /* result of taskmgmt command - set by IOPart - values are: */
-#define VDISK_MGMT_FAILED 0
} __packed;
-/* keeping cmd & rsp info in one structure for now cmd rsp packet for scsi */
+/* Keeping cmd and rsp info in one structure for now; cmd rsp packet for SCSI. */
struct uiscmdrsp {
char cmdtype;
-/* describes what type of information is in the struct */
+/* Describes what type of information is in the struct */
#define CMD_SCSI_TYPE 1
#define CMD_NET_TYPE 2
#define CMD_SCSITASKMGMT_TYPE 3
@@ -474,11 +508,11 @@ struct uiscmdrsp {
struct uiscmdrsp_disknotify disknotify;
struct uiscmdrsp_vdiskmgmt vdiskmgmt;
};
- void *private_data; /* send the response when the cmd is */
- /* done (scsi & scsittaskmgmt). */
+ /* Send the response when the cmd is done (scsi and scsitaskmgmt). */
+ void *private_data;
struct uiscmdrsp *next; /* General Purpose Queue Link */
- struct uiscmdrsp *activeQ_next; /* Used to track active commands */
- struct uiscmdrsp *activeQ_prev; /* Used to track active commands */
+ struct uiscmdrsp *activeQ_next; /* Pointer to the next active command */
+ struct uiscmdrsp *activeQ_prev; /* Pointer to the previous active command */
} __packed;
struct iochannel_vhba {
@@ -491,7 +525,8 @@ struct iochannel_vnic {
u32 mtu; /* 4 bytes */
uuid_le zone_uuid; /* 16 bytes */
} __packed;
-/* This is just the header of the IO channel. It is assumed that directly after
+/*
+ * This is just the header of the IO channel. It is assumed that directly after
* this header there is a large region of memory which contains the command and
* response queues as specified in cmd_q and rsp_q SIGNAL_QUEUE_HEADERS.
*/
@@ -505,31 +540,19 @@ struct spar_io_channel_protocol {
} __packed;
#define MAX_CLIENTSTRING_LEN 1024
- /* client_string is NULL termimated so holds max -1 bytes */
+ /* client_string is NULL terminated so holds max - 1 bytes */
u8 client_string[MAX_CLIENTSTRING_LEN];
} __packed;
-/* INLINE functions for initializing and accessing I/O data channels */
-#define SIZEOF_PROTOCOL (COVER(sizeof(struct spar_io_channel_protocol), 64))
+/* INLINE functions for initializing and accessing I/O data channels. */
#define SIZEOF_CMDRSP (COVER(sizeof(struct uiscmdrsp), 64))
-#define MIN_IO_CHANNEL_SIZE COVER(SIZEOF_PROTOCOL + \
- 2 * MIN_NUMSIGNALS * SIZEOF_CMDRSP, 4096)
-
-/*
- * INLINE function for expanding a guest's pfn-off-size into multiple 4K page
- * pfn-off-size entires.
- */
-
-/* use 4K page sizes when we it comes to passing page information between */
-/* Guest and IOPartition. */
+/* Use 4K page sizes when passing page info between Guest and IOPartition. */
#define PI_PAGE_SIZE 0x1000
#define PI_PAGE_MASK 0x0FFF
-/* returns next non-zero index on success or zero on failure (i.e. out of
- * room)
- */
-static inline u16
+/* Returns next non-zero index on success or 0 on failure (i.e. out of room). */
+static inline u16
add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
u16 max_pi_arr_entries, struct phys_info pi_arr[])
{
@@ -538,7 +561,7 @@ add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
firstlen = PI_PAGE_SIZE - inp_off;
if (inp_len <= firstlen) {
- /* the input entry spans only one page - add as is */
+ /* The input entry spans only one page - add as is. */
if (index >= max_pi_arr_entries)
return 0;
pi_arr[index].pi_pfn = inp_pfn;
@@ -547,7 +570,7 @@ add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
return index + 1;
}
- /* this entry spans multiple pages */
+ /* This entry spans multiple pages. */
for (len = inp_len, i = 0; len;
len -= pi_arr[index + i].pi_len, i++) {
if (index + i >= max_pi_arr_entries)
@@ -565,4 +588,4 @@ add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
return index + i;
}
-#endif /* __IOCHANNEL_H__ */
+#endif /* __IOCHANNEL_H__ */
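For reference, the page-splitting arithmetic used by add_physinfo_entries() can be exercised in user space. The sketch below only counts how many 4K phys_info entries a (pfn, offset, length) region needs; it is an illustration under those assumptions, not the driver helper itself:

#include <stdio.h>

#define PI_PAGE_SIZE 0x1000
#define PI_PAGE_MASK 0x0FFF

/* Count how many 4K phys_info entries a guest buffer would need. */
static unsigned int count_physinfo_entries(unsigned short off, unsigned int len)
{
        unsigned int firstlen = PI_PAGE_SIZE - off;

        if (len <= firstlen)
                return 1;       /* fits entirely in the first page */

        /* one partial first page plus however many pages the rest spans */
        return 1 + (len - firstlen + PI_PAGE_SIZE - 1) / PI_PAGE_SIZE;
}

int main(void)
{
        /* 8 KiB starting 3840 bytes into a page spans 3 pages: 256 + 4096 + 3840 */
        printf("%u entries\n", count_physinfo_entries(0xF00, 8192));
        return 0;
}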
diff --git a/drivers/staging/unisys/include/visorbus.h b/drivers/staging/unisys/include/visorbus.h
index 677627c72c4c..03d56f818a86 100644
--- a/drivers/staging/unisys/include/visorbus.h
+++ b/drivers/staging/unisys/include/visorbus.h
@@ -166,6 +166,8 @@ struct visor_device {
struct controlvm_message_header *pending_msg_hdr;
void *vbus_hdr_info;
uuid_le partition_uuid;
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_client_bus_info;
};
#define to_visor_device(x) container_of(x, struct visor_device, device)
diff --git a/drivers/staging/unisys/visorbus/vbuschannel.h b/drivers/staging/unisys/visorbus/vbuschannel.h
index e97917522f6a..b0df26155d02 100644
--- a/drivers/staging/unisys/visorbus/vbuschannel.h
+++ b/drivers/staging/unisys/visorbus/vbuschannel.h
@@ -23,6 +23,7 @@
* the client devices and client drivers for the server end to see.
*/
#include <linux/uuid.h>
+#include <linux/ctype.h>
#include "channel.h"
/* {193b331b-c58f-11da-95a9-00e08161165f} */
@@ -50,12 +51,6 @@ static const uuid_le spar_vbus_channel_protocol_uuid =
SPAR_VBUS_CHANNEL_PROTOCOL_VERSIONID, \
SPAR_VBUS_CHANNEL_PROTOCOL_SIGNATURE)
-#define SPAR_VBUS_CHANNEL_OK_SERVER(actual_bytes) \
- (spar_check_channel_server(spar_vbus_channel_protocol_uuid, \
- "vbus", \
- sizeof(struct spar_vbus_channel_protocol),\
- actual_bytes))
-
#pragma pack(push, 1) /* both GCC and VC now allow this pragma */
/*
@@ -72,199 +67,38 @@ struct ultra_vbus_deviceinfo {
};
/**
- * vbuschannel_sanitize_buffer() - remove non-printable chars from buffer
- * @p: destination buffer where chars are written to
- * @remain: number of bytes that can be written starting at #p
- * @src: pointer to source buffer
- * @srcmax: number of valid characters at #src
- *
- * Reads chars from the buffer at @src for @srcmax bytes, and writes to
- * the buffer at @p, which is @remain bytes long, ensuring never to
- * overflow the buffer at @p, using the following rules:
- * - printable characters are simply copied from the buffer at @src to the
- * buffer at @p
- * - intervening streaks of non-printable characters in the buffer at @src
- * are replaced with a single space in the buffer at @p
- * Note that we pay no attention to '\0'-termination.
- *
- * Pass @p == NULL and @remain == 0 for this special behavior -- In this
- * case, we simply return the number of bytes that WOULD HAVE been written
- * to a buffer at @p, had it been infinitely big.
- *
- * Return: the number of bytes written to @p (or WOULD HAVE been written to
- * @p, as described in the previous paragraph)
- */
-static inline int
-vbuschannel_sanitize_buffer(char *p, int remain, char *src, int srcmax)
-{
- int chars = 0;
- int nonprintable_streak = 0;
-
- while (srcmax > 0) {
- if ((*src >= ' ') && (*src < 0x7f)) {
- if (nonprintable_streak) {
- if (remain > 0) {
- *p = ' ';
- p++;
- remain--;
- chars++;
- } else if (!p) {
- chars++;
- }
- nonprintable_streak = 0;
- }
- if (remain > 0) {
- *p = *src;
- p++;
- remain--;
- chars++;
- } else if (!p) {
- chars++;
- }
- } else {
- nonprintable_streak = 1;
- }
- src++;
- srcmax--;
- }
- return chars;
-}
-
-#define VBUSCHANNEL_ADDACHAR(ch, p, remain, chars) \
- do { \
- if (remain <= 0) \
- break; \
- *p = ch; \
- p++; chars++; remain--; \
- } while (0)
-
-/**
- * vbuschannel_itoa() - convert non-negative int to string
- * @p: destination string
- * @remain: max number of bytes that can be written to @p
- * @num: input int to convert
- *
- * Converts the non-negative value at @num to an ascii decimal string
- * at @p, writing at most @remain bytes. Note there is NO '\0' termination
- * written to @p.
- *
- * Return: number of bytes written to @p
- *
- */
-static inline int
-vbuschannel_itoa(char *p, int remain, int num)
-{
- int digits = 0;
- char s[32];
- int i;
-
- if (num == 0) {
- /* '0' is a special case */
- if (remain <= 0)
- return 0;
- *p = '0';
- return 1;
- }
- /* form a backwards decimal ascii string in <s> */
- while (num > 0) {
- if (digits >= (int)sizeof(s))
- return 0;
- s[digits++] = (num % 10) + '0';
- num = num / 10;
- }
- if (remain < digits) {
- /* not enough room left at <p> to hold number, so fill with
- * '?'
- */
- for (i = 0; i < remain; i++, p++)
- *p = '?';
- return remain;
- }
- /* plug in the decimal ascii string representing the number, by */
- /* reversing the string we just built in <s> */
- i = digits;
- while (i > 0) {
- i--;
- *p = s[i];
- p++;
- }
- return digits;
-}
-
-/**
- * vbuschannel_devinfo_to_string() - format a struct ultra_vbus_deviceinfo
- * to a printable string
+ * vbuschannel_print_devinfo() - format a struct ultra_vbus_deviceinfo
+ * and write it to a seq_file
* @devinfo: the struct ultra_vbus_deviceinfo to format
- * @p: destination string area
- * @remain: size of destination string area in bytes
+ * @seq: seq_file to write to
* @devix: the device index to be included in the output data, or -1 if no
* device index is to be included
*
- * Reads @devInfo, and converts its contents to a printable string at @p,
- * writing at most @remain bytes. Note there is NO '\0' termination
- * written to @p.
- *
- * Return: number of bytes written to @p
+ * Reads @devinfo and writes it in human-readable notation to @seq.
*/
-static inline int
-vbuschannel_devinfo_to_string(struct ultra_vbus_deviceinfo *devinfo,
- char *p, int remain, int devix)
+static inline void
+vbuschannel_print_devinfo(struct ultra_vbus_deviceinfo *devinfo,
+ struct seq_file *seq, int devix)
{
- char *psrc;
- int nsrc, x, i, pad;
- int chars = 0;
-
- psrc = &devinfo->devtype[0];
- nsrc = sizeof(devinfo->devtype);
- if (vbuschannel_sanitize_buffer(NULL, 0, psrc, nsrc) <= 0)
- return 0;
-
- /* emit device index */
- if (devix >= 0) {
- VBUSCHANNEL_ADDACHAR('[', p, remain, chars);
- x = vbuschannel_itoa(p, remain, devix);
- p += x;
- remain -= x;
- chars += x;
- VBUSCHANNEL_ADDACHAR(']', p, remain, chars);
- } else {
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
- }
-
- /* emit device type */
- x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
- p += x;
- remain -= x;
- chars += x;
- pad = 15 - x; /* pad device type to be exactly 15 chars */
- for (i = 0; i < pad; i++)
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
-
- /* emit driver name */
- psrc = &devinfo->drvname[0];
- nsrc = sizeof(devinfo->drvname);
- x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
- p += x;
- remain -= x;
- chars += x;
- pad = 15 - x; /* pad driver name to be exactly 15 chars */
- for (i = 0; i < pad; i++)
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
-
- /* emit strings */
- psrc = &devinfo->infostrs[0];
- nsrc = sizeof(devinfo->infostrs);
- x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
- p += x;
- remain -= x;
- chars += x;
- VBUSCHANNEL_ADDACHAR('\n', p, remain, chars);
-
- return chars;
+ if (!isprint(devinfo->devtype[0]))
+ return; /* uninitialized vbus device entry */
+
+ if (devix >= 0)
+ seq_printf(seq, "[%d]", devix);
+ else
+ /* vbus device entry is for bus or chipset */
+ seq_puts(seq, " ");
+
+ /*
+ * Note: because the s-Par back-end is free to scribble in this area,
+ * we never assume '\0'-termination.
+ */
+ seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->devtype),
+ (int)sizeof(devinfo->devtype), devinfo->devtype);
+ seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->drvname),
+ (int)sizeof(devinfo->drvname), devinfo->drvname);
+ seq_printf(seq, "%.*s\n", (int)sizeof(devinfo->infostrs),
+ devinfo->infostrs);
}
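The %-*.*s idiom above is what lets these fixed-size, possibly non-'\0'-terminated channel fields be printed safely: the precision caps how many bytes may be read from the source. A tiny user-space illustration (not driver code):

#include <stdio.h>

int main(void)
{
        /* A 16-byte field that is NOT '\0'-terminated. */
        char devtype[16] = { 'v', 'h', 'b', 'a', ' ', ' ', ' ', ' ',
                             ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ' };

        /*
         * Width and precision are both sizeof(devtype), so printf never
         * reads past the end of the field and pads short names with
         * spaces, just like the seq_printf() calls above.
         */
        printf("[%d]%-*.*s|\n", 0, (int)sizeof(devtype),
               (int)sizeof(devtype), devtype);
        return 0;
}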
struct spar_vbus_headerinfo {
@@ -293,11 +127,6 @@ struct spar_vbus_channel_protocol {
/* describes client device and driver for each device on the bus */
};
-#define VBUS_CH_SIZE_EXACT(MAXDEVICES) \
- (sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL) + ((MAXDEVICES) * \
- sizeof(ULTRA_VBUS_DEVICEINFO)))
-#define VBUS_CH_SIZE(MAXDEVICES) COVER(VBUS_CH_SIZE_EXACT(MAXDEVICES), 4096)
-
#pragma pack(pop)
#endif
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index fec0a54916fe..3457ef338e1e 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -14,6 +14,7 @@
* details.
*/
+#include <linux/debugfs.h>
#include <linux/uuid.h>
#include "visorbus.h"
@@ -33,6 +34,7 @@ static int visorbus_forcenomatch;
#define POLLJIFFIES_NORMALCHANNEL 10
static int busreg_rc = -ENODEV; /* stores the result from bus registration */
+static struct dentry *visorbus_debugfs_dir;
/*
* DEVICE type attributes
@@ -151,6 +153,8 @@ visorbus_release_busdevice(struct device *xdev)
{
struct visor_device *dev = dev_get_drvdata(xdev);
+ debugfs_remove(dev->debugfs_client_bus_info);
+ debugfs_remove_recursive(dev->debugfs_dir);
kfree(dev);
}
@@ -186,6 +190,7 @@ static ssize_t physaddr_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
visorchannel_get_physaddr(vdev->visorchannel));
}
+static DEVICE_ATTR_RO(physaddr);
static ssize_t nbytes_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -197,6 +202,7 @@ static ssize_t nbytes_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "0x%lx\n",
visorchannel_get_nbytes(vdev->visorchannel));
}
+static DEVICE_ATTR_RO(nbytes);
static ssize_t clientpartition_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -208,6 +214,7 @@ static ssize_t clientpartition_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
visorchannel_get_clientpartition(vdev->visorchannel));
}
+static DEVICE_ATTR_RO(clientpartition);
static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -220,6 +227,7 @@ static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n",
visorchannel_id(vdev->visorchannel, typeid));
}
+static DEVICE_ATTR_RO(typeguid);
static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -232,6 +240,7 @@ static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n",
visorchannel_zoneid(vdev->visorchannel, zoneid));
}
+static DEVICE_ATTR_RO(zoneguid);
static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -250,12 +259,6 @@ static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
drv = to_visor_driver(xdrv);
return snprintf(buf, PAGE_SIZE, "%s\n", drv->channel_types[i - 1].name);
}
-
-static DEVICE_ATTR_RO(physaddr);
-static DEVICE_ATTR_RO(nbytes);
-static DEVICE_ATTR_RO(clientpartition);
-static DEVICE_ATTR_RO(typeguid);
-static DEVICE_ATTR_RO(zoneguid);
static DEVICE_ATTR_RO(typename);
static struct attribute *channel_attrs[] = {
@@ -295,6 +298,7 @@ static ssize_t partition_handle_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "0x%llx\n", handle);
}
+static DEVICE_ATTR_RO(partition_handle);
static ssize_t partition_guid_show(struct device *dev,
struct device_attribute *attr,
@@ -303,6 +307,7 @@ static ssize_t partition_guid_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "{%pUb}\n", &vdev->partition_uuid);
}
+static DEVICE_ATTR_RO(partition_guid);
static ssize_t partition_name_show(struct device *dev,
struct device_attribute *attr,
@@ -311,6 +316,7 @@ static ssize_t partition_name_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%s\n", vdev->name);
}
+static DEVICE_ATTR_RO(partition_name);
static ssize_t channel_addr_show(struct device *dev,
struct device_attribute *attr,
@@ -320,6 +326,7 @@ static ssize_t channel_addr_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "0x%llx\n", addr);
}
+static DEVICE_ATTR_RO(channel_addr);
static ssize_t channel_bytes_show(struct device *dev,
struct device_attribute *attr,
@@ -329,6 +336,7 @@ static ssize_t channel_bytes_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "0x%llx\n", nbytes);
}
+static DEVICE_ATTR_RO(channel_bytes);
static ssize_t channel_id_show(struct device *dev,
struct device_attribute *attr,
@@ -343,77 +351,7 @@ static ssize_t channel_id_show(struct device *dev,
}
return len;
}
-
-static ssize_t client_bus_info_show(struct device *dev,
- struct device_attribute *attr,
- char *buf) {
- struct visor_device *vdev = to_visor_device(dev);
- struct visorchannel *channel = vdev->visorchannel;
-
- int i, shift, remain = PAGE_SIZE;
- unsigned long off;
- char *pos = buf;
- u8 *partition_name;
- struct ultra_vbus_deviceinfo dev_info;
-
- partition_name = "";
- if (channel) {
- if (vdev->name)
- partition_name = vdev->name;
- shift = snprintf(pos, remain,
- "Client device / client driver info for %s partition (vbus #%u):\n",
- partition_name, vdev->chipset_bus_no);
- pos += shift;
- remain -= shift;
- shift = visorchannel_read(channel,
- offsetof(struct
- spar_vbus_channel_protocol,
- chp_info),
- &dev_info, sizeof(dev_info));
- if (shift >= 0) {
- shift = vbuschannel_devinfo_to_string(&dev_info, pos,
- remain, -1);
- pos += shift;
- remain -= shift;
- }
- shift = visorchannel_read(channel,
- offsetof(struct
- spar_vbus_channel_protocol,
- bus_info),
- &dev_info, sizeof(dev_info));
- if (shift >= 0) {
- shift = vbuschannel_devinfo_to_string(&dev_info, pos,
- remain, -1);
- pos += shift;
- remain -= shift;
- }
- off = offsetof(struct spar_vbus_channel_protocol, dev_info);
- i = 0;
- while (off + sizeof(dev_info) <=
- visorchannel_get_nbytes(channel)) {
- shift = visorchannel_read(channel,
- off, &dev_info,
- sizeof(dev_info));
- if (shift >= 0) {
- shift = vbuschannel_devinfo_to_string
- (&dev_info, pos, remain, i);
- pos += shift;
- remain -= shift;
- }
- off += sizeof(dev_info);
- i++;
- }
- }
- return PAGE_SIZE - remain;
-}
-
-static DEVICE_ATTR_RO(partition_handle);
-static DEVICE_ATTR_RO(partition_guid);
-static DEVICE_ATTR_RO(partition_name);
-static DEVICE_ATTR_RO(channel_addr);
-static DEVICE_ATTR_RO(channel_bytes);
static DEVICE_ATTR_RO(channel_id);
-static DEVICE_ATTR_RO(client_bus_info);
static struct attribute *dev_attrs[] = {
&dev_attr_partition_handle.attr,
@@ -422,7 +360,6 @@ static struct attribute *dev_attrs[] = {
&dev_attr_channel_addr.attr,
&dev_attr_channel_bytes.attr,
&dev_attr_channel_id.attr,
- &dev_attr_client_bus_info.attr,
NULL
};
@@ -435,6 +372,66 @@ static const struct attribute_group *visorbus_groups[] = {
NULL
};
+/*
+ * BUS debugfs entries
+ *
+ * Define and implement display of debugfs attributes under
+ * /sys/kernel/debug/visorbus/visorbus<n>.
+ */
+
+static int client_bus_info_debugfs_show(struct seq_file *seq, void *v)
+{
+ struct visor_device *vdev = seq->private;
+ struct visorchannel *channel = vdev->visorchannel;
+
+ int i;
+ unsigned long off;
+ struct ultra_vbus_deviceinfo dev_info;
+
+ if (!channel)
+ return 0;
+
+ seq_printf(seq,
+ "Client device / client driver info for %s partition (vbus #%u):\n",
+ ((vdev->name) ? (char *)(vdev->name) : ""),
+ vdev->chipset_bus_no);
+ if (visorchannel_read(channel,
+ offsetof(struct spar_vbus_channel_protocol,
+ chp_info),
+ &dev_info, sizeof(dev_info)) >= 0)
+ vbuschannel_print_devinfo(&dev_info, seq, -1);
+ if (visorchannel_read(channel,
+ offsetof(struct spar_vbus_channel_protocol,
+ bus_info),
+ &dev_info, sizeof(dev_info)) >= 0)
+ vbuschannel_print_devinfo(&dev_info, seq, -1);
+ off = offsetof(struct spar_vbus_channel_protocol, dev_info);
+ i = 0;
+ while (off + sizeof(dev_info) <= visorchannel_get_nbytes(channel)) {
+ if (visorchannel_read(channel, off, &dev_info,
+ sizeof(dev_info)) >= 0)
+ vbuschannel_print_devinfo(&dev_info, seq, i);
+ off += sizeof(dev_info);
+ i++;
+ }
+
+ return 0;
+}
+
+static int client_bus_info_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, client_bus_info_debugfs_show,
+ inode->i_private);
+}
+
+static const struct file_operations client_bus_info_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = client_bus_info_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
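Assuming debugfs is mounted at its usual /sys/kernel/debug location, the new per-bus file can then be read like any other text file. A minimal user-space sketch (the bus name visorbus2 is only an example):

#include <stdio.h>

int main(void)
{
        /* visorbus2 is an example bus name; substitute the bus of interest */
        const char *path =
                "/sys/kernel/debug/visorbus/visorbus2/client_bus_info";
        char line[256];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}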
static void
dev_periodic_work(unsigned long __opaque)
{
@@ -610,8 +607,8 @@ create_visor_device(struct visor_device *dev)
u32 chipset_bus_no = dev->chipset_bus_no;
u32 chipset_dev_no = dev->chipset_dev_no;
- POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, chipset_dev_no, chipset_bus_no,
- POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, chipset_dev_no, chipset_bus_no,
+ DIAG_SEVERITY_PRINT);
mutex_init(&dev->visordriver_callback_lock);
dev->device.bus = &visorbus_type;
@@ -651,8 +648,8 @@ create_visor_device(struct visor_device *dev)
*/
err = device_add(&dev->device);
if (err < 0) {
- POSTCODE_LINUX_3(DEVICE_ADD_PC, chipset_bus_no,
- DIAG_SEVERITY_ERR);
+ POSTCODE_LINUX(DEVICE_ADD_PC, 0, chipset_bus_no,
+ DIAG_SEVERITY_ERR);
goto err_put;
}
@@ -966,9 +963,10 @@ static int
create_bus_instance(struct visor_device *dev)
{
int id = dev->chipset_bus_no;
+ int err;
struct spar_vbus_headerinfo *hdr_info;
- POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
hdr_info = kzalloc(sizeof(*hdr_info), GFP_KERNEL);
if (!hdr_info)
@@ -979,11 +977,26 @@ create_bus_instance(struct visor_device *dev)
dev->device.groups = visorbus_groups;
dev->device.release = visorbus_release_busdevice;
+ dev->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
+ visorbus_debugfs_dir);
+ if (!dev->debugfs_dir) {
+ err = -ENOMEM;
+ goto err_hdr_info;
+ }
+ dev->debugfs_client_bus_info =
+ debugfs_create_file("client_bus_info", S_IRUSR | S_IRGRP,
+ dev->debugfs_dir, dev,
+ &client_bus_info_debugfs_fops);
+ if (!dev->debugfs_client_bus_info) {
+ err = -ENOMEM;
+ goto err_debugfs_dir;
+ }
+
if (device_register(&dev->device) < 0) {
- POSTCODE_LINUX_3(DEVICE_CREATE_FAILURE_PC, id,
- POSTCODE_SEVERITY_ERR);
- kfree(hdr_info);
- return -ENODEV;
+ POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, 0, id,
+ DIAG_SEVERITY_ERR);
+ err = -ENODEV;
+ goto err_debugfs_created;
}
if (get_vbus_header_info(dev->visorchannel, hdr_info) >= 0) {
@@ -998,6 +1011,16 @@ create_bus_instance(struct visor_device *dev)
list_add_tail(&dev->list_all, &list_all_bus_instances);
dev_set_drvdata(&dev->device, dev);
return 0;
+
+err_debugfs_created:
+ debugfs_remove(dev->debugfs_client_bus_info);
+
+err_debugfs_dir:
+ debugfs_remove_recursive(dev->debugfs_dir);
+
+err_hdr_info:
+ kfree(hdr_info);
+ return err;
}
/**
@@ -1069,16 +1092,16 @@ chipset_bus_create(struct visor_device *dev)
int rc;
u32 bus_no = dev->chipset_bus_no;
- POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
rc = create_bus_instance(dev);
- POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
if (rc < 0)
- POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
+ DIAG_SEVERITY_ERR);
else
- POSTCODE_LINUX_3(CHIPSET_INIT_SUCCESS_PC, bus_no,
- POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, bus_no,
+ DIAG_SEVERITY_PRINT);
bus_create_response(dev, rc);
}
@@ -1097,18 +1120,18 @@ chipset_device_create(struct visor_device *dev_info)
u32 bus_no = dev_info->chipset_bus_no;
u32 dev_no = dev_info->chipset_dev_no;
- POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
+ DIAG_SEVERITY_PRINT);
rc = create_visor_device(dev_info);
device_create_response(dev_info, rc);
if (rc < 0)
- POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ DIAG_SEVERITY_ERR);
else
- POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(DEVICE_CREATE_SUCCESS_PC, dev_no, bus_no,
+ DIAG_SEVERITY_PRINT);
}
void
@@ -1274,12 +1297,17 @@ visorbus_init(void)
{
int err;
- POSTCODE_LINUX_3(DRIVER_ENTRY_PC, 0, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(DRIVER_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
+
+ visorbus_debugfs_dir = debugfs_create_dir("visorbus", NULL);
+ if (!visorbus_debugfs_dir)
+ return -ENOMEM;
+
bus_device_info_init(&clientbus_driverinfo, "clientbus", "visorbus");
err = create_bus_type();
if (err < 0) {
- POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, DIAG_SEVERITY_ERR);
+ POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, 0, DIAG_SEVERITY_ERR);
goto error;
}
@@ -1288,7 +1316,7 @@ visorbus_init(void)
return 0;
error:
- POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR);
return err;
}
@@ -1306,6 +1334,7 @@ visorbus_exit(void)
remove_bus_instance(dev);
}
remove_bus_type();
+ debugfs_remove_recursive(visorbus_debugfs_dir);
}
module_param_named(forcematch, visorbus_forcematch, int, S_IRUGO);
diff --git a/drivers/staging/unisys/visorbus/visorbus_private.h b/drivers/staging/unisys/visorbus/visorbus_private.h
index 15403fb52847..49bec1763e33 100644
--- a/drivers/staging/unisys/visorbus/visorbus_private.h
+++ b/drivers/staging/unisys/visorbus/visorbus_private.h
@@ -70,9 +70,9 @@ struct visorchannel *visorchannel_create_with_lock(u64 physaddr,
gfp_t gfp, uuid_le guid);
void visorchannel_destroy(struct visorchannel *channel);
int visorchannel_read(struct visorchannel *channel, ulong offset,
- void *local, ulong nbytes);
+ void *dest, ulong nbytes);
int visorchannel_write(struct visorchannel *channel, ulong offset,
- void *local, ulong nbytes);
+ void *dest, ulong nbytes);
u64 visorchannel_get_physaddr(struct visorchannel *channel);
ulong visorchannel_get_nbytes(struct visorchannel *channel);
char *visorchannel_id(struct visorchannel *channel, char *s);
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index 300a65dc5c6c..f51a7258bef0 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -23,6 +23,7 @@
#include <linux/io.h>
#include "visorbus.h"
+#include "visorbus_private.h"
#include "controlvmchannel.h"
#define MYDRVNAME "visorchannel"
@@ -127,19 +128,19 @@ EXPORT_SYMBOL_GPL(visorchannel_get_uuid);
int
visorchannel_read(struct visorchannel *channel, ulong offset,
- void *local, ulong nbytes)
+ void *dest, ulong nbytes)
{
if (offset + nbytes > channel->nbytes)
return -EIO;
- memcpy(local, channel->mapped + offset, nbytes);
+ memcpy(dest, channel->mapped + offset, nbytes);
return 0;
}
int
visorchannel_write(struct visorchannel *channel, ulong offset,
- void *local, ulong nbytes)
+ void *dest, ulong nbytes)
{
size_t chdr_size = sizeof(struct channel_header);
size_t copy_size;
@@ -150,10 +151,10 @@ visorchannel_write(struct visorchannel *channel, ulong offset,
if (offset < chdr_size) {
copy_size = min(chdr_size - offset, nbytes);
memcpy(((char *)(&channel->chan_hdr)) + offset,
- local, copy_size);
+ dest, copy_size);
}
- memcpy(channel->mapped + offset, local, nbytes);
+ memcpy(channel->mapped + offset, dest, nbytes);
return 0;
}
@@ -236,8 +237,9 @@ signalremove_inner(struct visorchannel *channel, u32 queue, void *msg)
if (error)
return error;
+ /* No signals to remove; have caller try again. */
if (sig_hdr.head == sig_hdr.tail)
- return -EIO; /* no signals to remove */
+ return -EAGAIN;
sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots;
@@ -299,22 +301,30 @@ EXPORT_SYMBOL_GPL(visorchannel_signalremove);
* Return: boolean indicating whether any messages in the designated
* channel/queue are present
*/
+
+static bool
+queue_empty(struct visorchannel *channel, u32 queue)
+{
+ struct signal_queue_header sig_hdr;
+
+ if (sig_read_header(channel, queue, &sig_hdr))
+ return true;
+
+ return (sig_hdr.head == sig_hdr.tail);
+}
+
bool
visorchannel_signalempty(struct visorchannel *channel, u32 queue)
{
- unsigned long flags = 0;
- struct signal_queue_header sig_hdr;
- bool rc = false;
+ bool rc;
+ unsigned long flags;
- if (channel->needs_lock)
- spin_lock_irqsave(&channel->remove_lock, flags);
+ if (!channel->needs_lock)
+ return queue_empty(channel, queue);
- if (sig_read_header(channel, queue, &sig_hdr))
- rc = true;
- if (sig_hdr.head == sig_hdr.tail)
- rc = true;
- if (channel->needs_lock)
- spin_unlock_irqrestore(&channel->remove_lock, flags);
+ spin_lock_irqsave(&channel->remove_lock, flags);
+ rc = queue_empty(channel, queue);
+ spin_unlock_irqrestore(&channel->remove_lock, flags);
return rc;
}
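The emptiness test factored out above follows the usual circular-queue convention: head == tail means no signals are pending, and the tail advances modulo max_slots on each remove. A stand-alone sketch of that convention using a local struct (not the driver's signal_queue_header):

#include <stdbool.h>
#include <stdio.h>

struct sig_queue {
        unsigned int head;      /* producer index (insert side) */
        unsigned int tail;      /* consumer index (remove side) */
        unsigned int max_slots;
};

static bool queue_empty(const struct sig_queue *q)
{
        return q->head == q->tail;
}

int main(void)
{
        struct sig_queue q = { .head = 0, .tail = 0, .max_slots = 64 };

        printf("empty: %d\n", queue_empty(&q)); /* 1: nothing queued */
        q.head = (q.head + 1) % q.max_slots;    /* producer inserts one signal */
        printf("empty: %d\n", queue_empty(&q)); /* 0: one signal pending */
        return 0;
}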
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 59871495ea85..d7148c351d3f 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -29,7 +29,7 @@
#include "visorbus_private.h"
#include "vmcallinterface.h"
-#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
+#define CURRENT_FILE_PC VISOR_BUS_PC_visorchipset_c
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
@@ -57,7 +57,6 @@ visorchipset_open(struct inode *inode, struct file *file)
if (minor_number)
return -ENODEV;
- file->private_data = NULL;
return 0;
}
@@ -499,7 +498,7 @@ controlvm_init_response(struct controlvm_message *msg,
}
}
-static void
+static int
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
int response,
enum ultra_chipset_feature features)
@@ -508,34 +507,33 @@ controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
controlvm_init_response(&outmsg, msg_hdr, response);
outmsg.cmd.init_chipset.features = features;
- if (visorchannel_signalinsert(controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg)) {
- return;
- }
+ return visorchannel_signalinsert(controlvm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg);
}
-static void
+static int
chipset_init(struct controlvm_message *inmsg)
{
static int chipset_inited;
enum ultra_chipset_feature features = 0;
int rc = CONTROLVM_RESP_SUCCESS;
+ int res = 0;
- POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(CHIPSET_INIT_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
if (chipset_inited) {
rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ res = -EIO;
goto out_respond;
}
chipset_inited = 1;
- POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(CHIPSET_INIT_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
/*
* Set features to indicate we support parahotplug (if Command
* also supports it).
*/
- features =
- inmsg->cmd.init_chipset.
- features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
+ features = inmsg->cmd.init_chipset.features &
+ ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
/*
* Set the "reply" bit so Command knows this is a
@@ -545,25 +543,25 @@ chipset_init(struct controlvm_message *inmsg)
out_respond:
if (inmsg->hdr.flags.response_expected)
- controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
+ res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
+
+ return res;
}
-static void
+static int
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
struct controlvm_message outmsg;
controlvm_init_response(&outmsg, msg_hdr, response);
if (outmsg.hdr.flags.test_message == 1)
- return;
+ return -EINVAL;
- if (visorchannel_signalinsert(controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg)) {
- return;
- }
+ return visorchannel_signalinsert(controlvm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg);
}
-static void controlvm_respond_physdev_changestate(
+static int controlvm_respond_physdev_changestate(
struct controlvm_message_header *msg_hdr, int response,
struct spar_segment_state state)
{
@@ -572,10 +570,8 @@ static void controlvm_respond_physdev_changestate(
controlvm_init_response(&outmsg, msg_hdr, response);
outmsg.cmd.device_change_state.state = state;
outmsg.cmd.device_change_state.flags.phys_device = 1;
- if (visorchannel_signalinsert(controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg)) {
- return;
- }
+ return visorchannel_signalinsert(controlvm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg);
}
enum crash_obj_type {
@@ -583,74 +579,80 @@ enum crash_obj_type {
CRASH_BUS,
};
-static void
+static int
save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
{
u32 local_crash_msg_offset;
u16 local_crash_msg_count;
+ int err;
- if (visorchannel_read(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- saved_crash_message_count),
- &local_crash_msg_count, sizeof(u16)) < 0) {
- POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
- return;
+ err = visorchannel_read(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ saved_crash_message_count),
+ &local_crash_msg_count, sizeof(u16));
+ if (err) {
+ POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
+ return err;
}
if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
- POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
- local_crash_msg_count,
- POSTCODE_SEVERITY_ERR);
- return;
+ POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
+ local_crash_msg_count,
+ DIAG_SEVERITY_ERR);
+ return -EIO;
}
- if (visorchannel_read(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- saved_crash_message_offset),
- &local_crash_msg_offset, sizeof(u32)) < 0) {
- POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
- return;
+ err = visorchannel_read(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ saved_crash_message_offset),
+ &local_crash_msg_offset, sizeof(u32));
+ if (err) {
+ POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
+ return err;
}
if (typ == CRASH_BUS) {
- if (visorchannel_write(controlvm_channel,
- local_crash_msg_offset,
- msg,
- sizeof(struct controlvm_message)) < 0) {
- POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
- return;
+ err = visorchannel_write(controlvm_channel,
+ local_crash_msg_offset,
+ msg,
+ sizeof(struct controlvm_message));
+ if (err) {
+ POSTCODE_LINUX(SAVE_MSG_BUS_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
+ return err;
}
} else {
local_crash_msg_offset += sizeof(struct controlvm_message);
- if (visorchannel_write(controlvm_channel,
- local_crash_msg_offset,
- msg,
- sizeof(struct controlvm_message)) < 0) {
- POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
- return;
+ err = visorchannel_write(controlvm_channel,
+ local_crash_msg_offset,
+ msg,
+ sizeof(struct controlvm_message));
+ if (err) {
+ POSTCODE_LINUX(SAVE_MSG_DEV_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
+ return err;
}
}
+ return 0;
}
-static void
+static int
bus_responder(enum controlvm_id cmd_id,
struct controlvm_message_header *pending_msg_hdr,
int response)
{
if (!pending_msg_hdr)
- return; /* no controlvm response needed */
+ return -EIO;
if (pending_msg_hdr->id != (u32)cmd_id)
- return;
+ return -EINVAL;
- controlvm_respond(pending_msg_hdr, response);
+ return controlvm_respond(pending_msg_hdr, response);
}
-static void
+static int
device_changestate_responder(enum controlvm_id cmd_id,
struct visor_device *p, int response,
struct spar_segment_state response_state)
@@ -660,9 +662,9 @@ device_changestate_responder(enum controlvm_id cmd_id,
u32 dev_no = p->chipset_dev_no;
if (!p->pending_msg_hdr)
- return; /* no controlvm response needed */
+ return -EIO;
if (p->pending_msg_hdr->id != cmd_id)
- return;
+ return -EINVAL;
controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
@@ -670,175 +672,74 @@ device_changestate_responder(enum controlvm_id cmd_id,
outmsg.cmd.device_change_state.dev_no = dev_no;
outmsg.cmd.device_change_state.state = response_state;
- if (visorchannel_signalinsert(controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg))
- return;
+ return visorchannel_signalinsert(controlvm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg);
}
-static void
+static int
device_responder(enum controlvm_id cmd_id,
struct controlvm_message_header *pending_msg_hdr,
int response)
{
if (!pending_msg_hdr)
- return; /* no controlvm response needed */
+ return -EIO;
if (pending_msg_hdr->id != (u32)cmd_id)
- return;
-
- controlvm_respond(pending_msg_hdr, response);
-}
-
-static void
-bus_epilog(struct visor_device *bus_info,
- u32 cmd, struct controlvm_message_header *msg_hdr,
- int response, bool need_response)
-{
- struct controlvm_message_header *pmsg_hdr = NULL;
-
- if (!bus_info) {
- /*
- * relying on a valid passed in response code
- * be lazy and re-use msg_hdr for this failure, is this ok??
- */
- pmsg_hdr = msg_hdr;
- goto out_respond;
- }
-
- if (bus_info->pending_msg_hdr) {
- /* only non-NULL if dev is still waiting on a response */
- response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
- pmsg_hdr = bus_info->pending_msg_hdr;
- goto out_respond;
- }
-
- if (need_response) {
- pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
- if (!pmsg_hdr) {
- POSTCODE_LINUX_4(MALLOC_FAILURE_PC, cmd,
- bus_info->chipset_bus_no,
- POSTCODE_SEVERITY_ERR);
- return;
- }
-
- memcpy(pmsg_hdr, msg_hdr,
- sizeof(struct controlvm_message_header));
- bus_info->pending_msg_hdr = pmsg_hdr;
- }
-
- if (response == CONTROLVM_RESP_SUCCESS) {
- switch (cmd) {
- case CONTROLVM_BUS_CREATE:
- chipset_bus_create(bus_info);
- break;
- case CONTROLVM_BUS_DESTROY:
- chipset_bus_destroy(bus_info);
- break;
- }
- }
-
-out_respond:
- bus_responder(cmd, pmsg_hdr, response);
-}
-
-static void
-device_epilog(struct visor_device *dev_info,
- struct spar_segment_state state, u32 cmd,
- struct controlvm_message_header *msg_hdr, int response,
- bool need_response, bool for_visorbus)
-{
- struct controlvm_message_header *pmsg_hdr = NULL;
-
- if (!dev_info) {
- /*
- * relying on a valid passed in response code
- * be lazy and re-use msg_hdr for this failure, is this ok??
- */
- pmsg_hdr = msg_hdr;
- goto out_respond;
- }
-
- if (dev_info->pending_msg_hdr) {
- /* only non-NULL if dev is still waiting on a response */
- response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
- pmsg_hdr = dev_info->pending_msg_hdr;
- goto out_respond;
- }
-
- if (need_response) {
- pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
- if (!pmsg_hdr) {
- response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- goto out_respond;
- }
-
- memcpy(pmsg_hdr, msg_hdr,
- sizeof(struct controlvm_message_header));
- dev_info->pending_msg_hdr = pmsg_hdr;
- }
-
- if (response >= 0) {
- switch (cmd) {
- case CONTROLVM_DEVICE_CREATE:
- chipset_device_create(dev_info);
- break;
- case CONTROLVM_DEVICE_CHANGESTATE:
- /* ServerReady / ServerRunning / SegmentStateRunning */
- if (state.alive == segment_state_running.alive &&
- state.operating ==
- segment_state_running.operating) {
- chipset_device_resume(dev_info);
- }
- /* ServerNotReady / ServerLost / SegmentStateStandby */
- else if (state.alive == segment_state_standby.alive &&
- state.operating ==
- segment_state_standby.operating) {
- /*
- * technically this is standby case
- * where server is lost
- */
- chipset_device_pause(dev_info);
- }
- break;
- case CONTROLVM_DEVICE_DESTROY:
- chipset_device_destroy(dev_info);
- break;
- }
- }
+ return -EINVAL;
-out_respond:
- device_responder(cmd, pmsg_hdr, response);
+ return controlvm_respond(pending_msg_hdr, response);
}
-static void
+static int
bus_create(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
+ struct controlvm_message_header *pmsg_hdr = NULL;
u32 bus_no = cmd->create_bus.bus_no;
- int rc = CONTROLVM_RESP_SUCCESS;
struct visor_device *bus_info;
struct visorchannel *visorchannel;
+ int err;
bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
if (bus_info && (bus_info->state.created == 1)) {
- POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
- POSTCODE_SEVERITY_ERR);
- rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
- goto out_bus_epilog;
+ POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
+ DIAG_SEVERITY_ERR);
+ err = -EEXIST;
+ goto err_respond;
}
+
bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
if (!bus_info) {
- POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
- POSTCODE_SEVERITY_ERR);
- rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- goto out_bus_epilog;
+ POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
+ DIAG_SEVERITY_ERR);
+ err = -ENOMEM;
+ goto err_respond;
}
INIT_LIST_HEAD(&bus_info->list_all);
bus_info->chipset_bus_no = bus_no;
bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
- POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
+
+ if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0)
+ save_crash_message(inmsg, CRASH_BUS);
+
+ if (inmsg->hdr.flags.response_expected == 1) {
+ pmsg_hdr = kzalloc(sizeof(*pmsg_hdr),
+ GFP_KERNEL);
+ if (!pmsg_hdr) {
+ POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
+ bus_info->chipset_bus_no,
+ DIAG_SEVERITY_ERR);
+ err = -ENOMEM;
+ goto err_free_bus_info;
+ }
+
+ memcpy(pmsg_hdr, &inmsg->hdr,
+ sizeof(struct controlvm_message_header));
+ bus_info->pending_msg_hdr = pmsg_hdr;
+ }
visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
cmd->create_bus.channel_bytes,
@@ -846,89 +747,138 @@ bus_create(struct controlvm_message *inmsg)
cmd->create_bus.bus_data_type_uuid);
if (!visorchannel) {
- POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
- POSTCODE_SEVERITY_ERR);
- rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- kfree(bus_info);
- bus_info = NULL;
- goto out_bus_epilog;
+ POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
+ DIAG_SEVERITY_ERR);
+ err = -ENOMEM;
+ goto err_free_pending_msg;
}
bus_info->visorchannel = visorchannel;
- if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0)
- save_crash_message(inmsg, CRASH_BUS);
- POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
+ /* Response will be handled by chipset_bus_create */
+ chipset_bus_create(bus_info);
+
+ POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
+ return 0;
+
+err_free_pending_msg:
+ kfree(bus_info->pending_msg_hdr);
+
+err_free_bus_info:
+ kfree(bus_info);
-out_bus_epilog:
- bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
- rc, inmsg->hdr.flags.response_expected == 1);
+err_respond:
+ if (inmsg->hdr.flags.response_expected == 1)
+ bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ return err;
}
-static void
+static int
bus_destroy(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
+ struct controlvm_message_header *pmsg_hdr = NULL;
u32 bus_no = cmd->destroy_bus.bus_no;
struct visor_device *bus_info;
- int rc = CONTROLVM_RESP_SUCCESS;
+ int err;
bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
- if (!bus_info)
- rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
- else if (bus_info->state.created == 0)
- rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ if (!bus_info) {
+ err = -ENODEV;
+ goto err_respond;
+ }
+ if (bus_info->state.created == 0) {
+ err = -ENOENT;
+ goto err_respond;
+ }
+ if (bus_info->pending_msg_hdr) {
+ /* only non-NULL if dev is still waiting on a response */
+ err = -EEXIST;
+ goto err_respond;
+ }
+ if (inmsg->hdr.flags.response_expected == 1) {
+ pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+ if (!pmsg_hdr) {
+ POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
+ bus_info->chipset_bus_no,
+ DIAG_SEVERITY_ERR);
+ err = -ENOMEM;
+ goto err_respond;
+ }
- bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
- rc, inmsg->hdr.flags.response_expected == 1);
+ memcpy(pmsg_hdr, &inmsg->hdr,
+ sizeof(struct controlvm_message_header));
+ bus_info->pending_msg_hdr = pmsg_hdr;
+ }
- /* bus_info is freed as part of the busdevice_release function */
+ /* Response will be handled by chipset_bus_destroy */
+ chipset_bus_destroy(bus_info);
+ return 0;
+
+err_respond:
+ if (inmsg->hdr.flags.response_expected == 1)
+ bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ return err;
}
-static void
+static int
bus_configure(struct controlvm_message *inmsg,
struct parser_context *parser_ctx)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
u32 bus_no;
struct visor_device *bus_info;
- int rc = CONTROLVM_RESP_SUCCESS;
+ int err = 0;
bus_no = cmd->configure_bus.bus_no;
- POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
- POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(BUS_CONFIGURE_ENTRY_PC, 0, bus_no,
+ DIAG_SEVERITY_PRINT);
bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
if (!bus_info) {
- POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
- POSTCODE_SEVERITY_ERR);
- rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
+ DIAG_SEVERITY_ERR);
+ err = -EINVAL;
+ goto err_respond;
} else if (bus_info->state.created == 0) {
- POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
- POSTCODE_SEVERITY_ERR);
- rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
+ DIAG_SEVERITY_ERR);
+ err = -EINVAL;
+ goto err_respond;
} else if (bus_info->pending_msg_hdr) {
- POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
- POSTCODE_SEVERITY_ERR);
- rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
- } else {
- visorchannel_set_clientpartition
- (bus_info->visorchannel,
- cmd->configure_bus.guest_handle);
- bus_info->partition_uuid = parser_id_get(parser_ctx);
- parser_param_start(parser_ctx, PARSERSTRING_NAME);
- bus_info->name = parser_string_get(parser_ctx);
-
- POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
- POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
+ DIAG_SEVERITY_ERR);
+ err = -EIO;
+ goto err_respond;
}
- bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
- rc, inmsg->hdr.flags.response_expected == 1);
+
+ err = visorchannel_set_clientpartition
+ (bus_info->visorchannel,
+ cmd->configure_bus.guest_handle);
+ if (err)
+ goto err_respond;
+
+ bus_info->partition_uuid = parser_id_get(parser_ctx);
+ parser_param_start(parser_ctx, PARSERSTRING_NAME);
+ bus_info->name = parser_string_get(parser_ctx);
+
+ POSTCODE_LINUX(BUS_CONFIGURE_EXIT_PC, 0, bus_no,
+ DIAG_SEVERITY_PRINT);
+
+ if (inmsg->hdr.flags.response_expected == 1)
+ bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ return 0;
+
+err_respond:
+ if (inmsg->hdr.flags.response_expected == 1)
+ bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ return err;
}
static void
my_device_create(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
+ struct controlvm_message_header *pmsg_hdr = NULL;
u32 bus_no = cmd->create_device.bus_no;
u32 dev_no = cmd->create_device.dev_no;
struct visor_device *dev_info = NULL;
@@ -938,31 +888,31 @@ my_device_create(struct controlvm_message *inmsg)
bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
if (!bus_info) {
- POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ DIAG_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
goto out_respond;
}
if (bus_info->state.created == 0) {
- POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ DIAG_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
goto out_respond;
}
dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
if (dev_info && (dev_info->state.created == 1)) {
- POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ DIAG_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
goto out_respond;
}
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
if (!dev_info) {
- POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ DIAG_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
goto out_respond;
}
@@ -974,8 +924,8 @@ my_device_create(struct controlvm_message *inmsg)
/* not sure where the best place to set the 'parent' */
dev_info->device.parent = &bus_info->device;
- POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
+ DIAG_SEVERITY_PRINT);
visorchannel =
visorchannel_create_with_lock(cmd->create_device.channel_addr,
@@ -984,12 +934,10 @@ my_device_create(struct controlvm_message *inmsg)
cmd->create_device.data_type_uuid);
if (!visorchannel) {
- POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ DIAG_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- kfree(dev_info);
- dev_info = NULL;
- goto out_respond;
+ goto out_free_dev_info;
}
dev_info->visorchannel = visorchannel;
dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
@@ -997,18 +945,36 @@ my_device_create(struct controlvm_message *inmsg)
spar_vhba_channel_protocol_uuid) == 0)
save_crash_message(inmsg, CRASH_DEV);
- POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_INFO);
+ if (inmsg->hdr.flags.response_expected == 1) {
+ pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+ if (!pmsg_hdr) {
+ rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ goto out_free_dev_info;
+ }
+
+ memcpy(pmsg_hdr, &inmsg->hdr,
+ sizeof(struct controlvm_message_header));
+ dev_info->pending_msg_hdr = pmsg_hdr;
+ }
+ /* Chipset_device_create will send response */
+ chipset_device_create(dev_info);
+ POSTCODE_LINUX(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
+ DIAG_SEVERITY_PRINT);
+ return;
+
+out_free_dev_info:
+ kfree(dev_info);
+
out_respond:
- device_epilog(dev_info, segment_state_running,
- CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
- inmsg->hdr.flags.response_expected == 1, 1);
+ if (inmsg->hdr.flags.response_expected == 1)
+ device_responder(inmsg->hdr.id, &inmsg->hdr, rc);
}
static void
my_device_changestate(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
+ struct controlvm_message_header *pmsg_hdr = NULL;
u32 bus_no = cmd->device_change_state.bus_no;
u32 dev_no = cmd->device_change_state.dev_no;
struct spar_segment_state state = cmd->device_change_state.state;
@@ -1017,39 +983,97 @@ my_device_changestate(struct controlvm_message *inmsg)
dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
if (!dev_info) {
- POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
+ DIAG_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
- } else if (dev_info->state.created == 0) {
- POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_ERR);
+ goto err_respond;
+ }
+ if (dev_info->state.created == 0) {
+ POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
+ DIAG_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ goto err_respond;
+ }
+ if (dev_info->pending_msg_hdr) {
+ /* only non-NULL if dev is still waiting on a response */
+ rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
+ goto err_respond;
}
- if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
- device_epilog(dev_info, state,
- CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
- inmsg->hdr.flags.response_expected == 1, 1);
+ if (inmsg->hdr.flags.response_expected == 1) {
+ pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+ if (!pmsg_hdr) {
+ rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ goto err_respond;
+ }
+
+ memcpy(pmsg_hdr, &inmsg->hdr,
+ sizeof(struct controlvm_message_header));
+ dev_info->pending_msg_hdr = pmsg_hdr;
+ }
+
+ if (state.alive == segment_state_running.alive &&
+ state.operating == segment_state_running.operating)
+ /* Response will be sent from chipset_device_resume */
+ chipset_device_resume(dev_info);
+ /* ServerNotReady / ServerLost / SegmentStateStandby */
+ else if (state.alive == segment_state_standby.alive &&
+ state.operating == segment_state_standby.operating)
+ /*
+ * technically this is the standby case where the server is lost.
+ * Response will be sent from chipset_device_pause.
+ */
+ chipset_device_pause(dev_info);
+
+ return;
+
+err_respond:
+ if (inmsg->hdr.flags.response_expected == 1)
+ device_responder(inmsg->hdr.id, &inmsg->hdr, rc);
}
static void
my_device_destroy(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
+ struct controlvm_message_header *pmsg_hdr = NULL;
u32 bus_no = cmd->destroy_device.bus_no;
u32 dev_no = cmd->destroy_device.dev_no;
struct visor_device *dev_info;
int rc = CONTROLVM_RESP_SUCCESS;
dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
- if (!dev_info)
+ if (!dev_info) {
rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
- else if (dev_info->state.created == 0)
+ goto err_respond;
+ }
+ if (dev_info->state.created == 0) {
rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ goto err_respond;
+ }
- if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
- device_epilog(dev_info, segment_state_running,
- CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
- inmsg->hdr.flags.response_expected == 1, 1);
+ if (dev_info->pending_msg_hdr) {
+ /* only non-NULL if dev is still waiting on a response */
+ rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
+ goto err_respond;
+ }
+ if (inmsg->hdr.flags.response_expected == 1) {
+ pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+ if (!pmsg_hdr) {
+ rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ goto err_respond;
+ }
+
+ memcpy(pmsg_hdr, &inmsg->hdr,
+ sizeof(struct controlvm_message_header));
+ dev_info->pending_msg_hdr = pmsg_hdr;
+ }
+
+ chipset_device_destroy(dev_info);
+ return;
+
+err_respond:
+ if (inmsg->hdr.flags.response_expected == 1)
+ device_responder(inmsg->hdr.id, &inmsg->hdr, rc);
}
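
The restructured handlers above (bus_create, bus_destroy, my_device_create, my_device_changestate, my_device_destroy) all follow the same deferred-response pattern: when a response is expected, a copy of the incoming ControlVM header is stashed in pending_msg_hdr so the later chipset_*() callback can answer, while error paths respond immediately through bus_responder()/device_responder(). The following is only a condensed, illustrative sketch of that pattern, not the actual driver code; it uses kmemdup() in place of the kzalloc()+memcpy() pair and omits the direct-responder error paths.

/* Illustrative sketch of the deferred-response pattern used above. */
static int handle_controlvm_msg(struct controlvm_message *inmsg,
				struct visor_device *dev)
{
	struct controlvm_message_header *pmsg_hdr;

	/* Only non-NULL if the device is still waiting on a response. */
	if (dev->pending_msg_hdr)
		return -EEXIST;

	if (inmsg->hdr.flags.response_expected == 1) {
		/* Keep a copy of the header so the async callback can respond. */
		pmsg_hdr = kmemdup(&inmsg->hdr, sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr)
			return -ENOMEM;
		dev->pending_msg_hdr = pmsg_hdr;
	}

	/* Response is sent later, e.g. from chipset_device_create(). */
	chipset_device_create(dev);
	return 0;
}
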
/**
@@ -1075,7 +1099,6 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
if (!info)
return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
- memset(info, 0, sizeof(struct visor_controlvm_payload_info));
if ((offset == 0) || (bytes == 0))
return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
@@ -1083,6 +1106,7 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
if (!payload)
return -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
+ memset(info, 0, sizeof(struct visor_controlvm_payload_info));
info->offset = offset;
info->bytes = bytes;
info->ptr = payload;
@@ -1111,16 +1135,16 @@ initialize_controlvm_payload(void)
offsetof(struct spar_controlvm_channel_protocol,
request_payload_offset),
&payload_offset, sizeof(payload_offset)) < 0) {
- POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CONTROLVM_INIT_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
return;
}
if (visorchannel_read(controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
request_payload_bytes),
&payload_bytes, sizeof(payload_bytes)) < 0) {
- POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CONTROLVM_INIT_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
return;
}
initialize_controlvm_payload_info(phys_addr,
@@ -1317,7 +1341,7 @@ static struct attribute *visorchipset_install_attrs[] = {
NULL
};
-static struct attribute_group visorchipset_install_group = {
+static const struct attribute_group visorchipset_install_group = {
.name = "install",
.attrs = visorchipset_install_attrs
};
@@ -1540,7 +1564,7 @@ setup_crash_devices_work_queue(struct work_struct *work)
u32 local_crash_msg_offset;
u16 local_crash_msg_count;
- POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(CRASH_DEV_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
/* send init chipset msg */
msg.hdr.id = CONTROLVM_CHIPSET_INIT;
@@ -1554,15 +1578,15 @@ setup_crash_devices_work_queue(struct work_struct *work)
offsetof(struct spar_controlvm_channel_protocol,
saved_crash_message_count),
&local_crash_msg_count, sizeof(u16)) < 0) {
- POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
return;
}
if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
- POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
- local_crash_msg_count,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
+ local_crash_msg_count,
+ DIAG_SEVERITY_ERR);
return;
}
@@ -1571,8 +1595,8 @@ setup_crash_devices_work_queue(struct work_struct *work)
offsetof(struct spar_controlvm_channel_protocol,
saved_crash_message_offset),
&local_crash_msg_offset, sizeof(u32)) < 0) {
- POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
return;
}
@@ -1581,8 +1605,8 @@ setup_crash_devices_work_queue(struct work_struct *work)
local_crash_msg_offset,
&local_crash_bus_msg,
sizeof(struct controlvm_message)) < 0) {
- POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CRASH_DEV_RD_BUS_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
return;
}
@@ -1592,8 +1616,8 @@ setup_crash_devices_work_queue(struct work_struct *work)
sizeof(struct controlvm_message),
&local_crash_dev_msg,
sizeof(struct controlvm_message)) < 0) {
- POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CRASH_DEV_RD_DEV_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
return;
}
@@ -1601,8 +1625,8 @@ setup_crash_devices_work_queue(struct work_struct *work)
if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
bus_create(&local_crash_bus_msg);
} else {
- POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CRASH_DEV_BUS_NULL_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
return;
}
@@ -1610,11 +1634,11 @@ setup_crash_devices_work_queue(struct work_struct *work)
if (local_crash_dev_msg.cmd.create_device.channel_addr) {
my_device_create(&local_crash_dev_msg);
} else {
- POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CRASH_DEV_DEV_NULL_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
return;
}
- POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(CRASH_DEV_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
}
void
@@ -2119,8 +2143,6 @@ visorchipset_init(struct acpi_device *acpi_device)
if (!addr)
goto error;
- memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
-
controlvm_channel = visorchannel_create_with_lock(addr, 0,
GFP_KERNEL, uuid);
if (!controlvm_channel)
@@ -2152,11 +2174,12 @@ visorchipset_init(struct acpi_device *acpi_device)
visorchipset_platform_device.dev.devt = major_dev;
if (platform_device_register(&visorchipset_platform_device) < 0) {
- POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
+ POSTCODE_LINUX(DEVICE_REGISTER_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
err = -ENODEV;
goto error_cancel_work;
}
- POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, 0, DIAG_SEVERITY_PRINT);
err = visorbus_init();
if (err < 0)
@@ -2178,14 +2201,14 @@ error_destroy_channel:
visorchannel_destroy(controlvm_channel);
error:
- POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR);
return err;
}
static int
visorchipset_exit(struct acpi_device *acpi_device)
{
- POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
visorbus_exit();
@@ -2196,7 +2219,7 @@ visorchipset_exit(struct acpi_device *acpi_device)
visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
platform_device_unregister(&visorchipset_platform_device);
- POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
return 0;
}
diff --git a/drivers/staging/unisys/visorbus/vmcallinterface.h b/drivers/staging/unisys/visorbus/vmcallinterface.h
index 86e695d5a441..674a88b657d3 100644
--- a/drivers/staging/unisys/visorbus/vmcallinterface.h
+++ b/drivers/staging/unisys/visorbus/vmcallinterface.h
@@ -92,15 +92,6 @@ enum vmcall_monitor_interface_method_tuple { /* VMCALL identification tuples */
#define ISSUE_IO_VMCALL(method, param, result) \
(result = unisys_vmcall(method, (param) & 0xFFFFFFFF, \
(param) >> 32))
-#define ISSUE_IO_EXTENDED_VMCALL(method, param1, param2, param3) \
- unisys_extended_vmcall(method, param1, param2, param3)
-
- /* The following uses VMCALL_POST_CODE_LOGEVENT interface but is currently
- * not used much
- */
-#define ISSUE_IO_VMCALL_POSTCODE_SEVERITY(postcode, severity) \
- ISSUE_IO_EXTENDED_VMCALL(VMCALL_POST_CODE_LOGEVENT, severity, \
- MDS_APPOS, postcode)
/* Structures for IO VMCALLs */
@@ -117,118 +108,53 @@ struct vmcall_io_controlvm_addr_params {
/******* INFO ON ISSUE_POSTCODE_LINUX() BELOW *******/
enum driver_pc { /* POSTCODE driver identifier tuples */
- /* visorchipset driver files */
- VISOR_CHIPSET_PC = 0xA0,
- VISOR_CHIPSET_PC_controlvm_c = 0xA1,
- VISOR_CHIPSET_PC_controlvm_cm2 = 0xA2,
- VISOR_CHIPSET_PC_controlvm_direct_c = 0xA3,
- VISOR_CHIPSET_PC_file_c = 0xA4,
- VISOR_CHIPSET_PC_parser_c = 0xA5,
- VISOR_CHIPSET_PC_testing_c = 0xA6,
- VISOR_CHIPSET_PC_visorchipset_main_c = 0xA7,
- VISOR_CHIPSET_PC_visorswitchbus_c = 0xA8,
/* visorbus driver files */
- VISOR_BUS_PC = 0xB0,
- VISOR_BUS_PC_businst_attr_c = 0xB1,
- VISOR_BUS_PC_channel_attr_c = 0xB2,
- VISOR_BUS_PC_devmajorminor_attr_c = 0xB3,
- VISOR_BUS_PC_visorbus_main_c = 0xB4,
- /* visorclientbus driver files */
- VISOR_CLIENT_BUS_PC = 0xC0,
- VISOR_CLIENT_BUS_PC_visorclientbus_main_c = 0xC1,
- /* virt hba driver files */
- VIRT_HBA_PC = 0xC2,
- VIRT_HBA_PC_virthba_c = 0xC3,
- /* virtpci driver files */
- VIRT_PCI_PC = 0xC4,
- VIRT_PCI_PC_virtpci_c = 0xC5,
- /* virtnic driver files */
- VIRT_NIC_PC = 0xC6,
- VIRT_NIC_P_virtnic_c = 0xC7,
- /* uislib driver files */
- UISLIB_PC = 0xD0,
- UISLIB_PC_uislib_c = 0xD1,
- UISLIB_PC_uisqueue_c = 0xD2,
- /* 0xD3 RESERVED */
- UISLIB_PC_uisutils_c = 0xD4,
+ VISOR_BUS_PC = 0xF0,
+ VISOR_BUS_PC_visorbus_main_c = 0xFF,
+ VISOR_BUS_PC_visorchipset_c = 0xFE,
};
enum event_pc { /* POSTCODE event identifier tuples */
- ATTACH_PORT_ENTRY_PC = 0x001,
- ATTACH_PORT_FAILURE_PC = 0x002,
- ATTACH_PORT_SUCCESS_PC = 0x003,
- BUS_FAILURE_PC = 0x004,
- BUS_CREATE_ENTRY_PC = 0x005,
- BUS_CREATE_FAILURE_PC = 0x006,
- BUS_CREATE_EXIT_PC = 0x007,
- BUS_CONFIGURE_ENTRY_PC = 0x008,
- BUS_CONFIGURE_FAILURE_PC = 0x009,
- BUS_CONFIGURE_EXIT_PC = 0x00A,
- CHIPSET_INIT_ENTRY_PC = 0x00B,
- CHIPSET_INIT_SUCCESS_PC = 0x00C,
- CHIPSET_INIT_FAILURE_PC = 0x00D,
- CHIPSET_INIT_EXIT_PC = 0x00E,
- CREATE_WORKQUEUE_PC = 0x00F,
- CREATE_WORKQUEUE_FAILED_PC = 0x0A0,
- CONTROLVM_INIT_FAILURE_PC = 0x0A1,
- DEVICE_CREATE_ENTRY_PC = 0x0A2,
- DEVICE_CREATE_FAILURE_PC = 0x0A3,
- DEVICE_CREATE_SUCCESS_PC = 0x0A4,
- DEVICE_CREATE_EXIT_PC = 0x0A5,
- DEVICE_ADD_PC = 0x0A6,
- DEVICE_REGISTER_FAILURE_PC = 0x0A7,
- DEVICE_CHANGESTATE_ENTRY_PC = 0x0A8,
- DEVICE_CHANGESTATE_FAILURE_PC = 0x0A9,
- DEVICE_CHANGESTATE_EXIT_PC = 0x0AA,
- DRIVER_ENTRY_PC = 0x0AB,
- DRIVER_EXIT_PC = 0x0AC,
- MALLOC_FAILURE_PC = 0x0AD,
- QUEUE_DELAYED_WORK_PC = 0x0AE,
- /* 0x0B7 RESERVED */
- VBUS_CHANNEL_ENTRY_PC = 0x0B8,
- VBUS_CHANNEL_FAILURE_PC = 0x0B9,
- VBUS_CHANNEL_EXIT_PC = 0x0BA,
- VHBA_CREATE_ENTRY_PC = 0x0BB,
- VHBA_CREATE_FAILURE_PC = 0x0BC,
- VHBA_CREATE_EXIT_PC = 0x0BD,
- VHBA_CREATE_SUCCESS_PC = 0x0BE,
- VHBA_COMMAND_HANDLER_PC = 0x0BF,
- VHBA_PROBE_ENTRY_PC = 0x0C0,
- VHBA_PROBE_FAILURE_PC = 0x0C1,
- VHBA_PROBE_EXIT_PC = 0x0C2,
- VNIC_CREATE_ENTRY_PC = 0x0C3,
- VNIC_CREATE_FAILURE_PC = 0x0C4,
- VNIC_CREATE_SUCCESS_PC = 0x0C5,
- VNIC_PROBE_ENTRY_PC = 0x0C6,
- VNIC_PROBE_FAILURE_PC = 0x0C7,
- VNIC_PROBE_EXIT_PC = 0x0C8,
- VPCI_CREATE_ENTRY_PC = 0x0C9,
- VPCI_CREATE_FAILURE_PC = 0x0CA,
- VPCI_CREATE_EXIT_PC = 0x0CB,
- VPCI_PROBE_ENTRY_PC = 0x0CC,
- VPCI_PROBE_FAILURE_PC = 0x0CD,
- VPCI_PROBE_EXIT_PC = 0x0CE,
- CRASH_DEV_ENTRY_PC = 0x0CF,
- CRASH_DEV_EXIT_PC = 0x0D0,
- CRASH_DEV_HADDR_NULL = 0x0D1,
- CRASH_DEV_CONTROLVM_NULL = 0x0D2,
- CRASH_DEV_RD_BUS_FAIULRE_PC = 0x0D3,
- CRASH_DEV_RD_DEV_FAIULRE_PC = 0x0D4,
- CRASH_DEV_BUS_NULL_FAILURE_PC = 0x0D5,
- CRASH_DEV_DEV_NULL_FAILURE_PC = 0x0D6,
- CRASH_DEV_CTRL_RD_FAILURE_PC = 0x0D7,
- CRASH_DEV_COUNT_FAILURE_PC = 0x0D8,
- SAVE_MSG_BUS_FAILURE_PC = 0x0D9,
- SAVE_MSG_DEV_FAILURE_PC = 0x0DA,
- CALLHOME_INIT_FAILURE_PC = 0x0DB
+ BUS_CREATE_ENTRY_PC = 0x001,
+ BUS_CREATE_FAILURE_PC = 0x002,
+ BUS_CREATE_EXIT_PC = 0x003,
+ BUS_CONFIGURE_ENTRY_PC = 0x004,
+ BUS_CONFIGURE_FAILURE_PC = 0x005,
+ BUS_CONFIGURE_EXIT_PC = 0x006,
+ CHIPSET_INIT_ENTRY_PC = 0x007,
+ CHIPSET_INIT_SUCCESS_PC = 0x008,
+ CHIPSET_INIT_FAILURE_PC = 0x009,
+ CHIPSET_INIT_EXIT_PC = 0x00A,
+ CONTROLVM_INIT_FAILURE_PC = 0x00B,
+ DEVICE_CREATE_ENTRY_PC = 0x00C,
+ DEVICE_CREATE_FAILURE_PC = 0x00D,
+ DEVICE_CREATE_SUCCESS_PC = 0x00E,
+ DEVICE_CREATE_EXIT_PC = 0x00F,
+ DEVICE_ADD_PC = 0x010,
+ DEVICE_REGISTER_FAILURE_PC = 0x011,
+ DEVICE_CHANGESTATE_FAILURE_PC = 0x012,
+ DRIVER_ENTRY_PC = 0x013,
+ DRIVER_EXIT_PC = 0x014,
+ MALLOC_FAILURE_PC = 0x015,
+ CRASH_DEV_ENTRY_PC = 0x016,
+ CRASH_DEV_EXIT_PC = 0x017,
+ CRASH_DEV_RD_BUS_FAILURE_PC = 0x018,
+ CRASH_DEV_RD_DEV_FAILURE_PC = 0x019,
+ CRASH_DEV_BUS_NULL_FAILURE_PC = 0x01A,
+ CRASH_DEV_DEV_NULL_FAILURE_PC = 0x01B,
+ CRASH_DEV_CTRL_RD_FAILURE_PC = 0x01C,
+ CRASH_DEV_COUNT_FAILURE_PC = 0x01D,
+ SAVE_MSG_BUS_FAILURE_PC = 0x01E,
+ SAVE_MSG_DEV_FAILURE_PC = 0x01F,
};
-#define POSTCODE_SEVERITY_ERR DIAG_SEVERITY_ERR
-#define POSTCODE_SEVERITY_WARNING DIAG_SEVERITY_WARNING
-/* TODO-> Info currently doesn't show, so we set info=warning */
-#define POSTCODE_SEVERITY_INFO DIAG_SEVERITY_PRINT
-
-/* example call of POSTCODE_LINUX_2(VISOR_CHIPSET_PC, POSTCODE_SEVERITY_ERR);
+/* Write a 64-bit value to the hypervisor's log file
+ * POSTCODE_LINUX generates a value in the form 0xAABBBCCCDDDDEEEE where
+ * A is an identifier for the file logging the postcode
+ * B is an identifier for the event logging the postcode
+ * C is the line logging the postcode
+ * D is additional information the caller wants to log
+ * E is additional information the caller wants to log
* Please also note that the resulting postcode is in hex, so if you are
* searching for the __LINE__ number, convert it first to decimal. The line
* number combined with driver and type of call, will allow you to track down
@@ -236,35 +162,16 @@ enum event_pc { /* POSTCODE event identifier tuples */
* entered/exited from.
*/
-/* BASE FUNCTIONS */
-#define POSTCODE_LINUX_A(DRIVER_PC, EVENT_PC, pc32bit, severity) \
-do { \
- unsigned long long post_code_temp; \
- post_code_temp = (((u64)DRIVER_PC) << 56) | (((u64)EVENT_PC) << 44) | \
- ((((u64)__LINE__) & 0xFFF) << 32) | \
- (((u64)pc32bit) & 0xFFFFFFFF); \
- ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity); \
-} while (0)
-
-#define POSTCODE_LINUX_B(DRIVER_PC, EVENT_PC, pc16bit1, pc16bit2, severity) \
+#define POSTCODE_LINUX(EVENT_PC, pc16bit1, pc16bit2, severity) \
do { \
unsigned long long post_code_temp; \
- post_code_temp = (((u64)DRIVER_PC) << 56) | (((u64)EVENT_PC) << 44) | \
+ post_code_temp = (((u64)CURRENT_FILE_PC) << 56) | \
+ (((u64)EVENT_PC) << 44) | \
((((u64)__LINE__) & 0xFFF) << 32) | \
((((u64)pc16bit1) & 0xFFFF) << 16) | \
(((u64)pc16bit2) & 0xFFFF); \
- ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity); \
+ unisys_extended_vmcall(VMCALL_POST_CODE_LOGEVENT, severity, \
+ MDS_APPOS, post_code_temp); \
} while (0)
-/* MOST COMMON */
-#define POSTCODE_LINUX_2(EVENT_PC, severity) \
- POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, 0x0000, severity)
-
-#define POSTCODE_LINUX_3(EVENT_PC, pc32bit, severity) \
- POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, pc32bit, severity)
-
-#define POSTCODE_LINUX_4(EVENT_PC, pc16bit1, pc16bit2, severity) \
- POSTCODE_LINUX_B(CURRENT_FILE_PC, EVENT_PC, pc16bit1, \
- pc16bit2, severity)
-
#endif /* __IOMONINTF_H__ */
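
To make the 0xAABBBCCCDDDDEEEE layout documented above concrete, here is a small, self-contained illustration (userspace C, not part of the patch) that packs the fields the same way the POSTCODE_LINUX macro does and prints the result; the masks are added for safety and the enum values and line number are just example inputs.

/* Illustrative only: pack postcode fields the way POSTCODE_LINUX does. */
#include <stdint.h>
#include <stdio.h>

static uint64_t build_postcode(uint8_t file_pc, uint16_t event_pc,
			       uint16_t line, uint16_t pc16bit1,
			       uint16_t pc16bit2)
{
	return ((uint64_t)file_pc << 56) |
	       (((uint64_t)event_pc & 0xFFF) << 44) |
	       (((uint64_t)line & 0xFFF) << 32) |
	       (((uint64_t)pc16bit1 & 0xFFFF) << 16) |
	       ((uint64_t)pc16bit2 & 0xFFFF);
}

int main(void)
{
	/* e.g. VISOR_BUS_PC_visorchipset_c (0xFE), BUS_CREATE_FAILURE_PC (0x002),
	 * __LINE__ = 700 (0x2BC), pc16bit1 = 0, bus_no = 3
	 * -> 0xFE0022BC00000003
	 */
	printf("postcode = 0x%016llx\n",
	       (unsigned long long)build_postcode(0xFE, 0x002, 700, 0, 3));
	return 0;
}
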
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index 6f94b646f7c5..949cce680b29 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -409,6 +409,9 @@ devdata_create(struct visor_device *dev, enum visorinput_device_type devtype)
if (!devdata->visorinput_dev)
goto cleanups_register;
break;
+ default:
+ /* No other input devices supported */
+ break;
}
dev_set_drvdata(&dev->device, devdata);
@@ -653,6 +656,9 @@ visorinput_channel_interrupt(struct visor_device *dev)
input_report_rel(visorinput_dev, REL_WHEEL, -1);
input_sync(visorinput_dev);
break;
+ default:
+ /* Unsupported input action */
+ break;
}
}
}
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index f8a584bf4a77..c1f674f5268c 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1371,7 +1371,7 @@ static ssize_t info_debugfs_read(struct file *file, char __user *buf,
" num_rcv_bufs = %d\n",
devdata->num_rcv_bufs);
str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " max_oustanding_next_xmits = %lu\n",
+ " max_outstanding_next_xmits = %lu\n",
devdata->max_outstanding_net_xmits);
str_pos += scnprintf(vbuf + str_pos, len - str_pos,
" upper_threshold_net_xmits = %lu\n",
diff --git a/drivers/staging/vc04_services/Kconfig b/drivers/staging/vc04_services/Kconfig
index 9676fb29075a..e61e4ca064a8 100644
--- a/drivers/staging/vc04_services/Kconfig
+++ b/drivers/staging/vc04_services/Kconfig
@@ -1,9 +1,10 @@
-config BCM2708_VCHIQ
+config BCM2835_VCHIQ
tristate "Videocore VCHIQ"
- depends on RASPBERRYPI_FIRMWARE && BROKEN
+ depends on HAS_DMA
+ depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
default y
help
Kernel to VideoCore communication interface for the
- BCM2708 family of products.
+ BCM2835 family of products.
Defaults to Y when the Broadcom Videocore services
are included in the build, N otherwise.
diff --git a/drivers/staging/vc04_services/Makefile b/drivers/staging/vc04_services/Makefile
index 90ab4781df2c..1a9e742ee40d 100644
--- a/drivers/staging/vc04_services/Makefile
+++ b/drivers/staging/vc04_services/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_BCM2708_VCHIQ) += vchiq.o
+obj-$(CONFIG_BCM2835_VCHIQ) += vchiq.o
vchiq-objs := \
interface/vchiq_arm/vchiq_core.o \
diff --git a/drivers/staging/vc04_services/interface/vchi/TODO b/drivers/staging/vc04_services/interface/vchi/TODO
new file mode 100644
index 000000000000..03aa65183b25
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchi/TODO
@@ -0,0 +1,50 @@
+1) Port to aarch64
+
+This driver won't be very useful unless we also have it working on
+Raspberry Pi 3. This requires, at least:
+
+ - Figure out an alternative to the dmac_map_area() hack.
+
+ - Decide what to use instead of dsb().
+
+ - Do something about (int) cast of bulk->data in
+ vchiq_bulk_transfer().
+
+ bulk->data is a bus address going across to the firmware. We know
+ our bus addresses are <32bit.
+
+2) Write a DT binding doc and get the corresponding DT node merged to
+ bcm2835.
+
+This will let the driver probe when enabled.
+
+3) Import drivers using VCHI.
+
+VCHI is just a tool to let drivers talk to the firmware. Here are
+some of the ones we want:
+
+ - vc_mem (https://github.com/raspberrypi/linux/blob/rpi-4.4.y/drivers/char/broadcom/vc_mem.c)
+
+ This driver is what the vcdbg userspace program uses to set up its
+ requests to the firmware, which are transmitted across VCHIQ. vcdbg
+ is really useful for debugging firmware interactions.
+
+ - bcm2835-camera (https://github.com/raspberrypi/linux/tree/rpi-4.4.y/drivers/media/platform/bcm2835)
+
+ This driver will let us get images from the camera using the MMAL
+ protocol over VCHI.
+
+ - VCSM (https://github.com/raspberrypi/linux/tree/rpi-4.4.y/drivers/char/broadcom/vc_sm)
+
+ This driver is used for talking about regions of VC memory across
+ firmware protocols including VCHI. We'll want to extend this driver
+ to manage these buffers as dmabufs so that we can zero-copy import
+ camera images into vc4 for rendering/display.
+
+4) Garbage-collect unused code
+
+One of the reasons this driver wasn't upstreamed previously was that
+there's a lot of code that got built that's probably unnecessary these
+days. Once we have the set of VCHI-using drivers we want in tree, we
+should be able to do a sweep of the code to see what's left that's
+unused.

diff --git a/drivers/staging/vc04_services/interface/vchi/vchi.h b/drivers/staging/vc04_services/interface/vchi/vchi.h
index 1b17e98f7379..d6937288210c 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi.h
@@ -226,25 +226,12 @@ extern int32_t vchi_service_set_option( const VCHI_SERVICE_HANDLE_T handle,
int value);
// Routine to send a message across a service
-extern int32_t vchi_msg_queue( VCHI_SERVICE_HANDLE_T handle,
- const void *data,
- uint32_t data_size,
- VCHI_FLAGS_T flags,
- void *msg_handle );
-
-// scatter-gather (vector) and send message
-int32_t vchi_msg_queuev_ex( VCHI_SERVICE_HANDLE_T handle,
- VCHI_MSG_VECTOR_EX_T *vector,
- uint32_t count,
- VCHI_FLAGS_T flags,
- void *msg_handle );
-
-// legacy scatter-gather (vector) and send message, only handles pointers
-int32_t vchi_msg_queuev( VCHI_SERVICE_HANDLE_T handle,
- VCHI_MSG_VECTOR_T *vector,
- uint32_t count,
- VCHI_FLAGS_T flags,
- void *msg_handle );
+extern int32_t
+ vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context,
+ uint32_t data_size);
// Routine to receive a msg from a service
// Dequeue is equivalent to hold, copy into client buffer, release
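
For context on the new vchi_msg_queue() prototype above: instead of passing a data pointer, callers now provide a copy callback that the core invokes to pull message bytes into the transmit slot, resumable by offset and returning the number of bytes copied. A minimal sketch of such a callback for a plain contiguous kernel buffer might look like the following; the context struct and names are hypothetical, not part of the patch.

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical context describing one contiguous kernel buffer to send. */
struct contig_msg_ctx {
	const char *buf;
	size_t len;
};

/* Copy up to maxsize bytes starting at offset into dest; return bytes copied. */
static ssize_t contig_copy_callback(void *context, void *dest,
				    size_t offset, size_t maxsize)
{
	struct contig_msg_ctx *ctx = context;

	if (offset >= ctx->len)
		return 0;

	maxsize = min(maxsize, ctx->len - offset);
	memcpy(dest, ctx->buf + offset, maxsize);
	return maxsize;
}

/*
 * Usage sketch:
 *	struct contig_msg_ctx ctx = { .buf = payload, .len = payload_len };
 *	vchi_msg_queue(handle, contig_copy_callback, &ctx, ctx.len);
 */
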
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h
index ad398bae6ee4..21adf89a9065 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h
@@ -37,4 +37,15 @@
#include "vchiq_if.h"
#include "vchiq_util.h"
+/* Do this so that we can test-build the code on non-rpi systems */
+#if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)
+
+#else
+
+#ifndef dsb
+#define dsb(a)
+#endif
+
+#endif /* IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE) */
+
#endif
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 1091b9f1dd07..2b500d85cebc 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -45,16 +45,8 @@
#include <asm/pgtable.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
-#define dmac_map_area __glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
-
-extern void dmac_map_area(const void *, size_t, int);
-extern void dmac_unmap_area(const void *, size_t, int);
-
#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
-#define VCHIQ_ARM_ADDRESS(x) ((void *)((char *)x + g_virt_to_bus_offset))
-
#include "vchiq_arm.h"
#include "vchiq_2835.h"
#include "vchiq_connected.h"
@@ -70,13 +62,25 @@ typedef struct vchiq_2835_state_struct {
VCHIQ_ARM_STATE_T arm_state;
} VCHIQ_2835_ARM_STATE_T;
+struct vchiq_pagelist_info {
+ PAGELIST_T *pagelist;
+ size_t pagelist_buffer_size;
+ dma_addr_t dma_addr;
+ enum dma_data_direction dma_dir;
+ unsigned int num_pages;
+ unsigned int pages_need_release;
+ struct page **pages;
+ struct scatterlist *scatterlist;
+ unsigned int scatterlist_mapped;
+};
+
static void __iomem *g_regs;
static unsigned int g_cache_line_size = sizeof(CACHE_LINE_SIZE);
static unsigned int g_fragments_size;
static char *g_fragments_base;
static char *g_free_fragments;
static struct semaphore g_free_fragments_sema;
-static unsigned long g_virt_to_bus_offset;
+static struct device *g_dev;
extern int vchiq_arm_log_level;
@@ -85,12 +89,13 @@ static DEFINE_SEMAPHORE(g_free_fragments_mutex);
static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id);
-static int
+static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type,
- struct task_struct *task, PAGELIST_T ** ppagelist);
+ struct task_struct *task);
static void
-free_pagelist(PAGELIST_T *pagelist, int actual);
+free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
+ int actual);
int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
{
@@ -104,7 +109,14 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
int slot_mem_size, frag_mem_size;
int err, irq, i;
- g_virt_to_bus_offset = virt_to_dma(dev, (void *)0);
+ /*
+ * VCHI messages between the CPU and firmware use
+ * 32-bit bus addresses.
+ */
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
+ if (err < 0)
+ return err;
(void)of_property_read_u32(dev->of_node, "cache-line-size",
&g_cache_line_size);
@@ -121,7 +133,7 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
return -ENOMEM;
}
- WARN_ON(((int)slot_mem & (PAGE_SIZE - 1)) != 0);
+ WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
if (!vchiq_slot_zero)
@@ -173,9 +185,10 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
return err ? : -ENXIO;
}
+ g_dev = dev;
vchiq_log_info(vchiq_arm_log_level,
- "vchiq_init - done (slots %x, phys %pad)",
- (unsigned int)vchiq_slot_zero, &slot_phys);
+ "vchiq_init - done (slots %pK, phys %pad)",
+ vchiq_slot_zero, &slot_phys);
vchiq_call_connected_callbacks();
@@ -213,47 +226,37 @@ remote_event_signal(REMOTE_EVENT_T *event)
event->fired = 1;
- dsb(); /* data barrier operation */
+ dsb(sy); /* data barrier operation */
if (event->armed)
writel(0, g_regs + BELL2); /* trigger vc interrupt */
}
-int
-vchiq_copy_from_user(void *dst, const void *src, int size)
-{
- if ((uint32_t)src < TASK_SIZE) {
- return copy_from_user(dst, src, size);
- } else {
- memcpy(dst, src, size);
- return 0;
- }
-}
-
VCHIQ_STATUS_T
vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
void *offset, int size, int dir)
{
- PAGELIST_T *pagelist;
- int ret;
+ struct vchiq_pagelist_info *pagelistinfo;
WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
- ret = create_pagelist((char __user *)offset, size,
- (dir == VCHIQ_BULK_RECEIVE)
- ? PAGELIST_READ
- : PAGELIST_WRITE,
- current,
- &pagelist);
- if (ret != 0)
+ pagelistinfo = create_pagelist((char __user *)offset, size,
+ (dir == VCHIQ_BULK_RECEIVE)
+ ? PAGELIST_READ
+ : PAGELIST_WRITE,
+ current);
+
+ if (!pagelistinfo)
return VCHIQ_ERROR;
bulk->handle = memhandle;
- bulk->data = VCHIQ_ARM_ADDRESS(pagelist);
+ bulk->data = (void *)(unsigned long)pagelistinfo->dma_addr;
- /* Store the pagelist address in remote_data, which isn't used by the
- slave. */
- bulk->remote_data = pagelist;
+ /*
+ * Store the pagelistinfo address in remote_data,
+ * which isn't used by the slave.
+ */
+ bulk->remote_data = pagelistinfo;
return VCHIQ_SUCCESS;
}
@@ -262,7 +265,8 @@ void
vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
{
if (bulk && bulk->remote_data && bulk->actual)
- free_pagelist((PAGELIST_T *)bulk->remote_data, bulk->actual);
+ free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
+ bulk->actual);
}
void
@@ -350,57 +354,93 @@ vchiq_doorbell_irq(int irq, void *dev_id)
return ret;
}
+static void
+cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
+{
+ if (pagelistinfo->scatterlist_mapped) {
+ dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
+ pagelistinfo->num_pages, pagelistinfo->dma_dir);
+ }
+
+ if (pagelistinfo->pages_need_release) {
+ unsigned int i;
+
+ for (i = 0; i < pagelistinfo->num_pages; i++)
+ put_page(pagelistinfo->pages[i]);
+ }
+
+ dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
+ pagelistinfo->pagelist, pagelistinfo->dma_addr);
+}
+
/* There is a potential problem with partial cache lines (pages?)
** at the ends of the block when reading. If the CPU accessed anything in
** the same line (page?) then it may have pulled old data into the cache,
** obscuring the new data underneath. We can solve this by transferring the
** partial cache lines separately, and allowing the ARM to copy into the
** cached area.
-
-** N.B. This implementation plays slightly fast and loose with the Linux
-** driver programming rules, e.g. its use of dmac_map_area instead of
-** dma_map_single, but it isn't a multi-platform driver and it benefits
-** from increased speed as a result.
*/
-static int
+static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type,
- struct task_struct *task, PAGELIST_T ** ppagelist)
+ struct task_struct *task)
{
PAGELIST_T *pagelist;
+ struct vchiq_pagelist_info *pagelistinfo;
struct page **pages;
- unsigned long *addrs;
- unsigned int num_pages, offset, i;
- char *addr, *base_addr, *next_addr;
- int run, addridx, actual_pages;
- unsigned long *need_release;
-
- offset = (unsigned int)buf & (PAGE_SIZE - 1);
+ u32 *addrs;
+ unsigned int num_pages, offset, i, k;
+ int actual_pages;
+ size_t pagelist_size;
+ struct scatterlist *scatterlist, *sg;
+ int dma_buffers;
+ dma_addr_t dma_addr;
+
+ offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1));
num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
- *ppagelist = NULL;
+ pagelist_size = sizeof(PAGELIST_T) +
+ (num_pages * sizeof(u32)) +
+ (num_pages * sizeof(pages[0]) +
+ (num_pages * sizeof(struct scatterlist))) +
+ sizeof(struct vchiq_pagelist_info);
/* Allocate enough storage to hold the page pointers and the page
** list
*/
- pagelist = kmalloc(sizeof(PAGELIST_T) +
- (num_pages * sizeof(unsigned long)) +
- sizeof(unsigned long) +
- (num_pages * sizeof(pages[0])),
- GFP_KERNEL);
-
- vchiq_log_trace(vchiq_arm_log_level,
- "create_pagelist - %x", (unsigned int)pagelist);
+ pagelist = dma_zalloc_coherent(g_dev,
+ pagelist_size,
+ &dma_addr,
+ GFP_KERNEL);
+
+ vchiq_log_trace(vchiq_arm_log_level, "create_pagelist - %pK",
+ pagelist);
if (!pagelist)
- return -ENOMEM;
+ return NULL;
- addrs = pagelist->addrs;
- need_release = (unsigned long *)(addrs + num_pages);
- pages = (struct page **)(addrs + num_pages + 1);
+ addrs = pagelist->addrs;
+ pages = (struct page **)(addrs + num_pages);
+ scatterlist = (struct scatterlist *)(pages + num_pages);
+ pagelistinfo = (struct vchiq_pagelist_info *)
+ (scatterlist + num_pages);
+
+ pagelist->length = count;
+ pagelist->type = type;
+ pagelist->offset = offset;
+
+ /* Populate the fields of the pagelistinfo structure */
+ pagelistinfo->pagelist = pagelist;
+ pagelistinfo->pagelist_buffer_size = pagelist_size;
+ pagelistinfo->dma_addr = dma_addr;
+ pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ pagelistinfo->num_pages = num_pages;
+ pagelistinfo->pages_need_release = 0;
+ pagelistinfo->pages = pages;
+ pagelistinfo->scatterlist = scatterlist;
+ pagelistinfo->scatterlist_mapped = 0;
if (is_vmalloc_addr(buf)) {
- int dir = (type == PAGELIST_WRITE) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
unsigned long length = count;
unsigned int off = offset;
@@ -413,14 +453,13 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
if (bytes > length)
bytes = length;
pages[actual_pages] = pg;
- dmac_map_area(page_address(pg) + off, bytes, dir);
length -= bytes;
off = 0;
}
- *need_release = 0; /* do not try and release vmalloc pages */
+ /* do not try and release vmalloc pages */
} else {
down_read(&task->mm->mmap_sem);
- actual_pages = get_user_pages(task, task->mm,
+ actual_pages = get_user_pages(
(unsigned long)buf & ~(PAGE_SIZE - 1),
num_pages,
(type == PAGELIST_READ) ? FOLL_WRITE : 0,
@@ -438,44 +477,59 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
while (actual_pages > 0)
{
actual_pages--;
- page_cache_release(pages[actual_pages]);
+ put_page(pages[actual_pages]);
}
- kfree(pagelist);
- if (actual_pages == 0)
- actual_pages = -ENOMEM;
- return actual_pages;
+ cleanup_pagelistinfo(pagelistinfo);
+ return NULL;
}
- *need_release = 1; /* release user pages */
+ /* release user pages */
+ pagelistinfo->pages_need_release = 1;
}
- pagelist->length = count;
- pagelist->type = type;
- pagelist->offset = offset;
-
- /* Group the pages into runs of contiguous pages */
-
- base_addr = VCHIQ_ARM_ADDRESS(page_address(pages[0]));
- next_addr = base_addr + PAGE_SIZE;
- addridx = 0;
- run = 0;
+ /*
+ * Initialize the scatterlist so that the magic cookie
+ * is filled if debugging is enabled
+ */
+ sg_init_table(scatterlist, num_pages);
+ /* Now set the pages for each scatterlist */
+ for (i = 0; i < num_pages; i++)
+ sg_set_page(scatterlist + i, pages[i], PAGE_SIZE, 0);
+
+ dma_buffers = dma_map_sg(g_dev,
+ scatterlist,
+ num_pages,
+ pagelistinfo->dma_dir);
+
+ if (dma_buffers == 0) {
+ cleanup_pagelistinfo(pagelistinfo);
+ return NULL;
+ }
- for (i = 1; i < num_pages; i++) {
- addr = VCHIQ_ARM_ADDRESS(page_address(pages[i]));
- if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
- next_addr += PAGE_SIZE;
- run++;
+ pagelistinfo->scatterlist_mapped = 1;
+
+ /* Combine adjacent blocks for performance */
+ k = 0;
+ for_each_sg(scatterlist, sg, dma_buffers, i) {
+ u32 len = sg_dma_len(sg);
+ u32 addr = sg_dma_address(sg);
+
+ /* Note: each addrs[] entry is the page-aligned address
+ * combined with (page_count - 1) in its low bits.
+ * The firmware expects each block to be page
+ * aligned and a multiple of the page size.
+ */
+ WARN_ON(len == 0);
+ WARN_ON(len & ~PAGE_MASK);
+ WARN_ON(addr & ~PAGE_MASK);
+ if (k > 0 &&
+ ((addrs[k - 1] & PAGE_MASK) |
+ ((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT)
+ == addr) {
+ addrs[k - 1] += (len >> PAGE_SHIFT);
} else {
- addrs[addridx] = (unsigned long)base_addr + run;
- addridx++;
- base_addr = addr;
- next_addr = addr + PAGE_SIZE;
- run = 0;
+ addrs[k++] = addr | ((len >> PAGE_SHIFT) - 1);
}
}
- addrs[addridx] = (unsigned long)base_addr + run;
- addridx++;
-
/* Partial cache lines (fragments) require special measures */
if ((type == PAGELIST_READ) &&
((pagelist->offset & (g_cache_line_size - 1)) ||
@@ -484,8 +538,8 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
char *fragments;
if (down_interruptible(&g_free_fragments_sema) != 0) {
- kfree(pagelist);
- return -EINTR;
+ cleanup_pagelistinfo(pagelistinfo);
+ return NULL;
}
WARN_ON(g_free_fragments == NULL);
@@ -499,29 +553,28 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
(fragments - g_fragments_base) / g_fragments_size;
}
- dmac_flush_range(pagelist, addrs + num_pages);
-
- *ppagelist = pagelist;
-
- return 0;
+ return pagelistinfo;
}
static void
-free_pagelist(PAGELIST_T *pagelist, int actual)
+free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
+ int actual)
{
- unsigned long *need_release;
- struct page **pages;
- unsigned int num_pages, i;
-
- vchiq_log_trace(vchiq_arm_log_level,
- "free_pagelist - %x, %d", (unsigned int)pagelist, actual);
+ unsigned int i;
+ PAGELIST_T *pagelist = pagelistinfo->pagelist;
+ struct page **pages = pagelistinfo->pages;
+ unsigned int num_pages = pagelistinfo->num_pages;
- num_pages =
- (pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
- PAGE_SIZE;
+ vchiq_log_trace(vchiq_arm_log_level, "free_pagelist - %pK, %d",
+ pagelistinfo->pagelist, actual);
- need_release = (unsigned long *)(pagelist->addrs + num_pages);
- pages = (struct page **)(pagelist->addrs + num_pages + 1);
+ /*
+ * NOTE: dma_unmap_sg must be called before the
+ * cpu can touch any of the data/pages.
+ */
+ dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
+ pagelistinfo->num_pages, pagelistinfo->dma_dir);
+ pagelistinfo->scatterlist_mapped = 0;
/* Deal with any partial cache lines (fragments) */
if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
@@ -559,27 +612,12 @@ free_pagelist(PAGELIST_T *pagelist, int actual)
up(&g_free_fragments_sema);
}
- if (*need_release) {
- unsigned int length = pagelist->length;
- unsigned int offset = pagelist->offset;
-
- for (i = 0; i < num_pages; i++) {
- struct page *pg = pages[i];
-
- if (pagelist->type != PAGELIST_WRITE) {
- unsigned int bytes = PAGE_SIZE - offset;
-
- if (bytes > length)
- bytes = length;
- dmac_unmap_area(page_address(pg) + offset,
- bytes, DMA_FROM_DEVICE);
- length -= bytes;
- offset = 0;
- set_page_dirty(pg);
- }
- page_cache_release(pg);
- }
+ /* Need to mark all the pages dirty. */
+ if (pagelist->type != PAGELIST_WRITE &&
+ pagelistinfo->pages_need_release) {
+ for (i = 0; i < num_pages; i++)
+ set_page_dirty(pages[i]);
}
- kfree(pagelist);
+ cleanup_pagelistinfo(pagelistinfo);
}
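
A note on the addrs[] encoding used by create_pagelist() above: each 32-bit entry carries a page-aligned bus address in its upper bits and (page count - 1) in its low bits, which is why adjacent DMA segments can be merged simply by bumping the count in the previous entry. The standalone sketch below is illustrative only; it assumes 4 KiB pages and 32-bit bus addresses, and writes the adjacency check with plain addition rather than the driver's OR/shift form (equivalent here because the addresses are page aligned).

/* Illustrative only: pack and merge (addr | npages - 1) entries as create_pagelist() does. */
#include <stdint.h>
#include <stdio.h>

#define MY_PAGE_SHIFT 12
#define MY_PAGE_SIZE  (1u << MY_PAGE_SHIFT)
#define MY_PAGE_MASK  (~(MY_PAGE_SIZE - 1))

int main(void)
{
	/* Two physically adjacent 4 KiB segments followed by a discontiguous 8 KiB one. */
	uint32_t seg_addr[] = { 0x10000000, 0x10001000, 0x20000000 };
	uint32_t seg_len[]  = { MY_PAGE_SIZE, MY_PAGE_SIZE, 2 * MY_PAGE_SIZE };
	uint32_t addrs[3];
	unsigned int i, k = 0;

	for (i = 0; i < 3; i++) {
		uint32_t addr = seg_addr[i];
		uint32_t npages = seg_len[i] >> MY_PAGE_SHIFT;
		uint32_t prev_base = 0, prev_npages = 0;

		if (k > 0) {
			prev_base = addrs[k - 1] & MY_PAGE_MASK;
			prev_npages = (addrs[k - 1] & ~MY_PAGE_MASK) + 1;
		}

		if (k > 0 && prev_base + (prev_npages << MY_PAGE_SHIFT) == addr)
			addrs[k - 1] += npages;		/* extend the previous run */
		else
			addrs[k++] = addr | (npages - 1);	/* start a new run */
	}

	for (i = 0; i < k; i++)
		printf("addrs[%u] = 0x%08x\n", i, addrs[i]);
	/* Prints 0x10000001 (2 pages at 0x10000000) and 0x20000001 (2 pages at 0x20000000). */
	return 0;
}
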
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 7b6cd4d80621..0d987898b4f8 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -190,8 +190,8 @@ static const char *const ioctl_names[] = {
"CLOSE_DELIVERED"
};
-vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
- (VCHIQ_IOC_MAX + 1));
+vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
+ (VCHIQ_IOC_MAX + 1));
static void
dump_phys_mem(void *virt_addr, uint32_t num_bytes);
@@ -402,6 +402,107 @@ static void close_delivered(USER_SERVICE_T *user_service)
}
}
+struct vchiq_io_copy_callback_context {
+ VCHIQ_ELEMENT_T *current_element;
+ size_t current_element_offset;
+ unsigned long elements_to_go;
+ size_t current_offset;
+};
+
+static ssize_t
+vchiq_ioc_copy_element_data(
+ void *context,
+ void *dest,
+ size_t offset,
+ size_t maxsize)
+{
+ long res;
+ size_t bytes_this_round;
+ struct vchiq_io_copy_callback_context *copy_context =
+ (struct vchiq_io_copy_callback_context *)context;
+
+ if (offset != copy_context->current_offset)
+ return 0;
+
+ if (!copy_context->elements_to_go)
+ return 0;
+
+ /*
+ * Complex logic here to handle the case of 0 size elements
+ * in the middle of the array of elements.
+ *
+ * Need to skip over these 0 size elements.
+ */
+ while (1) {
+ bytes_this_round = min(copy_context->current_element->size -
+ copy_context->current_element_offset,
+ maxsize);
+
+ if (bytes_this_round)
+ break;
+
+ copy_context->elements_to_go--;
+ copy_context->current_element++;
+ copy_context->current_element_offset = 0;
+
+ if (!copy_context->elements_to_go)
+ return 0;
+ }
+
+ res = copy_from_user(dest,
+ copy_context->current_element->data +
+ copy_context->current_element_offset,
+ bytes_this_round);
+
+ if (res != 0)
+ return -EFAULT;
+
+ copy_context->current_element_offset += bytes_this_round;
+ copy_context->current_offset += bytes_this_round;
+
+ /*
+ * Check if done with current element, and if so advance to the next.
+ */
+ if (copy_context->current_element_offset ==
+ copy_context->current_element->size) {
+ copy_context->elements_to_go--;
+ copy_context->current_element++;
+ copy_context->current_element_offset = 0;
+ }
+
+ return bytes_this_round;
+}
+
+/**************************************************************************
+ *
+ * vchiq_ioc_queue_message
+ *
+ **************************************************************************/
+static VCHIQ_STATUS_T
+vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
+ VCHIQ_ELEMENT_T *elements,
+ unsigned long count)
+{
+ struct vchiq_io_copy_callback_context context;
+ unsigned long i;
+ size_t total_size = 0;
+
+ context.current_element = elements;
+ context.current_element_offset = 0;
+ context.elements_to_go = count;
+ context.current_offset = 0;
+
+ for (i = 0; i < count; i++) {
+ if (!elements[i].data && elements[i].size != 0)
+ return -EFAULT;
+
+ total_size += elements[i].size;
+ }
+
+ return vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
+ &context, total_size);
+}
+
/****************************************************************************
*
* vchiq_ioctl
@@ -418,8 +519,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
DEBUG_INITIALISE(g_state.local)
vchiq_log_trace(vchiq_arm_log_level,
- "vchiq_ioctl - instance %x, cmd %s, arg %lx",
- (unsigned int)instance,
+ "vchiq_ioctl - instance %pK, cmd %s, arg %lx",
+ instance,
((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
(_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
@@ -453,7 +554,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ret = -EINVAL;
break;
}
- rc = mutex_lock_interruptible(&instance->state->mutex);
+ rc = mutex_lock_killable(&instance->state->mutex);
if (rc != 0) {
vchiq_log_error(vchiq_arm_log_level,
"vchiq: connect: could not lock mutex for "
@@ -651,7 +752,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
if (copy_from_user(elements, args.elements,
args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
- status = vchiq_queue_message
+ status = vchiq_ioc_queue_message
(args.handle,
elements, args.count);
else
@@ -713,8 +814,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
}
vchiq_log_info(vchiq_arm_log_level,
- "found bulk_waiter %x for pid %d",
- (unsigned int)waiter, current->pid);
+ "found bulk_waiter %pK for pid %d", waiter,
+ current->pid);
args.userdata = &waiter->bulk_waiter;
}
status = vchiq_bulk_transfer
@@ -743,8 +844,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
list_add(&waiter->list, &instance->bulk_waiter_list);
mutex_unlock(&instance->bulk_waiter_list_mutex);
vchiq_log_info(vchiq_arm_log_level,
- "saved bulk_waiter %x for pid %d",
- (unsigned int)waiter, current->pid);
+ "saved bulk_waiter %pK for pid %d",
+ waiter, current->pid);
if (copy_to_user((void __user *)
&(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
@@ -826,10 +927,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (args.msgbufsize < msglen) {
vchiq_log_error(
vchiq_arm_log_level,
- "header %x: msgbufsize"
- " %x < msglen %x",
- (unsigned int)header,
- args.msgbufsize,
+ "header %pK: msgbufsize %x < msglen %x",
+ header, args.msgbufsize,
msglen);
WARN(1, "invalid message "
"size\n");
@@ -980,9 +1079,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ret = -EFAULT;
} else {
vchiq_log_error(vchiq_arm_log_level,
- "header %x: bufsize %x < size %x",
- (unsigned int)header, args.bufsize,
- header->size);
+ "header %pK: bufsize %x < size %x",
+ header, args.bufsize, header->size);
WARN(1, "invalid size\n");
ret = -EMSGSIZE;
}
@@ -1284,9 +1382,8 @@ vchiq_release(struct inode *inode, struct file *file)
list);
list_del(pos);
vchiq_log_info(vchiq_arm_log_level,
- "bulk_waiter - cleaned up %x "
- "for pid %d",
- (unsigned int)waiter, waiter->pid);
+ "bulk_waiter - cleaned up %pK for pid %d",
+ waiter, waiter->pid);
kfree(waiter);
}
}
@@ -1385,9 +1482,8 @@ vchiq_dump_platform_instances(void *dump_context)
instance = service->instance;
if (instance && !instance->mark) {
len = snprintf(buf, sizeof(buf),
- "Instance %x: pid %d,%s completions "
- "%d/%d",
- (unsigned int)instance, instance->pid,
+ "Instance %pK: pid %d,%s completions %d/%d",
+ instance, instance->pid,
instance->connected ? " connected, " :
"",
instance->completion_insert -
@@ -1415,8 +1511,7 @@ vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
char buf[80];
int len;
- len = snprintf(buf, sizeof(buf), " instance %x",
- (unsigned int)service->instance);
+ len = snprintf(buf, sizeof(buf), " instance %pK", service->instance);
if ((service->base.callback == service_callback) &&
user_service->is_vchi) {
@@ -1473,8 +1568,7 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
}
down_read(&current->mm->mmap_sem);
- rc = get_user_pages(current, /* task */
- current->mm, /* mm */
+ rc = get_user_pages(
(unsigned long)virt_addr, /* start */
num_pages, /* len */
0, /* gup_flags */
@@ -1485,6 +1579,12 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
prev_idx = -1;
page = NULL;
+ if (rc < 0) {
+ vchiq_log_error(vchiq_arm_log_level,
+ "Failed to get user pages: %d\n", rc);
+ goto out;
+ }
+
while (offset < end_offset) {
int page_offset = offset % PAGE_SIZE;
@@ -1508,11 +1608,13 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
offset += 16;
}
+
+out:
if (page != NULL)
kunmap(page);
for (page_idx = 0; page_idx < num_pages; page_idx++)
- page_cache_release(pages[page_idx]);
+ put_page(pages[page_idx]);
kfree(pages);
}
@@ -1683,8 +1785,6 @@ exit:
VCHIQ_STATUS_T
vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
{
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
-
if (arm_state) {
rwlock_init(&arm_state->susp_res_lock);
@@ -1712,14 +1812,13 @@ vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
arm_state->suspend_timer_running = 0;
- init_timer(&arm_state->suspend_timer);
- arm_state->suspend_timer.data = (unsigned long)(state);
- arm_state->suspend_timer.function = suspend_timer_callback;
+ setup_timer(&arm_state->suspend_timer, suspend_timer_callback,
+ (unsigned long)(state));
arm_state->first_connect = 0;
}
- return status;
+ return VCHIQ_SUCCESS;
}
/*
@@ -2032,20 +2131,20 @@ static void
output_timeout_error(VCHIQ_STATE_T *state)
{
VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
- char service_err[50] = "";
+ char err[50] = "";
int vc_use_count = arm_state->videocore_use_count;
int active_services = state->unused_service;
int i;
if (!arm_state->videocore_use_count) {
- snprintf(service_err, 50, " Videocore usecount is 0");
+ snprintf(err, sizeof(err), " Videocore usecount is 0");
goto output_msg;
}
for (i = 0; i < active_services; i++) {
VCHIQ_SERVICE_T *service_ptr = state->services[i];
if (service_ptr && service_ptr->service_use_count &&
(service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
- snprintf(service_err, 50, " %c%c%c%c(%d) service has "
+ snprintf(err, sizeof(err), " %c%c%c%c(%d) service has "
"use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
service_ptr->base.fourcc),
service_ptr->client_id,
@@ -2059,7 +2158,7 @@ output_timeout_error(VCHIQ_STATE_T *state)
output_msg:
vchiq_log_error(vchiq_susp_log_level,
"timed out waiting for vc suspend (%d).%s",
- arm_state->autosuspend_override, service_err);
+ arm_state->autosuspend_override, err);
}
@@ -2780,7 +2879,7 @@ void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
&vchiq_keepalive_thread_func,
(void *)state,
threadname);
- if (arm_state->ka_thread == NULL) {
+ if (IS_ERR(arm_state->ka_thread)) {
vchiq_log_error(vchiq_susp_log_level,
"vchiq: FATAL: couldn't create thread %s",
threadname);
@@ -2800,28 +2899,27 @@ static int vchiq_probe(struct platform_device *pdev)
void *ptr_err;
fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
-/* Remove comment when booting without Device Tree is no longer supported
if (!fw_node) {
dev_err(&pdev->dev, "Missing firmware node\n");
return -ENOENT;
}
-*/
+
fw = rpi_firmware_get(fw_node);
+ of_node_put(fw_node);
if (!fw)
return -EPROBE_DEFER;
platform_set_drvdata(pdev, fw);
- /* create debugfs entries */
- err = vchiq_debugfs_init();
+ err = vchiq_platform_init(pdev, &g_state);
if (err != 0)
- goto failed_debugfs_init;
+ goto failed_platform_init;
err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
if (err != 0) {
vchiq_log_error(vchiq_arm_log_level,
"Unable to allocate device number");
- goto failed_alloc_chrdev;
+ goto failed_platform_init;
}
cdev_init(&vchiq_cdev, &vchiq_fops);
vchiq_cdev.owner = THIS_MODULE;
@@ -2844,9 +2942,10 @@ static int vchiq_probe(struct platform_device *pdev)
if (IS_ERR(ptr_err))
goto failed_device_create;
- err = vchiq_platform_init(pdev, &g_state);
+ /* create debugfs entries */
+ err = vchiq_debugfs_init();
if (err != 0)
- goto failed_platform_init;
+ goto failed_debugfs_init;
vchiq_log_info(vchiq_arm_log_level,
"vchiq: initialised - version %d (min %d), device %d.%d",
@@ -2855,7 +2954,7 @@ static int vchiq_probe(struct platform_device *pdev)
return 0;
-failed_platform_init:
+failed_debugfs_init:
device_destroy(vchiq_class, vchiq_devid);
failed_device_create:
class_destroy(vchiq_class);
@@ -2864,15 +2963,14 @@ failed_class_create:
err = PTR_ERR(ptr_err);
failed_cdev_add:
unregister_chrdev_region(vchiq_devid, 1);
-failed_alloc_chrdev:
- vchiq_debugfs_deinit();
-failed_debugfs_init:
+failed_platform_init:
vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
return err;
}
static int vchiq_remove(struct platform_device *pdev)
{
+ vchiq_debugfs_deinit();
device_destroy(vchiq_class, vchiq_devid);
class_destroy(vchiq_class);
cdev_del(&vchiq_cdev);
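[Editor's note] Two things change in vchiq_probe(): the node reference taken by of_parse_phandle() is now dropped with of_node_put(), and vchiq_platform_init() runs before any resources that need unwinding, so the goto labels stay in reverse order of acquisition. A condensed sketch of the resulting shape (labels abbreviated, not the full function):

	fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
	if (!fw_node)
		return -ENOENT;
	fw = rpi_firmware_get(fw_node);
	of_node_put(fw_node);			/* balance of_parse_phandle() */
	if (!fw)
		return -EPROBE_DEFER;

	err = vchiq_platform_init(pdev, &g_state);	/* nothing to unwind yet */
	if (err)
		goto failed_platform_init;
	/* then chrdev, cdev, class, device, and debugfs last ... */
	return 0;

failed_platform_init:
	vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
	return err;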
@@ -2890,7 +2988,6 @@ MODULE_DEVICE_TABLE(of, vchiq_of_match);
static struct platform_driver vchiq_driver = {
.driver = {
.name = "bcm2835_vchiq",
- .owner = THIS_MODULE,
.of_match_table = vchiq_of_match,
},
.probe = vchiq_probe,
@@ -2899,4 +2996,5 @@ static struct platform_driver vchiq_driver = {
module_platform_driver(vchiq_driver);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
index 5efc62ffb2f5..7ea29665bd0c 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
@@ -72,7 +72,7 @@ void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
{
connected_init();
- if (mutex_lock_interruptible(&g_connected_mutex) != 0)
+ if (mutex_lock_killable(&g_connected_mutex) != 0)
return;
if (g_connected)
@@ -107,7 +107,7 @@ void vchiq_call_connected_callbacks(void)
connected_init();
- if (mutex_lock_interruptible(&g_connected_mutex) != 0)
+ if (mutex_lock_killable(&g_connected_mutex) != 0)
return;
for (i = 0; i < g_num_deferred_callbacks; i++)
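[Editor's note] These conversions, together with the removal of the local mutex_lock_interruptible_killable() wrapper in vchiq_killable.h further down, rely on the core mutex_lock_killable(), which only returns early for fatal signals, exactly what the old wrapper emulated by masking everything except SHUTDOWN_SIGS. Typical call shape (illustrative):

	if (mutex_lock_killable(&g_connected_mutex))
		return;			/* interrupted by a fatal signal */
	/* ... critical section ... */
	mutex_unlock(&g_connected_mutex);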
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 2c98da4307df..028e90bc1cdc 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -296,12 +296,13 @@ lock_service(VCHIQ_SERVICE_T *service)
void
unlock_service(VCHIQ_SERVICE_T *service)
{
- VCHIQ_STATE_T *state = service->state;
spin_lock(&service_spinlock);
BUG_ON(!service || (service->ref_count == 0));
if (service && service->ref_count) {
service->ref_count--;
if (!service->ref_count) {
+ VCHIQ_STATE_T *state = service->state;
+
BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
state->services[service->localport] = NULL;
} else
@@ -380,9 +381,9 @@ make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason,
VCHIQ_HEADER_T *header, void *bulk_userdata)
{
VCHIQ_STATUS_T status;
- vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %x, %x)",
+ vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
service->state->id, service->localport, reason_names[reason],
- (unsigned int)header, (unsigned int)bulk_userdata);
+ header, bulk_userdata);
status = service->base.callback(reason, header, service->handle,
bulk_userdata);
if (status == VCHIQ_ERROR) {
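[Editor's note] Logging pointers with %pK instead of casting them to unsigned int is 64-bit clean (no truncation) and honours kptr_restrict, so addresses can be hidden from unprivileged readers. The same substitution recurs throughout the rest of the file; the general form is:

	vchiq_log_trace(vchiq_core_log_level, "header %pK, userdata %pK",
			header, bulk_userdata);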
@@ -406,28 +407,24 @@ vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate)
}
static inline void
-remote_event_create(REMOTE_EVENT_T *event)
+remote_event_create(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event)
{
event->armed = 0;
/* Don't clear the 'fired' flag because it may already have been set
** by the other side. */
- sema_init(event->event, 0);
-}
-
-static inline void
-remote_event_destroy(REMOTE_EVENT_T *event)
-{
- (void)event;
+ sema_init((struct semaphore *)((char *)state + event->event), 0);
}
static inline int
-remote_event_wait(REMOTE_EVENT_T *event)
+remote_event_wait(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event)
{
if (!event->fired) {
event->armed = 1;
- dsb();
+ dsb(sy);
if (!event->fired) {
- if (down_interruptible(event->event) != 0) {
+ if (down_interruptible(
+ (struct semaphore *)
+ ((char *)state + event->event)) != 0) {
event->armed = 0;
return 0;
}
@@ -441,34 +438,34 @@ remote_event_wait(REMOTE_EVENT_T *event)
}
static inline void
-remote_event_signal_local(REMOTE_EVENT_T *event)
+remote_event_signal_local(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event)
{
event->armed = 0;
- up(event->event);
+ up((struct semaphore *)((char *)state + event->event));
}
static inline void
-remote_event_poll(REMOTE_EVENT_T *event)
+remote_event_poll(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event)
{
if (event->fired && event->armed)
- remote_event_signal_local(event);
+ remote_event_signal_local(state, event);
}
void
remote_event_pollall(VCHIQ_STATE_T *state)
{
- remote_event_poll(&state->local->sync_trigger);
- remote_event_poll(&state->local->sync_release);
- remote_event_poll(&state->local->trigger);
- remote_event_poll(&state->local->recycle);
+ remote_event_poll(state, &state->local->sync_trigger);
+ remote_event_poll(state, &state->local->sync_release);
+ remote_event_poll(state, &state->local->trigger);
+ remote_event_poll(state, &state->local->recycle);
}
/* Round up message sizes so that any space at the end of a slot is always big
** enough for a header. This relies on header size being a power of two, which
** has been verified earlier by a static assertion. */
-static inline unsigned int
-calc_stride(unsigned int size)
+static inline size_t
+calc_stride(size_t size)
{
/* Allow room for the header */
size += sizeof(VCHIQ_HEADER_T);
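[Editor's note] calc_stride() now takes and returns size_t so the arithmetic stays consistent on 64-bit builds. The remainder of its body is not shown in this hunk; the rounding described in the comment is the usual power-of-two round-up, which in sketch form (not the literal function body) is:

	/* round size up to a multiple of sizeof(VCHIQ_HEADER_T),
	 * which is known to be a power of two
	 */
	size = (size + sizeof(VCHIQ_HEADER_T) - 1) &
	       ~(sizeof(VCHIQ_HEADER_T) - 1);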
@@ -541,13 +538,13 @@ request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type)
wmb();
/* ... and ensure the slot handler runs. */
- remote_event_signal_local(&state->local->trigger);
+ remote_event_signal_local(state, &state->local->trigger);
}
/* Called from queue_message, by the slot handler and application threads,
** with slot_mutex held */
static VCHIQ_HEADER_T *
-reserve_space(VCHIQ_STATE_T *state, int space, int is_blocking)
+reserve_space(VCHIQ_STATE_T *state, size_t space, int is_blocking)
{
VCHIQ_SHARED_STATE_T *local = state->local;
int tx_pos = state->local_tx_pos;
@@ -626,8 +623,8 @@ process_free_queue(VCHIQ_STATE_T *state)
char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
int data_found = 0;
- vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%x %x %x",
- state->id, slot_index, (unsigned int)data,
+ vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
+ state->id, slot_index, data,
local->slot_queue_recycle, slot_queue_available);
/* Initialise the bitmask for services which have used this
@@ -659,16 +656,10 @@ process_free_queue(VCHIQ_STATE_T *state)
up(&service_quota->quota_event);
else if (count == 0) {
vchiq_log_error(vchiq_core_log_level,
- "service %d "
- "message_use_count=%d "
- "(header %x, msgid %x, "
- "header->msgid %x, "
- "header->size %x)",
+ "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
port,
- service_quota->
- message_use_count,
- (unsigned int)header, msgid,
- header->msgid,
+ service_quota->message_use_count,
+ header, msgid, header->msgid,
header->size);
WARN(1, "invalid message use count\n");
}
@@ -690,26 +681,16 @@ process_free_queue(VCHIQ_STATE_T *state)
up(&service_quota->quota_event);
vchiq_log_trace(
vchiq_core_log_level,
- "%d: pfq:%d %x@%x - "
- "slot_use->%d",
+ "%d: pfq:%d %x@%pK - slot_use->%d",
state->id, port,
- header->size,
- (unsigned int)header,
+ header->size, header,
count - 1);
} else {
vchiq_log_error(
vchiq_core_log_level,
- "service %d "
- "slot_use_count"
- "=%d (header %x"
- ", msgid %x, "
- "header->msgid"
- " %x, header->"
- "size %x)",
- port, count,
- (unsigned int)header,
- msgid,
- header->msgid,
+ "service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
+ port, count, header,
+ msgid, header->msgid,
header->size);
WARN(1, "bad slot use count\n");
}
@@ -721,10 +702,9 @@ process_free_queue(VCHIQ_STATE_T *state)
pos += calc_stride(header->size);
if (pos > VCHIQ_SLOT_SIZE) {
vchiq_log_error(vchiq_core_log_level,
- "pfq - pos %x: header %x, msgid %x, "
- "header->msgid %x, header->size %x",
- pos, (unsigned int)header, msgid,
- header->msgid, header->size);
+ "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
+ pos, header, msgid, header->msgid,
+ header->size);
WARN(1, "invalid slot position\n");
}
}
@@ -746,18 +726,66 @@ process_free_queue(VCHIQ_STATE_T *state)
}
}
+static ssize_t
+memcpy_copy_callback(
+ void *context, void *dest,
+ size_t offset, size_t maxsize)
+{
+ void *src = context;
+
+ memcpy(dest + offset, src + offset, maxsize);
+ return maxsize;
+}
+
+static ssize_t
+copy_message_data(
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context,
+ void *dest,
+ size_t size)
+{
+ size_t pos = 0;
+
+ while (pos < size) {
+ ssize_t callback_result;
+ size_t max_bytes = size - pos;
+
+ callback_result =
+ copy_callback(context, dest + pos,
+ pos, max_bytes);
+
+ if (callback_result < 0)
+ return callback_result;
+
+ if (!callback_result)
+ return -EIO;
+
+ if (callback_result > max_bytes)
+ return -EIO;
+
+ pos += callback_result;
+ }
+
+ return size;
+}
+
/* Called by the slot handler and application threads */
static VCHIQ_STATUS_T
queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
- int msgid, const VCHIQ_ELEMENT_T *elements,
- int count, int size, int flags)
+ int msgid,
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context,
+ size_t size,
+ int flags)
{
VCHIQ_SHARED_STATE_T *local;
VCHIQ_SERVICE_QUOTA_T *service_quota = NULL;
VCHIQ_HEADER_T *header;
int type = VCHIQ_MSG_TYPE(msgid);
- unsigned int stride;
+ size_t stride;
local = state->local;
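[Editor's note] The VCHIQ_ELEMENT_T array interface is replaced by a copy-callback interface: queue_message() now asks the caller to write the payload into the slot through copy_callback, and copy_message_data() loops until the callback has produced 'size' bytes. A kernel-side client could supply a callback like the one below (kbuf_copy, msg_buf and msg_len are hypothetical names, not part of the patch); because it copies everything on the first call, the loop completes in a single pass:

static ssize_t kbuf_copy(void *context, void *dest,
			 size_t offset, size_t maxsize)
{
	/* context is the start of the source buffer; dest already points
	 * at the current write position inside the message slot
	 */
	memcpy(dest, (const char *)context + offset, maxsize);
	return maxsize;
}

	/* ... queue a message from a kernel buffer: */
	status = vchiq_queue_message(handle, kbuf_copy, msg_buf, msg_len);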
@@ -766,7 +794,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
- (mutex_lock_interruptible(&state->slot_mutex) != 0))
+ (mutex_lock_killable(&state->slot_mutex) != 0))
return VCHIQ_RETRY;
if (type == VCHIQ_MSG_DATA) {
@@ -822,7 +850,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
service_quota->slot_quota))) {
spin_unlock(&quota_spinlock);
vchiq_log_trace(vchiq_core_log_level,
- "%d: qm:%d %s,%x - quota stall "
+ "%d: qm:%d %s,%zx - quota stall "
"(msg %d, slot %d)",
state->id, service->localport,
msg_type_str(type), size,
@@ -835,7 +863,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
return VCHIQ_RETRY;
if (service->closing)
return VCHIQ_ERROR;
- if (mutex_lock_interruptible(&state->slot_mutex) != 0)
+ if (mutex_lock_killable(&state->slot_mutex) != 0)
return VCHIQ_RETRY;
if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
/* The service has been closed */
@@ -863,43 +891,37 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
}
if (type == VCHIQ_MSG_DATA) {
- int i, pos;
+ ssize_t callback_result;
int tx_end_index;
int slot_use_count;
vchiq_log_info(vchiq_core_log_level,
- "%d: qm %s@%x,%x (%d->%d)",
- state->id,
- msg_type_str(VCHIQ_MSG_TYPE(msgid)),
- (unsigned int)header, size,
- VCHIQ_MSG_SRCPORT(msgid),
+ "%d: qm %s@%pK,%zx (%d->%d)",
+ state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ header, size, VCHIQ_MSG_SRCPORT(msgid),
VCHIQ_MSG_DSTPORT(msgid));
BUG_ON(!service);
BUG_ON((flags & (QMFLAGS_NO_MUTEX_LOCK |
QMFLAGS_NO_MUTEX_UNLOCK)) != 0);
- for (i = 0, pos = 0; i < (unsigned int)count;
- pos += elements[i++].size)
- if (elements[i].size) {
- if (vchiq_copy_from_user
- (header->data + pos, elements[i].data,
- (size_t) elements[i].size) !=
- VCHIQ_SUCCESS) {
- mutex_unlock(&state->slot_mutex);
- VCHIQ_SERVICE_STATS_INC(service,
+ callback_result =
+ copy_message_data(copy_callback, context,
+ header->data, size);
+
+ if (callback_result < 0) {
+ mutex_unlock(&state->slot_mutex);
+ VCHIQ_SERVICE_STATS_INC(service,
error_count);
- return VCHIQ_ERROR;
- }
- if (i == 0) {
- if (SRVTRACE_ENABLED(service,
- VCHIQ_LOG_INFO))
- vchiq_log_dump_mem("Sent", 0,
- header->data + pos,
- min(64u,
- elements[0].size));
- }
- }
+ return VCHIQ_ERROR;
+ }
+
+ if (SRVTRACE_ENABLED(service,
+ VCHIQ_LOG_INFO))
+ vchiq_log_dump_mem("Sent", 0,
+ header->data,
+ min((size_t)64,
+ (size_t)callback_result));
spin_lock(&quota_spinlock);
service_quota->message_use_count++;
@@ -927,7 +949,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
if (slot_use_count)
vchiq_log_trace(vchiq_core_log_level,
- "%d: qm:%d %s,%x - slot_use->%d (hdr %p)",
+ "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)",
state->id, service->localport,
msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
slot_use_count, header);
@@ -936,15 +958,22 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
} else {
vchiq_log_info(vchiq_core_log_level,
- "%d: qm %s@%x,%x (%d->%d)", state->id,
+ "%d: qm %s@%pK,%zx (%d->%d)", state->id,
msg_type_str(VCHIQ_MSG_TYPE(msgid)),
- (unsigned int)header, size,
- VCHIQ_MSG_SRCPORT(msgid),
+ header, size, VCHIQ_MSG_SRCPORT(msgid),
VCHIQ_MSG_DSTPORT(msgid));
if (size != 0) {
- WARN_ON(!((count == 1) && (size == elements[0].size)));
- memcpy(header->data, elements[0].data,
- elements[0].size);
+ /* It is assumed for now that this code path
+ * only happens from calls inside this file.
+ *
+ * External callers are through the vchiq_queue_message
+ * path which always sets the type to be VCHIQ_MSG_DATA
+ *
+ * At first glance this appears to be correct but
+ * more review is needed.
+ */
+ copy_message_data(copy_callback, context,
+ header->data, size);
}
VCHIQ_STATS_INC(state, ctrl_tx_count);
}
@@ -960,7 +989,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
vchiq_log_info(SRVTRACE_LEVEL(service),
- "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
+ "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
msg_type_str(VCHIQ_MSG_TYPE(msgid)),
VCHIQ_MSG_TYPE(msgid),
VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
@@ -990,19 +1019,24 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
/* Called by the slot handler and application threads */
static VCHIQ_STATUS_T
queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
- int msgid, const VCHIQ_ELEMENT_T *elements,
- int count, int size, int is_blocking)
+ int msgid,
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context,
+ int size,
+ int is_blocking)
{
VCHIQ_SHARED_STATE_T *local;
VCHIQ_HEADER_T *header;
+ ssize_t callback_result;
local = state->local;
if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
- (mutex_lock_interruptible(&state->sync_mutex) != 0))
+ (mutex_lock_killable(&state->sync_mutex) != 0))
return VCHIQ_RETRY;
- remote_event_wait(&local->sync_release);
+ remote_event_wait(state, &local->sync_release);
rmb();
@@ -1017,52 +1051,34 @@ queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
state->id, oldmsgid);
}
- if (service) {
- int i, pos;
+ vchiq_log_info(vchiq_sync_log_level,
+ "%d: qms %s@%pK,%x (%d->%d)", state->id,
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ header, size, VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid));
- vchiq_log_info(vchiq_sync_log_level,
- "%d: qms %s@%x,%x (%d->%d)", state->id,
- msg_type_str(VCHIQ_MSG_TYPE(msgid)),
- (unsigned int)header, size,
- VCHIQ_MSG_SRCPORT(msgid),
- VCHIQ_MSG_DSTPORT(msgid));
+ callback_result =
+ copy_message_data(copy_callback, context,
+ header->data, size);
- for (i = 0, pos = 0; i < (unsigned int)count;
- pos += elements[i++].size)
- if (elements[i].size) {
- if (vchiq_copy_from_user
- (header->data + pos, elements[i].data,
- (size_t) elements[i].size) !=
- VCHIQ_SUCCESS) {
- mutex_unlock(&state->sync_mutex);
- VCHIQ_SERVICE_STATS_INC(service,
- error_count);
- return VCHIQ_ERROR;
- }
- if (i == 0) {
- if (vchiq_sync_log_level >=
- VCHIQ_LOG_TRACE)
- vchiq_log_dump_mem("Sent Sync",
- 0, header->data + pos,
- min(64u,
- elements[0].size));
- }
- }
+ if (callback_result < 0) {
+ mutex_unlock(&state->slot_mutex);
+ VCHIQ_SERVICE_STATS_INC(service,
+ error_count);
+ return VCHIQ_ERROR;
+ }
+
+ if (service) {
+ if (SRVTRACE_ENABLED(service,
+ VCHIQ_LOG_INFO))
+ vchiq_log_dump_mem("Sent", 0,
+ header->data,
+ min((size_t)64,
+ (size_t)callback_result));
VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
} else {
- vchiq_log_info(vchiq_sync_log_level,
- "%d: qms %s@%x,%x (%d->%d)", state->id,
- msg_type_str(VCHIQ_MSG_TYPE(msgid)),
- (unsigned int)header, size,
- VCHIQ_MSG_SRCPORT(msgid),
- VCHIQ_MSG_DSTPORT(msgid));
- if (size != 0) {
- WARN_ON(!((count == 1) && (size == elements[0].size)));
- memcpy(header->data, elements[0].data,
- elements[0].size);
- }
VCHIQ_STATS_INC(state, ctrl_tx_count);
}
@@ -1175,11 +1191,16 @@ notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue,
VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE;
int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport,
service->remoteport);
- VCHIQ_ELEMENT_T element = { &bulk->actual, 4 };
/* Only reply to non-dummy bulk requests */
if (bulk->remote_data) {
- status = queue_message(service->state, NULL,
- msgid, &element, 1, 4, 0);
+ status = queue_message(
+ service->state,
+ NULL,
+ msgid,
+ memcpy_copy_callback,
+ &bulk->actual,
+ 4,
+ 0);
if (status != VCHIQ_SUCCESS)
break;
}
@@ -1344,7 +1365,7 @@ resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
WARN_ON(!((int)(queue->local_insert - queue->process) > 0));
WARN_ON(!((int)(queue->remote_insert - queue->process) > 0));
- rc = mutex_lock_interruptible(&state->bulk_transfer_mutex);
+ rc = mutex_lock_killable(&state->bulk_transfer_mutex);
if (rc != 0)
break;
@@ -1356,26 +1377,22 @@ resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
"Send Bulk to" : "Recv Bulk from";
if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED)
vchiq_log_info(SRVTRACE_LEVEL(service),
- "%s %c%c%c%c d:%d len:%d %x<->%x",
+ "%s %c%c%c%c d:%d len:%d %pK<->%pK",
header,
VCHIQ_FOURCC_AS_4CHARS(
service->base.fourcc),
- service->remoteport,
- bulk->size,
- (unsigned int)bulk->data,
- (unsigned int)bulk->remote_data);
+ service->remoteport, bulk->size,
+ bulk->data, bulk->remote_data);
else
vchiq_log_info(SRVTRACE_LEVEL(service),
"%s %c%c%c%c d:%d ABORTED - tx len:%d,"
- " rx len:%d %x<->%x",
+ " rx len:%d %pK<->%pK",
header,
VCHIQ_FOURCC_AS_4CHARS(
service->base.fourcc),
service->remoteport,
- bulk->size,
- bulk->remote_size,
- (unsigned int)bulk->data,
- (unsigned int)bulk->remote_data);
+ bulk->size, bulk->remote_size,
+ bulk->data, bulk->remote_data);
}
vchiq_complete_bulk(bulk);
@@ -1511,9 +1528,8 @@ parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
fourcc = payload->fourcc;
vchiq_log_info(vchiq_core_log_level,
- "%d: prs OPEN@%x (%d->'%c%c%c%c')",
- state->id, (unsigned int)header,
- localport,
+ "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
+ state->id, header, localport,
VCHIQ_FOURCC_AS_4CHARS(fourcc));
service = get_listening_service(state, fourcc);
@@ -1544,10 +1560,6 @@ parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
struct vchiq_openack_payload ack_payload = {
service->version
};
- VCHIQ_ELEMENT_T body = {
- &ack_payload,
- sizeof(ack_payload)
- };
if (state->version_common <
VCHIQ_VERSION_SYNCHRONOUS_MODE)
@@ -1557,21 +1569,28 @@ parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
if (service->sync &&
(state->version_common >=
VCHIQ_VERSION_SYNCHRONOUS_MODE)) {
- if (queue_message_sync(state, NULL,
+ if (queue_message_sync(
+ state,
+ NULL,
VCHIQ_MAKE_MSG(
VCHIQ_MSG_OPENACK,
service->localport,
remoteport),
- &body, 1, sizeof(ack_payload),
+ memcpy_copy_callback,
+ &ack_payload,
+ sizeof(ack_payload),
0) == VCHIQ_RETRY)
goto bail_not_ready;
} else {
- if (queue_message(state, NULL,
- VCHIQ_MAKE_MSG(
+ if (queue_message(state,
+ NULL,
+ VCHIQ_MAKE_MSG(
VCHIQ_MSG_OPENACK,
service->localport,
remoteport),
- &body, 1, sizeof(ack_payload),
+ memcpy_copy_callback,
+ &ack_payload,
+ sizeof(ack_payload),
0) == VCHIQ_RETRY)
goto bail_not_ready;
}
@@ -1650,7 +1669,7 @@ parse_rx_slots(VCHIQ_STATE_T *state)
header = (VCHIQ_HEADER_T *)(state->rx_data +
(state->rx_pos & VCHIQ_SLOT_MASK));
- DEBUG_VALUE(PARSE_HEADER, (int)header);
+ DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
msgid = header->msgid;
DEBUG_VALUE(PARSE_MSGID, msgid);
size = header->size;
@@ -1684,21 +1703,18 @@ parse_rx_slots(VCHIQ_STATE_T *state)
remoteport);
if (service)
vchiq_log_warning(vchiq_core_log_level,
- "%d: prs %s@%x (%d->%d) - "
- "found connected service %d",
+ "%d: prs %s@%pK (%d->%d) - found connected service %d",
state->id, msg_type_str(type),
- (unsigned int)header,
- remoteport, localport,
+ header, remoteport, localport,
service->localport);
}
if (!service) {
vchiq_log_error(vchiq_core_log_level,
- "%d: prs %s@%x (%d->%d) - "
- "invalid/closed service %d",
+ "%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
state->id, msg_type_str(type),
- (unsigned int)header,
- remoteport, localport, localport);
+ header, remoteport, localport,
+ localport);
goto skip_message;
}
break;
@@ -1723,12 +1739,11 @@ parse_rx_slots(VCHIQ_STATE_T *state)
min(64, size));
}
- if (((unsigned int)header & VCHIQ_SLOT_MASK) + calc_stride(size)
- > VCHIQ_SLOT_SIZE) {
+ if (((unsigned long)header & VCHIQ_SLOT_MASK) +
+ calc_stride(size) > VCHIQ_SLOT_SIZE) {
vchiq_log_error(vchiq_core_log_level,
- "header %x (msgid %x) - size %x too big for "
- "slot",
- (unsigned int)header, (unsigned int)msgid,
+ "header %pK (msgid %x) - size %x too big for slot",
+ header, (unsigned int)msgid,
(unsigned int)size);
WARN(1, "oversized for slot\n");
}
@@ -1747,9 +1762,9 @@ parse_rx_slots(VCHIQ_STATE_T *state)
service->peer_version = payload->version;
}
vchiq_log_info(vchiq_core_log_level,
- "%d: prs OPENACK@%x,%x (%d->%d) v:%d",
- state->id, (unsigned int)header, size,
- remoteport, localport, service->peer_version);
+ "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
+ state->id, header, size, remoteport, localport,
+ service->peer_version);
if (service->srvstate ==
VCHIQ_SRVSTATE_OPENING) {
service->remoteport = remoteport;
@@ -1765,9 +1780,8 @@ parse_rx_slots(VCHIQ_STATE_T *state)
WARN_ON(size != 0); /* There should be no data */
vchiq_log_info(vchiq_core_log_level,
- "%d: prs CLOSE@%x (%d->%d)",
- state->id, (unsigned int)header,
- remoteport, localport);
+ "%d: prs CLOSE@%pK (%d->%d)",
+ state->id, header, remoteport, localport);
mark_service_closing_internal(service, 1);
@@ -1783,9 +1797,8 @@ parse_rx_slots(VCHIQ_STATE_T *state)
break;
case VCHIQ_MSG_DATA:
vchiq_log_info(vchiq_core_log_level,
- "%d: prs DATA@%x,%x (%d->%d)",
- state->id, (unsigned int)header, size,
- remoteport, localport);
+ "%d: prs DATA@%pK,%x (%d->%d)",
+ state->id, header, size, remoteport, localport);
if ((service->remoteport == remoteport)
&& (service->srvstate ==
@@ -1808,8 +1821,7 @@ parse_rx_slots(VCHIQ_STATE_T *state)
break;
case VCHIQ_MSG_CONNECT:
vchiq_log_info(vchiq_core_log_level,
- "%d: prs CONNECT@%x",
- state->id, (unsigned int)header);
+ "%d: prs CONNECT@%pK", state->id, header);
state->version_common = ((VCHIQ_SLOT_ZERO_T *)
state->slot_data)->version;
up(&state->connect);
@@ -1827,7 +1839,7 @@ parse_rx_slots(VCHIQ_STATE_T *state)
int resolved = 0;
DEBUG_TRACE(PARSE_LINE);
- if (mutex_lock_interruptible(
+ if (mutex_lock_killable(
&service->bulk_mutex) != 0) {
DEBUG_TRACE(PARSE_LINE);
goto bail_not_ready;
@@ -1838,17 +1850,15 @@ parse_rx_slots(VCHIQ_STATE_T *state)
bulk = &queue->bulks[
BULK_INDEX(queue->remote_insert)];
bulk->remote_data =
- (void *)((int *)header->data)[0];
+ (void *)(long)((int *)header->data)[0];
bulk->remote_size = ((int *)header->data)[1];
wmb();
vchiq_log_info(vchiq_core_log_level,
- "%d: prs %s@%x (%d->%d) %x@%x",
+ "%d: prs %s@%pK (%d->%d) %x@%pK",
state->id, msg_type_str(type),
- (unsigned int)header,
- remoteport, localport,
- bulk->remote_size,
- (unsigned int)bulk->remote_data);
+ header, remoteport, localport,
+ bulk->remote_size, bulk->remote_data);
queue->remote_insert++;
@@ -1893,7 +1903,7 @@ parse_rx_slots(VCHIQ_STATE_T *state)
&service->bulk_rx : &service->bulk_tx;
DEBUG_TRACE(PARSE_LINE);
- if (mutex_lock_interruptible(
+ if (mutex_lock_killable(
&service->bulk_mutex) != 0) {
DEBUG_TRACE(PARSE_LINE);
goto bail_not_ready;
@@ -1901,11 +1911,10 @@ parse_rx_slots(VCHIQ_STATE_T *state)
if ((int)(queue->remote_insert -
queue->local_insert) >= 0) {
vchiq_log_error(vchiq_core_log_level,
- "%d: prs %s@%x (%d->%d) "
+ "%d: prs %s@%pK (%d->%d) "
"unexpected (ri=%d,li=%d)",
state->id, msg_type_str(type),
- (unsigned int)header,
- remoteport, localport,
+ header, remoteport, localport,
queue->remote_insert,
queue->local_insert);
mutex_unlock(&service->bulk_mutex);
@@ -1921,11 +1930,10 @@ parse_rx_slots(VCHIQ_STATE_T *state)
queue->remote_insert++;
vchiq_log_info(vchiq_core_log_level,
- "%d: prs %s@%x (%d->%d) %x@%x",
+ "%d: prs %s@%pK (%d->%d) %x@%pK",
state->id, msg_type_str(type),
- (unsigned int)header,
- remoteport, localport,
- bulk->actual, (unsigned int)bulk->data);
+ header, remoteport, localport,
+ bulk->actual, bulk->data);
vchiq_log_trace(vchiq_core_log_level,
"%d: prs:%d %cx li=%x ri=%x p=%x",
@@ -1947,14 +1955,14 @@ parse_rx_slots(VCHIQ_STATE_T *state)
break;
case VCHIQ_MSG_PADDING:
vchiq_log_trace(vchiq_core_log_level,
- "%d: prs PADDING@%x,%x",
- state->id, (unsigned int)header, size);
+ "%d: prs PADDING@%pK,%x",
+ state->id, header, size);
break;
case VCHIQ_MSG_PAUSE:
/* If initiated, signal the application thread */
vchiq_log_trace(vchiq_core_log_level,
- "%d: prs PAUSE@%x,%x",
- state->id, (unsigned int)header, size);
+ "%d: prs PAUSE@%pK,%x",
+ state->id, header, size);
if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
vchiq_log_error(vchiq_core_log_level,
"%d: PAUSE received in state PAUSED",
@@ -1977,8 +1985,8 @@ parse_rx_slots(VCHIQ_STATE_T *state)
break;
case VCHIQ_MSG_RESUME:
vchiq_log_trace(vchiq_core_log_level,
- "%d: prs RESUME@%x,%x",
- state->id, (unsigned int)header, size);
+ "%d: prs RESUME@%pK,%x",
+ state->id, header, size);
/* Release the slot mutex */
mutex_unlock(&state->slot_mutex);
if (state->is_master)
@@ -1999,8 +2007,8 @@ parse_rx_slots(VCHIQ_STATE_T *state)
default:
vchiq_log_error(vchiq_core_log_level,
- "%d: prs invalid msgid %x@%x,%x",
- state->id, msgid, (unsigned int)header, size);
+ "%d: prs invalid msgid %x@%pK,%x",
+ state->id, msgid, header, size);
WARN(1, "invalid message\n");
break;
}
@@ -2039,7 +2047,7 @@ slot_handler_func(void *v)
while (1) {
DEBUG_COUNT(SLOT_HANDLER_COUNT);
DEBUG_TRACE(SLOT_HANDLER_LINE);
- remote_event_wait(&local->trigger);
+ remote_event_wait(state, &local->trigger);
rmb();
@@ -2128,7 +2136,7 @@ recycle_func(void *v)
VCHIQ_SHARED_STATE_T *local = state->local;
while (1) {
- remote_event_wait(&local->recycle);
+ remote_event_wait(state, &local->recycle);
process_free_queue(state);
}
@@ -2151,7 +2159,7 @@ sync_func(void *v)
int type;
unsigned int localport, remoteport;
- remote_event_wait(&local->sync_trigger);
+ remote_event_wait(state, &local->sync_trigger);
rmb();
@@ -2165,11 +2173,9 @@ sync_func(void *v)
if (!service) {
vchiq_log_error(vchiq_sync_log_level,
- "%d: sf %s@%x (%d->%d) - "
- "invalid/closed service %d",
+ "%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
state->id, msg_type_str(type),
- (unsigned int)header,
- remoteport, localport, localport);
+ header, remoteport, localport, localport);
release_message_sync(state, header);
continue;
}
@@ -2199,9 +2205,9 @@ sync_func(void *v)
service->peer_version = payload->version;
}
vchiq_log_info(vchiq_sync_log_level,
- "%d: sf OPENACK@%x,%x (%d->%d) v:%d",
- state->id, (unsigned int)header, size,
- remoteport, localport, service->peer_version);
+ "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
+ state->id, header, size, remoteport, localport,
+ service->peer_version);
if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
service->remoteport = remoteport;
vchiq_set_service_state(service,
@@ -2214,9 +2220,8 @@ sync_func(void *v)
case VCHIQ_MSG_DATA:
vchiq_log_trace(vchiq_sync_log_level,
- "%d: sf DATA@%x,%x (%d->%d)",
- state->id, (unsigned int)header, size,
- remoteport, localport);
+ "%d: sf DATA@%pK,%x (%d->%d)",
+ state->id, header, size, remoteport, localport);
if ((service->remoteport == remoteport) &&
(service->srvstate ==
@@ -2234,8 +2239,8 @@ sync_func(void *v)
default:
vchiq_log_error(vchiq_sync_log_level,
- "%d: sf unexpected msgid %x@%x,%x",
- state->id, msgid, (unsigned int)header, size);
+ "%d: sf unexpected msgid %x@%pK,%x",
+ state->id, msgid, header, size);
release_message_sync(state, header);
break;
}
@@ -2268,7 +2273,8 @@ get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
VCHIQ_SLOT_ZERO_T *
vchiq_init_slots(void *mem_base, int mem_size)
{
- int mem_align = (VCHIQ_SLOT_SIZE - (int)mem_base) & VCHIQ_SLOT_MASK;
+ int mem_align =
+ (int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
VCHIQ_SLOT_ZERO_T *slot_zero =
(VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align);
int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
@@ -2316,16 +2322,16 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
int i;
vchiq_log_warning(vchiq_core_log_level,
- "%s: slot_zero = 0x%08lx, is_master = %d",
- __func__, (unsigned long)slot_zero, is_master);
+ "%s: slot_zero = %pK, is_master = %d",
+ __func__, slot_zero, is_master);
/* Check the input configuration */
if (slot_zero->magic != VCHIQ_MAGIC) {
vchiq_loud_error_header();
vchiq_loud_error("Invalid VCHIQ magic value found.");
- vchiq_loud_error("slot_zero=%x: magic=%x (expected %x)",
- (unsigned int)slot_zero, slot_zero->magic, VCHIQ_MAGIC);
+ vchiq_loud_error("slot_zero=%pK: magic=%x (expected %x)",
+ slot_zero, slot_zero->magic, VCHIQ_MAGIC);
vchiq_loud_error_footer();
return VCHIQ_ERROR;
}
@@ -2333,10 +2339,8 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
if (slot_zero->version < VCHIQ_VERSION_MIN) {
vchiq_loud_error_header();
vchiq_loud_error("Incompatible VCHIQ versions found.");
- vchiq_loud_error("slot_zero=%x: VideoCore version=%d "
- "(minimum %d)",
- (unsigned int)slot_zero, slot_zero->version,
- VCHIQ_VERSION_MIN);
+ vchiq_loud_error("slot_zero=%pK: VideoCore version=%d (minimum %d)",
+ slot_zero, slot_zero->version, VCHIQ_VERSION_MIN);
vchiq_loud_error("Restart with a newer VideoCore image.");
vchiq_loud_error_footer();
return VCHIQ_ERROR;
@@ -2345,10 +2349,8 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
if (VCHIQ_VERSION < slot_zero->version_min) {
vchiq_loud_error_header();
vchiq_loud_error("Incompatible VCHIQ versions found.");
- vchiq_loud_error("slot_zero=%x: version=%d (VideoCore "
- "minimum %d)",
- (unsigned int)slot_zero, VCHIQ_VERSION,
- slot_zero->version_min);
+ vchiq_loud_error("slot_zero=%pK: version=%d (VideoCore minimum %d)",
+ slot_zero, VCHIQ_VERSION, slot_zero->version_min);
vchiq_loud_error("Restart with a newer kernel.");
vchiq_loud_error_footer();
return VCHIQ_ERROR;
@@ -2360,26 +2362,20 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
(slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) {
vchiq_loud_error_header();
if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T))
- vchiq_loud_error("slot_zero=%x: slot_zero_size=%x "
- "(expected %x)",
- (unsigned int)slot_zero,
- slot_zero->slot_zero_size,
- sizeof(VCHIQ_SLOT_ZERO_T));
+ vchiq_loud_error("slot_zero=%pK: slot_zero_size=%d (expected %d)",
+ slot_zero, slot_zero->slot_zero_size,
+ (int)sizeof(VCHIQ_SLOT_ZERO_T));
if (slot_zero->slot_size != VCHIQ_SLOT_SIZE)
- vchiq_loud_error("slot_zero=%x: slot_size=%d "
- "(expected %d",
- (unsigned int)slot_zero, slot_zero->slot_size,
+ vchiq_loud_error("slot_zero=%pK: slot_size=%d (expected %d)",
+ slot_zero, slot_zero->slot_size,
VCHIQ_SLOT_SIZE);
if (slot_zero->max_slots != VCHIQ_MAX_SLOTS)
- vchiq_loud_error("slot_zero=%x: max_slots=%d "
- "(expected %d)",
- (unsigned int)slot_zero, slot_zero->max_slots,
+ vchiq_loud_error("slot_zero=%pK: max_slots=%d (expected %d)",
+ slot_zero, slot_zero->max_slots,
VCHIQ_MAX_SLOTS);
if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)
- vchiq_loud_error("slot_zero=%x: max_slots_per_side=%d "
- "(expected %d)",
- (unsigned int)slot_zero,
- slot_zero->max_slots_per_side,
+ vchiq_loud_error("slot_zero=%pK: max_slots_per_side=%d (expected %d)",
+ slot_zero, slot_zero->max_slots_per_side,
VCHIQ_MAX_SLOTS_PER_SIDE);
vchiq_loud_error_footer();
return VCHIQ_ERROR;
@@ -2463,24 +2459,24 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
state->data_use_count = 0;
state->data_quota = state->slot_queue_available - 1;
- local->trigger.event = &state->trigger_event;
- remote_event_create(&local->trigger);
+ local->trigger.event = offsetof(VCHIQ_STATE_T, trigger_event);
+ remote_event_create(state, &local->trigger);
local->tx_pos = 0;
- local->recycle.event = &state->recycle_event;
- remote_event_create(&local->recycle);
+ local->recycle.event = offsetof(VCHIQ_STATE_T, recycle_event);
+ remote_event_create(state, &local->recycle);
local->slot_queue_recycle = state->slot_queue_available;
- local->sync_trigger.event = &state->sync_trigger_event;
- remote_event_create(&local->sync_trigger);
+ local->sync_trigger.event = offsetof(VCHIQ_STATE_T, sync_trigger_event);
+ remote_event_create(state, &local->sync_trigger);
- local->sync_release.event = &state->sync_release_event;
- remote_event_create(&local->sync_release);
+ local->sync_release.event = offsetof(VCHIQ_STATE_T, sync_release_event);
+ remote_event_create(state, &local->sync_release);
/* At start-of-day, the slot is empty and available */
((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid
= VCHIQ_MSGID_PADDING;
- remote_event_signal_local(&local->sync_release);
+ remote_event_signal_local(state, &local->sync_release);
local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
@@ -2494,7 +2490,7 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
(void *)state,
threadname);
- if (state->slot_handler_thread == NULL) {
+ if (IS_ERR(state->slot_handler_thread)) {
vchiq_loud_error_header();
vchiq_loud_error("couldn't create thread %s", threadname);
vchiq_loud_error_footer();
@@ -2507,7 +2503,7 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
state->recycle_thread = kthread_create(&recycle_func,
(void *)state,
threadname);
- if (state->recycle_thread == NULL) {
+ if (IS_ERR(state->recycle_thread)) {
vchiq_loud_error_header();
vchiq_loud_error("couldn't create thread %s", threadname);
vchiq_loud_error_footer();
@@ -2520,7 +2516,7 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
state->sync_thread = kthread_create(&sync_func,
(void *)state,
threadname);
- if (state->sync_thread == NULL) {
+ if (IS_ERR(state->sync_thread)) {
vchiq_loud_error_header();
vchiq_loud_error("couldn't create thread %s", threadname);
vchiq_loud_error_footer();
@@ -2684,14 +2680,19 @@ vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id)
service->version,
service->version_min
};
- VCHIQ_ELEMENT_T body = { &payload, sizeof(payload) };
VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
service->client_id = client_id;
vchiq_use_service_internal(service);
- status = queue_message(service->state, NULL,
- VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, service->localport, 0),
- &body, 1, sizeof(payload), QMFLAGS_IS_BLOCKING);
+ status = queue_message(service->state,
+ NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN,
+ service->localport,
+ 0),
+ memcpy_copy_callback,
+ &payload,
+ sizeof(payload),
+ QMFLAGS_IS_BLOCKING);
if (status == VCHIQ_SUCCESS) {
/* Wait for the ACK/NAK */
if (down_interruptible(&service->remove_event) != 0) {
@@ -2756,20 +2757,16 @@ release_service_messages(VCHIQ_SERVICE_T *service)
if ((port == service->localport) &&
(msgid & VCHIQ_MSGID_CLAIMED)) {
vchiq_log_info(vchiq_core_log_level,
- " fsi - hdr %x",
- (unsigned int)header);
+ " fsi - hdr %pK", header);
release_slot(state, slot_info, header,
NULL);
}
pos += calc_stride(header->size);
if (pos > VCHIQ_SLOT_SIZE) {
vchiq_log_error(vchiq_core_log_level,
- "fsi - pos %x: header %x, "
- "msgid %x, header->msgid %x, "
- "header->size %x",
- pos, (unsigned int)header,
- msgid, header->msgid,
- header->size);
+ "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
+ pos, header, msgid,
+ header->msgid, header->size);
WARN(1, "invalid slot position\n");
}
}
@@ -2783,7 +2780,7 @@ do_abort_bulks(VCHIQ_SERVICE_T *service)
VCHIQ_STATUS_T status;
/* Abort any outstanding bulk transfers */
- if (mutex_lock_interruptible(&service->bulk_mutex) != 0)
+ if (mutex_lock_killable(&service->bulk_mutex) != 0)
return 0;
abort_outstanding_bulks(service, &service->bulk_tx);
abort_outstanding_bulks(service, &service->bulk_rx);
@@ -3303,7 +3300,7 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
queue = (dir == VCHIQ_BULK_TRANSMIT) ?
&service->bulk_tx : &service->bulk_rx;
- if (mutex_lock_interruptible(&service->bulk_mutex) != 0) {
+ if (mutex_lock_killable(&service->bulk_mutex) != 0) {
status = VCHIQ_RETRY;
goto error_exit;
}
@@ -3317,7 +3314,7 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
status = VCHIQ_RETRY;
goto error_exit;
}
- if (mutex_lock_interruptible(&service->bulk_mutex)
+ if (mutex_lock_killable(&service->bulk_mutex)
!= 0) {
status = VCHIQ_RETRY;
goto error_exit;
@@ -3341,14 +3338,13 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
wmb();
vchiq_log_info(vchiq_core_log_level,
- "%d: bt (%d->%d) %cx %x@%x %x",
- state->id,
- service->localport, service->remoteport, dir_char,
- size, (unsigned int)bulk->data, (unsigned int)userdata);
+ "%d: bt (%d->%d) %cx %x@%pK %pK",
+ state->id, service->localport, service->remoteport, dir_char,
+ size, bulk->data, userdata);
/* The slot mutex must be held when the service is being closed, so
claim it here to ensure that isn't happening */
- if (mutex_lock_interruptible(&state->slot_mutex) != 0) {
+ if (mutex_lock_killable(&state->slot_mutex) != 0) {
status = VCHIQ_RETRY;
goto cancel_bulk_error_exit;
}
@@ -3363,16 +3359,19 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
(dir == VCHIQ_BULK_TRANSMIT) ?
VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
} else {
- int payload[2] = { (int)bulk->data, bulk->size };
- VCHIQ_ELEMENT_T element = { payload, sizeof(payload) };
-
- status = queue_message(state, NULL,
- VCHIQ_MAKE_MSG(dir_msgtype,
- service->localport, service->remoteport),
- &element, 1, sizeof(payload),
- QMFLAGS_IS_BLOCKING |
- QMFLAGS_NO_MUTEX_LOCK |
- QMFLAGS_NO_MUTEX_UNLOCK);
+ int payload[2] = { (int)(long)bulk->data, bulk->size };
+
+ status = queue_message(state,
+ NULL,
+ VCHIQ_MAKE_MSG(dir_msgtype,
+ service->localport,
+ service->remoteport),
+ memcpy_copy_callback,
+ &payload,
+ sizeof(payload),
+ QMFLAGS_IS_BLOCKING |
+ QMFLAGS_NO_MUTEX_LOCK |
+ QMFLAGS_NO_MUTEX_UNLOCK);
if (status != VCHIQ_SUCCESS) {
goto unlock_both_error_exit;
}
@@ -3418,26 +3417,22 @@ error_exit:
VCHIQ_STATUS_T
vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
- const VCHIQ_ELEMENT_T *elements, unsigned int count)
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context,
+ size_t size)
{
VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
VCHIQ_STATUS_T status = VCHIQ_ERROR;
- unsigned int size = 0;
- unsigned int i;
-
if (!service ||
(vchiq_check_service(service) != VCHIQ_SUCCESS))
goto error_exit;
- for (i = 0; i < (unsigned int)count; i++) {
- if (elements[i].size) {
- if (elements[i].data == NULL) {
- VCHIQ_SERVICE_STATS_INC(service, error_count);
- goto error_exit;
- }
- size += elements[i].size;
- }
+ if (!size) {
+ VCHIQ_SERVICE_STATS_INC(service, error_count);
+ goto error_exit;
+
}
if (size > VCHIQ_MAX_MSG_SIZE) {
@@ -3451,14 +3446,14 @@ vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
service->localport,
service->remoteport),
- elements, count, size, 1);
+ copy_callback, context, size, 1);
break;
case VCHIQ_SRVSTATE_OPENSYNC:
status = queue_message_sync(service->state, service,
VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
service->localport,
service->remoteport),
- elements, count, size, 1);
+ copy_callback, context, size, 1);
break;
default:
status = VCHIQ_ERROR;
@@ -3691,13 +3686,11 @@ vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state)
vchiq_dump(dump_context, buf, len + 1);
len = snprintf(buf, sizeof(buf),
- " tx_pos=%x(@%x), rx_pos=%x(@%x)",
+ " tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
state->local->tx_pos,
- (uint32_t)state->tx_data +
- (state->local_tx_pos & VCHIQ_SLOT_MASK),
+ state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
state->rx_pos,
- (uint32_t)state->rx_data +
- (state->rx_pos & VCHIQ_SLOT_MASK));
+ state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
vchiq_dump(dump_context, buf, len + 1);
len = snprintf(buf, sizeof(buf),
@@ -3747,7 +3740,7 @@ vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
char buf[80];
int len;
- len = snprintf(buf, sizeof(buf), "Service %d: %s (ref %u)",
+ len = snprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
service->localport, srvstate_names[service->srvstate],
service->ref_count - 1); /*Don't include the lock just taken*/
@@ -3759,7 +3752,7 @@ vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
int tx_pending, rx_pending;
if (service->remoteport != VCHIQ_PORT_FREE) {
int len2 = snprintf(remoteport, sizeof(remoteport),
- "%d", service->remoteport);
+ "%u", service->remoteport);
if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
snprintf(remoteport + len2,
sizeof(remoteport) - len2,
@@ -3888,26 +3881,26 @@ VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state)
return status;
}
-void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
- size_t numBytes)
+void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *void_mem,
+ size_t num_bytes)
{
- const uint8_t *mem = (const uint8_t *)voidMem;
+ const uint8_t *mem = (const uint8_t *)void_mem;
size_t offset;
- char lineBuf[100];
+ char line_buf[100];
char *s;
- while (numBytes > 0) {
- s = lineBuf;
+ while (num_bytes > 0) {
+ s = line_buf;
for (offset = 0; offset < 16; offset++) {
- if (offset < numBytes)
+ if (offset < num_bytes)
s += snprintf(s, 4, "%02x ", mem[offset]);
else
s += snprintf(s, 4, " ");
}
for (offset = 0; offset < 16; offset++) {
- if (offset < numBytes) {
+ if (offset < num_bytes) {
uint8_t ch = mem[offset];
if ((ch < ' ') || (ch > '~'))
@@ -3919,16 +3912,16 @@ void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
if ((label != NULL) && (*label != '\0'))
vchiq_log_trace(VCHIQ_LOG_TRACE,
- "%s: %08x: %s", label, addr, lineBuf);
+ "%s: %08x: %s", label, addr, line_buf);
else
vchiq_log_trace(VCHIQ_LOG_TRACE,
- "%08x: %s", addr, lineBuf);
+ "%08x: %s", addr, line_buf);
addr += 16;
mem += 16;
- if (numBytes > 16)
- numBytes -= 16;
+ if (num_bytes > 16)
+ num_bytes -= 16;
else
- numBytes = 0;
+ num_bytes = 0;
}
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 9be484c776d0..9e164652548a 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -184,11 +184,11 @@ enum {
#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
#define DEBUG_TRACE(d) \
- do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(); } while (0)
+ do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(sy); } while (0)
#define DEBUG_VALUE(d, v) \
- do { debug_ptr[DEBUG_ ## d] = (v); dsb(); } while (0)
+ do { debug_ptr[DEBUG_ ## d] = (v); dsb(sy); } while (0)
#define DEBUG_COUNT(d) \
- do { debug_ptr[DEBUG_ ## d]++; dsb(); } while (0)
+ do { debug_ptr[DEBUG_ ## d]++; dsb(sy); } while (0)
#else /* VCHIQ_ENABLE_DEBUG */
@@ -264,7 +264,8 @@ typedef struct vchiq_bulk_queue_struct {
typedef struct remote_event_struct {
int armed;
int fired;
- struct semaphore *event;
+ /* Contains offset from the beginning of the VCHIQ_STATE_T structure */
+ u32 event;
} REMOTE_EVENT_T;
typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
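[Editor's note] REMOTE_EVENT_T lives in memory shared with the VideoCore, so storing a raw struct semaphore pointer there both leaks a kernel address and breaks once pointers are 64 bits wide. With the field changed to an offset, the ARM side recovers its semaphore as in this helper, a sketch of the expression the vchiq_core.c hunks open-code:

static inline struct semaphore *
remote_event_to_sem(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event)
{
	/* event->event is an offset from the start of VCHIQ_STATE_T */
	return (struct semaphore *)((char *)state + event->event);
}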
@@ -633,9 +634,6 @@ vchiq_transfer_bulk(VCHIQ_BULK_T *bulk);
extern void
vchiq_complete_bulk(VCHIQ_BULK_T *bulk);
-extern VCHIQ_STATUS_T
-vchiq_copy_from_user(void *dst, const void *src, int size);
-
extern void
remote_event_signal(REMOTE_EVENT_T *event);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
index 7e032130d967..f07cd4448ddf 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -120,7 +120,7 @@ static int debugfs_log_open(struct inode *inode, struct file *file)
return single_open(file, debugfs_log_show, inode->i_private);
}
-static int debugfs_log_write(struct file *file,
+static ssize_t debugfs_log_write(struct file *file,
const char __user *buffer,
size_t count, loff_t *ppos)
{
@@ -229,7 +229,7 @@ static int debugfs_trace_open(struct inode *inode, struct file *file)
return single_open(file, debugfs_trace_show, inode->i_private);
}
-static int debugfs_trace_write(struct file *file,
+static ssize_t debugfs_trace_write(struct file *file,
const char __user *buffer,
size_t count, loff_t *ppos)
{
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
index 8067bbe7ce8d..377e8e48bb54 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
@@ -141,9 +141,12 @@ extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
extern VCHIQ_STATUS_T vchiq_use_service_no_resume(
VCHIQ_SERVICE_HANDLE_T service);
extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
-
-extern VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T service,
- const VCHIQ_ELEMENT_T *elements, unsigned int count);
+extern VCHIQ_STATUS_T
+vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context,
+ size_t size);
extern void vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
VCHIQ_HEADER_T *header);
extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
index 25e7011edc50..e93922a87263 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
@@ -70,7 +70,7 @@ vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
*
***************************************************************************/
#define VCHIQ_INIT_RETRIES 10
-VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
+VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instance_out)
{
VCHIQ_STATUS_T status = VCHIQ_ERROR;
VCHIQ_STATE_T *state;
@@ -108,7 +108,7 @@ VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
mutex_init(&instance->bulk_waiter_list_mutex);
INIT_LIST_HEAD(&instance->bulk_waiter_list);
- *instanceOut = instance;
+ *instance_out = instance;
status = VCHIQ_SUCCESS;
@@ -134,7 +134,7 @@ VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
vchiq_log_trace(vchiq_core_log_level,
"%s(%p) called", __func__, instance);
- if (mutex_lock_interruptible(&state->mutex) != 0)
+ if (mutex_lock_killable(&state->mutex) != 0)
return VCHIQ_RETRY;
/* Remove all services */
@@ -155,9 +155,8 @@ VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
list);
list_del(pos);
vchiq_log_info(vchiq_arm_log_level,
- "bulk_waiter - cleaned up %x "
- "for pid %d",
- (unsigned int)waiter, waiter->pid);
+ "bulk_waiter - cleaned up %pK for pid %d",
+ waiter, waiter->pid);
kfree(waiter);
}
kfree(instance);
@@ -192,7 +191,7 @@ VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
vchiq_log_trace(vchiq_core_log_level,
"%s(%p) called", __func__, instance);
- if (mutex_lock_interruptible(&state->mutex) != 0) {
+ if (mutex_lock_killable(&state->mutex) != 0) {
vchiq_log_trace(vchiq_core_log_level,
"%s: call to mutex_lock failed", __func__);
status = VCHIQ_RETRY;
@@ -450,8 +449,8 @@ vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
list_add(&waiter->list, &instance->bulk_waiter_list);
mutex_unlock(&instance->bulk_waiter_list_mutex);
vchiq_log_info(vchiq_arm_log_level,
- "saved bulk_waiter %x for pid %d",
- (unsigned int)waiter, current->pid);
+ "saved bulk_waiter %pK for pid %d",
+ waiter, current->pid);
}
return status;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h
index 335446e05476..778063ba312a 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h
@@ -52,18 +52,4 @@ static inline int __must_check down_interruptible_killable(struct semaphore *sem
}
#define down_interruptible down_interruptible_killable
-
-static inline int __must_check mutex_lock_interruptible_killable(struct mutex *lock)
-{
- /* Allow interception of killable signals only. We don't want to be interrupted by harmless signals like SIGALRM */
- int ret;
- sigset_t blocked, oldset;
- siginitsetinv(&blocked, SHUTDOWN_SIGS);
- sigprocmask(SIG_SETMASK, &blocked, &oldset);
- ret = mutex_lock_interruptible(lock);
- sigprocmask(SIG_SETMASK, &oldset, NULL);
- return ret;
-}
-#define mutex_lock_interruptible mutex_lock_interruptible_killable
-
#endif
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
index d02e7764bd0d..dd43458f306f 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
@@ -42,13 +42,13 @@
/* ---- Constants and Types ---------------------------------------------- */
typedef struct {
- void *armSharedMemVirt;
- dma_addr_t armSharedMemPhys;
- size_t armSharedMemSize;
+ void *arm_shared_mem_virt;
+ dma_addr_t arm_shared_mem_phys;
+ size_t arm_shared_mem_size;
- void *vcSharedMemVirt;
- dma_addr_t vcSharedMemPhys;
- size_t vcSharedMemSize;
+ void *vc_shared_mem_virt;
+ dma_addr_t vc_shared_mem_phys;
+ size_t vc_shared_mem_size;
} VCHIQ_SHARED_MEM_INFO_T;
/* ---- Variable Externs ------------------------------------------------- */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
index 54a3ecec69ef..12c304ceb952 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
@@ -43,11 +43,13 @@
#define PAGELIST_READ_WITH_FRAGMENTS 2
typedef struct pagelist_struct {
- unsigned long length;
- unsigned short type;
- unsigned short offset;
- unsigned long addrs[1]; /* N.B. 12 LSBs hold the number of following
- pages at consecutive addresses. */
+ u32 length;
+ u16 type;
+ u16 offset;
+ u32 addrs[1]; /* N.B. 12 LSBs hold the number
+ * of following pages at consecutive
+ * addresses.
+ */
} PAGELIST_T;
typedef struct fragments_struct {
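[Editor's note] PAGELIST_T is another structure shared with the VideoCore, so its members move to fixed-width types and the layout becomes identical on 32- and 64-bit kernels. A check like the following (placed in some init path; purely illustrative, not part of the patch) would document the expected offsets:

	BUILD_BUG_ON(offsetof(PAGELIST_T, type)   != 4);
	BUILD_BUG_ON(offsetof(PAGELIST_T, offset) != 6);
	BUILD_BUG_ON(offsetof(PAGELIST_T, addrs)  != 8);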
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
index 8072ff613636..d9771394a041 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
@@ -148,10 +148,10 @@ EXPORT_SYMBOL(vchi_msg_remove);
* Name: vchi_msg_queue
*
* Arguments: VCHI_SERVICE_HANDLE_T handle,
- * const void *data,
- * uint32_t data_size,
- * VCHI_FLAGS_T flags,
- * void *msg_handle,
+ * ssize_t (*copy_callback)(void *context, void *dest,
+ * size_t offset, size_t maxsize),
+ * void *context,
+ * uint32_t data_size
*
* Description: Thin wrapper to queue a message onto a connection
*
@@ -159,28 +159,29 @@ EXPORT_SYMBOL(vchi_msg_remove);
*
***********************************************************/
int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
- const void *data,
- uint32_t data_size,
- VCHI_FLAGS_T flags,
- void *msg_handle)
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context,
+ uint32_t data_size)
{
SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
- VCHIQ_ELEMENT_T element = {data, data_size};
VCHIQ_STATUS_T status;
- (void)msg_handle;
-
- WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
+ while (1) {
+ status = vchiq_queue_message(service->handle,
+ copy_callback,
+ context,
+ data_size);
- status = vchiq_queue_message(service->handle, &element, 1);
+ /*
+ * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
+ * implement a retry mechanism since this function is supposed
+ * to block until queued
+ */
+ if (status != VCHIQ_RETRY)
+ break;
- /* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
- ** implement a retry mechanism since this function is supposed
- ** to block until queued
- */
- while (status == VCHIQ_RETRY) {
msleep(1);
- status = vchiq_queue_message(service->handle, &element, 1);
}
return vchiq_status_to_vchi(status);
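[Editor's note] The shim helpers are restructured so the queueing call appears exactly once, inside the loop, instead of once before it and once inside it. The same retry-until-queued idiom, written as an equivalent do/while for illustration:

	do {
		status = vchiq_queue_message(service->handle, copy_callback,
					     context, data_size);
		if (status == VCHIQ_RETRY)
			msleep(1);	/* block until the message is queued */
	} while (status == VCHIQ_RETRY);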
@@ -229,17 +230,18 @@ int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
return vchiq_status_to_vchi(VCHIQ_ERROR);
}
- status = vchiq_bulk_receive(service->handle, data_dst, data_size,
- bulk_handle, mode);
-
- /* vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
- ** implement a retry mechanism since this function is supposed
- ** to block until queued
- */
- while (status == VCHIQ_RETRY) {
- msleep(1);
+ while (1) {
status = vchiq_bulk_receive(service->handle, data_dst,
data_size, bulk_handle, mode);
+ /*
+ * vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
+ * implement a retry mechanism since this function is supposed
+ * to block until queued
+ */
+ if (status != VCHIQ_RETRY)
+ break;
+
+ msleep(1);
}
return vchiq_status_to_vchi(status);
@@ -289,17 +291,19 @@ int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
return vchiq_status_to_vchi(VCHIQ_ERROR);
}
- status = vchiq_bulk_transmit(service->handle, data_src, data_size,
- bulk_handle, mode);
-
- /* vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
- ** implement a retry mechanism since this function is supposed
- ** to block until queued
- */
- while (status == VCHIQ_RETRY) {
- msleep(1);
+ while (1) {
status = vchiq_bulk_transmit(service->handle, data_src,
data_size, bulk_handle, mode);
+
+ /*
+ * vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
+ * implement a retry mechanism since this function is supposed
+ * to block until queued
+ */
+ if (status != VCHIQ_RETRY)
+ break;
+
+ msleep(1);
}
return vchiq_status_to_vchi(status);
@@ -350,44 +354,6 @@ int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
EXPORT_SYMBOL(vchi_msg_dequeue);
/***********************************************************
- * Name: vchi_msg_queuev
- *
- * Arguments: VCHI_SERVICE_HANDLE_T handle,
- * VCHI_MSG_VECTOR_T *vector,
- * uint32_t count,
- * VCHI_FLAGS_T flags,
- * void *msg_handle
- *
- * Description: Thin wrapper to queue a message onto a connection
- *
- * Returns: int32_t - success == 0
- *
- ***********************************************************/
-
-vchiq_static_assert(sizeof(VCHI_MSG_VECTOR_T) == sizeof(VCHIQ_ELEMENT_T));
-vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_base) ==
- offsetof(VCHIQ_ELEMENT_T, data));
-vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_len) ==
- offsetof(VCHIQ_ELEMENT_T, size));
-
-int32_t vchi_msg_queuev(VCHI_SERVICE_HANDLE_T handle,
- VCHI_MSG_VECTOR_T *vector,
- uint32_t count,
- VCHI_FLAGS_T flags,
- void *msg_handle)
-{
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
-
- (void)msg_handle;
-
- WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
-
- return vchiq_status_to_vchi(vchiq_queue_message(service->handle,
- (const VCHIQ_ELEMENT_T *)vector, count));
-}
-EXPORT_SYMBOL(vchi_msg_queuev);
-
-/***********************************************************
* Name: vchi_held_msg_release
*
* Arguments: VCHI_HELD_MSG_T *message
@@ -400,8 +366,16 @@ EXPORT_SYMBOL(vchi_msg_queuev);
***********************************************************/
int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message)
{
- vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)message->service,
- (VCHIQ_HEADER_T *)message->message);
+ /*
+ * Convert the service field pointer back to an
+ * VCHIQ_SERVICE_HANDLE_T which is an int.
+ * This pointer is opaque to everything except
+ * vchi_msg_hold which simply upcasted the int
+ * to a pointer.
+ */
+
+ vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)(long)message->service,
+ (VCHIQ_HEADER_T *)message->message);
return 0;
}
@@ -445,8 +419,16 @@ int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
*data = header->data;
*msg_size = header->size;
+ /*
+ * upcast the VCHIQ_SERVICE_HANDLE_T which is an int
+ * to a pointer and stuff it in the held message.
+ * This pointer is opaque to everything except
+ * vchi_held_msg_release which simply downcasts it back
+ * to an int.
+ */
+
message_handle->service =
- (struct opaque_vchi_service_t *)service->handle;
+ (struct opaque_vchi_service_t *)(long)service->handle;
message_handle->message = header;
return 0;
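
The two hunks above round-trip an integer service handle through an opaque pointer field: vchi_msg_hold widens the int to a pointer via (long), and vchi_held_msg_release narrows it back the same way, the intermediate cast avoiding the pointer/integer size-mismatch warning on 64-bit builds. A small sketch of that round trip, assuming a hypothetical struct held_msg with an opaque void-pointer slot (not the driver's real types) and, as the patch does, that long is wide enough to hold a pointer on the build targets:

#include <assert.h>

struct held_msg {
	void *service;	/* opaque slot; actually stores an int handle */
};

/* Stash an integer handle in the opaque pointer field. */
static void hold(struct held_msg *m, int handle)
{
	m->service = (void *)(long)handle;	/* widen via long, as above */
}

/* Recover the integer handle from the opaque pointer field. */
static int release(const struct held_msg *m)
{
	return (int)(long)m->service;		/* narrow back the same way */
}

int main(void)
{
	struct held_msg m;

	hold(&m, 42);
	assert(release(&m) == 42);
	return 0;
}
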
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
index 384acb8d2eae..f76f4d790532 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
@@ -61,8 +61,7 @@ int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
void vchiu_queue_delete(VCHIU_QUEUE_T *queue)
{
- if (queue->storage != NULL)
- kfree(queue->storage);
+ kfree(queue->storage);
}
int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue)
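
The vchiu_queue_delete() hunk drops the NULL check because kfree(), like free() in userspace, is defined to be a no-op on a NULL pointer, so the guard adds nothing. A userspace illustration of the same idiom with free() (struct queue here is only a stand-in for the driver's type):

#include <stdlib.h>

struct queue {
	void **storage;
};

/* free(NULL) does nothing, so no guard is needed before the call. */
static void queue_delete(struct queue *q)
{
	free(q->storage);
	q->storage = NULL;	/* optional: avoid a dangling pointer on reuse */
}
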
diff --git a/drivers/staging/vme/devices/vme_pio2.h b/drivers/staging/vme/devices/vme_pio2.h
index d5d94c43c074..5577df3199e7 100644
--- a/drivers/staging/vme/devices/vme_pio2.h
+++ b/drivers/staging/vme/devices/vme_pio2.h
@@ -48,8 +48,6 @@ static const int PIO2_REGS_INT_MASK[8] = { PIO2_REGS_INT_MASK0,
PIO2_REGS_INT_MASK6,
PIO2_REGS_INT_MASK7 };
-
-
#define PIO2_REGS_CTRL 0x18
#define PIO2_REGS_VME_VECTOR 0x19
#define PIO2_REGS_CNTR0 0x20
@@ -63,7 +61,6 @@ static const int PIO2_REGS_INT_MASK[8] = { PIO2_REGS_INT_MASK0,
#define PIO2_REGS_ID 0x30
-
/* PIO2_REGS_DATAx (0x0 - 0x3) */
static const int PIO2_CHANNEL_BANK[32] = { 0, 0, 0, 0, 0, 0, 0, 0,
@@ -204,8 +201,6 @@ static const int PIO2_CNTR_SC_DEV[6] = { PIO2_CNTR_SC_DEV0, PIO2_CNTR_SC_DEV1,
#define PIO2_CNTR_BCD 1
-
-
enum pio2_bank_config { NOFIT, INPUT, OUTPUT, BOTH };
enum pio2_int_config { NONE = 0, LOW2HIGH = 1, HIGH2LOW = 2, EITHER = 4 };
@@ -240,10 +235,10 @@ struct pio2_card {
struct pio2_cntr cntr[6];
};
-int pio2_cntr_reset(struct pio2_card *);
+int pio2_cntr_reset(struct pio2_card *card);
-int pio2_gpio_reset(struct pio2_card *);
-int pio2_gpio_init(struct pio2_card *);
-void pio2_gpio_exit(struct pio2_card *);
+int pio2_gpio_reset(struct pio2_card *card);
+int pio2_gpio_init(struct pio2_card *card);
+void pio2_gpio_exit(struct pio2_card *card);
#endif /* _VME_PIO2_H_ */
diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c
index 8e66a520266c..20a2d835fdaa 100644
--- a/drivers/staging/vme/devices/vme_pio2_core.c
+++ b/drivers/staging/vme/devices/vme_pio2_core.c
@@ -365,7 +365,7 @@ static int pio2_probe(struct vme_dev *vdev)
vec = card->irq_vector | PIO2_VECTOR_CNTR[i];
retval = vme_irq_request(vdev, card->irq_level, vec,
- &pio2_int, card);
+ &pio2_int, card);
if (retval < 0) {
dev_err(&card->vdev->dev,
"Unable to attach VME interrupt vector0x%x, level 0x%x\n",
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index d84dffb894f4..87aa5174df22 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -661,7 +661,7 @@ err_sysfs:
}
class_destroy(vme_user_sysfs_class);
- /* Ensure counter set correcty to unalloc all master windows */
+ /* Ensure counter set correctly to unalloc all master windows */
i = MASTER_MAX + 1;
err_master:
while (i > MASTER_MINOR) {
@@ -671,7 +671,7 @@ err_master:
}
/*
- * Ensure counter set correcty to unalloc all slave windows and buffers
+ * Ensure counter set correctly to unalloc all slave windows and buffers
*/
i = SLAVE_MAX + 1;
err_slave:
@@ -716,7 +716,7 @@ static int vme_user_remove(struct vme_dev *dev)
/* Unregister device driver */
cdev_del(vme_user_cdev);
- /* Unregiser the major and minor device numbers */
+ /* Unregister the major and minor device numbers */
unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
return 0;
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index de503a316e71..44dfa5421374 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: baseband.c
*
* Purpose: Implement functions to access baseband
@@ -1916,7 +1911,7 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* byBBAddr - address of register in Baseband
* Out:
* pbyData - data read
@@ -1927,24 +1922,24 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
bool BBbReadEmbedded(struct vnt_private *priv,
unsigned char byBBAddr, unsigned char *pbyData)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
unsigned short ww;
unsigned char byValue;
/* BB reg offset */
- VNSvOutPortB(dwIoBase + MAC_REG_BBREGADR, byBBAddr);
+ VNSvOutPortB(iobase + MAC_REG_BBREGADR, byBBAddr);
/* turn on REGR */
- MACvRegBitsOn(dwIoBase, MAC_REG_BBREGCTL, BBREGCTL_REGR);
+ MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGR);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_BBREGCTL, &byValue);
+ VNSvInPortB(iobase + MAC_REG_BBREGCTL, &byValue);
if (byValue & BBREGCTL_DONE)
break;
}
/* get BB data */
- VNSvInPortB(dwIoBase + MAC_REG_BBREGDATA, pbyData);
+ VNSvInPortB(iobase + MAC_REG_BBREGDATA, pbyData);
if (ww == W_MAX_TIMEOUT) {
pr_debug(" DBG_PORT80(0x30)\n");
@@ -1958,7 +1953,7 @@ bool BBbReadEmbedded(struct vnt_private *priv,
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* byBBAddr - address of register in Baseband
* byData - data to write
* Out:
@@ -1970,20 +1965,20 @@ bool BBbReadEmbedded(struct vnt_private *priv,
bool BBbWriteEmbedded(struct vnt_private *priv,
unsigned char byBBAddr, unsigned char byData)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
unsigned short ww;
unsigned char byValue;
/* BB reg offset */
- VNSvOutPortB(dwIoBase + MAC_REG_BBREGADR, byBBAddr);
+ VNSvOutPortB(iobase + MAC_REG_BBREGADR, byBBAddr);
/* set BB data */
- VNSvOutPortB(dwIoBase + MAC_REG_BBREGDATA, byData);
+ VNSvOutPortB(iobase + MAC_REG_BBREGDATA, byData);
/* turn on BBREGCTL_REGW */
- MACvRegBitsOn(dwIoBase, MAC_REG_BBREGCTL, BBREGCTL_REGW);
+ MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGW);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_BBREGCTL, &byValue);
+ VNSvInPortB(iobase + MAC_REG_BBREGCTL, &byValue);
if (byValue & BBREGCTL_DONE)
break;
}
@@ -2000,7 +1995,7 @@ bool BBbWriteEmbedded(struct vnt_private *priv,
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* byRevId - Revision ID
* byRFType - RF type
* Out:
@@ -2014,7 +2009,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
{
bool bResult = true;
int ii;
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
unsigned char byRFType = priv->byRFType;
unsigned char byLocalID = priv->byLocalID;
@@ -2036,8 +2031,8 @@ bool BBbVT3253Init(struct vnt_private *priv)
byVT3253B0_AGC4_RFMD2959[ii][0],
byVT3253B0_AGC4_RFMD2959[ii][1]);
- VNSvOutPortD(dwIoBase + MAC_REG_ITRTMSET, 0x23);
- MACvRegBitsOn(dwIoBase, MAC_REG_PAPEDELAY, BIT(0));
+ VNSvOutPortD(iobase + MAC_REG_ITRTMSET, 0x23);
+ MACvRegBitsOn(iobase, MAC_REG_PAPEDELAY, BIT(0));
}
priv->abyBBVGA[0] = 0x18;
priv->abyBBVGA[1] = 0x0A;
@@ -2076,8 +2071,8 @@ bool BBbVT3253Init(struct vnt_private *priv)
byVT3253B0_AGC[ii][0],
byVT3253B0_AGC[ii][1]);
- VNSvOutPortB(dwIoBase + MAC_REG_ITRTMSET, 0x23);
- MACvRegBitsOn(dwIoBase, MAC_REG_PAPEDELAY, BIT(0));
+ VNSvOutPortB(iobase + MAC_REG_ITRTMSET, 0x23);
+ MACvRegBitsOn(iobase, MAC_REG_PAPEDELAY, BIT(0));
priv->abyBBVGA[0] = 0x14;
priv->abyBBVGA[1] = 0x0A;
@@ -2098,7 +2093,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
* 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted)
*/
- /*bResult &= BBbWriteEmbedded(dwIoBase,0x09,0x41);*/
+ /*bResult &= BBbWriteEmbedded(iobase,0x09,0x41);*/
/* Init ANT B select,
* RX Config CR10 = 0x28->0x2A,
@@ -2106,7 +2101,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
* make the ANT_A, ANT_B inverted)
*/
- /*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/
+ /*bResult &= BBbWriteEmbedded(iobase,0x0a,0x28);*/
/* Select VC1/VC2, CR215 = 0x02->0x06 */
bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
@@ -2154,7 +2149,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
priv->ldBmThreshold[2] = 0;
priv->ldBmThreshold[3] = 0;
/* Fix VT3226 DFC system timing issue */
- MACvSetRFLE_LatchBase(dwIoBase);
+ MACvSetRFLE_LatchBase(iobase);
/* {{ RobertYu: 20050104 */
} else if (byRFType == RF_AIROHA7230) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
@@ -2162,16 +2157,15 @@ bool BBbVT3253Init(struct vnt_private *priv)
byVT3253B0_AIROHA2230[ii][0],
byVT3253B0_AIROHA2230[ii][1]);
-
/* {{ RobertYu:20050223, request by JerryChung */
/* Init ANT B select,TX Config CR09 = 0x61->0x45,
* 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted)
*/
- /*bResult &= BBbWriteEmbedded(dwIoBase,0x09,0x41);*/
+ /*bResult &= BBbWriteEmbedded(iobase,0x09,0x41);*/
/* Init ANT B select,RX Config CR10 = 0x28->0x2A,
* 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted)
*/
- /*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/
+ /*bResult &= BBbWriteEmbedded(iobase,0x0a,0x28);*/
/* Select VC1/VC2, CR215 = 0x02->0x06 */
bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
/* }} */
@@ -2259,7 +2253,7 @@ void BBvSetVGAGainOffset(struct vnt_private *priv, unsigned char byData)
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* Out:
* none
*
@@ -2280,7 +2274,7 @@ BBvSoftwareReset(struct vnt_private *priv)
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* Out:
* none
*
@@ -2302,7 +2296,7 @@ BBvPowerSaveModeON(struct vnt_private *priv)
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* Out:
* none
*
diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h
index b4e8c43180ec..8a567c9155b4 100644
--- a/drivers/staging/vt6655/baseband.h
+++ b/drivers/staging/vt6655/baseband.h
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: baseband.h
*
* Purpose: Implement functions to access baseband
@@ -60,12 +55,6 @@
#define TOP_RATE_2M 0x00200000
#define TOP_RATE_1M 0x00100000
-#define BBvClearFOE(dwIoBase) \
- BBbWriteEmbedded(dwIoBase, 0xB1, 0)
-
-#define BBvSetFOE(dwIoBase) \
- BBbWriteEmbedded(dwIoBase, 0xB1, 0x0C)
-
unsigned int
BBuGetFrameTime(
unsigned char byPreambleType,
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index dbcea4434725..e0c92818ed70 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: card.c
* Purpose: Provide functions to setup NIC operation mode
* Functions:
@@ -36,7 +32,7 @@
*
* Revision History:
* 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
- * 08-26-2003 Kyle Hsu: Modify the defination type of dwIoBase.
+ * 08-26-2003 Kyle Hsu: Modify the defination type of iobase.
* 09-01-2003 Bryan YC Fan: Add vUpdateIFS().
*
*/
@@ -261,7 +257,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
BBbWriteEmbedded(priv, 0x88, 0x02);
bySlot = C_SLOT_LONG;
bySIFS = C_SIFS_BG;
- byDIFS = C_SIFS_BG + 2*C_SLOT_LONG;
+ byDIFS = C_SIFS_BG + 2 * C_SLOT_LONG;
byCWMaxMin = 0xA5;
} else { /* PK_TYPE_11GA & PK_TYPE_11GB */
MACvSetBBType(priv->PortOffset, BB_TYPE_11G);
@@ -289,7 +285,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
byDIFS = C_SIFS_BG + 2 * C_SLOT_SHORT;
} else {
bySlot = C_SLOT_LONG;
- byDIFS = C_SIFS_BG + 2*C_SLOT_LONG;
+ byDIFS = C_SIFS_BG + 2 * C_SLOT_LONG;
}
byCWMaxMin = 0xa4;
@@ -528,8 +524,11 @@ CARDvSafeResetTx(
struct vnt_tx_desc *pCurrTD;
/* initialize TD index */
- priv->apTailTD[0] = priv->apCurrTD[0] = &(priv->apTD0Rings[0]);
- priv->apTailTD[1] = priv->apCurrTD[1] = &(priv->apTD1Rings[0]);
+ priv->apTailTD[0] = &(priv->apTD0Rings[0]);
+ priv->apCurrTD[0] = &(priv->apTD0Rings[0]);
+
+ priv->apTailTD[1] = &(priv->apTD1Rings[0]);
+ priv->apCurrTD[1] = &(priv->apTD1Rings[0]);
for (uu = 0; uu < TYPE_MAXTD; uu++)
priv->iTDUsed[uu] = 0;
@@ -938,20 +937,20 @@ u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2)
*/
bool CARDbGetCurrentTSF(struct vnt_private *priv, u64 *pqwCurrTSF)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
unsigned short ww;
unsigned char byData;
- MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD);
+ MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_TFTCTL, &byData);
+ VNSvInPortB(iobase + MAC_REG_TFTCTL, &byData);
if (!(byData & TFTCTL_TSFCNTRRD))
break;
}
if (ww == W_MAX_TIMEOUT)
return false;
- VNSvInPortD(dwIoBase + MAC_REG_TSFCNTR, (u32 *)pqwCurrTSF);
- VNSvInPortD(dwIoBase + MAC_REG_TSFCNTR + 4, (u32 *)pqwCurrTSF + 1);
+ VNSvInPortD(iobase + MAC_REG_TSFCNTR, (u32 *)pqwCurrTSF);
+ VNSvInPortD(iobase + MAC_REG_TSFCNTR + 4, (u32 *)pqwCurrTSF + 1);
return true;
}
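
CARDbGetCurrentTSF() above latches the TSF counter and then polls TFTCTL_TSFCNTRRD for at most W_MAX_TIMEOUT iterations before reporting failure, a bounded-poll pattern for hardware that clears a bit when a request completes. A minimal sketch of the same bounded poll against a hypothetical status register (the register layout and read_reg() stub are illustrative only):

#include <stdbool.h>
#include <stdint.h>

#define BUSY_BIT	0x01u
#define MAX_POLLS	0x0800u		/* analogous to W_MAX_TIMEOUT */

/* Hypothetical register read; the stub keeps the sketch self-contained. */
static uint8_t read_reg(void)
{
	static int calls;

	return (++calls < 3) ? BUSY_BIT : 0;	/* bit clears on 3rd read */
}

/* Poll until the busy bit clears or the attempt budget runs out. */
static bool wait_not_busy(void)
{
	unsigned int i;

	for (i = 0; i < MAX_POLLS; i++) {
		if (!(read_reg() & BUSY_BIT))
			return true;
	}

	return false;	/* timed out, as CARDbGetCurrentTSF reports */
}
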
@@ -989,7 +988,7 @@ u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
*
* Parameters:
* In:
- * dwIoBase - IO Base
+ * iobase - IO Base
* wBeaconInterval - Beacon Interval
* Out:
* none
@@ -999,16 +998,16 @@ u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
void CARDvSetFirstNextTBTT(struct vnt_private *priv,
unsigned short wBeaconInterval)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
u64 qwNextTBTT = 0;
CARDbGetCurrentTSF(priv, &qwNextTBTT); /* Get Local TSF counter */
qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
/* Set NextTBTT */
- VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT, (u32)qwNextTBTT);
- VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32));
- MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
+ VNSvOutPortD(iobase + MAC_REG_NEXTTBTT, (u32)qwNextTBTT);
+ VNSvOutPortD(iobase + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32));
+ MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
}
/*
@@ -1028,12 +1027,12 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv,
void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
unsigned short wBeaconInterval)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
qwTSF = CARDqGetNextTBTT(qwTSF, wBeaconInterval);
/* Set NextTBTT */
- VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT, (u32)qwTSF);
- VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT + 4, (u32)(qwTSF >> 32));
- MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
+ VNSvOutPortD(iobase + MAC_REG_NEXTTBTT, (u32)qwTSF);
+ VNSvOutPortD(iobase + MAC_REG_NEXTTBTT + 4, (u32)(qwTSF >> 32));
+ MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
pr_debug("Card:Update Next TBTT[%8llx]\n", qwTSF);
}
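
Both TBTT helpers above store a 64-bit target time by writing the low and high halves to two consecutive 32-bit registers. A sketch of that split, with two plain variables standing in for the NEXTTBTT register pair (the names are illustrative):

#include <stdint.h>

/* Stand-ins for two consecutive 32-bit hardware registers. */
static uint32_t next_tbtt_lo, next_tbtt_hi;

/* Write a 64-bit value as two 32-bit halves, low word first. */
static void set_next_tbtt(uint64_t tbtt)
{
	next_tbtt_lo = (uint32_t)tbtt;
	next_tbtt_hi = (uint32_t)(tbtt >> 32);
}
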
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 0203c7fd91a2..44420b5a445f 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: card.h
*
* Purpose: Provide functions to setup NIC operation mode
@@ -50,7 +46,7 @@
#define CB_MAX_CHANNEL_24G 14
#define CB_MAX_CHANNEL_5G 42
-#define CB_MAX_CHANNEL (CB_MAX_CHANNEL_24G+CB_MAX_CHANNEL_5G)
+#define CB_MAX_CHANNEL (CB_MAX_CHANNEL_24G + CB_MAX_CHANNEL_5G)
typedef enum _CARD_PKT_TYPE {
PKT_TYPE_802_11_BCN,
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index 029a8df4ca1c..ab89956511a0 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: channel.c
*
*/
diff --git a/drivers/staging/vt6655/channel.h b/drivers/staging/vt6655/channel.h
index 2d613e7f169c..2621dfabff06 100644
--- a/drivers/staging/vt6655/channel.h
+++ b/drivers/staging/vt6655/channel.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: channel.h
*
*/
diff --git a/drivers/staging/vt6655/desc.h b/drivers/staging/vt6655/desc.h
index 2d7f6ae89164..2fee6e759ad8 100644
--- a/drivers/staging/vt6655/desc.h
+++ b/drivers/staging/vt6655/desc.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: desc.h
*
* Purpose:The header file of descriptor
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index 55405e058196..3ae40d846a09 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: device.h
*
* Purpose: MAC Data structure
@@ -283,12 +279,12 @@ struct vnt_private {
unsigned char byOFDMPwrG;
unsigned char byCurPwr;
char byCurPwrdBm;
- unsigned char abyCCKPwrTbl[CB_MAX_CHANNEL_24G+1];
- unsigned char abyOFDMPwrTbl[CB_MAX_CHANNEL+1];
- char abyCCKDefaultPwr[CB_MAX_CHANNEL_24G+1];
- char abyOFDMDefaultPwr[CB_MAX_CHANNEL+1];
- char abyRegPwr[CB_MAX_CHANNEL+1];
- char abyLocalPwr[CB_MAX_CHANNEL+1];
+ unsigned char abyCCKPwrTbl[CB_MAX_CHANNEL_24G + 1];
+ unsigned char abyOFDMPwrTbl[CB_MAX_CHANNEL + 1];
+ char abyCCKDefaultPwr[CB_MAX_CHANNEL_24G + 1];
+ char abyOFDMDefaultPwr[CB_MAX_CHANNEL + 1];
+ char abyRegPwr[CB_MAX_CHANNEL + 1];
+ char abyLocalPwr[CB_MAX_CHANNEL + 1];
/* BaseBand Loopback Use */
unsigned char byBBCR4d;
diff --git a/drivers/staging/vt6655/device_cfg.h b/drivers/staging/vt6655/device_cfg.h
index b4c9547d3138..0298ea923f97 100644
--- a/drivers/staging/vt6655/device_cfg.h
+++ b/drivers/staging/vt6655/device_cfg.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: device_cfg.h
*
* Purpose: Driver configuration header
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index f109eeac358d..da0f71191009 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: device_main.c
*
* Purpose: driver entry for initial, open, close, tx and rx.
@@ -314,7 +310,7 @@ static void device_init_registers(struct vnt_private *priv)
SROMbyReadEmbedded(priv->PortOffset,
(unsigned char)(ii + EEP_OFS_CCK_PWR_TBL));
if (priv->abyCCKPwrTbl[ii + 1] == 0)
- priv->abyCCKPwrTbl[ii+1] = priv->byCCKPwr;
+ priv->abyCCKPwrTbl[ii + 1] = priv->byCCKPwr;
priv->abyOFDMPwrTbl[ii + 1] =
SROMbyReadEmbedded(priv->PortOffset,
@@ -556,7 +552,7 @@ static void device_init_rd0_ring(struct vnt_private *priv)
if (!device_alloc_rx_buf(priv, desc))
dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
- desc->next = &(priv->aRD0Ring[(i+1) % priv->opts.rx_descs0]);
+ desc->next = &(priv->aRD0Ring[(i + 1) % priv->opts.rx_descs0]);
desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
}
@@ -1272,7 +1268,6 @@ static void vnt_remove_interface(struct ieee80211_hw *hw,
priv->op_mode = NL80211_IFTYPE_UNSPECIFIED;
}
-
static int vnt_config(struct ieee80211_hw *hw, u32 changed)
{
struct vnt_private *priv = hw->priv;
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index 700032e9c477..9b3fa779258a 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: dpc.c
*
* Purpose: handle dpc rx functions
diff --git a/drivers/staging/vt6655/dpc.h b/drivers/staging/vt6655/dpc.h
index e80b30816968..6e75fa9c5618 100644
--- a/drivers/staging/vt6655/dpc.h
+++ b/drivers/staging/vt6655/dpc.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: dpc.h
*
* Purpose:
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index e161d5d9aebb..dad9e292d4da 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: key.c
*
* Purpose: Implement functions for 802.11i Key management
diff --git a/drivers/staging/vt6655/key.h b/drivers/staging/vt6655/key.h
index d72719741a56..a5024611af60 100644
--- a/drivers/staging/vt6655/key.h
+++ b/drivers/staging/vt6655/key.h
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: key.h
*
* Purpose: Implement functions for 802.11i Key management
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 8e13f7f41415..4aaa99bafcda 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: mac.c
*
* Purpose: MAC routines
@@ -147,7 +142,6 @@ void MACvSetShortRetryLimit(struct vnt_private *priv,
iowrite8(byRetryLimit, io_base + MAC_REG_SRT);
}
-
/*
* Description:
* Set 802.11 Long Retry Limit
@@ -321,7 +315,7 @@ bool MACbSoftwareReset(struct vnt_private *priv)
*/
bool MACbSafeSoftwareReset(struct vnt_private *priv)
{
- unsigned char abyTmpRegData[MAC_MAX_CONTEXT_SIZE_PAGE0+MAC_MAX_CONTEXT_SIZE_PAGE1];
+ unsigned char abyTmpRegData[MAC_MAX_CONTEXT_SIZE_PAGE0 + MAC_MAX_CONTEXT_SIZE_PAGE1];
bool bRetVal;
/* PATCH....
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index 030f529c339b..33b758cb79d4 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: mac.h
*
* Purpose: MAC routines
@@ -554,341 +549,341 @@
/*--------------------- Export Macros ------------------------------*/
-#define MACvRegBitsOn(dwIoBase, byRegOfs, byBits) \
+#define MACvRegBitsOn(iobase, byRegOfs, byBits) \
do { \
unsigned char byData; \
- VNSvInPortB(dwIoBase + byRegOfs, &byData); \
- VNSvOutPortB(dwIoBase + byRegOfs, byData | (byBits)); \
+ VNSvInPortB(iobase + byRegOfs, &byData); \
+ VNSvOutPortB(iobase + byRegOfs, byData | (byBits)); \
} while (0)
-#define MACvWordRegBitsOn(dwIoBase, byRegOfs, wBits) \
+#define MACvWordRegBitsOn(iobase, byRegOfs, wBits) \
do { \
unsigned short wData; \
- VNSvInPortW(dwIoBase + byRegOfs, &wData); \
- VNSvOutPortW(dwIoBase + byRegOfs, wData | (wBits)); \
+ VNSvInPortW(iobase + byRegOfs, &wData); \
+ VNSvOutPortW(iobase + byRegOfs, wData | (wBits)); \
} while (0)
-#define MACvDWordRegBitsOn(dwIoBase, byRegOfs, dwBits) \
+#define MACvDWordRegBitsOn(iobase, byRegOfs, dwBits) \
do { \
unsigned long dwData; \
- VNSvInPortD(dwIoBase + byRegOfs, &dwData); \
- VNSvOutPortD(dwIoBase + byRegOfs, dwData | (dwBits)); \
+ VNSvInPortD(iobase + byRegOfs, &dwData); \
+ VNSvOutPortD(iobase + byRegOfs, dwData | (dwBits)); \
} while (0)
-#define MACvRegBitsOnEx(dwIoBase, byRegOfs, byMask, byBits) \
+#define MACvRegBitsOnEx(iobase, byRegOfs, byMask, byBits) \
do { \
unsigned char byData; \
- VNSvInPortB(dwIoBase + byRegOfs, &byData); \
+ VNSvInPortB(iobase + byRegOfs, &byData); \
byData &= byMask; \
- VNSvOutPortB(dwIoBase + byRegOfs, byData | (byBits)); \
+ VNSvOutPortB(iobase + byRegOfs, byData | (byBits)); \
} while (0)
-#define MACvRegBitsOff(dwIoBase, byRegOfs, byBits) \
+#define MACvRegBitsOff(iobase, byRegOfs, byBits) \
do { \
unsigned char byData; \
- VNSvInPortB(dwIoBase + byRegOfs, &byData); \
- VNSvOutPortB(dwIoBase + byRegOfs, byData & ~(byBits)); \
+ VNSvInPortB(iobase + byRegOfs, &byData); \
+ VNSvOutPortB(iobase + byRegOfs, byData & ~(byBits)); \
} while (0)
-#define MACvWordRegBitsOff(dwIoBase, byRegOfs, wBits) \
+#define MACvWordRegBitsOff(iobase, byRegOfs, wBits) \
do { \
unsigned short wData; \
- VNSvInPortW(dwIoBase + byRegOfs, &wData); \
- VNSvOutPortW(dwIoBase + byRegOfs, wData & ~(wBits)); \
+ VNSvInPortW(iobase + byRegOfs, &wData); \
+ VNSvOutPortW(iobase + byRegOfs, wData & ~(wBits)); \
} while (0)
-#define MACvDWordRegBitsOff(dwIoBase, byRegOfs, dwBits) \
+#define MACvDWordRegBitsOff(iobase, byRegOfs, dwBits) \
do { \
unsigned long dwData; \
- VNSvInPortD(dwIoBase + byRegOfs, &dwData); \
- VNSvOutPortD(dwIoBase + byRegOfs, dwData & ~(dwBits)); \
+ VNSvInPortD(iobase + byRegOfs, &dwData); \
+ VNSvOutPortD(iobase + byRegOfs, dwData & ~(dwBits)); \
} while (0)
-#define MACvGetCurrRx0DescAddr(dwIoBase, pdwCurrDescAddr) \
- VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR0, \
+#define MACvGetCurrRx0DescAddr(iobase, pdwCurrDescAddr) \
+ VNSvInPortD(iobase + MAC_REG_RXDMAPTR0, \
(unsigned long *)pdwCurrDescAddr)
-#define MACvGetCurrRx1DescAddr(dwIoBase, pdwCurrDescAddr) \
- VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR1, \
+#define MACvGetCurrRx1DescAddr(iobase, pdwCurrDescAddr) \
+ VNSvInPortD(iobase + MAC_REG_RXDMAPTR1, \
(unsigned long *)pdwCurrDescAddr)
-#define MACvGetCurrTx0DescAddr(dwIoBase, pdwCurrDescAddr) \
- VNSvInPortD(dwIoBase + MAC_REG_TXDMAPTR0, \
+#define MACvGetCurrTx0DescAddr(iobase, pdwCurrDescAddr) \
+ VNSvInPortD(iobase + MAC_REG_TXDMAPTR0, \
(unsigned long *)pdwCurrDescAddr)
-#define MACvGetCurrAC0DescAddr(dwIoBase, pdwCurrDescAddr) \
- VNSvInPortD(dwIoBase + MAC_REG_AC0DMAPTR, \
+#define MACvGetCurrAC0DescAddr(iobase, pdwCurrDescAddr) \
+ VNSvInPortD(iobase + MAC_REG_AC0DMAPTR, \
(unsigned long *)pdwCurrDescAddr)
-#define MACvGetCurrSyncDescAddr(dwIoBase, pdwCurrDescAddr) \
- VNSvInPortD(dwIoBase + MAC_REG_SYNCDMAPTR, \
+#define MACvGetCurrSyncDescAddr(iobase, pdwCurrDescAddr) \
+ VNSvInPortD(iobase + MAC_REG_SYNCDMAPTR, \
(unsigned long *)pdwCurrDescAddr)
-#define MACvGetCurrATIMDescAddr(dwIoBase, pdwCurrDescAddr) \
- VNSvInPortD(dwIoBase + MAC_REG_ATIMDMAPTR, \
+#define MACvGetCurrATIMDescAddr(iobase, pdwCurrDescAddr) \
+ VNSvInPortD(iobase + MAC_REG_ATIMDMAPTR, \
(unsigned long *)pdwCurrDescAddr)
/* set the chip with current BCN tx descriptor address */
-#define MACvSetCurrBCNTxDescAddr(dwIoBase, dwCurrDescAddr) \
- VNSvOutPortD(dwIoBase + MAC_REG_BCNDMAPTR, \
+#define MACvSetCurrBCNTxDescAddr(iobase, dwCurrDescAddr) \
+ VNSvOutPortD(iobase + MAC_REG_BCNDMAPTR, \
dwCurrDescAddr)
/* set the chip with current BCN length */
-#define MACvSetCurrBCNLength(dwIoBase, wCurrBCNLength) \
- VNSvOutPortW(dwIoBase + MAC_REG_BCNDMACTL+2, \
+#define MACvSetCurrBCNLength(iobase, wCurrBCNLength) \
+ VNSvOutPortW(iobase + MAC_REG_BCNDMACTL+2, \
wCurrBCNLength)
-#define MACvReadBSSIDAddress(dwIoBase, pbyEtherAddr) \
+#define MACvReadBSSIDAddress(iobase, pbyEtherAddr) \
do { \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \
- VNSvInPortB(dwIoBase + MAC_REG_BSSID0, \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
+ VNSvInPortB(iobase + MAC_REG_BSSID0, \
(unsigned char *)pbyEtherAddr); \
- VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 1, \
+ VNSvInPortB(iobase + MAC_REG_BSSID0 + 1, \
pbyEtherAddr + 1); \
- VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 2, \
+ VNSvInPortB(iobase + MAC_REG_BSSID0 + 2, \
pbyEtherAddr + 2); \
- VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 3, \
+ VNSvInPortB(iobase + MAC_REG_BSSID0 + 3, \
pbyEtherAddr + 3); \
- VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 4, \
+ VNSvInPortB(iobase + MAC_REG_BSSID0 + 4, \
pbyEtherAddr + 4); \
- VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 5, \
+ VNSvInPortB(iobase + MAC_REG_BSSID0 + 5, \
pbyEtherAddr + 5); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
} while (0)
-#define MACvWriteBSSIDAddress(dwIoBase, pbyEtherAddr) \
+#define MACvWriteBSSIDAddress(iobase, pbyEtherAddr) \
do { \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \
- VNSvOutPortB(dwIoBase + MAC_REG_BSSID0, \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
+ VNSvOutPortB(iobase + MAC_REG_BSSID0, \
*(pbyEtherAddr)); \
- VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 1, \
+ VNSvOutPortB(iobase + MAC_REG_BSSID0 + 1, \
*(pbyEtherAddr + 1)); \
- VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 2, \
+ VNSvOutPortB(iobase + MAC_REG_BSSID0 + 2, \
*(pbyEtherAddr + 2)); \
- VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 3, \
+ VNSvOutPortB(iobase + MAC_REG_BSSID0 + 3, \
*(pbyEtherAddr + 3)); \
- VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 4, \
+ VNSvOutPortB(iobase + MAC_REG_BSSID0 + 4, \
*(pbyEtherAddr + 4)); \
- VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 5, \
+ VNSvOutPortB(iobase + MAC_REG_BSSID0 + 5, \
*(pbyEtherAddr + 5)); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
} while (0)
-#define MACvReadEtherAddress(dwIoBase, pbyEtherAddr) \
+#define MACvReadEtherAddress(iobase, pbyEtherAddr) \
do { \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \
- VNSvInPortB(dwIoBase + MAC_REG_PAR0, \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
+ VNSvInPortB(iobase + MAC_REG_PAR0, \
(unsigned char *)pbyEtherAddr); \
- VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 1, \
+ VNSvInPortB(iobase + MAC_REG_PAR0 + 1, \
pbyEtherAddr + 1); \
- VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 2, \
+ VNSvInPortB(iobase + MAC_REG_PAR0 + 2, \
pbyEtherAddr + 2); \
- VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 3, \
+ VNSvInPortB(iobase + MAC_REG_PAR0 + 3, \
pbyEtherAddr + 3); \
- VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 4, \
+ VNSvInPortB(iobase + MAC_REG_PAR0 + 4, \
pbyEtherAddr + 4); \
- VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 5, \
+ VNSvInPortB(iobase + MAC_REG_PAR0 + 5, \
pbyEtherAddr + 5); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
} while (0)
-#define MACvWriteEtherAddress(dwIoBase, pbyEtherAddr) \
+#define MACvWriteEtherAddress(iobase, pbyEtherAddr) \
do { \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAR0, \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
+ VNSvOutPortB(iobase + MAC_REG_PAR0, \
*pbyEtherAddr); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 1, \
+ VNSvOutPortB(iobase + MAC_REG_PAR0 + 1, \
*(pbyEtherAddr + 1)); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 2, \
+ VNSvOutPortB(iobase + MAC_REG_PAR0 + 2, \
*(pbyEtherAddr + 2)); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 3, \
+ VNSvOutPortB(iobase + MAC_REG_PAR0 + 3, \
*(pbyEtherAddr + 3)); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 4, \
+ VNSvOutPortB(iobase + MAC_REG_PAR0 + 4, \
*(pbyEtherAddr + 4)); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 5, \
+ VNSvOutPortB(iobase + MAC_REG_PAR0 + 5, \
*(pbyEtherAddr + 5)); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
} while (0)
-#define MACvClearISR(dwIoBase) \
- VNSvOutPortD(dwIoBase + MAC_REG_ISR, IMR_MASK_VALUE)
+#define MACvClearISR(iobase) \
+ VNSvOutPortD(iobase + MAC_REG_ISR, IMR_MASK_VALUE)
-#define MACvStart(dwIoBase) \
- VNSvOutPortB(dwIoBase + MAC_REG_HOSTCR, \
+#define MACvStart(iobase) \
+ VNSvOutPortB(iobase + MAC_REG_HOSTCR, \
(HOSTCR_MACEN | HOSTCR_RXON | HOSTCR_TXON))
-#define MACvRx0PerPktMode(dwIoBase) \
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, RX_PERPKT)
+#define MACvRx0PerPktMode(iobase) \
+ VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, RX_PERPKT)
-#define MACvRx0BufferFillMode(dwIoBase) \
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, RX_PERPKTCLR)
+#define MACvRx0BufferFillMode(iobase) \
+ VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, RX_PERPKTCLR)
-#define MACvRx1PerPktMode(dwIoBase) \
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, RX_PERPKT)
+#define MACvRx1PerPktMode(iobase) \
+ VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, RX_PERPKT)
-#define MACvRx1BufferFillMode(dwIoBase) \
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, RX_PERPKTCLR)
+#define MACvRx1BufferFillMode(iobase) \
+ VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, RX_PERPKTCLR)
-#define MACvRxOn(dwIoBase) \
- MACvRegBitsOn(dwIoBase, MAC_REG_HOSTCR, HOSTCR_RXON)
+#define MACvRxOn(iobase) \
+ MACvRegBitsOn(iobase, MAC_REG_HOSTCR, HOSTCR_RXON)
-#define MACvReceive0(dwIoBase) \
+#define MACvReceive0(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(dwIoBase + MAC_REG_RXDMACTL0, &dwData); \
+ VNSvInPortD(iobase + MAC_REG_RXDMACTL0, &dwData); \
if (dwData & DMACTL_RUN) \
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, DMACTL_WAKE); \
+ VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, DMACTL_WAKE); \
else \
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, DMACTL_RUN); \
+ VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, DMACTL_RUN); \
} while (0)
-#define MACvReceive1(dwIoBase) \
+#define MACvReceive1(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(dwIoBase + MAC_REG_RXDMACTL1, &dwData); \
+ VNSvInPortD(iobase + MAC_REG_RXDMACTL1, &dwData); \
if (dwData & DMACTL_RUN) \
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, DMACTL_WAKE); \
+ VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_WAKE); \
else \
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, DMACTL_RUN); \
+ VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_RUN); \
} while (0)
-#define MACvTxOn(dwIoBase) \
- MACvRegBitsOn(dwIoBase, MAC_REG_HOSTCR, HOSTCR_TXON)
+#define MACvTxOn(iobase) \
+ MACvRegBitsOn(iobase, MAC_REG_HOSTCR, HOSTCR_TXON)
-#define MACvTransmit0(dwIoBase) \
+#define MACvTransmit0(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(dwIoBase + MAC_REG_TXDMACTL0, &dwData); \
+ VNSvInPortD(iobase + MAC_REG_TXDMACTL0, &dwData); \
if (dwData & DMACTL_RUN) \
- VNSvOutPortD(dwIoBase + MAC_REG_TXDMACTL0, DMACTL_WAKE); \
+ VNSvOutPortD(iobase + MAC_REG_TXDMACTL0, DMACTL_WAKE); \
else \
- VNSvOutPortD(dwIoBase + MAC_REG_TXDMACTL0, DMACTL_RUN); \
+ VNSvOutPortD(iobase + MAC_REG_TXDMACTL0, DMACTL_RUN); \
} while (0)
-#define MACvTransmitAC0(dwIoBase) \
+#define MACvTransmitAC0(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(dwIoBase + MAC_REG_AC0DMACTL, &dwData); \
+ VNSvInPortD(iobase + MAC_REG_AC0DMACTL, &dwData); \
if (dwData & DMACTL_RUN) \
- VNSvOutPortD(dwIoBase + MAC_REG_AC0DMACTL, DMACTL_WAKE); \
+ VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_WAKE); \
else \
- VNSvOutPortD(dwIoBase + MAC_REG_AC0DMACTL, DMACTL_RUN); \
+ VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_RUN); \
} while (0)
-#define MACvTransmitSYNC(dwIoBase) \
+#define MACvTransmitSYNC(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(dwIoBase + MAC_REG_SYNCDMACTL, &dwData); \
+ VNSvInPortD(iobase + MAC_REG_SYNCDMACTL, &dwData); \
if (dwData & DMACTL_RUN) \
- VNSvOutPortD(dwIoBase + MAC_REG_SYNCDMACTL, DMACTL_WAKE); \
+ VNSvOutPortD(iobase + MAC_REG_SYNCDMACTL, DMACTL_WAKE); \
else \
- VNSvOutPortD(dwIoBase + MAC_REG_SYNCDMACTL, DMACTL_RUN); \
+ VNSvOutPortD(iobase + MAC_REG_SYNCDMACTL, DMACTL_RUN); \
} while (0)
-#define MACvTransmitATIM(dwIoBase) \
+#define MACvTransmitATIM(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(dwIoBase + MAC_REG_ATIMDMACTL, &dwData); \
+ VNSvInPortD(iobase + MAC_REG_ATIMDMACTL, &dwData); \
if (dwData & DMACTL_RUN) \
- VNSvOutPortD(dwIoBase + MAC_REG_ATIMDMACTL, DMACTL_WAKE); \
+ VNSvOutPortD(iobase + MAC_REG_ATIMDMACTL, DMACTL_WAKE); \
else \
- VNSvOutPortD(dwIoBase + MAC_REG_ATIMDMACTL, DMACTL_RUN); \
+ VNSvOutPortD(iobase + MAC_REG_ATIMDMACTL, DMACTL_RUN); \
} while (0)
-#define MACvTransmitBCN(dwIoBase) \
- VNSvOutPortB(dwIoBase + MAC_REG_BCNDMACTL, BEACON_READY)
+#define MACvTransmitBCN(iobase) \
+ VNSvOutPortB(iobase + MAC_REG_BCNDMACTL, BEACON_READY)
-#define MACvClearStckDS(dwIoBase) \
+#define MACvClearStckDS(iobase) \
do { \
unsigned char byOrgValue; \
- VNSvInPortB(dwIoBase + MAC_REG_STICKHW, &byOrgValue); \
+ VNSvInPortB(iobase + MAC_REG_STICKHW, &byOrgValue); \
byOrgValue = byOrgValue & 0xFC; \
- VNSvOutPortB(dwIoBase + MAC_REG_STICKHW, byOrgValue); \
+ VNSvOutPortB(iobase + MAC_REG_STICKHW, byOrgValue); \
} while (0)
-#define MACvReadISR(dwIoBase, pdwValue) \
- VNSvInPortD(dwIoBase + MAC_REG_ISR, pdwValue)
+#define MACvReadISR(iobase, pdwValue) \
+ VNSvInPortD(iobase + MAC_REG_ISR, pdwValue)
-#define MACvWriteISR(dwIoBase, dwValue) \
- VNSvOutPortD(dwIoBase + MAC_REG_ISR, dwValue)
+#define MACvWriteISR(iobase, dwValue) \
+ VNSvOutPortD(iobase + MAC_REG_ISR, dwValue)
-#define MACvIntEnable(dwIoBase, dwMask) \
- VNSvOutPortD(dwIoBase + MAC_REG_IMR, dwMask)
+#define MACvIntEnable(iobase, dwMask) \
+ VNSvOutPortD(iobase + MAC_REG_IMR, dwMask)
-#define MACvIntDisable(dwIoBase) \
- VNSvOutPortD(dwIoBase + MAC_REG_IMR, 0)
+#define MACvIntDisable(iobase) \
+ VNSvOutPortD(iobase + MAC_REG_IMR, 0)
-#define MACvSelectPage0(dwIoBase) \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0)
+#define MACvSelectPage0(iobase) \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0)
-#define MACvSelectPage1(dwIoBase) \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1)
+#define MACvSelectPage1(iobase) \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1)
-#define MACvReadMIBCounter(dwIoBase, pdwCounter) \
- VNSvInPortD(dwIoBase + MAC_REG_MIBCNTR, pdwCounter)
+#define MACvReadMIBCounter(iobase, pdwCounter) \
+ VNSvInPortD(iobase + MAC_REG_MIBCNTR, pdwCounter)
-#define MACvPwrEvntDisable(dwIoBase) \
- VNSvOutPortW(dwIoBase + MAC_REG_WAKEUPEN0, 0x0000)
+#define MACvPwrEvntDisable(iobase) \
+ VNSvOutPortW(iobase + MAC_REG_WAKEUPEN0, 0x0000)
-#define MACvEnableProtectMD(dwIoBase) \
+#define MACvEnableProtectMD(iobase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \
+ VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
dwOrgValue = dwOrgValue | EnCFG_ProtectMd; \
- VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
+ VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
-#define MACvDisableProtectMD(dwIoBase) \
+#define MACvDisableProtectMD(iobase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \
+ VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
dwOrgValue = dwOrgValue & ~EnCFG_ProtectMd; \
- VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
+ VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
-#define MACvEnableBarkerPreambleMd(dwIoBase) \
+#define MACvEnableBarkerPreambleMd(iobase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \
+ VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
dwOrgValue = dwOrgValue | EnCFG_BarkerPream; \
- VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
+ VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
-#define MACvDisableBarkerPreambleMd(dwIoBase) \
+#define MACvDisableBarkerPreambleMd(iobase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \
+ VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
dwOrgValue = dwOrgValue & ~EnCFG_BarkerPream; \
- VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
+ VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
-#define MACvSetBBType(dwIoBase, byTyp) \
+#define MACvSetBBType(iobase, byTyp) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \
+ VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
dwOrgValue = dwOrgValue & ~EnCFG_BBType_MASK; \
dwOrgValue = dwOrgValue | (unsigned long)byTyp; \
- VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
+ VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
-#define MACvReadATIMW(dwIoBase, pwCounter) \
- VNSvInPortW(dwIoBase + MAC_REG_AIDATIM, pwCounter)
+#define MACvReadATIMW(iobase, pwCounter) \
+ VNSvInPortW(iobase + MAC_REG_AIDATIM, pwCounter)
-#define MACvWriteATIMW(dwIoBase, wCounter) \
- VNSvOutPortW(dwIoBase + MAC_REG_AIDATIM, wCounter)
+#define MACvWriteATIMW(iobase, wCounter) \
+ VNSvOutPortW(iobase + MAC_REG_AIDATIM, wCounter)
-#define MACvWriteCRC16_128(dwIoBase, byRegOfs, wCRC) \
+#define MACvWriteCRC16_128(iobase, byRegOfs, wCRC) \
do { \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \
- VNSvOutPortW(dwIoBase + byRegOfs, wCRC); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
+ VNSvOutPortW(iobase + byRegOfs, wCRC); \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
} while (0)
-#define MACvGPIOIn(dwIoBase, pbyValue) \
- VNSvInPortB(dwIoBase + MAC_REG_GPIOCTL1, pbyValue)
+#define MACvGPIOIn(iobase, pbyValue) \
+ VNSvInPortB(iobase + MAC_REG_GPIOCTL1, pbyValue)
-#define MACvSetRFLE_LatchBase(dwIoBase) \
- MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
+#define MACvSetRFLE_LatchBase(iobase) \
+ MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
bool MACbIsRegBitsOn(struct vnt_private *, unsigned char byRegOfs,
unsigned char byTestBits);
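
The renamed macros above all share one shape: read the register, modify the copy, write it back, with the statements wrapped in do { } while (0) so the macro expands safely as a single statement inside an unbraced if/else. A compact sketch of that read-modify-write wrapper over a hypothetical byte-register array (reg[] and the offsets are illustrative, not the VIA register map):

#include <stdint.h>

static volatile uint8_t reg[256];	/* stand-in for the mapped I/O window */

#define REG_BITS_ON(ofs, bits)				\
	do {						\
		uint8_t v = reg[(ofs)];			\
		reg[(ofs)] = v | (bits);		\
	} while (0)

#define REG_BITS_OFF(ofs, bits)				\
	do {						\
		uint8_t v = reg[(ofs)];			\
		reg[(ofs)] = v & (uint8_t)~(bits);	\
	} while (0)

/* Usage: safe even as the sole statement of an if/else branch. */
static void example(int enable)
{
	if (enable)
		REG_BITS_ON(0x10, 0x01);
	else
		REG_BITS_OFF(0x10, 0x01);
}
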
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index 7d6e7464ae51..716d2a80f840 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: power.c
*
* Purpose: Handles 802.11 power management functions
@@ -133,7 +128,6 @@ PSvDisablePowerSaving(
priv->bPWBitOn = false;
}
-
/*
*
* Routine Description:
diff --git a/drivers/staging/vt6655/power.h b/drivers/staging/vt6655/power.h
index d82dd8d6d68b..dfcb0ca8b448 100644
--- a/drivers/staging/vt6655/power.h
+++ b/drivers/staging/vt6655/power.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: power.h
*
* Purpose: Handles 802.11 power management functions
@@ -46,7 +42,6 @@ PSvEnablePowerSaving(
unsigned short wListenInterval
);
-
bool
PSbIsNextTBTTWakeUp(
struct vnt_private *
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 447882c7a6be..edf7db9d53b3 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: rf.c
*
* Purpose: rf function code
@@ -50,359 +45,362 @@
#define AL7230_PWR_IDX_LEN 64
static const unsigned long dwAL2230InitTable[CB_AL2230_INIT_SEQ] = {
- 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x01A00200+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x00FFF300+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0F4DC500+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0805B600+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0146C700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x00068800+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0403B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x00DBBA00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0BDFFC00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x00000D00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x00580F00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW
+ 0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x01A00200 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x00FFF300 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0005A400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0F4DC500 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0805B600 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0146C700 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x00068800 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0403B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x00DBBA00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x00099B00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0BDFFC00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x00000D00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x00580F00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW
};
static const unsigned long dwAL2230ChannelTable0[CB_MAX_CHANNEL] = {
- 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
- 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
- 0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
- 0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
- 0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
- 0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
- 0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
- 0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
- 0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
- 0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
- 0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
- 0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
- 0x03F7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
- 0x03E7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 14, Tf = 2412M */
+ 0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
+ 0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
+ 0x03E79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
+ 0x03E79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
+ 0x03F7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
+ 0x03F7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
+ 0x03E7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
+ 0x03E7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
+ 0x03F7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
+ 0x03F7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+ 0x03E7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+ 0x03E7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+ 0x03F7C000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+ 0x03E7C000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 14, Tf = 2412M */
};
static const unsigned long dwAL2230ChannelTable1[CB_MAX_CHANNEL] = {
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
- 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
- 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
- 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
- 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
- 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
- 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
- 0x06666100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 14, Tf = 2412M */
+ 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
+ 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
+ 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
+ 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
+ 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
+ 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
+ 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
+ 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
+ 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
+ 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+ 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+ 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+ 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+ 0x06666100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 14, Tf = 2412M */
};
static unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
- 0x04040900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04041900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04042900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04043900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04044900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04045900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04046900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04047900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04048900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04049900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0404A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0404B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0404C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0404D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0404E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0404F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04050900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04051900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04052900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04053900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04054900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04055900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04056900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04057900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04058900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04059900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0405A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0405B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0405C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0405D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0405E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0405F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04060900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04061900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04062900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04063900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04064900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04065900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04066900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04067900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04068900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04069900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0406A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0406B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0406C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0406D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0406E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0406F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04070900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04071900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04072900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04073900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04074900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04075900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04076900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04077900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04078900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04079900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0407A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0407B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0407C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0407D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0407E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0407F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW
+ 0x04040900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04041900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04042900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04043900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04044900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04045900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04046900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04047900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04048900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04049900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0404A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0404B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0404C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0404D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0404E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0404F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04050900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04051900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04052900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04053900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04054900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04055900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04056900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04057900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04058900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04059900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0405A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0405B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0405C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0405D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0405E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0405F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04060900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04061900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04062900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04063900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04064900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04065900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04066900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04067900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04068900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04069900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0406A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0406B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0406C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0406D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0406E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0406F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04070900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04071900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04072900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04073900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04074900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04075900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04076900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04077900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04078900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04079900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0407A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0407B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0407C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0407D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0407E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0407F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW
};
/* 40MHz reference frequency
* Need to Pull PLLON(PE3) low when writing channel registers through 3-wire.
*/
static const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
- 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
- 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
- 0x841FF200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 451FE2 */
- 0x3FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 5FDFA3 */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* 11b/g // Need modify for 11a */
+ 0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
+ 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
+ 0x841FF200 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 451FE2 */
+ 0x3FDFA300 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 5FDFA3 */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* 11b/g // Need modify for 11a */
/* RobertYu:20050113, Rev0.47 Register Setting Guide */
- 0x802B5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 8D1B55 */
- 0x56AF3600+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 860207 */
- 0x6EBC0800+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0xE0000A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: E0600A */
- 0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */
+ 0x802B5500 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 8D1B55 */
+ 0x56AF3600 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0xCE020700 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 860207 */
+ 0x6EBC0800 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x221BB900 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0xE0000A00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: E0600A */
+ 0x08031B00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */
/* RobertYu:20050113, Rev0.47 Register Setting Guide */
- 0x000A3C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 00143C */
- 0xFFFFFD00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x00000E00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x1ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* Need modify for 11a: 12BACF */
+ 0x000A3C00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 00143C */
+ 0xFFFFFD00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x00000E00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x1ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* Need modify for 11a: 12BACF */
};
static const unsigned long dwAL7230InitTableAMode[CB_AL7230_INIT_SEQ] = {
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
- 0x451FE200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
- 0x5FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
- 0x67F78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* 11a // Need modify for 11b/g */
- 0x853F5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g, RoberYu:20050113 */
- 0x56AF3600+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
- 0x6EBC0800+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0xE0600A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
- 0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */
- 0x00147C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
- 0xFFFFFD00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x00000E00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x12BACF00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* Need modify for 11b/g */
+ 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
+ 0x451FE200 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
+ 0x5FDFA300 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
+ 0x67F78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* 11a // Need modify for 11b/g */
+ 0x853F5500 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g, RoberYu:20050113 */
+ 0x56AF3600 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0xCE020700 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
+ 0x6EBC0800 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x221BB900 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0xE0600A00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
+ 0x08031B00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */
+ 0x00147C00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
+ 0xFFFFFD00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x00000E00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x12BACF00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* Need modify for 11b/g */
};
static const unsigned long dwAL7230ChannelTable0[CB_MAX_CHANNEL] = {
- 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
- 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
- 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
- 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
- 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
- 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
- 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
- 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz //RobertYu: 20050218, update for APNode 0.49 */
- 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz //RobertYu: 20050218, update for APNode 0.49 */
- 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz //RobertYu: 20050218, update for APNode 0.49 */
- 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz //RobertYu: 20050218, update for APNode 0.49 */
- 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz //RobertYu: 20050218, update for APNode 0.49 */
- 0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz //RobertYu: 20050218, update for APNode 0.49 */
- 0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
+ 0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
+ 0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
+ 0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
+ 0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
+ 0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
+ 0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
+ 0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
+ 0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0037C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0037C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
/* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22) */
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
- 0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
- 0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
+ 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
+ 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
+ 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
+ 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
+ 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
+ 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
+ 0x0FF53000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
+ 0x0FF53000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
/* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
- * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56) */
-
- 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */
- 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */
- 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */
- 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */
- 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */
- 0x0FF55000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */
- 0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */
- 0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */
- 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) //RobertYu: 20050218, update for APNode 0.49 */
- 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */
- 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */
- 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */
- 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */
- 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */
- 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */
- 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */
- 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */
- 0x0FF59000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */
-
- 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
- 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
- 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
- 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
- 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
- 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
- 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
- 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
- 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
- 0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
- 0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
- 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
- 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
- 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
- 0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
- 0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
+ * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
+ */
+
+ 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */
+ 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */
+ 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */
+ 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */
+ 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */
+ 0x0FF55000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */
+ 0x0FF56000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */
+ 0x0FF56000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */
+ 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */
+ 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */
+ 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */
+ 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */
+ 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */
+ 0x0FF58000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */
+ 0x0FF58000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */
+ 0x0FF58000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */
+ 0x0FF59000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */
+
+ 0x0FF5C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
+ 0x0FF5C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
+ 0x0FF5C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
+ 0x0FF5D000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
+ 0x0FF5D000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
+ 0x0FF5D000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
+ 0x0FF5E000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
+ 0x0FF5E000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
+ 0x0FF5E000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
+ 0x0FF5F000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
+ 0x0FF5F000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
+ 0x0FF60000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
+ 0x0FF60000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
+ 0x0FF60000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
+ 0x0FF61000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
+ 0x0FF61000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
};
static const unsigned long dwAL7230ChannelTable1[CB_MAX_CHANNEL] = {
- 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
- 0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
- 0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
- 0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
- 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
- 0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
- 0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
- 0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
- 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
- 0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
- 0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
- 0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
- 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
- 0x06666100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
+ 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
+ 0x1B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
+ 0x03333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
+ 0x0B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
+ 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
+ 0x1B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
+ 0x03333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
+ 0x0B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
+ 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
+ 0x1B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+ 0x03333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+ 0x0B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+ 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+ 0x06666100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
/* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22) */
- 0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
- 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
- 0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
- 0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
+ 0x1D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
+ 0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
+ 0x08000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
+ 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
+ 0x0D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
+ 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
/* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
- * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56) */
- 0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */
- 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */
- 0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */
- 0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */
- 0x10000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) */
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */
- 0x1AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */
- 0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
- 0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
- 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
- 0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
- 0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
- 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
+ * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
+ */
+ 0x1D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */
+ 0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */
+ 0x08000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */
+ 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */
+ 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */
+ 0x05555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */
+ 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */
+ 0x10000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) */
+ 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */
+ 0x1AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */
+ 0x05555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */
+ 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */
+ 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */
+ 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */
+ 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */
+ 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
+ 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
+ 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
+ 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
+ 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
+ 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
+ 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
+ 0x18000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
+ 0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
+ 0x0D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
+ 0x18000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
+ 0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
};
static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
/* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
+ 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
+ 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
/* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
- * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
+ * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
+ */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */
+ 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */
+ 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */
+ 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
+ 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
+ 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
+ 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
+ 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
};
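Every entry in the AL2230/AL7230 tables above follows one pattern: the RF register payload in the high bits, the serial word length shifted left by three, and the IFREGCTL_REGW write-command flag added in. A minimal sketch of that encoding, with a hypothetical helper name (the driver itself spells each sum out literally):

#define VNT_RF_CMD(payload, reg_len) \
	((payload) + ((reg_len) << 3) + IFREGCTL_REGW)

/* e.g. the channel 1 entry of dwAL7230ChannelTable0 is VNT_RF_CMD(0x00379000, BY_AL7230_REG_LEN) */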
/*
@@ -410,7 +408,7 @@ static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* Out:
* none
*
@@ -419,16 +417,16 @@ static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
*/
static bool s_bAL7230Init(struct vnt_private *priv)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
int ii;
bool ret;
ret = true;
/* 3-wire control for normal mode */
- VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0);
+ VNSvOutPortB(iobase + MAC_REG_SOFTPWRCTL, 0);
- MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
+ MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
BBvPowerSaveModeOFF(priv); /* RobertYu:20050106, have DC value for Calibration */
@@ -436,20 +434,20 @@ static bool s_bAL7230Init(struct vnt_private *priv)
ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[ii]);
/* PLL On */
- MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+ MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
/* Calibration */
MACvTimer0MicroSDelay(priv, 150);/* 150us */
/* TXDCOC:active, RCK:disable */
- ret &= IFRFbWriteEmbedded(priv, (0x9ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
+ ret &= IFRFbWriteEmbedded(priv, (0x9ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW));
MACvTimer0MicroSDelay(priv, 30);/* 30us */
/* TXDCOC:disable, RCK:active */
- ret &= IFRFbWriteEmbedded(priv, (0x3ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
+ ret &= IFRFbWriteEmbedded(priv, (0x3ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW));
MACvTimer0MicroSDelay(priv, 30);/* 30us */
/* TXDCOC:disable, RCK:disable */
ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]);
- MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
+ MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
SOFTPWRCTL_SWPE2 |
SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
@@ -458,7 +456,7 @@ static bool s_bAL7230Init(struct vnt_private *priv)
/* PE1: TX_ON, PE2: RX_ON, PE3: PLLON */
/* 3-wire control for power saving mode */
- VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
+ VNSvOutPortB(iobase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
return ret;
}
@@ -468,26 +466,26 @@ static bool s_bAL7230Init(struct vnt_private *priv)
*/
static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
bool ret;
ret = true;
/* PLLON Off */
- MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+ MACvWordRegBitsOff(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable0[byChannel - 1]);
ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable1[byChannel - 1]);
ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable2[byChannel - 1]);
/* PLLOn On */
- MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+ MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
/* Set Channel[7] = 0 to tell H/W channel is changing now. */
- VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F));
+ VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel & 0x7F));
MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL7230);
/* Set Channel[7] = 1 to tell H/W channel change is done. */
- VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80));
+ VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel | 0x80));
return ret;
}
@@ -497,7 +495,7 @@ static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byCha
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* dwData - data to write
* Out:
* none
@@ -507,15 +505,15 @@ static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byCha
*/
bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
unsigned short ww;
unsigned long dwValue;
- VNSvOutPortD(dwIoBase + MAC_REG_IFREGCTL, dwData);
+ VNSvOutPortD(iobase + MAC_REG_IFREGCTL, dwData);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortD(dwIoBase + MAC_REG_IFREGCTL, &dwValue);
+ VNSvInPortD(iobase + MAC_REG_IFREGCTL, &dwValue);
if (dwValue & IFREGCTL_DONE)
break;
}
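Callers build the complete command word themselves and fold the boolean result into a running status, as the init routines above do with ret &=. A condensed usage sketch, reusing a constant that appears in the power-setting code later in this file:

	bool ok = true;

	/* one raw AL2230 register write; the DONE poll above gates the result */
	ok &= IFRFbWriteEmbedded(priv, 0x0001B400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);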
@@ -531,7 +529,7 @@ bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* Out:
* none
*
@@ -540,51 +538,51 @@ bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
*/
static bool RFbAL2230Init(struct vnt_private *priv)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
int ii;
bool ret;
ret = true;
/* 3-wire control for normal mode */
- VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0);
+ VNSvOutPortB(iobase + MAC_REG_SOFTPWRCTL, 0);
- MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
+ MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
/* PLL Off */
- MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+ MACvWordRegBitsOff(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
/* patch abnormal AL2230 frequency output */
- IFRFbWriteEmbedded(priv, (0x07168700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
+ IFRFbWriteEmbedded(priv, (0x07168700 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++)
ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[ii]);
MACvTimer0MicroSDelay(priv, 30); /* delay 30 us */
/* PLL On */
- MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+ MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
MACvTimer0MicroSDelay(priv, 150);/* 150us */
- ret &= IFRFbWriteEmbedded(priv, (0x00d80f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
+ ret &= IFRFbWriteEmbedded(priv, (0x00d80f00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
MACvTimer0MicroSDelay(priv, 30);/* 30us */
- ret &= IFRFbWriteEmbedded(priv, (0x00780f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
+ ret &= IFRFbWriteEmbedded(priv, (0x00780f00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
MACvTimer0MicroSDelay(priv, 30);/* 30us */
ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]);
- MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
+ MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
SOFTPWRCTL_SWPE2 |
SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
/* 3-wire control for power saving mode */
- VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
+ VNSvOutPortB(iobase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
return ret;
}
static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
bool ret;
ret = true;
@@ -593,10 +591,10 @@ static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byCha
ret &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable1[byChannel - 1]);
/* Set Channel[7] = 0 to tell H/W channel is changing now. */
- VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F));
+ VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel & 0x7F));
MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL2230);
/* Set Channel[7] = 1 to tell H/W channel change is done. */
- VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80));
+ VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel | 0x80));
return ret;
}
@@ -681,7 +679,7 @@ bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType,
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* uChannel - channel number
* bySleepCnt - SleepProgSyn count
*
@@ -691,12 +689,12 @@ bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType,
bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
u16 uChannel)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
int ii;
unsigned char byInitCount = 0;
unsigned char bySleepCount = 0;
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, 0);
+ VNSvOutPortW(iobase + MAC_REG_MISCFFNDEX, 0);
switch (byRFType) {
case RF_AIROHA:
case RF_AL2230S:
@@ -758,7 +756,7 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* dwRFPowerTable - RF Tx Power Setting
* Out:
* none
@@ -830,7 +828,7 @@ bool RFbSetPower(
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* dwRFPowerTable - RF Tx Power Setting
* Out:
* none
@@ -855,20 +853,20 @@ bool RFbRawSetPower(
case RF_AIROHA:
ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
if (rate <= RATE_11M)
- ret &= IFRFbWriteEmbedded(priv, 0x0001B400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x0001B400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
else
- ret &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x0005A400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
break;
case RF_AL2230S:
ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
if (rate <= RATE_11M) {
- ret &= IFRFbWriteEmbedded(priv, 0x040C1400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
- ret &= IFRFbWriteEmbedded(priv, 0x00299B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x040C1400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x00299B00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
} else {
- ret &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
- ret &= IFRFbWriteEmbedded(priv, 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x0005A400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x00099B00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
}
break;
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index e9c786995506..b6e853784a26 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: rf.h
*
* Purpose:
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 7e69bc99d60f..3efe19a1b13f 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: rxtx.c
*
* Purpose: handle WMAC/802.3/802.11 rx & tx functions
@@ -1086,8 +1082,8 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
}
/*
- * Use for AUTO FALL BACK
- */
+ * Use for AUTO FALL BACK
+ */
if (fifo_ctl & FIFOCTL_AUTO_FB_0)
byFBOption = AUTO_FB_0;
else if (fifo_ctl & FIFOCTL_AUTO_FB_1)
diff --git a/drivers/staging/vt6655/rxtx.h b/drivers/staging/vt6655/rxtx.h
index 1e30ecb5c63c..89de67115826 100644
--- a/drivers/staging/vt6655/rxtx.h
+++ b/drivers/staging/vt6655/rxtx.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: rxtx.h
*
* Purpose:
diff --git a/drivers/staging/vt6655/srom.c b/drivers/staging/vt6655/srom.c
index ee992772066f..635f271595f6 100644
--- a/drivers/staging/vt6655/srom.c
+++ b/drivers/staging/vt6655/srom.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: srom.c
*
* Purpose:Implement functions to access eeprom
@@ -64,7 +60,7 @@
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* byContntOffset - address of EEPROM
* Out:
* none
@@ -72,7 +68,7 @@
* Return Value: data read
*
*/
-unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
+unsigned char SROMbyReadEmbedded(void __iomem *iobase,
unsigned char byContntOffset)
{
unsigned short wDelay, wNoACK;
@@ -81,18 +77,18 @@ unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
unsigned char byOrg;
byData = 0xFF;
- VNSvInPortB(dwIoBase + MAC_REG_I2MCFG, &byOrg);
+ VNSvInPortB(iobase + MAC_REG_I2MCFG, &byOrg);
/* turn off hardware retry for getting NACK */
- VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, (byOrg & (~I2MCFG_NORETRY)));
+ VNSvOutPortB(iobase + MAC_REG_I2MCFG, (byOrg & (~I2MCFG_NORETRY)));
for (wNoACK = 0; wNoACK < W_MAX_I2CRETRY; wNoACK++) {
- VNSvOutPortB(dwIoBase + MAC_REG_I2MTGID, EEP_I2C_DEV_ID);
- VNSvOutPortB(dwIoBase + MAC_REG_I2MTGAD, byContntOffset);
+ VNSvOutPortB(iobase + MAC_REG_I2MTGID, EEP_I2C_DEV_ID);
+ VNSvOutPortB(iobase + MAC_REG_I2MTGAD, byContntOffset);
/* issue read command */
- VNSvOutPortB(dwIoBase + MAC_REG_I2MCSR, I2MCSR_EEMR);
+ VNSvOutPortB(iobase + MAC_REG_I2MCSR, I2MCSR_EEMR);
/* wait DONE be set */
for (wDelay = 0; wDelay < W_MAX_TIMEOUT; wDelay++) {
- VNSvInPortB(dwIoBase + MAC_REG_I2MCSR, &byWait);
+ VNSvInPortB(iobase + MAC_REG_I2MCSR, &byWait);
if (byWait & (I2MCSR_DONE | I2MCSR_NACK))
break;
PCAvDelayByIO(CB_DELAY_LOOP_WAIT);
@@ -102,8 +98,8 @@ unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
break;
}
}
- VNSvInPortB(dwIoBase + MAC_REG_I2MDIPT, &byData);
- VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, byOrg);
+ VNSvInPortB(iobase + MAC_REG_I2MDIPT, &byData);
+ VNSvOutPortB(iobase + MAC_REG_I2MCFG, byOrg);
return byData;
}
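A hedged one-line example of the byte-read helper above, going through the same PortOffset mapping the rest of the driver uses (offset 0 is simply the first EEPROM byte):

	unsigned char first_byte = SROMbyReadEmbedded(priv->PortOffset, 0);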
@@ -112,20 +108,20 @@ unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* Out:
* pbyEepromRegs - EEPROM content Buffer
*
* Return Value: none
*
*/
-void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs)
+void SROMvReadAllContents(void __iomem *iobase, unsigned char *pbyEepromRegs)
{
int ii;
/* ii = Rom Address */
for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) {
- *pbyEepromRegs = SROMbyReadEmbedded(dwIoBase,
+ *pbyEepromRegs = SROMbyReadEmbedded(iobase,
(unsigned char)ii);
pbyEepromRegs++;
}
@@ -136,21 +132,21 @@ void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs)
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* Out:
* pbyEtherAddress - Ethernet Address buffer
*
* Return Value: none
*
*/
-void SROMvReadEtherAddress(void __iomem *dwIoBase,
+void SROMvReadEtherAddress(void __iomem *iobase,
unsigned char *pbyEtherAddress)
{
unsigned char ii;
/* ii = Rom Address */
for (ii = 0; ii < ETH_ALEN; ii++) {
- *pbyEtherAddress = SROMbyReadEmbedded(dwIoBase, ii);
+ *pbyEtherAddress = SROMbyReadEmbedded(iobase, ii);
pbyEtherAddress++;
}
}
diff --git a/drivers/staging/vt6655/srom.h b/drivers/staging/vt6655/srom.h
index 531bf0069373..6e03ab6dfa9d 100644
--- a/drivers/staging/vt6655/srom.h
+++ b/drivers/staging/vt6655/srom.h
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: srom.h
*
* Purpose: Implement functions to access eeprom
@@ -90,12 +85,12 @@
/*--------------------- Export Functions --------------------------*/
-unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
+unsigned char SROMbyReadEmbedded(void __iomem *iobase,
unsigned char byContntOffset);
-void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs);
+void SROMvReadAllContents(void __iomem *iobase, unsigned char *pbyEepromRegs);
-void SROMvReadEtherAddress(void __iomem *dwIoBase,
+void SROMvReadEtherAddress(void __iomem *iobase,
unsigned char *pbyEtherAddress);
#endif /* __EEPROM_H__*/
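
The dwIoBase -> iobase rename above drops the Hungarian prefix from what is really an __iomem cookie. For readers unfamiliar with the convention, here is a minimal sketch of byte-wide MMIO accessors in the style the VNSvInPortB()/VNSvOutPortB() wrappers are assumed to follow; the helper names below are illustrative, not the driver's own:

#include <linux/io.h>
#include <linux/types.h>

/* Read one byte from a memory-mapped register region. */
static inline u8 example_in_port_b(void __iomem *iobase, unsigned long reg)
{
        return ioread8(iobase + reg);
}

/* Write one byte to a memory-mapped register region. */
static inline void example_out_port_b(void __iomem *iobase,
                                      unsigned long reg, u8 val)
{
        iowrite8(val, iobase + reg);
}
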
diff --git a/drivers/staging/vt6655/tmacro.h b/drivers/staging/vt6655/tmacro.h
index 597efefc017f..d6a0563ad55c 100644
--- a/drivers/staging/vt6655/tmacro.h
+++ b/drivers/staging/vt6655/tmacro.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: tmacro.h
*
* Purpose: define basic common types and macros
diff --git a/drivers/staging/vt6655/upc.h b/drivers/staging/vt6655/upc.h
index 85fe0464cfb3..9806b5989014 100644
--- a/drivers/staging/vt6655/upc.h
+++ b/drivers/staging/vt6655/upc.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: upc.h
*
* Purpose: Macros to access device
diff --git a/drivers/staging/vt6656/baseband.h b/drivers/staging/vt6656/baseband.h
index 7cc13874f8f1..fe1c25c64cca 100644
--- a/drivers/staging/vt6656/baseband.h
+++ b/drivers/staging/vt6656/baseband.h
@@ -86,15 +86,15 @@ struct vnt_phy_field {
unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type,
unsigned int frame_length, u16 tx_rate);
-void vnt_get_phy_field(struct vnt_private *, u32 frame_length,
- u16 tx_rate, u8 pkt_type, struct vnt_phy_field *);
-
-void vnt_set_short_slot_time(struct vnt_private *);
-void vnt_set_vga_gain_offset(struct vnt_private *, u8);
-void vnt_set_antenna_mode(struct vnt_private *, u8);
-int vnt_vt3184_init(struct vnt_private *);
-void vnt_set_deep_sleep(struct vnt_private *);
-void vnt_exit_deep_sleep(struct vnt_private *);
-void vnt_update_pre_ed_threshold(struct vnt_private *, int scanning);
+void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
+ u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy);
+
+void vnt_set_short_slot_time(struct vnt_private *priv);
+void vnt_set_vga_gain_offset(struct vnt_private *priv, u8 data);
+void vnt_set_antenna_mode(struct vnt_private *priv, u8 antenna_mode);
+int vnt_vt3184_init(struct vnt_private *priv);
+void vnt_set_deep_sleep(struct vnt_private *priv);
+void vnt_exit_deep_sleep(struct vnt_private *priv);
+void vnt_update_pre_ed_threshold(struct vnt_private *priv, int scanning);
#endif /* __BASEBAND_H__ */
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index 53b469c71dc2..0e5a99375099 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -501,16 +501,7 @@ u8 vnt_get_pkt_type(struct vnt_private *priv)
*/
u64 vnt_get_tsf_offset(u8 rx_rate, u64 tsf1, u64 tsf2)
{
- u64 tsf_offset = 0;
- u16 rx_bcn_offset;
-
- rx_bcn_offset = cw_rxbcntsf_off[rx_rate % MAX_RATE];
-
- tsf2 += (u64)rx_bcn_offset;
-
- tsf_offset = tsf1 - tsf2;
-
- return tsf_offset;
+ return tsf1 - tsf2 - (u64)cw_rxbcntsf_off[rx_rate % MAX_RATE];
}
/*
@@ -610,8 +601,8 @@ u64 vnt_get_next_tbtt(u64 tsf, u16 beacon_interval)
beacon_int = beacon_interval * 1024;
/* Next TBTT =
- * ((local_current_TSF / beacon_interval) + 1) * beacon_interval
- */
+ * ((local_current_TSF / beacon_interval) + 1) * beacon_interval
+ */
if (beacon_int) {
do_div(tsf, beacon_int);
tsf += 1;
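
The next-TBTT hunk above relies on do_div(), which divides a u64 in place and returns the remainder. A minimal sketch of the same arithmetic with an illustrative function name; the final scale-back step is assumed here, since the hunk is cut off before it:

#include <asm/div64.h>
#include <linux/types.h>

/* Round tsf up to the next beacon boundary (sketch only). */
static u64 example_next_tbtt(u64 tsf, u16 beacon_interval)
{
        u32 beacon_int = beacon_interval * 1024;        /* TU -> microseconds */

        if (!beacon_int)
                return tsf;

        do_div(tsf, beacon_int);        /* tsf now holds the quotient */
        return (tsf + 1) * beacon_int;  /* first TBTT strictly after tsf */
}
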
diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
index eeed16e9124e..611da4929ddc 100644
--- a/drivers/staging/vt6656/mac.c
+++ b/drivers/staging/vt6656/mac.c
@@ -121,7 +121,7 @@ void vnt_mac_set_keyentry(struct vnt_private *priv, u16 key_ctl, u32 entry_idx,
u16 offset;
offset = MISCFIFO_KEYETRY0;
- offset += (entry_idx * MISCFIFO_KEYENTRYSIZE);
+ offset += entry_idx * MISCFIFO_KEYENTRYSIZE;
set_key.u.write.key_ctl = cpu_to_le16(key_ctl);
ether_addr_copy(set_key.u.write.addr, addr);
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 0594828bdabf..50d02d9aa535 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -85,7 +85,7 @@ MODULE_PARM_DESC(tx_buffers, "Number of receive usb tx buffers");
* Static vars definitions
*/
-static struct usb_device_id vt6656_table[] = {
+static const struct usb_device_id vt6656_table[] = {
{USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)},
{}
};
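
Constifying the device ID table is safe because the table is only read after registration, so it can live in rodata. A self-contained sketch of the pattern; the vendor/product values are placeholders:

#include <linux/module.h>
#include <linux/usb.h>

#define EXAMPLE_VENDOR_ID  0x1234
#define EXAMPLE_PRODUCT_ID 0x5678

static const struct usb_device_id example_table[] = {
        { USB_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_PRODUCT_ID) },
        { }     /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_table);
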
@@ -326,9 +326,9 @@ static int vnt_init_registers(struct vnt_private *priv)
priv->current_net_addr);
/*
- * set BB and packet type at the same time
- * set Short Slot Time, xIFS, and RSPINF
- */
+ * set BB and packet type at the same time
+ * set Short Slot Time, xIFS, and RSPINF
+ */
if (priv->bb_type == BB_TYPE_11A)
priv->short_slot_time = true;
else
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index 79a3108719a6..6101a35582b6 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -730,9 +730,9 @@ int vnt_rf_set_txpower(struct vnt_private *priv, u8 power, u32 rate)
return false;
/*
- * 0x080F1B00 for 3 wire control TxGain(D10)
- * and 0x31 as TX Gain value
- */
+ * 0x080F1B00 for 3 wire control TxGain(D10)
+ * and 0x31 as TX Gain value
+ */
power_setting = 0x080c0b00 | (power << 12);
ret &= vnt_rf_write_embedded(priv, power_setting);
@@ -800,8 +800,8 @@ int vnt_rf_set_txpower(struct vnt_private *priv, u8 power, u32 rate)
/* Convert rssi to dbm */
void vnt_rf_rssi_to_dbm(struct vnt_private *priv, u8 rssi, long *dbm)
{
- u8 idx = (((rssi & 0xc0) >> 6) & 0x03);
- long b = (rssi & 0x3f);
+ u8 idx = ((rssi & 0xc0) >> 6) & 0x03;
+ long b = rssi & 0x3f;
long a = 0;
u8 airoharf[4] = {0, 18, 0, 40};
diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c
index 4b51c0ac27ac..622994795222 100644
--- a/drivers/staging/wilc1000/coreconfigurator.c
+++ b/drivers/staging/wilc1000/coreconfigurator.c
@@ -224,9 +224,7 @@ static inline u16 get_asoc_status(u8 *data)
u16 asoc_status;
asoc_status = data[3];
- asoc_status = (asoc_status << 8) | data[2];
-
- return asoc_status;
+ return (asoc_status << 8) | data[2];
}
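
The simplified helper assembles a little-endian 16-bit field from data[2]/data[3]. An equivalent, shown only as a sketch and assuming the field really is an LE16 value at offset 2, would use the kernel's unaligned accessors:

#include <asm/unaligned.h>
#include <linux/types.h>

static inline u16 example_get_asoc_status(const u8 *data)
{
        return get_unaligned_le16(data + 2);    /* data[2] = low byte, data[3] = high byte */
}
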
static inline u16 get_asoc_id(u8 *data)
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 6ab7443eabde..b00ea75524e4 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -1722,10 +1722,8 @@ _WPAPtk_end_case_:
case PMKSA:
pu8keybuf = kmalloc((pstrHostIFkeyAttr->attr.pmkid.numpmkid * PMKSA_KEY_LEN) + 1, GFP_KERNEL);
- if (!pu8keybuf) {
- netdev_err(vif->ndev, "No buffer to send PMKSA Key\n");
+ if (!pu8keybuf)
return -ENOMEM;
- }
pu8keybuf[0] = pstrHostIFkeyAttr->attr.pmkid.numpmkid;
@@ -1932,7 +1930,7 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif,
wid.val = kmalloc(wid.size, GFP_KERNEL);
stamac = wid.val;
- memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN);
+ ether_addr_copy(stamac, strHostIfStaInactiveT->mac);
result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
wilc_get_vif_idx(vif));
@@ -2168,7 +2166,7 @@ static void Handle_DelStation(struct wilc_vif *vif,
pu8CurrByte = wid.val;
- memcpy(pu8CurrByte, pstrDelStaParam->mac_addr, ETH_ALEN);
+ ether_addr_copy(pu8CurrByte, pstrDelStaParam->mac_addr);
result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
wilc_get_vif_idx(vif));
@@ -2322,10 +2320,8 @@ static u32 Handle_ListenStateExpired(struct wilc_vif *vif,
wid.size = 2;
wid.val = kmalloc(wid.size, GFP_KERNEL);
- if (!wid.val) {
- netdev_err(vif->ndev, "Failed to allocate memory\n");
+ if (!wid.val)
return -ENOMEM;
- }
wid.val[0] = u8remain_on_chan_flag;
wid.val[1] = FALSE_FRMWR_CHANNEL;
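
The memcpy() -> ether_addr_copy() conversions above are behaviour-preserving: ether_addr_copy() copies exactly ETH_ALEN bytes and requires both buffers to be 16-bit aligned, which the MAC-address fields touched here are assumed to satisfy. A minimal sketch with illustrative names:

#include <linux/etherdevice.h>
#include <linux/types.h>

static void example_copy_mac(u8 *dst, const u8 *src)
{
        ether_addr_copy(dst, src);      /* copies ETH_ALEN (6) bytes */
}
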
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index ddfea29df2a7..f36d3b5a0370 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -367,7 +367,6 @@ extern u8 wilc_connected_ssid[6];
extern u8 wilc_multicast_mac_addr_list[WILC_MULTICAST_TABLE_SIZE][ETH_ALEN];
extern int wilc_connecting;
-extern u8 wilc_initialized;
extern struct timer_list wilc_during_ip_timer;
#endif
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 6370a5efe343..3775706578b2 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -37,6 +37,8 @@ static void linux_wlan_tx_complete(void *priv, int status);
static int mac_init_fn(struct net_device *ndev);
static struct net_device_stats *mac_stats(struct net_device *dev);
static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd);
+static int wilc_mac_open(struct net_device *ndev);
+static int wilc_mac_close(struct net_device *ndev);
static void wilc_set_multicast_list(struct net_device *dev);
bool wilc_enable_ps = true;
@@ -218,17 +220,6 @@ static void deinit_irq(struct net_device *dev)
}
}
-int wilc_lock_timeout(struct wilc *nic, void *vp, u32 timeout)
-{
- /* FIXME: replace with mutex_lock or wait_for_completion */
- int error = -1;
-
- if (vp)
- error = down_timeout(vp,
- msecs_to_jiffies(timeout));
- return error;
-}
-
void wilc_mac_indicate(struct wilc *wilc, int flag)
{
int status;
@@ -269,23 +260,12 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode)
{
- int i = 0;
- int ret = -1;
- struct wilc_vif *vif;
- struct wilc *wilc;
-
- vif = netdev_priv(wilc_netdev);
- wilc = vif->wilc;
+ struct wilc_vif *vif = netdev_priv(wilc_netdev);
- for (i = 0; i < wilc->vif_num; i++)
- if (wilc->vif[i]->ndev == wilc_netdev) {
- memcpy(wilc->vif[i]->bssid, bssid, 6);
- wilc->vif[i]->mode = mode;
- ret = 0;
- break;
- }
+ memcpy(vif->bssid, bssid, 6);
+ vif->mode = mode;
- return ret;
+ return 0;
}
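
The rewritten wilc_wlan_set_bssid() leans on the fact that netdev_priv() returns the per-interface private area allocated alongside the net_device, so no walk over wilc->vif[] is needed. A stripped-down sketch of the pattern; the structure and function names are illustrative:

#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_vif {
        u8 bssid[6];
        u8 mode;
};

static void example_set_bssid(struct net_device *ndev, const u8 *bssid, u8 mode)
{
        struct example_vif *vif = netdev_priv(ndev);    /* private area of this netdev */

        memcpy(vif->bssid, bssid, 6);
        vif->mode = mode;
}
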
int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
@@ -847,7 +827,7 @@ static int mac_init_fn(struct net_device *ndev)
return 0;
}
-int wilc_mac_open(struct net_device *ndev)
+static int wilc_mac_open(struct net_device *ndev)
{
struct wilc_vif *vif;
@@ -1038,7 +1018,7 @@ int wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
return 0;
}
-int wilc_mac_close(struct net_device *ndev)
+static int wilc_mac_close(struct net_device *ndev)
{
struct wilc_priv *priv;
struct wilc_vif *vif;
@@ -1212,16 +1192,11 @@ void WILC_WFI_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
void wilc_netdev_cleanup(struct wilc *wilc)
{
- int i = 0;
- struct wilc_vif *vif[NUM_CONCURRENT_IFC];
+ int i;
- if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
+ if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev))
unregister_inetaddr_notifier(&g_dev_notifier);
- for (i = 0; i < NUM_CONCURRENT_IFC; i++)
- vif[i] = netdev_priv(wilc->vif[i]->ndev);
- }
-
if (wilc && wilc->firmware) {
release_firmware(wilc->firmware);
wilc->firmware = NULL;
@@ -1230,7 +1205,7 @@ void wilc_netdev_cleanup(struct wilc *wilc)
if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
for (i = 0; i < NUM_CONCURRENT_IFC; i++)
if (wilc->vif[i]->ndev)
- if (vif[i]->mac_opened)
+ if (wilc->vif[i]->mac_opened)
wilc_mac_close(wilc->vif[i]->ndev);
for (i = 0; i < NUM_CONCURRENT_IFC; i++) {
@@ -1278,9 +1253,9 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
vif->idx = wl->vif_num;
vif->wilc = *wilc;
+ vif->ndev = ndev;
wl->vif[i] = vif;
- wl->vif[wl->vif_num]->ndev = ndev;
- wl->vif_num++;
+ wl->vif_num = i;
ndev->netdev_ops = &wilc_netdev_ops;
{
diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c
index 802bb1d5e207..07260c497db4 100644
--- a/drivers/staging/wilc1000/wilc_debugfs.c
+++ b/drivers/staging/wilc1000/wilc_debugfs.c
@@ -62,16 +62,16 @@ static ssize_t wilc_debug_level_write(struct file *filp, const char __user *buf,
return ret;
if (flag > DBG_LEVEL_ALL) {
- printk("%s, value (0x%08x) is out of range, stay previous flag (0x%08x)\n", __func__, flag, atomic_read(&WILC_DEBUG_LEVEL));
+ pr_info("%s, value (0x%08x) is out of range, stay previous flag (0x%08x)\n", __func__, flag, atomic_read(&WILC_DEBUG_LEVEL));
return -EINVAL;
}
atomic_set(&WILC_DEBUG_LEVEL, (int)flag);
if (flag == 0)
- printk(KERN_INFO "Debug-level disabled\n");
+ pr_info("Debug-level disabled\n");
else
- printk(KERN_INFO "Debug-level enabled\n");
+ pr_info("Debug-level enabled\n");
return count;
}
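
Replacing raw printk() calls with pr_info() keeps the log level explicit, and pairing it with a pr_fmt() definition (placed before any includes) gives every message a consistent prefix. A minimal sketch with an assumed prefix string:

#define pr_fmt(fmt) "wilc_debugfs: " fmt

#include <linux/printk.h>
#include <linux/types.h>

static void example_report_level(u32 flag)
{
        if (flag == 0)
                pr_info("Debug-level disabled\n");
        else
                pr_info("Debug-level enabled (0x%08x)\n", flag);
}
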
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index 39b73fb27398..3ad7cec4662d 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -39,6 +39,7 @@ struct wilc_sdio {
};
static struct wilc_sdio g_sdio;
+static const struct wilc_hif_func wilc_hif_sdio;
static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data);
static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data);
@@ -1100,7 +1101,7 @@ static int sdio_sync_ext(struct wilc *wilc, int nint)
*
********************************************/
-const struct wilc_hif_func wilc_hif_sdio = {
+static const struct wilc_hif_func wilc_hif_sdio = {
.hif_init = sdio_init,
.hif_deinit = sdio_deinit,
.hif_read_reg = sdio_read_reg,
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index f08cf6d9e1af..55d53c3a95df 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -30,6 +30,7 @@ struct wilc_spi {
};
static struct wilc_spi g_spi;
+static const struct wilc_hif_func wilc_hif_spi;
static int wilc_spi_read(struct wilc *wilc, u32, u8 *, u32);
static int wilc_spi_write(struct wilc *wilc, u32, u8 *, u32);
@@ -858,7 +859,8 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
/* the SPI to it's initial value. */
if (!spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg)) {
/* Read failed. Try with CRC off. This might happen when module
- * is removed but chip isn't reset*/
+ * is removed but chip isn't reset
+ */
g_spi.crc_off = 1;
dev_err(&spi->dev, "Failed internal read protocol with CRC on, retrying with CRC off...\n");
if (!spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg)) {
@@ -1133,7 +1135,7 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
* Global spi HIF function table
*
********************************************/
-const struct wilc_hif_func wilc_hif_spi = {
+static const struct wilc_hif_func wilc_hif_spi = {
.hif_init = wilc_spi_init,
.hif_deinit = _wilc_spi_deinit,
.hif_read_reg = wilc_spi_read_reg,
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 60d8b055bb2f..c1a24f7bc85f 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -90,17 +90,12 @@ static const struct wiphy_wowlan_support wowlan_support = {
#define IS_MGMT_STATUS_SUCCES 0x040
#define GET_PKT_OFFSET(a) (((a) >> 22) & 0x1ff)
-extern int wilc_mac_open(struct net_device *ndev);
-extern int wilc_mac_close(struct net_device *ndev);
-
static struct network_info last_scanned_shadow[MAX_NUM_SCANNED_NETWORKS_SHADOW];
static u32 last_scanned_cnt;
struct timer_list wilc_during_ip_timer;
static struct timer_list hAgingTimer;
static u8 op_ifcs;
-u8 wilc_initialized = 1;
-
#define CHAN2G(_channel, _freq, _flags) { \
.band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
@@ -1193,6 +1188,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
u32 i = 0;
u32 associatedsta = ~0;
u32 inactive_time = 0;
+
priv = wiphy_priv(wiphy);
vif = netdev_priv(dev);
@@ -1590,28 +1586,25 @@ static int remain_on_channel(struct wiphy *wiphy,
priv->strRemainOnChanParams.u32ListenDuration = duration;
priv->strRemainOnChanParams.u32ListenSessionID++;
- s32Error = wilc_remain_on_channel(vif,
+ return wilc_remain_on_channel(vif,
priv->strRemainOnChanParams.u32ListenSessionID,
duration, chan->hw_value,
WILC_WFI_RemainOnChannelExpired,
WILC_WFI_RemainOnChannelReady, (void *)priv);
-
- return s32Error;
}
static int cancel_remain_on_channel(struct wiphy *wiphy,
struct wireless_dev *wdev,
u64 cookie)
{
- s32 s32Error = 0;
struct wilc_priv *priv;
struct wilc_vif *vif;
priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
- s32Error = wilc_listen_state_expired(vif, priv->strRemainOnChanParams.u32ListenSessionID);
- return s32Error;
+ return wilc_listen_state_expired(vif,
+ priv->strRemainOnChanParams.u32ListenSessionID);
}
static int mgmt_tx(struct wiphy *wiphy,
@@ -1935,12 +1928,10 @@ static int start_ap(struct wiphy *wiphy, struct net_device *dev,
wilc_wlan_set_bssid(dev, wl->vif[vif->idx]->src_addr, AP_MODE);
wilc_set_power_mgmt(vif, 0, 0);
- s32Error = wilc_add_beacon(vif, settings->beacon_interval,
+ return wilc_add_beacon(vif, settings->beacon_interval,
settings->dtim_period, beacon->head_len,
(u8 *)beacon->head, beacon->tail_len,
(u8 *)beacon->tail);
-
- return s32Error;
}
static int change_beacon(struct wiphy *wiphy, struct net_device *dev,
@@ -1948,16 +1939,13 @@ static int change_beacon(struct wiphy *wiphy, struct net_device *dev,
{
struct wilc_priv *priv;
struct wilc_vif *vif;
- s32 s32Error = 0;
priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
- s32Error = wilc_add_beacon(vif, 0, 0, beacon->head_len,
+ return wilc_add_beacon(vif, 0, 0, beacon->head_len,
(u8 *)beacon->head, beacon->tail_len,
(u8 *)beacon->tail);
-
- return s32Error;
}
static int stop_ap(struct wiphy *wiphy, struct net_device *dev)
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index ec6b1674cf38..d431673bc46c 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -225,7 +225,6 @@ int wilc1000_wlan_init(struct net_device *dev, struct wilc_vif *vif);
void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset);
void wilc_mac_indicate(struct wilc *wilc, int flag);
-int wilc_lock_timeout(struct wilc *wilc, void *, u32 timeout);
void wilc_netdev_cleanup(struct wilc *wilc);
int wilc_netdev_init(struct wilc **wilc, struct device *, int io_type, int gpio,
const struct wilc_hif_func *ops);
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
index de6c4ddbf45a..11365efcc5d0 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wilc_wlan.h
@@ -248,9 +248,6 @@ struct wilc_hif_func {
void (*disable_interrupt)(struct wilc *nic);
};
-extern const struct wilc_hif_func wilc_hif_spi;
-extern const struct wilc_hif_func wilc_hif_sdio;
-
/********************************************
*
* Configuration Structure
@@ -297,9 +294,6 @@ void wilc_enable_tcp_ack_filter(bool value);
int wilc_wlan_get_num_conn_ifcs(struct wilc *);
int wilc_mac_xmit(struct sk_buff *skb, struct net_device *dev);
-int wilc_mac_open(struct net_device *ndev);
-int wilc_mac_close(struct net_device *ndev);
-
void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size);
void host_wakeup_notify(struct wilc *wilc);
void host_sleep_notify(struct wilc *wilc);
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 182b2d564627..aa0e5a3d4a89 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -323,7 +323,7 @@ static int prism2_scan(struct wiphy *wiphy,
priv->scan_request = request;
- memset(&msg1, 0x00, sizeof(struct p80211msg_dot11req_scan));
+ memset(&msg1, 0x00, sizeof(msg1));
msg1.msgcode = DIDmsg_dot11req_scan;
msg1.bsstype.data = P80211ENUM_bsstype_any;
@@ -375,13 +375,13 @@ static int prism2_scan(struct wiphy *wiphy,
ie_buf[0] = WLAN_EID_SSID;
ie_buf[1] = msg2.ssid.data.len;
ie_len = ie_buf[1] + 2;
- memcpy(&ie_buf[2], &(msg2.ssid.data.data), msg2.ssid.data.len);
+ memcpy(&ie_buf[2], &msg2.ssid.data.data, msg2.ssid.data.len);
freq = ieee80211_channel_to_frequency(msg2.dschannel.data,
NL80211_BAND_2GHZ);
bss = cfg80211_inform_bss(wiphy,
ieee80211_get_channel(wiphy, freq),
CFG80211_BSS_FTYPE_UNKNOWN,
- (const u8 *)&(msg2.bssid.data.data),
+ (const u8 *)&msg2.bssid.data.data,
msg2.timestamp.data, msg2.capinfo.data,
msg2.beaconperiod.data,
ie_buf,
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 43c299c3b631..60caf9c37727 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -137,21 +137,11 @@
#define HFA384x_DLSTATE_FLASHENABLED 2
/*--- Register Field Masks --------------------------*/
-#define HFA384x_CMD_AINFO ((u16)(BIT(14) | BIT(13) \
- | BIT(12) | BIT(11) \
- | BIT(10) | BIT(9) \
- | BIT(8)))
-#define HFA384x_CMD_MACPORT ((u16)(BIT(10) | BIT(9) | \
- BIT(8)))
-#define HFA384x_CMD_PROGMODE ((u16)(BIT(9) | BIT(8)))
-#define HFA384x_CMD_CMDCODE ((u16)(BIT(5) | BIT(4) | \
- BIT(3) | BIT(2) | \
- BIT(1) | BIT(0)))
-
-#define HFA384x_STATUS_RESULT ((u16)(BIT(14) | BIT(13) \
- | BIT(12) | BIT(11) \
- | BIT(10) | BIT(9) \
- | BIT(8)))
+#define HFA384x_CMD_AINFO ((u16)GENMASK(14, 8))
+#define HFA384x_CMD_MACPORT ((u16)GENMASK(10, 8))
+#define HFA384x_CMD_PROGMODE ((u16)GENMASK(9, 8))
+#define HFA384x_CMD_CMDCODE ((u16)GENMASK(5, 0))
+#define HFA384x_STATUS_RESULT ((u16)GENMASK(14, 8))
/*--- Command Code Constants --------------------------*/
/*--- Controller Commands --------------------------*/
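
GENMASK(h, l) expands to the contiguous mask covering bits l through h inclusive, so the new definitions are bit-for-bit identical to the old BIT() chains. A quick compile-time sanity check, illustrative only:

#include <linux/bitops.h>
#include <linux/bug.h>

static inline void example_genmask_check(void)
{
        BUILD_BUG_ON(GENMASK(14, 8) != 0x7f00); /* BIT(8) | ... | BIT(14) */
        BUILD_BUG_ON(GENMASK(10, 8) != 0x0700);
        BUILD_BUG_ON(GENMASK(5, 0)  != 0x003f);
}
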
@@ -266,7 +256,7 @@
#define HFA384x_RID_DBMCOMMSQUALITY_LEN \
((u16)sizeof(struct hfa384x_dbmcommsquality))
#define HFA384x_RID_JOINREQUEST_LEN \
- ((u16)sizeof(struct hfa384x_JoinRequest_data))
+ ((u16)sizeof(struct hfa384x_join_request_data))
/*--------------------------------------------------------------------
* Information RIDs: Modem Information
@@ -286,7 +276,7 @@
#define HFA384x_RID_CNFWEPFLAGS ((u16)0xFC28)
#define HFA384x_RID_CNFAUTHENTICATION ((u16)0xFC2A)
#define HFA384x_RID_CNFROAMINGMODE ((u16)0xFC2D)
-#define HFA384x_RID_CNFAPBCNint ((u16)0xFC33)
+#define HFA384x_RID_CNFAPBCNINT ((u16)0xFC33)
#define HFA384x_RID_CNFDBMADJUST ((u16)0xFC46)
#define HFA384x_RID_CNFWPADATA ((u16)0xFC48)
#define HFA384x_RID_CNFBASICRATES ((u16)0xFCB3)
@@ -408,27 +398,27 @@ struct hfa384x_caplevel {
#define HFA384x_CREATEIBSS_JOINCREATEIBSS 0
/*-- Configuration Record: HostScanRequest (data portion only) --*/
-struct hfa384x_HostScanRequest_data {
- u16 channelList;
- u16 txRate;
+struct hfa384x_host_scan_request_data {
+ u16 channel_list;
+ u16 tx_rate;
struct hfa384x_bytestr32 ssid;
} __packed;
/*-- Configuration Record: JoinRequest (data portion only) --*/
-struct hfa384x_JoinRequest_data {
+struct hfa384x_join_request_data {
u8 bssid[WLAN_BSSID_LEN];
u16 channel;
} __packed;
/*-- Configuration Record: authenticateStation (data portion only) --*/
-struct hfa384x_authenticateStation_data {
+struct hfa384x_authenticate_station_data {
u8 address[ETH_ALEN];
u16 status;
u16 algorithm;
} __packed;
/*-- Configuration Record: WPAData (data portion only) --*/
-struct hfa384x_WPAData {
+struct hfa384x_wpa_data {
u16 datalen;
u8 data[0]; /* max 80 */
} __packed;
@@ -455,16 +445,16 @@ struct hfa384x_downloadbuffer {
/*-- Information Record: commsquality --*/
struct hfa384x_commsquality {
- u16 CQ_currBSS;
- u16 ASL_currBSS;
- u16 ANL_currFC;
+ u16 cq_curr_bss;
+ u16 asl_curr_bss;
+ u16 anl_curr_fc;
} __packed;
/*-- Information Record: dmbcommsquality --*/
struct hfa384x_dbmcommsquality {
- u16 CQdbm_currBSS;
- u16 ASLdbm_currBSS;
- u16 ANLdbm_currFC;
+ u16 cq_dbm_curr_bss;
+ u16 asl_dbm_curr_bss;
+ u16 anl_dbm_curr_fc;
} __packed;
/*--------------------------------------------------------------------
@@ -511,9 +501,8 @@ struct hfa384x_tx_frame {
#define HFA384x_TXSTATUS_AGEDERR ((u16)BIT(1))
#define HFA384x_TXSTATUS_RETRYERR ((u16)BIT(0))
/*-- Transmit Control Field --*/
-#define HFA384x_TX_MACPORT ((u16)(BIT(10) | \
- BIT(9) | BIT(8)))
-#define HFA384x_TX_STRUCTYPE ((u16)(BIT(4) | BIT(3)))
+#define HFA384x_TX_MACPORT ((u16)GENMASK(10, 8))
+#define HFA384x_TX_STRUCTYPE ((u16)GENMASK(4, 3))
#define HFA384x_TX_TXEX ((u16)BIT(2))
#define HFA384x_TX_TXOK ((u16)BIT(1))
/*--------------------------------------------------------------------
@@ -571,9 +560,7 @@ struct hfa384x_rx_frame {
*/
/*-- Status Fields --*/
-#define HFA384x_RXSTATUS_MACPORT ((u16)(BIT(10) | \
- BIT(9) | \
- BIT(8)))
+#define HFA384x_RXSTATUS_MACPORT ((u16)GENMASK(10, 8))
#define HFA384x_RXSTATUS_FCSERR ((u16)BIT(0))
/*--------------------------------------------------------------------
* Communication Frames: Test/Get/Set Field Values for Receive Frames
@@ -610,7 +597,7 @@ struct hfa384x_rx_frame {
*/
/*-- Inquiry Frame, Diagnose: Communication Tallies --*/
-struct hfa384x_CommTallies16 {
+struct hfa384x_comm_tallies_16 {
u16 txunicastframes;
u16 txmulticastframes;
u16 txfragments;
@@ -634,7 +621,7 @@ struct hfa384x_CommTallies16 {
u16 rxmsginbadmsgfrag;
} __packed;
-struct hfa384x_CommTallies32 {
+struct hfa384x_comm_tallies_32 {
u32 txunicastframes;
u32 txmulticastframes;
u32 txfragments;
@@ -659,7 +646,7 @@ struct hfa384x_CommTallies32 {
} __packed;
/*-- Inquiry Frame, Diagnose: Scan Results & Subfields--*/
-struct hfa384x_ScanResultSub {
+struct hfa384x_scan_result_sub {
u16 chid;
u16 anl;
u16 sl;
@@ -671,14 +658,14 @@ struct hfa384x_ScanResultSub {
u16 proberesp_rate;
} __packed;
-struct hfa384x_ScanResult {
+struct hfa384x_scan_result {
u16 rsvd;
u16 scanreason;
- struct hfa384x_ScanResultSub result[HFA384x_SCANRESULT_MAX];
+ struct hfa384x_scan_result_sub result[HFA384x_SCANRESULT_MAX];
} __packed;
/*-- Inquiry Frame, Diagnose: ChInfo Results & Subfields--*/
-struct hfa384x_ChInfoResultSub {
+struct hfa384x_ch_info_result_sub {
u16 chid;
u16 anl;
u16 pnl;
@@ -688,13 +675,13 @@ struct hfa384x_ChInfoResultSub {
#define HFA384x_CHINFORESULT_BSSACTIVE BIT(0)
#define HFA384x_CHINFORESULT_PCFACTIVE BIT(1)
-struct hfa384x_ChInfoResult {
+struct hfa384x_ch_info_result {
u16 scanchannels;
- struct hfa384x_ChInfoResultSub result[HFA384x_CHINFORESULT_MAX];
+ struct hfa384x_ch_info_result_sub result[HFA384x_CHINFORESULT_MAX];
} __packed;
/*-- Inquiry Frame, Diagnose: Host Scan Results & Subfields--*/
-struct hfa384x_HScanResultSub {
+struct hfa384x_hscan_result_sub {
u16 chid;
u16 anl;
u16 sl;
@@ -707,10 +694,10 @@ struct hfa384x_HScanResultSub {
u16 atim;
} __packed;
-struct hfa384x_HScanResult {
+struct hfa384x_hscan_result {
u16 nresult;
u16 rsvd;
- struct hfa384x_HScanResultSub result[HFA384x_HSCANRESULT_MAX];
+ struct hfa384x_hscan_result_sub result[HFA384x_HSCANRESULT_MAX];
} __packed;
/*-- Unsolicited Frame, MAC Mgmt: LinkStatus --*/
@@ -723,7 +710,7 @@ struct hfa384x_HScanResult {
#define HFA384x_LINK_AP_INRANGE ((u16)5)
#define HFA384x_LINK_ASSOCFAIL ((u16)6)
-struct hfa384x_LinkStatus {
+struct hfa384x_link_status {
u16 linkstatus;
} __packed;
@@ -733,7 +720,7 @@ struct hfa384x_LinkStatus {
#define HFA384x_ASSOCSTATUS_REASSOC ((u16)2)
#define HFA384x_ASSOCSTATUS_AUTHFAIL ((u16)5)
-struct hfa384x_AssocStatus {
+struct hfa384x_assoc_status {
u16 assocstatus;
u8 sta_addr[ETH_ALEN];
/* old_ap_addr is only valid if assocstatus == 2 */
@@ -744,37 +731,37 @@ struct hfa384x_AssocStatus {
/*-- Unsolicited Frame, MAC Mgmt: AuthRequest (AP Only) --*/
-struct hfa384x_AuthRequest {
+struct hfa384x_auth_request {
u8 sta_addr[ETH_ALEN];
u16 algorithm;
} __packed;
/*-- Unsolicited Frame, MAC Mgmt: PSUserCount (AP Only) --*/
-struct hfa384x_PSUserCount {
+struct hfa384x_ps_user_count {
u16 usercnt;
} __packed;
-struct hfa384x_KeyIDChanged {
+struct hfa384x_key_id_changed {
u8 sta_addr[ETH_ALEN];
u16 keyid;
} __packed;
/*-- Collection of all Inf frames ---------------*/
union hfa384x_infodata {
- struct hfa384x_CommTallies16 commtallies16;
- struct hfa384x_CommTallies32 commtallies32;
- struct hfa384x_ScanResult scanresult;
- struct hfa384x_ChInfoResult chinforesult;
- struct hfa384x_HScanResult hscanresult;
- struct hfa384x_LinkStatus linkstatus;
- struct hfa384x_AssocStatus assocstatus;
- struct hfa384x_AuthRequest authreq;
- struct hfa384x_PSUserCount psusercnt;
- struct hfa384x_KeyIDChanged keyidchanged;
-} __packed;
-
-struct hfa384x_InfFrame {
+ struct hfa384x_comm_tallies_16 commtallies16;
+ struct hfa384x_comm_tallies_32 commtallies32;
+ struct hfa384x_scan_result scanresult;
+ struct hfa384x_ch_info_result chinforesult;
+ struct hfa384x_hscan_result hscanresult;
+ struct hfa384x_link_status linkstatus;
+ struct hfa384x_assoc_status assocstatus;
+ struct hfa384x_auth_request authreq;
+ struct hfa384x_ps_user_count psusercnt;
+ struct hfa384x_key_id_changed keyidchanged;
+} __packed;
+
+struct hfa384x_inf_frame {
u16 framelen;
u16 infotype;
union hfa384x_infodata info;
@@ -862,7 +849,7 @@ struct hfa384x_usb_rxfrm {
struct hfa384x_usb_infofrm {
u16 type;
- struct hfa384x_InfFrame info;
+ struct hfa384x_inf_frame info;
} __packed;
struct hfa384x_usb_statusresp {
@@ -1169,7 +1156,6 @@ enum ctlx_state {
CTLX_REQ_COMPLETE, /* OUT URB complete */
CTLX_RESP_COMPLETE /* IN URB received */
};
-typedef enum ctlx_state CTLX_STATE;
struct hfa384x_usbctlx;
struct hfa384x;
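
Dropping the CTLX_STATE typedef follows the kernel's general preference for spelling out enum and struct tags rather than hiding them behind aliases. An illustrative fragment:

enum example_state { EXAMPLE_START, EXAMPLE_COMPLETE };

struct example_ctx {
        enum example_state state;       /* tag used directly, no typedef */
};
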
@@ -1186,7 +1172,7 @@ struct hfa384x_usbctlx {
union hfa384x_usbout outbuf; /* pkt buf for OUT */
union hfa384x_usbin inbuf; /* pkt buf for IN(a copy) */
- CTLX_STATE state; /* Tracks running state */
+ enum ctlx_state state; /* Tracks running state */
struct completion done;
volatile int reapable; /* Food for the reaper task */
@@ -1294,7 +1280,7 @@ struct hfa384x {
int scanflag; /* to signal scan complete */
int join_ap; /* are we joined to a specific ap */
int join_retries; /* number of join retries till we fail */
- struct hfa384x_JoinRequest_data joinreq; /* join request saved data */
+ struct hfa384x_join_request_data joinreq;/* join request saved data */
struct wlandevice *wlandev;
/* Timer to allow for the deferred processing of linkstatus messages */
@@ -1360,17 +1346,17 @@ struct hfa384x {
struct hfa384x_caplevel cap_act_ap_mfi; /* ap f/w to modem interface */
u32 psusercount; /* Power save user count. */
- struct hfa384x_CommTallies32 tallies; /* Communication tallies. */
+ struct hfa384x_comm_tallies_32 tallies; /* Communication tallies. */
u8 comment[WLAN_COMMENT_MAX + 1]; /* User comment */
/* Channel Info request results (AP only) */
struct {
atomic_t done;
u8 count;
- struct hfa384x_ChInfoResult results;
+ struct hfa384x_ch_info_result results;
} channel_info;
- struct hfa384x_InfFrame *scanresults;
+ struct hfa384x_inf_frame *scanresults;
struct prism2sta_authlist authlist; /* Authenticated station list. */
unsigned int accessmode; /* Access mode. */
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 6a107f8a06e2..4fe037aeef12 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -1,114 +1,114 @@
/* src/prism2/driver/hfa384x_usb.c
-*
-* Functions that talk to the USB variantof the Intersil hfa384x MAC
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file implements functions that correspond to the prism2/hfa384x
-* 802.11 MAC hardware and firmware host interface.
-*
-* The functions can be considered to represent several levels of
-* abstraction. The lowest level functions are simply C-callable wrappers
-* around the register accesses. The next higher level represents C-callable
-* prism2 API functions that match the Intersil documentation as closely
-* as is reasonable. The next higher layer implements common sequences
-* of invocations of the API layer (e.g. write to bap, followed by cmd).
-*
-* Common sequences:
-* hfa384x_drvr_xxx Highest level abstractions provided by the
-* hfa384x code. They are driver defined wrappers
-* for common sequences. These functions generally
-* use the services of the lower levels.
-*
-* hfa384x_drvr_xxxconfig An example of the drvr level abstraction. These
-* functions are wrappers for the RID get/set
-* sequence. They call copy_[to|from]_bap() and
-* cmd_access(). These functions operate on the
-* RIDs and buffers without validation. The caller
-* is responsible for that.
-*
-* API wrapper functions:
-* hfa384x_cmd_xxx functions that provide access to the f/w commands.
-* The function arguments correspond to each command
-* argument, even command arguments that get packed
-* into single registers. These functions _just_
-* issue the command by setting the cmd/parm regs
-* & reading the status/resp regs. Additional
-* activities required to fully use a command
-* (read/write from/to bap, get/set int status etc.)
-* are implemented separately. Think of these as
-* C-callable prism2 commands.
-*
-* Lowest Layer Functions:
-* hfa384x_docmd_xxx These functions implement the sequence required
-* to issue any prism2 command. Primarily used by the
-* hfa384x_cmd_xxx functions.
-*
-* hfa384x_bap_xxx BAP read/write access functions.
-* Note: we usually use BAP0 for non-interrupt context
-* and BAP1 for interrupt context.
-*
-* hfa384x_dl_xxx download related functions.
-*
-* Driver State Issues:
-* Note that there are two pairs of functions that manage the
-* 'initialized' and 'running' states of the hw/MAC combo. The four
-* functions are create(), destroy(), start(), and stop(). create()
-* sets up the data structures required to support the hfa384x_*
-* functions and destroy() cleans them up. The start() function gets
-* the actual hardware running and enables the interrupts. The stop()
-* function shuts the hardware down. The sequence should be:
-* create()
-* start()
-* .
-* . Do interesting things w/ the hardware
-* .
-* stop()
-* destroy()
-*
-* Note that destroy() can be called without calling stop() first.
-* --------------------------------------------------------------------
-*/
+ *
 + * Functions that talk to the USB variant of the Intersil hfa384x MAC
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file implements functions that correspond to the prism2/hfa384x
+ * 802.11 MAC hardware and firmware host interface.
+ *
+ * The functions can be considered to represent several levels of
+ * abstraction. The lowest level functions are simply C-callable wrappers
+ * around the register accesses. The next higher level represents C-callable
+ * prism2 API functions that match the Intersil documentation as closely
+ * as is reasonable. The next higher layer implements common sequences
+ * of invocations of the API layer (e.g. write to bap, followed by cmd).
+ *
+ * Common sequences:
+ * hfa384x_drvr_xxx Highest level abstractions provided by the
+ * hfa384x code. They are driver defined wrappers
+ * for common sequences. These functions generally
+ * use the services of the lower levels.
+ *
+ * hfa384x_drvr_xxxconfig An example of the drvr level abstraction. These
+ * functions are wrappers for the RID get/set
+ * sequence. They call copy_[to|from]_bap() and
+ * cmd_access(). These functions operate on the
+ * RIDs and buffers without validation. The caller
+ * is responsible for that.
+ *
+ * API wrapper functions:
+ * hfa384x_cmd_xxx functions that provide access to the f/w commands.
+ * The function arguments correspond to each command
+ * argument, even command arguments that get packed
+ * into single registers. These functions _just_
+ * issue the command by setting the cmd/parm regs
+ * & reading the status/resp regs. Additional
+ * activities required to fully use a command
+ * (read/write from/to bap, get/set int status etc.)
+ * are implemented separately. Think of these as
+ * C-callable prism2 commands.
+ *
+ * Lowest Layer Functions:
+ * hfa384x_docmd_xxx These functions implement the sequence required
+ * to issue any prism2 command. Primarily used by the
+ * hfa384x_cmd_xxx functions.
+ *
+ * hfa384x_bap_xxx BAP read/write access functions.
+ * Note: we usually use BAP0 for non-interrupt context
+ * and BAP1 for interrupt context.
+ *
+ * hfa384x_dl_xxx download related functions.
+ *
+ * Driver State Issues:
+ * Note that there are two pairs of functions that manage the
+ * 'initialized' and 'running' states of the hw/MAC combo. The four
+ * functions are create(), destroy(), start(), and stop(). create()
+ * sets up the data structures required to support the hfa384x_*
+ * functions and destroy() cleans them up. The start() function gets
+ * the actual hardware running and enables the interrupts. The stop()
+ * function shuts the hardware down. The sequence should be:
+ * create()
+ * start()
+ * .
+ * . Do interesting things w/ the hardware
+ * .
+ * stop()
+ * destroy()
+ *
+ * Note that destroy() can be called without calling stop() first.
+ * --------------------------------------------------------------------
+ */
#include <linux/module.h>
#include <linux/kernel.h>
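
The bulk of this hunk converts the old leading-asterisk banner comments to the standard kernel multi-line comment style, which (outside net/) looks like this:

/*
 * Preferred block comment: the opening line is bare, each continuation
 * line starts with an aligned " *", and the terminator sits on its own
 * line.
 */
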
@@ -153,8 +153,8 @@ enum cmd_mode {
static void dbprint_urb(struct urb *urb);
#endif
-static void
-hfa384x_int_rxmonitor(struct wlandevice *wlandev, struct hfa384x_usb_rxfrm *rxfrm);
+static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
+ struct hfa384x_usb_rxfrm *rxfrm);
static void hfa384x_usb_defer(struct work_struct *data);
@@ -173,7 +173,8 @@ hfa384x_usbin_txcompl(struct wlandevice *wlandev, union hfa384x_usbin *usbin);
static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb);
-static void hfa384x_usbin_info(struct wlandevice *wlandev, union hfa384x_usbin *usbin);
+static void hfa384x_usbin_info(struct wlandevice *wlandev,
+ union hfa384x_usbin *usbin);
static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin,
int urb_status);
@@ -193,9 +194,11 @@ static void hfa384x_usbctlx_completion_task(unsigned long data);
static void hfa384x_usbctlx_reaper_task(unsigned long data);
-static int hfa384x_usbctlx_submit(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx);
+static int hfa384x_usbctlx_submit(struct hfa384x *hw,
+ struct hfa384x_usbctlx *ctlx);
-static void unlocked_usbctlx_complete(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx);
+static void unlocked_usbctlx_complete(struct hfa384x *hw,
+ struct hfa384x_usbctlx *ctlx);
struct usbctlx_completor {
int (*complete)(struct usbctlx_completor *);
@@ -209,7 +212,8 @@ hfa384x_usbctlx_complete_sync(struct hfa384x *hw,
static int
unlocked_usbctlx_cancel_async(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx);
-static void hfa384x_cb_status(struct hfa384x *hw, const struct hfa384x_usbctlx *ctlx);
+static void hfa384x_cb_status(struct hfa384x *hw,
+ const struct hfa384x_usbctlx *ctlx);
static int
usbctlx_get_status(const struct hfa384x_usb_statusresp *cmdresp,
@@ -263,7 +267,7 @@ hfa384x_dowmem(struct hfa384x *hw,
static int hfa384x_isgood_pdrcode(u16 pdrcode);
-static inline const char *ctlxstr(CTLX_STATE s)
+static inline const char *ctlxstr(enum ctlx_state s)
{
static const char * const ctlx_str[] = {
"Initial state",
@@ -307,21 +311,22 @@ void dbprint_urb(struct urb *urb)
#endif
/*----------------------------------------------------------------
-* submit_rx_urb
-*
-* Listen for input data on the BULK-IN pipe. If the pipe has
-* stalled then schedule it to be reset.
-*
-* Arguments:
-* hw device struct
-* memflags memory allocation flags
-*
-* Returns:
-* error code from submission
-*
-* Call context:
-* Any
-----------------------------------------------------------------*/
+ * submit_rx_urb
+ *
+ * Listen for input data on the BULK-IN pipe. If the pipe has
+ * stalled then schedule it to be reset.
+ *
+ * Arguments:
+ * hw device struct
+ * memflags memory allocation flags
+ *
+ * Returns:
+ * error code from submission
+ *
+ * Call context:
+ * Any
+ *----------------------------------------------------------------
+ */
static int submit_rx_urb(struct hfa384x *hw, gfp_t memflags)
{
struct sk_buff *skb;
@@ -367,23 +372,24 @@ done:
}
/*----------------------------------------------------------------
-* submit_tx_urb
-*
-* Prepares and submits the URB of transmitted data. If the
-* submission fails then it will schedule the output pipe to
-* be reset.
-*
-* Arguments:
-* hw device struct
-* tx_urb URB of data for transmission
-* memflags memory allocation flags
-*
-* Returns:
-* error code from submission
-*
-* Call context:
-* Any
-----------------------------------------------------------------*/
+ * submit_tx_urb
+ *
+ * Prepares and submits the URB of transmitted data. If the
+ * submission fails then it will schedule the output pipe to
+ * be reset.
+ *
+ * Arguments:
+ * hw device struct
+ * tx_urb URB of data for transmission
+ * memflags memory allocation flags
+ *
+ * Returns:
+ * error code from submission
+ *
+ * Call context:
+ * Any
+ *----------------------------------------------------------------
+ */
static int submit_tx_urb(struct hfa384x *hw, struct urb *tx_urb, gfp_t memflags)
{
struct net_device *netdev = hw->wlandev->netdev;
@@ -412,21 +418,22 @@ static int submit_tx_urb(struct hfa384x *hw, struct urb *tx_urb, gfp_t memflags)
}
/*----------------------------------------------------------------
-* hfa394x_usb_defer
-*
-* There are some things that the USB stack cannot do while
-* in interrupt context, so we arrange this function to run
-* in process context.
-*
-* Arguments:
-* hw device structure
-*
-* Returns:
-* nothing
-*
-* Call context:
-* process (by design)
-----------------------------------------------------------------*/
+ * hfa394x_usb_defer
+ *
+ * There are some things that the USB stack cannot do while
+ * in interrupt context, so we arrange this function to run
+ * in process context.
+ *
+ * Arguments:
+ * hw device structure
+ *
+ * Returns:
+ * nothing
+ *
+ * Call context:
+ * process (by design)
+ *----------------------------------------------------------------
+ */
static void hfa384x_usb_defer(struct work_struct *data)
{
struct hfa384x *hw = container_of(data, struct hfa384x, usb_work);
@@ -501,29 +508,30 @@ static void hfa384x_usb_defer(struct work_struct *data)
}
/*----------------------------------------------------------------
-* hfa384x_create
-*
-* Sets up the struct hfa384x data structure for use. Note this
-* does _not_ initialize the actual hardware, just the data structures
-* we use to keep track of its state.
-*
-* Arguments:
-* hw device structure
-* irq device irq number
-* iobase i/o base address for register access
-* membase memory base address for register access
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_create
+ *
+ * Sets up the struct hfa384x data structure for use. Note this
+ * does _not_ initialize the actual hardware, just the data structures
+ * we use to keep track of its state.
+ *
+ * Arguments:
+ * hw device structure
+ * irq device irq number
+ * iobase i/o base address for register access
+ * membase memory base address for register access
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
{
- memset(hw, 0, sizeof(struct hfa384x));
+ memset(hw, 0, sizeof(*hw));
hw->usb = usb;
/* set up the endpoints */
@@ -571,27 +579,28 @@ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
}
/*----------------------------------------------------------------
-* hfa384x_destroy
-*
-* Partner to hfa384x_create(). This function cleans up the hw
-* structure so that it can be freed by the caller using a simple
-* kfree. Currently, this function is just a placeholder. If, at some
-* point in the future, an hw in the 'shutdown' state requires a 'deep'
-* kfree, this is where it should be done. Note that if this function
-* is called on a _running_ hw structure, the drvr_stop() function is
-* called.
-*
-* Arguments:
-* hw device structure
-*
-* Returns:
-* nothing, this function is not allowed to fail.
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_destroy
+ *
+ * Partner to hfa384x_create(). This function cleans up the hw
+ * structure so that it can be freed by the caller using a simple
+ * kfree. Currently, this function is just a placeholder. If, at some
+ * point in the future, an hw in the 'shutdown' state requires a 'deep'
+ * kfree, this is where it should be done. Note that if this function
+ * is called on a _running_ hw structure, the drvr_stop() function is
+ * called.
+ *
+ * Arguments:
+ * hw device structure
+ *
+ * Returns:
+ * nothing, this function is not allowed to fail.
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
void hfa384x_destroy(struct hfa384x *hw)
{
struct sk_buff *skb;
@@ -645,10 +654,11 @@ usbctlx_get_rridresult(const struct hfa384x_usb_rridresp *rridresp,
}
/*----------------------------------------------------------------
-* Completor object:
-* This completor must be passed to hfa384x_usbctlx_complete_sync()
-* when processing a CTLX that returns a struct hfa384x_cmdresult structure.
-----------------------------------------------------------------*/
+ * Completor object:
+ * This completor must be passed to hfa384x_usbctlx_complete_sync()
+ * when processing a CTLX that returns a struct hfa384x_cmdresult structure.
+ *----------------------------------------------------------------
+ */
struct usbctlx_cmd_completor {
struct usbctlx_completor head;
@@ -664,24 +674,23 @@ static inline int usbctlx_cmd_completor_fn(struct usbctlx_completor *head)
return usbctlx_get_status(complete->cmdresp, complete->result);
}
-static inline struct usbctlx_completor *init_cmd_completor(
- struct usbctlx_cmd_completor
- *completor,
- const struct hfa384x_usb_statusresp
- *cmdresp,
- struct hfa384x_cmdresult *result)
+static inline struct usbctlx_completor *
+init_cmd_completor(struct usbctlx_cmd_completor *completor,
+ const struct hfa384x_usb_statusresp *cmdresp,
+ struct hfa384x_cmdresult *result)
{
completor->head.complete = usbctlx_cmd_completor_fn;
completor->cmdresp = cmdresp;
completor->result = result;
- return &(completor->head);
+ return &completor->head;
}
/*----------------------------------------------------------------
-* Completor object:
-* This completor must be passed to hfa384x_usbctlx_complete_sync()
-* when processing a CTLX that reads a RID.
-----------------------------------------------------------------*/
+ * Completor object:
+ * This completor must be passed to hfa384x_usbctlx_complete_sync()
+ * when processing a CTLX that reads a RID.
+ *----------------------------------------------------------------
+ */
struct usbctlx_rrid_completor {
struct usbctlx_completor head;
@@ -710,37 +719,38 @@ static int usbctlx_rrid_completor_fn(struct usbctlx_completor *head)
return 0;
}
-static inline struct usbctlx_completor *init_rrid_completor(
- struct usbctlx_rrid_completor
- *completor,
- const struct hfa384x_usb_rridresp
- *rridresp,
- void *riddata,
- unsigned int riddatalen)
+static inline struct usbctlx_completor *
+init_rrid_completor(struct usbctlx_rrid_completor *completor,
+ const struct hfa384x_usb_rridresp *rridresp,
+ void *riddata,
+ unsigned int riddatalen)
{
completor->head.complete = usbctlx_rrid_completor_fn;
completor->rridresp = rridresp;
completor->riddata = riddata;
completor->riddatalen = riddatalen;
- return &(completor->head);
+ return &completor->head;
}
/*----------------------------------------------------------------
-* Completor object:
-* Interprets the results of a synchronous RID-write
-----------------------------------------------------------------*/
+ * Completor object:
+ * Interprets the results of a synchronous RID-write
+ *----------------------------------------------------------------
+ */
#define init_wrid_completor init_cmd_completor
/*----------------------------------------------------------------
-* Completor object:
-* Interprets the results of a synchronous memory-write
-----------------------------------------------------------------*/
+ * Completor object:
+ * Interprets the results of a synchronous memory-write
+ *----------------------------------------------------------------
+ */
#define init_wmem_completor init_cmd_completor
/*----------------------------------------------------------------
-* Completor object:
-* Interprets the results of a synchronous memory-read
-----------------------------------------------------------------*/
+ * Completor object:
+ * Interprets the results of a synchronous memory-read
+ *----------------------------------------------------------------
+ */
struct usbctlx_rmem_completor {
struct usbctlx_completor head;
@@ -759,43 +769,43 @@ static int usbctlx_rmem_completor_fn(struct usbctlx_completor *head)
return 0;
}
-static inline struct usbctlx_completor *init_rmem_completor(
- struct usbctlx_rmem_completor
- *completor,
- struct hfa384x_usb_rmemresp
- *rmemresp,
- void *data,
- unsigned int len)
+static inline struct usbctlx_completor *
+init_rmem_completor(struct usbctlx_rmem_completor *completor,
+ struct hfa384x_usb_rmemresp *rmemresp,
+ void *data,
+ unsigned int len)
{
completor->head.complete = usbctlx_rmem_completor_fn;
completor->rmemresp = rmemresp;
completor->data = data;
completor->len = len;
- return &(completor->head);
+ return &completor->head;
}
/*----------------------------------------------------------------
-* hfa384x_cb_status
-*
-* Ctlx_complete handler for async CMD type control exchanges.
-* mark the hw struct as such.
-*
-* Note: If the handling is changed here, it should probably be
-* changed in docmd as well.
-*
-* Arguments:
-* hw hw struct
-* ctlx completed CTLX
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
-static void hfa384x_cb_status(struct hfa384x *hw, const struct hfa384x_usbctlx *ctlx)
+ * hfa384x_cb_status
+ *
+ * Ctlx_complete handler for async CMD type control exchanges.
+ * mark the hw struct as such.
+ *
+ * Note: If the handling is changed here, it should probably be
+ * changed in docmd as well.
+ *
+ * Arguments:
+ * hw hw struct
+ * ctlx completed CTLX
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
+static void hfa384x_cb_status(struct hfa384x *hw,
+ const struct hfa384x_usbctlx *ctlx)
{
if (ctlx->usercb) {
struct hfa384x_cmdresult cmdresult;
@@ -812,7 +822,8 @@ static void hfa384x_cb_status(struct hfa384x *hw, const struct hfa384x_usbctlx *
}
}
-static inline int hfa384x_docmd_wait(struct hfa384x *hw, struct hfa384x_metacmd *cmd)
+static inline int hfa384x_docmd_wait(struct hfa384x *hw,
+ struct hfa384x_metacmd *cmd)
{
return hfa384x_docmd(hw, DOWAIT, cmd, NULL, NULL, NULL);
}
@@ -905,24 +916,25 @@ hfa384x_dowmem_async(struct hfa384x *hw,
}
/*----------------------------------------------------------------
-* hfa384x_cmd_initialize
-*
-* Issues the initialize command and sets the hw->state based
-* on the result.
-*
-* Arguments:
-* hw device structure
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_cmd_initialize
+ *
+ * Issues the initialize command and sets the hw->state based
+ * on the result.
+ *
+ * Arguments:
+ * hw device structure
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_cmd_initialize(struct hfa384x *hw)
{
int result = 0;
@@ -950,25 +962,26 @@ int hfa384x_cmd_initialize(struct hfa384x *hw)
}
/*----------------------------------------------------------------
-* hfa384x_cmd_disable
-*
-* Issues the disable command to stop communications on one of
-* the MACs 'ports'.
-*
-* Arguments:
-* hw device structure
-* macport MAC port number (host order)
-*
-* Returns:
-* 0 success
-* >0 f/w reported failure - f/w status code
-* <0 driver reported error (timeout|bad arg)
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_cmd_disable
+ *
+ * Issues the disable command to stop communications on one of
+ * the MACs 'ports'.
+ *
+ * Arguments:
+ * hw device structure
+ * macport MAC port number (host order)
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported failure - f/w status code
+ * <0 driver reported error (timeout|bad arg)
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_cmd_disable(struct hfa384x *hw, u16 macport)
{
struct hfa384x_metacmd cmd;
@@ -983,25 +996,26 @@ int hfa384x_cmd_disable(struct hfa384x *hw, u16 macport)
}
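
Editorial aside, not part of the patch: the 0 / greater-than-zero / less-than-zero return convention documented above recurs throughout these accessors. A hypothetical caller might fold it as in this sketch (example_port_disable is not a real function in the driver):

static int example_port_disable(struct hfa384x *hw, u16 macport)
{
	int result = hfa384x_cmd_disable(hw, macport);

	if (result < 0)		/* driver-reported error (timeout, bad arg) */
		return result;
	if (result > 0)		/* firmware-reported status code */
		return -EIO;
	return 0;		/* success */
}
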
/*----------------------------------------------------------------
-* hfa384x_cmd_enable
-*
-* Issues the enable command to enable communications on one of
-* the MACs 'ports'.
-*
-* Arguments:
-* hw device structure
-* macport MAC port number
-*
-* Returns:
-* 0 success
-* >0 f/w reported failure - f/w status code
-* <0 driver reported error (timeout|bad arg)
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_cmd_enable
+ *
+ * Issues the enable command to enable communications on one of
+ * the MACs 'ports'.
+ *
+ * Arguments:
+ * hw device structure
+ * macport MAC port number
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported failure - f/w status code
+ * <0 driver reported error (timeout|bad arg)
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_cmd_enable(struct hfa384x *hw, u16 macport)
{
struct hfa384x_metacmd cmd;
@@ -1016,34 +1030,35 @@ int hfa384x_cmd_enable(struct hfa384x *hw, u16 macport)
}
/*----------------------------------------------------------------
-* hfa384x_cmd_monitor
-*
-* Enables the 'monitor mode' of the MAC. Here's the description of
-* monitor mode that I've received thus far:
-*
-* "The "monitor mode" of operation is that the MAC passes all
-* frames for which the PLCP checks are correct. All received
-* MPDUs are passed to the host with MAC Port = 7, with a
-* receive status of good, FCS error, or undecryptable. Passing
-* certain MPDUs is a violation of the 802.11 standard, but useful
-* for a debugging tool." Normal communication is not possible
-* while monitor mode is enabled.
-*
-* Arguments:
-* hw device structure
-* enable a code (0x0b|0x0f) that enables/disables
-* monitor mode. (host order)
-*
-* Returns:
-* 0 success
-* >0 f/w reported failure - f/w status code
-* <0 driver reported error (timeout|bad arg)
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_cmd_monitor
+ *
+ * Enables the 'monitor mode' of the MAC. Here's the description of
+ * monitor mode that I've received thus far:
+ *
+ * "The "monitor mode" of operation is that the MAC passes all
+ * frames for which the PLCP checks are correct. All received
+ * MPDUs are passed to the host with MAC Port = 7, with a
+ * receive status of good, FCS error, or undecryptable. Passing
+ * certain MPDUs is a violation of the 802.11 standard, but useful
+ * for a debugging tool." Normal communication is not possible
+ * while monitor mode is enabled.
+ *
+ * Arguments:
+ * hw device structure
+ * enable a code (0x0b|0x0f) that enables/disables
+ * monitor mode. (host order)
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported failure - f/w status code
+ * <0 driver reported error (timeout|bad arg)
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_cmd_monitor(struct hfa384x *hw, u16 enable)
{
struct hfa384x_metacmd cmd;
@@ -1058,43 +1073,44 @@ int hfa384x_cmd_monitor(struct hfa384x *hw, u16 enable)
}
/*----------------------------------------------------------------
-* hfa384x_cmd_download
-*
-* Sets the controls for the MAC controller code/data download
-* process. The arguments set the mode and address associated
-* with a download. Note that the aux registers should be enabled
-* prior to setting one of the download enable modes.
-*
-* Arguments:
-* hw device structure
-* mode 0 - Disable programming and begin code exec
-* 1 - Enable volatile mem programming
-* 2 - Enable non-volatile mem programming
-* 3 - Program non-volatile section from NV download
-* buffer.
-* (host order)
-* lowaddr
-* highaddr For mode 1, sets the high & low order bits of
-* the "destination address". This address will be
-* the execution start address when download is
-* subsequently disabled.
-* For mode 2, sets the high & low order bits of
-* the destination in NV ram.
-* For modes 0 & 3, should be zero. (host order)
-* NOTE: these are CMD format.
-* codelen Length of the data to write in mode 2,
-* zero otherwise. (host order)
-*
-* Returns:
-* 0 success
-* >0 f/w reported failure - f/w status code
-* <0 driver reported error (timeout|bad arg)
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_cmd_download
+ *
+ * Sets the controls for the MAC controller code/data download
+ * process. The arguments set the mode and address associated
+ * with a download. Note that the aux registers should be enabled
+ * prior to setting one of the download enable modes.
+ *
+ * Arguments:
+ * hw device structure
+ * mode 0 - Disable programming and begin code exec
+ * 1 - Enable volatile mem programming
+ * 2 - Enable non-volatile mem programming
+ * 3 - Program non-volatile section from NV download
+ * buffer.
+ * (host order)
+ * lowaddr
+ * highaddr For mode 1, sets the high & low order bits of
+ * the "destination address". This address will be
+ * the execution start address when download is
+ * subsequently disabled.
+ * For mode 2, sets the high & low order bits of
+ * the destination in NV ram.
+ * For modes 0 & 3, should be zero. (host order)
+ * NOTE: these are CMD format.
+ * codelen Length of the data to write in mode 2,
+ * zero otherwise. (host order)
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported failure - f/w status code
+ * <0 driver reported error (timeout|bad arg)
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_cmd_download(struct hfa384x *hw, u16 mode, u16 lowaddr,
u16 highaddr, u16 codelen)
{
@@ -1114,29 +1130,31 @@ int hfa384x_cmd_download(struct hfa384x *hw, u16 mode, u16 lowaddr,
}
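
Editorial aside, not part of the patch: a minimal sketch of the volatile (RAM) download sequence implied by the mode argument documented above, assuming the caller has already split the execution start address into lowaddr/highaddr in CMD format; error handling and the actual image transfer are abbreviated.

static int example_ram_download(struct hfa384x *hw, u16 lowaddr, u16 highaddr)
{
	int result;

	/* mode 1: enable volatile memory programming at the given address */
	result = hfa384x_cmd_download(hw, 1, lowaddr, highaddr, 0);
	if (result)
		return result;

	/* ... transfer the image into card RAM here ... */

	/* mode 0: disable programming and begin code execution */
	return hfa384x_cmd_download(hw, 0, 0, 0, 0);
}
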
/*----------------------------------------------------------------
-* hfa384x_corereset
-*
-* Perform a reset of the hfa38xx MAC core. We assume that the hw
-* structure is in its "created" state. That is, it is initialized
-* with proper values. Note that if a reset is done after the
-* device has been active for awhile, the caller might have to clean
-* up some leftover cruft in the hw structure.
-*
-* Arguments:
-* hw device structure
-* holdtime how long (in ms) to hold the reset
-* settletime how long (in ms) to wait after releasing
-* the reset
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
-int hfa384x_corereset(struct hfa384x *hw, int holdtime, int settletime, int genesis)
+ * hfa384x_corereset
+ *
+ * Perform a reset of the hfa38xx MAC core. We assume that the hw
+ * structure is in its "created" state. That is, it is initialized
+ * with proper values. Note that if a reset is done after the
+ * device has been active for a while, the caller might have to clean
+ * up some leftover cruft in the hw structure.
+ *
+ * Arguments:
+ * hw device structure
+ * holdtime how long (in ms) to hold the reset
+ * settletime how long (in ms) to wait after releasing
+ * the reset
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
+int hfa384x_corereset(struct hfa384x *hw, int holdtime,
+ int settletime, int genesis)
{
int result;
@@ -1150,29 +1168,30 @@ int hfa384x_corereset(struct hfa384x *hw, int holdtime, int settletime, int gene
}
/*----------------------------------------------------------------
-* hfa384x_usbctlx_complete_sync
-*
-* Waits for a synchronous CTLX object to complete,
-* and then handles the response.
-*
-* Arguments:
-* hw device structure
-* ctlx CTLX ptr
-* completor functor object to decide what to
-* do with the CTLX's result.
-*
-* Returns:
-* 0 Success
-* -ERESTARTSYS Interrupted by a signal
-* -EIO CTLX failed
-* -ENODEV Adapter was unplugged
-* ??? Result from completor
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_usbctlx_complete_sync
+ *
+ * Waits for a synchronous CTLX object to complete,
+ * and then handles the response.
+ *
+ * Arguments:
+ * hw device structure
+ * ctlx CTLX ptr
+ * completor functor object to decide what to
+ * do with the CTLX's result.
+ *
+ * Returns:
+ * 0 Success
+ * -ERESTARTSYS Interrupted by a signal
+ * -EIO CTLX failed
+ * -ENODEV Adapter was unplugged
+ * ??? Result from completor
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
static int hfa384x_usbctlx_complete_sync(struct hfa384x *hw,
struct hfa384x_usbctlx *ctlx,
struct usbctlx_completor *completor)
@@ -1257,37 +1276,38 @@ cleanup:
}
/*----------------------------------------------------------------
-* hfa384x_docmd
-*
-* Constructs a command CTLX and submits it.
-*
-* NOTE: Any changes to the 'post-submit' code in this function
-* need to be carried over to hfa384x_cbcmd() since the handling
-* is virtually identical.
-*
-* Arguments:
-* hw device structure
-* mode DOWAIT or DOASYNC
-* cmd cmd structure. Includes all arguments and result
-* data points. All in host order. in host order
-* cmdcb command-specific callback
-* usercb user callback for async calls, NULL for DOWAIT calls
-* usercb_data user supplied data pointer for async calls, NULL
-* for DOASYNC calls
-*
-* Returns:
-* 0 success
-* -EIO CTLX failure
-* -ERESTARTSYS Awakened on signal
-* >0 command indicated error, Status and Resp0-2 are
-* in hw structure.
-*
-* Side effects:
-*
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_docmd
+ *
+ * Constructs a command CTLX and submits it.
+ *
+ * NOTE: Any changes to the 'post-submit' code in this function
+ * need to be carried over to hfa384x_cbcmd() since the handling
+ * is virtually identical.
+ *
+ * Arguments:
+ * hw device structure
+ * mode DOWAIT or DOASYNC
+ * cmd cmd structure. Includes all arguments and result
+ * data points. All in host order.
+ * cmdcb command-specific callback
+ * usercb user callback for async calls, NULL for DOWAIT calls
+ * usercb_data user supplied data pointer for async calls, NULL
+ * for DOASYNC calls
+ *
+ * Returns:
+ * 0 success
+ * -EIO CTLX failure
+ * -ERESTARTSYS Awakened on signal
+ * >0 command indicated error, Status and Resp0-2 are
+ * in hw structure.
+ *
+ * Side effects:
+ *
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
static int
hfa384x_docmd(struct hfa384x *hw,
enum cmd_mode mode,
@@ -1341,41 +1361,42 @@ done:
}
/*----------------------------------------------------------------
-* hfa384x_dorrid
-*
-* Constructs a read rid CTLX and issues it.
-*
-* NOTE: Any changes to the 'post-submit' code in this function
-* need to be carried over to hfa384x_cbrrid() since the handling
-* is virtually identical.
-*
-* Arguments:
-* hw device structure
-* mode DOWAIT or DOASYNC
-* rid Read RID number (host order)
-* riddata Caller supplied buffer that MAC formatted RID.data
-* record will be written to for DOWAIT calls. Should
-* be NULL for DOASYNC calls.
-* riddatalen Buffer length for DOWAIT calls. Zero for DOASYNC calls.
-* cmdcb command callback for async calls, NULL for DOWAIT calls
-* usercb user callback for async calls, NULL for DOWAIT calls
-* usercb_data user supplied data pointer for async calls, NULL
-* for DOWAIT calls
-*
-* Returns:
-* 0 success
-* -EIO CTLX failure
-* -ERESTARTSYS Awakened on signal
-* -ENODATA riddatalen != macdatalen
-* >0 command indicated error, Status and Resp0-2 are
-* in hw structure.
-*
-* Side effects:
-*
-* Call context:
-* interrupt (DOASYNC)
-* process (DOWAIT or DOASYNC)
-----------------------------------------------------------------*/
+ * hfa384x_dorrid
+ *
+ * Constructs a read rid CTLX and issues it.
+ *
+ * NOTE: Any changes to the 'post-submit' code in this function
+ * need to be carried over to hfa384x_cbrrid() since the handling
+ * is virtually identical.
+ *
+ * Arguments:
+ * hw device structure
+ * mode DOWAIT or DOASYNC
+ * rid Read RID number (host order)
+ * riddata Caller-supplied buffer that the MAC-formatted RID.data
+ * record will be written to for DOWAIT calls. Should
+ * be NULL for DOASYNC calls.
+ * riddatalen Buffer length for DOWAIT calls. Zero for DOASYNC calls.
+ * cmdcb command callback for async calls, NULL for DOWAIT calls
+ * usercb user callback for async calls, NULL for DOWAIT calls
+ * usercb_data user supplied data pointer for async calls, NULL
+ * for DOWAIT calls
+ *
+ * Returns:
+ * 0 success
+ * -EIO CTLX failure
+ * -ERESTARTSYS Awakened on signal
+ * -ENODATA riddatalen != macdatalen
+ * >0 command indicated error, Status and Resp0-2 are
+ * in hw structure.
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt (DOASYNC)
+ * process (DOWAIT or DOASYNC)
+ *----------------------------------------------------------------
+ */
static int
hfa384x_dorrid(struct hfa384x *hw,
enum cmd_mode mode,
@@ -1426,37 +1447,38 @@ done:
}
/*----------------------------------------------------------------
-* hfa384x_dowrid
-*
-* Constructs a write rid CTLX and issues it.
-*
-* NOTE: Any changes to the 'post-submit' code in this function
-* need to be carried over to hfa384x_cbwrid() since the handling
-* is virtually identical.
-*
-* Arguments:
-* hw device structure
-* enum cmd_mode DOWAIT or DOASYNC
-* rid RID code
-* riddata Data portion of RID formatted for MAC
-* riddatalen Length of the data portion in bytes
-* cmdcb command callback for async calls, NULL for DOWAIT calls
-* usercb user callback for async calls, NULL for DOWAIT calls
-* usercb_data user supplied data pointer for async calls
-*
-* Returns:
-* 0 success
-* -ETIMEDOUT timed out waiting for register ready or
-* command completion
-* >0 command indicated error, Status and Resp0-2 are
-* in hw structure.
-*
-* Side effects:
-*
-* Call context:
-* interrupt (DOASYNC)
-* process (DOWAIT or DOASYNC)
-----------------------------------------------------------------*/
+ * hfa384x_dowrid
+ *
+ * Constructs a write rid CTLX and issues it.
+ *
+ * NOTE: Any changes to the 'post-submit' code in this function
+ * need to be carried over to hfa384x_cbwrid() since the handling
+ * is virtually identical.
+ *
+ * Arguments:
+ * hw device structure
+ * enum cmd_mode DOWAIT or DOASYNC
+ * rid RID code
+ * riddata Data portion of RID formatted for MAC
+ * riddatalen Length of the data portion in bytes
+ * cmdcb command callback for async calls, NULL for DOWAIT calls
+ * usercb user callback for async calls, NULL for DOWAIT calls
+ * usercb_data user supplied data pointer for async calls
+ *
+ * Returns:
+ * 0 success
+ * -ETIMEDOUT timed out waiting for register ready or
+ * command completion
+ * >0 command indicated error, Status and Resp0-2 are
+ * in hw structure.
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt (DOASYNC)
+ * process (DOWAIT or DOASYNC)
+ *----------------------------------------------------------------
+ */
static int
hfa384x_dowrid(struct hfa384x *hw,
enum cmd_mode mode,
@@ -1512,38 +1534,39 @@ done:
}
/*----------------------------------------------------------------
-* hfa384x_dormem
-*
-* Constructs a readmem CTLX and issues it.
-*
-* NOTE: Any changes to the 'post-submit' code in this function
-* need to be carried over to hfa384x_cbrmem() since the handling
-* is virtually identical.
-*
-* Arguments:
-* hw device structure
-* mode DOWAIT or DOASYNC
-* page MAC address space page (CMD format)
-* offset MAC address space offset
-* data Ptr to data buffer to receive read
-* len Length of the data to read (max == 2048)
-* cmdcb command callback for async calls, NULL for DOWAIT calls
-* usercb user callback for async calls, NULL for DOWAIT calls
-* usercb_data user supplied data pointer for async calls
-*
-* Returns:
-* 0 success
-* -ETIMEDOUT timed out waiting for register ready or
-* command completion
-* >0 command indicated error, Status and Resp0-2 are
-* in hw structure.
-*
-* Side effects:
-*
-* Call context:
-* interrupt (DOASYNC)
-* process (DOWAIT or DOASYNC)
-----------------------------------------------------------------*/
+ * hfa384x_dormem
+ *
+ * Constructs a readmem CTLX and issues it.
+ *
+ * NOTE: Any changes to the 'post-submit' code in this function
+ * need to be carried over to hfa384x_cbrmem() since the handling
+ * is virtually identical.
+ *
+ * Arguments:
+ * hw device structure
+ * mode DOWAIT or DOASYNC
+ * page MAC address space page (CMD format)
+ * offset MAC address space offset
+ * data Ptr to data buffer to receive read
+ * len Length of the data to read (max == 2048)
+ * cmdcb command callback for async calls, NULL for DOWAIT calls
+ * usercb user callback for async calls, NULL for DOWAIT calls
+ * usercb_data user supplied data pointer for async calls
+ *
+ * Returns:
+ * 0 success
+ * -ETIMEDOUT timed out waiting for register ready or
+ * command completion
+ * >0 command indicated error, Status and Resp0-2 are
+ * in hw structure.
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt (DOASYNC)
+ * process (DOWAIT or DOASYNC)
+ *----------------------------------------------------------------
+ */
static int
hfa384x_dormem(struct hfa384x *hw,
enum cmd_mode mode,
@@ -1603,38 +1626,39 @@ done:
}
/*----------------------------------------------------------------
-* hfa384x_dowmem
-*
-* Constructs a writemem CTLX and issues it.
-*
-* NOTE: Any changes to the 'post-submit' code in this function
-* need to be carried over to hfa384x_cbwmem() since the handling
-* is virtually identical.
-*
-* Arguments:
-* hw device structure
-* mode DOWAIT or DOASYNC
-* page MAC address space page (CMD format)
-* offset MAC address space offset
-* data Ptr to data buffer containing write data
-* len Length of the data to read (max == 2048)
-* cmdcb command callback for async calls, NULL for DOWAIT calls
-* usercb user callback for async calls, NULL for DOWAIT calls
-* usercb_data user supplied data pointer for async calls.
-*
-* Returns:
-* 0 success
-* -ETIMEDOUT timed out waiting for register ready or
-* command completion
-* >0 command indicated error, Status and Resp0-2 are
-* in hw structure.
-*
-* Side effects:
-*
-* Call context:
-* interrupt (DOWAIT)
-* process (DOWAIT or DOASYNC)
-----------------------------------------------------------------*/
+ * hfa384x_dowmem
+ *
+ * Constructs a writemem CTLX and issues it.
+ *
+ * NOTE: Any changes to the 'post-submit' code in this function
+ * need to be carried over to hfa384x_cbwmem() since the handling
+ * is virtually identical.
+ *
+ * Arguments:
+ * hw device structure
+ * mode DOWAIT or DOASYNC
+ * page MAC address space page (CMD format)
+ * offset MAC address space offset
+ * data Ptr to data buffer containing write data
+ * len Length of the data to write (max == 2048)
+ * cmdcb command callback for async calls, NULL for DOWAIT calls
+ * usercb user callback for async calls, NULL for DOWAIT calls
+ * usercb_data user supplied data pointer for async calls.
+ *
+ * Returns:
+ * 0 success
+ * -ETIMEDOUT timed out waiting for register ready or
+ * command completion
+ * >0 command indicated error, Status and Resp0-2 are
+ * in hw structure.
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt (DOWAIT)
+ * process (DOWAIT or DOASYNC)
+ *----------------------------------------------------------------
+ */
static int
hfa384x_dowmem(struct hfa384x *hw,
enum cmd_mode mode,
@@ -1694,27 +1718,28 @@ done:
}
/*----------------------------------------------------------------
-* hfa384x_drvr_disable
-*
-* Issues the disable command to stop communications on one of
-* the MACs 'ports'. Only macport 0 is valid for stations.
-* APs may also disable macports 1-6. Only ports that have been
-* previously enabled may be disabled.
-*
-* Arguments:
-* hw device structure
-* macport MAC port number (host order)
-*
-* Returns:
-* 0 success
-* >0 f/w reported failure - f/w status code
-* <0 driver reported error (timeout|bad arg)
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_disable
+ *
+ * Issues the disable command to stop communications on one of
+ * the MACs 'ports'. Only macport 0 is valid for stations.
+ * APs may also disable macports 1-6. Only ports that have been
+ * previously enabled may be disabled.
+ *
+ * Arguments:
+ * hw device structure
+ * macport MAC port number (host order)
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported failure - f/w status code
+ * <0 driver reported error (timeout|bad arg)
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_disable(struct hfa384x *hw, u16 macport)
{
int result = 0;
@@ -1732,27 +1757,28 @@ int hfa384x_drvr_disable(struct hfa384x *hw, u16 macport)
}
/*----------------------------------------------------------------
-* hfa384x_drvr_enable
-*
-* Issues the enable command to enable communications on one of
-* the MACs 'ports'. Only macport 0 is valid for stations.
-* APs may also enable macports 1-6. Only ports that are currently
-* disabled may be enabled.
-*
-* Arguments:
-* hw device structure
-* macport MAC port number
-*
-* Returns:
-* 0 success
-* >0 f/w reported failure - f/w status code
-* <0 driver reported error (timeout|bad arg)
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_enable
+ *
+ * Issues the enable command to enable communications on one of
+ * the MACs 'ports'. Only macport 0 is valid for stations.
+ * APs may also enable macports 1-6. Only ports that are currently
+ * disabled may be enabled.
+ *
+ * Arguments:
+ * hw device structure
+ * macport MAC port number
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported failure - f/w status code
+ * <0 driver reported error (timeout|bad arg)
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_enable(struct hfa384x *hw, u16 macport)
{
int result = 0;
@@ -1770,26 +1796,27 @@ int hfa384x_drvr_enable(struct hfa384x *hw, u16 macport)
}
/*----------------------------------------------------------------
-* hfa384x_drvr_flashdl_enable
-*
-* Begins the flash download state. Checks to see that we're not
-* already in a download state and that a port isn't enabled.
-* Sets the download state and retrieves the flash download
-* buffer location, buffer size, and timeout length.
-*
-* Arguments:
-* hw device structure
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_flashdl_enable
+ *
+ * Begins the flash download state. Checks to see that we're not
+ * already in a download state and that a port isn't enabled.
+ * Sets the download state and retrieves the flash download
+ * buffer location, buffer size, and timeout length.
+ *
+ * Arguments:
+ * hw device structure
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_flashdl_enable(struct hfa384x *hw)
{
int result = 0;
@@ -1809,7 +1836,7 @@ int hfa384x_drvr_flashdl_enable(struct hfa384x *hw)
/* Retrieve the buffer loc&size and timeout */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_DOWNLOADBUFFER,
- &(hw->bufinfo), sizeof(hw->bufinfo));
+ &hw->bufinfo, sizeof(hw->bufinfo));
if (result)
return result;
@@ -1817,7 +1844,7 @@ int hfa384x_drvr_flashdl_enable(struct hfa384x *hw)
hw->bufinfo.offset = le16_to_cpu(hw->bufinfo.offset);
hw->bufinfo.len = le16_to_cpu(hw->bufinfo.len);
result = hfa384x_drvr_getconfig16(hw, HFA384x_RID_MAXLOADTIME,
- &(hw->dltimeout));
+ &hw->dltimeout);
if (result)
return result;
@@ -1831,24 +1858,25 @@ int hfa384x_drvr_flashdl_enable(struct hfa384x *hw)
}
/*----------------------------------------------------------------
-* hfa384x_drvr_flashdl_disable
-*
-* Ends the flash download state. Note that this will cause the MAC
-* firmware to restart.
-*
-* Arguments:
-* hw device structure
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_flashdl_disable
+ *
+ * Ends the flash download state. Note that this will cause the MAC
+ * firmware to restart.
+ *
+ * Arguments:
+ * hw device structure
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_flashdl_disable(struct hfa384x *hw)
{
/* Check that we're already in the download state */
@@ -1866,35 +1894,37 @@ int hfa384x_drvr_flashdl_disable(struct hfa384x *hw)
}
/*----------------------------------------------------------------
-* hfa384x_drvr_flashdl_write
-*
-* Performs a FLASH download of a chunk of data. First checks to see
-* that we're in the FLASH download state, then sets the download
-* mode, uses the aux functions to 1) copy the data to the flash
-* buffer, 2) sets the download 'write flash' mode, 3) readback and
-* compare. Lather rinse, repeat as many times an necessary to get
-* all the given data into flash.
-* When all data has been written using this function (possibly
-* repeatedly), call drvr_flashdl_disable() to end the download state
-* and restart the MAC.
-*
-* Arguments:
-* hw device structure
-* daddr Card address to write to. (host order)
-* buf Ptr to data to write.
-* len Length of data (host order).
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
-int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len)
+ * hfa384x_drvr_flashdl_write
+ *
+ * Performs a FLASH download of a chunk of data. First checks to see
+ * that we're in the FLASH download state, then sets the download
+ * mode, uses the aux functions to 1) copy the data to the flash
+ * buffer, 2) set the download 'write flash' mode, 3) read back and
+ * compare. Lather, rinse, repeat as many times as necessary to get
+ * all the given data into flash.
+ * When all data has been written using this function (possibly
+ * repeatedly), call drvr_flashdl_disable() to end the download state
+ * and restart the MAC.
+ *
+ * Arguments:
+ * hw device structure
+ * daddr Card address to write to. (host order)
+ * buf Ptr to data to write.
+ * len Length of data (host order).
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
+int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr,
+ void *buf, u32 len)
{
int result = 0;
u32 dlbufaddr;
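
Editorial aside, not part of the patch: a minimal sketch of the enable / write / disable flash sequence described in the comment above, with error handling abbreviated; image and image_len are hypothetical.

static int example_flash_update(struct hfa384x *hw, u32 daddr,
				void *image, u32 image_len)
{
	int result;

	result = hfa384x_drvr_flashdl_enable(hw);
	if (result)
		return result;

	/* may be called repeatedly, one chunk at a time */
	result = hfa384x_drvr_flashdl_write(hw, daddr, image, image_len);

	/* ends the download state and restarts the MAC firmware */
	hfa384x_drvr_flashdl_disable(hw);

	return result;
}
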
@@ -2008,30 +2038,31 @@ exit_proc:
}
/*----------------------------------------------------------------
-* hfa384x_drvr_getconfig
-*
-* Performs the sequence necessary to read a config/info item.
-*
-* Arguments:
-* hw device structure
-* rid config/info record id (host order)
-* buf host side record buffer. Upon return it will
-* contain the body portion of the record (minus the
-* RID and len).
-* len buffer length (in bytes, should match record length)
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-* -ENODATA length mismatch between argument and retrieved
-* record.
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_getconfig
+ *
+ * Performs the sequence necessary to read a config/info item.
+ *
+ * Arguments:
+ * hw device structure
+ * rid config/info record id (host order)
+ * buf host side record buffer. Upon return it will
+ * contain the body portion of the record (minus the
+ * RID and len).
+ * len buffer length (in bytes, should match record length)
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ * -ENODATA length mismatch between argument and retrieved
+ * record.
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_getconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len)
{
return hfa384x_dorrid_wait(hw, rid, buf, len);
@@ -2059,7 +2090,8 @@ int hfa384x_drvr_getconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len)
*
* Call context:
* process
- ----------------------------------------------------------------*/
+ *----------------------------------------------------------------
+ */
int
hfa384x_drvr_setconfig_async(struct hfa384x *hw,
u16 rid,
@@ -2071,23 +2103,24 @@ hfa384x_drvr_setconfig_async(struct hfa384x *hw,
}
/*----------------------------------------------------------------
-* hfa384x_drvr_ramdl_disable
-*
-* Ends the ram download state.
-*
-* Arguments:
-* hw device structure
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_ramdl_disable
+ *
+ * Ends the ram download state.
+ *
+ * Arguments:
+ * hw device structure
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_ramdl_disable(struct hfa384x *hw)
{
/* Check that we're already in the download state */
@@ -2105,29 +2138,30 @@ int hfa384x_drvr_ramdl_disable(struct hfa384x *hw)
}
/*----------------------------------------------------------------
-* hfa384x_drvr_ramdl_enable
-*
-* Begins the ram download state. Checks to see that we're not
-* already in a download state and that a port isn't enabled.
-* Sets the download state and calls cmd_download with the
-* ENABLE_VOLATILE subcommand and the exeaddr argument.
-*
-* Arguments:
-* hw device structure
-* exeaddr the card execution address that will be
-* jumped to when ramdl_disable() is called
-* (host order).
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_ramdl_enable
+ *
+ * Begins the ram download state. Checks to see that we're not
+ * already in a download state and that a port isn't enabled.
+ * Sets the download state and calls cmd_download with the
+ * ENABLE_VOLATILE subcommand and the exeaddr argument.
+ *
+ * Arguments:
+ * hw device structure
+ * exeaddr the card execution address that will be
+ * jumped to when ramdl_disable() is called
+ * (host order).
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr)
{
int result = 0;
@@ -2146,7 +2180,8 @@ int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr)
/* Check that we're not already in a download state */
if (hw->dlstate != HFA384x_DLSTATE_DISABLED) {
- netdev_err(hw->wlandev->netdev, "Download state not disabled.\n");
+ netdev_err(hw->wlandev->netdev,
+ "Download state not disabled.\n");
return -EINVAL;
}
@@ -2171,31 +2206,32 @@ int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr)
}
/*----------------------------------------------------------------
-* hfa384x_drvr_ramdl_write
-*
-* Performs a RAM download of a chunk of data. First checks to see
-* that we're in the RAM download state, then uses the [read|write]mem USB
-* commands to 1) copy the data, 2) readback and compare. The download
-* state is unaffected. When all data has been written using
-* this function, call drvr_ramdl_disable() to end the download state
-* and restart the MAC.
-*
-* Arguments:
-* hw device structure
-* daddr Card address to write to. (host order)
-* buf Ptr to data to write.
-* len Length of data (host order).
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_ramdl_write
+ *
+ * Performs a RAM download of a chunk of data. First checks to see
+ * that we're in the RAM download state, then uses the [read|write]mem USB
+ * commands to 1) copy the data, 2) readback and compare. The download
+ * state is unaffected. When all data has been written using
+ * this function, call drvr_ramdl_disable() to end the download state
+ * and restart the MAC.
+ *
+ * Arguments:
+ * hw device structure
+ * daddr Card address to write to. (host order)
+ * buf Ptr to data to write.
+ * len Length of data (host order).
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_ramdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len)
{
int result = 0;
@@ -2246,36 +2282,37 @@ int hfa384x_drvr_ramdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len)
}
/*----------------------------------------------------------------
-* hfa384x_drvr_readpda
-*
-* Performs the sequence to read the PDA space. Note there is no
-* drvr_writepda() function. Writing a PDA is
-* generally implemented by a calling component via calls to
-* cmd_download and writing to the flash download buffer via the
-* aux regs.
-*
-* Arguments:
-* hw device structure
-* buf buffer to store PDA in
-* len buffer length
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-* -ETIMEDOUT timeout waiting for the cmd regs to become
-* available, or waiting for the control reg
-* to indicate the Aux port is enabled.
-* -ENODATA the buffer does NOT contain a valid PDA.
-* Either the card PDA is bad, or the auxdata
-* reads are giving us garbage.
-
-*
-* Side effects:
-*
-* Call context:
-* process or non-card interrupt.
-----------------------------------------------------------------*/
+ * hfa384x_drvr_readpda
+ *
+ * Performs the sequence to read the PDA space. Note there is no
+ * drvr_writepda() function. Writing a PDA is
+ * generally implemented by a calling component via calls to
+ * cmd_download and writing to the flash download buffer via the
+ * aux regs.
+ *
+ * Arguments:
+ * hw device structure
+ * buf buffer to store PDA in
+ * len buffer length
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ * -ETIMEDOUT timeout waiting for the cmd regs to become
+ * available, or waiting for the control reg
+ * to indicate the Aux port is enabled.
+ * -ENODATA the buffer does NOT contain a valid PDA.
+ * Either the card PDA is bad, or the auxdata
+ * reads are giving us garbage.
+ *
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process or non-card interrupt.
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len)
{
int result = 0;
@@ -2306,7 +2343,7 @@ int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len)
/* units of bytes */
result = hfa384x_dormem_wait(hw, currpage, curroffset, buf,
- len);
+ len);
if (result) {
netdev_warn(hw->wlandev->netdev,
@@ -2366,51 +2403,52 @@ int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len)
}
/*----------------------------------------------------------------
-* hfa384x_drvr_setconfig
-*
-* Performs the sequence necessary to write a config/info item.
-*
-* Arguments:
-* hw device structure
-* rid config/info record id (in host order)
-* buf host side record buffer
-* len buffer length (in bytes)
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_setconfig
+ *
+ * Performs the sequence necessary to write a config/info item.
+ *
+ * Arguments:
+ * hw device structure
+ * rid config/info record id (in host order)
+ * buf host side record buffer
+ * len buffer length (in bytes)
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_setconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len)
{
return hfa384x_dowrid_wait(hw, rid, buf, len);
}
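
Editorial aside, not part of the patch: hfa384x_drvr_setconfig() above and hfa384x_drvr_getconfig() earlier are symmetric RID accessors; the flashdl_enable() hunk earlier suggests record bodies are little-endian on the MAC side. A hedged sketch of a read-modify-write, where HFA384x_RID_EXAMPLE is a placeholder id, not a real record:

static int example_rid_roundtrip(struct hfa384x *hw)
{
	__le16 val;
	int result;

	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_EXAMPLE,
					&val, sizeof(val));
	if (result)
		return result;

	/* RID bodies are in MAC (little-endian) format */
	val = cpu_to_le16(le16_to_cpu(val) | 0x0001);
	return hfa384x_drvr_setconfig(hw, HFA384x_RID_EXAMPLE,
				      &val, sizeof(val));
}
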
/*----------------------------------------------------------------
-* hfa384x_drvr_start
-*
-* Issues the MAC initialize command, sets up some data structures,
-* and enables the interrupts. After this function completes, the
-* low-level stuff should be ready for any/all commands.
-*
-* Arguments:
-* hw device structure
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
-
+ * hfa384x_drvr_start
+ *
+ * Issues the MAC initialize command, sets up some data structures,
+ * and enables the interrupts. After this function completes, the
+ * low-level stuff should be ready for any/all commands.
+ *
+ * Arguments:
+ * hw device structure
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_start(struct hfa384x *hw)
{
int result, result1, result2;
@@ -2494,24 +2532,25 @@ done:
}
/*----------------------------------------------------------------
-* hfa384x_drvr_stop
-*
-* Shuts down the MAC to the point where it is safe to unload the
-* driver. Any subsystem that may be holding a data or function
-* ptr into the driver must be cleared/deinitialized.
-*
-* Arguments:
-* hw device structure
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_stop
+ *
+ * Shuts down the MAC to the point where it is safe to unload the
+ * driver. Any subsystem that may be holding a data or function
+ * ptr into the driver must be cleared/deinitialized.
+ *
+ * Arguments:
+ * hw device structure
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_stop(struct hfa384x *hw)
{
int i;
@@ -2542,26 +2581,27 @@ int hfa384x_drvr_stop(struct hfa384x *hw)
}
/*----------------------------------------------------------------
-* hfa384x_drvr_txframe
-*
-* Takes a frame from prism2sta and queues it for transmission.
-*
-* Arguments:
-* hw device structure
-* skb packet buffer struct. Contains an 802.11
-* data frame.
-* p80211_hdr points to the 802.11 header for the packet.
-* Returns:
-* 0 Success and more buffs available
-* 1 Success but no more buffs
-* 2 Allocation failure
-* 4 Buffer full or queue busy
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_drvr_txframe
+ *
+ * Takes a frame from prism2sta and queues it for transmission.
+ *
+ * Arguments:
+ * hw device structure
+ * skb packet buffer struct. Contains an 802.11
+ * data frame.
+ * p80211_hdr points to the 802.11 header for the packet.
+ * Returns:
+ * 0 Success and more buffs available
+ * 1 Success but no more buffs
+ * 2 Allocation failure
+ * 4 Buffer full or queue busy
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb,
union p80211_hdr *p80211_hdr,
struct p80211_metawep *p80211_wep)
@@ -2608,7 +2648,7 @@ int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb,
cpu_to_le16(hw->txbuff.txfrm.desc.tx_control);
/* copy the header over to the txdesc */
- memcpy(&(hw->txbuff.txfrm.desc.frame_control), p80211_hdr,
+ memcpy(&hw->txbuff.txfrm.desc.frame_control, p80211_hdr,
sizeof(union p80211_hdr));
/* if we're using host WEP, increase size by IV+ICV */
@@ -2638,9 +2678,9 @@ int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb,
memcpy(ptr, p80211_wep->icv, sizeof(p80211_wep->icv));
/* Send the USB packet */
- usb_fill_bulk_urb(&(hw->tx_urb), hw->usb,
+ usb_fill_bulk_urb(&hw->tx_urb, hw->usb,
hw->endp_out,
- &(hw->txbuff), ROUNDUP64(usbpktlen),
+ &hw->txbuff, ROUNDUP64(usbpktlen),
hfa384x_usbout_callback, hw->wlandev);
hw->tx_urb.transfer_flags |= USB_QUEUE_BULK;
@@ -2676,18 +2716,19 @@ void hfa384x_tx_timeout(struct wlandevice *wlandev)
}
/*----------------------------------------------------------------
-* hfa384x_usbctlx_reaper_task
-*
-* Tasklet to delete dead CTLX objects
-*
-* Arguments:
-* data ptr to a struct hfa384x
-*
-* Returns:
-*
-* Call context:
-* Interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbctlx_reaper_task
+ *
+ * Tasklet to delete dead CTLX objects
+ *
+ * Arguments:
+ * data ptr to a struct hfa384x
+ *
+ * Returns:
+ *
+ * Call context:
+ * Interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbctlx_reaper_task(unsigned long data)
{
struct hfa384x *hw = (struct hfa384x *)data;
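
Editorial aside, not part of the patch: this tasklet and the completion tasklet below both use the classic tasklet API, passing the struct hfa384x pointer through the unsigned long data argument. A hypothetical setup call (the field name reaper_bh is assumed, not confirmed by this hunk) would look like:

	tasklet_init(&hw->reaper_bh,
		     hfa384x_usbctlx_reaper_task,
		     (unsigned long)hw);
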
@@ -2708,19 +2749,20 @@ static void hfa384x_usbctlx_reaper_task(unsigned long data)
}
/*----------------------------------------------------------------
-* hfa384x_usbctlx_completion_task
-*
-* Tasklet to call completion handlers for returned CTLXs
-*
-* Arguments:
-* data ptr to struct hfa384x
-*
-* Returns:
-* Nothing
-*
-* Call context:
-* Interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbctlx_completion_task
+ *
+ * Tasklet to call completion handlers for returned CTLXs
+ *
+ * Arguments:
+ * data ptr to struct hfa384x
+ *
+ * Returns:
+ * Nothing
+ *
+ * Call context:
+ * Interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbctlx_completion_task(unsigned long data)
{
struct hfa384x *hw = (struct hfa384x *)data;
@@ -2781,22 +2823,23 @@ static void hfa384x_usbctlx_completion_task(unsigned long data)
}
/*----------------------------------------------------------------
-* unlocked_usbctlx_cancel_async
-*
-* Mark the CTLX dead asynchronously, and ensure that the
-* next command on the queue is run afterwards.
-*
-* Arguments:
-* hw ptr to the struct hfa384x structure
-* ctlx ptr to a CTLX structure
-*
-* Returns:
-* 0 the CTLX's URB is inactive
-* -EINPROGRESS the URB is currently being unlinked
-*
-* Call context:
-* Either process or interrupt, but presumably interrupt
-----------------------------------------------------------------*/
+ * unlocked_usbctlx_cancel_async
+ *
+ * Mark the CTLX dead asynchronously, and ensure that the
+ * next command on the queue is run afterwards.
+ *
+ * Arguments:
+ * hw ptr to the struct hfa384x structure
+ * ctlx ptr to a CTLX structure
+ *
+ * Returns:
+ * 0 the CTLX's URB is inactive
+ * -EINPROGRESS the URB is currently being unlinked
+ *
+ * Call context:
+ * Either process or interrupt, but presumably interrupt
+ *----------------------------------------------------------------
+ */
static int unlocked_usbctlx_cancel_async(struct hfa384x *hw,
struct hfa384x_usbctlx *ctlx)
{
@@ -2826,28 +2869,30 @@ static int unlocked_usbctlx_cancel_async(struct hfa384x *hw,
}
/*----------------------------------------------------------------
-* unlocked_usbctlx_complete
-*
-* A CTLX has completed. It may have been successful, it may not
-* have been. At this point, the CTLX should be quiescent. The URBs
-* aren't active and the timers should have been stopped.
-*
-* The CTLX is migrated to the "completing" queue, and the completing
-* tasklet is scheduled.
-*
-* Arguments:
-* hw ptr to a struct hfa384x structure
-* ctlx ptr to a ctlx structure
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* Either, assume interrupt
-----------------------------------------------------------------*/
-static void unlocked_usbctlx_complete(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx)
+ * unlocked_usbctlx_complete
+ *
+ * A CTLX has completed. It may have been successful, it may not
+ * have been. At this point, the CTLX should be quiescent. The URBs
+ * aren't active and the timers should have been stopped.
+ *
+ * The CTLX is migrated to the "completing" queue, and the completing
+ * tasklet is scheduled.
+ *
+ * Arguments:
+ * hw ptr to a struct hfa384x structure
+ * ctlx ptr to a ctlx structure
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * Either, assume interrupt
+ *----------------------------------------------------------------
+ */
+static void unlocked_usbctlx_complete(struct hfa384x *hw,
+ struct hfa384x_usbctlx *ctlx)
{
/* Timers have been stopped, and ctlx should be in
* a terminal state. Retire it from the "active"
@@ -2871,21 +2916,22 @@ static void unlocked_usbctlx_complete(struct hfa384x *hw, struct hfa384x_usbctlx
}
/*----------------------------------------------------------------
-* hfa384x_usbctlxq_run
-*
-* Checks to see if the head item is running. If not, starts it.
-*
-* Arguments:
-* hw ptr to struct hfa384x
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* any
-----------------------------------------------------------------*/
+ * hfa384x_usbctlxq_run
+ *
+ * Checks to see if the head item is running. If not, starts it.
+ *
+ * Arguments:
+ * hw ptr to struct hfa384x
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * any
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbctlxq_run(struct hfa384x *hw)
{
unsigned long flags;
@@ -2916,9 +2962,9 @@ static void hfa384x_usbctlxq_run(struct hfa384x *hw)
list_move_tail(&head->list, &hw->ctlxq.active);
/* Fill the out packet */
- usb_fill_bulk_urb(&(hw->ctlx_urb), hw->usb,
+ usb_fill_bulk_urb(&hw->ctlx_urb, hw->usb,
hw->endp_out,
- &(head->outbuf), ROUNDUP64(head->outbufsize),
+ &head->outbuf, ROUNDUP64(head->outbufsize),
hfa384x_ctlxout_callback, hw);
hw->ctlx_urb.transfer_flags |= USB_QUEUE_BULK;
@@ -2971,26 +3017,27 @@ unlock:
}
/*----------------------------------------------------------------
-* hfa384x_usbin_callback
-*
-* Callback for URBs on the BULKIN endpoint.
-*
-* Arguments:
-* urb ptr to the completed urb
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbin_callback
+ *
+ * Callback for URBs on the BULKIN endpoint.
+ *
+ * Arguments:
+ * urb ptr to the completed urb
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbin_callback(struct urb *urb)
{
struct wlandevice *wlandev = urb->context;
struct hfa384x *hw;
- union hfa384x_usbin *usbin = (union hfa384x_usbin *)urb->transfer_buffer;
+ union hfa384x_usbin *usbin;
struct sk_buff *skb = NULL;
int result;
int urb_status;
@@ -3010,7 +3057,10 @@ static void hfa384x_usbin_callback(struct urb *urb)
goto exit;
skb = hw->rx_urb_skb;
- BUG_ON(!skb || (skb->data != urb->transfer_buffer));
+ if (!skb || (skb->data != urb->transfer_buffer)) {
+ WARN_ON(1);
+ return;
+ }
hw->rx_urb_skb = NULL;
@@ -3089,6 +3139,7 @@ static void hfa384x_usbin_callback(struct urb *urb)
/* Note: the check of the sw_support field, the type field doesn't
* have bit 12 set like the docs suggest.
*/
+ usbin = (union hfa384x_usbin *)urb->transfer_buffer;
type = le16_to_cpu(usbin->type);
if (HFA384x_USB_ISRXFRM(type)) {
if (action == HANDLE) {
@@ -3147,25 +3198,26 @@ exit:
}
/*----------------------------------------------------------------
-* hfa384x_usbin_ctlx
-*
-* We've received a URB containing a Prism2 "response" message.
-* This message needs to be matched up with a CTLX on the active
-* queue and our state updated accordingly.
-*
-* Arguments:
-* hw ptr to struct hfa384x
-* usbin ptr to USB IN packet
-* urb_status status of this Bulk-In URB
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbin_ctlx
+ *
+ * We've received a URB containing a Prism2 "response" message.
+ * This message needs to be matched up with a CTLX on the active
+ * queue and our state updated accordingly.
+ *
+ * Arguments:
+ * hw ptr to struct hfa384x
+ * usbin ptr to USB IN packet
+ * urb_status status of this Bulk-In URB
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin,
int urb_status)
{
@@ -3269,22 +3321,23 @@ unlock:
}
/*----------------------------------------------------------------
-* hfa384x_usbin_txcompl
-*
-* At this point we have the results of a previous transmit.
-*
-* Arguments:
-* wlandev wlan device
-* usbin ptr to the usb transfer buffer
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbin_txcompl
+ *
+ * At this point we have the results of a previous transmit.
+ *
+ * Arguments:
+ * wlandev wlan device
+ * usbin ptr to the usb transfer buffer
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbin_txcompl(struct wlandevice *wlandev,
union hfa384x_usbin *usbin)
{
@@ -3300,22 +3353,23 @@ static void hfa384x_usbin_txcompl(struct wlandevice *wlandev,
}
/*----------------------------------------------------------------
-* hfa384x_usbin_rx
-*
-* At this point we have a successful received a rx frame packet.
-*
-* Arguments:
-* wlandev wlan device
-* usbin ptr to the usb transfer buffer
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbin_rx
+ *
+ * At this point we have successfully received an rx frame packet.
+ *
+ * Arguments:
+ * wlandev wlan device
+ * usbin ptr to the usb transfer buffer
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb)
{
union hfa384x_usbin *usbin = (union hfa384x_usbin *)skb->data;
@@ -3396,30 +3450,31 @@ static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb)
}
/*----------------------------------------------------------------
-* hfa384x_int_rxmonitor
-*
-* Helper function for int_rx. Handles monitor frames.
-* Note that this function allocates space for the FCS and sets it
-* to 0xffffffff. The hfa384x doesn't give us the FCS value but the
-* higher layers expect it. 0xffffffff is used as a flag to indicate
-* the FCS is bogus.
-*
-* Arguments:
-* wlandev wlan device structure
-* rxfrm rx descriptor read from card in int_rx
-*
-* Returns:
-* nothing
-*
-* Side effects:
-* Allocates an skb and passes it up via the PF_PACKET interface.
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_int_rxmonitor
+ *
+ * Helper function for int_rx. Handles monitor frames.
+ * Note that this function allocates space for the FCS and sets it
+ * to 0xffffffff. The hfa384x doesn't give us the FCS value but the
+ * higher layers expect it. 0xffffffff is used as a flag to indicate
+ * the FCS is bogus.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * rxfrm rx descriptor read from card in int_rx
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ * Allocates an skb and passes it up via the PF_PACKET interface.
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
struct hfa384x_usb_rxfrm *rxfrm)
{
- struct hfa384x_rx_frame *rxdesc = &(rxfrm->desc);
+ struct hfa384x_rx_frame *rxdesc = &rxfrm->desc;
unsigned int hdrlen = 0;
unsigned int datalen = 0;
unsigned int skblen = 0;
@@ -3474,9 +3529,10 @@ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
}
/* Copy the 802.11 header to the skb
- (ctl frames may be less than a full header) */
+ * (ctl frames may be less than a full header)
+ */
datap = skb_put(skb, hdrlen);
- memcpy(datap, &(rxdesc->frame_control), hdrlen);
+ memcpy(datap, &rxdesc->frame_control, hdrlen);
/* If any, copy the data from the card to the skb */
if (datalen > 0) {
@@ -3501,22 +3557,23 @@ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
}
/*----------------------------------------------------------------
-* hfa384x_usbin_info
-*
-* At this point we have a successful received a Prism2 info frame.
-*
-* Arguments:
-* wlandev wlan device
-* usbin ptr to the usb transfer buffer
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbin_info
+ *
+ * At this point we have successfully received a Prism2 info frame.
+ *
+ * Arguments:
+ * wlandev wlan device
+ * usbin ptr to the usb transfer buffer
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbin_info(struct wlandevice *wlandev,
union hfa384x_usbin *usbin)
{
@@ -3526,21 +3583,22 @@ static void hfa384x_usbin_info(struct wlandevice *wlandev,
}
/*----------------------------------------------------------------
-* hfa384x_usbout_callback
-*
-* Callback for URBs on the BULKOUT endpoint.
-*
-* Arguments:
-* urb ptr to the completed urb
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbout_callback
+ *
+ * Callback for URBs on the BULKOUT endpoint.
+ *
+ * Arguments:
+ * urb ptr to the completed urb
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbout_callback(struct urb *urb)
{
struct wlandevice *wlandev = urb->context;
@@ -3601,21 +3659,22 @@ static void hfa384x_usbout_callback(struct urb *urb)
}
/*----------------------------------------------------------------
-* hfa384x_ctlxout_callback
-*
-* Callback for control data on the BULKOUT endpoint.
-*
-* Arguments:
-* urb ptr to the completed urb
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_ctlxout_callback
+ *
+ * Callback for control data on the BULKOUT endpoint.
+ *
+ * Arguments:
+ * urb ptr to the completed urb
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_ctlxout_callback(struct urb *urb)
{
struct hfa384x *hw = urb->context;
@@ -3730,23 +3789,24 @@ delresp:
}
/*----------------------------------------------------------------
-* hfa384x_usbctlx_reqtimerfn
-*
-* Timer response function for CTLX request timeouts. If this
-* function is called, it means that the callback for the OUT
-* URB containing a Prism2.x XXX_Request was never called.
-*
-* Arguments:
-* data a ptr to the struct hfa384x
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbctlx_reqtimerfn
+ *
+ * Timer response function for CTLX request timeouts. If this
+ * function is called, it means that the callback for the OUT
+ * URB containing a Prism2.x XXX_Request was never called.
+ *
+ * Arguments:
+ * data a ptr to the struct hfa384x
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbctlx_reqtimerfn(unsigned long data)
{
struct hfa384x *hw = (struct hfa384x *)data;
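A hedged illustration of the request-timeout scheme described in the comment above (hypothetical demo_* names; the pre-4.15 setup_timer()/mod_timer() API matching this driver's era): a timer is armed when the request URB goes out, and if the OUT URB completion callback never runs, the timer fires and the handler gets its context pointer back through the data argument.

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_hw {
	struct timer_list reqtimer;	/* fires if the OUT URB callback never runs */
};

static void demo_reqtimerfn(unsigned long data)
{
	struct demo_hw *hw = (struct demo_hw *)data;

	/* Timed out: the request URB completion never arrived; recover here. */
	(void)hw;
}

static void demo_arm_reqtimer(struct demo_hw *hw)
{
	setup_timer(&hw->reqtimer, demo_reqtimerfn, (unsigned long)hw);
	mod_timer(&hw->reqtimer, jiffies + msecs_to_jiffies(100));
}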
@@ -3788,23 +3848,24 @@ static void hfa384x_usbctlx_reqtimerfn(unsigned long data)
}
/*----------------------------------------------------------------
-* hfa384x_usbctlx_resptimerfn
-*
-* Timer response function for CTLX response timeouts. If this
-* function is called, it means that the callback for the IN
-* URB containing a Prism2.x XXX_Response was never called.
-*
-* Arguments:
-* data a ptr to the struct hfa384x
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbctlx_resptimerfn
+ *
+ * Timer response function for CTLX response timeouts. If this
+ * function is called, it means that the callback for the IN
+ * URB containing a Prism2.x XXX_Response was never called.
+ *
+ * Arguments:
+ * data a ptr to the struct hfa384x
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbctlx_resptimerfn(unsigned long data)
{
struct hfa384x *hw = (struct hfa384x *)data;
@@ -3830,20 +3891,21 @@ static void hfa384x_usbctlx_resptimerfn(unsigned long data)
}
/*----------------------------------------------------------------
-* hfa384x_usb_throttlefn
-*
-*
-* Arguments:
-* data ptr to hw
-*
-* Returns:
-* Nothing
-*
-* Side effects:
-*
-* Call context:
-* Interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usb_throttlefn
+ *
+ *
+ * Arguments:
+ * data ptr to hw
+ *
+ * Returns:
+ * Nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * Interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usb_throttlefn(unsigned long data)
{
struct hfa384x *hw = (struct hfa384x *)data;
@@ -3869,24 +3931,26 @@ static void hfa384x_usb_throttlefn(unsigned long data)
}
/*----------------------------------------------------------------
-* hfa384x_usbctlx_submit
-*
-* Called from the doxxx functions to submit a CTLX to the queue
-*
-* Arguments:
-* hw ptr to the hw struct
-* ctlx ctlx structure to enqueue
-*
-* Returns:
-* -ENODEV if the adapter is unplugged
-* 0
-*
-* Side effects:
-*
-* Call context:
-* process or interrupt
-----------------------------------------------------------------*/
-static int hfa384x_usbctlx_submit(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx)
+ * hfa384x_usbctlx_submit
+ *
+ * Called from the doxxx functions to submit a CTLX to the queue
+ *
+ * Arguments:
+ * hw ptr to the hw struct
+ * ctlx ctlx structure to enqueue
+ *
+ * Returns:
+ * -ENODEV if the adapter is unplugged
+ * 0
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process or interrupt
+ *----------------------------------------------------------------
+ */
+static int hfa384x_usbctlx_submit(struct hfa384x *hw,
+ struct hfa384x_usbctlx *ctlx)
{
unsigned long flags;
@@ -3906,21 +3970,22 @@ static int hfa384x_usbctlx_submit(struct hfa384x *hw, struct hfa384x_usbctlx *ct
}
/*----------------------------------------------------------------
-* hfa384x_isgood_pdrcore
-*
-* Quick check of PDR codes.
-*
-* Arguments:
-* pdrcode PDR code number (host order)
-*
-* Returns:
-* zero not good.
-* one is good.
-*
-* Side effects:
-*
-* Call context:
-----------------------------------------------------------------*/
+ * hfa384x_isgood_pdrcode
+ *
+ * Quick check of PDR codes.
+ *
+ * Arguments:
+ * pdrcode PDR code number (host order)
+ *
+ * Returns:
+ * zero not good.
+ * one is good.
+ *
+ * Side effects:
+ *
+ * Call context:
+ *----------------------------------------------------------------
+ */
static int hfa384x_isgood_pdrcode(u16 pdrcode)
{
switch (pdrcode) {
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 0247cbc29145..8387e6a3031a 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -1,56 +1,56 @@
/* src/p80211/p80211conv.c
-*
-* Ether/802.11 conversions and packet buffer routines
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file defines the functions that perform Ethernet to/from
-* 802.11 frame conversions.
-*
-* --------------------------------------------------------------------
-*
-*================================================================
-*/
+ *
+ * Ether/802.11 conversions and packet buffer routines
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file defines the functions that perform Ethernet to/from
+ * 802.11 frame conversions.
+ *
+ * --------------------------------------------------------------------
+ *
+ *================================================================
+ */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -79,31 +79,31 @@ static const u8 oui_rfc1042[] = { 0x00, 0x00, 0x00 };
static const u8 oui_8021h[] = { 0x00, 0x00, 0xf8 };
/*----------------------------------------------------------------
-* p80211pb_ether_to_80211
-*
-* Uses the contents of the ether frame and the etherconv setting
-* to build the elements of the 802.11 frame.
-*
-* We don't actually set
-* up the frame header here. That's the MAC's job. We're only handling
-* conversion of DIXII or 802.3+LLC frames to something that works
-* with 802.11.
-*
-* Note -- 802.11 header is NOT part of the skb. Likewise, the 802.11
-* FCS is also not present and will need to be added elsewhere.
-*
-* Arguments:
-* ethconv Conversion type to perform
-* skb skbuff containing the ether frame
-* p80211_hdr 802.11 header
-*
-* Returns:
-* 0 on success, non-zero otherwise
-*
-* Call context:
-* May be called in interrupt or non-interrupt context
-*----------------------------------------------------------------
-*/
+ * p80211pb_ether_to_80211
+ *
+ * Uses the contents of the ether frame and the etherconv setting
+ * to build the elements of the 802.11 frame.
+ *
+ * We don't actually set
+ * up the frame header here. That's the MAC's job. We're only handling
+ * conversion of DIXII or 802.3+LLC frames to something that works
+ * with 802.11.
+ *
+ * Note -- 802.11 header is NOT part of the skb. Likewise, the 802.11
+ * FCS is also not present and will need to be added elsewhere.
+ *
+ * Arguments:
+ * ethconv Conversion type to perform
+ * skb skbuff containing the ether frame
+ * p80211_hdr 802.11 header
+ *
+ * Returns:
+ * 0 on success, non-zero otherwise
+ *
+ * Call context:
+ * May be called in interrupt or non-interrupt context
+ *----------------------------------------------------------------
+ */
int skb_ether_to_p80211(struct wlandevice *wlandev, u32 ethconv,
struct sk_buff *skb, union p80211_hdr *p80211_hdr,
struct p80211_metawep *p80211_wep)
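As a rough, simplified sketch of the encapsulation step this conversion performs (plain userspace C for illustration, not the driver's implementation): the DIX-II Ethernet type and payload are re-wrapped behind an LLC/SNAP header whose OUI selects RFC 1042 or 802.1h translation, matching the oui_rfc1042/oui_8021h constants above.

#include <stdint.h>
#include <string.h>

/* LLC/SNAP header placed in front of the payload of an 802.11 data frame. */
struct llc_snap {
	uint8_t dsap, ssap, ctl;	/* 0xaa, 0xaa, 0x03 */
	uint8_t oui[3];			/* 00:00:00 = RFC 1042, 00:00:f8 = 802.1h */
	uint16_t type;			/* Ethernet protocol type, network order */
} __attribute__((packed));

static size_t build_rfc1042_header(uint8_t *out, uint16_t type_be)
{
	struct llc_snap hdr = {
		.dsap = 0xaa, .ssap = 0xaa, .ctl = 0x03,
		.oui = { 0x00, 0x00, 0x00 },
		.type = type_be,
	};

	memcpy(out, &hdr, sizeof(hdr));
	return sizeof(hdr);		/* 8 bytes prepended to the payload */
}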
@@ -255,25 +255,25 @@ static void orinoco_spy_gather(struct wlandevice *wlandev, char *mac,
}
/*----------------------------------------------------------------
-* p80211pb_80211_to_ether
-*
-* Uses the contents of a received 802.11 frame and the etherconv
-* setting to build an ether frame.
-*
-* This function extracts the src and dest address from the 802.11
-* frame to use in the construction of the eth frame.
-*
-* Arguments:
-* ethconv Conversion type to perform
-* skb Packet buffer containing the 802.11 frame
-*
-* Returns:
-* 0 on success, non-zero otherwise
-*
-* Call context:
-* May be called in interrupt or non-interrupt context
-*----------------------------------------------------------------
-*/
+ * p80211pb_80211_to_ether
+ *
+ * Uses the contents of a received 802.11 frame and the etherconv
+ * setting to build an ether frame.
+ *
+ * This function extracts the src and dest address from the 802.11
+ * frame to use in the construction of the eth frame.
+ *
+ * Arguments:
+ * ethconv Conversion type to perform
+ * skb Packet buffer containing the 802.11 frame
+ *
+ * Returns:
+ * 0 on success, non-zero otherwise
+ *
+ * Call context:
+ * May be called in interrupt or non-interrupt context
+ *----------------------------------------------------------------
+ */
int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv,
struct sk_buff *skb)
{
@@ -508,22 +508,22 @@ int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv,
}
/*----------------------------------------------------------------
-* p80211_stt_findproto
-*
-* Searches the 802.1h Selective Translation Table for a given
-* protocol.
-*
-* Arguments:
-* proto protocol number (in host order) to search for.
-*
-* Returns:
-* 1 - if the table is empty or a match is found.
-* 0 - if the table is non-empty and a match is not found.
-*
-* Call context:
-* May be called in interrupt or non-interrupt context
-*----------------------------------------------------------------
-*/
+ * p80211_stt_findproto
+ *
+ * Searches the 802.1h Selective Translation Table for a given
+ * protocol.
+ *
+ * Arguments:
+ * proto protocol number (in host order) to search for.
+ *
+ * Returns:
+ * 1 - if the table is empty or a match is found.
+ * 0 - if the table is non-empty and a match is not found.
+ *
+ * Call context:
+ * May be called in interrupt or non-interrupt context
+ *----------------------------------------------------------------
+ */
int p80211_stt_findproto(u16 proto)
{
/* Always return found for now. This is the behavior used by the */
@@ -540,21 +540,21 @@ int p80211_stt_findproto(u16 proto)
}
/*----------------------------------------------------------------
-* p80211skb_rxmeta_detach
-*
-* Disconnects the frmmeta and rxmeta from an skb.
-*
-* Arguments:
-* wlandev The wlandev this skb belongs to.
-* skb The skb we're attaching to.
-*
-* Returns:
-* 0 on success, non-zero otherwise
-*
-* Call context:
-* May be called in interrupt or non-interrupt context
-*----------------------------------------------------------------
-*/
+ * p80211skb_rxmeta_detach
+ *
+ * Disconnects the frmmeta and rxmeta from an skb.
+ *
+ * Arguments:
+ * wlandev The wlandev this skb belongs to.
+ * skb The skb we're detaching the rxmeta from.
+ *
+ * Returns:
+ * nothing
+ *
+ * Call context:
+ * May be called in interrupt or non-interrupt context
+ *----------------------------------------------------------------
+ */
void p80211skb_rxmeta_detach(struct sk_buff *skb)
{
struct p80211_rxmeta *rxmeta;
@@ -584,22 +584,22 @@ void p80211skb_rxmeta_detach(struct sk_buff *skb)
}
/*----------------------------------------------------------------
-* p80211skb_rxmeta_attach
-*
-* Allocates a p80211rxmeta structure, initializes it, and attaches
-* it to an skb.
-*
-* Arguments:
-* wlandev The wlandev this skb belongs to.
-* skb The skb we're attaching to.
-*
-* Returns:
-* 0 on success, non-zero otherwise
-*
-* Call context:
-* May be called in interrupt or non-interrupt context
-*----------------------------------------------------------------
-*/
+ * p80211skb_rxmeta_attach
+ *
+ * Allocates a p80211rxmeta structure, initializes it, and attaches
+ * it to an skb.
+ *
+ * Arguments:
+ * wlandev The wlandev this skb belongs to.
+ * skb The skb we're attaching to.
+ *
+ * Returns:
+ * 0 on success, non-zero otherwise
+ *
+ * Call context:
+ * May be called in interrupt or non-interrupt context
+ *----------------------------------------------------------------
+ */
int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
{
int result = 0;
@@ -615,11 +615,9 @@ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
}
/* Allocate the rxmeta */
- rxmeta = kzalloc(sizeof(struct p80211_rxmeta), GFP_ATOMIC);
+ rxmeta = kzalloc(sizeof(*rxmeta), GFP_ATOMIC);
if (!rxmeta) {
- netdev_err(wlandev->netdev,
- "%s: Failed to allocate rxmeta.\n", wlandev->name);
result = 1;
goto exit;
}
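A small hedged sketch of the two cleanups in the hunk above (a hypothetical helper; struct p80211_rxmeta is assumed to come from p80211conv.h): size the allocation from the pointer with sizeof(*ptr) so it tracks the pointee's type, and drop the driver-local OOM message because the allocator already warns on failure.

#include <linux/slab.h>

static struct p80211_rxmeta *rxmeta_alloc(void)
{
	struct p80211_rxmeta *rxmeta;

	/* sizeof(*rxmeta) stays correct if the pointee type ever changes. */
	rxmeta = kzalloc(sizeof(*rxmeta), GFP_ATOMIC);
	if (!rxmeta)
		return NULL;	/* no netdev_err(): kzalloc already logs failures */

	return rxmeta;
}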
@@ -638,22 +636,22 @@ exit:
}
/*----------------------------------------------------------------
-* p80211skb_free
-*
-* Frees an entire p80211skb by checking and freeing the meta struct
-* and then freeing the skb.
-*
-* Arguments:
-* wlandev The wlandev this skb belongs to.
-* skb The skb we're attaching to.
-*
-* Returns:
-* 0 on success, non-zero otherwise
-*
-* Call context:
-* May be called in interrupt or non-interrupt context
-*----------------------------------------------------------------
-*/
+ * p80211skb_free
+ *
+ * Frees an entire p80211skb by checking and freeing the meta struct
+ * and then freeing the skb.
+ *
+ * Arguments:
+ * wlandev The wlandev this skb belongs to.
+ * skb The skb we're freeing.
+ *
+ * Returns:
+ * nothing
+ *
+ * Call context:
+ * May be called in interrupt or non-interrupt context
+ *----------------------------------------------------------------
+ */
void p80211skb_free(struct wlandevice *wlandev, struct sk_buff *skb)
{
struct p80211_frmmeta *meta;
diff --git a/drivers/staging/wlan-ng/p80211conv.h b/drivers/staging/wlan-ng/p80211conv.h
index 8c10357bedf0..ed70d98e5cf1 100644
--- a/drivers/staging/wlan-ng/p80211conv.h
+++ b/drivers/staging/wlan-ng/p80211conv.h
@@ -1,54 +1,54 @@
/* p80211conv.h
-*
-* Ether/802.11 conversions and packet buffer routines
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file declares the functions, types and macros that perform
-* Ethernet to/from 802.11 frame conversions.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Ether/802.11 conversions and packet buffer routines
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file declares the functions, types and macros that perform
+ * Ethernet to/from 802.11 frame conversions.
+ *
+ * --------------------------------------------------------------------
+ */
#ifndef _LINUX_P80211CONV_H
#define _LINUX_P80211CONV_H
diff --git a/drivers/staging/wlan-ng/p80211hdr.h b/drivers/staging/wlan-ng/p80211hdr.h
index 79d9b20b364d..2c44c613a586 100644
--- a/drivers/staging/wlan-ng/p80211hdr.h
+++ b/drivers/staging/wlan-ng/p80211hdr.h
@@ -1,61 +1,61 @@
/* p80211hdr.h
-*
-* Macros, types, and functions for handling 802.11 MAC headers
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file declares the constants and types used in the interface
-* between a wlan driver and the user mode utilities.
-*
-* Note:
-* - Constant values are always in HOST byte order. To assign
-* values to multi-byte fields they _must_ be converted to
-* ieee byte order. To retrieve multi-byte values from incoming
-* frames, they must be converted to host order.
-*
-* All functions declared here are implemented in p80211.c
-* --------------------------------------------------------------------
-*/
+ *
+ * Macros, types, and functions for handling 802.11 MAC headers
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file declares the constants and types used in the interface
+ * between a wlan driver and the user mode utilities.
+ *
+ * Note:
+ * - Constant values are always in HOST byte order. To assign
+ * values to multi-byte fields they _must_ be converted to
+ * ieee byte order. To retrieve multi-byte values from incoming
+ * frames, they must be converted to host order.
+ *
+ * All functions declared here are implemented in p80211.c
+ * --------------------------------------------------------------------
+ */
#ifndef _P80211HDR_H
#define _P80211HDR_H
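A minimal sketch of the byte-order rule in the Note above (standalone helpers using the kernel's standard conversion macros; the field names are illustrative): host-order constants are converted with cpu_to_le16() before being written into a multi-byte frame field, and converted back with le16_to_cpu() when read from an incoming frame.

#include <linux/types.h>
#include <asm/byteorder.h>

/* Write a host-order value into an IEEE-order (little-endian) frame field. */
static inline void set_fc(__le16 *fc_field, u16 host_fc)
{
	*fc_field = cpu_to_le16(host_fc);
}

/* Read a multi-byte frame field back into host order before using it. */
static inline u16 get_fc(const __le16 *fc_field)
{
	return le16_to_cpu(*fc_field);
}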
@@ -131,8 +131,8 @@
/* SET_FC_FSTYPE(WLAN_FSTYPE_RTS) ); */
/*------------------------------------------------------------*/
-#define WLAN_GET_FC_FTYPE(n) ((((u16)(n)) & (BIT(2) | BIT(3))) >> 2)
-#define WLAN_GET_FC_FSTYPE(n) ((((u16)(n)) & (BIT(4)|BIT(5)|BIT(6)|BIT(7))) >> 4)
+#define WLAN_GET_FC_FTYPE(n) ((((u16)(n)) & GENMASK(3, 2)) >> 2)
+#define WLAN_GET_FC_FSTYPE(n) ((((u16)(n)) & GENMASK(7, 4)) >> 4)
#define WLAN_GET_FC_TODS(n) ((((u16)(n)) & (BIT(8))) >> 8)
#define WLAN_GET_FC_FROMDS(n) ((((u16)(n)) & (BIT(9))) >> 9)
#define WLAN_GET_FC_ISWEP(n) ((((u16)(n)) & (BIT(14))) >> 14)
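To make the GENMASK() rewrite above concrete, here is a small standalone check (userspace C, with local copies of BIT()/GENMASK() as assumptions) showing that GENMASK(3, 2) and GENMASK(7, 4) cover exactly the same bits as the BIT() combinations they replace, so the frame-control extraction is unchanged.

#include <stdio.h>
#include <stdint.h>

#define BIT(n)		(1UL << (n))
#define GENMASK(h, l)	(((1UL << ((h) - (l) + 1)) - 1) << (l))

#define GET_FC_FTYPE(n)		((((uint16_t)(n)) & GENMASK(3, 2)) >> 2)
#define GET_FC_FSTYPE(n)	((((uint16_t)(n)) & GENMASK(7, 4)) >> 4)

int main(void)
{
	uint16_t fc = 0x00b4;	/* example frame control: control frame, RTS */

	/* The new masks are identical to the old BIT() combinations. */
	printf("ftype mask ok: %d\n", GENMASK(3, 2) == (BIT(2) | BIT(3)));
	printf("fstype mask ok: %d\n",
	       GENMASK(7, 4) == (BIT(4) | BIT(5) | BIT(6) | BIT(7)));
	printf("ftype=%lu fstype=%lu\n", GET_FC_FTYPE(fc), GET_FC_FSTYPE(fc));
	return 0;
}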
diff --git a/drivers/staging/wlan-ng/p80211ioctl.h b/drivers/staging/wlan-ng/p80211ioctl.h
index 06c5e36649a7..ab6067e65050 100644
--- a/drivers/staging/wlan-ng/p80211ioctl.h
+++ b/drivers/staging/wlan-ng/p80211ioctl.h
@@ -1,64 +1,64 @@
/* p80211ioctl.h
-*
-* Declares constants and types for the p80211 ioctls
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* While this file is called 'ioctl' is purpose goes a little beyond
-* that. This file defines the types and contants used to implement
-* the p80211 request/confirm/indicate interfaces on Linux. The
-* request/confirm interface is, in fact, normally implemented as an
-* ioctl. The indicate interface on the other hand, is implemented
-* using the Linux 'netlink' interface.
-*
-* The reason I say that request/confirm is 'normally' implemented
-* via ioctl is that we're reserving the right to be able to send
-* request commands via the netlink interface. This will be necessary
-* if we ever need to send request messages when there aren't any
-* wlan network devices present (i.e. sending a message that only p80211
-* cares about.
-* --------------------------------------------------------------------
-*/
+ *
+ * Declares constants and types for the p80211 ioctls
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * While this file is called 'ioctl', its purpose goes a little beyond
+ * that. This file defines the types and constants used to implement
+ * the p80211 request/confirm/indicate interfaces on Linux. The
+ * request/confirm interface is, in fact, normally implemented as an
+ * ioctl. The indicate interface on the other hand, is implemented
+ * using the Linux 'netlink' interface.
+ *
+ * The reason I say that request/confirm is 'normally' implemented
+ * via ioctl is that we're reserving the right to be able to send
+ * request commands via the netlink interface. This will be necessary
+ * if we ever need to send request messages when there aren't any
+ * wlan network devices present (i.e. sending a message that only p80211
+ * cares about).
+ * --------------------------------------------------------------------
+ */
#ifndef _P80211IOCTL_H
#define _P80211IOCTL_H
diff --git a/drivers/staging/wlan-ng/p80211metadef.h b/drivers/staging/wlan-ng/p80211metadef.h
index b0d3567ca0ad..ea3d9ce222b9 100644
--- a/drivers/staging/wlan-ng/p80211metadef.h
+++ b/drivers/staging/wlan-ng/p80211metadef.h
@@ -1,48 +1,48 @@
/* This file is GENERATED AUTOMATICALLY. DO NOT EDIT OR MODIFY.
-* --------------------------------------------------------------------
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*/
+ * --------------------------------------------------------------------
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ */
#ifndef _P80211MKMETADEF_H
#define _P80211MKMETADEF_H
diff --git a/drivers/staging/wlan-ng/p80211mgmt.h b/drivers/staging/wlan-ng/p80211mgmt.h
index 3dd066ac034e..653950fd9843 100644
--- a/drivers/staging/wlan-ng/p80211mgmt.h
+++ b/drivers/staging/wlan-ng/p80211mgmt.h
@@ -1,101 +1,101 @@
/* p80211mgmt.h
-*
-* Macros, types, and functions to handle 802.11 mgmt frames
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file declares the constants and types used in the interface
-* between a wlan driver and the user mode utilities.
-*
-* Notes:
-* - Constant values are always in HOST byte order. To assign
-* values to multi-byte fields they _must_ be converted to
-* ieee byte order. To retrieve multi-byte values from incoming
-* frames, they must be converted to host order.
-*
-* - The len member of the frame structure does NOT!!! include
-* the MAC CRC. Therefore, the len field on rx'd frames should
-* have 4 subtracted from it.
-*
-* All functions declared here are implemented in p80211.c
-*
-* The types, macros, and functions defined here are primarily
-* used for encoding and decoding management frames. They are
-* designed to follow these patterns of use:
-*
-* DECODE:
-* 1) a frame of length len is received into buffer b
-* 2) using the hdr structure and macros, we determine the type
-* 3) an appropriate mgmt frame structure, mf, is allocated and zeroed
-* 4) mf.hdr = b
-* mf.buf = b
-* mf.len = len
-* 5) call mgmt_decode( mf )
-* 6) the frame field pointers in mf are now set. Note that any
-* multi-byte frame field values accessed using the frame field
-* pointers are in ieee byte order and will have to be converted
-* to host order.
-*
-* ENCODE:
-* 1) Library client allocates buffer space for maximum length
-* frame of the desired type
-* 2) Library client allocates a mgmt frame structure, called mf,
-* of the desired type
-* 3) Set the following:
-* mf.type = <desired type>
-* mf.buf = <allocated buffer address>
-* 4) call mgmt_encode( mf )
-* 5) all of the fixed field pointers and fixed length information element
-* pointers in mf are now set to their respective locations in the
-* allocated space (fortunately, all variable length information elements
-* fall at the end of their respective frames).
-* 5a) The length field is set to include the last of the fixed and fixed
-* length fields. It may have to be updated for optional or variable
-* length information elements.
-* 6) Optional and variable length information elements are special cases
-* and must be handled individually by the client code.
-* --------------------------------------------------------------------
-*/
+ *
+ * Macros, types, and functions to handle 802.11 mgmt frames
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file declares the constants and types used in the interface
+ * between a wlan driver and the user mode utilities.
+ *
+ * Notes:
+ * - Constant values are always in HOST byte order. To assign
+ * values to multi-byte fields they _must_ be converted to
+ * ieee byte order. To retrieve multi-byte values from incoming
+ * frames, they must be converted to host order.
+ *
+ * - The len member of the frame structure does NOT!!! include
+ * the MAC CRC. Therefore, the len field on rx'd frames should
+ * have 4 subtracted from it.
+ *
+ * All functions declared here are implemented in p80211.c
+ *
+ * The types, macros, and functions defined here are primarily
+ * used for encoding and decoding management frames. They are
+ * designed to follow these patterns of use:
+ *
+ * DECODE:
+ * 1) a frame of length len is received into buffer b
+ * 2) using the hdr structure and macros, we determine the type
+ * 3) an appropriate mgmt frame structure, mf, is allocated and zeroed
+ * 4) mf.hdr = b
+ * mf.buf = b
+ * mf.len = len
+ * 5) call mgmt_decode( mf )
+ * 6) the frame field pointers in mf are now set. Note that any
+ * multi-byte frame field values accessed using the frame field
+ * pointers are in ieee byte order and will have to be converted
+ * to host order.
+ *
+ * ENCODE:
+ * 1) Library client allocates buffer space for maximum length
+ * frame of the desired type
+ * 2) Library client allocates a mgmt frame structure, called mf,
+ * of the desired type
+ * 3) Set the following:
+ * mf.type = <desired type>
+ * mf.buf = <allocated buffer address>
+ * 4) call mgmt_encode( mf )
+ * 5) all of the fixed field pointers and fixed length information element
+ * pointers in mf are now set to their respective locations in the
+ * allocated space (fortunately, all variable length information elements
+ * fall at the end of their respective frames).
+ * 5a) The length field is set to include the last of the fixed and fixed
+ * length fields. It may have to be updated for optional or variable
+ * length information elements.
+ * 6) Optional and variable length information elements are special cases
+ * and must be handled individually by the client code.
+ * --------------------------------------------------------------------
+ */
#ifndef _P80211MGMT_H
#define _P80211MGMT_H
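As a hedged illustration of the DECODE steps listed in the header above (the demo struct and function names are assumptions chosen for illustration; only the hdr/buf/len/type fields come from the comment itself):

#include <linux/types.h>
#include <linux/string.h>

/* Hypothetical frame struct following the mf.hdr/mf.buf/mf.len layout. */
struct demo_mgmt_frame {
	u16 type;
	u16 len;
	u8 *buf;
	union p80211_hdr *hdr;
	/* decode fills in pointers to the fixed fields/IEs from here on */
};

static void demo_decode(u8 *b, u16 len)
{
	struct demo_mgmt_frame mf;

	memset(&mf, 0, sizeof(mf));	/* step 3: allocate and zero */
	mf.hdr = (union p80211_hdr *)b;	/* step 4: point at the raw frame */
	mf.buf = b;
	mf.len = len;

	/* step 5 would call the matching mgmt_decode routine here;   */
	/* step 6: afterwards the frame field pointers are set, and   */
	/* multi-byte values they reference are still in IEEE order.  */
}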
diff --git a/drivers/staging/wlan-ng/p80211msg.h b/drivers/staging/wlan-ng/p80211msg.h
index 43d2f971e2cd..40c5cf5997c7 100644
--- a/drivers/staging/wlan-ng/p80211msg.h
+++ b/drivers/staging/wlan-ng/p80211msg.h
@@ -1,49 +1,49 @@
/* p80211msg.h
-*
-* Macros, constants, types, and funcs for req and ind messages
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Macros, constants, types, and funcs for req and ind messages
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ */
#ifndef _P80211MSG_H
#define _P80211MSG_H
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 4762d38a720e..73fcf07254fe 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -1,53 +1,53 @@
/* src/p80211/p80211knetdev.c
-*
-* Linux Kernel net device interface
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* The functions required for a Linux network device are defined here.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Linux Kernel net device interface
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * The functions required for a Linux network device are defined here.
+ *
+ * --------------------------------------------------------------------
+ */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -112,17 +112,18 @@ module_param(wlan_wext_write, int, 0644);
MODULE_PARM_DESC(wlan_wext_write, "enable write wireless extensions");
/*----------------------------------------------------------------
-* p80211knetdev_init
-*
-* Init method for a Linux netdevice. Called in response to
-* register_netdev.
-*
-* Arguments:
-* none
-*
-* Returns:
-* nothing
-----------------------------------------------------------------*/
+ * p80211knetdev_init
+ *
+ * Init method for a Linux netdevice. Called in response to
+ * register_netdev.
+ *
+ * Arguments:
+ * none
+ *
+ * Returns:
+ * nothing
+ *----------------------------------------------------------------
+ */
static int p80211knetdev_init(struct net_device *netdev)
{
/* Called in response to register_netdev */
@@ -133,19 +134,20 @@ static int p80211knetdev_init(struct net_device *netdev)
}
/*----------------------------------------------------------------
-* p80211knetdev_open
-*
-* Linux netdevice open method. Following a successful call here,
-* the device is supposed to be ready for tx and rx. In our
-* situation that may not be entirely true due to the state of the
-* MAC below.
-*
-* Arguments:
-* netdev Linux network device structure
-*
-* Returns:
-* zero on success, non-zero otherwise
-----------------------------------------------------------------*/
+ * p80211knetdev_open
+ *
+ * Linux netdevice open method. Following a successful call here,
+ * the device is supposed to be ready for tx and rx. In our
+ * situation that may not be entirely true due to the state of the
+ * MAC below.
+ *
+ * Arguments:
+ * netdev Linux network device structure
+ *
+ * Returns:
+ * zero on success, non-zero otherwise
+ *----------------------------------------------------------------
+ */
static int p80211knetdev_open(struct net_device *netdev)
{
int result = 0; /* success */
@@ -170,17 +172,18 @@ static int p80211knetdev_open(struct net_device *netdev)
}
/*----------------------------------------------------------------
-* p80211knetdev_stop
-*
-* Linux netdevice stop (close) method. Following this call,
-* no frames should go up or down through this interface.
-*
-* Arguments:
-* netdev Linux network device structure
-*
-* Returns:
-* zero on success, non-zero otherwise
-----------------------------------------------------------------*/
+ * p80211knetdev_stop
+ *
+ * Linux netdevice stop (close) method. Following this call,
+ * no frames should go up or down through this interface.
+ *
+ * Arguments:
+ * netdev Linux network device structure
+ *
+ * Returns:
+ * zero on success, non-zero otherwise
+ *----------------------------------------------------------------
+ */
static int p80211knetdev_stop(struct net_device *netdev)
{
int result = 0;
@@ -196,18 +199,19 @@ static int p80211knetdev_stop(struct net_device *netdev)
}
/*----------------------------------------------------------------
-* p80211netdev_rx
-*
-* Frame receive function called by the mac specific driver.
-*
-* Arguments:
-* wlandev WLAN network device structure
-* skb skbuff containing a full 802.11 frame.
-* Returns:
-* nothing
-* Side effects:
-*
-----------------------------------------------------------------*/
+ * p80211netdev_rx
+ *
+ * Frame receive function called by the mac specific driver.
+ *
+ * Arguments:
+ * wlandev WLAN network device structure
+ * skb skbuff containing a full 802.11 frame.
+ * Returns:
+ * nothing
+ * Side effects:
+ *
+ *----------------------------------------------------------------
+ */
void p80211netdev_rx(struct wlandevice *wlandev, struct sk_buff *skb)
{
/* Enqueue for post-irq processing */
@@ -227,7 +231,8 @@ void p80211netdev_rx(struct wlandevice *wlandev, struct sk_buff *skb)
* CONV_TO_ETHER_FAILED if conversion failed
* CONV_TO_ETHER_SKIPPED if frame is ignored
*/
-static int p80211_convert_to_ether(struct wlandevice *wlandev, struct sk_buff *skb)
+static int p80211_convert_to_ether(struct wlandevice *wlandev,
+ struct sk_buff *skb)
{
struct p80211_hdr_a3 *hdr;
@@ -272,7 +277,6 @@ static void p80211netdev_rx_bh(unsigned long arg)
/* Let's empty our our queue */
while ((skb = skb_dequeue(&wlandev->nsd_rxq))) {
if (wlandev->state == WLAN_DEVICE_OPEN) {
-
if (dev->type != ARPHRD_ETHER) {
/* RAW frame; we shouldn't convert it */
/* XXX Append the Prism Header here instead. */
@@ -299,24 +303,25 @@ static void p80211netdev_rx_bh(unsigned long arg)
}
/*----------------------------------------------------------------
-* p80211knetdev_hard_start_xmit
-*
-* Linux netdevice method for transmitting a frame.
-*
-* Arguments:
-* skb Linux sk_buff containing the frame.
-* netdev Linux netdevice.
-*
-* Side effects:
-* If the lower layers report that buffers are full. netdev->tbusy
-* will be set to prevent higher layers from sending more traffic.
-*
-* Note: If this function returns non-zero, higher layers retain
-* ownership of the skb.
-*
-* Returns:
-* zero on success, non-zero on failure.
-----------------------------------------------------------------*/
+ * p80211knetdev_hard_start_xmit
+ *
+ * Linux netdevice method for transmitting a frame.
+ *
+ * Arguments:
+ * skb Linux sk_buff containing the frame.
+ * netdev Linux netdevice.
+ *
+ * Side effects:
+ * If the lower layers report that buffers are full, netdev->tbusy
+ * will be set to prevent higher layers from sending more traffic.
+ *
+ * Note: If this function returns non-zero, higher layers retain
+ * ownership of the skb.
+ *
+ * Returns:
+ * zero on success, non-zero on failure.
+ *----------------------------------------------------------------
+ */
static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
@@ -336,8 +341,8 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
goto failed;
}
- memset(&p80211_hdr, 0, sizeof(union p80211_hdr));
- memset(&p80211_wep, 0, sizeof(struct p80211_metawep));
+ memset(&p80211_hdr, 0, sizeof(p80211_hdr));
+ memset(&p80211_wep, 0, sizeof(p80211_wep));
if (netif_queue_stopped(netdev)) {
netdev_dbg(netdev, "called when queue stopped.\n");
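The two memset() changes in this hunk, and the matching memcpy()/skb_pull() and dot11req changes further down, follow the usual kernel preference for sizeof(variable) over sizeof(type): the length expression then tracks the variable automatically if its declared type ever changes. A minimal sketch of the pattern, using a hypothetical struct name:

	struct foo_hdr hdr;

	/* preferred: the length is tied to the variable itself */
	memset(&hdr, 0, sizeof(hdr));

	/* fragile: silently wrong if 'hdr' is later declared with another type */
	memset(&hdr, 0, sizeof(struct foo_hdr));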
@@ -375,8 +380,8 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
goto failed;
}
/* move the header over */
- memcpy(&p80211_hdr, skb->data, sizeof(union p80211_hdr));
- skb_pull(skb, sizeof(union p80211_hdr));
+ memcpy(&p80211_hdr, skb->data, sizeof(p80211_hdr));
+ skb_pull(skb, sizeof(p80211_hdr));
} else {
if (skb_ether_to_p80211
(wlandev, wlandev->ethconv, skb, &p80211_hdr,
@@ -435,17 +440,18 @@ failed:
}
/*----------------------------------------------------------------
-* p80211knetdev_set_multicast_list
-*
-* Called from higher layers whenever there's a need to set/clear
-* promiscuous mode or rewrite the multicast list.
-*
-* Arguments:
-* none
-*
-* Returns:
-* nothing
-----------------------------------------------------------------*/
+ * p80211knetdev_set_multicast_list
+ *
+ * Called from higher layers whenever there's a need to set/clear
+ * promiscuous mode or rewrite the multicast list.
+ *
+ * Arguments:
+ * none
+ *
+ * Returns:
+ * nothing
+ *----------------------------------------------------------------
+ */
static void p80211knetdev_set_multicast_list(struct net_device *dev)
{
struct wlandevice *wlandev = dev->ml_priv;
@@ -454,12 +460,12 @@ static void p80211knetdev_set_multicast_list(struct net_device *dev)
if (wlandev->set_multicast_list)
wlandev->set_multicast_list(wlandev, dev);
-
}
#ifdef SIOCETHTOOL
-static int p80211netdev_ethtool(struct wlandevice *wlandev, void __user *useraddr)
+static int p80211netdev_ethtool(struct wlandevice *wlandev,
+ void __user *useraddr)
{
u32 ethcmd;
struct ethtool_drvinfo info;
@@ -505,33 +511,35 @@ static int p80211netdev_ethtool(struct wlandevice *wlandev, void __user *useradd
#endif
/*----------------------------------------------------------------
-* p80211knetdev_do_ioctl
-*
-* Handle an ioctl call on one of our devices. Everything Linux
-* ioctl specific is done here. Then we pass the contents of the
-* ifr->data to the request message handler.
-*
-* Arguments:
-* dev Linux kernel netdevice
-* ifr Our private ioctl request structure, typed for the
-* generic struct ifreq so we can use ptr to func
-* w/o cast.
-*
-* Returns:
-* zero on success, a negative errno on failure. Possible values:
-* -ENETDOWN Device isn't up.
-* -EBUSY cmd already in progress
-* -ETIME p80211 cmd timed out (MSD may have its own timers)
-* -EFAULT memory fault copying msg from user buffer
-* -ENOMEM unable to allocate kernel msg buffer
-* -ENOSYS bad magic, it the cmd really for us?
-* -EintR sleeping on cmd, awakened by signal, cmd cancelled.
-*
-* Call Context:
-* Process thread (ioctl caller). TODO: SMP support may require
-* locks.
-----------------------------------------------------------------*/
-static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ * p80211knetdev_do_ioctl
+ *
+ * Handle an ioctl call on one of our devices. Everything Linux
+ * ioctl specific is done here. Then we pass the contents of the
+ * ifr->data to the request message handler.
+ *
+ * Arguments:
+ * dev Linux kernel netdevice
+ * ifr Our private ioctl request structure, typed for the
+ * generic struct ifreq so we can use ptr to func
+ * w/o cast.
+ *
+ * Returns:
+ * zero on success, a negative errno on failure. Possible values:
+ * -ENETDOWN Device isn't up.
+ * -EBUSY cmd already in progress
+ * -ETIME p80211 cmd timed out (MSD may have its own timers)
+ * -EFAULT memory fault copying msg from user buffer
+ * -ENOMEM unable to allocate kernel msg buffer
+ * -EINVAL bad magic, is the cmd really for us?
+ * -EINTR sleeping on cmd, awakened by signal, cmd cancelled.
+ *
+ * Call Context:
+ * Process thread (ioctl caller). TODO: SMP support may require
+ * locks.
+ *----------------------------------------------------------------
+ */
+static int p80211knetdev_do_ioctl(struct net_device *dev,
+ struct ifreq *ifr, int cmd)
{
int result = 0;
struct p80211ioctl_req *req = (struct p80211ioctl_req *)ifr;
@@ -550,7 +558,7 @@ static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int
/* Test the magic, assume ifr is good if it's there */
if (req->magic != P80211_IOCTL_MAGIC) {
- result = -ENOSYS;
+ result = -EINVAL;
goto bail;
}
@@ -558,7 +566,7 @@ static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int
result = 0;
goto bail;
} else if (cmd != P80211_IFREQ) {
- result = -ENOSYS;
+ result = -EINVAL;
goto bail;
}
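The switch from -ENOSYS to -EINVAL in the two checks above matches the reworded comment block: -ENOSYS conventionally means "system call not implemented", while -EINVAL is the usual return for a request whose contents fail validation. A simplified sketch of the intent (not the literal driver flow):

	/* reject requests that are malformed or not addressed to us */
	if (req->magic != P80211_IOCTL_MAGIC)
		return -EINVAL;		/* bad magic: not one of our requests */
	if (cmd != P80211_IFREQ)
		return -EINVAL;		/* unrecognized ioctl command */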
@@ -586,30 +594,31 @@ bail:
}
/*----------------------------------------------------------------
-* p80211knetdev_set_mac_address
-*
-* Handles the ioctl for changing the MACAddress of a netdevice
-*
-* references: linux/netdevice.h and drivers/net/net_init.c
-*
-* NOTE: [MSM] We only prevent address changes when the netdev is
-* up. We don't control anything based on dot11 state. If the
-* address is changed on a STA that's currently associated, you
-* will probably lose the ability to send and receive data frames.
-* Just be aware. Therefore, this should usually only be done
-* prior to scan/join/auth/assoc.
-*
-* Arguments:
-* dev netdevice struct
-* addr the new MACAddress (a struct)
-*
-* Returns:
-* zero on success, a negative errno on failure. Possible values:
-* -EBUSY device is bussy (cmd not possible)
-* -and errors returned by: p80211req_dorequest(..)
-*
-* by: Collin R. Mulliner <collin@mulliner.org>
-----------------------------------------------------------------*/
+ * p80211knetdev_set_mac_address
+ *
+ * Handles the ioctl for changing the MACAddress of a netdevice
+ *
+ * references: linux/netdevice.h and drivers/net/net_init.c
+ *
+ * NOTE: [MSM] We only prevent address changes when the netdev is
+ * up. We don't control anything based on dot11 state. If the
+ * address is changed on a STA that's currently associated, you
+ * will probably lose the ability to send and receive data frames.
+ * Just be aware. Therefore, this should usually only be done
+ * prior to scan/join/auth/assoc.
+ *
+ * Arguments:
+ * dev netdevice struct
+ * addr the new MACAddress (a struct)
+ *
+ * Returns:
+ * zero on success, a negative errno on failure. Possible values:
+ * -EBUSY device is busy (cmd not possible)
+ * -and errors returned by: p80211req_dorequest(..)
+ *
+ * by: Collin R. Mulliner <collin@mulliner.org>
+ *----------------------------------------------------------------
+ */
static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *new_addr = addr;
@@ -629,9 +638,9 @@ static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr)
resultcode = &dot11req.resultcode;
/* Set up a dot11req_mibset */
- memset(&dot11req, 0, sizeof(struct p80211msg_dot11req_mibset));
+ memset(&dot11req, 0, sizeof(dot11req));
dot11req.msgcode = DIDmsg_dot11req_mibset;
- dot11req.msglen = sizeof(struct p80211msg_dot11req_mibset);
+ dot11req.msglen = sizeof(dot11req);
memcpy(dot11req.devname,
((struct wlandevice *)dev->ml_priv)->name, WLAN_DEVNAMELEN_MAX - 1);
@@ -682,28 +691,29 @@ static const struct net_device_ops p80211_netdev_ops = {
};
/*----------------------------------------------------------------
-* wlan_setup
-*
-* Roughly matches the functionality of ether_setup. Here
-* we set up any members of the wlandevice structure that are common
-* to all devices. Additionally, we allocate a linux 'struct device'
-* and perform the same setup as ether_setup.
-*
-* Note: It's important that the caller have setup the wlandev->name
-* ptr prior to calling this function.
-*
-* Arguments:
-* wlandev ptr to the wlandev structure for the
-* interface.
-* physdev ptr to usb device
-* Returns:
-* zero on success, non-zero otherwise.
-* Call Context:
-* Should be process thread. We'll assume it might be
-* interrupt though. When we add support for statically
-* compiled drivers, this function will be called in the
-* context of the kernel startup code.
-----------------------------------------------------------------*/
+ * wlan_setup
+ *
+ * Roughly matches the functionality of ether_setup. Here
+ * we set up any members of the wlandevice structure that are common
+ * to all devices. Additionally, we allocate a linux 'struct device'
+ * and perform the same setup as ether_setup.
+ *
+ * Note: It's important that the caller has set up the wlandev->name
+ * ptr prior to calling this function.
+ *
+ * Arguments:
+ * wlandev ptr to the wlandev structure for the
+ * interface.
+ * physdev ptr to usb device
+ * Returns:
+ * zero on success, non-zero otherwise.
+ * Call Context:
+ * Should be process thread. We'll assume it might be
+ * interrupt though. When we add support for statically
+ * compiled drivers, this function will be called in the
+ * context of the kernel startup code.
+ *----------------------------------------------------------------
+ */
int wlan_setup(struct wlandevice *wlandev, struct device *physdev)
{
int result = 0;
@@ -757,24 +767,25 @@ int wlan_setup(struct wlandevice *wlandev, struct device *physdev)
}
/*----------------------------------------------------------------
-* wlan_unsetup
-*
-* This function is paired with the wlan_setup routine. It should
-* be called after unregister_wlandev. Basically, all it does is
-* free the 'struct device' that's associated with the wlandev.
-* We do it here because the 'struct device' isn't allocated
-* explicitly in the driver code, it's done in wlan_setup. To
-* do the free in the driver might seem like 'magic'.
-*
-* Arguments:
-* wlandev ptr to the wlandev structure for the
-* interface.
-* Call Context:
-* Should be process thread. We'll assume it might be
-* interrupt though. When we add support for statically
-* compiled drivers, this function will be called in the
-* context of the kernel startup code.
-----------------------------------------------------------------*/
+ * wlan_unsetup
+ *
+ * This function is paired with the wlan_setup routine. It should
+ * be called after unregister_wlandev. Basically, all it does is
+ * free the 'struct device' that's associated with the wlandev.
+ * We do it here because the 'struct device' isn't allocated
+ * explicitly in the driver code, it's done in wlan_setup. To
+ * do the free in the driver might seem like 'magic'.
+ *
+ * Arguments:
+ * wlandev ptr to the wlandev structure for the
+ * interface.
+ * Call Context:
+ * Should be process thread. We'll assume it might be
+ * interrupt though. When we add support for statically
+ * compiled drivers, this function will be called in the
+ * context of the kernel startup code.
+ *----------------------------------------------------------------
+ */
void wlan_unsetup(struct wlandevice *wlandev)
{
struct wireless_dev *wdev;
@@ -791,46 +802,48 @@ void wlan_unsetup(struct wlandevice *wlandev)
}
/*----------------------------------------------------------------
-* register_wlandev
-*
-* Roughly matches the functionality of register_netdev. This function
-* is called after the driver has successfully probed and set up the
-* resources for the device. It's now ready to become a named device
-* in the Linux system.
-*
-* First we allocate a name for the device (if not already set), then
-* we call the Linux function register_netdevice.
-*
-* Arguments:
-* wlandev ptr to the wlandev structure for the
-* interface.
-* Returns:
-* zero on success, non-zero otherwise.
-* Call Context:
-* Can be either interrupt or not.
-----------------------------------------------------------------*/
+ * register_wlandev
+ *
+ * Roughly matches the functionality of register_netdev. This function
+ * is called after the driver has successfully probed and set up the
+ * resources for the device. It's now ready to become a named device
+ * in the Linux system.
+ *
+ * First we allocate a name for the device (if not already set), then
+ * we call the Linux function register_netdevice.
+ *
+ * Arguments:
+ * wlandev ptr to the wlandev structure for the
+ * interface.
+ * Returns:
+ * zero on success, non-zero otherwise.
+ * Call Context:
+ * Can be either interrupt or not.
+ *----------------------------------------------------------------
+ */
int register_wlandev(struct wlandevice *wlandev)
{
return register_netdev(wlandev->netdev);
}
/*----------------------------------------------------------------
-* unregister_wlandev
-*
-* Roughly matches the functionality of unregister_netdev. This
-* function is called to remove a named device from the system.
-*
-* First we tell linux that the device should no longer exist.
-* Then we remove it from the list of known wlan devices.
-*
-* Arguments:
-* wlandev ptr to the wlandev structure for the
-* interface.
-* Returns:
-* zero on success, non-zero otherwise.
-* Call Context:
-* Can be either interrupt or not.
-----------------------------------------------------------------*/
+ * unregister_wlandev
+ *
+ * Roughly matches the functionality of unregister_netdev. This
+ * function is called to remove a named device from the system.
+ *
+ * First we tell linux that the device should no longer exist.
+ * Then we remove it from the list of known wlan devices.
+ *
+ * Arguments:
+ * wlandev ptr to the wlandev structure for the
+ * interface.
+ * Returns:
+ * zero on success, non-zero otherwise.
+ * Call Context:
+ * Can be either interrupt or not.
+ *----------------------------------------------------------------
+ */
int unregister_wlandev(struct wlandevice *wlandev)
{
struct sk_buff *skb;
@@ -845,35 +858,36 @@ int unregister_wlandev(struct wlandevice *wlandev)
}
/*----------------------------------------------------------------
-* p80211netdev_hwremoved
-*
-* Hardware removed notification. This function should be called
-* immediately after an MSD has detected that the underlying hardware
-* has been yanked out from under us. The primary things we need
-* to do are:
-* - Mark the wlandev
-* - Prevent any further traffic from the knetdev i/f
-* - Prevent any further requests from mgmt i/f
-* - If there are any waitq'd mgmt requests or mgmt-frame exchanges,
-* shut them down.
-* - Call the MSD hwremoved function.
-*
-* The remainder of the cleanup will be handled by unregister().
-* Our primary goal here is to prevent as much tickling of the MSD
-* as possible since the MSD is already in a 'wounded' state.
-*
-* TODO: As new features are added, this function should be
-* updated.
-*
-* Arguments:
-* wlandev WLAN network device structure
-* Returns:
-* nothing
-* Side effects:
-*
-* Call context:
-* Usually interrupt.
-----------------------------------------------------------------*/
+ * p80211netdev_hwremoved
+ *
+ * Hardware removed notification. This function should be called
+ * immediately after an MSD has detected that the underlying hardware
+ * has been yanked out from under us. The primary things we need
+ * to do are:
+ * - Mark the wlandev
+ * - Prevent any further traffic from the knetdev i/f
+ * - Prevent any further requests from mgmt i/f
+ * - If there are any waitq'd mgmt requests or mgmt-frame exchanges,
+ * shut them down.
+ * - Call the MSD hwremoved function.
+ *
+ * The remainder of the cleanup will be handled by unregister().
+ * Our primary goal here is to prevent as much tickling of the MSD
+ * as possible since the MSD is already in a 'wounded' state.
+ *
+ * TODO: As new features are added, this function should be
+ * updated.
+ *
+ * Arguments:
+ * wlandev WLAN network device structure
+ * Returns:
+ * nothing
+ * Side effects:
+ *
+ * Call context:
+ * Usually interrupt.
+ *----------------------------------------------------------------
+ */
void p80211netdev_hwremoved(struct wlandevice *wlandev)
{
wlandev->hwremoved = 1;
@@ -884,26 +898,27 @@ void p80211netdev_hwremoved(struct wlandevice *wlandev)
}
/*----------------------------------------------------------------
-* p80211_rx_typedrop
-*
-* Classifies the frame, increments the appropriate counter, and
-* returns 0|1|2 indicating whether the driver should handle, ignore, or
-* drop the frame
-*
-* Arguments:
-* wlandev wlan device structure
-* fc frame control field
-*
-* Returns:
-* zero if the frame should be handled by the driver,
-* one if the frame should be ignored
-* anything else means we drop it.
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * p80211_rx_typedrop
+ *
+ * Classifies the frame, increments the appropriate counter, and
+ * returns 0|1|2 indicating whether the driver should handle, ignore, or
+ * drop the frame
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * fc frame control field
+ *
+ * Returns:
+ * zero if the frame should be handled by the driver,
+ * one if the frame should be ignored
+ * anything else means we drop it.
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc)
{
u16 ftype;
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index 1e6a774fc7c5..8e0d08298c8b 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -1,54 +1,54 @@
/* p80211netdev.h
-*
-* WLAN net device structure and functions
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file declares the structure type that represents each wlan
-* interface.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * WLAN net device structure and functions
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file declares the structure type that represents each wlan
+ * interface.
+ *
+ * --------------------------------------------------------------------
+ */
#ifndef _LINUX_P80211NETDEV_H
#define _LINUX_P80211NETDEV_H
@@ -143,7 +143,7 @@ extern struct iw_handler_def p80211wext_handler_def;
#define NUM_WEPKEYS 4
#define MAX_KEYLEN 32
-#define HOSTWEP_DEFAULTKEY_MASK (BIT(1)|BIT(0))
+#define HOSTWEP_DEFAULTKEY_MASK GENMASK(1, 0)
#define HOSTWEP_SHAREDKEY BIT(3)
#define HOSTWEP_DECRYPT BIT(4)
#define HOSTWEP_ENCRYPT BIT(5)
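GENMASK(h, l) from the kernel's bit helpers builds a mask with bits l through h set, so GENMASK(1, 0) is identical to BIT(1) | BIT(0) (0x3) but states the "contiguous two-bit field" intent directly. A small usage sketch with a hypothetical flags byte:

	u8 flags = 0x06;					/* hypothetical host-WEP flag byte */
	u8 defaultkey = flags & HOSTWEP_DEFAULTKEY_MASK;	/* low two bits select key 0..3 */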
diff --git a/drivers/staging/wlan-ng/p80211req.c b/drivers/staging/wlan-ng/p80211req.c
index d43e85b5d49b..621df98183bf 100644
--- a/drivers/staging/wlan-ng/p80211req.c
+++ b/drivers/staging/wlan-ng/p80211req.c
@@ -1,54 +1,54 @@
/* src/p80211/p80211req.c
-*
-* Request/Indication/MacMgmt interface handling functions
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file contains the functions, types, and macros to support the
-* MLME request interface that's implemented via the device ioctls.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Request/Indication/MacMgmt interface handling functions
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file contains the functions, types, and macros to support the
+ * MLME request interface that's implemented via the device ioctls.
+ *
+ * --------------------------------------------------------------------
+ */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -72,10 +72,11 @@
#include "p80211metastruct.h"
#include "p80211req.h"
-static void p80211req_handlemsg(struct wlandevice *wlandev, struct p80211msg *msg);
+static void p80211req_handlemsg(struct wlandevice *wlandev,
+ struct p80211msg *msg);
static void p80211req_mibset_mibget(struct wlandevice *wlandev,
- struct p80211msg_dot11req_mibget *mib_msg,
- int isget);
+ struct p80211msg_dot11req_mibget *mib_msg,
+ int isget);
static void p80211req_handle_action(struct wlandevice *wlandev, u32 *data,
int isget, u32 flag)
@@ -93,21 +94,22 @@ static void p80211req_handle_action(struct wlandevice *wlandev, u32 *data,
}
/*----------------------------------------------------------------
-* p80211req_dorequest
-*
-* Handles an MLME request/confirm message.
-*
-* Arguments:
-* wlandev WLAN device struct
-* msgbuf Buffer containing a request message
-*
-* Returns:
-* 0 on success, an errno otherwise
-*
-* Call context:
-* Potentially blocks the caller, so it's a good idea to
-* not call this function from an interrupt context.
-----------------------------------------------------------------*/
+ * p80211req_dorequest
+ *
+ * Handles an MLME request/confirm message.
+ *
+ * Arguments:
+ * wlandev WLAN device struct
+ * msgbuf Buffer containing a request message
+ *
+ * Returns:
+ * 0 on success, an errno otherwise
+ *
+ * Call context:
+ * Potentially blocks the caller, so it's a good idea to
+ * not call this function from an interrupt context.
+ *----------------------------------------------------------------
+ */
int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf)
{
struct p80211msg *msg = (struct p80211msg *)msgbuf;
@@ -122,7 +124,7 @@ int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf)
/* Check Permissions */
if (!capable(CAP_NET_ADMIN) &&
- (msg->msgcode != DIDmsg_dot11req_mibget)) {
+ (msg->msgcode != DIDmsg_dot11req_mibget)) {
netdev_err(wlandev->netdev,
"%s: only dot11req_mibget allowed for non-root.\n",
wlandev->name);
@@ -130,7 +132,7 @@ int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf)
}
/* Check for busy status */
- if (test_and_set_bit(1, &(wlandev->request_pending)))
+ if (test_and_set_bit(1, &wlandev->request_pending))
return -EBUSY;
/* Allow p80211 to look at msg and handle if desired. */
@@ -139,35 +141,36 @@ int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf)
p80211req_handlemsg(wlandev, msg);
/* Pass it down to wlandev via wlandev->mlmerequest */
- if (wlandev->mlmerequest != NULL)
+ if (wlandev->mlmerequest)
wlandev->mlmerequest(wlandev, msg);
- clear_bit(1, &(wlandev->request_pending));
+ clear_bit(1, &wlandev->request_pending);
return 0; /* if result==0, msg->status still may contain an err */
}
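The request_pending handling visible in this hunk uses an atomic bit as a simple "one request at a time" gate; dropping the redundant parentheses around &wlandev->request_pending does not change behaviour. A sketch of the pattern with hypothetical names:

	/* claim the gate; a set bit means another request is in flight */
	if (test_and_set_bit(1, &dev->request_pending))
		return -EBUSY;

	/* ... handle the request ... */

	clear_bit(1, &dev->request_pending);	/* release the gate */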
/*----------------------------------------------------------------
-* p80211req_handlemsg
-*
-* p80211 message handler. Primarily looks for messages that
-* belong to p80211 and then dispatches the appropriate response.
-* TODO: we don't do anything yet. Once the linuxMIB is better
-* defined we'll need a get/set handler.
-*
-* Arguments:
-* wlandev WLAN device struct
-* msg message structure
-*
-* Returns:
-* nothing (any results are set in the status field of the msg)
-*
-* Call context:
-* Process thread
-----------------------------------------------------------------*/
-static void p80211req_handlemsg(struct wlandevice *wlandev, struct p80211msg *msg)
+ * p80211req_handlemsg
+ *
+ * p80211 message handler. Primarily looks for messages that
+ * belong to p80211 and then dispatches the appropriate response.
+ * TODO: we don't do anything yet. Once the linuxMIB is better
+ * defined we'll need a get/set handler.
+ *
+ * Arguments:
+ * wlandev WLAN device struct
+ * msg message structure
+ *
+ * Returns:
+ * nothing (any results are set in the status field of the msg)
+ *
+ * Call context:
+ * Process thread
+ *----------------------------------------------------------------
+ */
+static void p80211req_handlemsg(struct wlandevice *wlandev,
+ struct p80211msg *msg)
{
switch (msg->msgcode) {
-
case DIDmsg_lnxreq_hostwep:{
struct p80211msg_lnxreq_hostwep *req =
(struct p80211msg_lnxreq_hostwep *)msg;
@@ -192,8 +195,8 @@ static void p80211req_handlemsg(struct wlandevice *wlandev, struct p80211msg *ms
}
static void p80211req_mibset_mibget(struct wlandevice *wlandev,
- struct p80211msg_dot11req_mibget *mib_msg,
- int isget)
+ struct p80211msg_dot11req_mibget *mib_msg,
+ int isget)
{
struct p80211itemd *mibitem = (struct p80211itemd *)mib_msg->mibattribute.data;
struct p80211pstrd *pstr = (struct p80211pstrd *)mibitem->data;
diff --git a/drivers/staging/wlan-ng/p80211req.h b/drivers/staging/wlan-ng/p80211req.h
index 8d3054c22a05..6c72f59993e0 100644
--- a/drivers/staging/wlan-ng/p80211req.h
+++ b/drivers/staging/wlan-ng/p80211req.h
@@ -1,49 +1,49 @@
/* p80211req.h
-*
-* Request handling functions
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Request handling functions
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ */
#ifndef _LINUX_P80211REQ_H
#define _LINUX_P80211REQ_H
diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c
index 23b183738037..6492ffe59085 100644
--- a/drivers/staging/wlan-ng/p80211wep.c
+++ b/drivers/staging/wlan-ng/p80211wep.c
@@ -1,49 +1,49 @@
/* src/p80211/p80211wep.c
-*
-* WEP encode/decode for P80211.
-*
-* Copyright (C) 2002 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * WEP encode/decode for P80211.
+ *
+ * Copyright (C) 2002 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ */
/*================================================================*/
/* System Includes */
@@ -52,8 +52,6 @@
#include <linux/wireless.h>
#include <linux/random.h>
#include <linux/kernel.h>
-
-
#include "p80211hdr.h"
#include "p80211types.h"
#include "p80211msg.h"
@@ -125,14 +123,13 @@ int wep_change_key(struct wlandevice *wlandev, int keynum, u8 *key, int keylen)
return -1;
if (keylen >= MAX_KEYLEN)
return -1;
- if (key == NULL)
+ if (!key)
return -1;
if (keynum < 0)
return -1;
if (keynum >= NUM_WEPKEYS)
return -1;
-
wlandev->wep_keylens[keynum] = keylen;
memcpy(wlandev->wep_keys[keynum], key, keylen);
@@ -176,7 +173,6 @@ int wep_decrypt(struct wlandevice *wlandev, u8 *buf, u32 len, int key_override,
keylen += 3; /* add in IV bytes */
-
/* set up the RC4 state */
for (i = 0; i < 256; i++)
s[i] = i;
@@ -217,8 +213,8 @@ int wep_decrypt(struct wlandevice *wlandev, u8 *buf, u32 len, int key_override,
}
/* encrypts in-place. */
-int wep_encrypt(struct wlandevice *wlandev, u8 *buf, u8 *dst, u32 len, int keynum,
- u8 *iv, u8 *icv)
+int wep_encrypt(struct wlandevice *wlandev, u8 *buf,
+ u8 *dst, u32 len, int keynum, u8 *iv, u8 *icv)
{
u32 i, j, k, crc, keylen;
u8 s[256], key[64];
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 96aa21188669..2e349f87e738 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -1,49 +1,49 @@
/* from src/prism2/download/prism2dl.c
-*
-* utility for downloading prism2 images moved into kernelspace
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * utility for downloading prism2 images moved into kernelspace
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ */
/*================================================================*/
/* System Includes */
@@ -124,7 +124,7 @@ struct imgchunk {
/* Data records */
static unsigned int ns3data;
-static struct s3datarec s3data[S3DATA_MAX];
+static struct s3datarec *s3data;
/* Plug records */
static unsigned int ns3plug;
@@ -161,7 +161,7 @@ static struct hfa384x_caplevel priid;
/* Local Function Declarations */
static int prism2_fwapply(const struct ihex_binrec *rfptr,
-struct wlandevice *wlandev);
+ struct wlandevice *wlandev);
static int read_fwfile(const struct ihex_binrec *rfptr);
@@ -172,13 +172,15 @@ static int read_cardpda(struct pda *pda, struct wlandevice *wlandev);
static int mkpdrlist(struct pda *pda);
static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks,
- struct s3plugrec *s3plug, unsigned int ns3plug, struct pda *pda);
+ struct s3plugrec *s3plug, unsigned int ns3plug,
+ struct pda *pda);
static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks,
- struct s3crcrec *s3crc, unsigned int ns3crc);
+ struct s3crcrec *s3crc, unsigned int ns3crc);
static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk,
- unsigned int nfchunks);
+ unsigned int nfchunks);
+
static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks);
static void free_srecs(void);
@@ -189,30 +191,31 @@ static int validate_identity(void);
/* Function Definitions */
/*----------------------------------------------------------------
-* prism2_fwtry
-*
-* Try and get firmware into memory
-*
-* Arguments:
-* udev usb device structure
-* wlandev wlan device structure
-*
-* Returns:
-* 0 - success
-* ~0 - failure
-----------------------------------------------------------------*/
+ * prism2_fwtry
+ *
+ * Try and get firmware into memory
+ *
+ * Arguments:
+ * udev usb device structure
+ * wlandev wlan device structure
+ *
+ * Returns:
+ * 0 - success
+ * ~0 - failure
+ *----------------------------------------------------------------
+ */
static int prism2_fwtry(struct usb_device *udev, struct wlandevice *wlandev)
{
const struct firmware *fw_entry = NULL;
netdev_info(wlandev->netdev, "prism2_usb: Checking for firmware %s\n",
- PRISM2_USB_FWFILE);
+ PRISM2_USB_FWFILE);
if (request_ihex_firmware(&fw_entry,
PRISM2_USB_FWFILE, &udev->dev) != 0) {
netdev_info(wlandev->netdev,
- "prism2_usb: Firmware not available, but not essential\n");
+ "prism2_usb: Firmware not available, but not essential\n");
netdev_info(wlandev->netdev,
- "prism2_usb: can continue to use card anyway.\n");
+ "prism2_usb: can continue to use card anyway.\n");
return 1;
}
@@ -226,18 +229,19 @@ static int prism2_fwtry(struct usb_device *udev, struct wlandevice *wlandev)
}
/*----------------------------------------------------------------
-* prism2_fwapply
-*
-* Apply the firmware loaded into memory
-*
-* Arguments:
-* rfptr firmware image in kernel memory
-* wlandev device
-*
-* Returns:
-* 0 - success
-* ~0 - failure
-----------------------------------------------------------------*/
+ * prism2_fwapply
+ *
+ * Apply the firmware loaded into memory
+ *
+ * Arguments:
+ * rfptr firmware image in kernel memory
+ * wlandev device
+ *
+ * Returns:
+ * 0 - success
+ * ~0 - failure
+ *----------------------------------------------------------------
+ */
static int prism2_fwapply(const struct ihex_binrec *rfptr,
struct wlandevice *wlandev)
{
@@ -248,7 +252,12 @@ static int prism2_fwapply(const struct ihex_binrec *rfptr,
/* Initialize the data structures */
ns3data = 0;
- memset(s3data, 0, sizeof(s3data));
+ s3data = kcalloc(S3DATA_MAX, sizeof(*s3data), GFP_KERNEL);
+ if (!s3data) {
+ result = -ENOMEM;
+ goto out;
+ }
+
ns3plug = 0;
memset(s3plug, 0, sizeof(s3plug));
ns3crc = 0;
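This hunk replaces the large static s3data[S3DATA_MAX] array with a heap allocation in prism2_fwapply(); the matching kfree() appears in free_srecs() later in this diff. A minimal sketch of the kcalloc()/kfree() pairing, with a hypothetical record type:

	struct s3rec *recs;

	recs = kcalloc(S3DATA_MAX, sizeof(*recs), GFP_KERNEL);	/* zeroed array */
	if (!recs)
		return -ENOMEM;

	/* ... fill and use recs[0..S3DATA_MAX - 1] ... */

	kfree(recs);	/* release once the records are no longer needed */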
@@ -372,24 +381,25 @@ out:
}
/*----------------------------------------------------------------
-* crcimage
-*
-* Adds a CRC16 in the two bytes prior to each block identified by
-* an S3 CRC record. Currently, we don't actually do a CRC we just
-* insert the value 0xC0DE in hfa384x order.
-*
-* Arguments:
-* fchunk Array of image chunks
-* nfchunks Number of image chunks
-* s3crc Array of crc records
-* ns3crc Number of crc records
-*
-* Returns:
-* 0 success
-* ~0 failure
-----------------------------------------------------------------*/
+ * crcimage
+ *
+ * Adds a CRC16 in the two bytes prior to each block identified by
+ * an S3 CRC record. Currently, we don't actually do a CRC; we just
+ * insert the value 0xC0DE in hfa384x order.
+ *
+ * Arguments:
+ * fchunk Array of image chunks
+ * nfchunks Number of image chunks
+ * s3crc Array of crc records
+ * ns3crc Number of crc records
+ *
+ * Returns:
+ * 0 success
+ * ~0 failure
+ *----------------------------------------------------------------
+ */
static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks,
- struct s3crcrec *s3crc, unsigned int ns3crc)
+ struct s3crcrec *s3crc, unsigned int ns3crc)
{
int result = 0;
int i;
@@ -433,22 +443,22 @@ static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks,
dest = fchunk[c].data + chunkoff;
*dest = 0xde;
*(dest + 1) = 0xc0;
-
}
return result;
}
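The two byte stores above write the 0xC0DE placeholder low byte first, which is what the comment means by "hfa384x order" (little-endian). An equivalent way to express this with the kernel's unaligned little-endian helper would be the following, though the patch keeps the explicit byte stores:

	put_unaligned_le16(0xc0de, dest);	/* stores 0xde then 0xc0 */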
/*----------------------------------------------------------------
-* free_chunks
-*
-* Clears the chunklist data structures in preparation for a new file.
-*
-* Arguments:
-* none
-*
-* Returns:
-* nothing
-----------------------------------------------------------------*/
+ * free_chunks
+ *
+ * Clears the chunklist data structures in preparation for a new file.
+ *
+ * Arguments:
+ * none
+ *
+ * Returns:
+ * nothing
+ *----------------------------------------------------------------
+ */
static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks)
{
int i;
@@ -458,24 +468,24 @@ static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks)
*nfchunks = 0;
memset(fchunk, 0, sizeof(*fchunk));
-
}
/*----------------------------------------------------------------
-* free_srecs
-*
-* Clears the srec data structures in preparation for a new file.
-*
-* Arguments:
-* none
-*
-* Returns:
-* nothing
-----------------------------------------------------------------*/
+ * free_srecs
+ *
+ * Clears the srec data structures in preparation for a new file.
+ *
+ * Arguments:
+ * none
+ *
+ * Returns:
+ * nothing
+ *----------------------------------------------------------------
+ */
static void free_srecs(void)
{
ns3data = 0;
- memset(s3data, 0, sizeof(s3data));
+ kfree(s3data);
ns3plug = 0;
memset(s3plug, 0, sizeof(s3plug));
ns3crc = 0;
@@ -486,19 +496,20 @@ static void free_srecs(void)
}
/*----------------------------------------------------------------
-* mkimage
-*
-* Scans the currently loaded set of S records for data residing
-* in contiguous memory regions. Each contiguous region is then
-* made into a 'chunk'. This function assumes that we're building
-* a new chunk list. Assumes the s3data items are in sorted order.
-*
-* Arguments: none
-*
-* Returns:
-* 0 - success
-* ~0 - failure (probably an errno)
-----------------------------------------------------------------*/
+ * mkimage
+ *
+ * Scans the currently loaded set of S records for data residing
+ * in contiguous memory regions. Each contiguous region is then
+ * made into a 'chunk'. This function assumes that we're building
+ * a new chunk list. Assumes the s3data items are in sorted order.
+ *
+ * Arguments: none
+ *
+ * Returns:
+ * 0 - success
+ * ~0 - failure (probably an errno)
+ *----------------------------------------------------------------
+ */
static int mkimage(struct imgchunk *clist, unsigned int *ccnt)
{
int result = 0;
@@ -577,19 +588,20 @@ static int mkimage(struct imgchunk *clist, unsigned int *ccnt)
}
/*----------------------------------------------------------------
-* mkpdrlist
-*
-* Reads a raw PDA and builds an array of pdrec_t structures.
-*
-* Arguments:
-* pda buffer containing raw PDA bytes
-* pdrec ptr to an array of pdrec_t's. Will be filled on exit.
-* nrec ptr to a variable that will contain the count of PDRs
-*
-* Returns:
-* 0 - success
-* ~0 - failure (probably an errno)
-----------------------------------------------------------------*/
+ * mkpdrlist
+ *
+ * Reads a raw PDA and builds an array of pdrec_t structures.
+ *
+ * Arguments:
+ * pda buffer containing raw PDA bytes
+ * pdrec ptr to an array of pdrec_t's. Will be filled on exit.
+ * nrec ptr to a variable that will contain the count of PDRs
+ *
+ * Returns:
+ * 0 - success
+ * ~0 - failure (probably an errno)
+ *----------------------------------------------------------------
+ */
static int mkpdrlist(struct pda *pda)
{
u16 *pda16 = (u16 *)pda->buf;
@@ -599,7 +611,7 @@ static int mkpdrlist(struct pda *pda)
curroff = 0;
while (curroff < (HFA384x_PDA_LEN_MAX / 2 - 1) &&
le16_to_cpu(pda16[curroff + 1]) != HFA384x_PDR_END_OF_PDA) {
- pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&(pda16[curroff]);
+ pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&pda16[curroff];
if (le16_to_cpu(pda->rec[pda->nrec]->code) ==
HFA384x_PDR_NICID) {
@@ -631,37 +643,38 @@ static int mkpdrlist(struct pda *pda)
(pda->nrec)++;
curroff += le16_to_cpu(pda16[curroff]) + 1;
-
}
if (curroff >= (HFA384x_PDA_LEN_MAX / 2 - 1)) {
pr_err("no end record found or invalid lengths in PDR data, exiting. %x %d\n",
curroff, pda->nrec);
return 1;
}
- pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&(pda16[curroff]);
+ pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&pda16[curroff];
(pda->nrec)++;
return 0;
}
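A minimal standalone sketch of the PDR walk done by mkpdrlist() above, assuming the usual PDA layout of little-endian 16-bit words where each record is a length word, a code word, and (len - 1) data words. The sample buffer, the PDR codes and the end-of-PDA sentinel value below are made up for illustration and are not taken from the driver headers.

/* Standalone sketch (not driver code): walk a PDA laid out as 16-bit
 * words, where each PDR is [len][code][len-1 data words] and a
 * hypothetical end code terminates the list.  Host byte order is used
 * here instead of le16_to_cpu() for brevity. */
#include <stdint.h>
#include <stdio.h>

#define PDA_WORDS     16
#define PDR_END_CODE  0x0000u   /* assumed sentinel, illustration only */

int main(void)
{
        /* Tiny hand-built PDA: two records, then an end record. */
        uint16_t pda[PDA_WORDS] = {
                2, 0x0008, 0xaaaa,          /* len=2: code + 1 data word  */
                3, 0x0101, 0x1111, 0x2222,  /* len=3: code + 2 data words */
                1, PDR_END_CODE             /* end-of-PDA record          */
        };
        unsigned int off = 0;

        while (off < PDA_WORDS - 1 && pda[off + 1] != PDR_END_CODE) {
                printf("PDR at word %u: code=0x%04x, %u data word(s)\n",
                       off, (unsigned int)pda[off + 1],
                       (unsigned int)pda[off] - 1);
                off += pda[off] + 1;  /* same stride as the driver loop */
        }
        printf("end record at word %u\n", off);
        return 0;
}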
/*----------------------------------------------------------------
-* plugimage
-*
-* Plugs the given image using the given plug records from the given
-* PDA and filename.
-*
-* Arguments:
-* fchunk Array of image chunks
-* nfchunks Number of image chunks
-* s3plug Array of plug records
-* ns3plug Number of plug records
-* pda Current pda data
-*
-* Returns:
-* 0 success
-* ~0 failure
-----------------------------------------------------------------*/
+ * plugimage
+ *
+ * Plugs the given image using the given plug records from the given
+ * PDA and filename.
+ *
+ * Arguments:
+ * fchunk Array of image chunks
+ * nfchunks Number of image chunks
+ * s3plug Array of plug records
+ * ns3plug Number of plug records
+ * pda Current pda data
+ *
+ * Returns:
+ * 0 success
+ * ~0 failure
+ *----------------------------------------------------------------
+ */
static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks,
- struct s3plugrec *s3plug, unsigned int ns3plug, struct pda *pda)
+ struct s3plugrec *s3plug, unsigned int ns3plug,
+ struct pda *pda)
{
int result = 0;
int i; /* plug index */
@@ -741,31 +754,31 @@ static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks,
memset(dest, 0, s3plug[i].len);
strncpy(dest, PRISM2_USB_FWFILE, s3plug[i].len - 1);
} else { /* plug a PDR */
- memcpy(dest, &(pda->rec[j]->data), s3plug[i].len);
+ memcpy(dest, &pda->rec[j]->data, s3plug[i].len);
}
}
return result;
-
}
/*----------------------------------------------------------------
-* read_cardpda
-*
-* Sends the command for the driver to read the pda from the card
-* named in the device variable. Upon success, the card pda is
-* stored in the "cardpda" variables. Note that the pda structure
-* is considered 'well formed' after this function. That means
-* that the nrecs is valid, the rec array has been set up, and there's
-* a valid PDAEND record in the raw PDA data.
-*
-* Arguments:
-* pda pda structure
-* wlandev device
-*
-* Returns:
-* 0 - success
-* ~0 - failure (probably an errno)
-----------------------------------------------------------------*/
+ * read_cardpda
+ *
+ * Sends the command for the driver to read the pda from the card
+ * named in the device variable. Upon success, the card pda is
+ * stored in the "cardpda" variables. Note that the pda structure
+ * is considered 'well formed' after this function. That means
+ * that the nrecs is valid, the rec array has been set up, and there's
+ * a valid PDAEND record in the raw PDA data.
+ *
+ * Arguments:
+ * pda pda structure
+ * wlandev device
+ *
+ * Returns:
+ * 0 - success
+ * ~0 - failure (probably an errno)
+ *----------------------------------------------------------------
+ */
static int read_cardpda(struct pda *pda, struct wlandevice *wlandev)
{
int result = 0;
@@ -802,65 +815,66 @@ static int read_cardpda(struct pda *pda, struct wlandevice *wlandev)
}
/*----------------------------------------------------------------
-* read_fwfile
-*
-* Reads the given fw file which should have been compiled from an srec
-* file. Each record in the fw file will either be a plain data record,
-* a start address record, or other records used for plugging.
-*
-* Note that data records are expected to be sorted into
-* ascending address order in the fw file.
-*
-* Note also that the start address record, originally an S7 record in
-* the srec file, is expected in the fw file to be like a data record but
-* with a certain address to make it identifiable.
-*
-* Here's the SREC format that the fw should have come from:
-* S[37]nnaaaaaaaaddd...dddcc
-*
-* nn - number of bytes starting with the address field
-* aaaaaaaa - address in readable (or big endian) format
-* dd....dd - 0-245 data bytes (two chars per byte)
-* cc - checksum
-*
-* The S7 record's (there should be only one) address value gets
-* converted to an S3 record with address of 0xff400000, with the
-* start address being stored as a 4 byte data word. That address is
-* the start execution address used for RAM downloads.
-*
-* The S3 records have a collection of subformats indicated by the
-* value of aaaaaaaa:
-* 0xff000000 - Plug record, data field format:
-* xxxxxxxxaaaaaaaassssssss
-* x - PDR code number (little endian)
-* a - Address in load image to plug (little endian)
-* s - Length of plug data area (little endian)
-*
-* 0xff100000 - CRC16 generation record, data field format:
-* aaaaaaaassssssssbbbbbbbb
-* a - Start address for CRC calculation (little endian)
-* s - Length of data to calculate over (little endian)
-* b - Boolean, true=write crc, false=don't write
-*
-* 0xff200000 - Info record, data field format:
-* ssssttttdd..dd
-* s - Size in words (little endian)
-* t - Info type (little endian), see #defines and
-* struct s3inforec for details about types.
-* d - (s - 1) little endian words giving the contents of
-* the given info type.
-*
-* 0xff400000 - Start address record, data field format:
-* aaaaaaaa
-* a - Address in load image to plug (little endian)
-*
-* Arguments:
-* record firmware image (ihex record structure) in kernel memory
-*
-* Returns:
-* 0 - success
-* ~0 - failure (probably an errno)
-----------------------------------------------------------------*/
+ * read_fwfile
+ *
+ * Reads the given fw file which should have been compiled from an srec
+ * file. Each record in the fw file will either be a plain data record,
+ * a start address record, or other records used for plugging.
+ *
+ * Note that data records are expected to be sorted into
+ * ascending address order in the fw file.
+ *
+ * Note also that the start address record, originally an S7 record in
+ * the srec file, is expected in the fw file to be like a data record but
+ * with a certain address to make it identifiable.
+ *
+ * Here's the SREC format that the fw should have come from:
+ * S[37]nnaaaaaaaaddd...dddcc
+ *
+ * nn - number of bytes starting with the address field
+ * aaaaaaaa - address in readable (or big endian) format
+ * dd....dd - 0-245 data bytes (two chars per byte)
+ * cc - checksum
+ *
+ * The S7 record's (there should be only one) address value gets
+ * converted to an S3 record with address of 0xff400000, with the
+ * start address being stored as a 4 byte data word. That address is
+ * the start execution address used for RAM downloads.
+ *
+ * The S3 records have a collection of subformats indicated by the
+ * value of aaaaaaaa:
+ * 0xff000000 - Plug record, data field format:
+ * xxxxxxxxaaaaaaaassssssss
+ * x - PDR code number (little endian)
+ * a - Address in load image to plug (little endian)
+ * s - Length of plug data area (little endian)
+ *
+ * 0xff100000 - CRC16 generation record, data field format:
+ * aaaaaaaassssssssbbbbbbbb
+ * a - Start address for CRC calculation (little endian)
+ * s - Length of data to calculate over (little endian)
+ * b - Boolean, true=write crc, false=don't write
+ *
+ * 0xff200000 - Info record, data field format:
+ * ssssttttdd..dd
+ * s - Size in words (little endian)
+ * t - Info type (little endian), see #defines and
+ * struct s3inforec for details about types.
+ * d - (s - 1) little endian words giving the contents of
+ * the given info type.
+ *
+ * 0xff400000 - Start address record, data field format:
+ * aaaaaaaa
+ * a - Address in load image to plug (little endian)
+ *
+ * Arguments:
+ * record firmware image (ihex record structure) in kernel memory
+ *
+ * Returns:
+ * 0 - success
+ * ~0 - failure (probably an errno)
+ *----------------------------------------------------------------
+ */
static int read_fwfile(const struct ihex_binrec *record)
{
int i;
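The plug-record layout described in the comment above (three little-endian 32-bit fields: item code, load address, length) can be decoded with a few shifts. The standalone sketch below does exactly that over a hand-written byte buffer; the buffer contents and field values are invented for illustration and do not come from a real fw file.

/* Standalone sketch (illustration only): decode the data area of a
 * 0xff000000 plug record, i.e. three little-endian 32-bit fields. */
#include <stdint.h>
#include <stdio.h>

static uint32_t get_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
        /* xxxxxxxx aaaaaaaa ssssssss: itemcode=0x0101, addr=0x3000, len=6 */
        const uint8_t data[12] = {
                0x01, 0x01, 0x00, 0x00,
                0x00, 0x30, 0x00, 0x00,
                0x06, 0x00, 0x00, 0x00
        };

        printf("plug: itemcode=0x%08x addr=0x%08x len=%u\n",
               get_le32(&data[0]), get_le32(&data[4]), get_le32(&data[8]));
        return 0;
}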
@@ -872,7 +886,6 @@ static int read_fwfile(const struct ihex_binrec *record)
pr_debug("Reading fw file ...\n");
while (record) {
-
rcnt++;
len = be16_to_cpu(record->len);
@@ -887,8 +900,8 @@ static int read_fwfile(const struct ihex_binrec *record)
case S3ADDR_START:
startaddr = *ptr32;
pr_debug(" S7 start addr, record=%d addr=0x%08x\n",
- rcnt,
- startaddr);
+ rcnt,
+ startaddr);
break;
case S3ADDR_PLUG:
s3plug[ns3plug].itemcode = *ptr32;
@@ -896,10 +909,10 @@ static int read_fwfile(const struct ihex_binrec *record)
s3plug[ns3plug].len = *(ptr32 + 2);
pr_debug(" S3 plugrec, record=%d itemcode=0x%08x addr=0x%08x len=%d\n",
- rcnt,
- s3plug[ns3plug].itemcode,
- s3plug[ns3plug].addr,
- s3plug[ns3plug].len);
+ rcnt,
+ s3plug[ns3plug].itemcode,
+ s3plug[ns3plug].addr,
+ s3plug[ns3plug].len);
ns3plug++;
if (ns3plug == S3PLUG_MAX) {
@@ -913,10 +926,10 @@ static int read_fwfile(const struct ihex_binrec *record)
s3crc[ns3crc].dowrite = *(ptr32 + 2);
pr_debug(" S3 crcrec, record=%d addr=0x%08x len=%d write=0x%08x\n",
- rcnt,
- s3crc[ns3crc].addr,
- s3crc[ns3crc].len,
- s3crc[ns3crc].dowrite);
+ rcnt,
+ s3crc[ns3crc].addr,
+ s3crc[ns3crc].len,
+ s3crc[ns3crc].dowrite);
ns3crc++;
if (ns3crc == S3CRC_MAX) {
pr_err("S3 crcrec limit reached - aborting\n");
@@ -928,16 +941,16 @@ static int read_fwfile(const struct ihex_binrec *record)
s3info[ns3info].type = *(ptr16 + 1);
pr_debug(" S3 inforec, record=%d len=0x%04x type=0x%04x\n",
- rcnt,
- s3info[ns3info].len,
- s3info[ns3info].type);
+ rcnt,
+ s3info[ns3info].len,
+ s3info[ns3info].type);
if (((s3info[ns3info].len - 1) * sizeof(u16)) >
sizeof(s3info[ns3info].info)) {
pr_err("S3 inforec length too long - aborting\n");
return 1;
}
- tmpinfo = (u16 *)&(s3info[ns3info].info.version);
+ tmpinfo = (u16 *)&s3info[ns3info].info.version;
pr_debug(" info=");
for (i = 0; i < s3info[ns3info].len - 1; i++) {
tmpinfo[i] = *(ptr16 + 2 + i);
@@ -968,22 +981,23 @@ static int read_fwfile(const struct ihex_binrec *record)
}
/*----------------------------------------------------------------
-* writeimage
-*
-* Takes the chunks, builds p80211 messages and sends them down
-* to the driver for writing to the card.
-*
-* Arguments:
-* wlandev device
-* fchunk Array of image chunks
-* nfchunks Number of image chunks
-*
-* Returns:
-* 0 success
-* ~0 failure
-----------------------------------------------------------------*/
+ * writeimage
+ *
+ * Takes the chunks, builds p80211 messages and sends them down
+ * to the driver for writing to the card.
+ *
+ * Arguments:
+ * wlandev device
+ * fchunk Array of image chunks
+ * nfchunks Number of image chunks
+ *
+ * Returns:
+ * 0 success
+ * ~0 failure
+ *----------------------------------------------------------------
+ */
static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk,
- unsigned int nfchunks)
+ unsigned int nfchunks)
{
int result = 0;
struct p80211msg_p2req_ramdl_state *rstmsg;
@@ -1099,7 +1113,6 @@ static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk,
result = 1;
goto free_result;
}
-
}
}
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 170de1c9eac4..c558ad656c49 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -1,61 +1,61 @@
/* src/prism2/driver/prism2mgmt.c
-*
-* Management request handler functions.
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* The functions in this file handle management requests sent from
-* user mode.
-*
-* Most of these functions have two separate blocks of code that are
-* conditional on whether this is a station or an AP. This is used
-* to separate out the STA and AP responses to these management primitives.
-* It's a choice (good, bad, indifferent?) to have the code in the same
-* place so it's clear that the same primitive is implemented in both
-* cases but has different behavior.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Management request handler functions.
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * The functions in this file handle management requests sent from
+ * user mode.
+ *
+ * Most of these functions have two separate blocks of code that are
+ * conditional on whether this is a station or an AP. This is used
+ * to separate out the STA and AP responses to these management primitives.
+ * It's a choice (good, bad, indifferent?) to have the code in the same
+ * place so it's clear that the same primitive is implemented in both
+ * cases but has different behavior.
+ *
+ * --------------------------------------------------------------------
+ */
#include <linux/if_arp.h>
#include <linux/module.h>
@@ -84,35 +84,36 @@
#include "prism2mgmt.h"
/* Converts 802.11 format rate specifications to prism2 */
-#define p80211rate_to_p2bit(n) ((((n)&~BIT(7)) == 2) ? BIT(0) : \
- (((n)&~BIT(7)) == 4) ? BIT(1) : \
- (((n)&~BIT(7)) == 11) ? BIT(2) : \
- (((n)&~BIT(7)) == 22) ? BIT(3) : 0)
+#define p80211rate_to_p2bit(n) ((((n) & ~BIT(7)) == 2) ? BIT(0) : \
+ (((n) & ~BIT(7)) == 4) ? BIT(1) : \
+ (((n) & ~BIT(7)) == 11) ? BIT(2) : \
+ (((n) & ~BIT(7)) == 22) ? BIT(3) : 0)
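The macro just above folds an 802.11 rate code (in 500 kbit/s units, with BIT(7) used as the basic-rate flag) down to a single Prism2 rate bit. The userspace check below restates the same macro together with a local BIT() so the mapping can be printed; only the sample rate values are made up.

/* Standalone sketch: 802.11 rate code -> Prism2 rate bit. */
#include <stdio.h>

#define BIT(n) (1u << (n))
#define p80211rate_to_p2bit(n) ((((n) & ~BIT(7)) == 2) ? BIT(0) : \
                                (((n) & ~BIT(7)) == 4) ? BIT(1) : \
                                (((n) & ~BIT(7)) == 11) ? BIT(2) : \
                                (((n) & ~BIT(7)) == 22) ? BIT(3) : 0)

int main(void)
{
        unsigned int rates[] = { 2, 4 | BIT(7), 11, 22, 12 };
        unsigned int i;

        for (i = 0; i < 5; i++)
                printf("rate code 0x%02x -> p2 bits 0x%x\n",
                       rates[i], p80211rate_to_p2bit(rates[i]));
        return 0;  /* prints 0x1, 0x2, 0x4, 0x8, 0x0 (unknown rate) */
}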
/*----------------------------------------------------------------
-* prism2mgmt_scan
-*
-* Initiate a scan for BSSs.
-*
-* This function corresponds to MLME-scan.request and part of
-* MLME-scan.confirm. As far as I can tell in the standard, there
-* are no restrictions on when a scan.request may be issued. We have
-* to handle in whatever state the driver/MAC happen to be.
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-* interrupt
-----------------------------------------------------------------*/
+ * prism2mgmt_scan
+ *
+ * Initiate a scan for BSSs.
+ *
+ * This function corresponds to MLME-scan.request and part of
+ * MLME-scan.confirm. As far as I can tell in the standard, there
+ * are no restrictions on when a scan.request may be issued. We have
+ * to handle in whatever state the driver/MAC happen to be.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ * interrupt
+ *----------------------------------------------------------------
+ */
int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
{
int result = 0;
@@ -122,7 +123,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
int i, timeout;
int istmpenable = 0;
- struct hfa384x_HostScanRequest_data scanreq;
+ struct hfa384x_host_scan_request_data scanreq;
/* gatekeeper check */
if (HFA384x_FIRMWARE_VERSION(hw->ident_sta_fw.major,
@@ -184,7 +185,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
/* set up the txrate to be 2MBPS. Should be fastest basicrate... */
word = HFA384x_RATEBIT_2;
- scanreq.txRate = cpu_to_le16(word);
+ scanreq.tx_rate = cpu_to_le16(word);
/* set up the channel list */
word = 0;
@@ -196,7 +197,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
/* channel 1 is BIT 0 ... channel 14 is BIT 13 */
word |= (1 << (channel - 1));
}
- scanreq.channelList = cpu_to_le16(word);
+ scanreq.channel_list = cpu_to_le16(word);
/* set up the ssid, if present. */
scanreq.ssid.len = cpu_to_le16(msg->ssid.data.len);
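The loop in the hunk above builds the host-scan channel bitmap with channel 1 in bit 0 through channel 14 in bit 13. A short standalone sketch of the same shift, over a made-up channel list:

/* Standalone sketch: channel list -> scan channel bitmap. */
#include <stdio.h>

int main(void)
{
        const int channels[] = { 1, 6, 11 };  /* sample channels only */
        unsigned int word = 0;
        int i;

        for (i = 0; i < 3; i++)
                word |= 1u << (channels[i] - 1);  /* ch 1 -> bit 0 */

        printf("channel bitmap = 0x%04x\n", word);  /* 0x0421 */
        return 0;
}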
@@ -292,7 +293,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
result = hfa384x_drvr_setconfig(hw,
HFA384x_RID_HOSTSCAN, &scanreq,
- sizeof(struct hfa384x_HostScanRequest_data));
+ sizeof(scanreq));
if (result) {
netdev_err(wlandev->netdev,
"setconfig(SCANREQUEST) failed. result=%d\n",
@@ -347,31 +348,32 @@ exit:
}
/*----------------------------------------------------------------
-* prism2mgmt_scan_results
-*
-* Retrieve the BSS description for one of the BSSs identified in
-* a scan.
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-* interrupt
-----------------------------------------------------------------*/
+ * prism2mgmt_scan_results
+ *
+ * Retrieve the BSS description for one of the BSSs identified in
+ * a scan.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ * interrupt
+ *----------------------------------------------------------------
+ */
int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
{
int result = 0;
struct p80211msg_dot11req_scan_results *req;
struct hfa384x *hw = wlandev->priv;
- struct hfa384x_HScanResultSub *item = NULL;
+ struct hfa384x_hscan_result_sub *item = NULL;
int count;
@@ -425,8 +427,8 @@ int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
#define REQBASICRATE(N) \
do { \
if ((count >= N) && DOT11_RATE5_ISBASIC_GET( \
- item->supprates[(N)-1])) { \
- req->basicrate ## N .data = item->supprates[(N)-1]; \
+ item->supprates[(N) - 1])) { \
+ req->basicrate ## N .data = item->supprates[(N) - 1]; \
req->basicrate ## N .status = \
P80211ENUM_msgitem_status_data_ok; \
} \
@@ -444,7 +446,7 @@ int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
#define REQSUPPRATE(N) \
do { \
if (count >= N) { \
- req->supprate ## N .data = item->supprates[(N)-1]; \
+ req->supprate ## N .data = item->supprates[(N) - 1]; \
req->supprate ## N .status = \
P80211ENUM_msgitem_status_data_ok; \
} \
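Both REQBASICRATE() and REQSUPPRATE() above rely on '##' token pasting to select the numbered field of the results message. The standalone sketch below shows the same pasting against a simplified stand-in struct; the struct, field names and macro here are illustrative and are not the real p80211 message layout.

/* Standalone sketch: '## N' pasting picks a numbered field. */
#include <stdio.h>

struct fakereq {
        int basicrate1;
        int basicrate2;
};

#define SETBASICRATE(req, N, val) ((req)->basicrate ## N = (val))

int main(void)
{
        struct fakereq req = { 0, 0 };

        SETBASICRATE(&req, 1, 2);   /* expands to (&req)->basicrate1 = 2 */
        SETBASICRATE(&req, 2, 4);   /* expands to (&req)->basicrate2 = 4 */
        printf("basicrate1=%d basicrate2=%d\n",
               req.basicrate1, req.basicrate2);
        return 0;
}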
@@ -507,24 +509,25 @@ exit:
}
/*----------------------------------------------------------------
-* prism2mgmt_start
-*
-* Start a BSS. Any station can do this for IBSS, only AP for ESS.
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-* interrupt
-----------------------------------------------------------------*/
+ * prism2mgmt_start
+ *
+ * Start a BSS. Any station can do this for IBSS, only AP for ESS.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ * interrupt
+ *----------------------------------------------------------------
+ */
int prism2mgmt_start(struct wlandevice *wlandev, void *msgp)
{
int result = 0;
@@ -580,7 +583,7 @@ int prism2mgmt_start(struct wlandevice *wlandev, void *msgp)
/* beacon period */
word = msg->beaconperiod.data;
- result = hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFAPBCNint, word);
+ result = hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFAPBCNINT, word);
if (result) {
netdev_err(wlandev->netdev,
"Failed to set beacon period=%d.\n", word);
@@ -689,23 +692,24 @@ done:
}
/*----------------------------------------------------------------
-* prism2mgmt_readpda
-*
-* Collect the PDA data and put it in the message.
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-----------------------------------------------------------------*/
+ * prism2mgmt_readpda
+ *
+ * Collect the PDA data and put it in the message.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ *----------------------------------------------------------------
+ */
int prism2mgmt_readpda(struct wlandevice *wlandev, void *msgp)
{
struct hfa384x *hw = wlandev->priv;
@@ -748,30 +752,31 @@ int prism2mgmt_readpda(struct wlandevice *wlandev, void *msgp)
}
/*----------------------------------------------------------------
-* prism2mgmt_ramdl_state
-*
-* Establishes the beginning/end of a card RAM download session.
-*
-* It is expected that the ramdl_write() function will be called
-* one or more times between the 'enable' and 'disable' calls to
-* this function.
-*
-* Note: This function should not be called when a mac comm port
-* is active.
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-----------------------------------------------------------------*/
+ * prism2mgmt_ramdl_state
+ *
+ * Establishes the beginning/end of a card RAM download session.
+ *
+ * It is expected that the ramdl_write() function will be called
+ * one or more times between the 'enable' and 'disable' calls to
+ * this function.
+ *
+ * Note: This function should not be called when a mac comm port
+ * is active.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ *----------------------------------------------------------------
+ */
int prism2mgmt_ramdl_state(struct wlandevice *wlandev, void *msgp)
{
struct hfa384x *hw = wlandev->priv;
@@ -808,25 +813,26 @@ int prism2mgmt_ramdl_state(struct wlandevice *wlandev, void *msgp)
}
/*----------------------------------------------------------------
-* prism2mgmt_ramdl_write
-*
-* Writes a buffer to the card RAM using the download state. This
-* is for writing code to card RAM. To just read or write raw data
-* use the aux functions.
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-----------------------------------------------------------------*/
+ * prism2mgmt_ramdl_write
+ *
+ * Writes a buffer to the card RAM using the download state. This
+ * is for writing code to card RAM. To just read or write raw data
+ * use the aux functions.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ *----------------------------------------------------------------
+ */
int prism2mgmt_ramdl_write(struct wlandevice *wlandev, void *msgp)
{
struct hfa384x *hw = wlandev->priv;
@@ -864,30 +870,31 @@ int prism2mgmt_ramdl_write(struct wlandevice *wlandev, void *msgp)
}
/*----------------------------------------------------------------
-* prism2mgmt_flashdl_state
-*
-* Establishes the beginning/end of a card Flash download session.
-*
-* It is expected that the flashdl_write() function will be called
-* one or more times between the 'enable' and 'disable' calls to
-* this function.
-*
-* Note: This function should not be called when a mac comm port
-* is active.
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-----------------------------------------------------------------*/
+ * prism2mgmt_flashdl_state
+ *
+ * Establishes the beginning/end of a card Flash download session.
+ *
+ * It is expected that the flashdl_write() function will be called
+ * one or more times between the 'enable' and 'disable' calls to
+ * this function.
+ *
+ * Note: This function should not be called when a mac comm port
+ * is active.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ *----------------------------------------------------------------
+ */
int prism2mgmt_flashdl_state(struct wlandevice *wlandev, void *msgp)
{
int result = 0;
@@ -942,23 +949,24 @@ int prism2mgmt_flashdl_state(struct wlandevice *wlandev, void *msgp)
}
/*----------------------------------------------------------------
-* prism2mgmt_flashdl_write
-*
-*
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-----------------------------------------------------------------*/
+ * prism2mgmt_flashdl_write
+ *
+ *
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ *----------------------------------------------------------------
+ */
int prism2mgmt_flashdl_write(struct wlandevice *wlandev, void *msgp)
{
struct hfa384x *hw = wlandev->priv;
@@ -1001,24 +1009,25 @@ int prism2mgmt_flashdl_write(struct wlandevice *wlandev, void *msgp)
}
/*----------------------------------------------------------------
-* prism2mgmt_autojoin
-*
-* Associate with an ESS.
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-* interrupt
-----------------------------------------------------------------*/
+ * prism2mgmt_autojoin
+ *
+ * Associate with an ESS.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ * interrupt
+ *----------------------------------------------------------------
+ */
int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp)
{
struct hfa384x *hw = wlandev->priv;
@@ -1072,24 +1081,25 @@ int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp)
}
/*----------------------------------------------------------------
-* prism2mgmt_wlansniff
-*
-* Start or stop sniffing.
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-* interrupt
-----------------------------------------------------------------*/
+ * prism2mgmt_wlansniff
+ *
+ * Start or stop sniffing.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ * interrupt
+ *----------------------------------------------------------------
+ */
int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
{
int result = 0;
diff --git a/drivers/staging/wlan-ng/prism2mgmt.h b/drivers/staging/wlan-ng/prism2mgmt.h
index cc1ac7a60dfe..88b979ff68b3 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.h
+++ b/drivers/staging/wlan-ng/prism2mgmt.h
@@ -1,61 +1,61 @@
/* prism2mgmt.h
-*
-* Declares the mgmt command handler functions
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file contains the constants and data structures for interaction
-* with the hfa384x Wireless LAN (WLAN) Media Access Controller (MAC).
-* The hfa384x is a portion of the Harris PRISM(tm) WLAN chipset.
-*
-* [Implementation and usage notes]
-*
-* [References]
-* CW10 Programmer's Manual v1.5
-* IEEE 802.11 D10.0
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Declares the mgmt command handler functions
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file contains the constants and data structures for interaction
+ * with the hfa384x Wireless LAN (WLAN) Media Access Controller (MAC).
+ * The hfa384x is a portion of the Harris PRISM(tm) WLAN chipset.
+ *
+ * [Implementation and usage notes]
+ *
+ * [References]
+ * CW10 Programmer's Manual v1.5
+ * IEEE 802.11 D10.0
+ *
+ * --------------------------------------------------------------------
+ */
#ifndef _PRISM2MGMT_H
#define _PRISM2MGMT_H
@@ -65,7 +65,8 @@ extern int prism2_reset_settletime;
u32 prism2sta_ifstate(struct wlandevice *wlandev, u32 ifstate);
-void prism2sta_ev_info(struct wlandevice *wlandev, struct hfa384x_InfFrame *inf);
+void prism2sta_ev_info(struct wlandevice *wlandev,
+ struct hfa384x_inf_frame *inf);
void prism2sta_ev_txexc(struct wlandevice *wlandev, u16 status);
void prism2sta_ev_tx(struct wlandevice *wlandev, u16 status);
void prism2sta_ev_alloc(struct wlandevice *wlandev);
@@ -83,9 +84,11 @@ int prism2mgmt_flashdl_write(struct wlandevice *wlandev, void *msgp);
int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp);
/*---------------------------------------------------------------
-* conversion functions going between wlan message data types and
-* Prism2 data types
----------------------------------------------------------------*/
+ * conversion functions going between wlan message data types and
+ * Prism2 data types
+ *---------------------------------------------------------------
+ */
+
/* byte area conversion functions*/
void prism2mgmt_bytearea2pstr(u8 *bytearea, struct p80211pstrd *pstr, int len);
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index 63ab6bc88654..8ea6a647d037 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -1,54 +1,54 @@
/* src/prism2/driver/prism2mib.c
-*
-* Management request for mibset/mibget
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* The functions in this file handle the mibset/mibget management
-* functions.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Management request for mibset/mibget
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * The functions in this file handle the mibset/mibget management
+ * functions.
+ *
+ * --------------------------------------------------------------------
+ */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -709,7 +709,7 @@ static int prism2mib_priv(struct mibrec *mib,
switch (mib->did) {
case DIDmib_lnx_lnxConfigTable_lnxRSNAIE:{
- struct hfa384x_WPAData wpa;
+ struct hfa384x_wpa_data wpa;
if (isget) {
hfa384x_drvr_getconfig(hw,
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index e1b4a94292ff..984804b92e05 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -104,32 +104,33 @@ static void prism2sta_reset(struct wlandevice *wlandev);
static int prism2sta_txframe(struct wlandevice *wlandev, struct sk_buff *skb,
union p80211_hdr *p80211_hdr,
struct p80211_metawep *p80211_wep);
-static int prism2sta_mlmerequest(struct wlandevice *wlandev, struct p80211msg *msg);
+static int prism2sta_mlmerequest(struct wlandevice *wlandev,
+ struct p80211msg *msg);
static int prism2sta_getcardinfo(struct wlandevice *wlandev);
static int prism2sta_globalsetup(struct wlandevice *wlandev);
static int prism2sta_setmulticast(struct wlandevice *wlandev,
struct net_device *dev);
static void prism2sta_inf_handover(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
static void prism2sta_inf_tallies(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
static void prism2sta_inf_scanresults(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
static void prism2sta_inf_chinforesults(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
static void prism2sta_inf_linkstatus(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
static void prism2sta_inf_assocstatus(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
static void prism2sta_inf_authreq(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
static void prism2sta_inf_psusercnt(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
/*
* prism2sta_open
@@ -278,7 +279,8 @@ static int prism2sta_txframe(struct wlandevice *wlandev, struct sk_buff *skb,
* Call context:
* process thread
*/
-static int prism2sta_mlmerequest(struct wlandevice *wlandev, struct p80211msg *msg)
+static int prism2sta_mlmerequest(struct wlandevice *wlandev,
+ struct p80211msg *msg)
{
struct hfa384x *hw = wlandev->priv;
@@ -370,9 +372,10 @@ static int prism2sta_mlmerequest(struct wlandevice *wlandev, struct p80211msg *m
qualmsg->noise.status =
P80211ENUM_msgitem_status_data_ok;
- qualmsg->link.data = le16_to_cpu(hw->qual.CQ_currBSS);
- qualmsg->level.data = le16_to_cpu(hw->qual.ASL_currBSS);
- qualmsg->noise.data = le16_to_cpu(hw->qual.ANL_currFC);
+ qualmsg->link.data = le16_to_cpu(hw->qual.cq_curr_bss);
+ qualmsg->level.data =
+ le16_to_cpu(hw->qual.asl_curr_bss);
+ qualmsg->noise.data = le16_to_cpu(hw->qual.anl_curr_fc);
qualmsg->txrate.data = hw->txrate;
break;
@@ -606,8 +609,8 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
hw->ident_nic.minor = le16_to_cpu(hw->ident_nic.minor);
netdev_info(wlandev->netdev, "ident: nic h/w: id=0x%02x %d.%d.%d\n",
- hw->ident_nic.id, hw->ident_nic.major,
- hw->ident_nic.minor, hw->ident_nic.variant);
+ hw->ident_nic.id, hw->ident_nic.major,
+ hw->ident_nic.minor, hw->ident_nic.variant);
/* Primary f/w identity */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRIIDENTITY,
@@ -625,8 +628,8 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
hw->ident_pri_fw.minor = le16_to_cpu(hw->ident_pri_fw.minor);
netdev_info(wlandev->netdev, "ident: pri f/w: id=0x%02x %d.%d.%d\n",
- hw->ident_pri_fw.id, hw->ident_pri_fw.major,
- hw->ident_pri_fw.minor, hw->ident_pri_fw.variant);
+ hw->ident_pri_fw.id, hw->ident_pri_fw.major,
+ hw->ident_pri_fw.minor, hw->ident_pri_fw.variant);
/* Station (Secondary?) f/w identity */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STAIDENTITY,
@@ -639,7 +642,7 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
if (hw->ident_nic.id < 0x8000) {
netdev_err(wlandev->netdev,
- "FATAL: Card is not an Intersil Prism2/2.5/3\n");
+ "FATAL: Card is not an Intersil Prism2/2.5/3\n");
result = -1;
goto failed;
}
@@ -651,19 +654,19 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
hw->ident_sta_fw.minor = le16_to_cpu(hw->ident_sta_fw.minor);
/* strip out the 'special' variant bits */
- hw->mm_mods = hw->ident_sta_fw.variant & (BIT(14) | BIT(15));
- hw->ident_sta_fw.variant &= ~((u16)(BIT(14) | BIT(15)));
+ hw->mm_mods = hw->ident_sta_fw.variant & GENMASK(15, 14);
+ hw->ident_sta_fw.variant &= ~((u16)GENMASK(15, 14));
if (hw->ident_sta_fw.id == 0x1f) {
netdev_info(wlandev->netdev,
- "ident: sta f/w: id=0x%02x %d.%d.%d\n",
- hw->ident_sta_fw.id, hw->ident_sta_fw.major,
- hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
+ "ident: sta f/w: id=0x%02x %d.%d.%d\n",
+ hw->ident_sta_fw.id, hw->ident_sta_fw.major,
+ hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
} else {
netdev_info(wlandev->netdev,
- "ident: ap f/w: id=0x%02x %d.%d.%d\n",
- hw->ident_sta_fw.id, hw->ident_sta_fw.major,
- hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
+ "ident: ap f/w: id=0x%02x %d.%d.%d\n",
+ hw->ident_sta_fw.id, hw->ident_sta_fw.major,
+ hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
netdev_err(wlandev->netdev, "Unsupported Tertiary AP firmware loaded!\n");
goto failed;
}
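The GENMASK(15, 14) introduced in the hunk above is the contiguous mask with bits 14 and 15 set, i.e. the same 0xc000 value as the old BIT(14) | BIT(15). A quick userspace check with simplified macro definitions (the kernel's own GENMASK is written differently but yields the same value for these arguments):

/* Standalone sketch: GENMASK(15, 14) == BIT(14) | BIT(15) == 0xc000. */
#include <stdio.h>

#define BIT(n)        (1u << (n))
#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))  /* simplified */

int main(void)
{
        printf("GENMASK(15, 14)   = 0x%04x\n", GENMASK(15, 14));    /* 0xc000 */
        printf("BIT(14) | BIT(15) = 0x%04x\n", BIT(14) | BIT(15));  /* 0xc000 */
        return 0;
}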
@@ -687,10 +690,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
hw->cap_sup_mfi.top = le16_to_cpu(hw->cap_sup_mfi.top);
netdev_info(wlandev->netdev,
- "MFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
- hw->cap_sup_mfi.role, hw->cap_sup_mfi.id,
- hw->cap_sup_mfi.variant, hw->cap_sup_mfi.bottom,
- hw->cap_sup_mfi.top);
+ "MFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+ hw->cap_sup_mfi.role, hw->cap_sup_mfi.id,
+ hw->cap_sup_mfi.variant, hw->cap_sup_mfi.bottom,
+ hw->cap_sup_mfi.top);
/* Compatibility range, Controller supplier */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CFISUPRANGE,
@@ -711,10 +714,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
hw->cap_sup_cfi.top = le16_to_cpu(hw->cap_sup_cfi.top);
netdev_info(wlandev->netdev,
- "CFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
- hw->cap_sup_cfi.role, hw->cap_sup_cfi.id,
- hw->cap_sup_cfi.variant, hw->cap_sup_cfi.bottom,
- hw->cap_sup_cfi.top);
+ "CFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+ hw->cap_sup_cfi.role, hw->cap_sup_cfi.id,
+ hw->cap_sup_cfi.variant, hw->cap_sup_cfi.bottom,
+ hw->cap_sup_cfi.top);
/* Compatibility range, Primary f/w supplier */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRISUPRANGE,
@@ -735,10 +738,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
hw->cap_sup_pri.top = le16_to_cpu(hw->cap_sup_pri.top);
netdev_info(wlandev->netdev,
- "PRI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
- hw->cap_sup_pri.role, hw->cap_sup_pri.id,
- hw->cap_sup_pri.variant, hw->cap_sup_pri.bottom,
- hw->cap_sup_pri.top);
+ "PRI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+ hw->cap_sup_pri.role, hw->cap_sup_pri.id,
+ hw->cap_sup_pri.variant, hw->cap_sup_pri.bottom,
+ hw->cap_sup_pri.top);
/* Compatibility range, Station f/w supplier */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STASUPRANGE,
@@ -791,10 +794,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
hw->cap_act_pri_cfi.top = le16_to_cpu(hw->cap_act_pri_cfi.top);
netdev_info(wlandev->netdev,
- "PRI-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
- hw->cap_act_pri_cfi.role, hw->cap_act_pri_cfi.id,
- hw->cap_act_pri_cfi.variant, hw->cap_act_pri_cfi.bottom,
- hw->cap_act_pri_cfi.top);
+ "PRI-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+ hw->cap_act_pri_cfi.role, hw->cap_act_pri_cfi.id,
+ hw->cap_act_pri_cfi.variant, hw->cap_act_pri_cfi.bottom,
+ hw->cap_act_pri_cfi.top);
/* Compatibility range, sta f/w actor, CFI supplier */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STA_CFIACTRANGES,
@@ -815,10 +818,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
hw->cap_act_sta_cfi.top = le16_to_cpu(hw->cap_act_sta_cfi.top);
netdev_info(wlandev->netdev,
- "STA-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
- hw->cap_act_sta_cfi.role, hw->cap_act_sta_cfi.id,
- hw->cap_act_sta_cfi.variant, hw->cap_act_sta_cfi.bottom,
- hw->cap_act_sta_cfi.top);
+ "STA-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+ hw->cap_act_sta_cfi.role, hw->cap_act_sta_cfi.id,
+ hw->cap_act_sta_cfi.variant, hw->cap_act_sta_cfi.bottom,
+ hw->cap_act_sta_cfi.top);
/* Compatibility range, sta f/w actor, MFI supplier */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STA_MFIACTRANGES,
@@ -839,10 +842,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
hw->cap_act_sta_mfi.top = le16_to_cpu(hw->cap_act_sta_mfi.top);
netdev_info(wlandev->netdev,
- "STA-MFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
- hw->cap_act_sta_mfi.role, hw->cap_act_sta_mfi.id,
- hw->cap_act_sta_mfi.variant, hw->cap_act_sta_mfi.bottom,
- hw->cap_act_sta_mfi.top);
+ "STA-MFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+ hw->cap_act_sta_mfi.role, hw->cap_act_sta_mfi.id,
+ hw->cap_act_sta_mfi.variant, hw->cap_act_sta_mfi.bottom,
+ hw->cap_act_sta_mfi.top);
/* Serial Number */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_NICSERIALNUMBER,
@@ -920,7 +923,7 @@ static int prism2sta_globalsetup(struct wlandevice *wlandev)
}
static int prism2sta_setmulticast(struct wlandevice *wlandev,
- struct net_device *dev)
+ struct net_device *dev)
{
int result = 0;
struct hfa384x *hw = wlandev->priv;
@@ -962,7 +965,7 @@ exit:
* interrupt
*/
static void prism2sta_inf_handover(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
pr_debug("received infoframe:HANDOVER (unhandled)\n");
}
@@ -985,7 +988,7 @@ static void prism2sta_inf_handover(struct wlandevice *wlandev,
* interrupt
*/
static void prism2sta_inf_tallies(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
struct hfa384x *hw = wlandev->priv;
u16 *src16;
@@ -999,7 +1002,7 @@ static void prism2sta_inf_tallies(struct wlandevice *wlandev,
* record length of the info record.
*/
- cnt = sizeof(struct hfa384x_CommTallies32) / sizeof(u32);
+ cnt = sizeof(struct hfa384x_comm_tallies_32) / sizeof(u32);
if (inf->framelen > 22) {
dst = (u32 *)&hw->tallies;
src32 = (u32 *)&inf->info.commtallies32;
@@ -1031,19 +1034,19 @@ static void prism2sta_inf_tallies(struct wlandevice *wlandev,
* interrupt
*/
static void prism2sta_inf_scanresults(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
struct hfa384x *hw = wlandev->priv;
int nbss;
- struct hfa384x_ScanResult *sr = &(inf->info.scanresult);
+ struct hfa384x_scan_result *sr = &inf->info.scanresult;
int i;
- struct hfa384x_JoinRequest_data joinreq;
+ struct hfa384x_join_request_data joinreq;
int result;
/* Get the number of results, first in bytes, then in results */
nbss = (inf->framelen * sizeof(u16)) -
sizeof(inf->infotype) - sizeof(inf->info.scanresult.scanreason);
- nbss /= sizeof(struct hfa384x_ScanResultSub);
+ nbss /= sizeof(struct hfa384x_scan_result_sub);
/* Print em */
pr_debug("rx scanresults, reason=%d, nbss=%d:\n",
@@ -1064,7 +1067,7 @@ static void prism2sta_inf_scanresults(struct wlandevice *wlandev,
&joinreq, HFA384x_RID_JOINREQUEST_LEN);
if (result) {
netdev_err(wlandev->netdev, "setconfig(joinreq) failed, result=%d\n",
- result);
+ result);
}
}
@@ -1086,7 +1089,7 @@ static void prism2sta_inf_scanresults(struct wlandevice *wlandev,
* interrupt
*/
static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
struct hfa384x *hw = wlandev->priv;
int nbss;
@@ -1099,7 +1102,7 @@ static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
kfree(hw->scanresults);
- hw->scanresults = kmemdup(inf, sizeof(struct hfa384x_InfFrame), GFP_ATOMIC);
+ hw->scanresults = kmemdup(inf, sizeof(*inf), GFP_ATOMIC);
if (nbss == 0)
nbss = -1;
@@ -1127,7 +1130,7 @@ static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
* interrupt
*/
static void prism2sta_inf_chinforesults(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
struct hfa384x *hw = wlandev->priv;
unsigned int i, n;
@@ -1136,8 +1139,8 @@ static void prism2sta_inf_chinforesults(struct wlandevice *wlandev,
le16_to_cpu(inf->info.chinforesult.scanchannels);
for (i = 0, n = 0; i < HFA384x_CHINFORESULT_MAX; i++) {
- struct hfa384x_ChInfoResultSub *result;
- struct hfa384x_ChInfoResultSub *chinforesult;
+ struct hfa384x_ch_info_result_sub *result;
+ struct hfa384x_ch_info_result_sub *chinforesult;
int chan;
if (!(hw->channel_info.results.scanchannels & (1 << i)))
@@ -1179,10 +1182,10 @@ void prism2sta_processing_defer(struct work_struct *data)
/* First let's process the auth frames */
{
struct sk_buff *skb;
- struct hfa384x_InfFrame *inf;
+ struct hfa384x_inf_frame *inf;
while ((skb = skb_dequeue(&hw->authq))) {
- inf = (struct hfa384x_InfFrame *)skb->data;
+ inf = (struct hfa384x_inf_frame *)skb->data;
prism2sta_inf_authreq_defer(wlandev, inf);
}
@@ -1294,7 +1297,7 @@ void prism2sta_processing_defer(struct work_struct *data)
*/
if (wlandev->netdev->type == ARPHRD_ETHER)
netdev_info(wlandev->netdev,
- "linkstatus=DISCONNECTED (unhandled)\n");
+ "linkstatus=DISCONNECTED (unhandled)\n");
wlandev->macmode = WLAN_MACMODE_NONE;
netif_carrier_off(wlandev->netdev);
@@ -1391,7 +1394,7 @@ void prism2sta_processing_defer(struct work_struct *data)
* Disable Transmits, Ignore receives of data frames
*/
if (hw->join_ap && --hw->join_retries > 0) {
- struct hfa384x_JoinRequest_data joinreq;
+ struct hfa384x_join_request_data joinreq;
joinreq = hw->joinreq;
/* Send the join request */
@@ -1415,7 +1418,7 @@ void prism2sta_processing_defer(struct work_struct *data)
default:
/* This is bad, IO port problems? */
netdev_warn(wlandev->netdev,
- "unknown linkstatus=0x%02x\n", hw->link_status);
+ "unknown linkstatus=0x%02x\n", hw->link_status);
return;
}
@@ -1440,7 +1443,7 @@ void prism2sta_processing_defer(struct work_struct *data)
* interrupt
*/
static void prism2sta_inf_linkstatus(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
struct hfa384x *hw = wlandev->priv;
@@ -1468,10 +1471,10 @@ static void prism2sta_inf_linkstatus(struct wlandevice *wlandev,
* interrupt
*/
static void prism2sta_inf_assocstatus(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
struct hfa384x *hw = wlandev->priv;
- struct hfa384x_AssocStatus rec;
+ struct hfa384x_assoc_status rec;
int i;
memcpy(&rec, &inf->info.assocstatus, sizeof(rec));
@@ -1529,7 +1532,7 @@ static void prism2sta_inf_assocstatus(struct wlandevice *wlandev,
*
*/
static void prism2sta_inf_authreq(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
struct hfa384x *hw = wlandev->priv;
struct sk_buff *skb;
@@ -1544,10 +1547,10 @@ static void prism2sta_inf_authreq(struct wlandevice *wlandev,
}
static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
struct hfa384x *hw = wlandev->priv;
- struct hfa384x_authenticateStation_data rec;
+ struct hfa384x_authenticate_station_data rec;
int i, added, result, cnt;
u8 *addr;
@@ -1718,7 +1721,7 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
* interrupt
*/
static void prism2sta_inf_psusercnt(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
struct hfa384x *hw = wlandev->priv;
@@ -1742,7 +1745,8 @@ static void prism2sta_inf_psusercnt(struct wlandevice *wlandev,
* Call context:
* interrupt
*/
-void prism2sta_ev_info(struct wlandevice *wlandev, struct hfa384x_InfFrame *inf)
+void prism2sta_ev_info(struct wlandevice *wlandev,
+ struct hfa384x_inf_frame *inf)
{
inf->infotype = le16_to_cpu(inf->infotype);
/* Dispatch */
@@ -1785,7 +1789,7 @@ void prism2sta_ev_info(struct wlandevice *wlandev, struct hfa384x_InfFrame *inf)
break;
default:
netdev_warn(wlandev->netdev,
- "Unknown info type=0x%02x\n", inf->infotype);
+ "Unknown info type=0x%02x\n", inf->infotype);
break;
}
}
@@ -1859,32 +1863,32 @@ void prism2sta_ev_alloc(struct wlandevice *wlandev)
}
/*
-* create_wlan
-*
-* Called at module init time. This creates the struct wlandevice structure
-* and initializes it with relevant bits.
-*
-* Arguments:
-* none
-*
-* Returns:
-* the created struct wlandevice structure.
-*
-* Side effects:
-* also allocates the priv/hw structures.
-*
-* Call context:
-* process thread
-*
-*/
+ * create_wlan
+ *
+ * Called at module init time. This creates the struct wlandevice structure
+ * and initializes it with relevant bits.
+ *
+ * Arguments:
+ * none
+ *
+ * Returns:
+ * the created struct wlandevice structure.
+ *
+ * Side effects:
+ * also allocates the priv/hw structures.
+ *
+ * Call context:
+ * process thread
+ *
+ */
static struct wlandevice *create_wlan(void)
{
struct wlandevice *wlandev = NULL;
struct hfa384x *hw = NULL;
/* Alloc our structures */
- wlandev = kzalloc(sizeof(struct wlandevice), GFP_KERNEL);
- hw = kzalloc(sizeof(struct hfa384x), GFP_KERNEL);
+ wlandev = kzalloc(sizeof(*wlandev), GFP_KERNEL);
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
if (!wlandev || !hw) {
kfree(wlandev);
@@ -1943,9 +1947,9 @@ void prism2sta_commsqual_defer(struct work_struct *data)
}
pr_debug("commsqual %d %d %d\n",
- le16_to_cpu(hw->qual.CQ_currBSS),
- le16_to_cpu(hw->qual.ASL_currBSS),
- le16_to_cpu(hw->qual.ANL_currFC));
+ le16_to_cpu(hw->qual.cq_curr_bss),
+ le16_to_cpu(hw->qual.asl_curr_bss),
+ le16_to_cpu(hw->qual.anl_curr_fc));
}
/* Get the signal rate */
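A side note on the allocation changes in the prism2sta.c hunks above: sizing an allocation from the pointer (sizeof(*ptr)) rather than from the type name keeps kzalloc()/kmemdup() calls correct if the structure is later renamed, which is exactly what this series does with the hfa384x types. Below is a minimal userspace sketch of the same idiom; the structure, field, and helper names are invented for illustration and are not taken from the driver.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct frame_example {		/* illustrative stand-in for a device struct */
	unsigned short infotype;
	unsigned short framelen;
	unsigned char payload[32];
};

/* Userspace analogue of kmemdup(): allocate and copy in one step. */
static void *memdup_example(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	struct frame_example *inf, *copy;

	inf = calloc(1, sizeof(*inf));	/* preferred over sizeof(struct frame_example) */
	if (!inf)
		return 1;
	inf->infotype = 0xf101;

	copy = memdup_example(inf, sizeof(*inf));
	if (copy)
		printf("copied frame, infotype=0x%04x\n",
		       (unsigned int)copy->infotype);

	free(copy);
	free(inf);
	return 0;
}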
diff --git a/drivers/staging/xgifb/XGI_main.h b/drivers/staging/xgifb/XGI_main.h
index 85079fea7152..7a80a90f229f 100644
--- a/drivers/staging/xgifb/XGI_main.h
+++ b/drivers/staging/xgifb/XGI_main.h
@@ -139,7 +139,7 @@ static const struct _XGIbios_mode {
static const unsigned short XGI310paneltype[] = {
LCD_UNKNOWN, LCD_800x600, LCD_1024x768, LCD_1280x1024,
- LCD_640x480, LCD_1024x600, LCD_1152x864, LCD_1280x960,
+ LCD_640x480, LCD_1024x600, LCD_1152x864, LCD_1280x960,
LCD_1152x768, LCD_1400x1050, LCD_1280x768, LCD_1600x1200,
LCD_1024x768, LCD_1024x768, LCD_1024x768};
@@ -174,7 +174,7 @@ static const struct _XGI_tvtype {
{"NTSC", 2},
{"pal", 1},
{"ntsc", 2},
- {"\0", -1}
+ {"\0", -1}
};
static const struct _XGI_vrate {
@@ -183,44 +183,44 @@ static const struct _XGI_vrate {
u16 yres;
u16 refresh;
} XGIfb_vrate[] = {
- {1, 640, 480, 60}, {2, 640, 480, 72},
- {3, 640, 480, 75}, {4, 640, 480, 85},
+ {1, 640, 480, 60}, {2, 640, 480, 72},
+ {3, 640, 480, 75}, {4, 640, 480, 85},
{5, 640, 480, 100}, {6, 640, 480, 120},
- {7, 640, 480, 160}, {8, 640, 480, 200},
+ {7, 640, 480, 160}, {8, 640, 480, 200},
- {1, 720, 480, 60},
- {1, 720, 576, 58},
- {1, 800, 480, 60}, {2, 800, 480, 75}, {3, 800, 480, 85},
- {1, 800, 600, 60}, {2, 800, 600, 72}, {3, 800, 600, 75},
- {4, 800, 600, 85}, {5, 800, 600, 100},
- {6, 800, 600, 120}, {7, 800, 600, 160},
+ {1, 720, 480, 60},
+ {1, 720, 576, 58},
+ {1, 800, 480, 60}, {2, 800, 480, 75}, {3, 800, 480, 85},
+ {1, 800, 600, 60}, {2, 800, 600, 72}, {3, 800, 600, 75},
+ {4, 800, 600, 85}, {5, 800, 600, 100},
+ {6, 800, 600, 120}, {7, 800, 600, 160},
- {1, 1024, 768, 60}, {2, 1024, 768, 70}, {3, 1024, 768, 75},
- {4, 1024, 768, 85}, {5, 1024, 768, 100}, {6, 1024, 768, 120},
- {1, 1024, 576, 60}, {2, 1024, 576, 75}, {3, 1024, 576, 85},
- {1, 1024, 600, 60},
- {1, 1152, 768, 60},
- {1, 1280, 720, 60}, {2, 1280, 720, 75}, {3, 1280, 720, 85},
- {1, 1280, 768, 60},
+ {1, 1024, 768, 60}, {2, 1024, 768, 70}, {3, 1024, 768, 75},
+ {4, 1024, 768, 85}, {5, 1024, 768, 100}, {6, 1024, 768, 120},
+ {1, 1024, 576, 60}, {2, 1024, 576, 75}, {3, 1024, 576, 85},
+ {1, 1024, 600, 60},
+ {1, 1152, 768, 60},
+ {1, 1280, 720, 60}, {2, 1280, 720, 75}, {3, 1280, 720, 85},
+ {1, 1280, 768, 60},
{1, 1280, 1024, 60}, {2, 1280, 1024, 75}, {3, 1280, 1024, 85},
- {1, 1280, 960, 70},
- {1, 1400, 1050, 60},
- {1, 1600, 1200, 60}, {2, 1600, 1200, 65},
+ {1, 1280, 960, 70},
+ {1, 1400, 1050, 60},
+ {1, 1600, 1200, 60}, {2, 1600, 1200, 65},
{3, 1600, 1200, 70}, {4, 1600, 1200, 75},
- {5, 1600, 1200, 85}, {6, 1600, 1200, 100},
+ {5, 1600, 1200, 85}, {6, 1600, 1200, 100},
{7, 1600, 1200, 120},
- {1, 1920, 1440, 60}, {2, 1920, 1440, 65},
+ {1, 1920, 1440, 60}, {2, 1920, 1440, 65},
{3, 1920, 1440, 70}, {4, 1920, 1440, 75},
- {5, 1920, 1440, 85}, {6, 1920, 1440, 100},
- {1, 2048, 1536, 60}, {2, 2048, 1536, 65},
+ {5, 1920, 1440, 85}, {6, 1920, 1440, 100},
+ {1, 2048, 1536, 60}, {2, 2048, 1536, 65},
{3, 2048, 1536, 70}, {4, 2048, 1536, 75},
- {5, 2048, 1536, 85},
- {0, 0, 0, 0}
+ {5, 2048, 1536, 85},
+ {0, 0, 0, 0}
};
static const struct _XGI_TV_filter {
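The XGI_main.h hunk above is whitespace-only: the mode and refresh-rate tables are re-indented so their columns line up. A small table in the same aligned style is sketched below; the entries are invented for the example, not copied from the driver.

#include <stdio.h>

struct vrate_example {
	unsigned short idx;
	unsigned short xres;
	unsigned short yres;
	unsigned short refresh;
};

static const struct vrate_example rates[] = {
	{1,  640,  480, 60}, {2,  640,  480, 75},
	{1, 1024,  768, 60}, {2, 1024,  768, 75},
	{1, 1280, 1024, 60},
	{0,    0,    0,  0}	/* zero terminator, as in XGIfb_vrate[] */
};

int main(void)
{
	const struct vrate_example *v;

	for (v = rates; v->idx != 0; v++)
		printf("%hux%hu@%huHz (idx %hu)\n",
		       v->xres, v->yres, v->refresh, v->idx);
	return 0;
}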
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 0c78491ff5a1..777cd6e11694 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -56,8 +56,8 @@ static inline void dumpVGAReg(struct xgifb_video_info *xgifb_info)
/* --------------- Hardware Access Routines -------------------------- */
static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned char modeno)
+ struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned char modeno)
{
unsigned short ModeNo = modeno;
unsigned short ModeIdIndex = 0, ClockIndex = 0;
@@ -68,7 +68,7 @@ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
XGI_SearchModeID(ModeNo, &ModeIdIndex);
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
- ModeIdIndex, XGI_Pr);
+ ModeIdIndex, XGI_Pr);
ClockIndex = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
@@ -76,11 +76,11 @@ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
}
static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned char modeno,
- u32 *left_margin, u32 *right_margin, u32 *upper_margin,
- u32 *lower_margin, u32 *hsync_len, u32 *vsync_len, u32 *sync,
- u32 *vmode)
+ struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned char modeno, u32 *left_margin,
+ u32 *right_margin, u32 *upper_margin,
+ u32 *lower_margin, u32 *hsync_len,
+ u32 *vsync_len, u32 *sync, u32 *vmode)
{
unsigned short ModeNo = modeno;
unsigned short ModeIdIndex, index = 0;
@@ -95,7 +95,7 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
if (!XGI_SearchModeID(ModeNo, &ModeIdIndex))
return 0;
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
- ModeIdIndex, XGI_Pr);
+ ModeIdIndex, XGI_Pr);
index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
sr_data = XGI_CRT1Table[index].CR[5];
@@ -105,7 +105,7 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
cr_data = XGI_CRT1Table[index].CR[3];
/* Horizontal retrace (=sync) start */
- HRS = (cr_data & 0xff) | ((unsigned short) (sr_data & 0xC0) << 2);
+ HRS = (cr_data & 0xff) | ((unsigned short)(sr_data & 0xC0) << 2);
F = HRS - HDE - 3;
sr_data = XGI_CRT1Table[index].CR[6];
@@ -115,8 +115,8 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
cr_data2 = XGI_CRT1Table[index].CR[4];
/* Horizontal blank end */
- HBE = (cr_data & 0x1f) | ((unsigned short) (cr_data2 & 0x80) >> 2)
- | ((unsigned short) (sr_data & 0x03) << 6);
+ HBE = (cr_data & 0x1f) | ((unsigned short)(cr_data2 & 0x80) >> 2)
+ | ((unsigned short)(sr_data & 0x03) << 6);
/* Horizontal retrace (=sync) end */
HRE = (cr_data2 & 0x1f) | ((sr_data & 0x04) << 3);
@@ -142,15 +142,15 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
cr_data = XGI_CRT1Table[index].CR[10];
/* Vertical retrace (=sync) start */
- VRS = (cr_data & 0xff) | ((unsigned short) (cr_data2 & 0x04) << 6)
- | ((unsigned short) (cr_data2 & 0x80) << 2)
- | ((unsigned short) (sr_data & 0x08) << 7);
+ VRS = (cr_data & 0xff) | ((unsigned short)(cr_data2 & 0x04) << 6)
+ | ((unsigned short)(cr_data2 & 0x80) << 2)
+ | ((unsigned short)(sr_data & 0x08) << 7);
F = VRS + 1 - VDE;
cr_data = XGI_CRT1Table[index].CR[13];
/* Vertical blank end */
- VBE = (cr_data & 0xff) | ((unsigned short) (sr_data & 0x10) << 4);
+ VBE = (cr_data & 0xff) | ((unsigned short)(sr_data & 0x10) << 4);
temp = VBE - ((VDE - 1) & 511);
B = (temp > 0) ? temp : (temp + 512);
@@ -231,11 +231,11 @@ static int XGIfb_GetXG21DefaultLVDSModeIdx(struct xgifb_video_info *xgifb_info)
{
int i = 0;
- while ((XGIbios_mode[i].mode_no != 0)
- && (XGIbios_mode[i].xres <= xgifb_info->lvds_data.LVDSHDE)) {
- if ((XGIbios_mode[i].xres == xgifb_info->lvds_data.LVDSHDE)
- && (XGIbios_mode[i].yres == xgifb_info->lvds_data.LVDSVDE)
- && (XGIbios_mode[i].bpp == 8)) {
+ while ((XGIbios_mode[i].mode_no != 0) &&
+ (XGIbios_mode[i].xres <= xgifb_info->lvds_data.LVDSHDE)) {
+ if ((XGIbios_mode[i].xres == xgifb_info->lvds_data.LVDSHDE) &&
+ (XGIbios_mode[i].yres == xgifb_info->lvds_data.LVDSVDE) &&
+ (XGIbios_mode[i].bpp == 8)) {
return i;
}
i++;
@@ -384,9 +384,8 @@ static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
return -1;
break;
case 640:
- if ((XGIbios_mode[myindex].yres != 400)
- && (XGIbios_mode[myindex].yres
- != 480))
+ if ((XGIbios_mode[myindex].yres != 400) &&
+ (XGIbios_mode[myindex].yres != 480))
return -1;
break;
case 800:
@@ -518,7 +517,7 @@ static void XGIfb_search_crt2type(const char *name)
{
int i = 0;
- if (name == NULL)
+ if (!name)
return;
while (XGI_crt2type[i].type_no != -1) {
@@ -562,7 +561,7 @@ static u8 XGIfb_search_refresh_rate(struct xgifb_video_info *xgifb_info,
!= 1)) {
pr_debug("Adjusting rate from %d down to %d\n",
rate,
- XGIfb_vrate[i-1].refresh);
+ XGIfb_vrate[i - 1].refresh);
xgifb_info->rate_idx =
XGIfb_vrate[i - 1].idx;
xgifb_info->refresh_rate =
@@ -589,7 +588,7 @@ static void XGIfb_search_tvstd(const char *name)
{
int i = 0;
- if (name == NULL)
+ if (!name)
return;
while (XGI_tvtype[i].type_no != -1) {
@@ -683,7 +682,7 @@ static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info)
xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR30, cr30);
xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR31, cr31);
xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR33,
- (xgifb_info->rate_idx & 0x0F));
+ (xgifb_info->rate_idx & 0x0F));
}
static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
@@ -730,7 +729,6 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
if (xgifb_info->display2 == XGIFB_DISP_TV &&
xgifb_info->hasVB == HASVB_301) {
-
reg = xgifb_reg_get(XGIPART4, 0x01);
if (reg < 0xB0) { /* Set filter for XGI301 */
@@ -763,16 +761,13 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
0x01);
if (xgifb_info->TV_type == TVMODE_NTSC) {
-
xgifb_reg_and(XGIPART2, 0x3a, 0x1f);
if (xgifb_info->TV_plug == TVPLUG_SVIDEO) {
-
xgifb_reg_and(XGIPART2, 0x30, 0xdf);
} else if (xgifb_info->TV_plug
== TVPLUG_COMPOSITE) {
-
xgifb_reg_or(XGIPART2, 0x30, 0x20);
switch (xgifb_info->video_width) {
@@ -822,16 +817,13 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
}
} else if (xgifb_info->TV_type == TVMODE_PAL) {
-
xgifb_reg_and(XGIPART2, 0x3A, 0x1F);
if (xgifb_info->TV_plug == TVPLUG_SVIDEO) {
-
xgifb_reg_and(XGIPART2, 0x30, 0xDF);
} else if (xgifb_info->TV_plug
== TVPLUG_COMPOSITE) {
-
xgifb_reg_or(XGIPART2, 0x30, 0x20);
switch (xgifb_info->video_width) {
@@ -912,7 +904,7 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
}
static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
- struct fb_info *info)
+ struct fb_info *info)
{
struct xgifb_video_info *xgifb_info = info->par;
struct xgi_hw_device_info *hw_info = &xgifb_info->hw_info;
@@ -945,17 +937,15 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
if (var->pixclock) {
drate = 1000000000 / var->pixclock;
hrate = (drate * 1000) / htotal;
- xgifb_info->refresh_rate = (unsigned int) (hrate * 2
+ xgifb_info->refresh_rate = (unsigned int)(hrate * 2
/ vtotal);
} else {
xgifb_info->refresh_rate = 60;
}
pr_debug("Change mode to %dx%dx%d-%dHz\n",
- var->xres,
- var->yres,
- var->bits_per_pixel,
- xgifb_info->refresh_rate);
+ var->xres, var->yres, var->bits_per_pixel,
+ xgifb_info->refresh_rate);
old_mode = xgifb_info->mode_idx;
xgifb_info->mode_idx = 0;
@@ -992,7 +982,6 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
}
if (isactive) {
-
XGIfb_pre_setmode(xgifb_info);
if (XGISetModeNew(xgifb_info, hw_info,
XGIbios_mode[xgifb_info->mode_idx].mode_no)
@@ -1064,7 +1053,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
break;
}
}
- XGIfb_bpp_to_var(xgifb_info, var); /*update ARGB info*/
+ XGIfb_bpp_to_var(xgifb_info, var); /* update ARGB info */
dumpVGAReg(xgifb_info);
return 0;
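As a worked example of the refresh-rate arithmetic in the XGIfb_do_set_var() hunk above: fbdev expresses pixclock in picoseconds per pixel, so 1000000000 / pixclock is the dot clock in kHz and (drate * 1000) / htotal is the horizontal rate in Hz. The factor of two in hrate * 2 / vtotal assumes vtotal was already doubled earlier in the function (that part lies outside this hunk). The numbers below are for a nominal 1024x768@60 timing and are illustrative only.

#include <stdio.h>

int main(void)
{
	unsigned int pixclock = 15385;	/* ps per pixel, ~65 MHz dot clock */
	unsigned int htotal = 1344;	/* pixels per scanline             */
	unsigned int vtotal = 806 * 2;	/* lines per frame, pre-doubled    */
	unsigned int drate, hrate, refresh;

	drate = 1000000000 / pixclock;		/* dot clock in kHz    */
	hrate = (drate * 1000) / htotal;	/* line rate in Hz     */
	refresh = hrate * 2 / vtotal;		/* vertical rate in Hz */

	printf("drate=%u kHz, hrate=%u Hz, refresh=%u Hz\n",
	       drate, hrate, refresh);		/* prints refresh=60 */
	return 0;
}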
@@ -1150,7 +1139,7 @@ static int XGIfb_setcolreg(unsigned int regno, unsigned int red,
}
break;
case 16:
- ((u32 *) (info->pseudo_palette))[regno] = ((red & 0xf800))
+ ((u32 *)(info->pseudo_palette))[regno] = ((red & 0xf800))
| ((green & 0xfc00) >> 5) | ((blue & 0xf800)
>> 11);
break;
@@ -1158,7 +1147,7 @@ static int XGIfb_setcolreg(unsigned int regno, unsigned int red,
red >>= 8;
green >>= 8;
blue >>= 8;
- ((u32 *) (info->pseudo_palette))[regno] = (red << 16) | (green
+ ((u32 *)(info->pseudo_palette))[regno] = (red << 16) | (green
<< 8) | (blue);
break;
}
@@ -1168,7 +1157,7 @@ static int XGIfb_setcolreg(unsigned int regno, unsigned int red,
/* ----------- FBDev related routines for all series ---------- */
static int XGIfb_get_fix(struct fb_fix_screeninfo *fix, int con,
- struct fb_info *info)
+ struct fb_info *info)
{
struct xgifb_video_info *xgifb_info = info->par;
@@ -1250,7 +1239,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
drate = 1000000000 / var->pixclock;
hrate = (drate * 1000) / htotal;
xgifb_info->refresh_rate =
- (unsigned int) (hrate * 2 / vtotal);
+ (unsigned int)(hrate * 2 / vtotal);
pr_debug(
"%s: pixclock = %d ,htotal=%d, vtotal=%d\n"
"%s: drate=%d, hrate=%d, refresh_rate=%d\n",
@@ -1262,10 +1251,10 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
search_idx = 0;
while ((XGIbios_mode[search_idx].mode_no != 0) &&
- (XGIbios_mode[search_idx].xres <= var->xres)) {
+ (XGIbios_mode[search_idx].xres <= var->xres)) {
if ((XGIbios_mode[search_idx].xres == var->xres) &&
- (XGIbios_mode[search_idx].yres == var->yres) &&
- (XGIbios_mode[search_idx].bpp == var->bits_per_pixel)) {
+ (XGIbios_mode[search_idx].yres == var->yres) &&
+ (XGIbios_mode[search_idx].bpp == var->bits_per_pixel)) {
if (XGIfb_validate_mode(xgifb_info, search_idx) > 0) {
found_mode = 1;
break;
@@ -1275,9 +1264,8 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
}
if (!found_mode) {
-
pr_err("%dx%dx%d is no valid mode\n",
- var->xres, var->yres, var->bits_per_pixel);
+ var->xres, var->yres, var->bits_per_pixel);
search_idx = 0;
while (XGIbios_mode[search_idx].mode_no != 0) {
if ((var->xres <= XGIbios_mode[search_idx].xres) &&
@@ -1296,11 +1284,11 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
var->xres = XGIbios_mode[search_idx].xres;
var->yres = XGIbios_mode[search_idx].yres;
pr_debug("Adapted to mode %dx%dx%d\n",
- var->xres, var->yres, var->bits_per_pixel);
+ var->xres, var->yres, var->bits_per_pixel);
} else {
pr_err("Failed to find similar mode to %dx%dx%d\n",
- var->xres, var->yres, var->bits_per_pixel);
+ var->xres, var->yres, var->bits_per_pixel);
return -EINVAL;
}
}
@@ -1332,7 +1320,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
}
static int XGIfb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info)
+ struct fb_info *info)
{
int err;
@@ -1344,9 +1332,8 @@ static int XGIfb_pan_display(struct fb_var_screeninfo *var,
if (var->vmode & FB_VMODE_YWRAP) {
if (var->yoffset >= info->var.yres_virtual || var->xoffset)
return -EINVAL;
- } else if (var->xoffset + info->var.xres > info->var.xres_virtual
- || var->yoffset + info->var.yres
- > info->var.yres_virtual) {
+ } else if (var->xoffset + info->var.xres > info->var.xres_virtual ||
+ var->yoffset + info->var.yres > info->var.yres_virtual) {
return -EINVAL;
}
err = XGIfb_pan_var(var, info);
@@ -1401,7 +1388,6 @@ static struct fb_ops XGIfb_ops = {
static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
{
-
u8 ChannelNum, tmp;
u8 reg = 0;
@@ -1474,10 +1460,8 @@ static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
xgifb_info->video_size = xgifb_info->video_size * ChannelNum;
pr_info("SR14=%x DramSzie %x ChannelNum %x\n",
- reg,
- xgifb_info->video_size, ChannelNum);
+ reg, xgifb_info->video_size, ChannelNum);
return 0;
-
}
static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
@@ -1597,7 +1581,6 @@ static int __init XGIfb_setup(char *options)
pr_info("Options: %s\n", options);
while ((this_opt = strsep(&options, ",")) != NULL) {
-
if (!*this_opt)
continue;
@@ -1634,8 +1617,7 @@ static int __init XGIfb_setup(char *options)
return 0;
}
-static int xgifb_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
u8 reg, reg1;
u8 CR48, CR38;
@@ -1670,7 +1652,7 @@ static int xgifb_probe(struct pci_dev *pdev,
xgifb_info->mmio_size = pci_resource_len(pdev, 1);
xgifb_info->vga_base = pci_resource_start(pdev, 2) + 0x30;
dev_info(&pdev->dev, "Relocate IO address: %Lx [%08lx]\n",
- (u64) pci_resource_start(pdev, 2),
+ (u64)pci_resource_start(pdev, 2),
xgifb_info->vga_base);
if (pci_enable_device(pdev)) {
@@ -1688,7 +1670,7 @@ static int xgifb_probe(struct pci_dev *pdev,
xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD);
reg1 = xgifb_reg_get(XGISR, IND_SIS_PASSWORD);
- if (reg1 != 0xa1) { /*I/O error */
+ if (reg1 != 0xa1) { /* I/O error */
dev_err(&pdev->dev, "I/O error\n");
ret = -EIO;
goto error_disable;
@@ -1698,7 +1680,7 @@ static int xgifb_probe(struct pci_dev *pdev,
case PCI_DEVICE_ID_XGI_20:
xgifb_reg_or(XGICR, Index_CR_GPIO_Reg3, GPIOG_EN);
CR48 = xgifb_reg_get(XGICR, Index_CR_GPIO_Reg1);
- if (CR48&GPIOG_READ)
+ if (CR48 & GPIOG_READ)
xgifb_info->chip = XG21;
else
xgifb_info->chip = XG20;
@@ -1727,7 +1709,7 @@ static int xgifb_probe(struct pci_dev *pdev,
xgifb_info->video_size = video_size_max;
}
- /* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */
+ /* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */
xgifb_reg_or(XGISR,
IND_SIS_PCI_ADDRESS_SET,
(SIS_PCI_ADDR_ENABLE | SIS_MEM_MAP_IO_ENABLE));
@@ -1740,7 +1722,7 @@ static int xgifb_probe(struct pci_dev *pdev,
xgifb_info->video_size,
"XGIfb FB")) {
dev_err(&pdev->dev, "Unable request memory size %x\n",
- xgifb_info->video_size);
+ xgifb_info->video_size);
dev_err(&pdev->dev,
"Fatal error: Unable to reserve frame buffer memory. Is there another framebuffer driver active?\n");
ret = -ENODEV;
@@ -1763,13 +1745,13 @@ static int xgifb_probe(struct pci_dev *pdev,
dev_info(&pdev->dev,
"Framebuffer at 0x%Lx, mapped to 0x%p, size %dk\n",
- (u64) xgifb_info->video_base,
+ (u64)xgifb_info->video_base,
xgifb_info->video_vbase,
xgifb_info->video_size / 1024);
dev_info(&pdev->dev,
"MMIO at 0x%Lx, mapped to 0x%p, size %ldk\n",
- (u64) xgifb_info->mmio_base, xgifb_info->mmio_vbase,
+ (u64)xgifb_info->mmio_base, xgifb_info->mmio_vbase,
xgifb_info->mmio_size / 1024);
pci_set_drvdata(pdev, xgifb_info);
@@ -1784,9 +1766,9 @@ static int xgifb_probe(struct pci_dev *pdev,
xgifb_info->hasVB = HASVB_NONE;
} else if (xgifb_info->chip == XG21) {
CR38 = xgifb_reg_get(XGICR, 0x38);
- if ((CR38&0xE0) == 0xC0)
+ if ((CR38 & 0xE0) == 0xC0)
xgifb_info->display2 = XGIFB_DISP_LCD;
- else if ((CR38&0xE0) == 0x60)
+ else if ((CR38 & 0xE0) == 0x60)
xgifb_info->hasVB = HASVB_CHRONTEL;
else
xgifb_info->hasVB = HASVB_NONE;
@@ -1903,8 +1885,7 @@ static int xgifb_probe(struct pci_dev *pdev,
xgifb_info->refresh_rate = refresh_rate;
if (xgifb_info->refresh_rate == 0)
xgifb_info->refresh_rate = 60;
- if (XGIfb_search_refresh_rate(xgifb_info,
- xgifb_info->refresh_rate) == 0) {
+ if (XGIfb_search_refresh_rate(xgifb_info, xgifb_info->refresh_rate) == 0) {
xgifb_info->rate_idx = 1;
xgifb_info->refresh_rate = 60;
}
@@ -1939,15 +1920,13 @@ static int xgifb_probe(struct pci_dev *pdev,
default:
xgifb_info->video_cmap_len = 16;
pr_info("Unsupported depth %d\n",
- xgifb_info->video_bpp);
+ xgifb_info->video_bpp);
break;
}
pr_info("Default mode is %dx%dx%d (%dHz)\n",
- xgifb_info->video_width,
- xgifb_info->video_height,
- xgifb_info->video_bpp,
- xgifb_info->refresh_rate);
+ xgifb_info->video_width, xgifb_info->video_height,
+ xgifb_info->video_bpp, xgifb_info->refresh_rate);
fb_info->var.red.length = 8;
fb_info->var.green.length = 8;
@@ -1964,22 +1943,20 @@ static int xgifb_probe(struct pci_dev *pdev,
XGIfb_bpp_to_var(xgifb_info, &fb_info->var);
- fb_info->var.pixclock = (u32) (1000000000 /
- XGIfb_mode_rate_to_dclock(&xgifb_info->dev_info,
- hw_info,
- XGIbios_mode[xgifb_info->mode_idx].mode_no));
-
- if (XGIfb_mode_rate_to_ddata(&xgifb_info->dev_info, hw_info,
- XGIbios_mode[xgifb_info->mode_idx].mode_no,
- &fb_info->var.left_margin,
- &fb_info->var.right_margin,
- &fb_info->var.upper_margin,
- &fb_info->var.lower_margin,
- &fb_info->var.hsync_len,
- &fb_info->var.vsync_len,
- &fb_info->var.sync,
- &fb_info->var.vmode)) {
-
+ fb_info->var.pixclock = (u32)(1000000000 / XGIfb_mode_rate_to_dclock
+ (&xgifb_info->dev_info, hw_info,
+ XGIbios_mode[xgifb_info->mode_idx].mode_no));
+
+ if (XGIfb_mode_rate_to_ddata(&xgifb_info->dev_info,
+ hw_info, XGIbios_mode[xgifb_info->mode_idx].mode_no,
+ &fb_info->var.left_margin,
+ &fb_info->var.right_margin,
+ &fb_info->var.upper_margin,
+ &fb_info->var.lower_margin,
+ &fb_info->var.hsync_len,
+ &fb_info->var.vsync_len,
+ &fb_info->var.sync,
+ &fb_info->var.vmode)) {
if ((fb_info->var.vmode & FB_VMODE_MASK) ==
FB_VMODE_INTERLACED) {
fb_info->var.yres <<= 1;
@@ -1990,7 +1967,6 @@ static int xgifb_probe(struct pci_dev *pdev,
fb_info->var.yres >>= 1;
fb_info->var.yres_virtual >>= 1;
}
-
}
fb_info->flags = FBINFO_FLAG_DEFAULT;
@@ -2028,9 +2004,7 @@ error:
return ret;
}
-/*****************************************************/
-/* PCI DEVICE HANDLING */
-/*****************************************************/
+/* -------------------- PCI DEVICE HANDLING -------------------- */
static void xgifb_remove(struct pci_dev *pdev)
{
@@ -2054,25 +2028,23 @@ static struct pci_driver xgifb_driver = {
.remove = xgifb_remove
};
-/*****************************************************/
-/* MODULE */
-/*****************************************************/
+/* -------------------- MODULE -------------------- */
-module_param(mode, charp, 0);
+module_param(mode, charp, 0000);
MODULE_PARM_DESC(mode,
- "Selects the desired default display mode in the format XxYxDepth (eg. 1024x768x16).");
+ "Selects the desired default display mode in the format XxYxDepth (eg. 1024x768x16).");
-module_param(forcecrt2type, charp, 0);
+module_param(forcecrt2type, charp, 0000);
MODULE_PARM_DESC(forcecrt2type,
- "Force the second display output type. Possible values are NONE, LCD, TV, VGA, SVIDEO or COMPOSITE.");
+ "Force the second display output type. Possible values are NONE, LCD, TV, VGA, SVIDEO or COMPOSITE.");
-module_param(vesa, int, 0);
+module_param(vesa, int, 0000);
MODULE_PARM_DESC(vesa,
- "Selects the desired default display mode by VESA mode number (eg. 0x117).");
+ "Selects the desired default display mode by VESA mode number (eg. 0x117).");
-module_param(filter, int, 0);
+module_param(filter, int, 0000);
MODULE_PARM_DESC(filter,
- "Selects TV flicker filter type (only for systems with a SiS301 video bridge). Possible values 0-7. Default: [no filter]).");
+ "Selects TV flicker filter type (only for systems with a SiS301 video bridge). Possible values 0-7. Default: [no filter]).");
static int __init xgifb_init(void)
{
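The module_param() changes just above replace the bare permission 0 with the four-digit octal 0000, which checkpatch prefers because the argument is a sysfs mode. A minimal module sketch using the same pattern is shown below; the module and parameter names are invented for illustration.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/* 0000: parameter is settable at load time but not exposed in sysfs. */
static char *mode_example = "1024x768x16";
module_param(mode_example, charp, 0000);
MODULE_PARM_DESC(mode_example, "Default display mode in XxYxDepth form");

static int __init param_demo_init(void)
{
	pr_info("param_demo: mode_example=%s\n", mode_example);
	return 0;
}

static void __exit param_demo_exit(void)
{
	pr_info("param_demo: unloaded\n");
}

module_init(param_demo_init);
module_exit(param_demo_exit);
MODULE_LICENSE("GPL");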
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 062ece22ed84..14af157958cd 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -55,8 +55,9 @@ XGINew_GetXG20DRAMType(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_or(pVBInfo->P3d4, 0x4A, 0x80); /* Enable GPIOH read */
/* GPIOF 0:DVI 1:DVO */
data = xgifb_reg_get(pVBInfo->P3d4, 0x48);
- /* HOTPLUG_SUPPORT */
- /* for current XG20 & XG21, GPIOH is floating, driver will
+ /*
+ * HOTPLUG_SUPPORT
+ * for current XG20 & XG21, GPIOH is floating, driver will
* fix DDR temporarily
*/
/* DVI read GPIOH */
@@ -199,7 +200,8 @@ static void XGINew_DDRII_Bootup_XG27(
}
static void XGINew_DDR2_MRS_XG20(struct xgi_hw_device_info *HwDeviceExtension,
- unsigned long P3c4, struct vb_device_info *pVBInfo)
+ unsigned long P3c4,
+ struct vb_device_info *pVBInfo)
{
unsigned long P3d4 = P3c4 + 0x10;
@@ -353,8 +355,8 @@ static void XGINew_DDR2_DefaultRegister(
unsigned long Port, struct vb_device_info *pVBInfo)
{
unsigned long P3d4 = Port, P3c4 = Port - 0x10;
-
- /* keep following setting sequence, each setting in
+ /*
+ * keep following setting sequence, each setting in
* the same reg insert idle
*/
xgifb_reg_set(P3d4, 0x82, 0x77);
@@ -387,7 +389,7 @@ static void XGINew_DDR2_DefaultRegister(
}
static void XGI_SetDRAM_Helper(unsigned long P3d4, u8 seed, u8 temp2, u8 reg,
- u8 shift_factor, u8 mask1, u8 mask2)
+ u8 shift_factor, u8 mask1, u8 mask2)
{
u8 j;
@@ -460,15 +462,15 @@ static void XGINew_SetDRAMDefaultRegister340(
for (j = 0; j <= 6; j++) /* CR90 - CR96 */
xgifb_reg_set(P3d4, (0x90 + j),
- pVBInfo->CR40[14 + j][pVBInfo->ram_type]);
+ pVBInfo->CR40[14 + j][pVBInfo->ram_type]);
for (j = 0; j <= 2; j++) /* CRC3 - CRC5 */
xgifb_reg_set(P3d4, (0xC3 + j),
- pVBInfo->CR40[21 + j][pVBInfo->ram_type]);
+ pVBInfo->CR40[21 + j][pVBInfo->ram_type]);
for (j = 0; j < 2; j++) /* CR8A - CR8B */
xgifb_reg_set(P3d4, (0x8A + j),
- pVBInfo->CR40[1 + j][pVBInfo->ram_type]);
+ pVBInfo->CR40[1 + j][pVBInfo->ram_type]);
if (HwDeviceExtension->jChipType == XG42)
xgifb_reg_set(P3d4, 0x8C, 0x87);
@@ -539,7 +541,8 @@ static unsigned short XGINew_SetDRAMSize20Reg(
}
static int XGINew_ReadWriteRest(unsigned short StopAddr,
- unsigned short StartAddr, struct vb_device_info *pVBInfo)
+ unsigned short StartAddr,
+ struct vb_device_info *pVBInfo)
{
int i;
unsigned long Position = 0;
@@ -583,7 +586,7 @@ static unsigned char XGINew_CheckFrequence(struct vb_device_info *pVBInfo)
}
static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
unsigned char data;
@@ -647,7 +650,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
pVBInfo->ram_bus = 16; /* 16 bits */
/* (0x31:12x8x2) 22bit + 2 rank */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
- /* 0x41:16Mx16 bit*/
+ /* 0x41:16Mx16 bit */
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x41);
usleep_range(15, 1015);
@@ -660,7 +663,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3c4,
0x13,
0x31);
- /* 0x31:8Mx16 bit*/
+ /* 0x31:8Mx16 bit */
xgifb_reg_set(pVBInfo->P3c4,
0x14,
0x31);
@@ -678,7 +681,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
pVBInfo->ram_bus = 8; /* 8 bits */
/* (0x31:12x8x2) 22bit + 2 rank */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
- /* 0x30:8Mx8 bit*/
+ /* 0x30:8Mx8 bit */
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x30);
usleep_range(15, 1015);
@@ -697,7 +700,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
case XG27:
pVBInfo->ram_bus = 16; /* 16 bits */
pVBInfo->ram_channel = 1; /* Single channel */
- xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x51); /* 32Mx16 bit*/
+ xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x51); /* 32Mx16 bit */
break;
case XG42:
/*
@@ -785,7 +788,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
}
static int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
u8 i, size;
unsigned short memsize, start_addr;
@@ -827,8 +830,8 @@ static int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension,
}
static void XGINew_SetDRAMSize_340(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
unsigned short data;
@@ -905,9 +908,9 @@ static bool xgifb_read_vbios(struct pci_dev *pdev)
goto error;
if (j == 0xff)
j = 1;
- /*
- * Read the LVDS table index scratch register set by the BIOS.
- */
+
+ /* Read the LVDS table index scratch register set by the BIOS. */
+
entry = xgifb_reg_get(xgifb_info->dev_info.P3d4, 0x36);
if (entry >= j)
entry = 0;
@@ -1039,8 +1042,9 @@ static void XGINew_SetModeScratch(struct vb_device_info *pVBInfo)
}
tempcl |= SetSimuScanMode;
- if ((!(temp & ActiveCRT1)) && ((temp & ActiveLCD) || (temp & ActiveTV)
- || (temp & ActiveCRT2)))
+ if ((!(temp & ActiveCRT1)) && ((temp & ActiveLCD) ||
+ (temp & ActiveTV) ||
+ (temp & ActiveCRT2)))
tempcl ^= (SetSimuScanMode | SwitchCRT2);
if ((temp & ActiveLCD) && (temp & ActiveTV))
tempcl ^= (SetSimuScanMode | SwitchCRT2);
@@ -1085,7 +1089,7 @@ static unsigned short XGINew_SenseLCD(struct xgi_hw_device_info
}
static void XGINew_GetXG21Sense(struct pci_dev *pdev,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
unsigned char Temp;
@@ -1095,7 +1099,7 @@ static void XGINew_GetXG21Sense(struct pci_dev *pdev,
/* LVDS on chip */
xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0);
} else {
- /* Enable GPIOA/B read */
+ /* Enable GPIOA/B read */
xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x03, 0x03);
Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0xC0;
if (Temp == 0xC0) { /* DVI & DVO GPIOA/B pull high */
@@ -1119,7 +1123,7 @@ static void XGINew_GetXG27Sense(struct vb_device_info *pVBInfo)
unsigned char Temp, bCR4A;
bCR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
- /* Enable GPIOA/B/C read */
+ /* Enable GPIOA/B/C read */
xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x07, 0x07);
Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0x07;
xgifb_reg_set(pVBInfo->P3d4, 0x4A, bCR4A);
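Several hunks in vb_init.c above (for example the ActiveCRT1/ActiveLCD test in XGINew_SetModeScratch()) move the && and || operators to the end of the line and align the continuations under the opening parenthesis. A standalone sketch of that layout follows, using made-up flag names rather than the driver's.

#include <stdio.h>

#define ACTIVE_CRT1_EX 0x01	/* illustrative flags, not the driver's */
#define ACTIVE_LCD_EX  0x02
#define ACTIVE_TV_EX   0x04
#define ACTIVE_CRT2_EX 0x08

int main(void)
{
	unsigned int temp = ACTIVE_LCD_EX | ACTIVE_TV_EX;

	if (!(temp & ACTIVE_CRT1_EX) && ((temp & ACTIVE_LCD_EX) ||
					 (temp & ACTIVE_TV_EX) ||
					 (temp & ACTIVE_CRT2_EX)))
		printf("CRT1 off, at least one secondary output active\n");

	return 0;
}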
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index d8010c5c1a70..7c7c8c8f1df3 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -55,7 +55,7 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
pVBInfo->XGINew_CR97 = 0xc1;
pVBInfo->SR18 = XG27_SR18;
- /*Z11m DDR*/
+ /* Z11m DDR */
temp = xgifb_reg_get(pVBInfo->P3c4, 0x3B);
/* SR3B[7][3]MAA15 MAA11 (Power on Trapping) */
if (((temp & 0x88) == 0x80) || ((temp & 0x88) == 0x08))
@@ -73,7 +73,7 @@ static void XGI_SetSeqRegs(struct vb_device_info *pVBInfo)
/* Get SR1,2,3,4 from file */
/* SR1 is with screen off 0x20 */
SRdata = XGI330_StandTable.SR[i];
- xgifb_reg_set(pVBInfo->P3c4, i+1, SRdata); /* Set SR 1 2 3 4 */
+ xgifb_reg_set(pVBInfo->P3c4, i + 1, SRdata); /* Set SR 1 2 3 4 */
}
}
@@ -167,7 +167,8 @@ static unsigned char XGI_SetDefaultVCLK(struct vb_device_info *pVBInfo)
}
static unsigned char XGI_AjustCRT2Rate(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex, unsigned short *i,
+ unsigned short RefreshRateTableIndex,
+ unsigned short *i,
struct vb_device_info *pVBInfo)
{
unsigned short tempax, tempbx, resinfo, modeflag, infoflag;
@@ -244,7 +245,7 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeIdIndex,
}
static void XGI_SetSync(unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
unsigned short sync, temp;
@@ -257,7 +258,7 @@ static void XGI_SetSync(unsigned short RefreshRateTableIndex,
}
static void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo,
- struct xgi_hw_device_info *HwDeviceExtension)
+ struct xgi_hw_device_info *HwDeviceExtension)
{
unsigned char data, data1, pushax;
unsigned short i, j;
@@ -359,9 +360,9 @@ static void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex,
}
static void XGI_SetCRT1CRTC(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo,
- struct xgi_hw_device_info *HwDeviceExtension)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo,
+ struct xgi_hw_device_info *HwDeviceExtension)
{
unsigned char index, data;
unsigned short i;
@@ -390,14 +391,14 @@ static void XGI_SetCRT1CRTC(unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->P3d4, 0x14, 0x4F);
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_SetXG21CRTC */
-/* Input : Stand or enhance CRTC table */
-/* Output : Fill CRT Hsync/Vsync to SR2E/SR2F/SR30/SR33/SR34/SR3F */
-/* Description : Set LCD timing */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_SetXG21CRTC
+ * Input : Stand or enhance CRTC table
+ * Output : Fill CRT Hsync/Vsync to SR2E/SR2F/SR30/SR33/SR34/SR3F
+ * Description : Set LCD timing
+ */
static void XGI_SetXG21CRTC(unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
unsigned char index, Tempax, Tempbx, Tempcx, Tempdx;
unsigned short Temp1, Temp2, Temp3;
@@ -506,8 +507,8 @@ static void XGI_SetXG27CRTC(unsigned short RefreshRateTableIndex,
/* SR0B */
Tempax = XGI_CRT1Table[index].CR[5];
- Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8]*/
- Tempbx |= (Tempax << 2); /* Tempbx: HRS[9:0] */
+ Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8] */
+ Tempbx |= Tempax << 2; /* Tempbx: HRS[9:0] */
Tempax = XGI_CRT1Table[index].CR[4]; /* CR5 HRE */
Tempax &= 0x1F; /* Tempax[4:0]: HRE[4:0] */
@@ -530,7 +531,7 @@ static void XGI_SetXG27CRTC(unsigned short RefreshRateTableIndex,
Tempax = XGI_CRT1Table[index].CR[5]; /* SR0B */
Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8]*/
Tempax >>= 6; /* Tempax[1:0]: HRS[9:8]*/
- Tempax |= ((Tempbx << 2) & 0xFF); /* Tempax[7:2]: HRE[5:0] */
+ Tempax |= (Tempbx << 2) & 0xFF; /* Tempax[7:2]: HRE[5:0] */
/* SR2F [7:2][1:0]: HRE[5:0]HRS[9:8] */
xgifb_reg_set(pVBInfo->P3c4, 0x2F, Tempax);
xgifb_reg_and_or(pVBInfo->P3c4, 0x30, 0xE3, 00);
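The horizontal-retrace handling in the hunks above (XGIfb_mode_rate_to_ddata() earlier and XGI_SetXG27CRTC() here) builds a 10-bit HRS value from an 8-bit CR register plus two high bits kept in bits 7:6 of an SR register; the cleanup only drops redundant parentheses and cast spacing. Below is a userspace sketch of that bit assembly with invented register values.

#include <stdio.h>

int main(void)
{
	unsigned char cr4_hrs = 0x28;	/* HRS[7:0], value invented */
	unsigned char sr0b = 0x40;	/* bits 7:6 carry HRS[9:8]  */
	unsigned short hrs;

	hrs = cr4_hrs;					/* low eight bits  */
	hrs |= (unsigned short)(sr0b & 0xC0) << 2;	/* bits 7:6 -> 9:8 */

	printf("HRS = 0x%03x (%u)\n", (unsigned int)hrs, (unsigned int)hrs);
	return 0;
}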
@@ -548,12 +549,12 @@ static void XGI_SetXG27CRTC(unsigned short RefreshRateTableIndex,
Tempax >>= 2; /* Tempax[0]: VRS[8] */
/* SR35[0]: VRS[8] */
xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x01, Tempax);
- Tempcx |= (Tempax << 8); /* Tempcx <= VRS[8:0] */
- Tempcx |= ((Tempbx & 0x80) << 2); /* Tempcx <= VRS[9:0] */
+ Tempcx |= Tempax << 8; /* Tempcx <= VRS[8:0] */
+ Tempcx |= (Tempbx & 0x80) << 2; /* Tempcx <= VRS[9:0] */
/* Tempax: SR0A */
Tempax = XGI_CRT1Table[index].CR[14];
Tempax &= 0x08; /* SR0A[3] VRS[10] */
- Tempcx |= (Tempax << 7); /* Tempcx <= VRS[10:0] */
+ Tempcx |= Tempax << 7; /* Tempcx <= VRS[10:0] */
/* Tempax: CR11 VRE */
Tempax = XGI_CRT1Table[index].CR[11];
@@ -636,12 +637,12 @@ static void xgifb_set_lcd(int chip_id,
xgifb_reg_or(pVBInfo->P3c4, 0x35, 0x80);
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_UpdateXG21CRTC */
-/* Input : */
-/* Output : CRT1 CRTC */
-/* Description : Modify CRT1 Hsync/Vsync to fix LCD mode timing */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_UpdateXG21CRTC
+ * Input :
+ * Output : CRT1 CRTC
+ * Description : Modify CRT1 Hsync/Vsync to fix LCD mode timing
+ */
static void XGI_UpdateXG21CRTC(unsigned short ModeNo,
struct vb_device_info *pVBInfo,
unsigned short RefreshRateTableIndex)
@@ -665,19 +666,19 @@ static void XGI_UpdateXG21CRTC(unsigned short ModeNo,
if (index != -1) {
xgifb_reg_set(pVBInfo->P3d4, 0x02,
- XGI_UpdateCRT1Table[index].CR02);
+ XGI_UpdateCRT1Table[index].CR02);
xgifb_reg_set(pVBInfo->P3d4, 0x03,
- XGI_UpdateCRT1Table[index].CR03);
+ XGI_UpdateCRT1Table[index].CR03);
xgifb_reg_set(pVBInfo->P3d4, 0x15,
- XGI_UpdateCRT1Table[index].CR15);
+ XGI_UpdateCRT1Table[index].CR15);
xgifb_reg_set(pVBInfo->P3d4, 0x16,
- XGI_UpdateCRT1Table[index].CR16);
+ XGI_UpdateCRT1Table[index].CR16);
}
}
static void XGI_SetCRT1DE(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short resindex, tempax, tempbx, tempcx, temp, modeflag;
@@ -715,7 +716,7 @@ static void XGI_SetCRT1DE(unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->P3d4, 0x11, data); /* Unlock CRTC */
xgifb_reg_set(pVBInfo->P3d4, 0x01, (unsigned short)(tempcx & 0xff));
xgifb_reg_and_or(pVBInfo->P3d4, 0x0b, ~0x0c,
- (unsigned short)((tempcx & 0x0ff00) >> 10));
+ (unsigned short)((tempcx & 0x0ff00) >> 10));
xgifb_reg_set(pVBInfo->P3d4, 0x12, (unsigned short)(tempbx & 0xff));
tempax = 0;
tempbx >>= 8;
@@ -796,7 +797,7 @@ static void XGI_SetCRT1Offset(unsigned short ModeNo,
i |= temp;
xgifb_reg_set(pVBInfo->P3c4, 0x0E, i);
- temp = (unsigned char) temp2;
+ temp = (unsigned char)temp2;
temp &= 0xFF; /* al */
xgifb_reg_set(pVBInfo->P3d4, 0x13, temp);
@@ -822,15 +823,15 @@ static void XGI_SetCRT1Offset(unsigned short ModeNo,
}
static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short VCLKIndex, modeflag;
/* si+Ext_ResInfo */
modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) { /*301b*/
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) { /* 301b */
if (pVBInfo->LCDResInfo != Panel_1024x768)
/* LCDXlat2VCLK */
VCLKIndex = VCLK108_2_315 + 5;
@@ -951,8 +952,8 @@ static void XGI_SetCRT1FIFO(struct xgi_hw_device_info *HwDeviceExtension,
}
static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short data, data2 = 0;
short VCLK;
@@ -989,9 +990,9 @@ static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension,
}
static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short data, data2, data3, infoflag = 0, modeflag, resindex,
xres;
@@ -1087,9 +1088,9 @@ static void XGI_WriteDAC(unsigned short dl,
else
swap(bl, bh);
}
- outb((unsigned short) dh, pVBInfo->P3c9);
- outb((unsigned short) bh, pVBInfo->P3c9);
- outb((unsigned short) bl, pVBInfo->P3c9);
+ outb((unsigned short)dh, pVBInfo->P3c9);
+ outb((unsigned short)bh, pVBInfo->P3c9);
+ outb((unsigned short)bl, pVBInfo->P3c9);
}
static void XGI_LoadDAC(struct vb_device_info *pVBInfo)
@@ -1187,8 +1188,8 @@ static void XGI_GetLVDSResInfo(unsigned short ModeIdIndex,
}
static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short i, tempdx, tempbx, modeflag;
@@ -1201,12 +1202,12 @@ static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table,
while (table[i].PANELID != 0xff) {
tempdx = pVBInfo->LCDResInfo;
if (tempbx & 0x0080) { /* OEMUtil */
- tempbx &= (~0x0080);
+ tempbx &= ~0x0080;
tempdx = pVBInfo->LCDTypeInfo;
}
if (pVBInfo->LCDInfo & EnableScalingLCD)
- tempdx &= (~PanelResInfo);
+ tempdx &= ~PanelResInfo;
if (table[i].PANELID == tempdx) {
tempbx = table[i].MASK;
@@ -1226,8 +1227,8 @@ static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table,
}
static struct SiS_TVData const *XGI_GetTVPtr(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short i, tempdx, tempal, modeflag;
@@ -1441,9 +1442,9 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
tempbx >>= 3;
xgifb_reg_set(pVBInfo->Part1Port, 0x16,
- (unsigned short) (tempbx & 0xff));
+ (unsigned short)(tempbx & 0xff));
xgifb_reg_set(pVBInfo->Part1Port, 0x17,
- (unsigned short) (tempcx & 0xff));
+ (unsigned short)(tempcx & 0xff));
tempax = pVBInfo->HT;
@@ -1469,7 +1470,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part1Port, 0x15, tempax);
xgifb_reg_set(pVBInfo->Part1Port, 0x14,
- (unsigned short) (tempbx & 0xff));
+ (unsigned short)(tempbx & 0xff));
tempax = pVBInfo->VT;
tempbx = LCDPtr1->LCDVDES;
@@ -1480,17 +1481,14 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
if (tempcx >= tempax)
tempcx -= tempax;
- xgifb_reg_set(pVBInfo->Part1Port, 0x1b,
- (unsigned short) (tempbx & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x1c,
- (unsigned short) (tempcx & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x1b, (unsigned short)(tempbx & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x1c, (unsigned short)(tempcx & 0xff));
tempbx = (tempbx >> 8) & 0x07;
tempcx = (tempcx >> 8) & 0x07;
- xgifb_reg_set(pVBInfo->Part1Port, 0x1d,
- (unsigned short) ((tempcx << 3)
- | tempbx));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x1d, (unsigned short)((tempcx << 3) |
+ tempbx));
tempax = pVBInfo->VT;
tempbx = LCDPtr1->LCDVRS;
@@ -1504,10 +1502,8 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
if (tempcx >= tempax)
tempcx -= tempax;
- xgifb_reg_set(pVBInfo->Part1Port, 0x18,
- (unsigned short) (tempbx & 0xff));
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, ~0x0f,
- (unsigned short) (tempcx & 0x0f));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x18, (unsigned short)(tempbx & 0xff));
+ xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, ~0x0f, (unsigned short)(tempcx & 0x0f));
tempax = ((tempbx >> 8) & 0x07) << 3;
@@ -1518,8 +1514,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
if (pVBInfo->LCDInfo & XGI_EnableLVDSDDA)
tempax |= 0x40;
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x1a, 0x07,
- tempax);
+ xgifb_reg_and_or(pVBInfo->Part1Port, 0x1a, 0x07, tempax);
tempbx = pVBInfo->VDE;
tempax = pVBInfo->VGAVDE;
@@ -1527,7 +1522,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
temp = tempax; /* 0430 ylshieh */
temp1 = (temp << 18) / tempbx;
- tempdx = (unsigned short) ((temp << 18) % tempbx);
+ tempdx = (unsigned short)((temp << 18) % tempbx);
if (tempdx != 0)
temp1 += 1;
@@ -1535,12 +1530,10 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
temp2 = temp1;
push3 = temp2;
- xgifb_reg_set(pVBInfo->Part1Port, 0x37,
- (unsigned short) (temp2 & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x36,
- (unsigned short) ((temp2 >> 8) & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x37, (unsigned short)(temp2 & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x36, (unsigned short)((temp2 >> 8) & 0xff));
- tempbx = (unsigned short) (temp2 >> 16);
+ tempbx = (unsigned short)(temp2 >> 16);
tempax = tempbx & 0x03;
tempbx = pVBInfo->VGAVDE;
@@ -1553,24 +1546,20 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
temp2 = push3;
xgifb_reg_set(pVBInfo->Part4Port,
0x3c,
- (unsigned short) (temp2 & 0xff));
+ (unsigned short)(temp2 & 0xff));
xgifb_reg_set(pVBInfo->Part4Port,
0x3b,
- (unsigned short) ((temp2 >> 8) &
+ (unsigned short)((temp2 >> 8) &
0xff));
- tempbx = (unsigned short) (temp2 >> 16);
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x3a,
- ~0xc0,
- (unsigned short) ((tempbx &
- 0xff) << 6));
+ tempbx = (unsigned short)(temp2 >> 16);
+ xgifb_reg_and_or(pVBInfo->Part4Port, 0x3a, ~0xc0,
+ (unsigned short)((tempbx & 0xff) << 6));
tempcx = pVBInfo->VGAVDE;
if (tempcx == pVBInfo->VDE)
- xgifb_reg_and_or(pVBInfo->Part4Port,
- 0x30, ~0x0c, 0x00);
+ xgifb_reg_and_or(pVBInfo->Part4Port, 0x30, ~0x0c, 0x00);
else
- xgifb_reg_and_or(pVBInfo->Part4Port,
- 0x30, ~0x0c, 0x08);
+ xgifb_reg_and_or(pVBInfo->Part4Port, 0x30, ~0x0c, 0x08);
}
tempcx = pVBInfo->VGAHDE;
@@ -1578,7 +1567,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
temp1 = tempcx << 16;
- tempax = (unsigned short) (temp1 / tempbx);
+ tempax = (unsigned short)(temp1 / tempbx);
if ((tempbx & 0xffff) == (tempcx & 0xffff))
tempax = 65535;
@@ -1592,42 +1581,38 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
temp3 = (temp3 & 0xffff0000) + (temp1 & 0xffff);
- tempax = (unsigned short) (temp3 & 0xff);
+ tempax = (unsigned short)(temp3 & 0xff);
xgifb_reg_set(pVBInfo->Part1Port, 0x1f, tempax);
temp1 = pVBInfo->VGAVDE << 18;
temp1 = temp1 / push3;
- tempbx = (unsigned short) (temp1 & 0xffff);
+ tempbx = (unsigned short)(temp1 & 0xffff);
if (pVBInfo->LCDResInfo == Panel_1024x768)
tempbx -= 1;
tempax = ((tempbx >> 8) & 0xff) << 3;
- tempax |= (unsigned short) ((temp3 >> 8) & 0x07);
- xgifb_reg_set(pVBInfo->Part1Port, 0x20,
- (unsigned short) (tempax & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x21,
- (unsigned short) (tempbx & 0xff));
+ tempax |= (unsigned short)((temp3 >> 8) & 0x07);
+ xgifb_reg_set(pVBInfo->Part1Port, 0x20, (unsigned short)(tempax & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x21, (unsigned short)(tempbx & 0xff));
temp3 >>= 16;
if (modeflag & HalfDCLK)
temp3 >>= 1;
- xgifb_reg_set(pVBInfo->Part1Port, 0x22,
- (unsigned short) ((temp3 >> 8) & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x23,
- (unsigned short) (temp3 & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x22, (unsigned short)((temp3 >> 8) & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x23, (unsigned short)(temp3 & 0xff));
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_GETLCDVCLKPtr */
-/* Input : */
-/* Output : al -> VCLK Index */
-/* Description : */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_GETLCDVCLKPtr
+ * Input :
+ * Output : al -> VCLK Index
+ * Description :
+ */
static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
unsigned short index;
@@ -1645,7 +1630,8 @@ static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
}
static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
- unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short index, modeflag;
unsigned char tempal;
@@ -1681,15 +1667,11 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
return tempal;
}
- if (pVBInfo->TVInfo & TVSetYPbPr750p) {
- tempal = XGI_YPbPr750pVCLK;
- return tempal;
- }
+ if (pVBInfo->TVInfo & TVSetYPbPr750p)
+ return XGI_YPbPr750pVCLK;
- if (pVBInfo->TVInfo & TVSetYPbPr525p) {
- tempal = YPbPr525pVCLK;
- return tempal;
- }
+ if (pVBInfo->TVInfo & TVSetYPbPr525p)
+ return YPbPr525pVCLK;
tempal = NTSC1024VCLK;
@@ -1705,12 +1687,11 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
} /* {End of VB} */
inb((pVBInfo->P3ca + 0x02));
- tempal = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
- return tempal;
+ return XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
}
static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
- unsigned char *di_1, struct vb_device_info *pVBInfo)
+ unsigned char *di_1, struct vb_device_info *pVBInfo)
{
if (pVBInfo->VBType & (VB_SIS301 | VB_SIS301B | VB_SIS302B
| VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
@@ -1726,8 +1707,8 @@ static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
}
static void XGI_SetCRT2ECLK(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned char di_0, di_1, tempal;
int i;
@@ -1738,7 +1719,7 @@ static void XGI_SetCRT2ECLK(unsigned short ModeIdIndex,
for (i = 0; i < 4; i++) {
xgifb_reg_and_or(pVBInfo->P3d4, 0x31, ~0x30,
- (unsigned short) (0x10 * i));
+ (unsigned short)(0x10 * i));
if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) &&
!(pVBInfo->VBInfo & SetInSlaveMode)) {
xgifb_reg_set(pVBInfo->P3c4, 0x2e, di_0);
@@ -1876,8 +1857,7 @@ finish:
pVBInfo->VBType = tempbx;
}
-static void XGI_GetVBInfo(unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_GetVBInfo(unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
unsigned short tempax, push, tempbx, temp, modeflag;
@@ -1921,7 +1901,7 @@ static void XGI_GetVBInfo(unsigned short ModeIdIndex,
tempbx |= SetCRT2ToHiVision;
if (temp != YPbPrMode1080i) {
- tempbx &= (~SetCRT2ToHiVision);
+ tempbx &= ~SetCRT2ToHiVision;
tempbx |= SetCRT2ToYPbPr525750;
}
}
@@ -2002,8 +1982,7 @@ static void XGI_GetVBInfo(unsigned short ModeIdIndex,
pVBInfo->VBInfo = tempbx;
}
-static void XGI_GetTVInfo(unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_GetTVInfo(unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
unsigned short tempbx = 0, resinfo = 0, modeflag, index1;
@@ -2078,7 +2057,7 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeIdIndex,
pVBInfo->LCDTypeInfo = 0;
pVBInfo->LCDInfo = 0;
- /* si+Ext_ResInfo // */
+ /* si+Ext_ResInfo */
resinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
temp = xgifb_reg_get(pVBInfo->P3d4, 0x36); /* Get LCD Res.Info */
tempbx = temp & 0x0F;
@@ -2175,12 +2154,12 @@ static unsigned char XG21GPIODataTransfer(unsigned char ujDate)
return ujRet;
}
-/*----------------------------------------------------------------------------*/
-/* output */
-/* bl[5] : LVDS signal */
-/* bl[1] : LVDS backlight */
-/* bl[0] : LVDS VDD */
-/*----------------------------------------------------------------------------*/
+/*
+ * output
+ * bl[5] : LVDS signal
+ * bl[1] : LVDS backlight
+ * bl[0] : LVDS VDD
+ */
static unsigned char XGI_XG21GetPSCValue(struct vb_device_info *pVBInfo)
{
unsigned char CR4A, temp;
@@ -2196,12 +2175,12 @@ static unsigned char XGI_XG21GetPSCValue(struct vb_device_info *pVBInfo)
return temp;
}
-/*----------------------------------------------------------------------------*/
-/* output */
-/* bl[5] : LVDS signal */
-/* bl[1] : LVDS backlight */
-/* bl[0] : LVDS VDD */
-/*----------------------------------------------------------------------------*/
+/*
+ * output
+ * bl[5] : LVDS signal
+ * bl[1] : LVDS backlight
+ * bl[0] : LVDS VDD
+ */
static unsigned char XGI_XG27GetPSCValue(struct vb_device_info *pVBInfo)
{
unsigned char CR4A, CRB4, temp;
@@ -2219,17 +2198,17 @@ static unsigned char XGI_XG27GetPSCValue(struct vb_device_info *pVBInfo)
return temp;
}
-/*----------------------------------------------------------------------------*/
-/* input */
-/* bl[5] : 1;LVDS signal on */
-/* bl[1] : 1;LVDS backlight on */
-/* bl[0] : 1:LVDS VDD on */
-/* bh: 100000b : clear bit 5, to set bit5 */
-/* 000010b : clear bit 1, to set bit1 */
-/* 000001b : clear bit 0, to set bit0 */
-/*----------------------------------------------------------------------------*/
+/*
+ * input
+ * bl[5] : 1;LVDS signal on
+ * bl[1] : 1;LVDS backlight on
+ * bl[0] : 1:LVDS VDD on
+ * bh: 100000b : clear bit 5, to set bit5
+ * 000010b : clear bit 1, to set bit1
+ * 000001b : clear bit 0, to set bit0
+ */
static void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
unsigned char CR4A, temp;
@@ -2254,7 +2233,7 @@ static void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
}
static void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
unsigned char CR4A, temp;
unsigned short tempbh0, tempbl0;
@@ -2284,8 +2263,8 @@ static void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
}
static void XGI_DisplayOn(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *pXGIHWDE,
- struct vb_device_info *pVBInfo)
+ struct xgi_hw_device_info *pXGIHWDE,
+ struct vb_device_info *pVBInfo)
{
xgifb_reg_and_or(pVBInfo->P3c4, 0x01, 0xDF, 0x00);
if (pXGIHWDE->jChipType == XG21) {
@@ -2328,8 +2307,8 @@ static void XGI_DisplayOn(struct xgifb_video_info *xgifb_info,
}
void XGI_DisplayOff(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *pXGIHWDE,
- struct vb_device_info *pVBInfo)
+ struct xgi_hw_device_info *pXGIHWDE,
+ struct vb_device_info *pVBInfo)
{
if (pXGIHWDE->jChipType == XG21) {
if (pVBInfo->IF_DEF_LVDS == 1) {
@@ -2448,7 +2427,7 @@ exit:
static unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo)
{
if ((pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) &&
- (pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
+ (pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
return 1;
return 0;
@@ -2466,16 +2445,15 @@ static void XGI_GetRAMDAC2DATA(unsigned short ModeIdIndex,
modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
CRT1Index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
CRT1Index &= IndexMask;
- temp1 = (unsigned short) XGI_CRT1Table[CRT1Index].CR[0];
- temp2 = (unsigned short) XGI_CRT1Table[CRT1Index].CR[5];
+ temp1 = (unsigned short)XGI_CRT1Table[CRT1Index].CR[0];
+ temp2 = (unsigned short)XGI_CRT1Table[CRT1Index].CR[5];
tempax = (temp1 & 0xFF) | ((temp2 & 0x03) << 8);
- tempbx = (unsigned short) XGI_CRT1Table[CRT1Index].CR[8];
- tempcx = (unsigned short)
- XGI_CRT1Table[CRT1Index].CR[14] << 8;
+ tempbx = (unsigned short)XGI_CRT1Table[CRT1Index].CR[8];
+ tempcx = (unsigned short)XGI_CRT1Table[CRT1Index].CR[14] << 8;
tempcx &= 0x0100;
tempcx <<= 2;
tempbx |= tempcx;
- temp1 = (unsigned short) XGI_CRT1Table[CRT1Index].CR[9];
+ temp1 = (unsigned short)XGI_CRT1Table[CRT1Index].CR[9];
if (temp1 & 0x01)
tempbx |= 0x0100;
@@ -2497,8 +2475,8 @@ static void XGI_GetRAMDAC2DATA(unsigned short ModeIdIndex,
}
static void XGI_GetCRT2Data(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short tempax = 0, tempbx = 0, modeflag, resinfo;
@@ -2667,8 +2645,8 @@ static void XGI_GetCRT2Data(unsigned short ModeIdIndex,
}
static void XGI_SetCRT2VCLK(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned char di_0, di_1, tempal;
@@ -2739,9 +2717,9 @@ static unsigned short XGI_GetOffset(unsigned short ModeNo,
}
static void XGI_SetCRT2Offset(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short offset;
unsigned char temp;
@@ -2750,11 +2728,11 @@ static void XGI_SetCRT2Offset(unsigned short ModeNo,
return;
offset = XGI_GetOffset(ModeNo, ModeIdIndex, RefreshRateTableIndex);
- temp = (unsigned char) (offset & 0xFF);
+ temp = (unsigned char)(offset & 0xFF);
xgifb_reg_set(pVBInfo->Part1Port, 0x07, temp);
- temp = (unsigned char) ((offset & 0xFF00) >> 8);
+ temp = (unsigned char)((offset & 0xFF00) >> 8);
xgifb_reg_set(pVBInfo->Part1Port, 0x09, temp);
- temp = (unsigned char) (((offset >> 3) & 0xFF) + 1);
+ temp = (unsigned char)(((offset >> 3) & 0xFF) + 1);
xgifb_reg_set(pVBInfo->Part1Port, 0x03, temp);
}
@@ -2767,8 +2745,8 @@ static void XGI_SetCRT2FIFO(struct vb_device_info *pVBInfo)
}
static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
u8 tempcx;
@@ -2783,8 +2761,8 @@ static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
}
static void XGI_SetGroup1(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short temp = 0, tempax = 0, tempbx = 0, tempcx = 0,
pushbx = 0, CRT1Index, modeflag;
@@ -2933,11 +2911,11 @@ static unsigned short XGI_GetVGAHT2(struct vb_device_info *pVBInfo)
tempax = (pVBInfo->VT - pVBInfo->VDE) * pVBInfo->RVBHCFACT;
tempax = (tempax * pVBInfo->HT) / tempbx;
- return (unsigned short) tempax;
+ return (unsigned short)tempax;
}
static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
unsigned short push1, push2, tempax, tempbx = 0, tempcx, temp, resinfo,
modeflag;
@@ -3044,14 +3022,14 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
if (ModeNo == 0x50) {
if (pVBInfo->TVInfo == SetNTSCTV) {
xgifb_reg_set(pVBInfo->Part1Port,
- 0x07, 0x30);
+ 0x07, 0x30);
xgifb_reg_set(pVBInfo->Part1Port,
- 0x08, 0x03);
+ 0x08, 0x03);
} else {
xgifb_reg_set(pVBInfo->Part1Port,
- 0x07, 0x2f);
+ 0x07, 0x2f);
xgifb_reg_set(pVBInfo->Part1Port,
- 0x08, 0x02);
+ 0x08, 0x02);
}
}
}
@@ -3064,7 +3042,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = pVBInfo->VGAVT;
push1 = tempbx;
tempcx = 0x121;
- tempbx = pVBInfo->VGAVDE; /* 0x0E Virtical Display End */
+ tempbx = pVBInfo->VGAVDE; /* 0x0E Vertical Display End */
if (tempbx == 357)
tempbx = 350;
@@ -3116,7 +3094,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
if (tempbx & 0x0400)
tempcx |= 0x0600;
- /* 0x11 Vertival Blank End */
+ /* 0x11 Vertical Blank End */
xgifb_reg_set(pVBInfo->Part1Port, 0x11, 0x00);
tempax = push1;
@@ -3227,7 +3205,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
}
static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
unsigned short i, j, tempax, tempbx, tempcx, temp, push1, push2,
modeflag;
@@ -3315,7 +3293,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax = (tempax & 0x00FF) | ((tempax & 0x00FF) << 8);
push1 = tempax;
temp = (tempax & 0xFF00) >> 8;
- temp += (unsigned short) TimingPoint[0];
+ temp += (unsigned short)TimingPoint[0];
if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
| VB_SIS302LV | VB_XGI301C)) {
@@ -3526,7 +3504,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
tempcx = 0x0101;
- if (pVBInfo->VBInfo & SetCRT2ToTV) { /*301b*/
+ if (pVBInfo->VBInfo & SetCRT2ToTV) { /* 301b */
if (pVBInfo->VGAHDE >= 1024) {
tempcx = 0x1920;
if (pVBInfo->VGAHDE >= 1280) {
@@ -3562,7 +3540,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
if (temp2 != 0)
tempeax += 1;
- tempax = (unsigned short) tempeax;
+ tempax = (unsigned short)tempeax;
/* 301b */
if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
@@ -3572,9 +3550,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
/* end 301b */
tempbx = push1;
- tempbx = (unsigned short) (((tempeax & 0x0000FF00) & 0x1F00)
+ tempbx = (unsigned short)(((tempeax & 0x0000FF00) & 0x1F00)
| (tempbx & 0x00FF));
- tempax = (unsigned short) (((tempeax & 0x000000FF) << 8)
+ tempax = (unsigned short)(((tempeax & 0x000000FF) << 8)
| (tempax & 0x00FF));
temp = (tempax & 0xFF00) >> 8;
} else {
@@ -3622,14 +3600,14 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part2Port, 0x4d, temp);
temp = xgifb_reg_get(pVBInfo->Part2Port, 0x43); /* 301b change */
- xgifb_reg_set(pVBInfo->Part2Port, 0x43, (unsigned short) (temp - 3));
+ xgifb_reg_set(pVBInfo->Part2Port, 0x43, (unsigned short)(temp - 3));
if (!(pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))) {
if (pVBInfo->TVInfo & NTSC1024x768) {
TimingPoint = XGI_NTSC1024AdjTime;
for (i = 0x1c, j = 0; i <= 0x30; i++, j++) {
xgifb_reg_set(pVBInfo->Part2Port, i,
- TimingPoint[j]);
+ TimingPoint[j]);
}
xgifb_reg_set(pVBInfo->Part2Port, 0x43, 0x72);
}
@@ -3639,7 +3617,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBType & VB_XGI301C) {
if (pVBInfo->TVInfo & TVSetPALM)
xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x08,
- 0x08); /* PALM Mode */
+ 0x08); /* PALM Mode */
}
if (pVBInfo->TVInfo & TVSetPALM) {
@@ -3656,8 +3634,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
-static void XGI_SetLCDRegs(unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_SetLCDRegs(unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
unsigned short pushbx, tempax, tempbx, tempcx, temp, tempah,
tempbh, tempch;
@@ -3853,12 +3830,12 @@ static void XGI_SetLCDRegs(unsigned short ModeIdIndex,
}
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_GetTap4Ptr */
-/* Input : */
-/* Output : di -> Tap4 Reg. Setting Pointer */
-/* Description : */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_GetTap4Ptr
+ * Input :
+ * Output : di -> Tap4 Reg. Setting Pointer
+ * Description :
+ */
static struct XGI301C_Tap4TimingStruct const
*XGI_GetTap4Ptr(unsigned short tempcx, struct vb_device_info *pVBInfo)
{
@@ -3882,7 +3859,7 @@ static struct XGI301C_Tap4TimingStruct const
if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
if ((pVBInfo->TVInfo & TVSetYPbPr525i) ||
- (pVBInfo->TVInfo & TVSetYPbPr525p))
+ (pVBInfo->TVInfo & TVSetYPbPr525p))
Tap4TimingPtr = xgifb_ntsc_525_tap4_timing;
if (pVBInfo->TVInfo & TVSetYPbPr750p)
Tap4TimingPtr = YPbPr750pTap4Timing;
@@ -3988,8 +3965,8 @@ static void XGI_SetGroup3(unsigned short ModeIdIndex,
}
static void XGI_SetGroup4(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short tempax, tempcx, tempbx, modeflag, temp, temp2;
@@ -4080,12 +4057,12 @@ static void XGI_SetGroup4(unsigned short ModeIdIndex,
if (templong != 0)
tempebx++;
- temp = (unsigned short) (tempebx & 0x000000FF);
+ temp = (unsigned short)(tempebx & 0x000000FF);
xgifb_reg_set(pVBInfo->Part4Port, 0x1B, temp);
- temp = (unsigned short) ((tempebx & 0x0000FF00) >> 8);
+ temp = (unsigned short)((tempebx & 0x0000FF00) >> 8);
xgifb_reg_set(pVBInfo->Part4Port, 0x1A, temp);
- tempbx = (unsigned short) (tempebx >> 16);
+ tempbx = (unsigned short)(tempebx >> 16);
temp = tempbx & 0x00FF;
temp <<= 4;
temp |= ((tempcx & 0xFF00) >> 8);
@@ -4132,8 +4109,7 @@ static void XGI_SetGroup4(unsigned short ModeIdIndex,
| TVSetHiVision))) {
temp |= 0x0001;
if ((pVBInfo->VBInfo & SetInSlaveMode) &&
- !(pVBInfo->TVInfo
- & TVSimuMode))
+ !(pVBInfo->TVInfo & TVSimuMode))
temp &= (~0x0001);
}
}
@@ -4174,7 +4150,8 @@ static void XGI_DisableGatingCRT(struct vb_device_info *pVBInfo)
}
static unsigned char XGI_XG21CheckLVDSMode(struct xgifb_video_info *xgifb_info,
- unsigned short ModeNo, unsigned short ModeIdIndex)
+ unsigned short ModeNo,
+ unsigned short ModeIdIndex)
{
unsigned short xres, yres, colordepth, modeflag, resindex;
@@ -4221,7 +4198,7 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
unsigned short LVDSVT, LVDSVBS, LVDSVRS, LVDSVRE, LVDSVBE;
unsigned short value;
- temp = (unsigned char) ((xgifb_info->lvds_data.LVDS_Capability &
+ temp = (unsigned char)((xgifb_info->lvds_data.LVDS_Capability &
(LCDPolarity << 8)) >> 8);
temp &= LCDPolarity;
Miscdata = inb(pVBInfo->P3cc);
@@ -4354,12 +4331,12 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
if (chip_id == XG27) {
/* Panel VRS SR35[2:0] SR34[7:0] */
xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x07,
- (value & 0x700) >> 8);
+ (value & 0x700) >> 8);
xgifb_reg_set(pVBInfo->P3c4, 0x34, value & 0xFF);
} else {
/* Panel VRS SR3F[1:0] SR34[7:0] SR33[0] */
xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0x03,
- (value & 0x600) >> 9);
+ (value & 0x600) >> 9);
xgifb_reg_set(pVBInfo->P3c4, 0x34, (value >> 1) & 0xFF);
xgifb_reg_and_or(pVBInfo->P3d4, 0x33, ~0x01, value & 0x01);
}
@@ -4372,11 +4349,11 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
/* Panel VRE SR3F[7:2] */
if (chip_id == XG27)
xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC,
- (value << 2) & 0xFC);
+ (value << 2) & 0xFC);
else
/* SR3F[7] has to be 0, h/w bug */
xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC,
- (value << 2) & 0x7C);
+ (value << 2) & 0x7C);
for (temp = 0, value = 0; temp < 3; temp++) {
xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, value);
@@ -4400,13 +4377,13 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
}
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_IsLCDON */
-/* Input : */
-/* Output : 0 : Skip PSC Control */
-/* 1: Disable PSC */
-/* Description : */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_IsLCDON
+ * Input :
+ * Output : 0 : Skip PSC Control
+ * 1: Disable PSC
+ * Description :
+ */
static unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo)
{
unsigned short tempax;
@@ -4421,8 +4398,8 @@ static unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo)
}
static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
unsigned short tempah = 0;
@@ -4498,23 +4475,23 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
}
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_GetTVPtrIndex */
-/* Input : */
-/* Output : */
-/* Description : bx 0 : ExtNTSC */
-/* 1 : StNTSC */
-/* 2 : ExtPAL */
-/* 3 : StPAL */
-/* 4 : ExtHiTV */
-/* 5 : StHiTV */
-/* 6 : Ext525i */
-/* 7 : St525i */
-/* 8 : Ext525p */
-/* 9 : St525p */
-/* A : Ext750p */
-/* B : St750p */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_GetTVPtrIndex
+ * Input :
+ * Output :
+ * Description : bx 0 : ExtNTSC
+ * 1 : StNTSC
+ * 2 : ExtPAL
+ * 3 : StPAL
+ * 4 : ExtHiTV
+ * 5 : StHiTV
+ * 6 : Ext525i
+ * 7 : St525i
+ * 8 : Ext525p
+ * 9 : St525p
+ * A : Ext750p
+ * B : St750p
+ */
static unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo)
{
unsigned short tempbx = 0;
@@ -4535,24 +4512,24 @@ static unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo)
return tempbx;
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_GetTVPtrIndex2 */
-/* Input : */
-/* Output : bx 0 : NTSC */
-/* 1 : PAL */
-/* 2 : PALM */
-/* 3 : PALN */
-/* 4 : NTSC1024x768 */
-/* 5 : PAL-M 1024x768 */
-/* 6-7: reserved */
-/* cl 0 : YFilter1 */
-/* 1 : YFilter2 */
-/* ch 0 : 301A */
-/* 1 : 301B/302B/301LV/302LV */
-/* Description : */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_GetTVPtrIndex2
+ * Input :
+ * Output : bx 0 : NTSC
+ * 1 : PAL
+ * 2 : PALM
+ * 3 : PALN
+ * 4 : NTSC1024x768
+ * 5 : PAL-M 1024x768
+ * 6-7: reserved
+ * cl 0 : YFilter1
+ * 1 : YFilter2
+ * ch 0 : 301A
+ * 1 : 301B/302B/301LV/302LV
+ * Description :
+ */
static void XGI_GetTVPtrIndex2(unsigned short *tempbx, unsigned char *tempcl,
- unsigned char *tempch, struct vb_device_info *pVBInfo)
+ unsigned char *tempch, struct vb_device_info *pVBInfo)
{
*tempbx = 0;
*tempcl = 0;
@@ -4637,33 +4614,32 @@ static void XGI_SetLCDCap_A(unsigned short tempcx,
if (temp & LCDRGB18Bit) {
xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, 0x0F,
- /* Enable Dither */
- (unsigned short) (0x20 | (tempcx & 0x00C0)));
+ /* Enable Dither */
+ (unsigned short)(0x20 | (tempcx & 0x00C0)));
xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x80);
} else {
xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, 0x0F,
- (unsigned short) (0x30 | (tempcx & 0x00C0)));
+ (unsigned short)(0x30 | (tempcx & 0x00C0)));
xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x00);
}
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_SetLCDCap_B */
-/* Input : cx -> LCD Capability */
-/* Output : */
-/* Description : */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_SetLCDCap_B
+ * Input : cx -> LCD Capability
+ * Output :
+ * Description :
+ */
static void XGI_SetLCDCap_B(unsigned short tempcx,
struct vb_device_info *pVBInfo)
{
if (tempcx & EnableLCD24bpp) /* 24bits */
xgifb_reg_and_or(pVBInfo->Part2Port, 0x1A, 0xE0,
- (unsigned short) (((tempcx & 0x00ff) >> 6)
- | 0x0c));
+ (unsigned short)(((tempcx & 0x00ff) >> 6) | 0x0c));
else
xgifb_reg_and_or(pVBInfo->Part2Port, 0x1A, 0xE0,
- (unsigned short) (((tempcx & 0x00ff) >> 6)
- | 0x18)); /* Enable Dither */
+ (unsigned short)(((tempcx & 0x00ff) >> 6) | 0x18));
+ /* Enable Dither */
}
static void XGI_LongWait(struct vb_device_info *pVBInfo)
@@ -4698,13 +4674,13 @@ static void SetSpectrum(struct vb_device_info *pVBInfo)
XGI_LongWait(pVBInfo);
xgifb_reg_set(pVBInfo->Part4Port, 0x31,
- pVBInfo->LCDCapList[index].Spectrum_31);
+ pVBInfo->LCDCapList[index].Spectrum_31);
xgifb_reg_set(pVBInfo->Part4Port, 0x32,
- pVBInfo->LCDCapList[index].Spectrum_32);
+ pVBInfo->LCDCapList[index].Spectrum_32);
xgifb_reg_set(pVBInfo->Part4Port, 0x33,
- pVBInfo->LCDCapList[index].Spectrum_33);
+ pVBInfo->LCDCapList[index].Spectrum_33);
xgifb_reg_set(pVBInfo->Part4Port, 0x34,
- pVBInfo->LCDCapList[index].Spectrum_34);
+ pVBInfo->LCDCapList[index].Spectrum_34);
XGI_LongWait(pVBInfo);
xgifb_reg_or(pVBInfo->Part4Port, 0x30, 0x40); /* enable spectrum */
}
@@ -4721,13 +4697,13 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
(VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
/* Set 301LV Capability */
xgifb_reg_set(pVBInfo->Part4Port, 0x24,
- (unsigned char) (tempcx & 0x1F));
+ (unsigned char)(tempcx & 0x1F));
}
/* VB Driving */
xgifb_reg_and_or(pVBInfo->Part4Port, 0x0D,
- ~((EnableVBCLKDRVLOW | EnablePLLSPLOW) >> 8),
- (unsigned short) ((tempcx & (EnableVBCLKDRVLOW
- | EnablePLLSPLOW)) >> 8));
+ ~((EnableVBCLKDRVLOW | EnablePLLSPLOW) >> 8),
+ (unsigned short)((tempcx & (EnableVBCLKDRVLOW |
+ EnablePLLSPLOW)) >> 8));
if (pVBInfo->VBInfo & SetCRT2ToLCD)
XGI_SetLCDCap_B(tempcx, pVBInfo);
@@ -4744,12 +4720,12 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
}
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_SetAntiFlicker */
-/* Input : */
-/* Output : */
-/* Description : Set TV Customized Param. */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_SetAntiFlicker
+ * Input :
+ * Output :
+ * Description : Set TV Customized Param.
+ */
static void XGI_SetAntiFlicker(struct vb_device_info *pVBInfo)
{
unsigned short tempbx;
@@ -4792,13 +4768,13 @@ static void XGI_SetPhaseIncr(struct vb_device_info *pVBInfo)
XGI_GetTVPtrIndex2(&tempbx, &tempcl, &tempch, pVBInfo); /* bx, cl, ch */
tempData = TVPhaseList[tempbx];
- xgifb_reg_set(pVBInfo->Part2Port, 0x31, (unsigned short) (tempData
+ xgifb_reg_set(pVBInfo->Part2Port, 0x31, (unsigned short)(tempData
& 0x000000FF));
- xgifb_reg_set(pVBInfo->Part2Port, 0x32, (unsigned short) ((tempData
+ xgifb_reg_set(pVBInfo->Part2Port, 0x32, (unsigned short)((tempData
& 0x0000FF00) >> 8));
- xgifb_reg_set(pVBInfo->Part2Port, 0x33, (unsigned short) ((tempData
+ xgifb_reg_set(pVBInfo->Part2Port, 0x33, (unsigned short)((tempData
& 0x00FF0000) >> 16));
- xgifb_reg_set(pVBInfo->Part2Port, 0x34, (unsigned short) ((tempData
+ xgifb_reg_set(pVBInfo->Part2Port, 0x34, (unsigned short)((tempData
& 0xFF000000) >> 24));
}
@@ -4866,12 +4842,12 @@ static void XGI_SetYFilter(unsigned short ModeIdIndex,
}
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_OEM310Setting */
-/* Input : */
-/* Output : */
-/* Description : Customized Param. for 301 */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_OEM310Setting
+ * Input :
+ * Output :
+ * Description : Customized Param. for 301
+ */
static void XGI_OEM310Setting(unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
@@ -4890,12 +4866,12 @@ static void XGI_OEM310Setting(unsigned short ModeIdIndex,
}
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_SetCRT2ModeRegs */
-/* Input : */
-/* Output : */
-/* Description : Origin code for crt2group */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_SetCRT2ModeRegs
+ * Input :
+ * Output :
+ * Description : Origin code for crt2group
+ */
static void XGI_SetCRT2ModeRegs(struct vb_device_info *pVBInfo)
{
unsigned short tempbl;
@@ -4999,8 +4975,8 @@ reg_and_or:
tempah |= 0x40;
}
- if ((pVBInfo->LCDResInfo == Panel_1280x1024)
- || (pVBInfo->LCDResInfo == Panel_1280x1024x75))
+ if ((pVBInfo->LCDResInfo == Panel_1280x1024) ||
+ (pVBInfo->LCDResInfo == Panel_1280x1024x75))
tempah |= 0x80;
if (pVBInfo->LCDResInfo == Panel_1280x960)
@@ -5068,8 +5044,9 @@ void XGI_LockCRT2(struct vb_device_info *pVBInfo)
}
unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
- unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
const u8 LCDARefreshIndex[] = {
0x00, 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x00 };
@@ -5143,14 +5120,14 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
}
static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
unsigned short RefreshRateTableIndex;
pVBInfo->SetFlag |= ProgrammingCRT2;
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
- ModeIdIndex, pVBInfo);
+ ModeIdIndex, pVBInfo);
XGI_GetLVDSResInfo(ModeIdIndex, pVBInfo);
XGI_GetLVDSData(ModeIdIndex, pVBInfo);
XGI_ModCRT1Regs(ModeIdIndex, HwDeviceExtension, pVBInfo);
@@ -5159,8 +5136,8 @@ static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
}
static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
unsigned short ModeIdIndex, RefreshRateTableIndex;
@@ -5168,7 +5145,7 @@ static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
XGI_SearchModeID(ModeNo, &ModeIdIndex);
pVBInfo->SelectCRT2Rate = 4;
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
- ModeIdIndex, pVBInfo);
+ ModeIdIndex, pVBInfo);
XGI_SaveCRT2Info(ModeNo, pVBInfo);
XGI_GetCRT2ResInfo(ModeIdIndex, pVBInfo);
XGI_GetCRT2Data(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
@@ -5210,39 +5187,39 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
CR63 = xgifb_reg_get(pVBInfo->P3d4, 0x63);
SR01 = xgifb_reg_get(pVBInfo->P3c4, 0x01);
- xgifb_reg_set(pVBInfo->P3c4, 0x01, (unsigned char) (SR01 & 0xDF));
- xgifb_reg_set(pVBInfo->P3d4, 0x63, (unsigned char) (CR63 & 0xBF));
+ xgifb_reg_set(pVBInfo->P3c4, 0x01, (unsigned char)(SR01 & 0xDF));
+ xgifb_reg_set(pVBInfo->P3d4, 0x63, (unsigned char)(CR63 & 0xBF));
CR17 = xgifb_reg_get(pVBInfo->P3d4, 0x17);
- xgifb_reg_set(pVBInfo->P3d4, 0x17, (unsigned char) (CR17 | 0x80));
+ xgifb_reg_set(pVBInfo->P3d4, 0x17, (unsigned char)(CR17 | 0x80));
SR1F = xgifb_reg_get(pVBInfo->P3c4, 0x1F);
- xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char) (SR1F | 0x04));
+ xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char)(SR1F | 0x04));
SR07 = xgifb_reg_get(pVBInfo->P3c4, 0x07);
- xgifb_reg_set(pVBInfo->P3c4, 0x07, (unsigned char) (SR07 & 0xFB));
+ xgifb_reg_set(pVBInfo->P3c4, 0x07, (unsigned char)(SR07 & 0xFB));
SR06 = xgifb_reg_get(pVBInfo->P3c4, 0x06);
- xgifb_reg_set(pVBInfo->P3c4, 0x06, (unsigned char) (SR06 & 0xC3));
+ xgifb_reg_set(pVBInfo->P3c4, 0x06, (unsigned char)(SR06 & 0xC3));
xgifb_reg_set(pVBInfo->P3d4, 0x11, 0x00);
for (i = 0; i < 8; i++)
- xgifb_reg_set(pVBInfo->P3d4, (unsigned short) i, CRTCData[i]);
+ xgifb_reg_set(pVBInfo->P3d4, (unsigned short)i, CRTCData[i]);
for (i = 8; i < 11; i++)
- xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 8),
- CRTCData[i]);
+ xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 8),
+ CRTCData[i]);
for (i = 11; i < 13; i++)
- xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 4),
- CRTCData[i]);
+ xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 4),
+ CRTCData[i]);
for (i = 13; i < 16; i++)
- xgifb_reg_set(pVBInfo->P3c4, (unsigned short) (i - 3),
- CRTCData[i]);
+ xgifb_reg_set(pVBInfo->P3c4, (unsigned short)(i - 3),
+ CRTCData[i]);
- xgifb_reg_set(pVBInfo->P3c4, 0x0E, (unsigned char) (CRTCData[16]
- & 0xE0));
+ xgifb_reg_set(pVBInfo->P3c4, 0x0E, (unsigned char)(CRTCData[16]
+ & 0xE0));
xgifb_reg_set(pVBInfo->P3c4, 0x31, 0x00);
xgifb_reg_set(pVBInfo->P3c4, 0x2B, 0x1B);
@@ -5275,12 +5252,12 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
xgifb_reg_set(pVBInfo->P3d4, 0x53, (xgifb_reg_get(
pVBInfo->P3d4, 0x53) & 0xFD));
- xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char) SR1F);
+ xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char)SR1F);
}
static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
unsigned short tempah;
@@ -5310,11 +5287,11 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
if (!(pVBInfo->VBInfo & DisableCRT2Display)) {
xgifb_reg_and_or(pVBInfo->Part2Port, 0x00, ~0xE0,
- 0x20); /* shampoo 0129 */
+ 0x20); /* shampoo 0129 */
if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
if (pVBInfo->VBInfo &
(SetCRT2ToLCD | XGI_SetCRT2ToLCDA))
- /* LVDS PLL power on */
+ /* LVDS PLL power on */
xgifb_reg_and(pVBInfo->Part4Port, 0x2A,
0x7F);
/* LVDS Driver power on */
@@ -5358,9 +5335,9 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
}
static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+ struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned short ModeNo, unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short RefreshRateTableIndex, temp;
@@ -5389,14 +5366,14 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
}
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
- ModeIdIndex, pVBInfo);
+ ModeIdIndex, pVBInfo);
if (RefreshRateTableIndex != 0xFFFF) {
XGI_SetSync(RefreshRateTableIndex, pVBInfo);
XGI_SetCRT1CRTC(ModeIdIndex, RefreshRateTableIndex,
pVBInfo, HwDeviceExtension);
XGI_SetCRT1DE(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
XGI_SetCRT1Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex,
- HwDeviceExtension, pVBInfo);
+ HwDeviceExtension, pVBInfo);
XGI_SetCRT1VCLK(ModeIdIndex, HwDeviceExtension,
RefreshRateTableIndex, pVBInfo);
}
@@ -5410,15 +5387,15 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
XGI_SetXG21CRTC(RefreshRateTableIndex, pVBInfo);
XGI_UpdateXG21CRTC(ModeNo, pVBInfo,
- RefreshRateTableIndex);
+ RefreshRateTableIndex);
xgifb_set_lcd(HwDeviceExtension->jChipType,
pVBInfo, RefreshRateTableIndex);
if (pVBInfo->IF_DEF_LVDS == 1)
xgifb_set_lvds(xgifb_info,
- HwDeviceExtension->jChipType,
- ModeIdIndex, pVBInfo);
+ HwDeviceExtension->jChipType,
+ ModeIdIndex, pVBInfo);
}
}
@@ -5430,8 +5407,8 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
}
unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeNo)
+ struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned short ModeNo)
{
unsigned short ModeIdIndex;
struct vb_device_info VBINF;
@@ -5440,7 +5417,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
pVBInfo->IF_DEF_LVDS = 0;
if (HwDeviceExtension->jChipType >= XG20)
- pVBInfo->VBType = 0; /*set VBType default 0*/
+ pVBInfo->VBType = 0; /* set VBType default 0 */
XGIRegInit(pVBInfo, xgifb_info->vga_base);
@@ -5473,13 +5450,13 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
XGI_DisableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA) ||
- !(pVBInfo->VBInfo & SwitchCRT2)) {
+ !(pVBInfo->VBInfo & SwitchCRT2)) {
XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
- ModeIdIndex, pVBInfo);
+ ModeIdIndex, pVBInfo);
if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
- HwDeviceExtension, pVBInfo);
+ HwDeviceExtension, pVBInfo);
}
}
@@ -5488,7 +5465,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
case VB_CHIP_301: /* fall through */
case VB_CHIP_302:
XGI_SetCRT2Group301(ModeNo, HwDeviceExtension,
- pVBInfo); /*add for CRT2 */
+ pVBInfo); /* add for CRT2 */
break;
default:
@@ -5497,7 +5474,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
}
XGI_SetCRT2ModeRegs(pVBInfo);
- XGI_OEM310Setting(ModeIdIndex, pVBInfo); /*0212*/
+ XGI_OEM310Setting(ModeIdIndex, pVBInfo); /* 0212 */
XGI_EnableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
} /* !XG20 */
else {
@@ -5515,7 +5492,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
- ModeIdIndex, pVBInfo);
+ ModeIdIndex, pVBInfo);
XGI_DisplayOn(xgifb_info, HwDeviceExtension, pVBInfo);
}
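
Almost everything in the vb_setmode.c hunks above is checkpatch-driven cleanup: no space between a cast and its operand, continuation arguments aligned under the opening parenthesis, spaces around binary operators, and single block comments instead of stacked one-line ones. A minimal stand-alone sketch of the same conventions (the helper and its names are made up for illustration, not part of the driver):

    /*
     * Split a 16-bit register offset into its low byte and high byte
     * and combine them, kernel-style.
     */
    static unsigned char pack_offset(unsigned short offset)
    {
            unsigned char low = (unsigned char)(offset & 0xFF);   /* no space after the cast */
            unsigned char high = (unsigned char)((offset >> 8) & 0xFF);

            return (unsigned char)(low + high);   /* spaces around binary operators */
    }
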
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index c801deb142f6..f9f98e06e6d5 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -1701,6 +1701,7 @@ static const struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_1_Vx75[] = {
{ {0x28, 0xF5, 0x00, 0x84, 0xFF, 0x29, 0x90} },/* ; 04 (x768) */
{ {0x28, 0x5A, 0x13, 0x87, 0xFF, 0x29, 0xA9} } /* ; 05 (x1024) */
};
+
/* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
static const struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_2_Hx75[] = {
{ {0x7E, 0x3B, 0x9A, 0x44, 0x12, 0x00, 0x01, 0x00} },/* ; 00 (320x) */
@@ -1886,17 +1887,17 @@ static const struct XGI330_LCDCapStruct XGI_LCDDLCapList[] = {
0x6C, 0xC3, 0x35, 0x62,
0x0A, 0xC0, 0x28, 0x10},
/* LCDCap1280x1024 */
- {Panel_1280x1024, XGI_LCDDualLink+DefaultLCDCap,
+ {Panel_1280x1024, XGI_LCDDualLink + DefaultLCDCap,
0x70, 0x03, VCLK108_2_315,
0x70, 0x44, 0xF8, 0x2F,
0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1400x1050 */
- {Panel_1400x1050, XGI_LCDDualLink+DefaultLCDCap,
+ {Panel_1400x1050, XGI_LCDDualLink + DefaultLCDCap,
0x70, 0x03, VCLK108_2_315,
0x70, 0x44, 0xF8, 0x2F,
0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1600x1200 */
- {Panel_1600x1200, XGI_LCDDualLink+DefaultLCDCap,
+ {Panel_1600x1200, XGI_LCDDualLink + DefaultLCDCap,
0xC0, 0x03, VCLK162,
0x43, 0x22, 0x70, 0x24,
0x0A, 0xC0, 0x30, 0x10},
@@ -1905,7 +1906,7 @@ static const struct XGI330_LCDCapStruct XGI_LCDDLCapList[] = {
0x2B, 0x61, 0x2B, 0x61,
0x0A, 0xC0, 0x28, 0x10},
/* LCDCap1280x1024x75 */
- {Panel_1280x1024x75, XGI_LCDDualLink+DefaultLCDCap,
+ {Panel_1280x1024x75, XGI_LCDDualLink + DefaultLCDCap,
0x90, 0x03, VCLK135_5,
0x54, 0x42, 0x4A, 0x61,
0x0A, 0xC0, 0x30, 0x10},
diff --git a/drivers/staging/xgifb/vb_util.h b/drivers/staging/xgifb/vb_util.h
index 08db58b396b2..052694e75053 100644
--- a/drivers/staging/xgifb/vb_util.h
+++ b/drivers/staging/xgifb/vb_util.h
@@ -18,7 +18,7 @@ static inline void xgifb_reg_and_or(unsigned long port, u8 index,
u8 temp;
temp = xgifb_reg_get(port, index);
- temp = (u8) ((temp & data_and) | data_or);
+ temp = (u8)((temp & data_and) | data_or);
xgifb_reg_set(port, index, temp);
}
@@ -28,7 +28,7 @@ static inline void xgifb_reg_and(unsigned long port, u8 index,
u8 temp;
temp = xgifb_reg_get(port, index);
- temp = (u8) (temp & data_and);
+ temp = (u8)(temp & data_and);
xgifb_reg_set(port, index, temp);
}
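
The xgifb_reg_and_or() and xgifb_reg_and() helpers above are read-modify-write wrappers over an indexed register pair: xgifb_reg_get()/xgifb_reg_set() select the register by writing its index to `port` and move data through `port + 1`. A hedged sketch of that underlying pattern using the raw port accessors (the helper name and port layout are illustrative):

    #include <linux/io.h>
    #include <linux/types.h>

    /* Illustrative read-modify-write on an indexed (VGA-style) register pair. */
    static inline void indexed_reg_and_or(unsigned long port, u8 index,
                                          u8 data_and, u8 data_or)
    {
            u8 temp;

            outb(index, port);              /* select the register           */
            temp = inb(port + 1);           /* read the current value        */
            temp = (u8)((temp & data_and) | data_or);
            outb(index, port);              /* re-select before writing back */
            outb(temp, port + 1);
    }
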
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 31a096aa16ab..d8a16ca6baa5 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -137,7 +137,7 @@ static int target_fabric_mappedlun_link(
return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access_ro);
}
-static int target_fabric_mappedlun_unlink(
+static void target_fabric_mappedlun_unlink(
struct config_item *lun_acl_ci,
struct config_item *lun_ci)
{
@@ -146,7 +146,7 @@ static int target_fabric_mappedlun_unlink(
struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group);
- return core_dev_del_initiator_node_lun_acl(lun, lacl);
+ core_dev_del_initiator_node_lun_acl(lun, lacl);
}
static struct se_lun_acl *item_to_lun_acl(struct config_item *item)
@@ -669,7 +669,7 @@ out:
return ret;
}
-static int target_fabric_port_unlink(
+static void target_fabric_port_unlink(
struct config_item *lun_ci,
struct config_item *se_dev_ci)
{
@@ -688,7 +688,6 @@ static int target_fabric_port_unlink(
}
core_dev_del_lun(se_tpg, lun);
- return 0;
}
static void target_fabric_port_release(struct config_item *item)
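
target_fabric_mappedlun_unlink() and target_fabric_port_unlink() now return void because the configfs ->drop_link() operation they are wired into no longer returns a value; by the time it runs the VFS has already committed to removing the symlink, so a failure could not be reported anyway. A hedged sketch of an item-operations table after the change (callback names are made up; the field names are the ones from include/linux/configfs.h as I understand them):

    #include <linux/configfs.h>

    static int example_allow_link(struct config_item *src, struct config_item *tgt)
    {
            return 0;       /* may still veto the new symlink with an errno */
    }

    static void example_drop_link(struct config_item *src, struct config_item *tgt)
    {
            /* cleanup only; a return value could not be acted upon here */
    }

    static struct configfs_item_operations example_item_ops = {
            .allow_link     = example_allow_link,
            .drop_link      = example_drop_link,
    };
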
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index ff5de9a96643..9af7842b8178 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -92,7 +92,7 @@ static void ft_free_cmd(struct ft_cmd *cmd)
fp = cmd->req_frame;
lport = fr_dev(fp);
if (fr_seq(fp))
- lport->tt.seq_release(fr_seq(fp));
+ fc_seq_release(fr_seq(fp));
fc_frame_free(fp);
percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
ft_sess_put(sess); /* undo get from lookup at recv */
@@ -161,11 +161,11 @@ int ft_queue_status(struct se_cmd *se_cmd)
/*
* Send response.
*/
- cmd->seq = lport->tt.seq_start_next(cmd->seq);
+ cmd->seq = fc_seq_start_next(cmd->seq);
fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
- rc = lport->tt.seq_send(lport, cmd->seq, fp);
+ rc = fc_seq_send(lport, cmd->seq, fp);
if (rc) {
pr_info_ratelimited("%s: Failed to send response frame %p, "
"xid <0x%x>\n", __func__, fp, ep->xid);
@@ -177,7 +177,7 @@ int ft_queue_status(struct se_cmd *se_cmd)
se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
return -ENOMEM;
}
- lport->tt.exch_done(cmd->seq);
+ fc_exch_done(cmd->seq);
/*
* Drop the extra ACK_KREF reference taken by target_submit_cmd()
* ahead of ft_check_stop_free() -> transport_generic_free_cmd()
@@ -221,7 +221,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
memset(txrdy, 0, sizeof(*txrdy));
txrdy->ft_burst_len = htonl(se_cmd->data_length);
- cmd->seq = lport->tt.seq_start_next(cmd->seq);
+ cmd->seq = fc_seq_start_next(cmd->seq);
fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
@@ -242,7 +242,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
cmd->was_ddp_setup = 1;
}
}
- lport->tt.seq_send(lport, cmd->seq, fp);
+ fc_seq_send(lport, cmd->seq, fp);
return 0;
}
@@ -323,8 +323,8 @@ static void ft_send_resp_status(struct fc_lport *lport,
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
sp = fr_seq(fp);
if (sp) {
- lport->tt.seq_send(lport, sp, fp);
- lport->tt.exch_done(sp);
+ fc_seq_send(lport, sp, fp);
+ fc_exch_done(sp);
} else {
lport->tt.frame_send(lport, fp);
}
@@ -461,7 +461,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
cmd->se_cmd.map_tag = tag;
cmd->sess = sess;
- cmd->seq = lport->tt.seq_assign(lport, fp);
+ cmd->seq = fc_seq_assign(lport, fp);
if (!cmd->seq) {
percpu_ida_free(&se_sess->sess_tag_pool, tag);
goto busy;
@@ -563,7 +563,7 @@ static void ft_send_work(struct work_struct *work)
task_attr = TCM_SIMPLE_TAG;
}
- fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
+ fc_seq_set_resp(cmd->seq, ft_recv_seq, cmd);
cmd->se_cmd.tag = fc_seq_exch(cmd->seq)->rxid;
/*
* Use a single se_cmd->cmd_kref as we expect to release se_cmd
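
The tfc_cmd.c hunks (and the tfc_io.c ones that follow) replace indirect calls through the libfc template, lport->tt.seq_send() and friends, with the exported functions fc_seq_send(), fc_seq_start_next(), fc_seq_assign(), fc_seq_release(), fc_seq_set_resp() and fc_exch_done(). A hedged sketch of the direct-call pattern on the response path (the wrapper function is illustrative; frame setup is elided):

    #include <scsi/libfc.h>

    /* Illustrative: open the next sequence and send one prepared frame on it. */
    static int example_send_on_seq(struct fc_lport *lport, struct fc_seq **seq,
                                   struct fc_frame *fp)
    {
            *seq = fc_seq_start_next(*seq);         /* was lport->tt.seq_start_next() */
            return fc_seq_send(lport, *seq, fp);    /* was lport->tt.seq_send()       */
    }
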
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 6f7c65abfe2a..1eb1f58e00e4 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -82,7 +82,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
ep = fc_seq_exch(cmd->seq);
lport = ep->lp;
- cmd->seq = lport->tt.seq_start_next(cmd->seq);
+ cmd->seq = fc_seq_start_next(cmd->seq);
remaining = se_cmd->data_length;
@@ -174,7 +174,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
f_ctl |= FC_FC_END_SEQ;
fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
FC_TYPE_FCP, f_ctl, fh_off);
- error = lport->tt.seq_send(lport, seq, fp);
+ error = fc_seq_send(lport, seq, fp);
if (error) {
pr_info_ratelimited("%s: Failed to send frame %p, "
"xid <0x%x>, remaining %zu, "
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index 86b996c702a0..75cf0691e6c5 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -1,11 +1,11 @@
/*
- * Thunderbolt Cactus Ridge driver - NHI registers
+ * Thunderbolt driver - NHI registers
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
*/
-#ifndef DSL3510_REGS_H_
-#define DSL3510_REGS_H_
+#ifndef NHI_REGS_H_
+#define NHI_REGS_H_
#include <linux/types.h>
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 208f573495dc..dfbb974927f2 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1012,8 +1012,6 @@ static int get_serial_info(struct tty_struct *tty, struct serial_state *state,
{
struct serial_struct tmp;
- if (!retinfo)
- return -EFAULT;
memset(&tmp, 0, sizeof(tmp));
tty_lock(tty);
tmp.line = tty->index;
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index d6fd0e802ef5..39b3723a32a6 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -63,44 +63,23 @@
#define VERSION_STRING DRIVER_DESC " 2.1d"
-/* Macros definitions */
-
/* Default debug printout level */
#define NOZOMI_DEBUG_LEVEL 0x00
-
-#define P_BUF_SIZE 128
-#define NFO(_err_flag_, args...) \
-do { \
- char tmp[P_BUF_SIZE]; \
- snprintf(tmp, sizeof(tmp), ##args); \
- printk(_err_flag_ "[%d] %s(): %s\n", __LINE__, \
- __func__, tmp); \
-} while (0)
-
-#define DBG1(args...) D_(0x01, ##args)
-#define DBG2(args...) D_(0x02, ##args)
-#define DBG3(args...) D_(0x04, ##args)
-#define DBG4(args...) D_(0x08, ##args)
-#define DBG5(args...) D_(0x10, ##args)
-#define DBG6(args...) D_(0x20, ##args)
-#define DBG7(args...) D_(0x40, ##args)
-#define DBG8(args...) D_(0x80, ##args)
-
-#ifdef DEBUG
-/* Do we need this settable at runtime? */
static int debug = NOZOMI_DEBUG_LEVEL;
+module_param(debug, int, S_IRUGO | S_IWUSR);
-#define D(lvl, args...) do \
- {if (lvl & debug) NFO(KERN_DEBUG, ##args); } \
- while (0)
-#define D_(lvl, args...) D(lvl, ##args)
-
-/* These printouts are always printed */
+/* Macro definitions */
+#define DBG_(lvl, fmt, args...) \
+do { \
+ if (lvl & debug) \
+ pr_debug("[%d] %s(): " fmt "\n", \
+ __LINE__, __func__, ##args); \
+} while (0)
-#else
-static int debug;
-#define D_(lvl, args...)
-#endif
+#define DBG1(args...) DBG_(0x01, ##args)
+#define DBG2(args...) DBG_(0x02, ##args)
+#define DBG3(args...) DBG_(0x04, ##args)
+#define DBG4(args...) DBG_(0x08, ##args)
/* TODO: rewrite to optimize macros... */
@@ -1320,7 +1299,7 @@ static ssize_t card_type_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", dc->card_type);
}
-static DEVICE_ATTR(card_type, S_IRUGO, card_type_show, NULL);
+static DEVICE_ATTR_RO(card_type);
static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1329,7 +1308,7 @@ static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", dc->open_ttys);
}
-static DEVICE_ATTR(open_ttys, S_IRUGO, open_ttys_show, NULL);
+static DEVICE_ATTR_RO(open_ttys);
static void make_sysfs_files(struct nozomi *dc)
{
@@ -1943,7 +1922,5 @@ static __exit void nozomi_exit(void)
module_init(nozomi_init);
module_exit(nozomi_exit);
-module_param(debug, int, S_IRUGO | S_IWUSR);
-
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION(DRIVER_DESC);
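
Two independent modernisations in the nozomi hunks above: the hand-rolled NFO()/D_() printout macros collapse into one DBG_() wrapper around pr_debug(), and the sysfs attributes move to DEVICE_ATTR_RO(), which declares a read-only (0444) attribute bound to a function literally named <name>_show(). A hedged sketch of the DEVICE_ATTR_RO pattern on a made-up attribute:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    /* DEVICE_ATTR_RO(link_state) requires a matching link_state_show(). */
    static ssize_t link_state_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "%d\n", 1);         /* placeholder value */
    }
    static DEVICE_ATTR_RO(link_state);              /* creates dev_attr_link_state */
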
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index b0cc47c77b40..d66c1edd9892 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -1189,8 +1189,6 @@ static int get_config(struct r_port *info, struct rocket_config __user *retinfo)
{
struct rocket_config tmp;
- if (!retinfo)
- return -EFAULT;
memset(&tmp, 0, sizeof (tmp));
mutex_lock(&info->port.mutex);
tmp.line = info->line;
@@ -1255,8 +1253,6 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
struct rocket_ports tmp;
int board;
- if (!retports)
- return -EFAULT;
memset(&tmp, 0, sizeof (tmp));
tmp.tty_major = rocket_driver->major;
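
As in amiserial.c above, the explicit NULL checks on retinfo/retports go away: the pointer comes straight from an ioctl argument, and the copy_to_user() at the end of each helper already fails cleanly for a NULL or otherwise invalid user pointer, which the helper turns into -EFAULT. A hedged sketch of the tail of such a helper (struct and names are illustrative):

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    struct example_config {
            int line;
            int flags;
    };

    /* copy_to_user() rejects a bad or NULL user pointer, so no pre-check is needed. */
    static int example_get_config(struct example_config __user *retinfo, int line)
    {
            struct example_config tmp = { .line = line };

            if (copy_to_user(retinfo, &tmp, sizeof(tmp)))
                    return -EFAULT;
            return 0;
    }
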
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index a697a8585ddc..ce8d4ffcc425 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -80,6 +80,7 @@ struct serial8250_config {
#define UART_CAP_RTOIE (1 << 13) /* UART needs IER bit 4 set (Xscale, Tegra) */
#define UART_CAP_HFIFO (1 << 14) /* UART has a "hidden" FIFO */
#define UART_CAP_RPM (1 << 15) /* Runtime PM is active while idle */
+#define UART_CAP_IRDA (1 << 16) /* UART supports IrDA line discipline */
#define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */
#define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */
@@ -129,8 +130,13 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value)
}
struct uart_8250_port *serial8250_get_port(int line);
+
void serial8250_rpm_get(struct uart_8250_port *p);
void serial8250_rpm_put(struct uart_8250_port *p);
+
+void serial8250_rpm_get_tx(struct uart_8250_port *p);
+void serial8250_rpm_put_tx(struct uart_8250_port *p);
+
int serial8250_em485_init(struct uart_8250_port *p);
void serial8250_em485_destroy(struct uart_8250_port *p);
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 240a361b674f..61569a765d9e 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -425,10 +425,10 @@ struct uart_8250_port *serial8250_get_port(int line)
EXPORT_SYMBOL_GPL(serial8250_get_port);
static void (*serial8250_isa_config)(int port, struct uart_port *up,
- unsigned short *capabilities);
+ u32 *capabilities);
void serial8250_set_isa_configurator(
- void (*v)(int port, struct uart_port *up, unsigned short *capabilities))
+ void (*v)(int port, struct uart_port *up, u32 *capabilities))
{
serial8250_isa_config = v;
}
@@ -830,6 +830,7 @@ static int serial8250_probe(struct platform_device *dev)
uart.port.handle_irq = p->handle_irq;
uart.port.handle_break = p->handle_break;
uart.port.set_termios = p->set_termios;
+ uart.port.set_ldisc = p->set_ldisc;
uart.port.get_mctrl = p->get_mctrl;
uart.port.pm = p->pm;
uart.port.dev = &dev->dev;
@@ -1023,6 +1024,8 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
/* Possibly override set_termios call */
if (up->port.set_termios)
uart->port.set_termios = up->port.set_termios;
+ if (up->port.set_ldisc)
+ uart->port.set_ldisc = up->port.set_ldisc;
if (up->port.get_mctrl)
uart->port.get_mctrl = up->port.get_mctrl;
if (up->port.set_mctrl)
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index fdbddbc6375d..26f17456b0d7 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -72,10 +72,15 @@ int serial8250_tx_dma(struct uart_8250_port *p)
struct dma_async_tx_descriptor *desc;
int ret;
- if (uart_tx_stopped(&p->port) || dma->tx_running ||
- uart_circ_empty(xmit))
+ if (dma->tx_running)
return 0;
+ if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
+ /* We have been called from __dma_tx_complete() */
+ serial8250_rpm_put_tx(p);
+ return 0;
+ }
+
dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
desc = dmaengine_prep_slave_single(dma->txchan,
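
serial8250_tx_dma() can be re-entered from __dma_tx_complete() with nothing left to transmit; in that case it must drop the TX runtime-PM reference taken when transmission started, which is what the new serial8250_rpm_put_tx() call does (the helpers are exported further down, in the 8250.h and 8250_port.c hunks). A hedged sketch of the intended get/put pairing (the two wrapper functions here are illustrative):

    #include "8250.h"       /* serial8250_rpm_get_tx()/serial8250_rpm_put_tx() */

    static void example_start_tx(struct uart_8250_port *up)
    {
            serial8250_rpm_get_tx(up);      /* keep the port awake while TX is active */
            /* ... start DMA or fill the FIFO ... */
    }

    static void example_tx_idle(struct uart_8250_port *up)
    {
            /* called once there is nothing more to send */
            serial8250_rpm_put_tx(up);      /* balance the earlier get */
    }
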
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 459d726f9d59..c89fafc972b6 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -53,6 +53,8 @@
/* Helper for fifo size calculation */
#define DW_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16)
+/* DesignWare specific register fields */
+#define DW_UART_MCR_SIRE BIT(6)
struct dw8250_data {
u8 usr_reg;
@@ -254,6 +256,22 @@ out:
serial8250_do_set_termios(p, termios, old);
}
+static void dw8250_set_ldisc(struct uart_port *p, struct ktermios *termios)
+{
+ struct uart_8250_port *up = up_to_u8250p(p);
+ unsigned int mcr = p->serial_in(p, UART_MCR);
+
+ if (up->capabilities & UART_CAP_IRDA) {
+ if (termios->c_line == N_IRDA)
+ mcr |= DW_UART_MCR_SIRE;
+ else
+ mcr &= ~DW_UART_MCR_SIRE;
+
+ p->serial_out(p, UART_MCR, mcr);
+ }
+ serial8250_do_set_ldisc(p, termios);
+}
+
/*
* dw8250_fallback_dma_filter will prevent the UART from getting just any free
* channel on platforms that have DMA engines, but don't have any channels
@@ -357,6 +375,9 @@ static void dw8250_setup_port(struct uart_port *p)
if (reg & DW_UART_CPR_AFCE_MODE)
up->capabilities |= UART_CAP_AFE;
+
+ if (reg & DW_UART_CPR_SIR_MODE)
+ up->capabilities |= UART_CAP_IRDA;
}
static int dw8250_probe(struct platform_device *pdev)
@@ -392,6 +413,7 @@ static int dw8250_probe(struct platform_device *pdev)
p->iotype = UPIO_MEM;
p->serial_in = dw8250_serial_in;
p->serial_out = dw8250_serial_out;
+ p->set_ldisc = dw8250_set_ldisc;
p->membase = devm_ioremap(dev, regs->start, resource_size(regs));
if (!p->membase)
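
With dw8250_set_ldisc() installed through the new uart_port->set_ldisc hook, the DesignWare SIR-enable bit in MCR follows whether the IrDA line discipline is attached to the port. From user space that switch is the ordinary TIOCSETD ioctl; a hedged sketch (the device path is illustrative):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/tty.h>          /* N_IRDA */

    int main(void)
    {
            int ldisc = N_IRDA;
            int fd = open("/dev/ttyS1", O_RDWR | O_NOCTTY);

            if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0) {
                    perror("setting N_IRDA");
                    return 1;
            }
            /* ... IrDA traffic; the core calls the port's set_ldisc() on the switch ... */
            close(fd);
            return 0;
    }
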
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 0facc789fe7d..b67e7a544935 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -21,8 +21,11 @@
#define EXIT_KEY 0xAA
#define CHIP_ID1 0x20
#define CHIP_ID2 0x21
-#define CHIP_ID_0 0x1602
-#define CHIP_ID_1 0x0501
+#define CHIP_ID_F81865 0x0407
+#define CHIP_ID_F81866 0x1010
+#define CHIP_ID_F81216AD 0x1602
+#define CHIP_ID_F81216H 0x0501
+#define CHIP_ID_F81216 0x0802
#define VENDOR_ID1 0x23
#define VENDOR_ID1_VAL 0x19
#define VENDOR_ID2 0x24
@@ -43,12 +46,60 @@
#define RXW4C_IRA BIT(3)
#define TXW4C_IRA BIT(2)
+#define FIFO_CTRL 0xF6
+#define FIFO_MODE_MASK (BIT(1) | BIT(0))
+#define FIFO_MODE_128 (BIT(1) | BIT(0))
+#define RXFTHR_MODE_MASK (BIT(5) | BIT(4))
+#define RXFTHR_MODE_4X BIT(5)
+
+#define F81216_LDN_LOW 0x0
+#define F81216_LDN_HIGH 0x4
+
+/*
+ * F81866 registers
+ *
+ * The IRQ setting mode of the F81866 is not the same as that of the F81216 series.
+ * Level/Low: IRQ_MODE0:0, IRQ_MODE1:0
+ * Edge/High: IRQ_MODE0:1, IRQ_MODE1:0
+ */
+#define F81866_IRQ_MODE 0xf0
+#define F81866_IRQ_SHARE BIT(0)
+#define F81866_IRQ_MODE0 BIT(1)
+
+#define F81866_FIFO_CTRL FIFO_CTRL
+#define F81866_IRQ_MODE1 BIT(3)
+
+#define F81866_LDN_LOW 0x10
+#define F81866_LDN_HIGH 0x16
+
struct fintek_8250 {
+ u16 pid;
u16 base_port;
u8 index;
u8 key;
};
+static u8 sio_read_reg(struct fintek_8250 *pdata, u8 reg)
+{
+ outb(reg, pdata->base_port + ADDR_PORT);
+ return inb(pdata->base_port + DATA_PORT);
+}
+
+static void sio_write_reg(struct fintek_8250 *pdata, u8 reg, u8 data)
+{
+ outb(reg, pdata->base_port + ADDR_PORT);
+ outb(data, pdata->base_port + DATA_PORT);
+}
+
+static void sio_write_mask_reg(struct fintek_8250 *pdata, u8 reg, u8 mask,
+ u8 data)
+{
+ u8 tmp;
+
+ tmp = (sio_read_reg(pdata, reg) & ~mask) | (mask & data);
+ sio_write_reg(pdata, reg, tmp);
+}
+
static int fintek_8250_enter_key(u16 base_port, u8 key)
{
if (!request_muxed_region(base_port, 2, "8250_fintek"))
@@ -66,29 +117,55 @@ static void fintek_8250_exit_key(u16 base_port)
release_region(base_port + ADDR_PORT, 2);
}
-static int fintek_8250_check_id(u16 base_port)
+static int fintek_8250_check_id(struct fintek_8250 *pdata)
{
u16 chip;
- outb(VENDOR_ID1, base_port + ADDR_PORT);
- if (inb(base_port + DATA_PORT) != VENDOR_ID1_VAL)
+ if (sio_read_reg(pdata, VENDOR_ID1) != VENDOR_ID1_VAL)
return -ENODEV;
- outb(VENDOR_ID2, base_port + ADDR_PORT);
- if (inb(base_port + DATA_PORT) != VENDOR_ID2_VAL)
+ if (sio_read_reg(pdata, VENDOR_ID2) != VENDOR_ID2_VAL)
return -ENODEV;
- outb(CHIP_ID1, base_port + ADDR_PORT);
- chip = inb(base_port + DATA_PORT);
- outb(CHIP_ID2, base_port + ADDR_PORT);
- chip |= inb(base_port + DATA_PORT) << 8;
-
- if (chip != CHIP_ID_0 && chip != CHIP_ID_1)
+ chip = sio_read_reg(pdata, CHIP_ID1);
+ chip |= sio_read_reg(pdata, CHIP_ID2) << 8;
+
+ switch (chip) {
+ case CHIP_ID_F81865:
+ case CHIP_ID_F81866:
+ case CHIP_ID_F81216AD:
+ case CHIP_ID_F81216H:
+ case CHIP_ID_F81216:
+ break;
+ default:
return -ENODEV;
+ }
+ pdata->pid = chip;
return 0;
}
+static int fintek_8250_get_ldn_range(struct fintek_8250 *pdata, int *min,
+ int *max)
+{
+ switch (pdata->pid) {
+ case CHIP_ID_F81865:
+ case CHIP_ID_F81866:
+ *min = F81866_LDN_LOW;
+ *max = F81866_LDN_HIGH;
+ return 0;
+
+ case CHIP_ID_F81216AD:
+ case CHIP_ID_F81216H:
+ case CHIP_ID_F81216:
+ *min = F81216_LDN_LOW;
+ *max = F81216_LDN_HIGH;
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
static int fintek_8250_rs485_config(struct uart_port *port,
struct serial_rs485 *rs485)
{
@@ -128,10 +205,8 @@ static int fintek_8250_rs485_config(struct uart_port *port,
if (fintek_8250_enter_key(pdata->base_port, pdata->key))
return -EBUSY;
- outb(LDN, pdata->base_port + ADDR_PORT);
- outb(pdata->index, pdata->base_port + DATA_PORT);
- outb(RS485, pdata->base_port + ADDR_PORT);
- outb(config, pdata->base_port + DATA_PORT);
+ sio_write_reg(pdata, LDN, pdata->index);
+ sio_write_reg(pdata, RS485, config);
fintek_8250_exit_key(pdata->base_port);
port->rs485 = *rs485;
@@ -139,40 +214,90 @@ static int fintek_8250_rs485_config(struct uart_port *port,
return 0;
}
-static int find_base_port(struct fintek_8250 *pdata, u16 io_address)
+static void fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool is_level)
+{
+ sio_write_reg(pdata, LDN, pdata->index);
+
+ switch (pdata->pid) {
+ case CHIP_ID_F81866:
+ sio_write_mask_reg(pdata, F81866_FIFO_CTRL, F81866_IRQ_MODE1,
+ 0);
+ /* fall through */
+ case CHIP_ID_F81865:
+ sio_write_mask_reg(pdata, F81866_IRQ_MODE, F81866_IRQ_SHARE,
+ F81866_IRQ_SHARE);
+ sio_write_mask_reg(pdata, F81866_IRQ_MODE, F81866_IRQ_MODE0,
+ is_level ? 0 : F81866_IRQ_MODE0);
+ break;
+
+ case CHIP_ID_F81216AD:
+ case CHIP_ID_F81216H:
+ case CHIP_ID_F81216:
+ sio_write_mask_reg(pdata, FINTEK_IRQ_MODE, IRQ_SHARE,
+ IRQ_SHARE);
+ sio_write_mask_reg(pdata, FINTEK_IRQ_MODE, IRQ_MODE_MASK,
+ is_level ? IRQ_LEVEL_LOW : IRQ_EDGE_HIGH);
+ break;
+ }
+}
+
+static void fintek_8250_set_max_fifo(struct fintek_8250 *pdata)
+{
+ switch (pdata->pid) {
+	case CHIP_ID_F81216H: /* 128-byte FIFO */
+ case CHIP_ID_F81866:
+ sio_write_mask_reg(pdata, FIFO_CTRL,
+ FIFO_MODE_MASK | RXFTHR_MODE_MASK,
+ FIFO_MODE_128 | RXFTHR_MODE_4X);
+ break;
+
+	default: /* Default 16-byte FIFO */
+ break;
+ }
+}
+
+static int probe_setup_port(struct fintek_8250 *pdata, u16 io_address,
+ unsigned int irq)
{
static const u16 addr[] = {0x4e, 0x2e};
static const u8 keys[] = {0x77, 0xa0, 0x87, 0x67};
- int i, j, k;
+ struct irq_data *irq_data;
+ bool level_mode = false;
+ int i, j, k, min, max;
for (i = 0; i < ARRAY_SIZE(addr); i++) {
for (j = 0; j < ARRAY_SIZE(keys); j++) {
+ pdata->base_port = addr[i];
+ pdata->key = keys[j];
if (fintek_8250_enter_key(addr[i], keys[j]))
continue;
- if (fintek_8250_check_id(addr[i])) {
+ if (fintek_8250_check_id(pdata) ||
+ fintek_8250_get_ldn_range(pdata, &min, &max)) {
fintek_8250_exit_key(addr[i]);
continue;
}
- for (k = 0; k < 4; k++) {
+ for (k = min; k < max; k++) {
u16 aux;
- outb(LDN, addr[i] + ADDR_PORT);
- outb(k, addr[i] + DATA_PORT);
-
- outb(IO_ADDR1, addr[i] + ADDR_PORT);
- aux = inb(addr[i] + DATA_PORT);
- outb(IO_ADDR2, addr[i] + ADDR_PORT);
- aux |= inb(addr[i] + DATA_PORT) << 8;
+ sio_write_reg(pdata, LDN, k);
+ aux = sio_read_reg(pdata, IO_ADDR1);
+ aux |= sio_read_reg(pdata, IO_ADDR2) << 8;
if (aux != io_address)
continue;
- fintek_8250_exit_key(addr[i]);
- pdata->key = keys[j];
- pdata->base_port = addr[i];
pdata->index = k;
+ irq_data = irq_get_irq_data(irq);
+ if (irq_data)
+ level_mode =
+ irqd_is_level_type(irq_data);
+
+ fintek_8250_set_irq_mode(pdata, level_mode);
+ fintek_8250_set_max_fifo(pdata);
+ fintek_8250_exit_key(addr[i]);
+
return 0;
}
@@ -183,39 +308,29 @@ static int find_base_port(struct fintek_8250 *pdata, u16 io_address)
return -ENODEV;
}
-static int fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool level_mode)
+static void fintek_8250_set_rs485_handler(struct uart_8250_port *uart)
{
- int status;
- u8 tmp;
-
- status = fintek_8250_enter_key(pdata->base_port, pdata->key);
- if (status)
- return status;
-
- outb(LDN, pdata->base_port + ADDR_PORT);
- outb(pdata->index, pdata->base_port + DATA_PORT);
-
- outb(FINTEK_IRQ_MODE, pdata->base_port + ADDR_PORT);
- tmp = inb(pdata->base_port + DATA_PORT);
-
- tmp &= ~IRQ_MODE_MASK;
- tmp |= IRQ_SHARE;
- if (!level_mode)
- tmp |= IRQ_EDGE_HIGH;
-
- outb(tmp, pdata->base_port + DATA_PORT);
- fintek_8250_exit_key(pdata->base_port);
- return 0;
+ struct fintek_8250 *pdata = uart->port.private_data;
+
+ switch (pdata->pid) {
+ case CHIP_ID_F81216AD:
+ case CHIP_ID_F81216H:
+ case CHIP_ID_F81866:
+ case CHIP_ID_F81865:
+ uart->port.rs485_config = fintek_8250_rs485_config;
+ break;
+
+	default: /* No RS485 auto-direction function */
+ break;
+ }
}
int fintek_8250_probe(struct uart_8250_port *uart)
{
struct fintek_8250 *pdata;
struct fintek_8250 probe_data;
- struct irq_data *irq_data = irq_get_irq_data(uart->port.irq);
- bool level_mode = irqd_is_level_type(irq_data);
- if (find_base_port(&probe_data, uart->port.iobase))
+ if (probe_setup_port(&probe_data, uart->port.iobase, uart->port.irq))
return -ENODEV;
pdata = devm_kzalloc(uart->port.dev, sizeof(*pdata), GFP_KERNEL);
@@ -223,8 +338,8 @@ int fintek_8250_probe(struct uart_8250_port *uart)
return -ENOMEM;
memcpy(pdata, &probe_data, sizeof(probe_data));
- uart->port.rs485_config = fintek_8250_rs485_config;
uart->port.private_data = pdata;
+ fintek_8250_set_rs485_handler(uart);
- return fintek_8250_set_irq_mode(pdata, level_mode);
+ return 0;
}
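
The sio_read_reg()/sio_write_reg()/sio_write_mask_reg() helpers above encode the usual Super-I/O configuration sequence: write a register index to the address port, move data through the port right after it, select the logical device through the LDN register, and keep the whole access between the enter-key handshake and the exit key. A hedged sketch of one complete read (base port, key and register numbers are illustrative, not taken from a datasheet):

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/ioport.h>
    #include <linux/types.h>

    #define EX_ADDR_PORT    0       /* index register at base + 0 */
    #define EX_DATA_PORT    1       /* data register at base + 1  */
    #define EX_LDN_REG      0x07    /* logical device select      */
    #define EX_EXIT_KEY     0xaa

    /* Illustrative: read one runtime register of logical device 'ldn'. */
    static int example_sio_read(u16 base, u8 key, u8 ldn, u8 reg, u8 *val)
    {
            if (!request_muxed_region(base, 2, "example_sio"))
                    return -EBUSY;

            outb(key, base + EX_ADDR_PORT);         /* enter key, written twice  */
            outb(key, base + EX_ADDR_PORT);

            outb(EX_LDN_REG, base + EX_ADDR_PORT);  /* select the logical device */
            outb(ldn, base + EX_DATA_PORT);

            outb(reg, base + EX_ADDR_PORT);         /* indexed read              */
            *val = inb(base + EX_DATA_PORT);

            outb(EX_EXIT_KEY, base + EX_ADDR_PORT); /* leave configuration mode  */
            release_region(base, 2);
            return 0;
    }
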
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index b9923464599f..f607946fd996 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -174,7 +174,7 @@ static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port)
int ret;
chip->dev = &pdev->dev;
- chip->irq = pdev->irq;
+ chip->irq = pci_irq_vector(pdev, 0);
chip->regs = pci_ioremap_bar(pdev, 1);
chip->pdata = &qrk_serial_dma_pdata;
@@ -183,6 +183,9 @@ static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port)
if (ret)
return;
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
/* Special DMA address for UART */
dma->rx_dma_addr = 0xfffff000;
dma->tx_dma_addr = 0xfffff000;
@@ -280,8 +283,6 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
- pci_set_master(pdev);
-
lpss = devm_kzalloc(&pdev->dev, sizeof(*lpss), GFP_KERNEL);
if (!lpss)
return -ENOMEM;
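
Taking the DMA interrupt from pci_irq_vector() means the controller gets the right Linux IRQ number whether the port ended up on MSI or legacy INTx, and bus mastering plus MWI are now enabled only on the DMA path that needs them. A hedged sketch of the allocate-then-query pattern (the allocation is assumed to happen earlier in probe; it is not visible in the hunk above):

    #include <linux/pci.h>

    /* Illustrative: one vector of any type, then look up its Linux IRQ number. */
    static int example_request_vector(struct pci_dev *pdev)
    {
            int ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);

            if (ret < 0)
                    return ret;

            return pci_irq_vector(pdev, 0);
    }
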
diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
index 39c2324484dd..ac013edf4992 100644
--- a/drivers/tty/serial/8250/8250_mid.c
+++ b/drivers/tty/serial/8250/8250_mid.c
@@ -303,10 +303,10 @@ static void mid8250_remove(struct pci_dev *pdev)
{
struct mid8250 *mid = pci_get_drvdata(pdev);
+ serial8250_unregister_port(mid->line);
+
if (mid->board->exit)
mid->board->exit(mid);
-
- serial8250_unregister_port(mid->line);
}
static const struct mid8250_board pnw_board = {
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 7a8b5fc81a19..d25ab1cd4295 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -332,8 +332,6 @@ static const struct of_device_id of_platform_serial_table[] = {
.data = (void *)PORT_ALTR_16550_F128, },
{ .compatible = "mrvl,mmp-uart",
.data = (void *)PORT_XSCALE, },
- { .compatible = "mrvl,pxa-uart",
- .data = (void *)PORT_XSCALE, },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, of_platform_serial_table);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index b98c1578f45a..aa0166b6d450 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -52,6 +52,7 @@ struct serial_private {
struct pci_dev *dev;
unsigned int nr;
struct pci_serial_quirk *quirk;
+ const struct pciserial_board *board;
int line[0];
};
@@ -1329,6 +1330,30 @@ static int pci_default_setup(struct serial_private *priv,
return setup_port(priv, port, bar, offset, board->reg_shift);
}
+static int pci_pericom_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+{
+ unsigned int bar, offset = board->first_offset, maxnr;
+
+ bar = FL_GET_BASE(board->flags);
+ if (board->flags & FL_BASE_BARS)
+ bar += idx;
+ else
+ offset += idx * board->uart_offset;
+
+	if (idx == 3)
+ offset = 0x38;
+
+ maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) >>
+ (board->reg_shift + 3);
+
+ if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr)
+ return 1;
+
+ return setup_port(priv, port, bar, offset, board->reg_shift);
+}
+
static int
ce4100_serial_setup(struct serial_private *priv,
const struct pciserial_board *board,
@@ -2096,6 +2121,16 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.exit = pci_plx9050_exit,
},
/*
+	 * Pericom (only the 7954 - it has an offset jump for port 4)
+ */
+ {
+ .vendor = PCI_VENDOR_ID_PERICOM,
+ .device = PCI_DEVICE_ID_PERICOM_PI7C9X7954,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ /*
* PLX
*/
{
@@ -3862,6 +3897,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
}
}
priv->nr = i;
+ priv->board = board;
return priv;
err_deinit:
@@ -3872,7 +3908,7 @@ err_out:
}
EXPORT_SYMBOL_GPL(pciserial_init_ports);
-void pciserial_remove_ports(struct serial_private *priv)
+void pciserial_detach_ports(struct serial_private *priv)
{
struct pci_serial_quirk *quirk;
int i;
@@ -3886,7 +3922,11 @@ void pciserial_remove_ports(struct serial_private *priv)
quirk = find_quirk(priv->dev);
if (quirk->exit)
quirk->exit(priv->dev);
+}
+void pciserial_remove_ports(struct serial_private *priv)
+{
+ pciserial_detach_ports(priv);
kfree(priv);
}
EXPORT_SYMBOL_GPL(pciserial_remove_ports);
@@ -5577,7 +5617,7 @@ static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev,
return PCI_ERS_RESULT_DISCONNECT;
if (priv)
- pciserial_suspend_ports(priv);
+ pciserial_detach_ports(priv);
pci_disable_device(dev);
@@ -5602,9 +5642,18 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev)
static void serial8250_io_resume(struct pci_dev *dev)
{
struct serial_private *priv = pci_get_drvdata(dev);
+ const struct pciserial_board *board;
- if (priv)
- pciserial_resume_ports(priv);
+ if (!priv)
+ return;
+
+ board = priv->board;
+ kfree(priv);
+ priv = pciserial_init_ports(dev, board);
+
+ if (!IS_ERR(priv)) {
+ pci_set_drvdata(dev, priv);
+ }
}
static const struct pci_error_handlers serial8250_err_handler = {
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 1731b98d2471..fe4399b41df6 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -636,7 +636,7 @@ EXPORT_SYMBOL_GPL(serial8250_em485_destroy);
* once and disable_runtime_pm_tx() will still disable RPM because the fifo is
* empty and the HW can idle again.
*/
-static void serial8250_rpm_get_tx(struct uart_8250_port *p)
+void serial8250_rpm_get_tx(struct uart_8250_port *p)
{
unsigned char rpm_active;
@@ -648,8 +648,9 @@ static void serial8250_rpm_get_tx(struct uart_8250_port *p)
return;
pm_runtime_get_sync(p->port.dev);
}
+EXPORT_SYMBOL_GPL(serial8250_rpm_get_tx);
-static void serial8250_rpm_put_tx(struct uart_8250_port *p)
+void serial8250_rpm_put_tx(struct uart_8250_port *p)
{
unsigned char rpm_active;
@@ -662,6 +663,7 @@ static void serial8250_rpm_put_tx(struct uart_8250_port *p)
pm_runtime_mark_last_busy(p->port.dev);
pm_runtime_put_autosuspend(p->port.dev);
}
+EXPORT_SYMBOL_GPL(serial8250_rpm_put_tx);
/*
* IER sleep support. UARTs which have EFRs need the "extended
@@ -2691,8 +2693,7 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
serial8250_do_set_termios(port, termios, old);
}
-static void
-serial8250_set_ldisc(struct uart_port *port, struct ktermios *termios)
+void serial8250_do_set_ldisc(struct uart_port *port, struct ktermios *termios)
{
if (termios->c_line == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
@@ -2708,7 +2709,16 @@ serial8250_set_ldisc(struct uart_port *port, struct ktermios *termios)
}
}
}
+EXPORT_SYMBOL_GPL(serial8250_do_set_ldisc);
+static void
+serial8250_set_ldisc(struct uart_port *port, struct ktermios *termios)
+{
+ if (port->set_ldisc)
+ port->set_ldisc(port, termios);
+ else
+ serial8250_do_set_ldisc(port, termios);
+}
void serial8250_do_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
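The hunk above turns serial8250_set_ldisc() into a dispatcher: a port driver may now install its own port->set_ldisc callback and fall back to the exported serial8250_do_set_ldisc() for the generic N_PPS handling. A hedged sketch of how a board driver might use the new hook (the example_ names are made up for illustration; only the two exported symbols come from this patch):

	/* Sketch: override the ldisc hook but keep the default PPS handling. */
	static void example_set_ldisc(struct uart_port *port, struct ktermios *termios)
	{
		/* board-specific tweaks (e.g. rerouting a PPS input) would go here */
		serial8250_do_set_ldisc(port, termios);
	}

	/* ...during port setup, before serial8250_register_8250_port(): */
	uart.port.set_ldisc = example_set_ldisc;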
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
new file mode 100644
index 000000000000..4d68731af534
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -0,0 +1,190 @@
+/*
+ * drivers/tty/serial/8250/8250_pxa.c -- driver for PXA on-board UARTS
+ * Copyright: (C) 2013 Sergei Ianovich <ynvich@gmail.com>
+ *
+ * replaces drivers/serial/pxa.c by Nicolas Pitre
+ * Created: Feb 20, 2003
+ * Copyright: (C) 2003 Monta Vista Software, Inc.
+ *
+ * Based on drivers/serial/8250.c by Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+#include "8250.h"
+
+struct pxa8250_data {
+ int line;
+ struct clk *clk;
+};
+
+static int __maybe_unused serial_pxa_suspend(struct device *dev)
+{
+ struct pxa8250_data *data = dev_get_drvdata(dev);
+
+ serial8250_suspend_port(data->line);
+
+ return 0;
+}
+
+static int __maybe_unused serial_pxa_resume(struct device *dev)
+{
+ struct pxa8250_data *data = dev_get_drvdata(dev);
+
+ serial8250_resume_port(data->line);
+
+ return 0;
+}
+
+static const struct dev_pm_ops serial_pxa_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(serial_pxa_suspend, serial_pxa_resume)
+};
+
+static const struct of_device_id serial_pxa_dt_ids[] = {
+ { .compatible = "mrvl,pxa-uart", },
+ { .compatible = "mrvl,mmp-uart", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, serial_pxa_dt_ids);
+
+/* Uart divisor latch write */
+static void serial_pxa_dl_write(struct uart_8250_port *up, int value)
+{
+ unsigned int dll;
+
+ serial_out(up, UART_DLL, value & 0xff);
+ /*
+ * work around Erratum #74 according to the Marvell(R) PXA270M Processor
+ * Specification Update (April 19, 2010)
+ */
+ dll = serial_in(up, UART_DLL);
+ WARN_ON(dll != (value & 0xff));
+
+ serial_out(up, UART_DLM, value >> 8 & 0xff);
+}
+
+
+static void serial_pxa_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct pxa8250_data *data = port->private_data;
+
+ if (!state)
+ clk_prepare_enable(data->clk);
+ else
+ clk_disable_unprepare(data->clk);
+}
+
+static int serial_pxa_probe(struct platform_device *pdev)
+{
+ struct uart_8250_port uart = {};
+ struct pxa8250_data *data;
+ struct resource *mmres, *irqres;
+ int ret;
+
+ mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!mmres || !irqres)
+ return -ENODEV;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(data->clk))
+ return PTR_ERR(data->clk);
+
+ ret = clk_prepare(data->clk);
+ if (ret)
+ return ret;
+
+ uart.port.type = PORT_XSCALE;
+ uart.port.iotype = UPIO_MEM32;
+ uart.port.mapbase = mmres->start;
+ uart.port.regshift = 2;
+ uart.port.irq = irqres->start;
+ uart.port.fifosize = 64;
+ uart.port.flags = UPF_IOREMAP | UPF_SKIP_TEST;
+ uart.port.dev = &pdev->dev;
+ uart.port.uartclk = clk_get_rate(data->clk);
+ uart.port.pm = serial_pxa_pm;
+ uart.port.private_data = data;
+ uart.dl_write = serial_pxa_dl_write;
+
+ ret = serial8250_register_8250_port(&uart);
+ if (ret < 0)
+ goto err_clk;
+
+ data->line = ret;
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+
+ err_clk:
+ clk_unprepare(data->clk);
+ return ret;
+}
+
+static int serial_pxa_remove(struct platform_device *pdev)
+{
+ struct pxa8250_data *data = platform_get_drvdata(pdev);
+
+ serial8250_unregister_port(data->line);
+
+ clk_unprepare(data->clk);
+
+ return 0;
+}
+
+static struct platform_driver serial_pxa_driver = {
+ .probe = serial_pxa_probe,
+ .remove = serial_pxa_remove,
+
+ .driver = {
+ .name = "pxa2xx-uart",
+ .pm = &serial_pxa_pm_ops,
+ .of_match_table = serial_pxa_dt_ids,
+ },
+};
+
+module_platform_driver(serial_pxa_driver);
+
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+static int __init early_serial_pxa_setup(struct earlycon_device *device,
+ const char *options)
+{
+ struct uart_port *port = &device->port;
+
+ if (!(device->port.membase || device->port.iobase))
+ return -ENODEV;
+
+ port->regshift = 2;
+ return early_serial8250_setup(device, NULL);
+}
+OF_EARLYCON_DECLARE(early_pxa, "mrvl,pxa-uart", early_serial_pxa_setup);
+#endif
+
+MODULE_AUTHOR("Sergei Ianovich");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa2xx-uart");
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index 417d9e7038e1..746680ebf90c 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -24,10 +24,22 @@
/* Most (but not all) of UniPhier UART devices have 64-depth FIFO. */
#define UNIPHIER_UART_DEFAULT_FIFO_SIZE 64
-#define UNIPHIER_UART_CHAR_FCR 3 /* Character / FIFO Control Register */
-#define UNIPHIER_UART_LCR_MCR 4 /* Line/Modem Control Register */
-#define UNIPHIER_UART_LCR_SHIFT 8
-#define UNIPHIER_UART_DLR 9 /* Divisor Latch Register */
+/*
+ * This hardware is similar to 8250, but its register map is a bit different:
+ * - MMIO32 (regshift = 2)
+ * - FCR is not at 2, but 3
+ * - LCR and MCR are not at 3 and 4, they share 4
+ * - Divisor latch at 9, no divisor latch access bit
+ */
+
+#define UNIPHIER_UART_REGSHIFT 2
+
+/* bit[15:8] = CHAR (not used), bit[7:0] = FCR */
+#define UNIPHIER_UART_CHAR_FCR (3 << (UNIPHIER_UART_REGSHIFT))
+/* bit[15:8] = LCR, bit[7:0] = MCR */
+#define UNIPHIER_UART_LCR_MCR (4 << (UNIPHIER_UART_REGSHIFT))
+/* Divisor Latch Register */
+#define UNIPHIER_UART_DLR (9 << (UNIPHIER_UART_REGSHIFT))
struct uniphier8250_priv {
int line;
@@ -44,7 +56,7 @@ static int __init uniphier_early_console_setup(struct earlycon_device *device,
/* This hardware always expects MMIO32 register interface. */
device->port.iotype = UPIO_MEM32;
- device->port.regshift = 2;
+ device->port.regshift = UNIPHIER_UART_REGSHIFT;
/*
* Do not touch the divisor register in early_serial8250_setup();
@@ -68,17 +80,16 @@ static unsigned int uniphier_serial_in(struct uart_port *p, int offset)
switch (offset) {
case UART_LCR:
- valshift = UNIPHIER_UART_LCR_SHIFT;
+ valshift = 8;
/* fall through */
case UART_MCR:
offset = UNIPHIER_UART_LCR_MCR;
break;
default:
+ offset <<= UNIPHIER_UART_REGSHIFT;
break;
}
- offset <<= p->regshift;
-
/*
* The return value must be masked with 0xff because LCR and MCR reside
* in the same register that must be accessed by 32-bit write/read.
@@ -90,27 +101,26 @@ static unsigned int uniphier_serial_in(struct uart_port *p, int offset)
static void uniphier_serial_out(struct uart_port *p, int offset, int value)
{
unsigned int valshift = 0;
- bool normal = false;
+ bool normal = true;
switch (offset) {
case UART_FCR:
offset = UNIPHIER_UART_CHAR_FCR;
break;
case UART_LCR:
- valshift = UNIPHIER_UART_LCR_SHIFT;
+ valshift = 8;
/* Divisor latch access bit does not exist. */
value &= ~UART_LCR_DLAB;
/* fall through */
case UART_MCR:
offset = UNIPHIER_UART_LCR_MCR;
+ normal = false;
break;
default:
- normal = true;
+ offset <<= UNIPHIER_UART_REGSHIFT;
break;
}
- offset <<= p->regshift;
-
if (normal) {
writel(value, p->membase + offset);
} else {
@@ -139,16 +149,12 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value)
*/
static int uniphier_serial_dl_read(struct uart_8250_port *up)
{
- int offset = UNIPHIER_UART_DLR << up->port.regshift;
-
- return readl(up->port.membase + offset);
+ return readl(up->port.membase + UNIPHIER_UART_DLR);
}
static void uniphier_serial_dl_write(struct uart_8250_port *up, int value)
{
- int offset = UNIPHIER_UART_DLR << up->port.regshift;
-
- writel(value, up->port.membase + offset);
+ writel(value, up->port.membase + UNIPHIER_UART_DLR);
}
static int uniphier_of_serial_setup(struct device *dev, struct uart_port *port,
@@ -234,7 +240,7 @@ static int uniphier_uart_probe(struct platform_device *pdev)
up.port.type = PORT_16550A;
up.port.iotype = UPIO_MEM32;
- up.port.regshift = 2;
+ up.port.regshift = UNIPHIER_UART_REGSHIFT;
up.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE;
up.capabilities = UART_CAP_FIFO;
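On this UART the LCR and MCR bytes live in a single 32-bit register (bits [15:8] and [7:0] of UNIPHIER_UART_LCR_MCR), so the non-"normal" path in uniphier_serial_out() has to do a read-modify-write that preserves the other byte lane. A minimal sketch of that access pattern; the helper name and the masking details are an assumption for illustration, not the driver's exact code:

	/* Sketch: update one byte lane of the shared LCR/MCR register. */
	static void uniphier_lcr_mcr_write_sketch(void __iomem *membase,
						  unsigned int valshift, u8 value)
	{
		u32 tmp = readl(membase + UNIPHIER_UART_LCR_MCR);

		tmp &= ~(0xff << valshift);	/* clear LCR (shift 8) or MCR (shift 0) */
		tmp |= (u32)value << valshift;
		writel(tmp, membase + UNIPHIER_UART_LCR_MCR);
	}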
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 899834776b36..0b8b6740ba43 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -439,6 +439,16 @@ config SERIAL_8250_MOXA
This driver can also be built as a module. The module will be called
8250_moxa. If you want to do that, say M here.
+config SERIAL_8250_PXA
+ tristate "PXA serial port support"
+ depends on SERIAL_8250
+ depends on ARCH_PXA || ARCH_MMP
+ help
+ If you have a machine based on an Intel XScale PXA2xx CPU, you can
+ enable its onboard serial ports by enabling this option. The option
+ applies to both device tree and legacy boards, and includes early
+ console support.
+
config SERIAL_OF_PLATFORM
tristate "Devicetree based probing for 8250 ports"
depends on SERIAL_8250 && OF
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 276c6fb60337..850e721877a9 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_SERIAL_8250_INGENIC) += 8250_ingenic.o
obj-$(CONFIG_SERIAL_8250_LPSS) += 8250_lpss.o
obj-$(CONFIG_SERIAL_8250_MID) += 8250_mid.o
obj-$(CONFIG_SERIAL_8250_MOXA) += 8250_moxa.o
+obj-$(CONFIG_SERIAL_8250_PXA) += 8250_pxa.o
obj-$(CONFIG_SERIAL_OF_PLATFORM) += 8250_of.o
CFLAGS_8250_ingenic.o += -I$(srctree)/scripts/dtc/libfdt
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 25c1d7bc0100..e9cf5b67f1b7 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -438,17 +438,27 @@ config SERIAL_MPSC_CONSOLE
Say Y here if you want to support a serial console on a Marvell MPSC.
config SERIAL_PXA
- bool "PXA serial port support"
+ bool "PXA serial port support (DEPRECATED)"
depends on ARCH_PXA || ARCH_MMP
select SERIAL_CORE
+ select SERIAL_8250_PXA if SERIAL_8250=y
+ select SERIAL_PXA_NON8250 if !SERIAL_8250=y
help
If you have a machine based on an Intel XScale PXA2xx CPU you
can enable its onboard serial ports by enabling this option.
+ Unless you have a specific need, you should use SERIAL_8250_PXA
+ instead of this.
+
+config SERIAL_PXA_NON8250
+ bool
+ depends on !SERIAL_8250
+
config SERIAL_PXA_CONSOLE
- bool "Console on PXA serial port"
+ bool "Console on PXA serial port (DEPRECATED)"
depends on SERIAL_PXA
select SERIAL_CORE_CONSOLE
+ select SERIAL_8250_CONSOLE if SERIAL_8250=y
help
If you have enabled the serial port on the Intel XScale PXA
CPU you can make it the console by answering Y to this option.
@@ -460,6 +470,9 @@ config SERIAL_PXA_CONSOLE
your boot loader (lilo or loadlin) about how to pass options to the
kernel at boot time.)
+ Unless you have a specific need, you should use SERIAL_8250_PXA
+ and SERIAL_8250_CONSOLE instead of this.
+
config SERIAL_SA1100
bool "SA1100 serial port support"
depends on ARCH_SA1100
@@ -1626,7 +1639,7 @@ config SERIAL_STM32
tristate "STMicroelectronics STM32 serial port support"
select SERIAL_CORE
depends on HAS_DMA
- depends on ARM || COMPILE_TEST
+ depends on ARCH_STM32 || COMPILE_TEST
help
This driver is for the on-chip Serial Controller on
STMicroelectronics STM32 MCUs.
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 1278d376da50..2d6288bc4554 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -23,7 +23,7 @@ obj-$(CONFIG_SERIAL_8250) += 8250/
obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o
obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o
obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o
-obj-$(CONFIG_SERIAL_PXA) += pxa.o
+obj-$(CONFIG_SERIAL_PXA_NON8250) += pxa.o
obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o
obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o
@@ -62,13 +62,11 @@ obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
-obj-$(CONFIG_SERIAL_KGDB_NMI) += kgdb_nmi.o
obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o
obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o
obj-$(CONFIG_SERIAL_ST_ASC) += st-asc.o
obj-$(CONFIG_SERIAL_TILEGX) += tilegx.o
-obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o
@@ -96,3 +94,6 @@ obj-$(CONFIG_SERIAL_MPS2_UART) += mps2-uart.o
# GPIOLIB helpers for modem control lines
obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o
+
+obj-$(CONFIG_SERIAL_KGDB_NMI) += kgdb_nmi.o
+obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index e2c33b9528d8..d4171d71a258 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2315,12 +2315,67 @@ static int __init pl011_console_setup(struct console *co, char *options)
return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}
+/**
+ * pl011_console_match - non-standard console matching
+ * @co: registering console
+ * @name: name from console command line
+ * @idx: index from console command line
+ * @options: ptr to option string from console command line
+ *
+ * Only attempts to match console command lines of the form:
+ * console=pl011,mmio|mmio32,<addr>[,<options>]
+ * console=pl011,0x<addr>[,<options>]
+ * This form is used to register an initial earlycon boot console and
+ * replace it with the amba_console at pl011 driver init.
+ *
+ * Performs console setup for a match (as required by interface)
+ * If no <options> are specified, then assume the h/w is already setup.
+ *
+ * Returns 0 if console matches; otherwise non-zero to use default matching
+ */
+static int __init pl011_console_match(struct console *co, char *name, int idx,
+ char *options)
+{
+ unsigned char iotype;
+ resource_size_t addr;
+ int i;
+
+ if (strcmp(name, "pl011") != 0)
+ return -ENODEV;
+
+ if (uart_parse_earlycon(options, &iotype, &addr, &options))
+ return -ENODEV;
+
+ if (iotype != UPIO_MEM && iotype != UPIO_MEM32)
+ return -ENODEV;
+
+ /* try to match the port specified on the command line */
+ for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
+ struct uart_port *port;
+
+ if (!amba_ports[i])
+ continue;
+
+ port = &amba_ports[i]->port;
+
+ if (port->mapbase != addr)
+ continue;
+
+ co->index = i;
+ port->cons = co;
+ return pl011_console_setup(co, options);
+ }
+
+ return -ENODEV;
+}
+
static struct uart_driver amba_reg;
static struct console amba_console = {
.name = "ttyAMA",
.write = pl011_console_write,
.device = uart_console_device,
.setup = pl011_console_setup,
+ .match = pl011_console_match,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &amba_reg,
@@ -2357,6 +2412,7 @@ static int __init pl011_early_console_setup(struct earlycon_device *device,
return 0;
}
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
+OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
#else
#define AMBA_CONSOLE NULL
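In practice the match callback lets the command line name the console by its physical address instead of its ttyAMA index: a parameter such as console=pl011,mmio32,0x9000000,115200n8 (the address here is purely illustrative) is parsed by uart_parse_earlycon(), compared against the mapbase of each registered amba port, and on a hit pl011_console_setup() is run for that port, so output continues on the same UART once the driver console replaces the early one.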
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 6450a38cb1aa..e92c23470e51 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -3213,8 +3213,6 @@ get_serial_info(struct e100_serial * info,
* should set them to something else than 0.
*/
- if (!retinfo)
- return -EFAULT;
memset(&tmp, 0, sizeof(tmp));
tmp.type = info->type;
tmp.line = info->line;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 76103f2c4a80..a1c6519837a4 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -430,6 +430,65 @@ static void lpuart_flush_buffer(struct uart_port *port)
}
}
+#if defined(CONFIG_CONSOLE_POLL)
+
+static int lpuart_poll_init(struct uart_port *port)
+{
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+ unsigned long flags;
+ unsigned char temp;
+
+ sport->port.fifosize = 0;
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+ /* Disable Rx & Tx */
+ writeb(0, sport->port.membase + UARTCR2);
+
+ temp = readb(sport->port.membase + UARTPFIFO);
+ /* Enable Rx and Tx FIFO */
+ writeb(temp | UARTPFIFO_RXFE | UARTPFIFO_TXFE,
+ sport->port.membase + UARTPFIFO);
+
+ /* flush Tx and Rx FIFO */
+ writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
+ sport->port.membase + UARTCFIFO);
+
+ /* explicitly clear RDRF */
+ if (readb(sport->port.membase + UARTSR1) & UARTSR1_RDRF) {
+ readb(sport->port.membase + UARTDR);
+ writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO);
+ }
+
+ writeb(0, sport->port.membase + UARTTWFIFO);
+ writeb(1, sport->port.membase + UARTRWFIFO);
+
+ /* Enable Rx and Tx */
+ writeb(UARTCR2_RE | UARTCR2_TE, sport->port.membase + UARTCR2);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ return 0;
+}
+
+static void lpuart_poll_put_char(struct uart_port *port, unsigned char c)
+{
+ /* drain */
+ while (!(readb(port->membase + UARTSR1) & UARTSR1_TDRE))
+ barrier();
+
+ writeb(c, port->membase + UARTDR);
+}
+
+static int lpuart_poll_get_char(struct uart_port *port)
+{
+ if (!(readb(port->membase + UARTSR1) & UARTSR1_RDRF))
+ return NO_POLL_CHAR;
+
+ return readb(port->membase + UARTDR);
+}
+
+#endif
+
static inline void lpuart_transmit_buffer(struct lpuart_port *sport)
{
struct circ_buf *xmit = &sport->port.state->xmit;
@@ -1595,6 +1654,11 @@ static const struct uart_ops lpuart_pops = {
.config_port = lpuart_config_port,
.verify_port = lpuart_verify_port,
.flush_buffer = lpuart_flush_buffer,
+#if defined(CONFIG_CONSOLE_POLL)
+ .poll_init = lpuart_poll_init,
+ .poll_get_char = lpuart_poll_get_char,
+ .poll_put_char = lpuart_poll_put_char,
+#endif
};
static const struct uart_ops lpuart32_pops = {
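These poll_init/poll_get_char/poll_put_char hooks exist for CONFIG_CONSOLE_POLL users such as kgdb: with them in place the port can back kgdboc, e.g. by booting with something like kgdboc=ttyLP0,115200 (the ttyLP0 name is an assumption about which lpuart instance is in use).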
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index d386346248de..157883653256 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1042,6 +1042,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret) {
dev_err(&spi->dev, "SPI setup wasn't successful %d", ret);
+ kfree(ifx_dev);
return -ENODEV;
}
diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
index e5c42fef69d2..3be051abb2a2 100644
--- a/drivers/tty/serial/ioc4_serial.c
+++ b/drivers/tty/serial/ioc4_serial.c
@@ -1082,7 +1082,7 @@ static int inline ioc4_attach_local(struct ioc4_driver_data *idd)
if (!port) {
printk(KERN_WARNING
"IOC4 serial memory not available for port\n");
- return -ENOMEM;
+ goto free;
}
spin_lock_init(&port->ip_lock);
@@ -1190,6 +1190,11 @@ static int inline ioc4_attach_local(struct ioc4_driver_data *idd)
handle_dma_error_intr, port);
}
return 0;
+
+free:
+ while (port_number)
+ kfree(ports[--port_number]);
+ return -ENOMEM;
}
/**
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 770454e0dfa3..8c1c9112b3fd 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1016,7 +1016,7 @@ static void mxs_auart_settermios(struct uart_port *u,
ctrl |= AUART_LINECTRL_EPS;
}
- u->read_status_mask = 0;
+ u->read_status_mask = AUART_STAT_OERR;
if (termios->c_iflag & INPCK)
u->read_status_mask |= AUART_STAT_PERR;
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index cd9d9e878475..75952811c0da 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -925,6 +925,8 @@ static struct platform_driver serial_pxa_driver = {
},
};
+
+/* The 8250 driver for PXA serial ports should be used instead */
static int __init serial_pxa_init(void)
{
int ret;
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index fb0672554123..793395451982 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1264,7 +1264,7 @@ static int sc16is7xx_probe(struct device *dev,
/* Setup interrupt */
ret = devm_request_irq(dev, irq, sc16is7xx_irq,
- IRQF_ONESHOT | flags, dev_name(dev), s);
+ flags, dev_name(dev), s);
if (!ret)
return 0;
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index f2303f390345..d0847375ea64 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -73,7 +73,7 @@ static inline struct uart_port *uart_port_ref(struct uart_state *state)
static inline void uart_port_deref(struct uart_port *uport)
{
- if (uport && atomic_dec_and_test(&uport->state->refcount))
+ if (atomic_dec_and_test(&uport->state->refcount))
wake_up(&uport->state->remove_wait);
}
@@ -88,9 +88,10 @@ static inline void uart_port_deref(struct uart_port *uport)
#define uart_port_unlock(uport, flags) \
({ \
struct uart_port *__uport = uport; \
- if (__uport) \
+ if (__uport) { \
spin_unlock_irqrestore(&__uport->lock, flags); \
- uart_port_deref(__uport); \
+ uart_port_deref(__uport); \
+ } \
})
static inline struct uart_port *uart_port_check(struct uart_state *state)
@@ -1515,7 +1516,10 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
unsigned long char_time, expire;
port = uart_port_ref(state);
- if (!port || port->type == PORT_UNKNOWN || port->fifosize == 0) {
+ if (!port)
+ return;
+
+ if (port->type == PORT_UNKNOWN || port->fifosize == 0) {
uart_port_deref(port);
return;
}
@@ -2365,9 +2369,10 @@ static int uart_poll_get_char(struct tty_driver *driver, int line)
if (state) {
port = uart_port_ref(state);
- if (port)
+ if (port) {
ret = port->ops->poll_get_char(port);
- uart_port_deref(port);
+ uart_port_deref(port);
+ }
}
return ret;
}
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 4b26252c2885..91e7dddbf72c 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1142,11 +1142,8 @@ static int sci_dma_rx_push(struct sci_port *s, void *buf, size_t count)
int copied;
copied = tty_insert_flip_string(tport, buf, count);
- if (copied < count) {
- dev_warn(port->dev, "Rx overrun: dropping %zu bytes\n",
- count - copied);
+ if (copied < count)
port->icount.buf_overrun++;
- }
port->icount.rx += copied;
@@ -1161,8 +1158,6 @@ static int sci_dma_rx_find_active(struct sci_port *s)
if (s->active_rx == s->cookie_rx[i])
return i;
- dev_err(s->port.dev, "%s: Rx cookie %d not found!\n", __func__,
- s->active_rx);
return -1;
}
@@ -1223,9 +1218,9 @@ static void sci_dma_rx_complete(void *arg)
dma_async_issue_pending(chan);
+ spin_unlock_irqrestore(&port->lock, flags);
dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n",
__func__, s->cookie_rx[active], active, s->active_rx);
- spin_unlock_irqrestore(&port->lock, flags);
return;
fail:
@@ -1273,8 +1268,6 @@ static void sci_submit_rx(struct sci_port *s)
if (dma_submit_error(s->cookie_rx[i]))
goto fail;
- dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
- s->cookie_rx[i], i);
}
s->active_rx = s->cookie_rx[0];
@@ -1288,7 +1281,6 @@ fail:
for (i = 0; i < 2; i++)
s->cookie_rx[i] = -EINVAL;
s->active_rx = -EINVAL;
- dev_warn(s->port.dev, "Failed to re-start Rx DMA, using PIO\n");
sci_rx_dma_release(s, true);
}
@@ -1358,10 +1350,10 @@ static void rx_timer_fn(unsigned long arg)
int active, count;
u16 scr;
- spin_lock_irqsave(&port->lock, flags);
-
dev_dbg(port->dev, "DMA Rx timed out\n");
+ spin_lock_irqsave(&port->lock, flags);
+
active = sci_dma_rx_find_active(s);
if (active < 0) {
spin_unlock_irqrestore(&port->lock, flags);
@@ -1370,9 +1362,9 @@ static void rx_timer_fn(unsigned long arg)
status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
if (status == DMA_COMPLETE) {
+ spin_unlock_irqrestore(&port->lock, flags);
dev_dbg(port->dev, "Cookie %d #%d has already completed\n",
s->active_rx, active);
- spin_unlock_irqrestore(&port->lock, flags);
/* Let packet complete handler take care of the packet */
return;
@@ -1396,8 +1388,6 @@ static void rx_timer_fn(unsigned long arg)
/* Handle incomplete DMA receive */
dmaengine_terminate_all(s->chan_rx);
read = sg_dma_len(&s->sg_rx[active]) - state.residue;
- dev_dbg(port->dev, "Read %u bytes with cookie %d\n", read,
- s->active_rx);
if (read) {
count = sci_dma_rx_push(s, s->rx_buf[active], read);
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index 4e603d060e80..99ef5c6e4766 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -598,7 +598,8 @@ static int hv_remove(struct platform_device *dev)
uart_remove_one_port(&sunhv_reg, port);
sunserial_unregister_minors(&sunhv_reg, 1);
-
+ kfree(con_read_page);
+ kfree(con_write_page);
kfree(port);
sunhv_port = NULL;
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 9ad98eaa35bf..72df2e1b88af 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1500,6 +1500,7 @@ static int su_probe(struct platform_device *op)
out_unmap:
of_iounmap(&op->resource[0], up->port.membase, up->reg_size);
+ kfree(up);
return err;
}
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index 9d7ab7b66a8a..71e81406ef71 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -9,6 +9,17 @@
* Support for multiple unimaps by Jakub Jelinek <jj@ultra.linux.cz>, July 1998
*
* Fix bug in inverse translation. Stanislav Voronyi <stas@cnti.uanet.kharkov.ua>, Dec 1998
+ *
+ * In order to prevent the following circular lock dependency:
+ * &mm->mmap_sem --> cpu_hotplug.lock --> console_lock --> &mm->mmap_sem
+ *
+ * We cannot allow page fault to happen while holding the console_lock.
+ * Therefore, all the userspace copy operations have to be done outside
+ * the console_lock critical sections.
+ *
+ * As all the affected functions are called directly from vt_ioctl(), we
+ * can allocate some small buffers directly on stack without worrying about
+ * stack overflow.
*/
#include <linux/module.h>
@@ -22,6 +33,7 @@
#include <linux/console.h>
#include <linux/consolemap.h>
#include <linux/vt_kern.h>
+#include <linux/string.h>
static unsigned short translations[][256] = {
/* 8-bit Latin-1 mapped to Unicode -- trivial mapping */
@@ -309,18 +321,19 @@ static void update_user_maps(void)
int con_set_trans_old(unsigned char __user * arg)
{
int i;
- unsigned short *p = translations[USER_MAP];
+ unsigned short inbuf[E_TABSZ];
if (!access_ok(VERIFY_READ, arg, E_TABSZ))
return -EFAULT;
- console_lock();
- for (i=0; i<E_TABSZ ; i++) {
+ for (i = 0; i < E_TABSZ ; i++) {
unsigned char uc;
__get_user(uc, arg+i);
- p[i] = UNI_DIRECT_BASE | uc;
+ inbuf[i] = UNI_DIRECT_BASE | uc;
}
+ console_lock();
+ memcpy(translations[USER_MAP], inbuf, sizeof(inbuf));
update_user_maps();
console_unlock();
return 0;
@@ -330,35 +343,37 @@ int con_get_trans_old(unsigned char __user * arg)
{
int i, ch;
unsigned short *p = translations[USER_MAP];
+ unsigned char outbuf[E_TABSZ];
if (!access_ok(VERIFY_WRITE, arg, E_TABSZ))
return -EFAULT;
console_lock();
- for (i=0; i<E_TABSZ ; i++)
+ for (i = 0; i < E_TABSZ ; i++)
{
ch = conv_uni_to_pc(vc_cons[fg_console].d, p[i]);
- __put_user((ch & ~0xff) ? 0 : ch, arg+i);
+ outbuf[i] = (ch & ~0xff) ? 0 : ch;
}
console_unlock();
+
+ for (i = 0; i < E_TABSZ ; i++)
+ __put_user(outbuf[i], arg+i);
return 0;
}
int con_set_trans_new(ushort __user * arg)
{
int i;
- unsigned short *p = translations[USER_MAP];
+ unsigned short inbuf[E_TABSZ];
if (!access_ok(VERIFY_READ, arg, E_TABSZ*sizeof(unsigned short)))
return -EFAULT;
- console_lock();
- for (i=0; i<E_TABSZ ; i++) {
- unsigned short us;
- __get_user(us, arg+i);
- p[i] = us;
- }
+ for (i = 0; i < E_TABSZ ; i++)
+ __get_user(inbuf[i], arg+i);
+ console_lock();
+ memcpy(translations[USER_MAP], inbuf, sizeof(inbuf));
update_user_maps();
console_unlock();
return 0;
@@ -367,16 +382,17 @@ int con_set_trans_new(ushort __user * arg)
int con_get_trans_new(ushort __user * arg)
{
int i;
- unsigned short *p = translations[USER_MAP];
+ unsigned short outbuf[E_TABSZ];
if (!access_ok(VERIFY_WRITE, arg, E_TABSZ*sizeof(unsigned short)))
return -EFAULT;
console_lock();
- for (i=0; i<E_TABSZ ; i++)
- __put_user(p[i], arg+i);
+ memcpy(outbuf, translations[USER_MAP], sizeof(outbuf));
console_unlock();
-
+
+ for (i = 0; i < E_TABSZ ; i++)
+ __put_user(outbuf[i], arg+i);
return 0;
}
@@ -536,10 +552,20 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
{
int err = 0, err1, i;
struct uni_pagedir *p, *q;
+ struct unipair *unilist, *plist;
if (!ct)
return 0;
+ unilist = kmalloc_array(ct, sizeof(struct unipair), GFP_KERNEL);
+ if (!unilist)
+ return -ENOMEM;
+
+ for (i = ct, plist = unilist; i; i--, plist++, list++) {
+ __get_user(plist->unicode, &list->unicode);
+ __get_user(plist->fontpos, &list->fontpos);
+ }
+
console_lock();
/* Save original vc_unipagdir_loc in case we allocate a new one */
@@ -557,8 +583,8 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
err1 = con_do_clear_unimap(vc);
if (err1) {
- console_unlock();
- return err1;
+ err = err1;
+ goto out_unlock;
}
/*
@@ -592,8 +618,8 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
*vc->vc_uni_pagedir_loc = p;
con_release_unimap(q);
kfree(q);
- console_unlock();
- return err1;
+ err = err1;
+ goto out_unlock;
}
}
} else {
@@ -617,22 +643,17 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
/*
* Insert user specified unicode pairs into new table.
*/
- while (ct--) {
- unsigned short unicode, fontpos;
- __get_user(unicode, &list->unicode);
- __get_user(fontpos, &list->fontpos);
- if ((err1 = con_insert_unipair(p, unicode,fontpos)) != 0)
+ for (plist = unilist; ct; ct--, plist++) {
+ err1 = con_insert_unipair(p, plist->unicode, plist->fontpos);
+ if (err1)
err = err1;
- list++;
}
/*
* Merge with fontmaps of any other virtual consoles.
*/
- if (con_unify_unimap(vc, p)) {
- console_unlock();
- return err;
- }
+ if (con_unify_unimap(vc, p))
+ goto out_unlock;
for (i = 0; i <= 3; i++)
set_inverse_transl(vc, p, i); /* Update inverse translations */
@@ -640,6 +661,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
out_unlock:
console_unlock();
+ kfree(unilist);
return err;
}
@@ -735,9 +757,15 @@ EXPORT_SYMBOL(con_copy_unimap);
*/
int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list)
{
- int i, j, k, ect;
+ int i, j, k;
+ ushort ect;
u16 **p1, *p2;
struct uni_pagedir *p;
+ struct unipair *unilist, *plist;
+
+ unilist = kmalloc_array(ct, sizeof(struct unipair), GFP_KERNEL);
+ if (!unilist)
+ return -ENOMEM;
console_lock();
@@ -750,21 +778,26 @@ int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct uni
for (j = 0; j < 32; j++) {
p2 = *(p1++);
if (p2)
- for (k = 0; k < 64; k++) {
- if (*p2 < MAX_GLYPH && ect++ < ct) {
- __put_user((u_short)((i<<11)+(j<<6)+k),
- &list->unicode);
- __put_user((u_short) *p2,
- &list->fontpos);
- list++;
+ for (k = 0; k < 64; k++, p2++) {
+ if (*p2 >= MAX_GLYPH)
+ continue;
+ if (ect < ct) {
+ unilist[ect].unicode =
+ (i<<11)+(j<<6)+k;
+ unilist[ect].fontpos = *p2;
}
- p2++;
+ ect++;
}
}
}
}
- __put_user(ect, uct);
console_unlock();
+ for (i = min(ect, ct), plist = unilist; i; i--, list++, plist++) {
+ __put_user(plist->unicode, &list->unicode);
+ __put_user(plist->fontpos, &list->fontpos);
+ }
+ __put_user(ect, uct);
+ kfree(unilist);
return ((ect <= ct) ? 0 : -ENOMEM);
}
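Every converted ioctl above follows the scheme from the new header comment: user memory is read or written while console_lock is not held, and only the final memcpy into (or out of) the shared tables happens under the lock. A hedged sketch of the shape, with made-up names and a fixed-size table purely for illustration:

	/* Sketch: copy user data before taking console_lock (illustrative only). */
	static int set_table_sketch(const unsigned short __user *arg, size_t n,
				    unsigned short *shared_table)
	{
		unsigned short inbuf[256];

		if (n > ARRAY_SIZE(inbuf))
			return -EINVAL;

		/* 1. Copy from user space first; page faults may happen here. */
		if (copy_from_user(inbuf, arg, n * sizeof(*inbuf)))
			return -EFAULT;

		/* 2. Only then take console_lock and touch the shared state. */
		console_lock();
		memcpy(shared_table, inbuf, n * sizeof(*inbuf));
		console_unlock();

		return 0;
	}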
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 0f8caae4267d..3dd6a491cdba 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -982,7 +982,7 @@ static void kbd_led_trigger_activate(struct led_classdev *cdev)
KBD_LED_TRIGGER((_led_bit) + 8, _name)
static struct kbd_led_trigger kbd_led_triggers[] = {
- KBD_LED_TRIGGER(VC_SCROLLOCK, "kbd-scrollock"),
+ KBD_LED_TRIGGER(VC_SCROLLOCK, "kbd-scrolllock"),
KBD_LED_TRIGGER(VC_NUMLOCK, "kbd-numlock"),
KBD_LED_TRIGGER(VC_CAPSLOCK, "kbd-capslock"),
KBD_LED_TRIGGER(VC_KANALOCK, "kbd-kanalock"),
@@ -1256,7 +1256,7 @@ static int emulate_raw(struct vc_data *vc, unsigned int keycode,
case KEY_SYSRQ:
/*
* Real AT keyboards (that's what we're trying
- * to emulate here emit 0xe0 0x2a 0xe0 0x37 when
+ * to emulate here) emit 0xe0 0x2a 0xe0 0x37 when
* pressing PrtSc/SysRq alone, but simply 0x54
* when pressing Alt+PrtSc/SysRq.
*/
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 8c3bf3d613c0..4c10a9df3b91 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -315,38 +315,27 @@ void schedule_console_callback(void)
schedule_work(&console_work);
}
-static void scrup(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
+static void con_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
+ enum con_scroll dir, unsigned int nr)
{
- unsigned short *d, *s;
+ u16 *clear, *d, *s;
- if (t+nr >= b)
+ if (t + nr >= b)
nr = b - t - 1;
if (b > vc->vc_rows || t >= b || nr < 1)
return;
- if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_UP, nr))
+ if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, dir, nr))
return;
- d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
- s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr));
- scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row);
- scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_video_erase_char,
- vc->vc_size_row * nr);
-}
-static void scrdown(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
-{
- unsigned short *s;
- unsigned int step;
+ s = clear = (u16 *)(vc->vc_origin + vc->vc_size_row * t);
+ d = (u16 *)(vc->vc_origin + vc->vc_size_row * (t + nr));
- if (t+nr >= b)
- nr = b - t - 1;
- if (b > vc->vc_rows || t >= b || nr < 1)
- return;
- if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_DOWN, nr))
- return;
- s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
- step = vc->vc_cols * nr;
- scr_memmovew(s + step, s, (b - t - nr) * vc->vc_size_row);
- scr_memsetw(s, vc->vc_video_erase_char, 2 * step);
+ if (dir == SM_UP) {
+ clear = s + (b - t - nr) * vc->vc_cols;
+ swap(s, d);
+ }
+ scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row);
+ scr_memsetw(clear, vc->vc_video_erase_char, vc->vc_size_row * nr);
}
static void do_update_region(struct vc_data *vc, unsigned long start, int count)
@@ -1120,7 +1109,7 @@ static void lf(struct vc_data *vc)
* if below scrolling region
*/
if (vc->vc_y + 1 == vc->vc_bottom)
- scrup(vc, vc->vc_top, vc->vc_bottom, 1);
+ con_scroll(vc, vc->vc_top, vc->vc_bottom, SM_UP, 1);
else if (vc->vc_y < vc->vc_rows - 1) {
vc->vc_y++;
vc->vc_pos += vc->vc_size_row;
@@ -1135,7 +1124,7 @@ static void ri(struct vc_data *vc)
* if above scrolling region
*/
if (vc->vc_y == vc->vc_top)
- scrdown(vc, vc->vc_top, vc->vc_bottom, 1);
+ con_scroll(vc, vc->vc_top, vc->vc_bottom, SM_DOWN, 1);
else if (vc->vc_y > 0) {
vc->vc_y--;
vc->vc_pos -= vc->vc_size_row;
@@ -1631,7 +1620,7 @@ static void csi_L(struct vc_data *vc, unsigned int nr)
nr = vc->vc_rows - vc->vc_y;
else if (!nr)
nr = 1;
- scrdown(vc, vc->vc_y, vc->vc_bottom, nr);
+ con_scroll(vc, vc->vc_y, vc->vc_bottom, SM_DOWN, nr);
vc->vc_need_wrap = 0;
}
@@ -1652,7 +1641,7 @@ static void csi_M(struct vc_data *vc, unsigned int nr)
nr = vc->vc_rows - vc->vc_y;
else if (!nr)
nr=1;
- scrup(vc, vc->vc_y, vc->vc_bottom, nr);
+ con_scroll(vc, vc->vc_y, vc->vc_bottom, SM_UP, nr);
vc->vc_need_wrap = 0;
}
@@ -3934,10 +3923,6 @@ void unblank_screen(void)
*/
static void blank_screen_t(unsigned long dummy)
{
- if (unlikely(!keventd_up())) {
- mod_timer(&console_timer, jiffies + (blankinterval * HZ));
- return;
- }
blank_timer_expired = 1;
schedule_work(&console_work);
}
@@ -4295,6 +4280,46 @@ void vcs_scr_updated(struct vc_data *vc)
notify_update(vc);
}
+void vc_scrolldelta_helper(struct vc_data *c, int lines,
+ unsigned int rolled_over, void *base, unsigned int size)
+{
+ unsigned long ubase = (unsigned long)base;
+ ptrdiff_t scr_end = (void *)c->vc_scr_end - base;
+ ptrdiff_t vorigin = (void *)c->vc_visible_origin - base;
+ ptrdiff_t origin = (void *)c->vc_origin - base;
+ int margin = c->vc_size_row * 4;
+ int from, wrap, from_off, avail;
+
+ /* Turn scrollback off */
+ if (!lines) {
+ c->vc_visible_origin = c->vc_origin;
+ return;
+ }
+
+ /* Do we have already enough to allow jumping from 0 to the end? */
+ if (rolled_over > scr_end + margin) {
+ from = scr_end;
+ wrap = rolled_over + c->vc_size_row;
+ } else {
+ from = 0;
+ wrap = size;
+ }
+
+ from_off = (vorigin - from + wrap) % wrap + lines * c->vc_size_row;
+ avail = (origin - from + wrap) % wrap;
+
+ /* Only a little piece would be left? Show all incl. the piece! */
+ if (avail < 2 * margin)
+ margin = 0;
+ if (from_off < margin)
+ from_off = 0;
+ if (from_off > avail - margin)
+ from_off = avail;
+
+ c->vc_visible_origin = ubase + (from + from_off) % wrap;
+}
+EXPORT_SYMBOL_GPL(vc_scrolldelta_helper);
+
/*
* Visible symbols for modules
*/
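vc_scrolldelta_helper() is exported so console drivers can implement their scrolldelta hook on top of it instead of duplicating the wrap-around arithmetic. A hedged sketch of the call (the vga_* names stand in for whatever base, size and rolled-over counter a driver tracks for its scrollback buffer; the exact consw hook prototype is not reproduced here):

	/* Sketch: a console driver delegating scrollback positioning to the helper. */
	static void example_scrolldelta(struct vc_data *c, int lines)
	{
		vc_scrolldelta_helper(c, lines, vga_rolled_over,
				      (void *)vga_vram_base, vga_vram_size);
	}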
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 52c98ce1b6fe..7e8dc78a9796 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -155,4 +155,13 @@ config UIO_MF624
If you compile this as a module, it will be called uio_mf624.
+config UIO_HV_GENERIC
+ tristate "Generic driver for Hyper-V VMBus"
+ depends on HYPERV
+ help
+ Generic driver that you can dynamically bind to any
+ Hyper-V VMBus device. It is useful for giving userspace
+ direct access to network and storage devices.
+
+ If you compile this as a module, it will be called uio_hv_generic.
endif
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
index 8560dad52d0f..e9663bb8a4c7 100644
--- a/drivers/uio/Makefile
+++ b/drivers/uio/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_UIO_NETX) += uio_netx.o
obj-$(CONFIG_UIO_PRUSS) += uio_pruss.o
obj-$(CONFIG_UIO_MF624) += uio_mf624.o
obj-$(CONFIG_UIO_FSL_ELBC_GPCM) += uio_fsl_elbc_gpcm.o
+obj-$(CONFIG_UIO_HV_GENERIC) += uio_hv_generic.o
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
new file mode 100644
index 000000000000..50958f167305
--- /dev/null
+++ b/drivers/uio/uio_hv_generic.c
@@ -0,0 +1,218 @@
+/*
+ * uio_hv_generic - generic UIO driver for VMBus
+ *
+ * Copyright (c) 2013-2016 Brocade Communications Systems, Inc.
+ * Copyright (c) 2016, Microsoft Corporation.
+ *
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Since the driver does not declare any device ids, you must allocate
+ * an id and bind the device to the driver yourself. For example:
+ *
+ * # echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" \
+ * > /sys/bus/vmbus/drivers/uio_hv_generic
+ * # echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 \
+ * > /sys/bus/vmbus/drivers/hv_netvsc/unbind
+ * # echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 \
+ * > /sys/bus/vmbus/drivers/uio_hv_generic/bind
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uio_driver.h>
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/hyperv.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+#include "../hv/hyperv_vmbus.h"
+
+#define DRIVER_VERSION "0.02.0"
+#define DRIVER_AUTHOR "Stephen Hemminger <sthemmin at microsoft.com>"
+#define DRIVER_DESC "Generic UIO driver for VMBus devices"
+
+/*
+ * List of resources to be mapped to user space;
+ * can be extended up to MAX_UIO_MAPS (5) items.
+ */
+enum hv_uio_map {
+ TXRX_RING_MAP = 0,
+ INT_PAGE_MAP,
+ MON_PAGE_MAP,
+};
+
+#define HV_RING_SIZE 512
+
+struct hv_uio_private_data {
+ struct uio_info info;
+ struct hv_device *device;
+};
+
+static int
+hv_uio_mmap(struct uio_info *info, struct vm_area_struct *vma)
+{
+ int mi;
+
+ if (vma->vm_pgoff >= MAX_UIO_MAPS)
+ return -EINVAL;
+
+ if (info->mem[vma->vm_pgoff].size == 0)
+ return -EINVAL;
+
+ mi = (int)vma->vm_pgoff;
+
+ return remap_pfn_range(vma, vma->vm_start,
+ info->mem[mi].addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
+/*
+ * This is the irqcontrol callback to be registered to uio_info.
+ * It can be used to disable/enable interrupt from user space processes.
+ *
+ * @param info
+ * pointer to uio_info.
+ * @param irq_state
+ * state value. 1 to enable interrupt, 0 to disable interrupt.
+ */
+static int
+hv_uio_irqcontrol(struct uio_info *info, s32 irq_state)
+{
+ struct hv_uio_private_data *pdata = info->priv;
+ struct hv_device *dev = pdata->device;
+
+ dev->channel->inbound.ring_buffer->interrupt_mask = !irq_state;
+ virt_mb();
+
+ return 0;
+}
+
+/*
+ * Callback from vmbus_event when something is in inbound ring.
+ */
+static void hv_uio_channel_cb(void *context)
+{
+ struct hv_uio_private_data *pdata = context;
+ struct hv_device *dev = pdata->device;
+
+ dev->channel->inbound.ring_buffer->interrupt_mask = 1;
+ virt_mb();
+
+ uio_event_notify(&pdata->info);
+}
+
+static int
+hv_uio_probe(struct hv_device *dev,
+ const struct hv_vmbus_device_id *dev_id)
+{
+ struct hv_uio_private_data *pdata;
+ int ret;
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE,
+ HV_RING_SIZE * PAGE_SIZE, NULL, 0,
+ hv_uio_channel_cb, pdata);
+ if (ret)
+ goto fail;
+
+ dev->channel->inbound.ring_buffer->interrupt_mask = 1;
+ dev->channel->batched_reading = false;
+
+ /* Fill general uio info */
+ pdata->info.name = "uio_hv_generic";
+ pdata->info.version = DRIVER_VERSION;
+ pdata->info.irqcontrol = hv_uio_irqcontrol;
+ pdata->info.mmap = hv_uio_mmap;
+ pdata->info.irq = UIO_IRQ_CUSTOM;
+
+ /* mem resources */
+ pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
+ pdata->info.mem[TXRX_RING_MAP].addr
+ = virt_to_phys(dev->channel->ringbuffer_pages);
+ pdata->info.mem[TXRX_RING_MAP].size
+ = dev->channel->ringbuffer_pagecount * PAGE_SIZE;
+ pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_LOGICAL;
+
+ pdata->info.mem[INT_PAGE_MAP].name = "int_page";
+ pdata->info.mem[INT_PAGE_MAP].addr =
+ virt_to_phys(vmbus_connection.int_page);
+ pdata->info.mem[INT_PAGE_MAP].size = PAGE_SIZE;
+ pdata->info.mem[INT_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
+
+ pdata->info.mem[MON_PAGE_MAP].name = "monitor_pages";
+ pdata->info.mem[MON_PAGE_MAP].addr =
+ virt_to_phys(vmbus_connection.monitor_pages[1]);
+ pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE;
+ pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
+
+ pdata->info.priv = pdata;
+ pdata->device = dev;
+
+ ret = uio_register_device(&dev->device, &pdata->info);
+ if (ret) {
+ dev_err(&dev->device, "hv_uio register failed\n");
+ goto fail_close;
+ }
+
+ hv_set_drvdata(dev, pdata);
+
+ return 0;
+
+fail_close:
+ vmbus_close(dev->channel);
+fail:
+ kfree(pdata);
+
+ return ret;
+}
+
+static int
+hv_uio_remove(struct hv_device *dev)
+{
+ struct hv_uio_private_data *pdata = hv_get_drvdata(dev);
+
+ if (!pdata)
+ return 0;
+
+ uio_unregister_device(&pdata->info);
+ hv_set_drvdata(dev, NULL);
+ vmbus_close(dev->channel);
+ kfree(pdata);
+ return 0;
+}
+
+static struct hv_driver hv_uio_drv = {
+ .name = "uio_hv_generic",
+ .id_table = NULL, /* only dynamic id's */
+ .probe = hv_uio_probe,
+ .remove = hv_uio_remove,
+};
+
+static int __init
+hyperv_module_init(void)
+{
+ return vmbus_driver_register(&hv_uio_drv);
+}
+
+static void __exit
+hyperv_module_exit(void)
+{
+ vmbus_driver_unregister(&hv_uio_drv);
+}
+
+module_init(hyperv_module_init);
+module_exit(hyperv_module_exit);
+
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
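Once a VMBus device is bound to uio_hv_generic it appears as a regular UIO node, so userspace waits for channel events with read() and maps the ring buffers with mmap(), where the map index is selected by the page-sized mmap offset. A minimal userspace sketch, under the assumption that the bound device became /dev/uio0; a real program would read the mapping sizes from sysfs instead of mapping a single page:

	/* Minimal UIO consumer sketch (device name and mapping size assumed). */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long pagesz = sysconf(_SC_PAGESIZE);
		int fd = open("/dev/uio0", O_RDWR);
		uint32_t count;
		void *ring;

		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* Map region 0 (txrx_rings); offset = map index * page size.
		 * The real size is in /sys/class/uio/uio0/maps/map0/size. */
		ring = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, 0 * pagesz);
		if (ring == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/* Block until hv_uio_channel_cb() signals an event. */
		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("event #%u\n", count);

		munmap(ring, pagesz);
		close(fd);
		return 0;
	}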
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index ca9e2fafb0b6..31d5b1d3b5af 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -111,6 +111,7 @@ static void pruss_cleanup(struct device *dev, struct uio_pruss_dev *gdev)
gdev->sram_vaddr,
sram_pool_sz);
kfree(gdev->info);
+ clk_disable(gdev->pruss_clk);
clk_put(gdev->pruss_clk);
kfree(gdev);
}
@@ -143,7 +144,14 @@ static int pruss_probe(struct platform_device *pdev)
kfree(gdev);
return ret;
} else {
- clk_enable(gdev->pruss_clk);
+ ret = clk_enable(gdev->pruss_clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock\n");
+ clk_put(gdev->pruss_clk);
+ kfree(gdev->info);
+ kfree(gdev);
+ return ret;
+ }
}
regs_prussio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 644e978cbd3e..fbe493d44e81 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -95,6 +95,8 @@ source "drivers/usb/usbip/Kconfig"
endif
+source "drivers/usb/mtu3/Kconfig"
+
source "drivers/usb/musb/Kconfig"
source "drivers/usb/dwc3/Kconfig"
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index dca78565eb55..7791af6c102c 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_USB_DWC2) += dwc2/
obj-$(CONFIG_USB_ISP1760) += isp1760/
obj-$(CONFIG_USB_MON) += mon/
+obj-$(CONFIG_USB_MTU3) += mtu3/
obj-$(CONFIG_PCI) += host/
obj-$(CONFIG_USB_EHCI_HCD) += host/
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 099179457f60..5f4a8157fad8 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -18,6 +18,7 @@
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/usb/chipidea.h>
+#include <linux/usb/of.h>
#include <linux/clk.h>
#include "ci.h"
@@ -146,6 +147,9 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
if (of_find_property(np, "external-vbus-divider", NULL))
data->evdo = 1;
+ if (of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI)
+ data->ulpi = 1;
+
return data;
}
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
index 409aa5ca8dda..d666c9f036ba 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.h
+++ b/drivers/usb/chipidea/ci_hdrc_imx.h
@@ -19,6 +19,7 @@ struct imx_usbmisc_data {
unsigned int disable_oc:1; /* over current detect disabled */
unsigned int oc_polarity:1; /* over current polarity if oc enabled */
unsigned int evdo:1; /* set external vbus divider option */
+ unsigned int ulpi:1; /* connected to an ULPI phy */
};
int imx_usbmisc_init(struct imx_usbmisc_data *);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index c9e80ad48fdc..cf132f057137 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -365,7 +365,7 @@ static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
if (hwreq->req.length == 0
|| hwreq->req.length % hwep->ep.maxpacket)
mul++;
- node->ptr->token |= mul << __ffs(TD_MULTO);
+ node->ptr->token |= cpu_to_le32(mul << __ffs(TD_MULTO));
}
temp = (u32) (hwreq->req.dma + hwreq->req.actual);
@@ -504,7 +504,7 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
if (hwreq->req.length == 0
|| hwreq->req.length % hwep->ep.maxpacket)
mul++;
- hwep->qh.ptr->cap |= mul << __ffs(QH_MULT);
+ hwep->qh.ptr->cap |= cpu_to_le32(mul << __ffs(QH_MULT));
}
ret = hw_ep_prime(ci, hwep->num, hwep->dir,
@@ -529,7 +529,7 @@ static void free_pending_td(struct ci_hw_ep *hwep)
static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
struct td_node *node)
{
- hwep->qh.ptr->td.next = node->dma;
+ hwep->qh.ptr->td.next = cpu_to_le32(node->dma);
hwep->qh.ptr->td.token &=
cpu_to_le32(~(TD_STATUS_HALTED | TD_STATUS_ACTIVE));
@@ -821,7 +821,7 @@ static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
}
if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
- hwreq->req.length > (1 + hwep->ep.mult) * hwep->ep.maxpacket) {
+ hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket) {
dev_err(hwep->ci->dev, "request length too big for isochronous\n");
return -EMSGSIZE;
}
@@ -1253,8 +1253,8 @@ static int ep_enable(struct usb_ep *ep,
hwep->num = usb_endpoint_num(desc);
hwep->type = usb_endpoint_type(desc);
- hwep->ep.maxpacket = usb_endpoint_maxp(desc) & 0x07ff;
- hwep->ep.mult = QH_ISO_MULT(usb_endpoint_maxp(desc));
+ hwep->ep.maxpacket = usb_endpoint_maxp(desc);
+ hwep->ep.mult = usb_endpoint_maxp_mult(desc);
if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
cap |= QH_IOS;
diff --git a/drivers/usb/chipidea/udc.h b/drivers/usb/chipidea/udc.h
index e66df0020bd4..2ecd1174d66c 100644
--- a/drivers/usb/chipidea/udc.h
+++ b/drivers/usb/chipidea/udc.h
@@ -22,11 +22,11 @@
/* DMA layout of transfer descriptors */
struct ci_hw_td {
/* 0 */
- u32 next;
+ __le32 next;
#define TD_TERMINATE BIT(0)
#define TD_ADDR_MASK (0xFFFFFFEUL << 5)
/* 1 */
- u32 token;
+ __le32 token;
#define TD_STATUS (0x00FFUL << 0)
#define TD_STATUS_TR_ERR BIT(3)
#define TD_STATUS_DT_ERR BIT(5)
@@ -36,7 +36,7 @@ struct ci_hw_td {
#define TD_IOC BIT(15)
#define TD_TOTAL_BYTES (0x7FFFUL << 16)
/* 2 */
- u32 page[5];
+ __le32 page[5];
#define TD_CURR_OFFSET (0x0FFFUL << 0)
#define TD_FRAME_NUM (0x07FFUL << 0)
#define TD_RESERVED_MASK (0x0FFFUL << 0)
@@ -45,18 +45,18 @@ struct ci_hw_td {
/* DMA layout of queue heads */
struct ci_hw_qh {
/* 0 */
- u32 cap;
+ __le32 cap;
#define QH_IOS BIT(15)
#define QH_MAX_PKT (0x07FFUL << 16)
#define QH_ZLT BIT(29)
#define QH_MULT (0x0003UL << 30)
#define QH_ISO_MULT(x) ((x >> 11) & 0x03)
/* 1 */
- u32 curr;
+ __le32 curr;
/* 2 - 8 */
struct ci_hw_td td;
/* 9 */
- u32 RESERVED;
+ __le32 RESERVED;
struct usb_ctrlrequest setup;
} __attribute__ ((packed, aligned(4)));
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 20d02a5e418d..e77a4ed4f021 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -46,11 +46,23 @@
#define MX53_USB_OTG_PHY_CTRL_0_OFFSET 0x08
#define MX53_USB_OTG_PHY_CTRL_1_OFFSET 0x0c
+#define MX53_USB_CTRL_1_OFFSET 0x10
+#define MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_MASK (0x11 << 2)
+#define MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_ULPI BIT(2)
+#define MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_MASK (0x11 << 6)
+#define MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_ULPI BIT(6)
#define MX53_USB_UH2_CTRL_OFFSET 0x14
#define MX53_USB_UH3_CTRL_OFFSET 0x18
+#define MX53_USB_CLKONOFF_CTRL_OFFSET 0x24
+#define MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF BIT(21)
+#define MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF BIT(22)
#define MX53_BM_OVER_CUR_DIS_H1 BIT(5)
#define MX53_BM_OVER_CUR_DIS_OTG BIT(8)
#define MX53_BM_OVER_CUR_DIS_UHx BIT(30)
+#define MX53_USB_CTRL_1_UH2_ULPI_EN BIT(26)
+#define MX53_USB_CTRL_1_UH3_ULPI_EN BIT(27)
+#define MX53_USB_UHx_CTRL_WAKE_UP_EN BIT(7)
+#define MX53_USB_UHx_CTRL_ULPI_INT_EN BIT(8)
#define MX53_USB_PHYCTRL1_PLLDIV_MASK 0x3
#define MX53_USB_PLL_DIV_24_MHZ 0x01
@@ -199,31 +211,77 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
val |= MX53_USB_PLL_DIV_24_MHZ;
writel(val, usbmisc->base + MX53_USB_OTG_PHY_CTRL_1_OFFSET);
- if (data->disable_oc) {
- spin_lock_irqsave(&usbmisc->lock, flags);
- switch (data->index) {
- case 0:
+ spin_lock_irqsave(&usbmisc->lock, flags);
+
+ switch (data->index) {
+ case 0:
+ if (data->disable_oc) {
reg = usbmisc->base + MX53_USB_OTG_PHY_CTRL_0_OFFSET;
val = readl(reg) | MX53_BM_OVER_CUR_DIS_OTG;
- break;
- case 1:
+ writel(val, reg);
+ }
+ break;
+ case 1:
+ if (data->disable_oc) {
reg = usbmisc->base + MX53_USB_OTG_PHY_CTRL_0_OFFSET;
val = readl(reg) | MX53_BM_OVER_CUR_DIS_H1;
- break;
- case 2:
+ writel(val, reg);
+ }
+ break;
+ case 2:
+ if (data->ulpi) {
+ /* set USBH2 into ULPI-mode. */
+ reg = usbmisc->base + MX53_USB_CTRL_1_OFFSET;
+ val = readl(reg) | MX53_USB_CTRL_1_UH2_ULPI_EN;
+ /* select ULPI clock */
+ val &= ~MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_MASK;
+ val |= MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_ULPI;
+ writel(val, reg);
+ /* Set interrupt wake up enable */
+ reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET;
+ val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
+ | MX53_USB_UHx_CTRL_ULPI_INT_EN;
+ writel(val, reg);
+ /* Disable internal 60Mhz clock */
+ reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET;
+ val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF;
+ writel(val, reg);
+ }
+ if (data->disable_oc) {
reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET;
val = readl(reg) | MX53_BM_OVER_CUR_DIS_UHx;
- break;
- case 3:
+ writel(val, reg);
+ }
+ break;
+ case 3:
+ if (data->ulpi) {
+ /* set USBH3 into ULPI-mode. */
+ reg = usbmisc->base + MX53_USB_CTRL_1_OFFSET;
+ val = readl(reg) | MX53_USB_CTRL_1_UH3_ULPI_EN;
+ /* select ULPI clock */
+ val &= ~MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_MASK;
+ val |= MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_ULPI;
+ writel(val, reg);
+ /* Set interrupt wake up enable */
reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET;
- val = readl(reg) | MX53_BM_OVER_CUR_DIS_UHx;
- break;
+ val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
+ | MX53_USB_UHx_CTRL_ULPI_INT_EN;
+ writel(val, reg);
+ /* Disable internal 60Mhz clock */
+ reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET;
+ val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF;
+ writel(val, reg);
}
- if (reg && val)
+ if (data->disable_oc) {
+ reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET;
+ val = readl(reg) | MX53_BM_OVER_CUR_DIS_UHx;
writel(val, reg);
- spin_unlock_irqrestore(&usbmisc->lock, flags);
+ }
+ break;
}
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
return 0;
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index fada988512a1..e35b1508d3eb 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -133,8 +133,8 @@ static int acm_ctrl_msg(struct acm *acm, int request, int value,
buf, len, 5000);
dev_dbg(&acm->control->dev,
- "%s - rq 0x%02x, val %#x, len %#x, result %d\n",
- __func__, request, value, len, retval);
+ "%s - rq 0x%02x, val %#x, len %#x, result %d\n",
+ __func__, request, value, len, retval);
usb_autopm_put_interface(acm->control);
@@ -158,6 +158,17 @@ static inline int acm_set_control(struct acm *acm, int control)
#define acm_send_break(acm, ms) \
acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0)
+static void acm_kill_urbs(struct acm *acm)
+{
+ int i;
+
+ usb_kill_urb(acm->ctrlurb);
+ for (i = 0; i < ACM_NW; i++)
+ usb_kill_urb(acm->wb[i].urb);
+ for (i = 0; i < acm->rx_buflimit; i++)
+ usb_kill_urb(acm->read_urbs[i]);
+}
+
/*
* Write buffer management.
* All of these assume proper locks taken by the caller.
@@ -291,13 +302,13 @@ static void acm_ctrl_irq(struct urb *urb)
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&acm->control->dev,
- "%s - urb shutting down with status: %d\n",
- __func__, status);
+ "%s - urb shutting down with status: %d\n",
+ __func__, status);
return;
default:
dev_dbg(&acm->control->dev,
- "%s - nonzero urb status received: %d\n",
- __func__, status);
+ "%s - nonzero urb status received: %d\n",
+ __func__, status);
goto exit;
}
@@ -306,16 +317,16 @@ static void acm_ctrl_irq(struct urb *urb)
data = (unsigned char *)(dr + 1);
switch (dr->bNotificationType) {
case USB_CDC_NOTIFY_NETWORK_CONNECTION:
- dev_dbg(&acm->control->dev, "%s - network connection: %d\n",
- __func__, dr->wValue);
+ dev_dbg(&acm->control->dev,
+ "%s - network connection: %d\n", __func__, dr->wValue);
break;
case USB_CDC_NOTIFY_SERIAL_STATE:
newctrl = get_unaligned_le16(data);
if (!acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
- dev_dbg(&acm->control->dev, "%s - calling hangup\n",
- __func__);
+ dev_dbg(&acm->control->dev,
+ "%s - calling hangup\n", __func__);
tty_port_tty_hangup(&acm->port, false);
}
@@ -357,8 +368,8 @@ static void acm_ctrl_irq(struct urb *urb)
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval && retval != -EPERM)
- dev_err(&acm->control->dev, "%s - usb_submit_urb failed: %d\n",
- __func__, retval);
+ dev_err(&acm->control->dev,
+ "%s - usb_submit_urb failed: %d\n", __func__, retval);
}
static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
@@ -372,8 +383,8 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
if (res) {
if (res != -EPERM) {
dev_err(&acm->data->dev,
- "urb %d failed submission with %d\n",
- index, res);
+ "urb %d failed submission with %d\n",
+ index, res);
}
set_bit(index, &acm->read_urbs_free);
return res;
@@ -416,30 +427,43 @@ static void acm_read_bulk_callback(struct urb *urb)
int status = urb->status;
dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
- rb->index, urb->actual_length,
- status);
+ rb->index, urb->actual_length, status);
+
+ set_bit(rb->index, &acm->read_urbs_free);
if (!acm->dev) {
- set_bit(rb->index, &acm->read_urbs_free);
dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
return;
}
- if (status) {
- set_bit(rb->index, &acm->read_urbs_free);
- if ((status != -ENOENT) || (urb->actual_length == 0))
- return;
+ switch (status) {
+ case 0:
+ usb_mark_last_busy(acm->dev);
+ acm_process_read_urb(acm, urb);
+ break;
+ case -EPIPE:
+ set_bit(EVENT_RX_STALL, &acm->flags);
+ schedule_work(&acm->work);
+ return;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ dev_dbg(&acm->data->dev,
+ "%s - urb shutting down with status: %d\n",
+ __func__, status);
+ return;
+ default:
+ dev_dbg(&acm->data->dev,
+ "%s - nonzero urb status received: %d\n",
+ __func__, status);
+ break;
}
- usb_mark_last_busy(acm->dev);
-
- acm_process_read_urb(acm, urb);
/*
* Unthrottle may run on another CPU which needs to see events
* in the same order. Submission has an implict barrier
*/
smp_mb__before_atomic();
- set_bit(rb->index, &acm->read_urbs_free);
/* throttle device if requested by tty */
spin_lock_irqsave(&acm->read_lock, flags);
@@ -469,14 +493,30 @@ static void acm_write_bulk(struct urb *urb)
spin_lock_irqsave(&acm->write_lock, flags);
acm_write_done(acm, wb);
spin_unlock_irqrestore(&acm->write_lock, flags);
+ set_bit(EVENT_TTY_WAKEUP, &acm->flags);
schedule_work(&acm->work);
}
static void acm_softint(struct work_struct *work)
{
+ int i;
struct acm *acm = container_of(work, struct acm, work);
- tty_port_tty_wakeup(&acm->port);
+ if (test_bit(EVENT_RX_STALL, &acm->flags)) {
+ if (!(usb_autopm_get_interface(acm->data))) {
+ for (i = 0; i < acm->rx_buflimit; i++)
+ usb_kill_urb(acm->read_urbs[i]);
+ usb_clear_halt(acm->dev, acm->in);
+ acm_submit_read_urbs(acm, GFP_KERNEL);
+ usb_autopm_put_interface(acm->data);
+ }
+ clear_bit(EVENT_RX_STALL, &acm->flags);
+ }
+
+ if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) {
+ tty_port_tty_wakeup(&acm->port);
+ clear_bit(EVENT_TTY_WAKEUP, &acm->flags);
+ }
}
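
The acm_softint() rework above turns the work item into a small event dispatcher: URB completion handlers set a bit in acm->flags and schedule the work, and the work function handles each pending event in process context (where killing URBs and clearing a halt may sleep). A generic sketch of that pattern, with hypothetical mydev names:

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>

#define EV_WAKEUP	0
#define EV_STALL	1

struct mydev {
	unsigned long flags;		/* event bits, set from atomic context */
	struct work_struct work;	/* handler runs in process context */
};

/* called from an URB completion handler (atomic context) */
static void mydev_note_event(struct mydev *dev, int event)
{
	set_bit(event, &dev->flags);
	schedule_work(&dev->work);
}

static void mydev_work(struct work_struct *work)
{
	struct mydev *dev = container_of(work, struct mydev, work);

	if (test_and_clear_bit(EV_STALL, &dev->flags)) {
		/* recovery that may sleep: kill URBs, clear halt, resubmit */
	}
	if (test_and_clear_bit(EV_WAKEUP, &dev->flags)) {
		/* wake up writers, e.g. via tty_port_tty_wakeup() */
	}
}
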
/*
@@ -608,7 +648,6 @@ static void acm_port_shutdown(struct tty_port *port)
struct acm *acm = container_of(port, struct acm, port);
struct urb *urb;
struct acm_wb *wb;
- int i;
/*
* Need to grab write_lock to prevent race with resume, but no need to
@@ -630,11 +669,7 @@ static void acm_port_shutdown(struct tty_port *port)
usb_autopm_put_interface_async(acm->control);
}
- usb_kill_urb(acm->ctrlurb);
- for (i = 0; i < ACM_NW; i++)
- usb_kill_urb(acm->wb[i].urb);
- for (i = 0; i < acm->rx_buflimit; i++)
- usb_kill_urb(acm->read_urbs[i]);
+ acm_kill_urbs(acm);
}
static void acm_tty_cleanup(struct tty_struct *tty)
@@ -837,8 +872,8 @@ static int acm_tty_break_ctl(struct tty_struct *tty, int state)
retval = acm_send_break(acm, state ? 0xffff : 0);
if (retval < 0)
- dev_dbg(&acm->control->dev, "%s - send break failed\n",
- __func__);
+ dev_dbg(&acm->control->dev,
+ "%s - send break failed\n", __func__);
return retval;
}
@@ -877,9 +912,6 @@ static int get_serial_info(struct acm *acm, struct serial_struct __user *info)
{
struct serial_struct tmp;
- if (!info)
- return -EINVAL;
-
memset(&tmp, 0, sizeof(tmp));
tmp.flags = ASYNC_LOW_LATENCY;
tmp.xmit_fifo_size = acm->writesize;
@@ -969,25 +1001,20 @@ static int wait_serial_change(struct acm *acm, unsigned long arg)
return rv;
}
-static int get_serial_usage(struct acm *acm,
- struct serial_icounter_struct __user *count)
+static int acm_tty_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount)
{
- struct serial_icounter_struct icount;
- int rv = 0;
-
- memset(&icount, 0, sizeof(icount));
- icount.dsr = acm->iocount.dsr;
- icount.rng = acm->iocount.rng;
- icount.dcd = acm->iocount.dcd;
- icount.frame = acm->iocount.frame;
- icount.overrun = acm->iocount.overrun;
- icount.parity = acm->iocount.parity;
- icount.brk = acm->iocount.brk;
+ struct acm *acm = tty->driver_data;
- if (copy_to_user(count, &icount, sizeof(icount)) > 0)
- rv = -EFAULT;
+ icount->dsr = acm->iocount.dsr;
+ icount->rng = acm->iocount.rng;
+ icount->dcd = acm->iocount.dcd;
+ icount->frame = acm->iocount.frame;
+ icount->overrun = acm->iocount.overrun;
+ icount->parity = acm->iocount.parity;
+ icount->brk = acm->iocount.brk;
- return rv;
+ return 0;
}
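
With a ->get_icount() hook, the tty core services TIOCGICOUNT itself: it calls the driver to fill a kernel-space struct serial_icounter_struct and performs the copy to user space, which is why the driver no longer needs its own ioctl case or __user handling (the TIOCGICOUNT branch is dropped in the next hunk). A minimal sketch of such a hook, with a hypothetical mydev structure:

#include <linux/tty.h>
#include <linux/serial.h>

struct mydev {
	struct async_icount icount;	/* counters maintained by the driver */
};

static int mydev_tty_get_icount(struct tty_struct *tty,
				struct serial_icounter_struct *icount)
{
	struct mydev *dev = tty->driver_data;

	icount->dsr = dev->icount.dsr;
	icount->dcd = dev->icount.dcd;
	icount->brk = dev->icount.brk;
	return 0;		/* tty core copies the struct to user space */
}

static const struct tty_operations mydev_ops = {
	/* ... other operations ... */
	.get_icount = mydev_tty_get_icount,
};
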
static int acm_tty_ioctl(struct tty_struct *tty,
@@ -1012,9 +1039,6 @@ static int acm_tty_ioctl(struct tty_struct *tty,
rv = wait_serial_change(acm, arg);
usb_autopm_put_interface(acm->control);
break;
- case TIOCGICOUNT:
- rv = get_serial_usage(acm, (struct serial_icounter_struct __user *) arg);
- break;
}
return rv;
@@ -1088,19 +1112,17 @@ static void acm_write_buffers_free(struct acm *acm)
{
int i;
struct acm_wb *wb;
- struct usb_device *usb_dev = interface_to_usbdev(acm->control);
for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++)
- usb_free_coherent(usb_dev, acm->writesize, wb->buf, wb->dmah);
+ usb_free_coherent(acm->dev, acm->writesize, wb->buf, wb->dmah);
}
static void acm_read_buffers_free(struct acm *acm)
{
- struct usb_device *usb_dev = interface_to_usbdev(acm->control);
int i;
for (i = 0; i < acm->rx_buflimit; i++)
- usb_free_coherent(usb_dev, acm->readsize,
+ usb_free_coherent(acm->dev, acm->readsize,
acm->read_buffers[i].base, acm->read_buffers[i].dma);
}
@@ -1345,9 +1367,16 @@ made_compressed_probe:
spin_lock_init(&acm->write_lock);
spin_lock_init(&acm->read_lock);
mutex_init(&acm->mutex);
- acm->is_int_ep = usb_endpoint_xfer_int(epread);
- if (acm->is_int_ep)
+ if (usb_endpoint_xfer_int(epread)) {
acm->bInterval = epread->bInterval;
+ acm->in = usb_rcvintpipe(usb_dev, epread->bEndpointAddress);
+ } else {
+ acm->in = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
+ }
+ if (usb_endpoint_xfer_int(epwrite))
+ acm->out = usb_sndintpipe(usb_dev, epwrite->bEndpointAddress);
+ else
+ acm->out = usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress);
tty_port_init(&acm->port);
acm->port.ops = &acm_port_ops;
init_usb_anchor(&acm->delayed);
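
The probe hunk above computes the receive and send pipes once, choosing interrupt or bulk pipes according to the endpoint type, and caches them in acm->in and acm->out so the later URB-fill and clear-halt hunks can simply reuse them. A condensed sketch of that selection; the helper name and parameter layout are illustrative:

#include <linux/usb.h>

static void cache_pipes(struct usb_device *udev,
			struct usb_endpoint_descriptor *epread,
			struct usb_endpoint_descriptor *epwrite,
			unsigned int *in, unsigned int *out)
{
	if (usb_endpoint_xfer_int(epread))
		*in = usb_rcvintpipe(udev, epread->bEndpointAddress);
	else
		*in = usb_rcvbulkpipe(udev, epread->bEndpointAddress);

	if (usb_endpoint_xfer_int(epwrite))
		*out = usb_sndintpipe(udev, epwrite->bEndpointAddress);
	else
		*out = usb_sndbulkpipe(udev, epwrite->bEndpointAddress);
}
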
@@ -1382,20 +1411,15 @@ made_compressed_probe:
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
urb->transfer_dma = rb->dma;
- if (acm->is_int_ep) {
- usb_fill_int_urb(urb, acm->dev,
- usb_rcvintpipe(usb_dev, epread->bEndpointAddress),
- rb->base,
+ if (usb_endpoint_xfer_int(epread))
+ usb_fill_int_urb(urb, acm->dev, acm->in, rb->base,
acm->readsize,
acm_read_bulk_callback, rb,
acm->bInterval);
- } else {
- usb_fill_bulk_urb(urb, acm->dev,
- usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress),
- rb->base,
+ else
+ usb_fill_bulk_urb(urb, acm->dev, acm->in, rb->base,
acm->readsize,
acm_read_bulk_callback, rb);
- }
acm->read_urbs[i] = urb;
__set_bit(i, &acm->read_urbs_free);
@@ -1408,12 +1432,10 @@ made_compressed_probe:
goto alloc_fail7;
if (usb_endpoint_xfer_int(epwrite))
- usb_fill_int_urb(snd->urb, usb_dev,
- usb_sndintpipe(usb_dev, epwrite->bEndpointAddress),
+ usb_fill_int_urb(snd->urb, usb_dev, acm->out,
NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval);
else
- usb_fill_bulk_urb(snd->urb, usb_dev,
- usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
+ usb_fill_bulk_urb(snd->urb, usb_dev, acm->out,
NULL, acm->writesize, acm_write_bulk, snd);
snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
if (quirks & SEND_ZERO_PACKET)
@@ -1485,8 +1507,8 @@ skip_countries:
}
if (quirks & CLEAR_HALT_CONDITIONS) {
- usb_clear_halt(usb_dev, usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress));
- usb_clear_halt(usb_dev, usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress));
+ usb_clear_halt(usb_dev, acm->in);
+ usb_clear_halt(usb_dev, acm->out);
}
return 0;
@@ -1520,25 +1542,10 @@ alloc_fail:
return rv;
}
-static void stop_data_traffic(struct acm *acm)
-{
- int i;
-
- usb_kill_urb(acm->ctrlurb);
- for (i = 0; i < ACM_NW; i++)
- usb_kill_urb(acm->wb[i].urb);
- for (i = 0; i < acm->rx_buflimit; i++)
- usb_kill_urb(acm->read_urbs[i]);
-
- cancel_work_sync(&acm->work);
-}
-
static void acm_disconnect(struct usb_interface *intf)
{
struct acm *acm = usb_get_intfdata(intf);
- struct usb_device *usb_dev = interface_to_usbdev(intf);
struct tty_struct *tty;
- int i;
/* sibling interface is already cleaning up */
if (!acm)
@@ -1564,17 +1571,13 @@ static void acm_disconnect(struct usb_interface *intf)
tty_kref_put(tty);
}
- stop_data_traffic(acm);
+ acm_kill_urbs(acm);
+ cancel_work_sync(&acm->work);
tty_unregister_device(acm_tty_driver, acm->minor);
- usb_free_urb(acm->ctrlurb);
- for (i = 0; i < ACM_NW; i++)
- usb_free_urb(acm->wb[i].urb);
- for (i = 0; i < acm->rx_buflimit; i++)
- usb_free_urb(acm->read_urbs[i]);
acm_write_buffers_free(acm);
- usb_free_coherent(usb_dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
+ usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
acm_read_buffers_free(acm);
if (!acm->combined_interfaces)
@@ -1603,7 +1606,8 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
if (cnt)
return 0;
- stop_data_traffic(acm);
+ acm_kill_urbs(acm);
+ cancel_work_sync(&acm->work);
return 0;
}
@@ -1657,6 +1661,15 @@ static int acm_reset_resume(struct usb_interface *intf)
#endif /* CONFIG_PM */
+static int acm_pre_reset(struct usb_interface *intf)
+{
+ struct acm *acm = usb_get_intfdata(intf);
+
+ clear_bit(EVENT_RX_STALL, &acm->flags);
+
+ return 0;
+}
+
#define NOKIA_PCSUITE_ACM_INFO(x) \
USB_DEVICE_AND_INTERFACE_INFO(0x0421, x, \
USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
@@ -1719,6 +1732,7 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
.driver_info = QUIRK_CONTROL_LINE_STATE, },
{ USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
+ { USB_DEVICE(0x2184, 0x0036) }, /* GW Instek AFG-125 */
{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
},
/* Motorola H24 HSPA module: */
@@ -1898,6 +1912,7 @@ static struct usb_driver acm_driver = {
.resume = acm_resume,
.reset_resume = acm_reset_resume,
#endif
+ .pre_reset = acm_pre_reset,
.id_table = acm_ids,
#ifdef CONFIG_PM
.supports_autosuspend = 1,
@@ -1927,6 +1942,7 @@ static const struct tty_operations acm_ops = {
.set_termios = acm_tty_set_termios,
.tiocmget = acm_tty_tiocmget,
.tiocmset = acm_tty_tiocmset,
+ .get_icount = acm_tty_get_icount,
};
/*
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 1f1eabfd8462..c980f11cdf56 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -83,6 +83,7 @@ struct acm {
struct usb_device *dev; /* the corresponding usb device */
struct usb_interface *control; /* control interface */
struct usb_interface *data; /* data interface */
+ unsigned in, out; /* i/o pipes */
struct tty_port port; /* our tty port data */
struct urb *ctrlurb; /* urbs */
u8 *ctrl_buffer; /* buffers of urbs */
@@ -102,6 +103,9 @@ struct acm {
spinlock_t write_lock;
struct mutex mutex;
bool disconnected;
+ unsigned long flags;
+# define EVENT_TTY_WAKEUP 0
+# define EVENT_RX_STALL 1
struct usb_cdc_line_coding line; /* bits, stop, parity */
struct work_struct work; /* work queue entry for line discipline waking up */
unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */
@@ -116,7 +120,6 @@ struct acm {
unsigned int ctrl_caps; /* control capabilities from the class specific header */
unsigned int susp_count; /* number of suspended interfaces */
unsigned int combined_interfaces:1; /* control and data collapsed */
- unsigned int is_int_ep:1; /* interrupt endpoints contrary to spec used */
unsigned int throttled:1; /* actually throttled */
unsigned int throttle_req:1; /* throttle requested */
u8 bInterval;
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index a6c1fae7d52a..f03692ec5520 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -157,6 +157,7 @@ static int usbtmc_open(struct inode *inode, struct file *filp)
}
data = usb_get_intfdata(intf);
+ /* Protect reference to data from file structure until release */
kref_get(&data->kref);
/* Store pointer in file structure's private data field */
@@ -531,7 +532,7 @@ static int usbtmc488_ioctl_simple(struct usbtmc_device_data *data,
}
/*
- * Sends a REQUEST_DEV_DEP_MSG_IN message on the Bulk-IN endpoint.
+ * Sends a REQUEST_DEV_DEP_MSG_IN message on the Bulk-OUT endpoint.
* @transfer_size: number of bytes to request from the device.
*
* See the USBTMC specification, Table 4.
@@ -1471,7 +1472,7 @@ static int usbtmc_probe(struct usb_interface *intf,
if (!data->iin_urb)
goto error_register;
- /* will reference data in int urb */
+ /* Protect interrupt in endpoint data until iin_urb is freed */
kref_get(&data->kref);
/* allocate buffer for interrupt in */
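
The reworded comments above make explicit which kref_get() protects which reference: one pins the device data for the lifetime of the open file, the other for as long as the interrupt-IN URB exists. The underlying lifecycle is the usual kref pattern; a generic sketch with hypothetical mydata names:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct mydata {
	struct kref kref;
	/* ... device state ... */
};

static void mydata_release(struct kref *kref)
{
	struct mydata *data = container_of(kref, struct mydata, kref);

	kfree(data);
}

/* every stored pointer takes a reference... */
static struct mydata *mydata_get(struct mydata *data)
{
	kref_get(&data->kref);
	return data;
}

/* ...and drops it when that pointer goes away */
static void mydata_put(struct mydata *data)
{
	kref_put(&data->kref, mydata_release);
}
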
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 98e39f91723a..b9bf6e2eb6fe 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -3,6 +3,9 @@
*
* This implementation plugs in through generic "usb_bus" level methods,
* and should work with all USB controllers, regardless of bus type.
+ *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/module.h>
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index a2d90aca779f..0aa9e7d697a5 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -1,3 +1,8 @@
+/*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/hcd.h>
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index ef04b50e6bbb..f2987ddb1cde 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -182,14 +182,8 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
dir = usb_endpoint_dir_in(desc) ? 'I' : 'O';
- if (speed == USB_SPEED_HIGH) {
- switch (usb_endpoint_maxp(desc) & (0x03 << 11)) {
- case 1 << 11:
- bandwidth = 2; break;
- case 2 << 11:
- bandwidth = 3; break;
- }
- }
+ if (speed == USB_SPEED_HIGH)
+ bandwidth = usb_endpoint_maxp_mult(desc);
/* this isn't checking for illegal values */
switch (usb_endpoint_type(desc)) {
@@ -233,7 +227,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
start += sprintf(start, format_endpt, desc->bEndpointAddress, dir,
desc->bmAttributes, type,
- (usb_endpoint_maxp(desc) & 0x07ff) *
+ usb_endpoint_maxp(desc) *
bandwidth,
interval, unit);
return start;
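
The hunk above drops the open-coded masking of wMaxPacketSize bits 12:11 in favour of usb_endpoint_maxp_mult(), which returns the high-bandwidth transaction multiplier (1-3) for high-speed periodic endpoints. A tiny sketch of computing the per-microframe payload with these helpers, assuming usb_endpoint_maxp() reports only the packet-size field as it does in current kernels:

#include <linux/usb/ch9.h>

static unsigned int hs_payload_per_uframe(const struct usb_endpoint_descriptor *desc)
{
	/* packet size times the number of transactions per microframe */
	return usb_endpoint_maxp(desc) * usb_endpoint_maxp_mult(desc);
}
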
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index dadd1e8dfe09..cdee5130638b 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -15,6 +15,9 @@
* (usb_device_id matching changes by Adam J. Richter)
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ *
* NOTE! This is not actually a driver at all, rather this is
* just a collection of helper routines that implement the
* matching, probing, releasing, suspending and resuming for
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 101983b7e8d2..a60bc830a056 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -5,8 +5,10 @@
* (C) Copyright 2002,2004 IBM Corp.
* (C) Copyright 2006 Novell Inc.
*
- * Endpoint sysfs stuff
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
*
+ * Endpoint sysfs stuff
*/
#include <linux/kernel.h>
@@ -50,8 +52,7 @@ static ssize_t wMaxPacketSize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ep_device *ep = to_ep_device(dev);
- return sprintf(buf, "%04x\n",
- usb_endpoint_maxp(ep->desc) & 0x07ff);
+ return sprintf(buf, "%04x\n", usb_endpoint_maxp(ep->desc));
}
static DEVICE_ATTR_RO(wMaxPacketSize);
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 822ced9639aa..e26bd5e773ad 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -13,6 +13,8 @@
* (usb_device_id matching changes by Adam J. Richter)
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/module.h>
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 358ca8dd784f..bd3e0c5a6db2 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -15,6 +15,8 @@
* (usb_device_id matching changes by Adam J. Richter)
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/usb.h>
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index cbb146736f57..143454ea385b 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -6,6 +6,8 @@
* (C) Copyright 1999 Gregory P. Smith
* (C) Copyright 2001 Brad Hards (bhards@bigpond.net.au)
*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/kernel.h>
@@ -101,6 +103,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);
+static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
+ struct usb_port *port_dev);
static inline char *portspeed(struct usb_hub *hub, int portstatus)
{
@@ -899,82 +903,28 @@ static int hub_set_port_link_state(struct usb_hub *hub, int port1,
}
/*
- * If USB 3.0 ports are placed into the Disabled state, they will no longer
- * detect any device connects or disconnects. This is generally not what the
- * USB core wants, since it expects a disabled port to produce a port status
- * change event when a new device connects.
- *
- * Instead, set the link state to Disabled, wait for the link to settle into
- * that state, clear any change bits, and then put the port into the RxDetect
- * state.
+ * Unlike USB-2, USB-3 has no link state that refuses to negotiate a connection
+ * with a plugged-in cable while still signalling the host when the cable is
+ * unplugged. Instead, disable remote wakeup and set the link state to U3 for
+ * USB-3 devices.
*/
-static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
-{
- int ret;
- int total_time;
- u16 portchange, portstatus;
-
- if (!hub_is_superspeed(hub->hdev))
- return -EINVAL;
-
- ret = hub_port_status(hub, port1, &portstatus, &portchange);
- if (ret < 0)
- return ret;
-
- /*
- * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
- * Controller [1022:7814] will have spurious result making the following
- * usb 3.0 device hotplugging route to the 2.0 root hub and recognized
- * as high-speed device if we set the usb 3.0 port link state to
- * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we
- * check the state here to avoid the bug.
- */
- if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
- USB_SS_PORT_LS_RX_DETECT) {
- dev_dbg(&hub->ports[port1 - 1]->dev,
- "Not disabling port; link state is RxDetect\n");
- return ret;
- }
-
- ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
- if (ret)
- return ret;
-
- /* Wait for the link to enter the disabled state. */
- for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
- ret = hub_port_status(hub, port1, &portstatus, &portchange);
- if (ret < 0)
- return ret;
-
- if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
- USB_SS_PORT_LS_SS_DISABLED)
- break;
- if (total_time >= HUB_DEBOUNCE_TIMEOUT)
- break;
- msleep(HUB_DEBOUNCE_STEP);
- }
- if (total_time >= HUB_DEBOUNCE_TIMEOUT)
- dev_warn(&hub->ports[port1 - 1]->dev,
- "Could not disable after %d ms\n", total_time);
-
- return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
-}
-
static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
{
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *hdev = hub->hdev;
int ret = 0;
- if (port_dev->child && set_state)
- usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
if (!hub->error) {
- if (hub_is_superspeed(hub->hdev))
- ret = hub_usb3_port_disable(hub, port1);
- else
+ if (hub_is_superspeed(hub->hdev)) {
+ hub_usb3_port_prepare_disable(hub, port_dev);
+ ret = hub_set_port_link_state(hub, port_dev->portnum,
+ USB_SS_PORT_LS_U3);
+ } else {
ret = usb_clear_port_feature(hdev, port1,
USB_PORT_FEAT_ENABLE);
+ }
}
+ if (port_dev->child && set_state)
+ usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
if (ret && ret != -ENODEV)
dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret);
return ret;
@@ -2731,8 +2681,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
if (ret < 0)
return ret;
- /* The port state is unknown until the reset completes. */
- if (!(portstatus & USB_PORT_STAT_RESET))
+ /*
+ * The port state is unknown until the reset completes.
+ *
+ * On top of that, some chips may require additional time
+ * to re-establish a connection after the reset is complete,
+ * so also wait for the connection to be re-established.
+ */
+ if (!(portstatus & USB_PORT_STAT_RESET) &&
+ (portstatus & USB_PORT_STAT_CONNECTION))
break;
/* switch to the long delay after two short delay failures */
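
The reset-wait change above requires both that the reset bit has cleared and that the connection bit is still set before the loop declares the port ready, to accommodate chips that need extra time to re-establish the connection. A stripped-down sketch of such a polling loop; read_portstatus() stands in for hub.c's own hub_port_status():

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/usb/ch11.h>

static int wait_reset_done(u16 (*read_portstatus)(void),
			   unsigned int delay_ms, unsigned int timeout_ms)
{
	unsigned int waited;

	for (waited = 0; waited < timeout_ms; waited += delay_ms) {
		u16 portstatus = read_portstatus();

		/* done only when reset has cleared AND the device is still seen */
		if (!(portstatus & USB_PORT_STAT_RESET) &&
		    (portstatus & USB_PORT_STAT_CONNECTION))
			return 0;

		msleep(delay_ms);
	}
	return -ETIMEDOUT;
}
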
@@ -4140,6 +4097,26 @@ void usb_unlocked_enable_lpm(struct usb_device *udev)
}
EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
+/* usb3 devices use U3 for the disabled state; make sure remote wakeup is disabled first */
+static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
+ struct usb_port *port_dev)
+{
+ struct usb_device *udev = port_dev->child;
+ int ret;
+
+ if (udev && udev->port_is_suspended && udev->do_remote_wakeup) {
+ ret = hub_set_port_link_state(hub, port_dev->portnum,
+ USB_SS_PORT_LS_U0);
+ if (!ret) {
+ msleep(USB_RESUME_TIMEOUT);
+ ret = usb_disable_remote_wakeup(udev);
+ }
+ if (ret)
+ dev_warn(&udev->dev,
+ "Port disable: can't disable remote wake\n");
+ udev->do_remote_wakeup = 0;
+ }
+}
#else /* CONFIG_PM */
@@ -4147,6 +4124,9 @@ EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
#define hub_resume NULL
#define hub_reset_resume NULL
+static inline void hub_usb3_port_prepare_disable(struct usb_hub *hub,
+ struct usb_port *port_dev) { }
+
int usb_disable_lpm(struct usb_device *udev)
{
return 0;
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
index 3ed5162677ad..1713248ab15a 100644
--- a/drivers/usb/core/ledtrig-usbport.c
+++ b/drivers/usb/core/ledtrig-usbport.c
@@ -74,8 +74,7 @@ static void usbport_trig_update_count(struct usbport_trig_data *usbport_data)
usbport_data->count = 0;
usb_for_each_dev(usbport_data, usbport_trig_usb_dev_check);
- led_cdev->brightness_set(led_cdev,
- usbport_data->count ? LED_FULL : LED_OFF);
+ led_set_brightness(led_cdev, usbport_data->count ? LED_FULL : LED_OFF);
}
/***************************************
@@ -228,12 +227,12 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action,
case USB_DEVICE_ADD:
usbport_trig_add_usb_dev_ports(usb_dev, usbport_data);
if (observed && usbport_data->count++ == 0)
- led_cdev->brightness_set(led_cdev, LED_FULL);
+ led_set_brightness(led_cdev, LED_FULL);
return NOTIFY_OK;
case USB_DEVICE_REMOVE:
usbport_trig_remove_usb_dev_ports(usbport_data, usb_dev);
if (observed && --usbport_data->count == 0)
- led_cdev->brightness_set(led_cdev, LED_OFF);
+ led_set_brightness(led_cdev, LED_OFF);
return NOTIFY_OK;
}
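
Calling led_set_brightness() instead of the LED class device's ->brightness_set() hook routes the update through the LED core, which can defer the work for drivers that only implement the blocking setter, so the call stays safe from the trigger's notifier context. A small usage sketch:

#include <linux/leds.h>

static void indicate_activity(struct led_classdev *led_cdev, bool active)
{
	/* the LED core handles deferral for drivers that must sleep */
	led_set_brightness(led_cdev, active ? LED_FULL : LED_OFF);
}
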
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 3a4707746157..dea55914d641 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1,5 +1,8 @@
/*
* message.c - synchronous message handling
+ *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/pci.h> /* for scatterlist macros */
diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c
index 7728c91dfa2e..b12a463a3e22 100644
--- a/drivers/usb/core/notify.c
+++ b/drivers/usb/core/notify.c
@@ -6,6 +6,8 @@
* notifier functions originally based on those in kernel/sys.c
* but fixed up to not be so broken.
*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
*/
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index c953a0f1c695..dfc68ed24db1 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -7,6 +7,8 @@
*
* All of the sysfs file attributes for usb devices and interfaces.
*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
*/
@@ -14,6 +16,7 @@
#include <linux/string.h>
#include <linux/usb.h>
#include <linux/usb/quirks.h>
+#include <linux/of.h>
#include "usb.h"
/* Active configuration fields */
@@ -104,6 +107,17 @@ static ssize_t bConfigurationValue_store(struct device *dev,
static DEVICE_ATTR_IGNORE_LOCKDEP(bConfigurationValue, S_IRUGO | S_IWUSR,
bConfigurationValue_show, bConfigurationValue_store);
+#ifdef CONFIG_OF
+static ssize_t devspec_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct device_node *of_node = dev->of_node;
+
+ return sprintf(buf, "%s\n", of_node_full_name(of_node));
+}
+static DEVICE_ATTR_RO(devspec);
+#endif
+
/* String fields */
#define usb_string_attr(name) \
static ssize_t name##_show(struct device *dev, \
@@ -786,6 +800,9 @@ static struct attribute *dev_attrs[] = {
&dev_attr_remove.attr,
&dev_attr_removable.attr,
&dev_attr_ltm_capable.attr,
+#ifdef CONFIG_OF
+ &dev_attr_devspec.attr,
+#endif
NULL,
};
static struct attribute_group dev_attr_grp = {
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index a9039696476e..d75cb8c0f7df 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -1,3 +1,8 @@
+/*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
@@ -407,11 +412,8 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
}
/* "high bandwidth" mode, 1-3 packets/uframe? */
- if (dev->speed == USB_SPEED_HIGH) {
- int mult = 1 + ((max >> 11) & 0x03);
- max &= 0x07ff;
- max *= mult;
- }
+ if (dev->speed == USB_SPEED_HIGH)
+ max *= usb_endpoint_maxp_mult(&ep->desc);
if (urb->number_of_packets <= 0)
return -EINVAL;
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 592151461017..a2ccc69fb45c 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -12,6 +12,9 @@
* (usb_device_id matching changes by Adam J. Richter)
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ *
* NOTE! This is not actually a driver at all, rather this is
* just a collection of helper routines that implement the
* generic USB things that the real drivers can use..
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 53318126ed91..dc6949248823 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -1,3 +1,8 @@
+/*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
#include <linux/pm.h>
#include <linux/acpi.h>
diff --git a/drivers/usb/dwc2/Makefile b/drivers/usb/dwc2/Makefile
index 50fdaace1e73..b9237e1e45d0 100644
--- a/drivers/usb/dwc2/Makefile
+++ b/drivers/usb/dwc2/Makefile
@@ -3,6 +3,7 @@ ccflags-$(CONFIG_USB_DWC2_VERBOSE) += -DVERBOSE_DEBUG
obj-$(CONFIG_USB_DWC2) += dwc2.o
dwc2-y := core.o core_intr.o platform.o
+dwc2-y += params.o
ifneq ($(filter y,$(CONFIG_USB_DWC2_HOST) $(CONFIG_USB_DWC2_DUAL_ROLE)),)
dwc2-y += hcd.o hcd_intr.o
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 4c0fa0b17353..11d8ae9aead1 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -135,7 +135,7 @@ int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore)
u32 pcgcctl;
int ret = 0;
- if (!hsotg->core_params->hibernation)
+ if (!hsotg->params.hibernation)
return -ENOTSUPP;
pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
@@ -188,7 +188,7 @@ int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg)
u32 pcgcctl;
int ret = 0;
- if (!hsotg->core_params->hibernation)
+ if (!hsotg->params.hibernation)
return -ENOTSUPP;
/* Backup all registers */
@@ -445,7 +445,7 @@ static bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
* the force mode. We only need to call this once during probe if
* dr_mode == OTG.
*/
-static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
+void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
{
u32 gusbcfg;
@@ -541,7 +541,7 @@ void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
addr = hsotg->regs + HAINTMSK;
dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(addr));
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
addr = hsotg->regs + HFLBADDR;
dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(addr));
@@ -551,7 +551,7 @@ void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(addr));
- for (i = 0; i < hsotg->core_params->host_channels; i++) {
+ for (i = 0; i < hsotg->params.host_channels; i++) {
dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
addr = hsotg->regs + HCCHAR(i);
dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n",
@@ -571,7 +571,7 @@ void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
addr = hsotg->regs + HCDMA(i);
dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(addr));
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
addr = hsotg->regs + HCDMAB(i);
dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(addr));
@@ -735,704 +735,13 @@ void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
udelay(1);
}
-#define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c))
-
-/* Parameter access functions */
-void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- switch (val) {
- case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
- if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
- valid = 0;
- break;
- case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
- switch (hsotg->hw_params.op_mode) {
- case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
- case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
- case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
- case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
- break;
- default:
- valid = 0;
- break;
- }
- break;
- case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
- /* always valid */
- break;
- default:
- valid = 0;
- break;
- }
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for otg_cap parameter. Check HW configuration.\n",
- val);
- switch (hsotg->hw_params.op_mode) {
- case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
- val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
- break;
- case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
- case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
- case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
- val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
- break;
- default:
- val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
- break;
- }
- dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
- }
-
- hsotg->core_params->otg_cap = val;
-}
-
-void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
- valid = 0;
- if (val < 0)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for dma_enable parameter. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
- dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
- }
-
- hsotg->core_params->dma_enable = val;
-}
-
-void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
- !hsotg->hw_params.dma_desc_enable))
- valid = 0;
- if (val < 0)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
- val);
- val = (hsotg->core_params->dma_enable > 0 &&
- hsotg->hw_params.dma_desc_enable);
- dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
- }
-
- hsotg->core_params->dma_desc_enable = val;
-}
-
-void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
- !hsotg->hw_params.dma_desc_enable))
- valid = 0;
- if (val < 0)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n",
- val);
- val = (hsotg->core_params->dma_enable > 0 &&
- hsotg->hw_params.dma_desc_enable);
- }
-
- hsotg->core_params->dma_desc_fs_enable = val;
- dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val);
-}
-
-void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
- int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "Wrong value for host_support_fs_low_power\n");
- dev_err(hsotg->dev,
- "host_support_fs_low_power must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev,
- "Setting host_support_fs_low_power to %d\n", val);
- }
-
- hsotg->core_params->host_support_fs_ls_low_power = val;
-}
-
-void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
- valid = 0;
- if (val < 0)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.enable_dynamic_fifo;
- dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
- }
-
- hsotg->core_params->enable_dynamic_fifo = val;
-}
-
-void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for host_rx_fifo_size. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.host_rx_fifo_size;
- dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
- }
-
- hsotg->core_params->host_rx_fifo_size = val;
-}
-
-void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.host_nperio_tx_fifo_size;
- dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
- val);
- }
-
- hsotg->core_params->host_nperio_tx_fifo_size = val;
-}
-
-void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.host_perio_tx_fifo_size;
- dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
- val);
- }
-
- hsotg->core_params->host_perio_tx_fifo_size = val;
-}
-
-void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for max_transfer_size. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.max_transfer_size;
- dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
- }
-
- hsotg->core_params->max_transfer_size = val;
-}
-
-void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val < 15 || val > hsotg->hw_params.max_packet_count)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for max_packet_count. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.max_packet_count;
- dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
- }
-
- hsotg->core_params->max_packet_count = val;
-}
-
-void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val < 1 || val > hsotg->hw_params.host_channels)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for host_channels. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.host_channels;
- dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
- }
-
- hsotg->core_params->host_channels = val;
-}
-
-void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 0;
- u32 hs_phy_type, fs_phy_type;
-
- if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
- DWC2_PHY_TYPE_PARAM_ULPI)) {
- if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for phy_type\n");
- dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
- }
-
- valid = 0;
- }
-
- hs_phy_type = hsotg->hw_params.hs_phy_type;
- fs_phy_type = hsotg->hw_params.fs_phy_type;
- if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
- (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
- hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
- valid = 1;
- else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
- (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
- hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
- valid = 1;
- else if (val == DWC2_PHY_TYPE_PARAM_FS &&
- fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
- valid = 1;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for phy_type. Check HW configuration.\n",
- val);
- val = DWC2_PHY_TYPE_PARAM_FS;
- if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
- if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
- hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
- val = DWC2_PHY_TYPE_PARAM_UTMI;
- else
- val = DWC2_PHY_TYPE_PARAM_ULPI;
- }
- dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
- }
-
- hsotg->core_params->phy_type = val;
-}
-
-static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
-{
- return hsotg->core_params->phy_type;
-}
-
-void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for speed parameter\n");
- dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n");
- }
- valid = 0;
- }
-
- if (val == DWC2_SPEED_PARAM_HIGH &&
- dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for speed parameter. Check HW configuration.\n",
- val);
- val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
- DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
- dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
- }
-
- hsotg->core_params->speed = val;
-}
-
-void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
- DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "Wrong value for host_ls_low_power_phy_clk parameter\n");
- dev_err(hsotg->dev,
- "host_ls_low_power_phy_clk must be 0 or 1\n");
- }
- valid = 0;
- }
-
- if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
- dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
- val);
- val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
- ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
- : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
- dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
- val);
- }
-
- hsotg->core_params->host_ls_low_power_phy_clk = val;
-}
-
-void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
- dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val);
- }
-
- hsotg->core_params->phy_ulpi_ddr = val;
-}
-
-void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "Wrong value for phy_ulpi_ext_vbus\n");
- dev_err(hsotg->dev,
- "phy_ulpi_ext_vbus must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
- }
-
- hsotg->core_params->phy_ulpi_ext_vbus = val;
-}
-
-void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 0;
-
- switch (hsotg->hw_params.utmi_phy_data_width) {
- case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
- valid = (val == 8);
- break;
- case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
- valid = (val == 16);
- break;
- case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
- valid = (val == 8 || val == 16);
- break;
- }
-
- if (!valid) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "%d invalid for phy_utmi_width. Check HW configuration.\n",
- val);
- }
- val = (hsotg->hw_params.utmi_phy_data_width ==
- GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
- dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
- }
-
- hsotg->core_params->phy_utmi_width = val;
-}
-
-void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
- dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
- }
-
- hsotg->core_params->ulpi_fs_ls = val;
-}
-
-void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for ts_dline\n");
- dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
- }
-
- hsotg->core_params->ts_dline = val;
-}
-
-void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
- dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
- }
-
- valid = 0;
- }
-
- if (val == 1 && !(hsotg->hw_params.i2c_enable))
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for i2c_enable. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.i2c_enable;
- dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
- }
-
- hsotg->core_params->i2c_enable = val;
-}
-
-void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "Wrong value for en_multiple_tx_fifo,\n");
- dev_err(hsotg->dev,
- "en_multiple_tx_fifo must be 0 or 1\n");
- }
- valid = 0;
- }
-
- if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.en_multiple_tx_fifo;
- dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
- }
-
- hsotg->core_params->en_multiple_tx_fifo = val;
-}
-
-void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "'%d' invalid for parameter reload_ctl\n", val);
- dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
- }
- valid = 0;
- }
-
- if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for parameter reload_ctl. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
- dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
- }
-
- hsotg->core_params->reload_ctl = val;
-}
-
-void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
-{
- if (val != -1)
- hsotg->core_params->ahbcfg = val;
- else
- hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
- GAHBCFG_HBSTLEN_SHIFT;
-}
-
-void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "'%d' invalid for parameter otg_ver\n", val);
- dev_err(hsotg->dev,
- "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
- }
-
- hsotg->core_params->otg_ver = val;
-}
-
-static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "'%d' invalid for parameter uframe_sched\n",
- val);
- dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
- }
- val = 1;
- dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
- }
-
- hsotg->core_params->uframe_sched = val;
-}
-
-static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg,
- int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "'%d' invalid for parameter external_id_pin_ctl\n",
- val);
- dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val);
- }
-
- hsotg->core_params->external_id_pin_ctl = val;
-}
-
-static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg,
- int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "'%d' invalid for parameter hibernation\n",
- val);
- dev_err(hsotg->dev, "hibernation must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val);
- }
-
- hsotg->core_params->hibernation = val;
-}
-
-/*
- * This function is called during module intialization to pass module parameters
- * for the DWC_otg core.
- */
-void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
- const struct dwc2_core_params *params)
-{
- dev_dbg(hsotg->dev, "%s()\n", __func__);
-
- dwc2_set_param_otg_cap(hsotg, params->otg_cap);
- dwc2_set_param_dma_enable(hsotg, params->dma_enable);
- dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
- dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
- dwc2_set_param_host_support_fs_ls_low_power(hsotg,
- params->host_support_fs_ls_low_power);
- dwc2_set_param_enable_dynamic_fifo(hsotg,
- params->enable_dynamic_fifo);
- dwc2_set_param_host_rx_fifo_size(hsotg,
- params->host_rx_fifo_size);
- dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
- params->host_nperio_tx_fifo_size);
- dwc2_set_param_host_perio_tx_fifo_size(hsotg,
- params->host_perio_tx_fifo_size);
- dwc2_set_param_max_transfer_size(hsotg,
- params->max_transfer_size);
- dwc2_set_param_max_packet_count(hsotg,
- params->max_packet_count);
- dwc2_set_param_host_channels(hsotg, params->host_channels);
- dwc2_set_param_phy_type(hsotg, params->phy_type);
- dwc2_set_param_speed(hsotg, params->speed);
- dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
- params->host_ls_low_power_phy_clk);
- dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
- dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
- params->phy_ulpi_ext_vbus);
- dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
- dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
- dwc2_set_param_ts_dline(hsotg, params->ts_dline);
- dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
- dwc2_set_param_en_multiple_tx_fifo(hsotg,
- params->en_multiple_tx_fifo);
- dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
- dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
- dwc2_set_param_otg_ver(hsotg, params->otg_ver);
- dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
- dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
- dwc2_set_param_hibernation(hsotg, params->hibernation);
-}
-
/*
* Forces either host or device mode if the controller is not
* currently in that mode.
*
* Returns true if the mode was forced.
*/
-static bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host)
+bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host)
{
if (host && dwc2_is_host_mode(hsotg))
return false;
@@ -1442,232 +751,9 @@ static bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host)
return dwc2_force_mode(hsotg, host);
}
-/*
- * Gets host hardware parameters. Forces host mode if not currently in
- * host mode. Should be called immediately after a core soft reset in
- * order to get the reset values.
- */
-static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg)
-{
- struct dwc2_hw_params *hw = &hsotg->hw_params;
- u32 gnptxfsiz;
- u32 hptxfsiz;
- bool forced;
-
- if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
- return;
-
- forced = dwc2_force_mode_if_needed(hsotg, true);
-
- gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
- hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
- dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
- dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
-
- if (forced)
- dwc2_clear_force_mode(hsotg);
-
- hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
- FIFOSIZE_DEPTH_SHIFT;
- hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
- FIFOSIZE_DEPTH_SHIFT;
-}
-
-/*
- * Gets device hardware parameters. Forces device mode if not
- * currently in device mode. Should be called immediately after a core
- * soft reset in order to get the reset values.
- */
-static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
-{
- struct dwc2_hw_params *hw = &hsotg->hw_params;
- bool forced;
- u32 gnptxfsiz;
-
- if (hsotg->dr_mode == USB_DR_MODE_HOST)
- return;
-
- forced = dwc2_force_mode_if_needed(hsotg, false);
-
- gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
- dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
-
- if (forced)
- dwc2_clear_force_mode(hsotg);
-
- hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
- FIFOSIZE_DEPTH_SHIFT;
-}
-
-/**
- * During device initialization, read various hardware configuration
- * registers and interpret the contents.
- */
-int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
-{
- struct dwc2_hw_params *hw = &hsotg->hw_params;
- unsigned width;
- u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
- u32 grxfsiz;
-
- /*
- * Attempt to ensure this device is really a DWC_otg Controller.
- * Read and verify the GSNPSID register contents. The value should be
- * 0x45f42xxx or 0x45f43xxx, which corresponds to either "OT2" or "OT3",
- * as in "OTG version 2.xx" or "OTG version 3.xx".
- */
- hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID);
- if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
- (hw->snpsid & 0xfffff000) != 0x4f543000) {
- dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
- hw->snpsid);
- return -ENODEV;
- }
-
- dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
- hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
- hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
-
- hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
- hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
- hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
- hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
- grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
-
- dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
- dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
- dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
- dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
- dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
-
- /*
- * Host specific hardware parameters. Reading these parameters
- * requires the controller to be in host mode. The mode will
- * be forced, if necessary, to read these values.
- */
- dwc2_get_host_hwparams(hsotg);
- dwc2_get_dev_hwparams(hsotg);
-
- /* hwcfg1 */
- hw->dev_ep_dirs = hwcfg1;
-
- /* hwcfg2 */
- hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
- GHWCFG2_OP_MODE_SHIFT;
- hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
- GHWCFG2_ARCHITECTURE_SHIFT;
- hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
- hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
- GHWCFG2_NUM_HOST_CHAN_SHIFT);
- hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
- GHWCFG2_HS_PHY_TYPE_SHIFT;
- hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
- GHWCFG2_FS_PHY_TYPE_SHIFT;
- hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
- GHWCFG2_NUM_DEV_EP_SHIFT;
- hw->nperio_tx_q_depth =
- (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
- GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
- hw->host_perio_tx_q_depth =
- (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
- GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
- hw->dev_token_q_depth =
- (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
- GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
-
- /* hwcfg3 */
- width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
- GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
- hw->max_transfer_size = (1 << (width + 11)) - 1;
- width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
- GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
- hw->max_packet_count = (1 << (width + 4)) - 1;
- hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
- hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
- GHWCFG3_DFIFO_DEPTH_SHIFT;
-
- /* hwcfg4 */
- hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
- hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
- GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
- hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
- hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
- hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
- GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
-
- /* fifo sizes */
- hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
- GRXFSIZ_DEPTH_SHIFT;
-
- dev_dbg(hsotg->dev, "Detected values from hardware:\n");
- dev_dbg(hsotg->dev, " op_mode=%d\n",
- hw->op_mode);
- dev_dbg(hsotg->dev, " arch=%d\n",
- hw->arch);
- dev_dbg(hsotg->dev, " dma_desc_enable=%d\n",
- hw->dma_desc_enable);
- dev_dbg(hsotg->dev, " power_optimized=%d\n",
- hw->power_optimized);
- dev_dbg(hsotg->dev, " i2c_enable=%d\n",
- hw->i2c_enable);
- dev_dbg(hsotg->dev, " hs_phy_type=%d\n",
- hw->hs_phy_type);
- dev_dbg(hsotg->dev, " fs_phy_type=%d\n",
- hw->fs_phy_type);
- dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n",
- hw->utmi_phy_data_width);
- dev_dbg(hsotg->dev, " num_dev_ep=%d\n",
- hw->num_dev_ep);
- dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n",
- hw->num_dev_perio_in_ep);
- dev_dbg(hsotg->dev, " host_channels=%d\n",
- hw->host_channels);
- dev_dbg(hsotg->dev, " max_transfer_size=%d\n",
- hw->max_transfer_size);
- dev_dbg(hsotg->dev, " max_packet_count=%d\n",
- hw->max_packet_count);
- dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n",
- hw->nperio_tx_q_depth);
- dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n",
- hw->host_perio_tx_q_depth);
- dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n",
- hw->dev_token_q_depth);
- dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n",
- hw->enable_dynamic_fifo);
- dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n",
- hw->en_multiple_tx_fifo);
- dev_dbg(hsotg->dev, " total_fifo_size=%d\n",
- hw->total_fifo_size);
- dev_dbg(hsotg->dev, " host_rx_fifo_size=%d\n",
- hw->host_rx_fifo_size);
- dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n",
- hw->host_nperio_tx_fifo_size);
- dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n",
- hw->host_perio_tx_fifo_size);
- dev_dbg(hsotg->dev, "\n");
-
- return 0;
-}
-
-/*
- * Sets all parameters to the given value.
- *
- * Assumes that the dwc2_core_params struct contains only integers.
- */
-void dwc2_set_all_params(struct dwc2_core_params *params, int value)
-{
- int *p = (int *)params;
- size_t size = sizeof(*params) / sizeof(*p);
- int i;
-
- for (i = 0; i < size; i++)
- p[i] = value;
-}
-
-
u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
{
- return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
+ return hsotg->params.otg_ver == 1 ? 0x0200 : 0x0103;
}
bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 2a21a0414b1d..9548d3e03453 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -172,6 +172,11 @@ struct dwc2_hsotg_req;
* @periodic: Set if this is a periodic ep, such as Interrupt
* @isochronous: Set if this is a isochronous ep
* @send_zlp: Set if we need to send a zero-length packet.
+ * @desc_list_dma: The DMA address of descriptor chain currently in use.
+ * @desc_list: Pointer to descriptor DMA chain head currently in use.
+ * @desc_count: Count of entries within the DMA descriptor chain of EP.
+ * @isoc_chain_num: Number of ISOC chain currently in use - either 0 or 1.
+ * @next_desc: Index of the next free descriptor in the ISOC chain under SW control.
* @total_data: The total number of data bytes done.
* @fifo_size: The size of the FIFO (for periodic IN endpoints)
* @fifo_load: The amount of data loaded into the FIFO (periodic IN)
@@ -219,6 +224,13 @@ struct dwc2_hsotg_ep {
#define TARGET_FRAME_INITIAL 0xFFFFFFFF
bool frame_overrun;
+ dma_addr_t desc_list_dma;
+ struct dwc2_dma_desc *desc_list;
+ u8 desc_count;
+
+ unsigned char isoc_chain_num;
+ unsigned int next_desc;
+
char name[10];
};
@@ -286,7 +298,7 @@ enum dwc2_ep0_state {
* @otg_ver: OTG version supported
* 0 - 1.3 (default)
* 1 - 2.0
- * @dma_enable: Specifies whether to use slave or DMA mode for accessing
+ * @host_dma: Specifies whether to use slave or DMA mode for accessing
* the data FIFOs. The driver will automatically detect the
* value for this parameter if none is specified.
* 0 - Slave (always available)
@@ -314,7 +326,8 @@ enum dwc2_ep0_state {
* @enable_dynamic_fifo: 0 - Use coreConsultant-specified FIFO size parameters
* 1 - Allow dynamic FIFO sizing (default, if available)
* @en_multiple_tx_fifo: Specifies whether dedicated per-endpoint transmit FIFOs
- * are enabled
+ * are enabled for non-periodic IN endpoints in device
+ * mode.
* @host_rx_fifo_size: Number of 4-byte words in the Rx FIFO in host mode when
* dynamic FIFO sizing is enabled
* 16 to 32768
@@ -417,6 +430,20 @@ enum dwc2_ep0_state {
* needed.
* 0 - No (default)
* 1 - Yes
+ * @g_dma: Enables gadget dma usage (default: autodetect).
+ * @g_dma_desc: Enables gadget descriptor DMA (default: autodetect).
+ * @g_rx_fifo_size: The periodic rx fifo size for the device, in
+ * DWORDS from 16-32768 (default: 2048 if
+ * possible, otherwise autodetect).
+ * @g_np_tx_fifo_size: The non-periodic tx fifo size for the device in
+ * DWORDS from 16-32768 (default: 1024 if
+ * possible, otherwise autodetect).
+ * @g_tx_fifo_size: An array of TX fifo sizes in dedicated fifo
+ * mode. Each value corresponds to one EP
+ * starting from EP1 (max 15 values). Sizes are
+ * in DWORDS with possible values from
+ * 16-32768 (default: 256, 256, 256, 256, 768,
+ * 768, 768, 768, 0, 0, 0, 0, 0, 0, 0).
*
* The following parameters may be specified when starting the module. These
* parameters define how the DWC_otg controller should be configured. A
@@ -430,11 +457,18 @@ struct dwc2_core_params {
* dwc2_set_all_params!
*/
int otg_cap;
+#define DWC2_CAP_PARAM_HNP_SRP_CAPABLE 0
+#define DWC2_CAP_PARAM_SRP_ONLY_CAPABLE 1
+#define DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE 2
+
int otg_ver;
- int dma_enable;
int dma_desc_enable;
int dma_desc_fs_enable;
int speed;
+#define DWC2_SPEED_PARAM_HIGH 0
+#define DWC2_SPEED_PARAM_FULL 1
+#define DWC2_SPEED_PARAM_LOW 2
+
int enable_dynamic_fifo;
int en_multiple_tx_fifo;
int host_rx_fifo_size;
@@ -444,19 +478,44 @@ struct dwc2_core_params {
int max_packet_count;
int host_channels;
int phy_type;
+#define DWC2_PHY_TYPE_PARAM_FS 0
+#define DWC2_PHY_TYPE_PARAM_UTMI 1
+#define DWC2_PHY_TYPE_PARAM_ULPI 2
+
int phy_utmi_width;
int phy_ulpi_ddr;
int phy_ulpi_ext_vbus;
+#define DWC2_PHY_ULPI_INTERNAL_VBUS 0
+#define DWC2_PHY_ULPI_EXTERNAL_VBUS 1
+
int i2c_enable;
int ulpi_fs_ls;
int host_support_fs_ls_low_power;
int host_ls_low_power_phy_clk;
+#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0
+#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1
+
int ts_dline;
int reload_ctl;
int ahbcfg;
int uframe_sched;
int external_id_pin_ctl;
int hibernation;
+
+ /*
+ * The following parameters are *only* set via device
+ * properties and cannot be set directly in this structure.
+ */
+
+ /* Host parameters */
+ bool host_dma;
+
+ /* Gadget parameters */
+ bool g_dma;
+ bool g_dma_desc;
+ u16 g_rx_fifo_size;
+ u16 g_np_tx_fifo_size;
+ u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
};
/**
@@ -516,10 +575,9 @@ struct dwc2_hw_params {
unsigned op_mode:3;
unsigned arch:2;
unsigned dma_desc_enable:1;
- unsigned dma_desc_fs_enable:1;
unsigned enable_dynamic_fifo:1;
unsigned en_multiple_tx_fifo:1;
- unsigned host_rx_fifo_size:16;
+ unsigned rx_fifo_size:16;
unsigned host_nperio_tx_fifo_size:16;
unsigned dev_nperio_tx_fifo_size:16;
unsigned host_perio_tx_fifo_size:16;
@@ -839,11 +897,13 @@ struct dwc2_hregs_backup {
* @ctrl_req: Request for EP0 control packets.
* @ep0_state: EP0 control transfers state
* @test_mode: USB test mode requested by the host
+ * @setup_desc_dma: EP0 setup stage desc chain DMA address
+ * @setup_desc: EP0 setup stage desc chain pointer
+ * @ctrl_in_desc_dma: EP0 IN data phase desc chain DMA address
+ * @ctrl_in_desc: EP0 IN data phase desc chain pointer
+ * @ctrl_out_desc_dma: EP0 OUT data phase desc chain DMA address
+ * @ctrl_out_desc: EP0 OUT data phase desc chain pointer
* @eps: The endpoints being supplied to the gadget framework
- * @g_using_dma: Indicate if dma usage is enabled
- * @g_rx_fifo_sz: Contains rx fifo size value
- * @g_np_g_tx_fifo_sz: Contains Non-Periodic tx fifo size value
- * @g_tx_fifo_sz: Contains tx fifo size value per endpoints
*/
struct dwc2_hsotg {
struct device *dev;
@@ -851,7 +911,7 @@ struct dwc2_hsotg {
/** Params detected from hardware */
struct dwc2_hw_params hw_params;
/** Params to actually use */
- struct dwc2_core_params *core_params;
+ struct dwc2_core_params params;
enum usb_otg_state op_state;
enum usb_dr_mode dr_mode;
unsigned int hcd_enabled:1;
@@ -891,6 +951,8 @@ struct dwc2_hsotg {
#define DWC2_CORE_REV_2_94a 0x4f54294a
#define DWC2_CORE_REV_3_00a 0x4f54300a
#define DWC2_CORE_REV_3_10a 0x4f54310a
+#define DWC2_FS_IOT_REV_1_00a 0x5531100a
+#define DWC2_HS_IOT_REV_1_00a 0x5532100a
#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
union dwc2_hcd_internal_flags {
@@ -986,15 +1048,18 @@ struct dwc2_hsotg {
enum dwc2_ep0_state ep0_state;
u8 test_mode;
+ dma_addr_t setup_desc_dma[2];
+ struct dwc2_dma_desc *setup_desc[2];
+ dma_addr_t ctrl_in_desc_dma;
+ struct dwc2_dma_desc *ctrl_in_desc;
+ dma_addr_t ctrl_out_desc_dma;
+ struct dwc2_dma_desc *ctrl_out_desc;
+
struct usb_gadget gadget;
unsigned int enabled:1;
unsigned int connected:1;
struct dwc2_hsotg_ep *eps_in[MAX_EPS_CHANNELS];
struct dwc2_hsotg_ep *eps_out[MAX_EPS_CHANNELS];
- u32 g_using_dma;
- u32 g_rx_fifo_sz;
- u32 g_np_g_tx_fifo_sz;
- u32 g_tx_fifo_sz[MAX_EPS_CHANNELS];
#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
};
@@ -1016,6 +1081,22 @@ enum dwc2_halt_status {
DWC2_HC_XFER_URB_DEQUEUE,
};
+/* Core version information */
+static inline bool dwc2_is_iot(struct dwc2_hsotg *hsotg)
+{
+ return (hsotg->hw_params.snpsid & 0xfff00000) == 0x55300000;
+}
+
+static inline bool dwc2_is_fs_iot(struct dwc2_hsotg *hsotg)
+{
+ return (hsotg->hw_params.snpsid & 0xffff0000) == 0x55310000;
+}
+
+static inline bool dwc2_is_hs_iot(struct dwc2_hsotg *hsotg)
+{
+ return (hsotg->hw_params.snpsid & 0xffff0000) == 0x55320000;
+}
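A minimal standalone sketch of how the GSNPSID prefix checks above separate the core families. The ID constants come from this patch; the helper names and the plain stdio harness are illustrative only, not driver API.

#include <stdbool.h>
#include <stdio.h>

static bool id_is_iot(unsigned int snpsid)    { return (snpsid & 0xfff00000) == 0x55300000; }
static bool id_is_fs_iot(unsigned int snpsid) { return (snpsid & 0xffff0000) == 0x55310000; }
static bool id_is_hs_iot(unsigned int snpsid) { return (snpsid & 0xffff0000) == 0x55320000; }

int main(void)
{
        unsigned int ids[] = {
                0x5531100a,     /* DWC2_FS_IOT_REV_1_00a */
                0x5532100a,     /* DWC2_HS_IOT_REV_1_00a */
                0x4f54310a,     /* DWC2_CORE_REV_3_10a (non-IoT) */
        };

        for (unsigned int i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
                printf("%08x: iot=%d fs_iot=%d hs_iot=%d\n", ids[i],
                       id_is_iot(ids[i]), id_is_fs_iot(ids[i]),
                       id_is_hs_iot(ids[i]));
        return 0;
}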
+
/*
* The following functions support initialization of the core driver component
* and the DWC_otg controller
@@ -1025,6 +1106,8 @@ extern int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg);
extern int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg);
extern int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore);
+bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host);
+void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg);
void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg);
extern bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg);
@@ -1044,217 +1127,16 @@ extern void dwc2_disable_global_interrupts(struct dwc2_hsotg *hcd);
/* This function should be called on every hardware interrupt. */
extern irqreturn_t dwc2_handle_common_intr(int irq, void *dev);
-/* OTG Core Parameters */
-
-/*
- * Specifies the OTG capabilities. The driver will automatically
- * detect the value for this parameter if none is specified.
- * 0 - HNP and SRP capable (default)
- * 1 - SRP Only capable
- * 2 - No HNP/SRP capable
- */
-extern void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val);
-#define DWC2_CAP_PARAM_HNP_SRP_CAPABLE 0
-#define DWC2_CAP_PARAM_SRP_ONLY_CAPABLE 1
-#define DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE 2
-
-/*
- * Specifies whether to use slave or DMA mode for accessing the data
- * FIFOs. The driver will automatically detect the value for this
- * parameter if none is specified.
- * 0 - Slave
- * 1 - DMA (default, if available)
- */
-extern void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * When DMA mode is enabled specifies whether to use
- * address DMA or DMA Descritor mode for accessing the data
- * FIFOs in device mode. The driver will automatically detect
- * the value for this parameter if none is specified.
- * 0 - address DMA
- * 1 - DMA Descriptor(default, if available)
- */
-extern void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * When DMA mode is enabled specifies whether to use
- * address DMA or DMA Descritor mode with full speed devices
- * for accessing the data FIFOs in host mode.
- * 0 - address DMA
- * 1 - FS DMA Descriptor(default, if available)
- */
-extern void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg,
- int val);
-
-/*
- * Specifies the maximum speed of operation in host and device mode.
- * The actual speed depends on the speed of the attached device and
- * the value of phy_type. The actual speed depends on the speed of the
- * attached device.
- * 0 - High Speed (default)
- * 1 - Full Speed
- */
-extern void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val);
-#define DWC2_SPEED_PARAM_HIGH 0
-#define DWC2_SPEED_PARAM_FULL 1
-
-/*
- * Specifies whether low power mode is supported when attached
- * to a Full Speed or Low Speed device in host mode.
- *
- * 0 - Don't support low power mode (default)
- * 1 - Support low power mode
- */
-extern void dwc2_set_param_host_support_fs_ls_low_power(
- struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies the PHY clock rate in low power mode when connected to a
- * Low Speed device in host mode. This parameter is applicable only if
- * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS
- * then defaults to 6 MHZ otherwise 48 MHZ.
- *
- * 0 - 48 MHz
- * 1 - 6 MHz
- */
-extern void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg,
- int val);
-#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0
-#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1
-
-/*
- * 0 - Use cC FIFO size parameters
- * 1 - Allow dynamic FIFO sizing (default)
- */
-extern void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg,
- int val);
-
-/*
- * Number of 4-byte words in the Rx FIFO in host mode when dynamic
- * FIFO sizing is enabled.
- * 16 to 32768 (default 1024)
- */
-extern void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Number of 4-byte words in the non-periodic Tx FIFO in host mode
- * when Dynamic FIFO sizing is enabled in the core.
- * 16 to 32768 (default 256)
- */
-extern void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg,
- int val);
-
-/*
- * Number of 4-byte words in the host periodic Tx FIFO when dynamic
- * FIFO sizing is enabled.
- * 16 to 32768 (default 256)
- */
-extern void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg,
- int val);
-
-/*
- * The maximum transfer size supported in bytes.
- * 2047 to 65,535 (default 65,535)
- */
-extern void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * The maximum number of packets in a transfer.
- * 15 to 511 (default 511)
- */
-extern void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * The number of host channel registers to use.
- * 1 to 16 (default 11)
- * Note: The FPGA configuration supports a maximum of 11 host channels.
- */
-extern void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies the type of PHY interface to use. By default, the driver
- * will automatically detect the phy_type.
- *
- * 0 - Full Speed PHY
- * 1 - UTMI+ (default)
- * 2 - ULPI
- */
-extern void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val);
-#define DWC2_PHY_TYPE_PARAM_FS 0
-#define DWC2_PHY_TYPE_PARAM_UTMI 1
-#define DWC2_PHY_TYPE_PARAM_ULPI 2
-
-/*
- * Specifies the UTMI+ Data Width. This parameter is
- * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI
- * PHY_TYPE, this parameter indicates the data width between
- * the MAC and the ULPI Wrapper.) Also, this parameter is
- * applicable only if the OTG_HSPHY_WIDTH cC parameter was set
- * to "8 and 16 bits", meaning that the core has been
- * configured to work at either data path width.
- *
- * 8 or 16 bits (default 16)
- */
-extern void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies whether the ULPI operates at double or single
- * data rate. This parameter is only applicable if PHY_TYPE is
- * ULPI.
- *
- * 0 - single data rate ULPI interface with 8 bit wide data
- * bus (default)
- * 1 - double data rate ULPI interface with 4 bit wide data
- * bus
- */
-extern void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies whether to use the internal or external supply to
- * drive the vbus with a ULPI phy.
- */
-extern void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val);
-#define DWC2_PHY_ULPI_INTERNAL_VBUS 0
-#define DWC2_PHY_ULPI_EXTERNAL_VBUS 1
-
-/*
- * Specifies whether to use the I2Cinterface for full speed PHY. This
- * parameter is only applicable if PHY_TYPE is FS.
- * 0 - No (default)
- * 1 - Yes
- */
-extern void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies whether dedicated transmit FIFOs are
- * enabled for non periodic IN endpoints in device mode
- * 0 - No
- * 1 - Yes
- */
-extern void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg,
- int val);
-
-extern void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
- const struct dwc2_core_params *params);
-
-extern void dwc2_set_all_params(struct dwc2_core_params *params, int value);
-
-extern int dwc2_get_hwparams(struct dwc2_hsotg *hsotg);
+/* The device ID match table */
+extern const struct of_device_id dwc2_of_match_table[];
extern int dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg);
extern int dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg);
+/* Parameters */
+int dwc2_get_hwparams(struct dwc2_hsotg *hsotg);
+int dwc2_init_params(struct dwc2_hsotg *hsotg);
+
/*
* The following functions check the controller's OTG operation mode
* capability (GHWCFG2.OTG_MODE).
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index d85c5c9f96c1..5b228ba6045f 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -159,9 +159,9 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg)
" ++OTG Interrupt: Session Request Success Status Change++\n");
gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
if (gotgctl & GOTGCTL_SESREQSCS) {
- if (hsotg->core_params->phy_type ==
+ if (hsotg->params.phy_type ==
DWC2_PHY_TYPE_PARAM_FS
- && hsotg->core_params->i2c_enable > 0) {
+ && hsotg->params.i2c_enable > 0) {
hsotg->srp_success = 1;
} else {
/* Clear Session Request */
@@ -370,7 +370,7 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
/* Change to L0 state */
hsotg->lx_state = DWC2_L0;
} else {
- if (hsotg->core_params->hibernation)
+ if (hsotg->params.hibernation)
return;
if (hsotg->lx_state != DWC2_L1) {
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index 55d91f24f94a..0a130916a91c 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -213,7 +213,7 @@ static int fifo_show(struct seq_file *seq, void *v)
val = dwc2_readl(regs + GNPTXFSIZ);
seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
val >> FIFOSIZE_DEPTH_SHIFT,
- val & FIFOSIZE_DEPTH_MASK);
+ val & FIFOSIZE_STARTADDR_MASK);
seq_puts(seq, "\nPeriodic TXFIFOs:\n");
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 24fbebc9b409..b95930f20d90 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -93,7 +93,18 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg);
*/
static inline bool using_dma(struct dwc2_hsotg *hsotg)
{
- return hsotg->g_using_dma;
+ return hsotg->params.g_dma;
+}
+
+/*
+ * using_desc_dma - return the descriptor DMA status of the driver.
+ * @hsotg: The driver state.
+ *
+ * Return true if we're using descriptor DMA.
+ */
+static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
+{
+ return hsotg->params.g_dma_desc;
}
/**
@@ -190,16 +201,17 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
unsigned int addr;
int timeout;
u32 val;
+ u32 *txfsz = hsotg->params.g_tx_fifo_size;
/* Reset fifo map if not correctly cleared during previous session */
WARN_ON(hsotg->fifo_map);
hsotg->fifo_map = 0;
/* set RX/NPTX FIFO sizes */
- dwc2_writel(hsotg->g_rx_fifo_sz, hsotg->regs + GRXFSIZ);
- dwc2_writel((hsotg->g_rx_fifo_sz << FIFOSIZE_STARTADDR_SHIFT) |
- (hsotg->g_np_g_tx_fifo_sz << FIFOSIZE_DEPTH_SHIFT),
- hsotg->regs + GNPTXFSIZ);
+ dwc2_writel(hsotg->params.g_rx_fifo_size, hsotg->regs + GRXFSIZ);
+ dwc2_writel((hsotg->params.g_rx_fifo_size << FIFOSIZE_STARTADDR_SHIFT) |
+ (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT),
+ hsotg->regs + GNPTXFSIZ);
/*
* arange all the rest of the TX FIFOs, as some versions of this
@@ -209,7 +221,7 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
*/
/* start at the end of the GNPTXFSIZ, rounded up */
- addr = hsotg->g_rx_fifo_sz + hsotg->g_np_g_tx_fifo_sz;
+ addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size;
/*
* Configure fifos sizes from provided configuration and assign
@@ -217,15 +229,16 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
* given endpoint.
*/
for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
- if (!hsotg->g_tx_fifo_sz[ep])
+ if (!txfsz[ep])
continue;
val = addr;
- val |= hsotg->g_tx_fifo_sz[ep] << FIFOSIZE_DEPTH_SHIFT;
- WARN_ONCE(addr + hsotg->g_tx_fifo_sz[ep] > hsotg->fifo_mem,
+ val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT;
+ WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem,
"insufficient fifo memory");
- addr += hsotg->g_tx_fifo_sz[ep];
+ addr += txfsz[ep];
dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep));
+ val = dwc2_readl(hsotg->regs + DPTXFSIZN(ep));
}
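The reworked FIFO setup above carves the dedicated TX FIFOs out back to back after the RX and non-periodic TX regions, packing each FIFO's start address and depth into one register value. A minimal standalone sketch of that packing follows; the shift value and the example sizes are assumptions for illustration, not values taken from the driver headers.

#include <stdio.h>

#define ASSUMED_FIFOSIZE_DEPTH_SHIFT    16      /* placeholder, not from dwc2 headers */

int main(void)
{
        unsigned int rx = 2048, nptx = 1024;             /* example g_rx/g_np_tx sizes */
        unsigned int txfsz[] = { 0, 256, 256, 256, 256 };/* per-EP sizes, EP0 unused */
        unsigned int addr = rx + nptx;                   /* start after GNPTXFSIZ region */

        for (unsigned int ep = 1; ep < sizeof(txfsz) / sizeof(txfsz[0]); ep++) {
                unsigned int val;

                if (!txfsz[ep])
                        continue;
                val = addr | (txfsz[ep] << ASSUMED_FIFOSIZE_DEPTH_SHIFT);
                printf("DPTXFSIZ%u = 0x%08x (start %u, depth %u)\n",
                       ep, val, addr, txfsz[ep]);
                addr += txfsz[ep];
        }
        return 0;
}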
/*
@@ -303,12 +316,55 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_req *hs_req)
{
struct usb_request *req = &hs_req->req;
+ usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
+}
- /* ignore this if we're not moving any data */
- if (hs_req->req.length == 0)
- return;
+/*
+ * dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains
+ * for Control endpoint
+ * @hsotg: The device state.
+ *
+ * This function will allocate 4 descriptor chains for EP 0: 2 for the
+ * Setup stage, and one each for the IN and OUT data/status transactions.
+ */
+static int dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg *hsotg)
+{
+ hsotg->setup_desc[0] =
+ dmam_alloc_coherent(hsotg->dev,
+ sizeof(struct dwc2_dma_desc),
+ &hsotg->setup_desc_dma[0],
+ GFP_KERNEL);
+ if (!hsotg->setup_desc[0])
+ goto fail;
+
+ hsotg->setup_desc[1] =
+ dmam_alloc_coherent(hsotg->dev,
+ sizeof(struct dwc2_dma_desc),
+ &hsotg->setup_desc_dma[1],
+ GFP_KERNEL);
+ if (!hsotg->setup_desc[1])
+ goto fail;
+
+ hsotg->ctrl_in_desc =
+ dmam_alloc_coherent(hsotg->dev,
+ sizeof(struct dwc2_dma_desc),
+ &hsotg->ctrl_in_desc_dma,
+ GFP_KERNEL);
+ if (!hsotg->ctrl_in_desc)
+ goto fail;
+
+ hsotg->ctrl_out_desc =
+ dmam_alloc_coherent(hsotg->dev,
+ sizeof(struct dwc2_dma_desc),
+ &hsotg->ctrl_out_desc_dma,
+ GFP_KERNEL);
+ if (!hsotg->ctrl_out_desc)
+ goto fail;
- usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
+ return 0;
+
+fail:
+ return -ENOMEM;
}
/**
@@ -541,6 +597,273 @@ static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
}
/**
+ * dwc2_gadget_get_chain_limit - get the maximum data payload value of the
+ * DMA descriptor chain prepared for specific endpoint
+ * @hs_ep: The endpoint
+ *
+ * Return the maximum data that can be queued in one go on a given endpoint
+ * depending on its descriptor chain capacity so that transfers that
+ * are too long can be split.
+ */
+static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
+{
+ int is_isoc = hs_ep->isochronous;
+ unsigned int maxsize;
+
+ if (is_isoc)
+ maxsize = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
+ DEV_DMA_ISOC_RX_NBYTES_LIMIT;
+ else
+ maxsize = DEV_DMA_NBYTES_LIMIT;
+
+ /* Above, the size of one descriptor was chosen; multiply it */
+ maxsize *= MAX_DMA_DESC_NUM_GENERIC;
+
+ return maxsize;
+}
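dwc2_gadget_get_chain_limit() above caps a single request at the per-descriptor byte limit multiplied by the number of descriptors in the chain. A minimal standalone sketch of that arithmetic; the numeric limits and chain length below are placeholder assumptions, not the driver's real constants.

#include <stdio.h>

/* Placeholder values, not from the dwc2 headers. */
#define ASSUMED_DESC_NUM        64
#define ASSUMED_NONISOC_LIMIT   65535u
#define ASSUMED_ISOC_TX_LIMIT   4096u
#define ASSUMED_ISOC_RX_LIMIT   2048u

static unsigned int chain_limit(int is_isoc, int dir_in)
{
        unsigned int per_desc;

        if (is_isoc)
                per_desc = dir_in ? ASSUMED_ISOC_TX_LIMIT : ASSUMED_ISOC_RX_LIMIT;
        else
                per_desc = ASSUMED_NONISOC_LIMIT;

        return per_desc * ASSUMED_DESC_NUM;     /* one limit per descriptor in the chain */
}

int main(void)
{
        printf("bulk/intr/ctrl-in: %u bytes per chain\n", chain_limit(0, 1));
        printf("isoc IN:           %u bytes per chain\n", chain_limit(1, 1));
        printf("isoc OUT:          %u bytes per chain\n", chain_limit(1, 0));
        return 0;
}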
+
+/*
+ * dwc2_gadget_get_desc_params - get DMA descriptor parameters.
+ * @hs_ep: The endpoint
+ * @mask: RX/TX bytes mask to be defined
+ *
+ * Returns maximum data payload for one descriptor after analyzing endpoint
+ * characteristics.
+ * DMA descriptor transfer bytes limit depends on EP type:
+ * Control out - MPS,
+ * Isochronous - descriptor rx/tx bytes bitfield limit,
+ * Control In/Bulk/Interrupt - multiple of mps. This avoids
+ * concatenating data from various descriptors within one packet.
+ *
+ * Selects corresponding mask for RX/TX bytes as well.
+ */
+static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
+{
+ u32 mps = hs_ep->ep.maxpacket;
+ int dir_in = hs_ep->dir_in;
+ u32 desc_size = 0;
+
+ if (!hs_ep->index && !dir_in) {
+ desc_size = mps;
+ *mask = DEV_DMA_NBYTES_MASK;
+ } else if (hs_ep->isochronous) {
+ if (dir_in) {
+ desc_size = DEV_DMA_ISOC_TX_NBYTES_LIMIT;
+ *mask = DEV_DMA_ISOC_TX_NBYTES_MASK;
+ } else {
+ desc_size = DEV_DMA_ISOC_RX_NBYTES_LIMIT;
+ *mask = DEV_DMA_ISOC_RX_NBYTES_MASK;
+ }
+ } else {
+ desc_size = DEV_DMA_NBYTES_LIMIT;
+ *mask = DEV_DMA_NBYTES_MASK;
+
+ /* Round down desc_size to be mps multiple */
+ desc_size -= desc_size % mps;
+ }
+
+ return desc_size;
+}
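For the non-EP0, non-isochronous case, dwc2_gadget_get_desc_params() above rounds the per-descriptor size down to a multiple of MPS so that no packet straddles two descriptors. A minimal sketch of that rounding, using an assumed 65535-byte raw limit.

#include <stdio.h>

static unsigned int desc_size_for(unsigned int limit, unsigned int mps)
{
        return limit - (limit % mps);   /* round down to an MPS multiple */
}

int main(void)
{
        printf("limit 65535, mps 512  -> %u per descriptor\n", desc_size_for(65535, 512));
        printf("limit 65535, mps 1024 -> %u per descriptor\n", desc_size_for(65535, 1024));
        return 0;
}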
+
+/*
+ * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain.
+ * @hs_ep: The endpoint
+ * @dma_buff: DMA address to use
+ * @len: Length of the transfer
+ *
+ * This function will iterate over descriptor chain and fill its entries
+ * with corresponding information based on transfer data.
+ */
+static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
+ dma_addr_t dma_buff,
+ unsigned int len)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ int dir_in = hs_ep->dir_in;
+ struct dwc2_dma_desc *desc = hs_ep->desc_list;
+ u32 mps = hs_ep->ep.maxpacket;
+ u32 maxsize = 0;
+ u32 offset = 0;
+ u32 mask = 0;
+ int i;
+
+ maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
+
+ hs_ep->desc_count = (len / maxsize) +
+ ((len % maxsize) ? 1 : 0);
+ if (len == 0)
+ hs_ep->desc_count = 1;
+
+ for (i = 0; i < hs_ep->desc_count; ++i) {
+ desc->status = 0;
+ desc->status |= (DEV_DMA_BUFF_STS_HBUSY
+ << DEV_DMA_BUFF_STS_SHIFT);
+
+ if (len > maxsize) {
+ if (!hs_ep->index && !dir_in)
+ desc->status |= (DEV_DMA_L | DEV_DMA_IOC);
+
+ desc->status |= (maxsize <<
+ DEV_DMA_NBYTES_SHIFT & mask);
+ desc->buf = dma_buff + offset;
+
+ len -= maxsize;
+ offset += maxsize;
+ } else {
+ desc->status |= (DEV_DMA_L | DEV_DMA_IOC);
+
+ if (dir_in)
+ desc->status |= (len % mps) ? DEV_DMA_SHORT :
+ ((hs_ep->send_zlp) ? DEV_DMA_SHORT : 0);
+ if (len > maxsize)
+ dev_err(hsotg->dev, "wrong len %d\n", len);
+
+ desc->status |=
+ len << DEV_DMA_NBYTES_SHIFT & mask;
+ desc->buf = dma_buff + offset;
+ }
+
+ desc->status &= ~DEV_DMA_BUFF_STS_MASK;
+ desc->status |= (DEV_DMA_BUFF_STS_HREADY
+ << DEV_DMA_BUFF_STS_SHIFT);
+ desc++;
+ }
+}
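The descriptor count computed above is effectively ceil(len / maxsize), with a zero-length transfer still consuming one descriptor for the ZLP. A small standalone sketch of that calculation; the maxsize value is an assumption.

#include <stdio.h>

static unsigned int ddma_desc_count(unsigned int len, unsigned int maxsize)
{
        unsigned int count = len / maxsize + (len % maxsize ? 1 : 0);

        return len ? count : 1; /* a zero-length transfer still needs one descriptor */
}

int main(void)
{
        unsigned int maxsize = 65024;   /* assumed per-descriptor payload */

        printf("len 0      -> %u descriptor(s)\n", ddma_desc_count(0, maxsize));
        printf("len 1500   -> %u descriptor(s)\n", ddma_desc_count(1500, maxsize));
        printf("len 130048 -> %u descriptor(s)\n", ddma_desc_count(130048, maxsize));
        return 0;
}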
+
+/*
+ * dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain.
+ * @hs_ep: The isochronous endpoint.
+ * @dma_buff: usb requests dma buffer.
+ * @len: usb request transfer length.
+ *
+ * Finds the index of the first free entry in either the bottom or the top half
+ * of the descriptor chain, depending on which half is under SW control and not
+ * being processed by HW. Then fills that descriptor with the data of the
+ * arrived usb request and frame info, sets the Last and IOC bits, and
+ * increments next_desc. If the filled descriptor is not the first one, removes
+ * the L bit from the previous descriptor's status.
+ */
+static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
+ dma_addr_t dma_buff, unsigned int len)
+{
+ struct dwc2_dma_desc *desc;
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ u32 index;
+ u32 maxsize = 0;
+ u32 mask = 0;
+
+ maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
+ if (len > maxsize) {
+ dev_err(hsotg->dev, "wrong len %d\n", len);
+ return -EINVAL;
+ }
+
+ /*
+ * If SW has already filled half of chain, then return and wait for
+ * the other chain to be processed by HW.
+ */
+ if (hs_ep->next_desc == MAX_DMA_DESC_NUM_GENERIC / 2)
+ return -EBUSY;
+
+ /* Increment frame number by interval for IN */
+ if (hs_ep->dir_in)
+ dwc2_gadget_incr_frame_num(hs_ep);
+
+ index = (MAX_DMA_DESC_NUM_GENERIC / 2) * hs_ep->isoc_chain_num +
+ hs_ep->next_desc;
+
+ /* Sanity check of calculated index */
+ if ((hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC) ||
+ (!hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC / 2)) {
+ dev_err(hsotg->dev, "wrong index %d for iso chain\n", index);
+ return -EINVAL;
+ }
+
+ desc = &hs_ep->desc_list[index];
+
+ /* Clear L bit of previous desc if more than one entry in the chain */
+ if (hs_ep->next_desc)
+ hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L;
+
+ dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n",
+ __func__, hs_ep->index, hs_ep->dir_in ? "in" : "out", index);
+
+ desc->status = 0;
+ desc->status |= (DEV_DMA_BUFF_STS_HBUSY << DEV_DMA_BUFF_STS_SHIFT);
+
+ desc->buf = dma_buff;
+ desc->status |= (DEV_DMA_L | DEV_DMA_IOC |
+ ((len << DEV_DMA_NBYTES_SHIFT) & mask));
+
+ if (hs_ep->dir_in) {
+ desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) &
+ DEV_DMA_ISOC_PID_MASK) |
+ ((len % hs_ep->ep.maxpacket) ?
+ DEV_DMA_SHORT : 0) |
+ ((hs_ep->target_frame <<
+ DEV_DMA_ISOC_FRNUM_SHIFT) &
+ DEV_DMA_ISOC_FRNUM_MASK);
+ }
+
+ desc->status &= ~DEV_DMA_BUFF_STS_MASK;
+ desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT);
+
+ /* Update index of last configured entry in the chain */
+ hs_ep->next_desc++;
+
+ return 0;
+}
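dwc2_gadget_fill_isoc_desc() above implements a double-buffered chain: SW fills descriptors in one half, selected by isoc_chain_num, while HW drains the other, and filling stops once half the chain is used. A minimal sketch of the index arithmetic with an assumed chain length.

#include <stdio.h>

#define ASSUMED_DESC_NUM        64      /* placeholder chain length */

static int next_isoc_index(unsigned int chain_num, unsigned int next_desc)
{
        if (next_desc == ASSUMED_DESC_NUM / 2)
                return -1;      /* this half is full, wait for HW to drain the other */
        return ASSUMED_DESC_NUM / 2 * chain_num + next_desc;
}

int main(void)
{
        printf("chain 0, entry 0  -> index %d\n", next_isoc_index(0, 0));
        printf("chain 1, entry 0  -> index %d\n", next_isoc_index(1, 0));
        printf("chain 1, entry 31 -> index %d\n", next_isoc_index(1, 31));
        printf("chain 1, entry 32 -> index %d\n", next_isoc_index(1, 32));
        return 0;
}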
+
+/*
+ * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA
+ * @hs_ep: The isochronous endpoint.
+ *
+ * Prepare first descriptor chain for isochronous endpoints. Afterwards
+ * write DMA address to HW and enable the endpoint.
+ *
+ * Switch between descriptor chains via isoc_chain_num to give SW the opportunity
+ * to prepare the second descriptor chain while the first one is being processed by HW.
+ */
+static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ struct dwc2_hsotg_req *hs_req, *treq;
+ int index = hs_ep->index;
+ int ret;
+ u32 dma_reg;
+ u32 depctl;
+ u32 ctrl;
+
+ if (list_empty(&hs_ep->queue)) {
+ dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
+ return;
+ }
+
+ list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
+ ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma,
+ hs_req->req.length);
+ if (ret) {
+ dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__);
+ break;
+ }
+ }
+
+ depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
+ dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
+
+ /* write descriptor chain address to control register */
+ dwc2_writel(hs_ep->desc_list_dma, hsotg->regs + dma_reg);
+
+ ctrl = dwc2_readl(hsotg->regs + depctl);
+ ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
+ dwc2_writel(ctrl, hsotg->regs + depctl);
+
+ /* Switch ISOC descriptor chain number being processed by SW */
+ hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1;
+ hs_ep->next_desc = 0;
+}
+
+/**
* dwc2_hsotg_start_req - start a USB request from an endpoint's queue
* @hsotg: The controller state.
* @hs_ep: The endpoint to process a request for
@@ -565,6 +888,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
unsigned length;
unsigned packets;
unsigned maxreq;
+ unsigned int dma_reg;
if (index != 0) {
if (hs_ep->req && !continuing) {
@@ -579,6 +903,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
}
}
+ dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
@@ -598,7 +923,11 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
ureq->length, ureq->actual);
- maxreq = get_ep_limit(hs_ep);
+ if (!using_desc_dma(hsotg))
+ maxreq = get_ep_limit(hs_ep);
+ else
+ maxreq = dwc2_gadget_get_chain_limit(hs_ep);
+
if (length > maxreq) {
int round = maxreq % hs_ep->ep.maxpacket;
@@ -650,22 +979,51 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
/* store the request as the current one we're doing */
hs_ep->req = hs_req;
- /* write size / packets */
- dwc2_writel(epsize, hsotg->regs + epsize_reg);
+ if (using_desc_dma(hsotg)) {
+ u32 offset = 0;
+ u32 mps = hs_ep->ep.maxpacket;
- if (using_dma(hsotg) && !continuing) {
- unsigned int dma_reg;
+ /* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */
+ if (!dir_in) {
+ if (!index)
+ length = mps;
+ else if (length % mps)
+ length += (mps - (length % mps));
+ }
/*
- * write DMA address to control register, buffer already
- * synced by dwc2_hsotg_ep_queue().
+ * If more data to send, adjust DMA for EP0 out data stage.
+ * ureq->dma stays unchanged, hence increment it by the already
+ * passed data count before starting a new transaction.
*/
+ if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT &&
+ continuing)
+ offset = ureq->actual;
- dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
- dwc2_writel(ureq->dma, hsotg->regs + dma_reg);
+ /* Fill DDMA chain entries */
+ dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset,
+ length);
- dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
- __func__, &ureq->dma, dma_reg);
+ /* write descriptor chain address to control register */
+ dwc2_writel(hs_ep->desc_list_dma, hsotg->regs + dma_reg);
+
+ dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n",
+ __func__, (u32)hs_ep->desc_list_dma, dma_reg);
+ } else {
+ /* write size / packets */
+ dwc2_writel(epsize, hsotg->regs + epsize_reg);
+
+ if (using_dma(hsotg) && !continuing && (length != 0)) {
+ /*
+ * write DMA address to control register, buffer
+ * already synced by dwc2_hsotg_ep_queue().
+ */
+
+ dwc2_writel(ureq->dma, hsotg->regs + dma_reg);
+
+ dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
+ __func__, &ureq->dma, dma_reg);
+ }
}
if (hs_ep->isochronous && hs_ep->interval == 1) {
@@ -738,13 +1096,8 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *hs_ep,
struct usb_request *req)
{
- struct dwc2_hsotg_req *hs_req = our_req(req);
int ret;
- /* if the length is zero, ignore the DMA data */
- if (hs_req->req.length == 0)
- return 0;
-
ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
if (ret)
goto dma_error;
@@ -835,6 +1188,41 @@ static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
return false;
}
+/*
+ * dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers
+ * @hsotg: The driver state
+ * @hs_ep: The endpoint the descriptor chain is for
+ *
+ * Called to update EP0 structure's pointers depending on the stage of the
+ * control transfer.
+ */
+static int dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep)
+{
+ switch (hsotg->ep0_state) {
+ case DWC2_EP0_SETUP:
+ case DWC2_EP0_STATUS_OUT:
+ hs_ep->desc_list = hsotg->setup_desc[0];
+ hs_ep->desc_list_dma = hsotg->setup_desc_dma[0];
+ break;
+ case DWC2_EP0_DATA_IN:
+ case DWC2_EP0_STATUS_IN:
+ hs_ep->desc_list = hsotg->ctrl_in_desc;
+ hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma;
+ break;
+ case DWC2_EP0_DATA_OUT:
+ hs_ep->desc_list = hsotg->ctrl_out_desc;
+ hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma;
+ break;
+ default:
+ dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n",
+ hsotg->ep0_state);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
gfp_t gfp_flags)
{
@@ -870,10 +1258,32 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
if (ret)
return ret;
}
+ /* If using descriptor DMA configure EP0 descriptor chain pointers */
+ if (using_desc_dma(hs) && !hs_ep->index) {
+ ret = dwc2_gadget_set_ep0_desc_chain(hs, hs_ep);
+ if (ret)
+ return ret;
+ }
first = list_empty(&hs_ep->queue);
list_add_tail(&hs_req->queue, &hs_ep->queue);
+ /*
+ * Handle DDMA isochronous transfers separately - just add new entry
+ * to the half of descriptor chain that is not processed by HW.
+ * Transfer will be started once SW gets either one of NAK or
+ * OutTknEpDis interrupts.
+ */
+ if (using_desc_dma(hs) && hs_ep->isochronous &&
+ hs_ep->target_frame != TARGET_FRAME_INITIAL) {
+ ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma,
+ hs_req->req.length);
+ if (ret)
+ dev_dbg(hs->dev, "%s: ISO desc chain full\n", __func__);
+
+ return 0;
+ }
+
if (first) {
if (!hs_ep->isochronous) {
dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
@@ -1099,10 +1509,8 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
*/
static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
{
- if (list_empty(&hs_ep->queue))
- return NULL;
-
- return list_first_entry(&hs_ep->queue, struct dwc2_hsotg_req, queue);
+ return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req,
+ queue);
}
/**
@@ -1440,14 +1848,21 @@ static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
if (hs_ep->dir_in)
dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n",
- index);
+ index);
else
dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
- index);
+ index);
+ if (using_desc_dma(hsotg)) {
+ /* No specific buffer needed for ep0 ZLP */
+ dma_addr_t dma = hs_ep->desc_list_dma;
- dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
- DXEPTSIZ_XFERSIZE(0), hsotg->regs +
- epsiz_reg);
+ dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
+ dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
+ } else {
+ dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
+ DXEPTSIZ_XFERSIZE(0), hsotg->regs +
+ epsiz_reg);
+ }
ctrl = dwc2_readl(hsotg->regs + epctl_reg);
ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */
@@ -1510,6 +1925,10 @@ static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
spin_lock(&hsotg->lock);
}
+ /* In DDMA don't need to proceed to starting of next ISOC request */
+ if (using_desc_dma(hsotg) && hs_ep->isochronous)
+ return;
+
/*
* Look to see if there is anything else to do. Note, the completion
* of the previous request may have caused a new request to be started
@@ -1521,6 +1940,115 @@ static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
}
}
+/*
+ * dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA
+ * @hs_ep: The endpoint the request was on.
+ *
+ * Get the first request from the ep queue and determine the descriptor on
+ * which the completion happened. Based on isoc_chain_num, SW discovers which
+ * half of the descriptor chain is currently in use by HW, adjusts the
+ * dma_address and calculates the index of the completed descriptor from the
+ * value of the DEPDMA register. Update the actual length of the request and
+ * give it back to the gadget.
+ */
+static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ struct dwc2_hsotg_req *hs_req;
+ struct usb_request *ureq;
+ int index;
+ dma_addr_t dma_addr;
+ u32 dma_reg;
+ u32 depdma;
+ u32 desc_sts;
+ u32 mask;
+
+ hs_req = get_ep_head(hs_ep);
+ if (!hs_req) {
+ dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__);
+ return;
+ }
+ ureq = &hs_req->req;
+
+ dma_addr = hs_ep->desc_list_dma;
+
+ /*
+ * If the lower half of the descriptor chain is currently in use by SW,
+ * that means higher half is being processed by HW, so shift
+ * DMA address to higher half of descriptor chain.
+ */
+ if (!hs_ep->isoc_chain_num)
+ dma_addr += sizeof(struct dwc2_dma_desc) *
+ (MAX_DMA_DESC_NUM_GENERIC / 2);
+
+ dma_reg = hs_ep->dir_in ? DIEPDMA(hs_ep->index) : DOEPDMA(hs_ep->index);
+ depdma = dwc2_readl(hsotg->regs + dma_reg);
+
+ index = (depdma - dma_addr) / sizeof(struct dwc2_dma_desc) - 1;
+ desc_sts = hs_ep->desc_list[index].status;
+
+ mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK :
+ DEV_DMA_ISOC_RX_NBYTES_MASK;
+ ureq->actual = ureq->length -
+ ((desc_sts & mask) >> DEV_DMA_ISOC_NBYTES_SHIFT);
+
+ /* Adjust actual length for ISOC Out if length is not aligned to 4 */
+ if (!hs_ep->dir_in && ureq->length & 0x3)
+ ureq->actual += 4 - (ureq->length & 0x3);
+
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
+}
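The completion path above recovers the index of the last descriptor HW handled from the DEPDMA register: the register points just past that descriptor, so the index is the address delta divided by the descriptor size, minus one, within the half of the chain HW owns. A standalone sketch with assumed descriptor size and chain length.

#include <stdint.h>
#include <stdio.h>

#define ASSUMED_DESC_SIZE       16u     /* placeholder sizeof(struct dwc2_dma_desc) */
#define ASSUMED_DESC_NUM        64u     /* placeholder chain length */

static unsigned int completed_index(uint64_t chain_base, unsigned int sw_chain_num,
                                    uint64_t depdma)
{
        /* If SW owns the lower half, HW is working in the upper half. */
        if (!sw_chain_num)
                chain_base += (uint64_t)ASSUMED_DESC_SIZE * (ASSUMED_DESC_NUM / 2);

        return (unsigned int)((depdma - chain_base) / ASSUMED_DESC_SIZE) - 1;
}

int main(void)
{
        uint64_t base = 0x10000000;

        /* HW finished the third descriptor (index 2) of the upper half. */
        printf("index = %u\n",
               completed_index(base, 0, base + ASSUMED_DESC_SIZE * (32 + 3)));
        return 0;
}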
+
+/*
+ * dwc2_gadget_start_next_isoc_ddma - start next isoc request, if any.
+ * @hs_ep: The isochronous endpoint to be re-enabled.
+ *
+ * If the ep has been disabled due to last descriptor servicing (IN endpoint) or
+ * BNA (OUT endpoint), check the status of the other half of the descriptor chain
+ * that was under SW control while HW was busy, and restart the endpoint if needed.
+ */
+static void dwc2_gadget_start_next_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ u32 depctl;
+ u32 dma_reg;
+ u32 ctrl;
+ u32 dma_addr = hs_ep->desc_list_dma;
+ unsigned char index = hs_ep->index;
+
+ dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
+ depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
+
+ ctrl = dwc2_readl(hsotg->regs + depctl);
+
+ /*
+ * EP was disabled if HW has processed last descriptor or BNA was set.
+ * So restart ep if SW has prepared new descriptor chain in ep_queue
+ * routine while HW was busy.
+ */
+ if (!(ctrl & DXEPCTL_EPENA)) {
+ if (!hs_ep->next_desc) {
+ dev_dbg(hsotg->dev, "%s: No more ISOC requests\n",
+ __func__);
+ return;
+ }
+
+ dma_addr += sizeof(struct dwc2_dma_desc) *
+ (MAX_DMA_DESC_NUM_GENERIC / 2) *
+ hs_ep->isoc_chain_num;
+ dwc2_writel(dma_addr, hsotg->regs + dma_reg);
+
+ ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
+ dwc2_writel(ctrl, hsotg->regs + depctl);
+
+ /* Switch ISOC descriptor chain number being processed by SW */
+ hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1;
+ hs_ep->next_desc = 0;
+
+ dev_dbg(hsotg->dev, "%s: Restarted isochronous endpoint\n",
+ __func__);
+ }
+}
+
/**
* dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint
* @hsotg: The device state.
@@ -1618,6 +2146,36 @@ static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
dwc2_writel(ctrl, hsotg->regs + epctl_reg);
}
+/*
+ * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc
+ * @hs_ep: The endpoint the transfer went on
+ *
+ * Iterate over the endpoint's descriptor chain and get the number of bytes
+ * remaining in the DMA descriptors after the transfer has completed. Used
+ * for non-isoc EPs.
+ */
+static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ unsigned int bytes_rem = 0;
+ struct dwc2_dma_desc *desc = hs_ep->desc_list;
+ int i;
+ u32 status;
+
+ if (!desc)
+ return -EINVAL;
+
+ for (i = 0; i < hs_ep->desc_count; ++i) {
+ status = desc->status;
+ bytes_rem += status & DEV_DMA_NBYTES_MASK;
+
+ if (status & DEV_DMA_STS_MASK)
+ dev_err(hsotg->dev, "descriptor %d closed with %x\n",
+ i, status & DEV_DMA_STS_MASK);
+ }
+
+ return bytes_rem;
+}
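dwc2_gadget_get_xfersize_ddma() above sums the NBYTES remainder of every descriptor in the chain to recover how much of the request was left untransferred. A minimal sketch of that accounting; the NBYTES mask value is an assumption.

#include <stdio.h>

#define ASSUMED_NBYTES_MASK     0xffffu /* placeholder for DEV_DMA_NBYTES_MASK */

static unsigned int bytes_remaining(const unsigned int *status, unsigned int desc_count)
{
        unsigned int rem = 0;

        for (unsigned int i = 0; i < desc_count; i++)
                rem += status[i] & ASSUMED_NBYTES_MASK; /* bytes HW left in each descriptor */
        return rem;
}

int main(void)
{
        unsigned int status[] = { 0, 0, 100 };  /* last descriptor finished 100 bytes short */

        printf("remaining = %u bytes\n", bytes_remaining(status, 3));
        return 0;
}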
+
/**
* dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
* @hsotg: The device instance
@@ -1648,6 +2206,9 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
return;
}
+ if (using_desc_dma(hsotg))
+ size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
+
if (using_dma(hsotg)) {
unsigned size_done;
@@ -1682,7 +2243,9 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
*/
}
- if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
+ /* DDMA IN status phase will start from StsPhseRcvd interrupt */
+ if (!using_desc_dma(hsotg) && epnum == 0 &&
+ hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
/* Move to STATUS IN */
dwc2_hsotg_ep0_zlp(hsotg, true);
return;
@@ -1812,17 +2375,17 @@ static u32 dwc2_hsotg_ep0_mps(unsigned int mps)
* @hsotg: The driver state.
* @ep: The index number of the endpoint
* @mps: The maximum packet size in bytes
+ * @mc: The multicount value
*
* Configure the maximum packet size for the given endpoint, updating
* the hardware control registers to reflect this.
*/
static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
- unsigned int ep, unsigned int mps, unsigned int dir_in)
+ unsigned int ep, unsigned int mps,
+ unsigned int mc, unsigned int dir_in)
{
struct dwc2_hsotg_ep *hs_ep;
void __iomem *regs = hsotg->regs;
- u32 mpsval;
- u32 mcval;
u32 reg;
hs_ep = index_to_ep(hsotg, ep, dir_in);
@@ -1830,32 +2393,32 @@ static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
return;
if (ep == 0) {
+ u32 mps_bytes = mps;
+
/* EP0 is a special case */
- mpsval = dwc2_hsotg_ep0_mps(mps);
- if (mpsval > 3)
+ mps = dwc2_hsotg_ep0_mps(mps_bytes);
+ if (mps > 3)
goto bad_mps;
- hs_ep->ep.maxpacket = mps;
+ hs_ep->ep.maxpacket = mps_bytes;
hs_ep->mc = 1;
} else {
- mpsval = mps & DXEPCTL_MPS_MASK;
- if (mpsval > 1024)
+ if (mps > 1024)
goto bad_mps;
- mcval = ((mps >> 11) & 0x3) + 1;
- hs_ep->mc = mcval;
- if (mcval > 3)
+ hs_ep->mc = mc;
+ if (mc > 3)
goto bad_mps;
- hs_ep->ep.maxpacket = mpsval;
+ hs_ep->ep.maxpacket = mps;
}
if (dir_in) {
reg = dwc2_readl(regs + DIEPCTL(ep));
reg &= ~DXEPCTL_MPS_MASK;
- reg |= mpsval;
+ reg |= mps;
dwc2_writel(reg, regs + DIEPCTL(ep));
} else {
reg = dwc2_readl(regs + DOEPCTL(ep));
reg &= ~DXEPCTL_MPS_MASK;
- reg |= mpsval;
+ reg |= mps;
dwc2_writel(reg, regs + DOEPCTL(ep));
}
@@ -1954,6 +2517,13 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
/* Finish ZLP handling for IN EP0 transactions */
if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) {
dev_dbg(hsotg->dev, "zlp packet sent\n");
+
+ /*
+ * While sending a zlp for DWC2_EP0_STATUS_IN, the EP direction was
+ * changed to IN. Change it back to complete the OUT transfer request.
+ */
+ hs_ep->dir_in = 0;
+
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
if (hsotg->test_mode) {
int ret;
@@ -1979,8 +2549,14 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
* past the end of the buffer (DMA transfers are always 32bit
* aligned).
*/
-
- size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
+ if (using_desc_dma(hsotg)) {
+ size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
+ if (size_left < 0)
+ dev_err(hsotg->dev, "error parsing DDMA results %d\n",
+ size_left);
+ } else {
+ size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
+ }
size_done = hs_ep->size_loaded - size_left;
size_done += hs_ep->last_load;
@@ -2128,12 +2704,28 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
struct dwc2_hsotg *hsotg = ep->parent;
int dir_in = ep->dir_in;
u32 doepmsk;
+ u32 tmp;
if (dir_in || !ep->isochronous)
return;
+ /*
+ * Store frame in which irq was asserted here, as
+ * it can change while completing request below.
+ */
+ tmp = dwc2_hsotg_read_frameno(hsotg);
+
dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), -ENODATA);
+ if (using_desc_dma(hsotg)) {
+ if (ep->target_frame == TARGET_FRAME_INITIAL) {
+ /* Start first ISO Out */
+ ep->target_frame = tmp;
+ dwc2_gadget_start_isoc_ddma(ep);
+ }
+ return;
+ }
+
if (ep->interval > 1 &&
ep->target_frame == TARGET_FRAME_INITIAL) {
u32 dsts;
@@ -2182,6 +2774,12 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
+
+ if (using_desc_dma(hsotg)) {
+ dwc2_gadget_start_isoc_ddma(hs_ep);
+ return;
+ }
+
if (hs_ep->interval > 1) {
u32 ctrl = dwc2_readl(hsotg->regs +
DIEPCTL(hs_ep->index));
@@ -2237,8 +2835,15 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
ints &= ~DXEPINT_XFERCOMPL;
- if (ints & DXEPINT_STSPHSERCVD)
- dev_dbg(hsotg->dev, "%s: StsPhseRcvd asserted\n", __func__);
+ /*
+ * Don't process XferCompl interrupt in DDMA if EP0 is still in SETUP
+ * stage and xfercomplete was generated without SETUP phase done
+ * interrupt. SW should parse the received setup packet only after the
+ * host exits the setup phase of the control transfer.
+ */
+ if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in &&
+ hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP))
+ ints &= ~DXEPINT_XFERCOMPL;
if (ints & DXEPINT_XFERCOMPL) {
dev_dbg(hsotg->dev,
@@ -2246,11 +2851,17 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
__func__, dwc2_readl(hsotg->regs + epctl_reg),
dwc2_readl(hsotg->regs + epsiz_reg));
- /*
- * we get OutDone from the FIFO, so we only need to look
- * at completing IN requests here
- */
- if (dir_in) {
+ /* In DDMA handle isochronous requests separately */
+ if (using_desc_dma(hsotg) && hs_ep->isochronous) {
+ dwc2_gadget_complete_isoc_request_ddma(hs_ep);
+ /* Try to start next isoc request */
+ dwc2_gadget_start_next_isoc_ddma(hs_ep);
+ } else if (dir_in) {
+ /*
+ * We get OutDone from the FIFO, so we only
+ * need to look at completing IN requests here
+ * if operating in slave mode
+ */
if (hs_ep->isochronous && hs_ep->interval > 1)
dwc2_gadget_incr_frame_num(hs_ep);
@@ -2302,9 +2913,30 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
}
}
+ if (ints & DXEPINT_STSPHSERCVD) {
+ dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__);
+
+ /* Move to STATUS IN for DDMA */
+ if (using_desc_dma(hsotg))
+ dwc2_hsotg_ep0_zlp(hsotg, true);
+ }
+
if (ints & DXEPINT_BACK2BACKSETUP)
dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
+ if (ints & DXEPINT_BNAINTR) {
+ dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__);
+
+ /*
+ * Try to start next isoc request, if any.
+ * Sometimes the endpoint remains enabled after BNA interrupt
+ * assertion, which is not expected, hence we can enter here a
+ * couple of times.
+ */
+ if (hs_ep->isochronous)
+ dwc2_gadget_start_next_isoc_ddma(hs_ep);
+ }
+
if (dir_in && !hs_ep->isochronous) {
/* not sure if this is important, but we'll clear it anyway */
if (ints & DXEPINT_INTKNTXFEMP) {
@@ -2372,6 +3004,8 @@ static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
case DSTS_ENUMSPD_LS:
hsotg->gadget.speed = USB_SPEED_LOW;
+ ep0_mps = 8;
+ ep_mps = 8;
/*
* note, we don't actually support LS in this driver at the
* moment, and the documentation seems to imply that it isn't
@@ -2390,13 +3024,15 @@ static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
if (ep0_mps) {
int i;
/* Initialize ep0 for both in and out directions */
- dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 1);
- dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0);
+ dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 1);
+ dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 0);
for (i = 1; i < hsotg->num_of_eps; i++) {
if (hsotg->eps_in[i])
- dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 1);
+ dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
+ 0, 1);
if (hsotg->eps_out[i])
- dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 0);
+ dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
+ 0, 0);
}
}
@@ -2516,6 +3152,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
u32 intmsk;
u32 val;
u32 usbcfg;
+ u32 dcfg = 0;
/* Kill any ep0 requests as controller will be reinitialized */
kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
@@ -2534,10 +3171,17 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
GUSBCFG_HNPCAP);
- /* set the PLL on, remove the HNP/SRP and set the PHY */
- val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
- usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
- (val << GUSBCFG_USBTRDTIM_SHIFT);
+ if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
+ (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
+ hsotg->params.speed == DWC2_SPEED_PARAM_LOW)) {
+ /* FS/LS Dedicated Transceiver Interface */
+ usbcfg |= GUSBCFG_PHYSEL;
+ } else {
+ /* set the PLL on, remove the HNP/SRP and set the PHY */
+ val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
+ usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
+ (val << GUSBCFG_USBTRDTIM_SHIFT);
+ }
dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
dwc2_hsotg_init_fifo(hsotg);
@@ -2545,7 +3189,23 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
if (!is_usb_reset)
__orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);
- dwc2_writel(DCFG_EPMISCNT(1) | DCFG_DEVSPD_HS, hsotg->regs + DCFG);
+ dcfg |= DCFG_EPMISCNT(1);
+
+ switch (hsotg->params.speed) {
+ case DWC2_SPEED_PARAM_LOW:
+ dcfg |= DCFG_DEVSPD_LS;
+ break;
+ case DWC2_SPEED_PARAM_FULL:
+ if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS)
+ dcfg |= DCFG_DEVSPD_FS48;
+ else
+ dcfg |= DCFG_DEVSPD_FS;
+ break;
+ default:
+ dcfg |= DCFG_DEVSPD_HS;
+ }
+
+ dwc2_writel(dcfg, hsotg->regs + DCFG);
/* Clear any pending OTG interrupts */
dwc2_writel(0xffffffff, hsotg->regs + GOTGINT);
@@ -2556,23 +3216,31 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF |
GINTSTS_USBRST | GINTSTS_RESETDET |
GINTSTS_ENUMDONE | GINTSTS_OTGINT |
- GINTSTS_USBSUSP | GINTSTS_WKUPINT |
- GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT;
+ GINTSTS_USBSUSP | GINTSTS_WKUPINT;
- if (hsotg->core_params->external_id_pin_ctl <= 0)
+ if (!using_desc_dma(hsotg))
+ intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT;
+
+ if (hsotg->params.external_id_pin_ctl <= 0)
intmsk |= GINTSTS_CONIDSTSCHNG;
dwc2_writel(intmsk, hsotg->regs + GINTMSK);
- if (using_dma(hsotg))
+ if (using_dma(hsotg)) {
dwc2_writel(GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
(GAHBCFG_HBSTLEN_INCR4 << GAHBCFG_HBSTLEN_SHIFT),
hsotg->regs + GAHBCFG);
- else
+
+ /* Set DDMA mode support in the core if needed */
+ if (using_desc_dma(hsotg))
+ __orr32(hsotg->regs + DCFG, DCFG_DESCDMA_EN);
+
+ } else {
dwc2_writel(((hsotg->dedicated_fifos) ?
(GAHBCFG_NP_TXF_EMP_LVL |
GAHBCFG_P_TXF_EMP_LVL) : 0) |
GAHBCFG_GLBL_INTR_EN, hsotg->regs + GAHBCFG);
+ }
/*
* If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
@@ -2588,13 +3256,18 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
/*
* don't need XferCompl, we get that from RXFIFO in slave mode. In
- * DMA mode we may need this.
+ * DMA mode we may need this and StsPhseRcvd.
*/
- dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK) : 0) |
+ dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
+ DOEPMSK_STSPHSERCVDMSK) : 0) |
DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
- DOEPMSK_SETUPMSK | DOEPMSK_STSPHSERCVDMSK,
+ DOEPMSK_SETUPMSK,
hsotg->regs + DOEPMSK);
+ /* Enable BNA interrupt for DDMA */
+ if (using_desc_dma(hsotg))
+ __orr32(hsotg->regs + DOEPMSK, DOEPMSK_BNAMSK);
+
dwc2_writel(0, hsotg->regs + DAINTMSK);
dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
@@ -2935,6 +3608,95 @@ irq_retry:
return IRQ_HANDLED;
}
+static int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hs_otg, u32 reg,
+ u32 bit, u32 timeout)
+{
+ u32 i;
+
+ for (i = 0; i < timeout; i++) {
+ if (dwc2_readl(hs_otg->regs + reg) & bit)
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep)
+{
+ u32 epctrl_reg;
+ u32 epint_reg;
+
+ epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) :
+ DOEPCTL(hs_ep->index);
+ epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) :
+ DOEPINT(hs_ep->index);
+
+ dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
+ hs_ep->name);
+
+ if (hs_ep->dir_in) {
+ if (hsotg->dedicated_fifos || hs_ep->periodic) {
+ __orr32(hsotg->regs + epctrl_reg, DXEPCTL_SNAK);
+ /* Wait for Nak effect */
+ if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
+ DXEPINT_INEPNAKEFF, 100))
+ dev_warn(hsotg->dev,
+ "%s: timeout DIEPINT.NAKEFF\n",
+ __func__);
+ } else {
+ __orr32(hsotg->regs + DCTL, DCTL_SGNPINNAK);
+ /* Wait for Nak effect */
+ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
+ GINTSTS_GINNAKEFF, 100))
+ dev_warn(hsotg->dev,
+ "%s: timeout GINTSTS.GINNAKEFF\n",
+ __func__);
+ }
+ } else {
+ if (!(dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_GOUTNAKEFF))
+ __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
+
+ /* Wait for global nak to take effect */
+ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
+ GINTSTS_GOUTNAKEFF, 100))
+ dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n",
+ __func__);
+ }
+
+ /* Disable ep */
+ __orr32(hsotg->regs + epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);
+
+ /* Wait for ep to be disabled */
+ if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
+ dev_warn(hsotg->dev,
+ "%s: timeout DOEPCTL.EPDisable\n", __func__);
+
+ /* Clear EPDISBLD interrupt */
+ __orr32(hsotg->regs + epint_reg, DXEPINT_EPDISBLD);
+
+ if (hs_ep->dir_in) {
+ unsigned short fifo_index;
+
+ if (hsotg->dedicated_fifos || hs_ep->periodic)
+ fifo_index = hs_ep->fifo_index;
+ else
+ fifo_index = 0;
+
+ /* Flush TX FIFO */
+ dwc2_flush_tx_fifo(hsotg, fifo_index);
+
+ /* Clear Global In NP NAK in Shared FIFO for non periodic ep */
+ if (!hsotg->dedicated_fifos && !hs_ep->periodic)
+ __orr32(hsotg->regs + DCTL, DCTL_CGNPINNAK);
+
+ } else {
+ /* Remove global NAKs */
+ __orr32(hsotg->regs + DCTL, DCTL_CGOUTNAK);
+ }
+}
+
/**
* dwc2_hsotg_ep_enable - enable the given endpoint
* @ep: The USB endpint to configure
@@ -2952,6 +3714,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
u32 epctrl_reg;
u32 epctrl;
u32 mps;
+ u32 mc;
u32 mask;
unsigned int dir_in;
unsigned int i, val, size;
@@ -2975,6 +3738,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
}
mps = usb_endpoint_maxp(desc);
+ mc = usb_endpoint_maxp_mult(desc);
/* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */
@@ -2984,6 +3748,18 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
__func__, epctrl, epctrl_reg);
+ /* Allocate DMA descriptor chain for non-ctrl endpoints */
+ if (using_desc_dma(hsotg)) {
+ hs_ep->desc_list = dma_alloc_coherent(hsotg->dev,
+ MAX_DMA_DESC_NUM_GENERIC *
+ sizeof(struct dwc2_dma_desc),
+ &hs_ep->desc_list_dma, GFP_KERNEL);
+ if (!hs_ep->desc_list) {
+ ret = -ENOMEM;
+ goto error2;
+ }
+ }
+
spin_lock_irqsave(&hsotg->lock, flags);
epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK);
@@ -2996,7 +3772,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
epctrl |= DXEPCTL_USBACTEP;
/* update the endpoint state */
- dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, dir_in);
+ dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in);
/* default, set to non-periodic */
hs_ep->isochronous = 0;
@@ -3011,6 +3787,8 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
hs_ep->isochronous = 1;
hs_ep->interval = 1 << (desc->bInterval - 1);
hs_ep->target_frame = TARGET_FRAME_INITIAL;
+ hs_ep->isoc_chain_num = 0;
+ hs_ep->next_desc = 0;
if (dir_in) {
hs_ep->periodic = 1;
mask = dwc2_readl(hsotg->regs + DIEPMSK);
@@ -3067,7 +3845,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
dev_err(hsotg->dev,
"%s: No suitable fifo found\n", __func__);
ret = -ENOMEM;
- goto error;
+ goto error1;
}
hsotg->fifo_map |= 1 << fifo_index;
epctrl |= DXEPCTL_TXFNUM(fifo_index);
@@ -3089,8 +3867,17 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
/* enable the endpoint interrupt */
dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
-error:
+error1:
spin_unlock_irqrestore(&hsotg->lock, flags);
+
+error2:
+ if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
+ dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
+ sizeof(struct dwc2_dma_desc),
+ hs_ep->desc_list, hs_ep->desc_list_dma);
+ hs_ep->desc_list = NULL;
+ }
+
return ret;
}
@@ -3115,11 +3902,23 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
return -EINVAL;
}
+ /* Remove DMA memory allocated for non-control Endpoints */
+ if (using_desc_dma(hsotg)) {
+ dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
+ sizeof(struct dwc2_dma_desc),
+ hs_ep->desc_list, hs_ep->desc_list_dma);
+ hs_ep->desc_list = NULL;
+ }
+
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
spin_lock_irqsave(&hsotg->lock, flags);
ctrl = dwc2_readl(hsotg->regs + epctrl_reg);
+
+ if (ctrl & DXEPCTL_EPENA)
+ dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
+
ctrl &= ~DXEPCTL_EPENA;
ctrl &= ~DXEPCTL_USBACTEP;
ctrl |= DXEPCTL_SNAK;
@@ -3158,77 +3957,6 @@ static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test)
return false;
}
-static int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hs_otg, u32 reg,
- u32 bit, u32 timeout)
-{
- u32 i;
-
- for (i = 0; i < timeout; i++) {
- if (dwc2_readl(hs_otg->regs + reg) & bit)
- return 0;
- udelay(1);
- }
-
- return -ETIMEDOUT;
-}
-
-static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
- struct dwc2_hsotg_ep *hs_ep)
-{
- u32 epctrl_reg;
- u32 epint_reg;
-
- epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) :
- DOEPCTL(hs_ep->index);
- epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) :
- DOEPINT(hs_ep->index);
-
- dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
- hs_ep->name);
- if (hs_ep->dir_in) {
- __orr32(hsotg->regs + epctrl_reg, DXEPCTL_SNAK);
- /* Wait for Nak effect */
- if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
- DXEPINT_INEPNAKEFF, 100))
- dev_warn(hsotg->dev,
- "%s: timeout DIEPINT.NAKEFF\n", __func__);
- } else {
- if (!(dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_GOUTNAKEFF))
- __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
-
- /* Wait for global nak to take effect */
- if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
- GINTSTS_GOUTNAKEFF, 100))
- dev_warn(hsotg->dev,
- "%s: timeout GINTSTS.GOUTNAKEFF\n", __func__);
- }
-
- /* Disable ep */
- __orr32(hsotg->regs + epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);
-
- /* Wait for ep to be disabled */
- if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
- dev_warn(hsotg->dev,
- "%s: timeout DOEPCTL.EPDisable\n", __func__);
-
- if (hs_ep->dir_in) {
- if (hsotg->dedicated_fifos) {
- dwc2_writel(GRSTCTL_TXFNUM(hs_ep->fifo_index) |
- GRSTCTL_TXFFLSH, hsotg->regs + GRSTCTL);
- /* Wait for fifo flush */
- if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL,
- GRSTCTL_TXFFLSH, 100))
- dev_warn(hsotg->dev,
- "%s: timeout flushing fifos\n",
- __func__);
- }
- /* TODO: Flush shared tx fifo */
- } else {
- /* Remove global NAKs */
- __bic32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
- }
-}
-
/**
* dwc2_hsotg_ep_dequeue - dequeue given endpoint
* @ep: The endpoint to dequeue.
@@ -3665,14 +4393,21 @@ static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg,
hs_ep->parent = hsotg;
hs_ep->ep.name = hs_ep->name;
- usb_ep_set_maxpacket_limit(&hs_ep->ep, epnum ? 1024 : EP0_MPS_LIMIT);
+
+ if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW)
+ usb_ep_set_maxpacket_limit(&hs_ep->ep, 8);
+ else
+ usb_ep_set_maxpacket_limit(&hs_ep->ep,
+ epnum ? 1024 : EP0_MPS_LIMIT);
hs_ep->ep.ops = &dwc2_hsotg_ep_ops;
if (epnum == 0) {
hs_ep->ep.caps.type_control = true;
} else {
- hs_ep->ep.caps.type_iso = true;
- hs_ep->ep.caps.type_bulk = true;
+ if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) {
+ hs_ep->ep.caps.type_iso = true;
+ hs_ep->ep.caps.type_bulk = true;
+ }
hs_ep->ep.caps.type_int = true;
}
@@ -3802,51 +4537,6 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
#endif
}
-#ifdef CONFIG_OF
-static void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg)
-{
- struct device_node *np = hsotg->dev->of_node;
- u32 len = 0;
- u32 i = 0;
-
- /* Enable dma if requested in device tree */
- hsotg->g_using_dma = of_property_read_bool(np, "g-use-dma");
-
- /*
- * Register TX periodic fifo size per endpoint.
- * EP0 is excluded since it has no fifo configuration.
- */
- if (!of_find_property(np, "g-tx-fifo-size", &len))
- goto rx_fifo;
-
- len /= sizeof(u32);
-
- /* Read tx fifo sizes other than ep0 */
- if (of_property_read_u32_array(np, "g-tx-fifo-size",
- &hsotg->g_tx_fifo_sz[1], len))
- goto rx_fifo;
-
- /* Add ep0 */
- len++;
-
- /* Make remaining TX fifos unavailable */
- if (len < MAX_EPS_CHANNELS) {
- for (i = len; i < MAX_EPS_CHANNELS; i++)
- hsotg->g_tx_fifo_sz[i] = 0;
- }
-
-rx_fifo:
- /* Register RX fifo size */
- of_property_read_u32(np, "g-rx-fifo-size", &hsotg->g_rx_fifo_sz);
-
- /* Register NPTX fifo size */
- of_property_read_u32(np, "g-np-tx-fifo-size",
- &hsotg->g_np_g_tx_fifo_sz);
-}
-#else
-static inline void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg) { }
-#endif
-
/**
* dwc2_gadget_init - init function for gadget
* @dwc2: The data structure for the DWC2 driver.
@@ -3857,33 +4547,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
struct device *dev = hsotg->dev;
int epnum;
int ret;
- int i;
- u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE;
-
- /* Initialize to legacy fifo configuration values */
- hsotg->g_rx_fifo_sz = 2048;
- hsotg->g_np_g_tx_fifo_sz = 1024;
- memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo));
- /* Device tree specific probe */
- dwc2_hsotg_of_probe(hsotg);
-
- /* Check against largest possible value. */
- if (hsotg->g_np_g_tx_fifo_sz >
- hsotg->hw_params.dev_nperio_tx_fifo_size) {
- dev_warn(dev, "Specified GNPTXFDEP=%d > %d\n",
- hsotg->g_np_g_tx_fifo_sz,
- hsotg->hw_params.dev_nperio_tx_fifo_size);
- hsotg->g_np_g_tx_fifo_sz =
- hsotg->hw_params.dev_nperio_tx_fifo_size;
- }
/* Dump fifo information */
dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
- hsotg->g_np_g_tx_fifo_sz);
- dev_dbg(dev, "RXFIFO size: %d\n", hsotg->g_rx_fifo_sz);
- for (i = 0; i < MAX_EPS_CHANNELS; i++)
- dev_dbg(dev, "Periodic TXFIFO%2d size: %d\n", i,
- hsotg->g_tx_fifo_sz[i]);
+ hsotg->params.g_np_tx_fifo_size);
+ dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size);
hsotg->gadget.max_speed = USB_SPEED_HIGH;
hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
@@ -3909,6 +4577,12 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
if (!hsotg->ep0_buff)
return -ENOMEM;
+ if (using_desc_dma(hsotg)) {
+ ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg);
+ if (ret < 0)
+ return ret;
+ }
+
ret = devm_request_irq(hsotg->dev, irq, dwc2_hsotg_irq, IRQF_SHARED,
dev_name(hsotg->dev), hsotg);
if (ret < 0) {
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index df5a06578005..911c3b36ac06 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -79,9 +79,9 @@ static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
/* Enable the interrupts in the GINTMSK */
intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
- if (hsotg->core_params->dma_enable <= 0)
+ if (hsotg->params.host_dma <= 0)
intmsk |= GINTSTS_RXFLVL;
- if (hsotg->core_params->external_id_pin_ctl <= 0)
+ if (hsotg->params.external_id_pin_ctl <= 0)
intmsk |= GINTSTS_CONIDSTSCHNG;
intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
@@ -100,8 +100,8 @@ static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
- hsotg->core_params->ulpi_fs_ls > 0) ||
- hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
+ hsotg->params.ulpi_fs_ls > 0) ||
+ hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
/* Full speed PHY */
val = HCFG_FSLSPCLKSEL_48_MHZ;
} else {
@@ -152,7 +152,7 @@ static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
if (dwc2_is_host_mode(hsotg))
dwc2_init_fs_ls_pclk_sel(hsotg);
- if (hsotg->core_params->i2c_enable > 0) {
+ if (hsotg->params.i2c_enable > 0) {
dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
/* Program GUSBCFG.OtgUtmiFsSel to I2C */
@@ -189,20 +189,20 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
* so only program the first time. Do a soft reset immediately after
* setting phyif.
*/
- switch (hsotg->core_params->phy_type) {
+ switch (hsotg->params.phy_type) {
case DWC2_PHY_TYPE_PARAM_ULPI:
/* ULPI interface */
dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
- if (hsotg->core_params->phy_ulpi_ddr > 0)
+ if (hsotg->params.phy_ulpi_ddr > 0)
usbcfg |= GUSBCFG_DDRSEL;
break;
case DWC2_PHY_TYPE_PARAM_UTMI:
/* UTMI+ interface */
dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
- if (hsotg->core_params->phy_utmi_width == 16)
+ if (hsotg->params.phy_utmi_width == 16)
usbcfg |= GUSBCFG_PHYIF16;
break;
default:
@@ -230,9 +230,10 @@ static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
u32 usbcfg;
int retval = 0;
- if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
- hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
- /* If FS mode with FS PHY */
+ if ((hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
+ hsotg->params.speed == DWC2_SPEED_PARAM_LOW) &&
+ hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
+ /* If FS/LS mode with FS/LS PHY */
retval = dwc2_fs_phy_init(hsotg, select_phy);
if (retval)
return retval;
@@ -245,7 +246,7 @@ static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
- hsotg->core_params->ulpi_fs_ls > 0) {
+ hsotg->params.ulpi_fs_ls > 0) {
dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
usbcfg |= GUSBCFG_ULPI_FS_LS;
@@ -272,9 +273,9 @@ static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
case GHWCFG2_INT_DMA_ARCH:
dev_dbg(hsotg->dev, "Internal DMA Mode\n");
- if (hsotg->core_params->ahbcfg != -1) {
+ if (hsotg->params.ahbcfg != -1) {
ahbcfg &= GAHBCFG_CTRL_MASK;
- ahbcfg |= hsotg->core_params->ahbcfg &
+ ahbcfg |= hsotg->params.ahbcfg &
~GAHBCFG_CTRL_MASK;
}
break;
@@ -285,21 +286,21 @@ static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
break;
}
- dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
- hsotg->core_params->dma_enable,
- hsotg->core_params->dma_desc_enable);
+ dev_dbg(hsotg->dev, "host_dma:%d dma_desc_enable:%d\n",
+ hsotg->params.host_dma,
+ hsotg->params.dma_desc_enable);
- if (hsotg->core_params->dma_enable > 0) {
- if (hsotg->core_params->dma_desc_enable > 0)
+ if (hsotg->params.host_dma > 0) {
+ if (hsotg->params.dma_desc_enable > 0)
dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
else
dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
} else {
dev_dbg(hsotg->dev, "Using Slave mode\n");
- hsotg->core_params->dma_desc_enable = 0;
+ hsotg->params.dma_desc_enable = 0;
}
- if (hsotg->core_params->dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
ahbcfg |= GAHBCFG_DMA_EN;
dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
@@ -316,10 +317,10 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
switch (hsotg->hw_params.op_mode) {
case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
- if (hsotg->core_params->otg_cap ==
+ if (hsotg->params.otg_cap ==
DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
usbcfg |= GUSBCFG_HNPCAP;
- if (hsotg->core_params->otg_cap !=
+ if (hsotg->params.otg_cap !=
DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
usbcfg |= GUSBCFG_SRPCAP;
break;
@@ -327,7 +328,7 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
- if (hsotg->core_params->otg_cap !=
+ if (hsotg->params.otg_cap !=
DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
usbcfg |= GUSBCFG_SRPCAP;
break;
@@ -390,7 +391,7 @@ static void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
*/
static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
{
- struct dwc2_core_params *params = hsotg->core_params;
+ struct dwc2_core_params *params = &hsotg->params;
struct dwc2_hw_params *hw = &hsotg->hw_params;
u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;
@@ -449,7 +450,7 @@ static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
{
- struct dwc2_core_params *params = hsotg->core_params;
+ struct dwc2_core_params *params = &hsotg->params;
u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
if (!params->enable_dynamic_fifo)
@@ -490,7 +491,7 @@ static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
dwc2_readl(hsotg->regs + HPTXFSIZ));
- if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
+ if (hsotg->params.en_multiple_tx_fifo > 0 &&
hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
/*
* Global DFIFOCFG calculation for Host mode -
@@ -598,7 +599,7 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan)
{
#ifdef VERBOSE_DEBUG
- int num_channels = hsotg->core_params->host_channels;
+ int num_channels = hsotg->params.host_channels;
struct dwc2_qh *qh;
u32 hcchar;
u32 hcsplt;
@@ -648,6 +649,35 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
#endif /* VERBOSE_DEBUG */
}
+static int _dwc2_hcd_start(struct usb_hcd *hcd);
+
+static void dwc2_host_start(struct dwc2_hsotg *hsotg)
+{
+ struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
+
+ hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg);
+ _dwc2_hcd_start(hcd);
+}
+
+static void dwc2_host_disconnect(struct dwc2_hsotg *hsotg)
+{
+ struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
+
+ hcd->self.is_b_host = 0;
+}
+
+static void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
+ int *hub_addr, int *hub_port)
+{
+ struct urb *urb = context;
+
+ if (urb->dev->tt)
+ *hub_addr = urb->dev->tt->hub->devnum;
+ else
+ *hub_addr = 0;
+ *hub_port = urb->dev->ttport;
+}
+
/*
* =========================================================================
* Low Level Host Channel Access Functions
@@ -741,7 +771,7 @@ static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
* For Descriptor DMA mode core halts the channel on AHB error.
* Interrupt is not required.
*/
- if (hsotg->core_params->dma_desc_enable <= 0) {
+ if (hsotg->params.dma_desc_enable <= 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "desc DMA disabled\n");
hcintmsk |= HCINTMSK_AHBERR;
@@ -774,7 +804,7 @@ static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
{
u32 intmsk;
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "DMA enabled\n");
dwc2_hc_enable_dma_ints(hsotg, chan);
@@ -994,7 +1024,7 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
/* No need to set the bit in DDMA for disabling the channel */
/* TODO check it everywhere channel is disabled */
- if (hsotg->core_params->dma_desc_enable <= 0) {
+ if (hsotg->params.dma_desc_enable <= 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "desc DMA disabled\n");
hcchar |= HCCHAR_CHENA;
@@ -1004,7 +1034,7 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
}
hcchar |= HCCHAR_CHDIS;
- if (hsotg->core_params->dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "DMA not enabled\n");
hcchar |= HCCHAR_CHENA;
@@ -1143,7 +1173,7 @@ static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
fifo_space = (dwc2_readl(hsotg->regs + HPTXSTS) &
TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT;
bytes_in_fifo = sizeof(u32) *
- (hsotg->core_params->host_perio_tx_fifo_size -
+ (hsotg->params.host_perio_tx_fifo_size -
fifo_space);
/*
@@ -1339,8 +1369,8 @@ static void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg,
static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan)
{
- u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
- u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
+ u32 max_hc_xfer_size = hsotg->params.max_transfer_size;
+ u16 max_hc_pkt_count = hsotg->params.max_packet_count;
u32 hcchar;
u32 hctsiz = 0;
u16 num_packets;
@@ -1350,7 +1380,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
dev_vdbg(hsotg->dev, "%s()\n", __func__);
if (chan->do_ping) {
- if (hsotg->core_params->dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "ping, no DMA\n");
dwc2_hc_do_ping(hsotg, chan);
@@ -1478,7 +1508,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
TSIZ_SC_MC_PID_SHIFT);
}
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
dwc2_writel((u32)chan->xfer_dma,
hsotg->regs + HCDMA(chan->hc_num));
if (dbg_hc(chan))
@@ -1521,7 +1551,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
chan->xfer_started = 1;
chan->requests++;
- if (hsotg->core_params->dma_enable <= 0 &&
+ if (hsotg->params.host_dma <= 0 &&
!chan->ep_is_in && chan->xfer_len > 0)
/* Load OUT packet into the appropriate Tx FIFO */
dwc2_hc_write_packet(hsotg, chan);
@@ -1799,12 +1829,12 @@ void dwc2_hcd_start(struct dwc2_hsotg *hsotg)
/* Must be called with interrupt disabled and spinlock held */
static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
{
- int num_channels = hsotg->core_params->host_channels;
+ int num_channels = hsotg->params.host_channels;
struct dwc2_host_chan *channel;
u32 hcchar;
int i;
- if (hsotg->core_params->dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
/* Flush out any channel requests in slave mode */
for (i = 0; i < num_channels; i++) {
channel = hsotg->hc_ptr_array[i];
@@ -1840,9 +1870,9 @@ static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
channel->qh = NULL;
}
/* All channels have been freed, mark them available */
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
hsotg->available_host_channels =
- hsotg->core_params->host_channels;
+ hsotg->params.host_channels;
} else {
hsotg->non_periodic_channels = 0;
hsotg->periodic_channels = 0;
@@ -2077,7 +2107,7 @@ static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg,
* Free the QTD and clean up the associated QH. Leave the QH in the
* schedule if it has any remaining QTDs.
*/
- if (hsotg->core_params->dma_desc_enable <= 0) {
+ if (hsotg->params.dma_desc_enable <= 0) {
u8 in_process = urb_qtd->in_process;
dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
@@ -2185,13 +2215,13 @@ static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
/* Set ULPI External VBUS bit if needed */
usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
- if (hsotg->core_params->phy_ulpi_ext_vbus ==
+ if (hsotg->params.phy_ulpi_ext_vbus ==
DWC2_PHY_ULPI_EXTERNAL_VBUS)
usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
/* Set external TS Dline pulsing bit if needed */
usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
- if (hsotg->core_params->ts_dline > 0)
+ if (hsotg->params.ts_dline > 0)
usbcfg |= GUSBCFG_TERMSELDLPULSE;
dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
@@ -2230,10 +2260,10 @@ static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
/* Program the GOTGCTL register */
otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
otgctl &= ~GOTGCTL_OTGVER;
- if (hsotg->core_params->otg_ver > 0)
+ if (hsotg->params.otg_ver > 0)
otgctl |= GOTGCTL_OTGVER;
dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
- dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
+ dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->params.otg_ver);
/* Clear the SRP success bit for FS-I2c */
hsotg->srp_success = 0;
@@ -2277,7 +2307,8 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
/* Initialize Host Configuration Register */
dwc2_init_fs_ls_pclk_sel(hsotg);
- if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
+ if (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
+ hsotg->params.speed == DWC2_SPEED_PARAM_LOW) {
hcfg = dwc2_readl(hsotg->regs + HCFG);
hcfg |= HCFG_FSLSSUPP;
dwc2_writel(hcfg, hsotg->regs + HCFG);
@@ -2288,13 +2319,13 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
* runtime. This bit needs to be programmed during initial configuration
* and its value must not be changed during runtime.
*/
- if (hsotg->core_params->reload_ctl > 0) {
+ if (hsotg->params.reload_ctl > 0) {
hfir = dwc2_readl(hsotg->regs + HFIR);
hfir |= HFIR_RLDCTRL;
dwc2_writel(hfir, hsotg->regs + HFIR);
}
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
u32 op_mode = hsotg->hw_params.op_mode;
if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
@@ -2306,7 +2337,7 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
"Hardware does not support descriptor DMA mode -\n");
dev_err(hsotg->dev,
"falling back to buffer DMA mode.\n");
- hsotg->core_params->dma_desc_enable = 0;
+ hsotg->params.dma_desc_enable = 0;
} else {
hcfg = dwc2_readl(hsotg->regs + HCFG);
hcfg |= HCFG_DESCDMA;
@@ -2332,12 +2363,12 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
otgctl &= ~GOTGCTL_HSTSETHNPEN;
dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
- if (hsotg->core_params->dma_desc_enable <= 0) {
+ if (hsotg->params.dma_desc_enable <= 0) {
int num_channels, i;
u32 hcchar;
/* Flush out any leftover queued requests */
- num_channels = hsotg->core_params->host_channels;
+ num_channels = hsotg->params.host_channels;
for (i = 0; i < num_channels; i++) {
hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
hcchar &= ~HCCHAR_CHENA;
@@ -2399,9 +2430,9 @@ static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg)
hsotg->flags.d32 = 0;
hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active;
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
hsotg->available_host_channels =
- hsotg->core_params->host_channels;
+ hsotg->params.host_channels;
} else {
hsotg->non_periodic_channels = 0;
hsotg->periodic_channels = 0;
@@ -2415,7 +2446,7 @@ static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg)
hc_list_entry)
list_del_init(&chan->hc_list_entry);
- num_channels = hsotg->core_params->host_channels;
+ num_channels = hsotg->params.host_channels;
for (i = 0; i < num_channels; i++) {
chan = hsotg->hc_ptr_array[i];
list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
@@ -2457,7 +2488,7 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
chan->do_ping = 0;
chan->ep_is_in = 0;
chan->data_pid_start = DWC2_HC_PID_SETUP;
- if (hsotg->core_params->dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
chan->xfer_dma = urb->setup_dma;
else
chan->xfer_buf = urb->setup_packet;
@@ -2484,7 +2515,7 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
chan->do_ping = 0;
chan->data_pid_start = DWC2_HC_PID_DATA1;
chan->xfer_len = 0;
- if (hsotg->core_params->dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
chan->xfer_dma = hsotg->status_buf_dma;
else
chan->xfer_buf = hsotg->status_buf;
@@ -2502,13 +2533,13 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
case USB_ENDPOINT_XFER_ISOC:
chan->ep_type = USB_ENDPOINT_XFER_ISOC;
- if (hsotg->core_params->dma_desc_enable > 0)
+ if (hsotg->params.dma_desc_enable > 0)
break;
frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
frame_desc->status = 0;
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
chan->xfer_dma = urb->dma;
chan->xfer_dma += frame_desc->offset +
qtd->isoc_split_offset;
@@ -2690,7 +2721,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
!dwc2_hcd_is_pipe_in(&urb->pipe_info))
urb->actual_length = urb->length;
- if (hsotg->core_params->dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
chan->xfer_dma = urb->dma + urb->actual_length;
else
chan->xfer_buf = (u8 *)urb->buf + urb->actual_length;
@@ -2715,7 +2746,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
*/
chan->multi_count = dwc2_hb_mult(qh->maxp);
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
chan->desc_list_addr = qh->desc_list_dma;
chan->desc_list_sz = qh->desc_list_sz;
}
@@ -2752,7 +2783,7 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
while (qh_ptr != &hsotg->periodic_sched_ready) {
if (list_empty(&hsotg->free_hc_list))
break;
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
if (hsotg->available_host_channels <= 1)
break;
hsotg->available_host_channels--;
@@ -2776,17 +2807,17 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
* schedule. Some free host channels may not be used if they are
* reserved for periodic transfers.
*/
- num_channels = hsotg->core_params->host_channels;
+ num_channels = hsotg->params.host_channels;
qh_ptr = hsotg->non_periodic_sched_inactive.next;
while (qh_ptr != &hsotg->non_periodic_sched_inactive) {
- if (hsotg->core_params->uframe_sched <= 0 &&
+ if (hsotg->params.uframe_sched <= 0 &&
hsotg->non_periodic_channels >= num_channels -
hsotg->periodic_channels)
break;
if (list_empty(&hsotg->free_hc_list))
break;
qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
if (hsotg->available_host_channels < 1)
break;
hsotg->available_host_channels--;
@@ -2808,7 +2839,7 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
else
ret_val = DWC2_TRANSACTION_ALL;
- if (hsotg->core_params->uframe_sched <= 0)
+ if (hsotg->params.uframe_sched <= 0)
hsotg->non_periodic_channels++;
}
@@ -2847,8 +2878,8 @@ static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
list_move_tail(&chan->split_order_list_entry,
&hsotg->split_order);
- if (hsotg->core_params->dma_enable > 0) {
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
if (!chan->xfer_started ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
dwc2_hcd_start_xfer_ddma(hsotg, chan->qh);
@@ -2957,7 +2988,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
* The flag prevents any halts from getting into the request queue in
* the middle of multiple high-bandwidth packets getting queued.
*/
- if (hsotg->core_params->dma_enable <= 0 &&
+ if (hsotg->params.host_dma <= 0 &&
qh->channel->multi_count > 1)
hsotg->queuing_high_bandwidth = 1;
@@ -2976,7 +3007,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
* controller automatically handles multiple packets for
* high-bandwidth transfers.
*/
- if (hsotg->core_params->dma_enable > 0 || status == 0 ||
+ if (hsotg->params.host_dma > 0 || status == 0 ||
qh->channel->requests == qh->channel->multi_count) {
qh_ptr = qh_ptr->next;
/*
@@ -2993,7 +3024,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
exit:
if (no_queue_space || no_fifo_space ||
- (hsotg->core_params->dma_enable <= 0 &&
+ (hsotg->params.host_dma <= 0 &&
!list_empty(&hsotg->periodic_sched_assigned))) {
/*
* May need to queue more transactions as the request
@@ -3073,7 +3104,7 @@ static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
TXSTS_QSPCAVAIL_SHIFT;
- if (hsotg->core_params->dma_enable <= 0 && qspcavail == 0) {
+ if (hsotg->params.host_dma <= 0 && qspcavail == 0) {
no_queue_space = 1;
break;
}
@@ -3106,7 +3137,7 @@ next:
hsotg->non_periodic_qh_ptr->next;
} while (hsotg->non_periodic_qh_ptr != orig_qh_ptr);
- if (hsotg->core_params->dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
TXSTS_QSPCAVAIL_SHIFT;
@@ -3307,7 +3338,7 @@ static void dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
* If hibernation is supported, Phy clock will be suspended
* after the registers are backed up.
*/
- if (!hsotg->core_params->hibernation) {
+ if (!hsotg->params.hibernation) {
/* Suspend the Phy Clock */
pcgctl = dwc2_readl(hsotg->regs + PCGCTL);
pcgctl |= PCGCTL_STOPPCLK;
@@ -3342,7 +3373,7 @@ static void dwc2_port_resume(struct dwc2_hsotg *hsotg)
* If hibernation is supported, Phy clock is already resumed
* after the registers are restored.
*/
- if (!hsotg->core_params->hibernation) {
+ if (!hsotg->params.hibernation) {
pcgctl = dwc2_readl(hsotg->regs + PCGCTL);
pcgctl &= ~PCGCTL_STOPPCLK;
dwc2_writel(pcgctl, hsotg->regs + PCGCTL);
@@ -3569,7 +3600,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
port_status |= USB_PORT_STAT_TEST;
/* USB_PORT_FEAT_INDICATOR unsupported always 0 */
- if (hsotg->core_params->dma_desc_fs_enable) {
+ if (hsotg->params.dma_desc_fs_enable) {
/*
* Enable descriptor DMA only if a full speed
* device is connected.
@@ -3583,7 +3614,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
u32 hcfg;
dev_info(hsotg->dev, "Enabling descriptor DMA mode\n");
- hsotg->core_params->dma_desc_enable = 1;
+ hsotg->params.dma_desc_enable = 1;
hcfg = dwc2_readl(hsotg->regs + HCFG);
hcfg |= HCFG_DESCDMA;
dwc2_writel(hcfg, hsotg->regs + HCFG);
@@ -3824,7 +3855,7 @@ void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
u32 p_tx_status;
int i;
- num_channels = hsotg->core_params->host_channels;
+ num_channels = hsotg->params.host_channels;
dev_dbg(hsotg->dev, "\n");
dev_dbg(hsotg->dev,
"************************************************************\n");
@@ -4020,35 +4051,6 @@ static struct dwc2_hsotg *dwc2_hcd_to_hsotg(struct usb_hcd *hcd)
return p->hsotg;
}
-static int _dwc2_hcd_start(struct usb_hcd *hcd);
-
-void dwc2_host_start(struct dwc2_hsotg *hsotg)
-{
- struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
-
- hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg);
- _dwc2_hcd_start(hcd);
-}
-
-void dwc2_host_disconnect(struct dwc2_hsotg *hsotg)
-{
- struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
-
- hcd->self.is_b_host = 0;
-}
-
-void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr,
- int *hub_port)
-{
- struct urb *urb = context;
-
- if (urb->dev->tt)
- *hub_addr = urb->dev->tt->hub->devnum;
- else
- *hub_addr = 0;
- *hub_port = urb->dev->ttport;
-}
-
/**
* dwc2_host_get_tt_info() - Get the dwc2_tt associated with context
*
@@ -4365,7 +4367,7 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
if (!HCD_HW_ACCESSIBLE(hcd))
goto unlock;
- if (!hsotg->core_params->hibernation)
+ if (!hsotg->params.hibernation)
goto skip_power_saving;
/*
@@ -4417,7 +4419,7 @@ static int _dwc2_hcd_resume(struct usb_hcd *hcd)
if (hsotg->lx_state != DWC2_L2)
goto unlock;
- if (!hsotg->core_params->hibernation) {
+ if (!hsotg->params.hibernation) {
hsotg->lx_state = DWC2_L0;
goto unlock;
}
@@ -4510,9 +4512,6 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
case PIPE_ISOCHRONOUS:
pipetype = "ISOCHRONOUS";
break;
- default:
- pipetype = "UNKNOWN";
- break;
}
dev_vdbg(hsotg->dev, " Endpoint type: %s %s (%s)\n", pipetype,
@@ -4609,8 +4608,6 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
case PIPE_INTERRUPT:
ep_type = USB_ENDPOINT_XFER_INT;
break;
- default:
- dev_warn(hsotg->dev, "Wrong ep type\n");
}
dwc2_urb = dwc2_hcd_urb_alloc(hsotg, urb->number_of_packets,
@@ -4919,7 +4916,7 @@ static void dwc2_hcd_free(struct dwc2_hsotg *hsotg)
}
}
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
if (hsotg->status_buf) {
dma_free_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE,
hsotg->status_buf,
@@ -4999,16 +4996,16 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
hsotg->last_frame_num = HFNUM_MAX_FRNUM;
/* Check if the bus driver or platform code has setup a dma_mask */
- if (hsotg->core_params->dma_enable > 0 &&
+ if (hsotg->params.host_dma > 0 &&
hsotg->dev->dma_mask == NULL) {
dev_warn(hsotg->dev,
"dma_mask not set, disabling DMA\n");
- hsotg->core_params->dma_enable = 0;
- hsotg->core_params->dma_desc_enable = 0;
+ hsotg->params.host_dma = 0;
+ hsotg->params.dma_desc_enable = 0;
}
/* Set device flags indicating whether the HCD supports DMA */
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
dev_warn(hsotg->dev, "can't set DMA mask\n");
if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
@@ -5019,7 +5016,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
if (!hcd)
goto error1;
- if (hsotg->core_params->dma_enable <= 0)
+ if (hsotg->params.host_dma <= 0)
hcd->self.uses_dma = 0;
hcd->has_tt = 1;
@@ -5067,7 +5064,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
* in the controller. Initialize the channel descriptor array.
*/
INIT_LIST_HEAD(&hsotg->free_hc_list);
- num_channels = hsotg->core_params->host_channels;
+ num_channels = hsotg->params.host_channels;
memset(&hsotg->hc_ptr_array[0], 0, sizeof(hsotg->hc_ptr_array));
for (i = 0; i < num_channels; i++) {
@@ -5091,7 +5088,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
* done after usb_add_hcd since that function allocates the DMA buffer
* pool.
*/
- if (hsotg->core_params->dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
hsotg->status_buf = dma_alloc_coherent(hsotg->dev,
DWC2_HCD_STATUS_BUF_SIZE,
&hsotg->status_buf_dma, GFP_KERNEL);
@@ -5107,10 +5104,10 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
* DMA mode.
* Alignment must be set to 512 bytes.
*/
- if (hsotg->core_params->dma_desc_enable ||
- hsotg->core_params->dma_desc_fs_enable) {
+ if (hsotg->params.dma_desc_enable ||
+ hsotg->params.dma_desc_fs_enable) {
hsotg->desc_gen_cache = kmem_cache_create("dwc2-gen-desc",
- sizeof(struct dwc2_hcd_dma_desc) *
+ sizeof(struct dwc2_dma_desc) *
MAX_DMA_DESC_NUM_GENERIC, 512, SLAB_CACHE_DMA,
NULL);
if (!hsotg->desc_gen_cache) {
@@ -5121,12 +5118,12 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
* Disable descriptor dma mode since it will not be
* usable.
*/
- hsotg->core_params->dma_desc_enable = 0;
- hsotg->core_params->dma_desc_fs_enable = 0;
+ hsotg->params.dma_desc_enable = 0;
+ hsotg->params.dma_desc_fs_enable = 0;
}
hsotg->desc_hsisoc_cache = kmem_cache_create("dwc2-hsisoc-desc",
- sizeof(struct dwc2_hcd_dma_desc) *
+ sizeof(struct dwc2_dma_desc) *
MAX_DMA_DESC_NUM_HS_ISOC, 512, 0, NULL);
if (!hsotg->desc_hsisoc_cache) {
dev_err(hsotg->dev,
@@ -5138,8 +5135,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
* Disable descriptor dma mode since it will not be
* usable.
*/
- hsotg->core_params->dma_desc_enable = 0;
- hsotg->core_params->dma_desc_fs_enable = 0;
+ hsotg->params.dma_desc_enable = 0;
+ hsotg->params.dma_desc_fs_enable = 0;
}
}
@@ -5184,7 +5181,6 @@ error3:
error2:
usb_put_hcd(hcd);
error1:
- kfree(hsotg->core_params);
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
kfree(hsotg->last_frame_num_array);
@@ -5250,7 +5246,7 @@ int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
hr = &hsotg->hr_backup;
hr->hcfg = dwc2_readl(hsotg->regs + HCFG);
hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
- for (i = 0; i < hsotg->core_params->host_channels; ++i)
+ for (i = 0; i < hsotg->params.host_channels; ++i)
hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i));
hr->hprt0 = dwc2_read_hprt0(hsotg);
@@ -5286,7 +5282,7 @@ int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
dwc2_writel(hr->hcfg, hsotg->regs + HCFG);
dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK);
- for (i = 0; i < hsotg->core_params->host_channels; ++i)
+ for (i = 0; i < hsotg->params.host_channels; ++i)
dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));
dwc2_writel(hr->hprt0, hsotg->regs + HPRT0);
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 7758bfb644ff..1ed5fa2beff4 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -348,7 +348,7 @@ struct dwc2_qh {
struct list_head qtd_list;
struct dwc2_host_chan *channel;
struct list_head qh_list_entry;
- struct dwc2_hcd_dma_desc *desc_list;
+ struct dwc2_dma_desc *desc_list;
dma_addr_t desc_list_dma;
u32 desc_list_sz;
u32 *n_bytes;
@@ -793,11 +793,6 @@ extern void dwc2_hcd_dump_frrem(struct dwc2_hsotg *hsotg);
#define URB_SEND_ZERO_PACKET 0x2
/* Host driver callbacks */
-
-extern void dwc2_host_start(struct dwc2_hsotg *hsotg);
-extern void dwc2_host_disconnect(struct dwc2_hsotg *hsotg);
-extern void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
- int *hub_addr, int *hub_port);
extern struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg,
void *context, gfp_t mem_flags,
int *ttport);
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 0e1d42b5dec5..cf0367768cb3 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -95,7 +95,7 @@ static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
else
desc_cache = hsotg->desc_gen_cache;
- qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) *
+ qh->desc_list_sz = sizeof(struct dwc2_dma_desc) *
dwc2_max_desc_num(qh);
qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA);
@@ -297,7 +297,7 @@ static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan = qh->channel;
if (dwc2_qh_is_non_per(qh)) {
- if (hsotg->core_params->uframe_sched > 0)
+ if (hsotg->params.uframe_sched > 0)
hsotg->available_host_channels++;
else
hsotg->non_periodic_channels--;
@@ -322,7 +322,7 @@ static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
qh->ntd = 0;
if (qh->desc_list)
- memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
+ memset(qh->desc_list, 0, sizeof(struct dwc2_dma_desc) *
dwc2_max_desc_num(qh));
}
@@ -404,7 +404,7 @@ void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
qh->ep_type == USB_ENDPOINT_XFER_INT) &&
- (hsotg->core_params->uframe_sched > 0 ||
+ (hsotg->params.uframe_sched > 0 ||
!hsotg->periodic_channels) && hsotg->frame_list) {
dwc2_per_sched_disable(hsotg);
dwc2_frame_list_free(hsotg);
@@ -542,7 +542,7 @@ static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh, u32 max_xfer_size,
u16 idx)
{
- struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
+ struct dwc2_dma_desc *dma_desc = &qh->desc_list[idx];
struct dwc2_hcd_iso_packet_desc *frame_desc;
memset(dma_desc, 0, sizeof(*dma_desc));
@@ -571,8 +571,8 @@ static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
- (idx * sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ (idx * sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
}
@@ -645,8 +645,8 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
qh->desc_list[idx].status |= HOST_DMA_IOC;
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma + (idx *
- sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
}
#else
@@ -679,8 +679,8 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
qh->desc_list[idx].status |= HOST_DMA_IOC;
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
- (idx * sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ (idx * sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
#endif
}
@@ -690,11 +690,11 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_qtd *qtd, struct dwc2_qh *qh,
int n_desc)
{
- struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
+ struct dwc2_dma_desc *dma_desc = &qh->desc_list[n_desc];
int len = chan->xfer_len;
- if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1))
- len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);
+ if (len > HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1))
+ len = HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1);
if (chan->ep_is_in) {
int num_packets;
@@ -721,8 +721,8 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
- (n_desc * sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ (n_desc * sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
/*
@@ -778,8 +778,8 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
((n_desc - 1) *
- sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
}
dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
@@ -808,8 +808,8 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
n_desc - 1, &qh->desc_list[n_desc - 1]);
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma + (n_desc - 1) *
- sizeof(struct dwc2_hcd_dma_desc),
- sizeof(struct dwc2_hcd_dma_desc),
+ sizeof(struct dwc2_dma_desc),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
if (n_desc > 1) {
qh->desc_list[0].status |= HOST_DMA_A;
@@ -817,7 +817,7 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
&qh->desc_list[0]);
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma,
- sizeof(struct dwc2_hcd_dma_desc),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
}
chan->ntd = n_desc;
@@ -893,7 +893,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_qtd *qtd,
struct dwc2_qh *qh, u16 idx)
{
- struct dwc2_hcd_dma_desc *dma_desc;
+ struct dwc2_dma_desc *dma_desc;
struct dwc2_hcd_iso_packet_desc *frame_desc;
u16 remain = 0;
int rc = 0;
@@ -902,8 +902,8 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
return -EINVAL;
dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
- sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_FROM_DEVICE);
dma_desc = &qh->desc_list[idx];
@@ -1066,7 +1066,7 @@ stop_scan:
static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan,
struct dwc2_qtd *qtd,
- struct dwc2_hcd_dma_desc *dma_desc,
+ struct dwc2_dma_desc *dma_desc,
enum dwc2_halt_status halt_status,
u32 n_bytes, int *xfer_done)
{
@@ -1154,7 +1154,7 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
{
struct dwc2_qh *qh = chan->qh;
struct dwc2_hcd_urb *urb = qtd->urb;
- struct dwc2_hcd_dma_desc *dma_desc;
+ struct dwc2_dma_desc *dma_desc;
u32 n_bytes;
int failed;
@@ -1165,8 +1165,8 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
dma_sync_single_for_cpu(hsotg->dev,
qh->desc_list_dma + (desc_num *
- sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_FROM_DEVICE);
dma_desc = &qh->desc_list[desc_num];
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index 906f223542ee..b8f4b6aaf1d0 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -256,7 +256,7 @@ static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
u32 *hprt0_modify)
{
- struct dwc2_core_params *params = hsotg->core_params;
+ struct dwc2_core_params *params = &hsotg->params;
int do_reset = 0;
u32 usbcfg;
u32 prtspd;
@@ -395,10 +395,10 @@ static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
} else {
hsotg->flags.b.port_enable_change = 1;
- if (hsotg->core_params->dma_desc_fs_enable) {
+ if (hsotg->params.dma_desc_fs_enable) {
u32 hcfg;
- hsotg->core_params->dma_desc_enable = 0;
+ hsotg->params.dma_desc_enable = 0;
hsotg->new_connection = false;
hcfg = dwc2_readl(hsotg->regs + HCFG);
hcfg &= ~HCFG_DESCDMA;
@@ -604,7 +604,7 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state(
/* Skip whole frame */
if (chan->qh->do_split &&
chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
- hsotg->core_params->dma_enable > 0) {
+ hsotg->params.host_dma > 0) {
qtd->complete_split = 0;
qtd->isoc_split_offset = 0;
}
@@ -743,7 +743,7 @@ cleanup:
dwc2_hc_cleanup(hsotg, chan);
list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
hsotg->available_host_channels++;
} else {
switch (chan->ep_type) {
@@ -789,7 +789,7 @@ static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "DMA enabled\n");
dwc2_release_channel(hsotg, chan, qtd, halt_status);
@@ -915,6 +915,8 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
{
struct dwc2_hcd_iso_packet_desc *frame_desc;
u32 len;
+ u32 hctsiz;
+ u32 pid;
if (!qtd->urb)
return 0;
@@ -932,7 +934,10 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
qtd->isoc_split_offset += len;
- if (frame_desc->actual_length >= frame_desc->length) {
+ hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
+ pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
+
+ if (frame_desc->actual_length >= frame_desc->length || pid == 0) {
frame_desc->status = 0;
qtd->isoc_frame_index++;
qtd->complete_split = 0;
@@ -974,7 +979,7 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
if (pipe_type == USB_ENDPOINT_XFER_ISOC)
/* Do not disable the interrupt, just clear it */
@@ -985,7 +990,7 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
/* Handle xfer complete on CSPLIT */
if (chan->qh->do_split) {
if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
- hsotg->core_params->dma_enable > 0) {
+ hsotg->params.host_dma > 0) {
if (qtd->complete_split &&
dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
qtd))
@@ -1097,7 +1102,7 @@ static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
chnum);
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_STALL);
goto handle_stall_done;
@@ -1207,7 +1212,7 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_BULK:
- if (hsotg->core_params->dma_enable > 0 && chan->ep_is_in) {
+ if (hsotg->params.host_dma > 0 && chan->ep_is_in) {
/*
* NAK interrupts are enabled on bulk/control IN
* transfers in DMA mode for the sole purpose of
@@ -1353,7 +1358,7 @@ static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
*/
if (chan->do_split && chan->complete_split) {
if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
- hsotg->core_params->dma_enable > 0) {
+ hsotg->params.host_dma > 0) {
qtd->complete_split = 0;
qtd->isoc_split_offset = 0;
qtd->isoc_frame_index++;
@@ -1374,7 +1379,7 @@ static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh = chan->qh;
bool past_end;
- if (hsotg->core_params->uframe_sched <= 0) {
+ if (hsotg->params.uframe_sched <= 0) {
int frnum = dwc2_hcd_get_frame_number(hsotg);
/* Don't have num_hs_transfers; simple logic */
@@ -1467,7 +1472,7 @@ static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_BABBLE_ERR);
goto disable_int;
@@ -1572,7 +1577,7 @@ static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
dev_err(hsotg->dev, " Interval: %d\n", urb->interval);
/* Core halts the channel for Descriptor DMA mode */
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_AHB_ERR);
goto handle_ahberr_done;
@@ -1604,7 +1609,7 @@ static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_XACT_ERR);
goto handle_xacterr_done;
@@ -1798,8 +1803,8 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
(chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
- hsotg->core_params->dma_desc_enable <= 0)) {
- if (hsotg->core_params->dma_desc_enable > 0)
+ hsotg->params.dma_desc_enable <= 0)) {
+ if (hsotg->params.dma_desc_enable > 0)
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
chan->halt_status);
else
@@ -1830,7 +1835,7 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
} else if (chan->hcint & HCINTMSK_STALL) {
dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
} else if ((chan->hcint & HCINTMSK_XACTERR) &&
- hsotg->core_params->dma_desc_enable <= 0) {
+ hsotg->params.dma_desc_enable <= 0) {
if (out_nak_enh) {
if (chan->hcint &
(HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
@@ -1850,10 +1855,10 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
*/
dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
} else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
- hsotg->core_params->dma_desc_enable > 0) {
+ hsotg->params.dma_desc_enable > 0) {
dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
} else if ((chan->hcint & HCINTMSK_AHBERR) &&
- hsotg->core_params->dma_desc_enable > 0) {
+ hsotg->params.dma_desc_enable > 0) {
dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
} else if (chan->hcint & HCINTMSK_BBLERR) {
dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
@@ -1946,7 +1951,7 @@ static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
chnum);
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
} else {
if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
@@ -2023,7 +2028,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
* interrupt unmasked
*/
WARN_ON(hcint != HCINTMSK_CHHLTD);
- if (hsotg->core_params->dma_desc_enable > 0)
+ if (hsotg->params.dma_desc_enable > 0)
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
chan->halt_status);
else
@@ -2051,7 +2056,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
qtd_list_entry);
- if (hsotg->core_params->dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
hcint &= ~HCINTMSK_CHHLTD;
}
@@ -2156,7 +2161,7 @@ static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
}
}
- for (i = 0; i < hsotg->core_params->host_channels; i++) {
+ for (i = 0; i < hsotg->params.host_channels; i++) {
if (haint & (1 << i))
dwc2_hc_n_intr(hsotg, i);
}
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 13754353251f..5713f03a4e56 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -75,7 +75,7 @@ static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
int status;
int num_channels;
- num_channels = hsotg->core_params->host_channels;
+ num_channels = hsotg->params.host_channels;
if (hsotg->periodic_channels + hsotg->non_periodic_channels <
num_channels
&& hsotg->periodic_channels < num_channels - 1) {
@@ -355,6 +355,37 @@ static void pmap_unschedule(unsigned long *map, int bits_per_period,
}
}
+/**
+ * dwc2_get_ls_map() - Get the map used for the given qh
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ *
+ * We'll always get the periodic map out of our TT. Note that even if we're
+ * running the host straight in low speed / full speed mode it appears as if
+ * a TT is allocated for us, so we'll use it. If that ever changes we can
+ * add logic here to get a map out of "hsotg" if !qh->do_split.
+ *
+ * Returns: the map or NULL if a map couldn't be found.
+ */
+static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh)
+{
+ unsigned long *map;
+
+ /* Don't expect to be missing a TT and be doing low speed scheduling */
+ if (WARN_ON(!qh->dwc_tt))
+ return NULL;
+
+ /* Get the map and adjust if this is a multi_tt hub */
+ map = qh->dwc_tt->periodic_bitmaps;
+ if (qh->dwc_tt->usb_tt->multi)
+ map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
+
+ return map;
+}
+
+#ifdef DWC2_PRINT_SCHEDULE
/*
* cat_printf() - A printf() + strcat() helper
*
@@ -454,35 +485,6 @@ static void pmap_print(unsigned long *map, int bits_per_period,
}
}
-/**
- * dwc2_get_ls_map() - Get the map used for the given qh
- *
- * @hsotg: The HCD state structure for the DWC OTG controller.
- * @qh: QH for the periodic transfer.
- *
- * We'll always get the periodic map out of our TT. Note that even if we're
- * running the host straight in low speed / full speed mode it appears as if
- * a TT is allocated for us, so we'll use it. If that ever changes we can
- * add logic here to get a map out of "hsotg" if !qh->do_split.
- *
- * Returns: the map or NULL if a map couldn't be found.
- */
-static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
- struct dwc2_qh *qh)
-{
- unsigned long *map;
-
- /* Don't expect to be missing a TT and be doing low speed scheduling */
- if (WARN_ON(!qh->dwc_tt))
- return NULL;
-
- /* Get the map and adjust if this is a multi_tt hub */
- map = qh->dwc_tt->periodic_bitmaps;
- if (qh->dwc_tt->usb_tt->multi)
- map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
-
- return map;
-}
struct dwc2_qh_print_data {
struct dwc2_hsotg *hsotg;
@@ -519,9 +521,6 @@ static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
* If we don't have tracing turned on, don't run unless the special
* define is turned on.
*/
-#ifndef DWC2_PRINT_SCHEDULE
- return;
-#endif
if (qh->schedule_low_speed) {
unsigned long *map = dwc2_get_ls_map(hsotg, qh);
@@ -559,8 +558,12 @@ static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
DWC2_HS_SCHEDULE_UFRAMES, "uFrame", "us",
dwc2_qh_print, &print_data);
}
-
+ return;
}
+#else
+static inline void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh) {};
+#endif
/**
* dwc2_ls_pmap_schedule() - Schedule a low speed QH
@@ -1104,7 +1107,7 @@ static void dwc2_pick_first_frame(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
next_active_frame = earliest_frame;
/* Get the "no microframe scheduler" out of the way... */
- if (hsotg->core_params->uframe_sched <= 0) {
+ if (hsotg->params.uframe_sched <= 0) {
if (qh->do_split)
/* Splits are active at microframe 0 minus 1 */
next_active_frame |= 0x7;
@@ -1197,7 +1200,7 @@ static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
int status;
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
status = dwc2_uframe_schedule(hsotg, qh);
} else {
status = dwc2_periodic_channel_available(hsotg);
@@ -1218,7 +1221,7 @@ static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
return status;
}
- if (hsotg->core_params->uframe_sched <= 0)
+ if (hsotg->params.uframe_sched <= 0)
/* Reserve periodic channel */
hsotg->periodic_channels++;
@@ -1254,7 +1257,7 @@ static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
/* Update claimed usecs per (micro)frame */
hsotg->periodic_usecs -= qh->host_us;
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
dwc2_uframe_unschedule(hsotg, qh);
} else {
/* Release periodic channel reservation */
@@ -1328,7 +1331,7 @@ static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
int status = 0;
max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
- max_channel_xfer_size = hsotg->core_params->max_transfer_size;
+ max_channel_xfer_size = hsotg->params.max_transfer_size;
if (max_xfer_size > max_channel_xfer_size) {
dev_err(hsotg->dev,
@@ -1391,7 +1394,7 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
qh->unreserve_pending = 0;
- if (hsotg->core_params->dma_desc_enable > 0)
+ if (hsotg->params.dma_desc_enable > 0)
/* Don't rely on SOF and start in ready schedule */
list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
else
@@ -1599,7 +1602,7 @@ struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
dwc2_qh_init(hsotg, qh, urb, mem_flags);
- if (hsotg->core_params->dma_desc_enable > 0 &&
+ if (hsotg->params.dma_desc_enable > 0 &&
dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
dwc2_hcd_qh_free(hsotg, qh);
return NULL;
@@ -1711,7 +1714,7 @@ void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
dwc2_deschedule_periodic(hsotg, qh);
hsotg->periodic_qh_count--;
if (!hsotg->periodic_qh_count &&
- hsotg->core_params->dma_desc_enable <= 0) {
+ hsotg->params.dma_desc_enable <= 0) {
intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
intr_mask &= ~GINTSTS_SOF;
dwc2_writel(intr_mask, hsotg->regs + GINTMSK);
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 91058441e62a..5be056b39e5c 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -412,6 +412,7 @@
/* Device mode registers */
#define DCFG HSOTG_REG(0x800)
+#define DCFG_DESCDMA_EN (1 << 23)
#define DCFG_EPMISCNT_MASK (0x1f << 18)
#define DCFG_EPMISCNT_SHIFT 18
#define DCFG_EPMISCNT_LIMIT 0x1f
@@ -473,6 +474,7 @@
#define DIEPMSK_XFERCOMPLMSK (1 << 0)
#define DOEPMSK HSOTG_REG(0x814)
+#define DOEPMSK_BNAMSK (1 << 9)
#define DOEPMSK_BACK2BACKSETUP (1 << 6)
#define DOEPMSK_STSPHSERCVDMSK (1 << 5)
#define DOEPMSK_OUTTKNEPDISMSK (1 << 4)
@@ -790,7 +792,8 @@
#define HCFIFO(_ch) HSOTG_REG(0x1000 + 0x1000 * (_ch))
/**
- * struct dwc2_hcd_dma_desc - Host-mode DMA descriptor structure
+ * struct dwc2_dma_desc - DMA descriptor structure,
+ * used for both host and gadget modes
*
* @status: DMA descriptor status quadlet
* @buf: DMA descriptor data buffer pointer
@@ -798,10 +801,12 @@
* DMA Descriptor structure contains two quadlets:
* Status quadlet and Data buffer pointer.
*/
-struct dwc2_hcd_dma_desc {
+struct dwc2_dma_desc {
u32 status;
u32 buf;
-};
+} __packed;
+
+/* Host Mode DMA descriptor status quadlet */
#define HOST_DMA_A (1 << 31)
#define HOST_DMA_STS_MASK (0x3 << 28)
@@ -817,8 +822,43 @@ struct dwc2_hcd_dma_desc {
#define HOST_DMA_ISOC_NBYTES_SHIFT 0
#define HOST_DMA_NBYTES_MASK (0x1ffff << 0)
#define HOST_DMA_NBYTES_SHIFT 0
+#define HOST_DMA_NBYTES_LIMIT 131071
+
+/* Device Mode DMA descriptor status quadlet */
+
+#define DEV_DMA_BUFF_STS_MASK (0x3 << 30)
+#define DEV_DMA_BUFF_STS_SHIFT 30
+#define DEV_DMA_BUFF_STS_HREADY 0
+#define DEV_DMA_BUFF_STS_DMABUSY 1
+#define DEV_DMA_BUFF_STS_DMADONE 2
+#define DEV_DMA_BUFF_STS_HBUSY 3
+#define DEV_DMA_STS_MASK (0x3 << 28)
+#define DEV_DMA_STS_SHIFT 28
+#define DEV_DMA_STS_SUCC 0
+#define DEV_DMA_STS_BUFF_FLUSH 1
+#define DEV_DMA_STS_BUFF_ERR 3
+#define DEV_DMA_L (1 << 27)
+#define DEV_DMA_SHORT (1 << 26)
+#define DEV_DMA_IOC (1 << 25)
+#define DEV_DMA_SR (1 << 24)
+#define DEV_DMA_MTRF (1 << 23)
+#define DEV_DMA_ISOC_PID_MASK (0x3 << 23)
+#define DEV_DMA_ISOC_PID_SHIFT 23
+#define DEV_DMA_ISOC_PID_DATA0 0
+#define DEV_DMA_ISOC_PID_DATA2 1
+#define DEV_DMA_ISOC_PID_DATA1 2
+#define DEV_DMA_ISOC_PID_MDATA 3
+#define DEV_DMA_ISOC_FRNUM_MASK (0x7ff << 12)
+#define DEV_DMA_ISOC_FRNUM_SHIFT 12
+#define DEV_DMA_ISOC_TX_NBYTES_MASK (0xfff << 0)
+#define DEV_DMA_ISOC_TX_NBYTES_LIMIT 0xfff
+#define DEV_DMA_ISOC_RX_NBYTES_MASK (0x7ff << 0)
+#define DEV_DMA_ISOC_RX_NBYTES_LIMIT 0x7ff
+#define DEV_DMA_ISOC_NBYTES_SHIFT 0
+#define DEV_DMA_NBYTES_MASK (0xffff << 0)
+#define DEV_DMA_NBYTES_SHIFT 0
+#define DEV_DMA_NBYTES_LIMIT 0xffff
-#define MAX_DMA_DESC_SIZE 131071
#define MAX_DMA_DESC_NUM_GENERIC 64
#define MAX_DMA_DESC_NUM_HS_ISOC 256
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
new file mode 100644
index 000000000000..a786256535b6
--- /dev/null
+++ b/drivers/usb/dwc2/params.c
@@ -0,0 +1,1435 @@
+/*
+ * Copyright (C) 2004-2016 Synopsys, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#include "core.h"
+
+static const struct dwc2_core_params params_hi6220 = {
+ .otg_cap = 2, /* No HNP/SRP capable */
+ .otg_ver = 0, /* 1.3 */
+ .dma_desc_enable = 0,
+ .dma_desc_fs_enable = 0,
+ .speed = 0, /* High Speed */
+ .enable_dynamic_fifo = 1,
+ .en_multiple_tx_fifo = 1,
+ .host_rx_fifo_size = 512,
+ .host_nperio_tx_fifo_size = 512,
+ .host_perio_tx_fifo_size = 512,
+ .max_transfer_size = 65535,
+ .max_packet_count = 511,
+ .host_channels = 16,
+ .phy_type = 1, /* UTMI */
+ .phy_utmi_width = 8,
+ .phy_ulpi_ddr = 0, /* Single */
+ .phy_ulpi_ext_vbus = 0,
+ .i2c_enable = 0,
+ .ulpi_fs_ls = 0,
+ .host_support_fs_ls_low_power = 0,
+ .host_ls_low_power_phy_clk = 0, /* 48 MHz */
+ .ts_dline = 0,
+ .reload_ctl = 0,
+ .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
+ GAHBCFG_HBSTLEN_SHIFT,
+ .uframe_sched = 0,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
+static const struct dwc2_core_params params_bcm2835 = {
+ .otg_cap = 0, /* HNP/SRP capable */
+ .otg_ver = 0, /* 1.3 */
+ .dma_desc_enable = 0,
+ .dma_desc_fs_enable = 0,
+ .speed = 0, /* High Speed */
+ .enable_dynamic_fifo = 1,
+ .en_multiple_tx_fifo = 1,
+ .host_rx_fifo_size = 774, /* 774 DWORDs */
+ .host_nperio_tx_fifo_size = 256, /* 256 DWORDs */
+ .host_perio_tx_fifo_size = 512, /* 512 DWORDs */
+ .max_transfer_size = 65535,
+ .max_packet_count = 511,
+ .host_channels = 8,
+ .phy_type = 1, /* UTMI */
+ .phy_utmi_width = 8, /* 8 bits */
+ .phy_ulpi_ddr = 0, /* Single */
+ .phy_ulpi_ext_vbus = 0,
+ .i2c_enable = 0,
+ .ulpi_fs_ls = 0,
+ .host_support_fs_ls_low_power = 0,
+ .host_ls_low_power_phy_clk = 0, /* 48 MHz */
+ .ts_dline = 0,
+ .reload_ctl = 0,
+ .ahbcfg = 0x10,
+ .uframe_sched = 0,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
+static const struct dwc2_core_params params_rk3066 = {
+ .otg_cap = 2, /* non-HNP/non-SRP */
+ .otg_ver = -1,
+ .dma_desc_enable = 0,
+ .dma_desc_fs_enable = 0,
+ .speed = -1,
+ .enable_dynamic_fifo = 1,
+ .en_multiple_tx_fifo = -1,
+ .host_rx_fifo_size = 525, /* 525 DWORDs */
+ .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */
+ .host_perio_tx_fifo_size = 256, /* 256 DWORDs */
+ .max_transfer_size = -1,
+ .max_packet_count = -1,
+ .host_channels = -1,
+ .phy_type = -1,
+ .phy_utmi_width = -1,
+ .phy_ulpi_ddr = -1,
+ .phy_ulpi_ext_vbus = -1,
+ .i2c_enable = -1,
+ .ulpi_fs_ls = -1,
+ .host_support_fs_ls_low_power = -1,
+ .host_ls_low_power_phy_clk = -1,
+ .ts_dline = -1,
+ .reload_ctl = -1,
+ .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
+ GAHBCFG_HBSTLEN_SHIFT,
+ .uframe_sched = -1,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
+static const struct dwc2_core_params params_ltq = {
+ .otg_cap = 2, /* non-HNP/non-SRP */
+ .otg_ver = -1,
+ .dma_desc_enable = -1,
+ .dma_desc_fs_enable = -1,
+ .speed = -1,
+ .enable_dynamic_fifo = -1,
+ .en_multiple_tx_fifo = -1,
+ .host_rx_fifo_size = 288, /* 288 DWORDs */
+ .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */
+ .host_perio_tx_fifo_size = 96, /* 96 DWORDs */
+ .max_transfer_size = 65535,
+ .max_packet_count = 511,
+ .host_channels = -1,
+ .phy_type = -1,
+ .phy_utmi_width = -1,
+ .phy_ulpi_ddr = -1,
+ .phy_ulpi_ext_vbus = -1,
+ .i2c_enable = -1,
+ .ulpi_fs_ls = -1,
+ .host_support_fs_ls_low_power = -1,
+ .host_ls_low_power_phy_clk = -1,
+ .ts_dline = -1,
+ .reload_ctl = -1,
+ .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
+ GAHBCFG_HBSTLEN_SHIFT,
+ .uframe_sched = -1,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
+static const struct dwc2_core_params params_amlogic = {
+ .otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE,
+ .otg_ver = -1,
+ .dma_desc_enable = 0,
+ .dma_desc_fs_enable = 0,
+ .speed = DWC2_SPEED_PARAM_HIGH,
+ .enable_dynamic_fifo = 1,
+ .en_multiple_tx_fifo = -1,
+ .host_rx_fifo_size = 512,
+ .host_nperio_tx_fifo_size = 500,
+ .host_perio_tx_fifo_size = 500,
+ .max_transfer_size = -1,
+ .max_packet_count = -1,
+ .host_channels = 16,
+ .phy_type = DWC2_PHY_TYPE_PARAM_UTMI,
+ .phy_utmi_width = -1,
+ .phy_ulpi_ddr = -1,
+ .phy_ulpi_ext_vbus = -1,
+ .i2c_enable = -1,
+ .ulpi_fs_ls = -1,
+ .host_support_fs_ls_low_power = -1,
+ .host_ls_low_power_phy_clk = -1,
+ .ts_dline = -1,
+ .reload_ctl = 1,
+ .ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
+ GAHBCFG_HBSTLEN_SHIFT,
+ .uframe_sched = 0,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
+static const struct dwc2_core_params params_default = {
+ .otg_cap = -1,
+ .otg_ver = -1,
+
+ /*
+ * Disable descriptor dma mode by default as the HW can support
+ * it, but does not support it for SPLIT transactions.
+ * Disable it for FS devices as well.
+ */
+ .dma_desc_enable = 0,
+ .dma_desc_fs_enable = 0,
+
+ .speed = -1,
+ .enable_dynamic_fifo = -1,
+ .en_multiple_tx_fifo = -1,
+ .host_rx_fifo_size = -1,
+ .host_nperio_tx_fifo_size = -1,
+ .host_perio_tx_fifo_size = -1,
+ .max_transfer_size = -1,
+ .max_packet_count = -1,
+ .host_channels = -1,
+ .phy_type = -1,
+ .phy_utmi_width = -1,
+ .phy_ulpi_ddr = -1,
+ .phy_ulpi_ext_vbus = -1,
+ .i2c_enable = -1,
+ .ulpi_fs_ls = -1,
+ .host_support_fs_ls_low_power = -1,
+ .host_ls_low_power_phy_clk = -1,
+ .ts_dline = -1,
+ .reload_ctl = -1,
+ .ahbcfg = -1,
+ .uframe_sched = -1,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
+const struct of_device_id dwc2_of_match_table[] = {
+ { .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 },
+ { .compatible = "hisilicon,hi6220-usb", .data = &params_hi6220 },
+ { .compatible = "rockchip,rk3066-usb", .data = &params_rk3066 },
+ { .compatible = "lantiq,arx100-usb", .data = &params_ltq },
+ { .compatible = "lantiq,xrx200-usb", .data = &params_ltq },
+ { .compatible = "snps,dwc2", .data = NULL },
+ { .compatible = "samsung,s3c6400-hsotg", .data = NULL},
+ { .compatible = "amlogic,meson8b-usb", .data = &params_amlogic },
+ { .compatible = "amlogic,meson-gxbb-usb", .data = &params_amlogic },
+ { .compatible = "amcc,dwc-otg", .data = NULL },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
+
+static void dwc2_get_device_property(struct dwc2_hsotg *hsotg,
+ char *property, u8 size, u64 *value)
+{
+ u8 val8;
+ u16 val16;
+ u32 val32;
+
+ switch (size) {
+ case 0:
+ *value = device_property_read_bool(hsotg->dev, property);
+ break;
+ case 1:
+ if (device_property_read_u8(hsotg->dev, property, &val8))
+ return;
+
+ *value = val8;
+ break;
+ case 2:
+ if (device_property_read_u16(hsotg->dev, property, &val16))
+ return;
+
+ *value = val16;
+ break;
+ case 4:
+ if (device_property_read_u32(hsotg->dev, property, &val32))
+ return;
+
+ *value = val32;
+ break;
+ case 8:
+ if (device_property_read_u64(hsotg->dev, property, value))
+ return;
+
+ break;
+ default:
+ /*
+ * The size is checked by the only function that calls
+ * this, so this should never happen.
+ */
+ WARN_ON(1);
+ return;
+ }
+}
+
+static void dwc2_set_core_param(void *param, u8 size, u64 value)
+{
+ switch (size) {
+ case 0:
+ *((bool *)param) = !!value;
+ break;
+ case 1:
+ *((u8 *)param) = (u8)value;
+ break;
+ case 2:
+ *((u16 *)param) = (u16)value;
+ break;
+ case 4:
+ *((u32 *)param) = (u32)value;
+ break;
+ case 8:
+ *((u64 *)param) = (u64)value;
+ break;
+ default:
+ /*
+ * The size is checked by the only function that calls
+ * this, so this should never happen.
+ */
+ WARN_ON(1);
+ return;
+ }
+}
+
+/**
+ * dwc2_set_param() - Set a core parameter
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @param: Pointer to the parameter to set
+ * @lookup: True if the property should be looked up
+ * @property: The device property to read
+ * @legacy: The param value to set if @property is not available. This
+ * will typically be the legacy value set in the static
+ * params structure.
+ * @def: The default value
+ * @min: The minimum value
+ * @max: The maximum value
+ * @size: The size of the core parameter in bytes, or 0 for bool.
+ *
+ * This function looks up @property and sets the @param to that value.
+ * If the property doesn't exist, it uses the passed-in @legacy value. It will
+ * verify that the value falls between @min and @max. If it doesn't,
+ * it will output an error and set the parameter to either @def or,
+ * failing that, to @min.
+ *
+ * The @size is used to write to @param and to query the device
+ * properties so that this same function can be used with different
+ * types of parameters.
+ */
+static void dwc2_set_param(struct dwc2_hsotg *hsotg, void *param,
+ bool lookup, char *property, u64 legacy,
+ u64 def, u64 min, u64 max, u8 size)
+{
+ u64 sizemax;
+ u64 value;
+
+ if (WARN_ON(!hsotg || !param || !property))
+ return;
+
+ if (WARN((size > 8) || ((size & (size - 1)) != 0),
+ "Invalid size %d for %s\n", size, property))
+ return;
+
+ dev_vdbg(hsotg->dev, "%s: Setting %s: legacy=%llu, def=%llu, min=%llu, max=%llu, size=%d\n",
+ __func__, property, legacy, def, min, max, size);
+
+ sizemax = (1ULL << (size * 8)) - 1;
+ value = legacy;
+
+ /* Override legacy settings. */
+ if (lookup)
+ dwc2_get_device_property(hsotg, property, size, &value);
+
+ /*
+ * While the value is not valid, try setting it to the default
+ * value, and failing that, set it to the minimum.
+ */
+ while ((value < min) || (value > max)) {
+ /* Print an error unless the value is set to auto. */
+ if (value != sizemax)
+ dev_err(hsotg->dev, "Invalid value %llu for param %s\n",
+ value, property);
+
+ /*
+ * If we are already the default, just set it to the
+ * minimum.
+ */
+ if (value == def) {
+ dev_vdbg(hsotg->dev, "%s: setting value to min=%llu\n",
+ __func__, min);
+ value = min;
+ break;
+ }
+
+ /* Try the default value */
+ dev_vdbg(hsotg->dev, "%s: setting value to default=%llu\n",
+ __func__, def);
+ value = def;
+ }
+
+ dev_dbg(hsotg->dev, "Setting %s to %llu\n", property, value);
+ dwc2_set_core_param(param, size, value);
+}
+
+/**
+ * dwc2_set_param_u16() - Set a u16 parameter
+ *
+ * See dwc2_set_param().
+ */
+static void dwc2_set_param_u16(struct dwc2_hsotg *hsotg, u16 *param,
+ bool lookup, char *property, u16 legacy,
+ u16 def, u16 min, u16 max)
+{
+ dwc2_set_param(hsotg, param, lookup, property,
+ legacy, def, min, max, 2);
+}
+
+/**
+ * dwc2_set_param_bool() - Set a bool parameter
+ *
+ * See dwc2_set_param().
+ *
+ * Note: there is no 'legacy' argument here because there is no legacy
+ * source of bool params.
+ */
+static void dwc2_set_param_bool(struct dwc2_hsotg *hsotg, bool *param,
+ bool lookup, char *property,
+ bool def, bool min, bool max)
+{
+ dwc2_set_param(hsotg, param, lookup, property,
+ def, def, min, max, 0);
+}
+
+#define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c))
+
+/* Parameter access functions */
+static void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ switch (val) {
+ case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
+ if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
+ valid = 0;
+ break;
+ case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
+ switch (hsotg->hw_params.op_mode) {
+ case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
+ case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
+ break;
+ default:
+ valid = 0;
+ break;
+ }
+ break;
+ case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
+ /* always valid */
+ break;
+ default:
+ valid = 0;
+ break;
+ }
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for otg_cap parameter. Check HW configuration.\n",
+ val);
+ switch (hsotg->hw_params.op_mode) {
+ case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
+ val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
+ break;
+ case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
+ val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
+ break;
+ default:
+ val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
+ break;
+ }
+ dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
+ }
+
+ hsotg->params.otg_cap = val;
+}
+
+static void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (val > 0 && (hsotg->params.host_dma <= 0 ||
+ !hsotg->hw_params.dma_desc_enable))
+ valid = 0;
+ if (val < 0)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
+ val);
+ val = (hsotg->params.host_dma > 0 &&
+ hsotg->hw_params.dma_desc_enable);
+ dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
+ }
+
+ hsotg->params.dma_desc_enable = val;
+}
+
+static void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (val > 0 && (hsotg->params.host_dma <= 0 ||
+ !hsotg->hw_params.dma_desc_enable))
+ valid = 0;
+ if (val < 0)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n",
+ val);
+ val = (hsotg->params.host_dma > 0 &&
+ hsotg->hw_params.dma_desc_enable);
+ }
+
+ hsotg->params.dma_desc_fs_enable = val;
+ dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val);
+}
+
+static void
+dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "Wrong value for host_support_fs_low_power\n");
+ dev_err(hsotg->dev,
+ "host_support_fs_low_power must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev,
+ "Setting host_support_fs_low_power to %d\n", val);
+ }
+
+ hsotg->params.host_support_fs_ls_low_power = val;
+}
+
+static void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ int valid = 1;
+
+ if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
+ valid = 0;
+ if (val < 0)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.enable_dynamic_fifo;
+ dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
+ }
+
+ hsotg->params.enable_dynamic_fifo = val;
+}
+
+static void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (val < 16 || val > hsotg->hw_params.rx_fifo_size)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for host_rx_fifo_size. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.rx_fifo_size;
+ dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
+ }
+
+ hsotg->params.host_rx_fifo_size = val;
+}
+
+static void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ int valid = 1;
+
+ if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.host_nperio_tx_fifo_size;
+ dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
+ val);
+ }
+
+ hsotg->params.host_nperio_tx_fifo_size = val;
+}
+
+static void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ int valid = 1;
+
+ if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.host_perio_tx_fifo_size;
+ dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
+ val);
+ }
+
+ hsotg->params.host_perio_tx_fifo_size = val;
+}
+
+static void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for max_transfer_size. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.max_transfer_size;
+ dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
+ }
+
+ hsotg->params.max_transfer_size = val;
+}
+
+static void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (val < 15 || val > hsotg->hw_params.max_packet_count)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for max_packet_count. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.max_packet_count;
+ dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
+ }
+
+ hsotg->params.max_packet_count = val;
+}
+
+static void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (val < 1 || val > hsotg->hw_params.host_channels)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for host_channels. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.host_channels;
+ dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
+ }
+
+ hsotg->params.host_channels = val;
+}
+
+static void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 0;
+ u32 hs_phy_type, fs_phy_type;
+
+ if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
+ DWC2_PHY_TYPE_PARAM_ULPI)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev, "Wrong value for phy_type\n");
+ dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
+ }
+
+ valid = 0;
+ }
+
+ hs_phy_type = hsotg->hw_params.hs_phy_type;
+ fs_phy_type = hsotg->hw_params.fs_phy_type;
+ if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
+ (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
+ hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
+ valid = 1;
+ else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
+ (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
+ hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
+ valid = 1;
+ else if (val == DWC2_PHY_TYPE_PARAM_FS &&
+ fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
+ valid = 1;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for phy_type. Check HW configuration.\n",
+ val);
+ val = DWC2_PHY_TYPE_PARAM_FS;
+ if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
+ if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
+ hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
+ val = DWC2_PHY_TYPE_PARAM_UTMI;
+ else
+ val = DWC2_PHY_TYPE_PARAM_ULPI;
+ }
+ dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
+ }
+
+ hsotg->params.phy_type = val;
+}
+
+static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
+{
+ return hsotg->params.phy_type;
+}
+
+static void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 2)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev, "Wrong value for speed parameter\n");
+ dev_err(hsotg->dev, "max_speed parameter must be 0, 1, or 2\n");
+ }
+ valid = 0;
+ }
+
+ if (dwc2_is_hs_iot(hsotg) &&
+ val == DWC2_SPEED_PARAM_LOW)
+ valid = 0;
+
+ if (val == DWC2_SPEED_PARAM_HIGH &&
+ dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for speed parameter. Check HW configuration.\n",
+ val);
+ val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
+ DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
+ dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
+ }
+
+ hsotg->params.speed = val;
+}
+
+static void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ int valid = 1;
+
+ if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
+ DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "Wrong value for host_ls_low_power_phy_clk parameter\n");
+ dev_err(hsotg->dev,
+ "host_ls_low_power_phy_clk must be 0 or 1\n");
+ }
+ valid = 0;
+ }
+
+ if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
+ dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
+ val);
+ val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
+ ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
+ : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
+ dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
+ val);
+ }
+
+ hsotg->params.host_ls_low_power_phy_clk = val;
+}
+
+static void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
+ dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val);
+ }
+
+ hsotg->params.phy_ulpi_ddr = val;
+}
+
+static void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "Wrong value for phy_ulpi_ext_vbus\n");
+ dev_err(hsotg->dev,
+ "phy_ulpi_ext_vbus must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
+ }
+
+ hsotg->params.phy_ulpi_ext_vbus = val;
+}
+
+static void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 0;
+
+ switch (hsotg->hw_params.utmi_phy_data_width) {
+ case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
+ valid = (val == 8);
+ break;
+ case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
+ valid = (val == 16);
+ break;
+ case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
+ valid = (val == 8 || val == 16);
+ break;
+ }
+
+ if (!valid) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "%d invalid for phy_utmi_width. Check HW configuration.\n",
+ val);
+ }
+ val = (hsotg->hw_params.utmi_phy_data_width ==
+ GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
+ dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
+ }
+
+ hsotg->params.phy_utmi_width = val;
+}
+
+static void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
+ dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
+ }
+
+ hsotg->params.ulpi_fs_ls = val;
+}
+
+static void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev, "Wrong value for ts_dline\n");
+ dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
+ }
+
+ hsotg->params.ts_dline = val;
+}
+
+static void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
+ dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
+ }
+
+ valid = 0;
+ }
+
+ if (val == 1 && !(hsotg->hw_params.i2c_enable))
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for i2c_enable. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.i2c_enable;
+ dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
+ }
+
+ hsotg->params.i2c_enable = val;
+}
+
+static void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ int valid = 1;
+
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "Wrong value for en_multiple_tx_fifo,\n");
+ dev_err(hsotg->dev,
+ "en_multiple_tx_fifo must be 0 or 1\n");
+ }
+ valid = 0;
+ }
+
+ if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.en_multiple_tx_fifo;
+ dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
+ }
+
+ hsotg->params.en_multiple_tx_fifo = val;
+}
+
+static void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter reload_ctl\n", val);
+ dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
+ }
+ valid = 0;
+ }
+
+ if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for parameter reload_ctl. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
+ dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
+ }
+
+ hsotg->params.reload_ctl = val;
+}
+
+static void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
+{
+ if (val != -1)
+ hsotg->params.ahbcfg = val;
+ else
+ hsotg->params.ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
+ GAHBCFG_HBSTLEN_SHIFT;
+}
+
+static void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter otg_ver\n", val);
+ dev_err(hsotg->dev,
+ "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
+ }
+
+ hsotg->params.otg_ver = val;
+}
+
+static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter uframe_sched\n",
+ val);
+ dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
+ }
+ val = 1;
+ dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
+ }
+
+ hsotg->params.uframe_sched = val;
+}
+
+static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter external_id_pin_ctl\n",
+ val);
+ dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val);
+ }
+
+ hsotg->params.external_id_pin_ctl = val;
+}
+
+static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter hibernation\n",
+ val);
+ dev_err(hsotg->dev, "hibernation must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val);
+ }
+
+ hsotg->params.hibernation = val;
+}
+
+static void dwc2_set_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg)
+{
+ int i;
+ int num;
+ char *property = "g-tx-fifo-size";
+ struct dwc2_core_params *p = &hsotg->params;
+
+ memset(p->g_tx_fifo_size, 0, sizeof(p->g_tx_fifo_size));
+
+ /* Read tx fifo sizes */
+ num = device_property_read_u32_array(hsotg->dev, property, NULL, 0);
+
+ if (num > 0) {
+ device_property_read_u32_array(hsotg->dev, property,
+ &p->g_tx_fifo_size[1],
+ num);
+ } else {
+ u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE;
+
+ memcpy(&p->g_tx_fifo_size[1],
+ p_tx_fifo,
+ sizeof(p_tx_fifo));
+
+ num = ARRAY_SIZE(p_tx_fifo);
+ }
+
+ for (i = 0; i < num; i++) {
+ if ((i + 1) >= ARRAY_SIZE(p->g_tx_fifo_size))
+ break;
+
+ dev_dbg(hsotg->dev, "Setting %s[%d] to %d\n",
+ property, i + 1, p->g_tx_fifo_size[i + 1]);
+ }
+}
+
+static void dwc2_set_gadget_dma(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ struct dwc2_core_params *p = &hsotg->params;
+ bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH);
+
+ /* Buffer DMA */
+ dwc2_set_param_bool(hsotg, &p->g_dma,
+ false, "gadget-dma",
+ true, false,
+ dma_capable);
+
+ /* DMA Descriptor */
+ dwc2_set_param_bool(hsotg, &p->g_dma_desc, false,
+ "gadget-dma-desc",
+ p->g_dma, false,
+ !!hw->dma_desc_enable);
+}
+
+/**
+ * dwc2_set_parameters() - Set all core parameters.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @params: The parameters to set
+ */
+static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
+ const struct dwc2_core_params *params)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ struct dwc2_core_params *p = &hsotg->params;
+ bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH);
+
+ dwc2_set_param_otg_cap(hsotg, params->otg_cap);
+ if ((hsotg->dr_mode == USB_DR_MODE_HOST) ||
+ (hsotg->dr_mode == USB_DR_MODE_OTG)) {
+ dev_dbg(hsotg->dev, "Setting HOST parameters\n");
+
+ dwc2_set_param_bool(hsotg, &p->host_dma,
+ false, "host-dma",
+ true, false,
+ dma_capable);
+ }
+ dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
+ dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
+
+ dwc2_set_param_host_support_fs_ls_low_power(hsotg,
+ params->host_support_fs_ls_low_power);
+ dwc2_set_param_enable_dynamic_fifo(hsotg,
+ params->enable_dynamic_fifo);
+ dwc2_set_param_host_rx_fifo_size(hsotg,
+ params->host_rx_fifo_size);
+ dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
+ params->host_nperio_tx_fifo_size);
+ dwc2_set_param_host_perio_tx_fifo_size(hsotg,
+ params->host_perio_tx_fifo_size);
+ dwc2_set_param_max_transfer_size(hsotg,
+ params->max_transfer_size);
+ dwc2_set_param_max_packet_count(hsotg,
+ params->max_packet_count);
+ dwc2_set_param_host_channels(hsotg, params->host_channels);
+ dwc2_set_param_phy_type(hsotg, params->phy_type);
+ dwc2_set_param_speed(hsotg, params->speed);
+ dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
+ params->host_ls_low_power_phy_clk);
+ dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
+ dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
+ params->phy_ulpi_ext_vbus);
+ dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
+ dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
+ dwc2_set_param_ts_dline(hsotg, params->ts_dline);
+ dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
+ dwc2_set_param_en_multiple_tx_fifo(hsotg,
+ params->en_multiple_tx_fifo);
+ dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
+ dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
+ dwc2_set_param_otg_ver(hsotg, params->otg_ver);
+ dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
+ dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
+ dwc2_set_param_hibernation(hsotg, params->hibernation);
+
+ /*
+ * Set devicetree-only parameters. These parameters do not
+ * take any values from @params.
+ */
+ if ((hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) ||
+ (hsotg->dr_mode == USB_DR_MODE_OTG)) {
+ dev_dbg(hsotg->dev, "Setting peripheral device properties\n");
+
+ dwc2_set_gadget_dma(hsotg);
+
+ /*
+ * The values for g_rx_fifo_size (2048) and
+ * g_np_tx_fifo_size (1024) come from the legacy s3c
+ * gadget driver. These defaults have been hard-coded
+ * for some time so many platforms depend on these
+ * values. Leave them as defaults for now and only
+ * auto-detect if the hardware does not support the
+ * default.
+ */
+ dwc2_set_param_u16(hsotg, &p->g_rx_fifo_size,
+ true, "g-rx-fifo-size", 2048,
+ hw->rx_fifo_size,
+ 16, hw->rx_fifo_size);
+
+ dwc2_set_param_u16(hsotg, &p->g_np_tx_fifo_size,
+ true, "g-np-tx-fifo-size", 1024,
+ hw->dev_nperio_tx_fifo_size,
+ 16, hw->dev_nperio_tx_fifo_size);
+
+ dwc2_set_param_tx_fifo_sizes(hsotg);
+ }
+}
+
+/*
+ * Gets host hardware parameters. Forces host mode if not currently in
+ * host mode. Should be called immediately after a core soft reset in
+ * order to get the reset values.
+ */
+static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ u32 gnptxfsiz;
+ u32 hptxfsiz;
+ bool forced;
+
+ if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
+ return;
+
+ forced = dwc2_force_mode_if_needed(hsotg, true);
+
+ gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
+ hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
+ dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
+ dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
+
+ if (forced)
+ dwc2_clear_force_mode(hsotg);
+
+ hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+ FIFOSIZE_DEPTH_SHIFT;
+ hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+ FIFOSIZE_DEPTH_SHIFT;
+}
+
+/*
+ * Gets device hardware parameters. Forces device mode if not
+ * currently in device mode. Should be called immediately after a core
+ * soft reset in order to get the reset values.
+ */
+static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ bool forced;
+ u32 gnptxfsiz;
+
+ if (hsotg->dr_mode == USB_DR_MODE_HOST)
+ return;
+
+ forced = dwc2_force_mode_if_needed(hsotg, false);
+
+ gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
+ dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
+
+ if (forced)
+ dwc2_clear_force_mode(hsotg);
+
+ hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+ FIFOSIZE_DEPTH_SHIFT;
+}
+
+/**
+ * During device initialization, read various hardware configuration
+ * registers and interpret the contents.
+ */
+int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ unsigned int width;
+ u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
+ u32 grxfsiz;
+
+ /*
+ * Attempt to ensure this device is really a DWC_otg Controller.
+ * Read and verify the GSNPSID register contents. The value should be
+ * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or "OT3",
+ * as in "OTG version 2.xx" or "OTG version 3.xx".
+ */
+ hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID);
+ if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
+ (hw->snpsid & 0xfffff000) != 0x4f543000 &&
+ (hw->snpsid & 0xffff0000) != 0x55310000 &&
+ (hw->snpsid & 0xffff0000) != 0x55320000) {
+ dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
+ hw->snpsid);
+ return -ENODEV;
+ }
+
+ dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
+ hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
+ hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
+
+ hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
+ hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
+ hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
+ hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
+ grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
+
+ dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
+ dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
+ dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
+ dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
+ dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
+
+ /*
+ * Host specific hardware parameters. Reading these parameters
+ * requires the controller to be in host mode. The mode will
+ * be forced, if necessary, to read these values.
+ */
+ dwc2_get_host_hwparams(hsotg);
+ dwc2_get_dev_hwparams(hsotg);
+
+ /* hwcfg1 */
+ hw->dev_ep_dirs = hwcfg1;
+
+ /* hwcfg2 */
+ hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
+ GHWCFG2_OP_MODE_SHIFT;
+ hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
+ GHWCFG2_ARCHITECTURE_SHIFT;
+ hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
+ hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
+ GHWCFG2_NUM_HOST_CHAN_SHIFT);
+ hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
+ GHWCFG2_HS_PHY_TYPE_SHIFT;
+ hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
+ GHWCFG2_FS_PHY_TYPE_SHIFT;
+ hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
+ GHWCFG2_NUM_DEV_EP_SHIFT;
+ hw->nperio_tx_q_depth =
+ (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
+ GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
+ hw->host_perio_tx_q_depth =
+ (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
+ GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
+ hw->dev_token_q_depth =
+ (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
+ GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
+
+ /* hwcfg3 */
+ width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
+ GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
+ hw->max_transfer_size = (1 << (width + 11)) - 1;
+ width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
+ GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
+ hw->max_packet_count = (1 << (width + 4)) - 1;
+ hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
+ hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
+ GHWCFG3_DFIFO_DEPTH_SHIFT;
+
+ /* hwcfg4 */
+ hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
+ hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
+ GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
+ hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
+ hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
+ hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
+ GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
+
+ /* fifo sizes */
+ hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
+ GRXFSIZ_DEPTH_SHIFT;
+
+ dev_dbg(hsotg->dev, "Detected values from hardware:\n");
+ dev_dbg(hsotg->dev, " op_mode=%d\n",
+ hw->op_mode);
+ dev_dbg(hsotg->dev, " arch=%d\n",
+ hw->arch);
+ dev_dbg(hsotg->dev, " dma_desc_enable=%d\n",
+ hw->dma_desc_enable);
+ dev_dbg(hsotg->dev, " power_optimized=%d\n",
+ hw->power_optimized);
+ dev_dbg(hsotg->dev, " i2c_enable=%d\n",
+ hw->i2c_enable);
+ dev_dbg(hsotg->dev, " hs_phy_type=%d\n",
+ hw->hs_phy_type);
+ dev_dbg(hsotg->dev, " fs_phy_type=%d\n",
+ hw->fs_phy_type);
+ dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n",
+ hw->utmi_phy_data_width);
+ dev_dbg(hsotg->dev, " num_dev_ep=%d\n",
+ hw->num_dev_ep);
+ dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n",
+ hw->num_dev_perio_in_ep);
+ dev_dbg(hsotg->dev, " host_channels=%d\n",
+ hw->host_channels);
+ dev_dbg(hsotg->dev, " max_transfer_size=%d\n",
+ hw->max_transfer_size);
+ dev_dbg(hsotg->dev, " max_packet_count=%d\n",
+ hw->max_packet_count);
+ dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n",
+ hw->nperio_tx_q_depth);
+ dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n",
+ hw->host_perio_tx_q_depth);
+ dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n",
+ hw->dev_token_q_depth);
+ dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n",
+ hw->enable_dynamic_fifo);
+ dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n",
+ hw->en_multiple_tx_fifo);
+ dev_dbg(hsotg->dev, " total_fifo_size=%d\n",
+ hw->total_fifo_size);
+ dev_dbg(hsotg->dev, " rx_fifo_size=%d\n",
+ hw->rx_fifo_size);
+ dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n",
+ hw->host_nperio_tx_fifo_size);
+ dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n",
+ hw->host_perio_tx_fifo_size);
+ dev_dbg(hsotg->dev, "\n");
+
+ return 0;
+}
+
+int dwc2_init_params(struct dwc2_hsotg *hsotg)
+{
+ const struct of_device_id *match;
+ struct dwc2_core_params params;
+
+ match = of_match_device(dwc2_of_match_table, hsotg->dev);
+ if (match && match->data)
+ params = *((struct dwc2_core_params *)match->data);
+ else
+ params = params_default;
+
+ if (dwc2_is_fs_iot(hsotg)) {
+ params.speed = DWC2_SPEED_PARAM_FULL;
+ params.phy_type = DWC2_PHY_TYPE_PARAM_FS;
+ }
+
+ dwc2_set_parameters(hsotg, &params);
+
+ return 0;
+}
diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c
index ae419615a176..a23329e3d7cd 100644
--- a/drivers/usb/dwc2/pci.c
+++ b/drivers/usb/dwc2/pci.c
@@ -62,6 +62,20 @@ struct dwc2_pci_glue {
struct platform_device *phy;
};
+static int dwc2_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc2)
+{
+ if (pdev->vendor == PCI_VENDOR_ID_SYNOPSYS &&
+ pdev->device == PCI_PRODUCT_ID_HAPS_HSOTG) {
+ struct property_entry properties[] = {
+ { },
+ };
+
+ return platform_device_add_properties(dwc2, properties);
+ }
+
+ return 0;
+}
+
static void dwc2_pci_remove(struct pci_dev *pci)
{
struct dwc2_pci_glue *glue = pci_get_drvdata(pci);
@@ -122,6 +136,10 @@ static int dwc2_pci_probe(struct pci_dev *pci,
return PTR_ERR(phy);
}
+ ret = dwc2_pci_quirks(pci, dwc2);
+ if (ret)
+ goto err;
+
ret = platform_device_add(dwc2);
if (ret) {
dev_err(dev, "failed to register dwc2 device\n");
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 8e1728b39a49..4fc8c603afb8 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -55,165 +55,6 @@
static const char dwc2_driver_name[] = "dwc2";
-static const struct dwc2_core_params params_hi6220 = {
- .otg_cap = 2, /* No HNP/SRP capable */
- .otg_ver = 0, /* 1.3 */
- .dma_enable = 1,
- .dma_desc_enable = 0,
- .dma_desc_fs_enable = 0,
- .speed = 0, /* High Speed */
- .enable_dynamic_fifo = 1,
- .en_multiple_tx_fifo = 1,
- .host_rx_fifo_size = 512,
- .host_nperio_tx_fifo_size = 512,
- .host_perio_tx_fifo_size = 512,
- .max_transfer_size = 65535,
- .max_packet_count = 511,
- .host_channels = 16,
- .phy_type = 1, /* UTMI */
- .phy_utmi_width = 8,
- .phy_ulpi_ddr = 0, /* Single */
- .phy_ulpi_ext_vbus = 0,
- .i2c_enable = 0,
- .ulpi_fs_ls = 0,
- .host_support_fs_ls_low_power = 0,
- .host_ls_low_power_phy_clk = 0, /* 48 MHz */
- .ts_dline = 0,
- .reload_ctl = 0,
- .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
- GAHBCFG_HBSTLEN_SHIFT,
- .uframe_sched = 0,
- .external_id_pin_ctl = -1,
- .hibernation = -1,
-};
-
-static const struct dwc2_core_params params_bcm2835 = {
- .otg_cap = 0, /* HNP/SRP capable */
- .otg_ver = 0, /* 1.3 */
- .dma_enable = 1,
- .dma_desc_enable = 0,
- .dma_desc_fs_enable = 0,
- .speed = 0, /* High Speed */
- .enable_dynamic_fifo = 1,
- .en_multiple_tx_fifo = 1,
- .host_rx_fifo_size = 774, /* 774 DWORDs */
- .host_nperio_tx_fifo_size = 256, /* 256 DWORDs */
- .host_perio_tx_fifo_size = 512, /* 512 DWORDs */
- .max_transfer_size = 65535,
- .max_packet_count = 511,
- .host_channels = 8,
- .phy_type = 1, /* UTMI */
- .phy_utmi_width = 8, /* 8 bits */
- .phy_ulpi_ddr = 0, /* Single */
- .phy_ulpi_ext_vbus = 0,
- .i2c_enable = 0,
- .ulpi_fs_ls = 0,
- .host_support_fs_ls_low_power = 0,
- .host_ls_low_power_phy_clk = 0, /* 48 MHz */
- .ts_dline = 0,
- .reload_ctl = 0,
- .ahbcfg = 0x10,
- .uframe_sched = 0,
- .external_id_pin_ctl = -1,
- .hibernation = -1,
-};
-
-static const struct dwc2_core_params params_rk3066 = {
- .otg_cap = 2, /* non-HNP/non-SRP */
- .otg_ver = -1,
- .dma_enable = -1,
- .dma_desc_enable = 0,
- .dma_desc_fs_enable = 0,
- .speed = -1,
- .enable_dynamic_fifo = 1,
- .en_multiple_tx_fifo = -1,
- .host_rx_fifo_size = 525, /* 525 DWORDs */
- .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */
- .host_perio_tx_fifo_size = 256, /* 256 DWORDs */
- .max_transfer_size = -1,
- .max_packet_count = -1,
- .host_channels = -1,
- .phy_type = -1,
- .phy_utmi_width = -1,
- .phy_ulpi_ddr = -1,
- .phy_ulpi_ext_vbus = -1,
- .i2c_enable = -1,
- .ulpi_fs_ls = -1,
- .host_support_fs_ls_low_power = -1,
- .host_ls_low_power_phy_clk = -1,
- .ts_dline = -1,
- .reload_ctl = -1,
- .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
- GAHBCFG_HBSTLEN_SHIFT,
- .uframe_sched = -1,
- .external_id_pin_ctl = -1,
- .hibernation = -1,
-};
-
-static const struct dwc2_core_params params_ltq = {
- .otg_cap = 2, /* non-HNP/non-SRP */
- .otg_ver = -1,
- .dma_enable = -1,
- .dma_desc_enable = -1,
- .dma_desc_fs_enable = -1,
- .speed = -1,
- .enable_dynamic_fifo = -1,
- .en_multiple_tx_fifo = -1,
- .host_rx_fifo_size = 288, /* 288 DWORDs */
- .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */
- .host_perio_tx_fifo_size = 96, /* 96 DWORDs */
- .max_transfer_size = 65535,
- .max_packet_count = 511,
- .host_channels = -1,
- .phy_type = -1,
- .phy_utmi_width = -1,
- .phy_ulpi_ddr = -1,
- .phy_ulpi_ext_vbus = -1,
- .i2c_enable = -1,
- .ulpi_fs_ls = -1,
- .host_support_fs_ls_low_power = -1,
- .host_ls_low_power_phy_clk = -1,
- .ts_dline = -1,
- .reload_ctl = -1,
- .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
- GAHBCFG_HBSTLEN_SHIFT,
- .uframe_sched = -1,
- .external_id_pin_ctl = -1,
- .hibernation = -1,
-};
-
-static const struct dwc2_core_params params_amlogic = {
- .otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE,
- .otg_ver = -1,
- .dma_enable = 1,
- .dma_desc_enable = 0,
- .dma_desc_fs_enable = 0,
- .speed = DWC2_SPEED_PARAM_HIGH,
- .enable_dynamic_fifo = 1,
- .en_multiple_tx_fifo = -1,
- .host_rx_fifo_size = 512,
- .host_nperio_tx_fifo_size = 500,
- .host_perio_tx_fifo_size = 500,
- .max_transfer_size = -1,
- .max_packet_count = -1,
- .host_channels = 16,
- .phy_type = DWC2_PHY_TYPE_PARAM_UTMI,
- .phy_utmi_width = -1,
- .phy_ulpi_ddr = -1,
- .phy_ulpi_ext_vbus = -1,
- .i2c_enable = -1,
- .ulpi_fs_ls = -1,
- .host_support_fs_ls_low_power = -1,
- .host_ls_low_power_phy_clk = -1,
- .ts_dline = -1,
- .reload_ctl = 1,
- .ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
- GAHBCFG_HBSTLEN_SHIFT,
- .uframe_sched = 0,
- .external_id_pin_ctl = -1,
- .hibernation = -1,
-};
-
/*
* Check the dr_mode against the module configuration and hardware
* capabilities.
@@ -510,20 +351,6 @@ static void dwc2_driver_shutdown(struct platform_device *dev)
disable_irq(hsotg->irq);
}
-static const struct of_device_id dwc2_of_match_table[] = {
- { .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 },
- { .compatible = "hisilicon,hi6220-usb", .data = &params_hi6220 },
- { .compatible = "rockchip,rk3066-usb", .data = &params_rk3066 },
- { .compatible = "lantiq,arx100-usb", .data = &params_ltq },
- { .compatible = "lantiq,xrx200-usb", .data = &params_ltq },
- { .compatible = "snps,dwc2", .data = NULL },
- { .compatible = "samsung,s3c6400-hsotg", .data = NULL},
- { .compatible = "amlogic,meson8b-usb", .data = &params_amlogic },
- { .compatible = "amlogic,meson-gxbb-usb", .data = &params_amlogic },
- {},
-};
-MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
-
/**
* dwc2_driver_probe() - Called when the DWC_otg core is bound to the DWC_otg
* driver
@@ -538,30 +365,10 @@ MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
*/
static int dwc2_driver_probe(struct platform_device *dev)
{
- const struct of_device_id *match;
- const struct dwc2_core_params *params;
- struct dwc2_core_params defparams;
struct dwc2_hsotg *hsotg;
struct resource *res;
int retval;
- match = of_match_device(dwc2_of_match_table, &dev->dev);
- if (match && match->data) {
- params = match->data;
- } else {
- /* Default all params to autodetect */
- dwc2_set_all_params(&defparams, -1);
- params = &defparams;
-
- /*
- * Disable descriptor dma mode by default as the HW can support
- * it, but does not support it for SPLIT transactions.
- * Disable it for FS devices as well.
- */
- defparams.dma_desc_enable = 0;
- defparams.dma_desc_fs_enable = 0;
- }
-
hsotg = devm_kzalloc(&dev->dev, sizeof(*hsotg), GFP_KERNEL);
if (!hsotg)
return -ENOMEM;
@@ -591,13 +398,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
spin_lock_init(&hsotg->lock);
- hsotg->core_params = devm_kzalloc(&dev->dev,
- sizeof(*hsotg->core_params), GFP_KERNEL);
- if (!hsotg->core_params)
- return -ENOMEM;
-
- dwc2_set_all_params(hsotg->core_params, -1);
-
hsotg->irq = platform_get_irq(dev, 0);
if (hsotg->irq < 0) {
dev_err(&dev->dev, "missing IRQ resource\n");
@@ -631,11 +431,12 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (retval)
goto error;
- /* Validate parameter values */
- dwc2_set_parameters(hsotg, params);
-
dwc2_force_dr_mode(hsotg);
+ retval = dwc2_init_params(hsotg);
+ if (retval)
+ goto error;
+
if (hsotg->dr_mode != USB_DR_MODE_HOST) {
retval = dwc2_gadget_init(hsotg, hsotg->irq);
if (retval)
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index b97cde76914d..c5aa235863e8 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -62,7 +62,7 @@ config USB_DWC3_OMAP
config USB_DWC3_EXYNOS
tristate "Samsung Exynos Platform"
- depends on ARCH_EXYNOS && OF || COMPILE_TEST
+ depends on (ARCH_EXYNOS || COMPILE_TEST) && OF
default USB_DWC3
help
Recent Exynos5 SoCs ship with one DesignWare Core USB3 IP inside,
@@ -70,7 +70,7 @@ config USB_DWC3_EXYNOS
config USB_DWC3_PCI
tristate "PCIe-based Platforms"
- depends on PCI
+ depends on PCI && ACPI
default USB_DWC3
help
If you're using the DesignWare Core IP with a PCIe, please say
@@ -98,7 +98,7 @@ config USB_DWC3_OF_SIMPLE
config USB_DWC3_ST
tristate "STMicroelectronics Platforms"
- depends on ARCH_STI && OF
+ depends on (ARCH_STI || COMPILE_TEST) && OF
default USB_DWC3
help
STMicroelectronics SoCs with one DesignWare Core USB3 IP
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 22420e17d68b..ffca34029b21 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -3,7 +3,11 @@ CFLAGS_trace.o := -I$(src)
obj-$(CONFIG_USB_DWC3) += dwc3.o
-dwc3-y := core.o debug.o trace.o
+dwc3-y := core.o
+
+ifneq ($(CONFIG_FTRACE),)
+ dwc3-y += trace.o
+endif
ifneq ($(filter y,$(CONFIG_USB_DWC3_HOST) $(CONFIG_USB_DWC3_DUAL_ROLE)),)
dwc3-y += host.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index fea446900cad..369bab16a824 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -169,33 +169,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
return -ETIMEDOUT;
}
-/**
- * dwc3_soft_reset - Issue soft reset
- * @dwc: Pointer to our controller context structure
- */
-static int dwc3_soft_reset(struct dwc3 *dwc)
-{
- unsigned long timeout;
- u32 reg;
-
- timeout = jiffies + msecs_to_jiffies(500);
- dwc3_writel(dwc->regs, DWC3_DCTL, DWC3_DCTL_CSFTRST);
- do {
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
- if (!(reg & DWC3_DCTL_CSFTRST))
- break;
-
- if (time_after(jiffies, timeout)) {
- dev_err(dwc->dev, "Reset Timed Out\n");
- return -ETIMEDOUT;
- }
-
- cpu_relax();
- } while (true);
-
- return 0;
-}
-
/*
* dwc3_frame_length_adjustment - Adjusts frame length if required
* @dwc3: Pointer to our controller context structure
@@ -229,7 +202,7 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
struct dwc3_event_buffer *evt)
{
- dma_free_coherent(dwc->dev, evt->length, evt->buf, evt->dma);
+ dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma);
}
/**
@@ -251,7 +224,11 @@ static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
evt->dwc = dwc;
evt->length = length;
- evt->buf = dma_alloc_coherent(dwc->dev, length,
+ evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL);
+ if (!evt->cache)
+ return ERR_PTR(-ENOMEM);
+
+ evt->buf = dma_alloc_coherent(dwc->sysdev, length,
&evt->dma, GFP_KERNEL);
if (!evt->buf)
return ERR_PTR(-ENOMEM);
@@ -305,13 +282,7 @@ static int dwc3_event_buffers_setup(struct dwc3 *dwc)
struct dwc3_event_buffer *evt;
evt = dwc->ev_buf;
- dwc3_trace(trace_dwc3_core,
- "Event buf %p dma %08llx length %d\n",
- evt->buf, (unsigned long long) evt->dma,
- evt->length);
-
evt->lpos = 0;
-
dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
lower_32_bits(evt->dma));
dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0),
@@ -370,11 +341,11 @@ static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
if (!WARN_ON(dwc->scratchbuf))
return 0;
- scratch_addr = dma_map_single(dwc->dev, dwc->scratchbuf,
+ scratch_addr = dma_map_single(dwc->sysdev, dwc->scratchbuf,
dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE,
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dwc->dev, scratch_addr)) {
- dev_err(dwc->dev, "failed to map scratch buffer\n");
+ if (dma_mapping_error(dwc->sysdev, scratch_addr)) {
+ dev_err(dwc->sysdev, "failed to map scratch buffer\n");
ret = -EFAULT;
goto err0;
}
@@ -398,7 +369,7 @@ static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
return 0;
err1:
- dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch *
+ dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
err0:
@@ -417,7 +388,7 @@ static void dwc3_free_scratch_buffers(struct dwc3 *dwc)
if (!WARN_ON(dwc->scratchbuf))
return;
- dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch *
+ dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
kfree(dwc->scratchbuf);
}
@@ -428,9 +399,6 @@ static void dwc3_core_num_eps(struct dwc3 *dwc)
dwc->num_in_eps = DWC3_NUM_IN_EPS(parms);
dwc->num_out_eps = DWC3_NUM_EPS(parms) - dwc->num_in_eps;
-
- dwc3_trace(trace_dwc3_core, "found %d IN and %d OUT endpoints",
- dwc->num_in_eps, dwc->num_out_eps);
}
static void dwc3_cache_hwparams(struct dwc3 *dwc)
@@ -524,13 +492,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
}
/* FALLTHROUGH */
case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
- /* Making sure the interface and PHY are operational */
- ret = dwc3_soft_reset(dwc);
- if (ret)
- return ret;
-
- udelay(1);
-
ret = dwc3_ulpi_init(dwc);
if (ret)
return ret;
@@ -594,19 +555,12 @@ static void dwc3_core_exit(struct dwc3 *dwc)
phy_power_off(dwc->usb3_generic_phy);
}
-/**
- * dwc3_core_init - Low-level initialization of DWC3 Core
- * @dwc: Pointer to our controller context structure
- *
- * Returns 0 on success otherwise negative errno.
- */
-static int dwc3_core_init(struct dwc3 *dwc)
+static bool dwc3_core_is_valid(struct dwc3 *dwc)
{
- u32 hwparams4 = dwc->hwparams.hwparams4;
- u32 reg;
- int ret;
+ u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
+
/* This should read as U3 followed by revision number */
if ((reg & DWC3_GSNPSID_MASK) == 0x55330000) {
/* Detected DWC_usb3 IP */
@@ -616,36 +570,16 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
dwc->revision |= DWC3_REVISION_IS_DWC31;
} else {
- dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
- ret = -ENODEV;
- goto err0;
+ return false;
}
- /*
- * Write Linux Version Code to our GUID register so it's easy to figure
- * out which kernel version a bug was found.
- */
- dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
-
- /* Handle USB2.0-only core configuration */
- if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
- DWC3_GHWPARAMS3_SSPHY_IFC_DIS) {
- if (dwc->maximum_speed == USB_SPEED_SUPER)
- dwc->maximum_speed = USB_SPEED_HIGH;
- }
-
- /* issue device SoftReset too */
- ret = dwc3_soft_reset(dwc);
- if (ret)
- goto err0;
-
- ret = dwc3_core_soft_reset(dwc);
- if (ret)
- goto err0;
+ return true;
+}
- ret = dwc3_phy_setup(dwc);
- if (ret)
- goto err0;
+static void dwc3_core_setup_global_control(struct dwc3 *dwc)
+{
+ u32 hwparams4 = dwc->hwparams.hwparams4;
+ u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
@@ -683,13 +617,13 @@ static int dwc3_core_init(struct dwc3 *dwc)
reg |= DWC3_GCTL_GBLHIBERNATIONEN;
break;
default:
- dwc3_trace(trace_dwc3_core, "No power optimization available\n");
+ /* nothing */
+ break;
}
/* check if current dwc3 is on simulation board */
if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
- dwc3_trace(trace_dwc3_core,
- "running on FPGA platform\n");
+ dev_info(dwc->dev, "Running with FPGA optmizations\n");
dwc->is_fpga = true;
}
@@ -714,7 +648,47 @@ static int dwc3_core_init(struct dwc3 *dwc)
reg |= DWC3_GCTL_U2RSTECN;
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+}
+
+/**
+ * dwc3_core_init - Low-level initialization of DWC3 Core
+ * @dwc: Pointer to our controller context structure
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int dwc3_core_init(struct dwc3 *dwc)
+{
+ u32 reg;
+ int ret;
+
+ if (!dwc3_core_is_valid(dwc)) {
+ dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
+ ret = -ENODEV;
+ goto err0;
+ }
+
+ /*
+ * Write Linux Version Code to our GUID register so it's easy to figure
+ * out which kernel version a bug was found.
+ */
+ dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
+
+ /* Handle USB2.0-only core configuration */
+ if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
+ DWC3_GHWPARAMS3_SSPHY_IFC_DIS) {
+ if (dwc->maximum_speed == USB_SPEED_SUPER)
+ dwc->maximum_speed = USB_SPEED_HIGH;
+ }
+
+ ret = dwc3_core_soft_reset(dwc);
+ if (ret)
+ goto err0;
+ ret = dwc3_phy_setup(dwc);
+ if (ret)
+ goto err0;
+
+ dwc3_core_setup_global_control(dwc);
dwc3_core_num_eps(dwc);
ret = dwc3_setup_scratch_buffers(dwc);
@@ -766,6 +740,16 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
}
+ /*
+ * Enable hardware control of sending remote wakeup in HS when
+ * the device is in the L1 state.
+ */
+ if (dwc->revision >= DWC3_REVISION_290A) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+ reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ }
+
return 0;
err4:
@@ -919,57 +903,13 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
}
}
-#define DWC3_ALIGN_MASK (16 - 1)
-
-static int dwc3_probe(struct platform_device *pdev)
+static void dwc3_get_properties(struct dwc3 *dwc)
{
- struct device *dev = &pdev->dev;
- struct resource *res;
- struct dwc3 *dwc;
+ struct device *dev = dwc->dev;
u8 lpm_nyet_threshold;
u8 tx_de_emphasis;
u8 hird_threshold;
- int ret;
-
- void __iomem *regs;
- void *mem;
-
- mem = devm_kzalloc(dev, sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
-
- dwc = PTR_ALIGN(mem, DWC3_ALIGN_MASK + 1);
- dwc->mem = mem;
- dwc->dev = dev;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "missing memory resource\n");
- return -ENODEV;
- }
-
- dwc->xhci_resources[0].start = res->start;
- dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
- DWC3_XHCI_REGS_END;
- dwc->xhci_resources[0].flags = res->flags;
- dwc->xhci_resources[0].name = res->name;
-
- res->start += DWC3_GLOBALS_REGS_START;
-
- /*
- * Request memory region but exclude xHCI regs,
- * since it will be requested by the xhci-plat driver.
- */
- regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(regs)) {
- ret = PTR_ERR(regs);
- goto err0;
- }
-
- dwc->regs = regs;
- dwc->regs_size = resource_size(res);
-
/* default to highest possible threshold */
lpm_nyet_threshold = 0xff;
@@ -986,6 +926,13 @@ static int dwc3_probe(struct platform_device *pdev)
dwc->dr_mode = usb_get_dr_mode(dev);
dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);
+ dwc->sysdev_is_parent = device_property_read_bool(dev,
+ "linux,sysdev_is_parent");
+ if (dwc->sysdev_is_parent)
+ dwc->sysdev = dwc->dev->parent;
+ else
+ dwc->sysdev = dwc->dev;
+
dwc->has_lpm_erratum = device_property_read_bool(dev,
"snps,has-lpm-erratum");
device_property_read_u8(dev, "snps,lpm-nyet-threshold",
@@ -1041,6 +988,112 @@ static int dwc3_probe(struct platform_device *pdev)
dwc->hird_threshold = hird_threshold
| (dwc->is_utmi_l1_suspend << 4);
+ dwc->imod_interval = 0;
+}
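The "linux,sysdev_is_parent" property read above lets a glue driver redirect all DMA mapping to its own (parent) device rather than the dwc3 child. A minimal sketch of how a glue driver could attach the property before registering the dwc3 platform device; the array and pdev names are illustrative, while platform_device_add_properties() and PROPERTY_ENTRY_BOOL() are the same APIs the PCI glue uses further down in this patch:

	static const struct property_entry dwc3_glue_properties[] = {
		/* make dwc3 do DMA on behalf of the parent (glue) device */
		PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
		{ },
	};

	/* in the glue driver's probe(), before platform_device_add() */
	ret = platform_device_add_properties(dwc3_pdev, dwc3_glue_properties);
	if (ret)
		return ret;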
+
+/* check whether the core supports IMOD */
+bool dwc3_has_imod(struct dwc3 *dwc)
+{
+ return ((dwc3_is_usb3(dwc) &&
+ dwc->revision >= DWC3_REVISION_300A) ||
+ (dwc3_is_usb31(dwc) &&
+ dwc->revision >= DWC3_USB31_REVISION_120A));
+}
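dwc3_has_imod() gates access to the new DEV_IMOD register, whose interval field counts in 250 ns increments (so a value of 4000 amounts to roughly 1 ms of interrupt moderation). A hedged sketch of programming it, mirroring what __dwc3_gadget_start() does later in this patch:

	/* sketch: only touch DEV_IMOD on cores that implement it */
	if (dwc3_has_imod(dwc))
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0),
			    dwc->imod_interval & DWC3_DEV_IMOD_INTERVAL_MASK);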
+
+static void dwc3_check_params(struct dwc3 *dwc)
+{
+ struct device *dev = dwc->dev;
+
+ /* Check for proper value of imod_interval */
+ if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
+ dev_warn(dwc->dev, "Interrupt moderation not supported\n");
+ dwc->imod_interval = 0;
+ }
+
+ /*
+ * Workaround for STAR 9000961433 which affects only version
+ * 3.00a of the DWC_usb3 core. This prevents the controller
+ * interrupt from being masked while handling events. IMOD
+ * allows us to work around this issue. Enable it for the
+ * affected version.
+ */
+ if (!dwc->imod_interval &&
+ (dwc->revision == DWC3_REVISION_300A))
+ dwc->imod_interval = 1;
+
+ /* Check the maximum_speed parameter */
+ switch (dwc->maximum_speed) {
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
+ break;
+ default:
+ dev_err(dev, "invalid maximum_speed parameter %d\n",
+ dwc->maximum_speed);
+ /* fall through */
+ case USB_SPEED_UNKNOWN:
+ /* default to superspeed */
+ dwc->maximum_speed = USB_SPEED_SUPER;
+
+ /*
+ * default to superspeed plus if we are capable.
+ */
+ if (dwc3_is_usb31(dwc) &&
+ (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
+ DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
+ dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
+
+ break;
+ }
+}
+
+static int dwc3_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct dwc3 *dwc;
+
+ int ret;
+
+ void __iomem *regs;
+
+ dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
+ if (!dwc)
+ return -ENOMEM;
+
+ dwc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "missing memory resource\n");
+ return -ENODEV;
+ }
+
+ dwc->xhci_resources[0].start = res->start;
+ dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
+ DWC3_XHCI_REGS_END;
+ dwc->xhci_resources[0].flags = res->flags;
+ dwc->xhci_resources[0].name = res->name;
+
+ res->start += DWC3_GLOBALS_REGS_START;
+
+ /*
+ * Request memory region but exclude xHCI regs,
+ * since it will be requested by the xhci-plat driver.
+ */
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs)) {
+ ret = PTR_ERR(regs);
+ goto err0;
+ }
+
+ dwc->regs = regs;
+ dwc->regs_size = resource_size(res);
+
+ dwc3_get_properties(dwc);
+
platform_set_drvdata(pdev, dwc);
dwc3_cache_hwparams(dwc);
@@ -1050,12 +1103,6 @@ static int dwc3_probe(struct platform_device *pdev)
spin_lock_init(&dwc->lock);
- if (!dev->dma_mask) {
- dev->dma_mask = dev->parent->dma_mask;
- dev->dma_parms = dev->parent->dma_parms;
- dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
- }
-
pm_runtime_set_active(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
@@ -1087,32 +1134,7 @@ static int dwc3_probe(struct platform_device *pdev)
goto err4;
}
- /* Check the maximum_speed parameter */
- switch (dwc->maximum_speed) {
- case USB_SPEED_LOW:
- case USB_SPEED_FULL:
- case USB_SPEED_HIGH:
- case USB_SPEED_SUPER:
- case USB_SPEED_SUPER_PLUS:
- break;
- default:
- dev_err(dev, "invalid maximum_speed parameter %d\n",
- dwc->maximum_speed);
- /* fall through */
- case USB_SPEED_UNKNOWN:
- /* default to superspeed */
- dwc->maximum_speed = USB_SPEED_SUPER;
-
- /*
- * default to superspeed plus if we are capable.
- */
- if (dwc3_is_usb31(dwc) &&
- (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
- DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
- dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
-
- break;
- }
+ dwc3_check_params(dwc);
ret = dwc3_core_init_mode(dwc);
if (ret)
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 6b60e42626a2..de5a8570be04 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -26,6 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
+#include <linux/wait.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -37,6 +38,7 @@
#define DWC3_MSG_MAX 500
/* Global constants */
+#define DWC3_PULL_UP_TIMEOUT 500 /* ms */
#define DWC3_ZLP_BUF_SIZE 1024 /* size of a superspeed bulk */
#define DWC3_EP0_BOUNCE_SIZE 512
#define DWC3_ENDPOINTS_NUM 32
@@ -65,6 +67,7 @@
#define DWC3_DEVICE_EVENT_OVERFLOW 11
#define DWC3_GEVNTCOUNT_MASK 0xfffc
+#define DWC3_GEVNTCOUNT_EHB (1 << 31)
#define DWC3_GSNPSID_MASK 0xffff0000
#define DWC3_GSNPSREV_MASK 0xffff
@@ -147,6 +150,8 @@
#define DWC3_DEPCMDPAR0 0x08
#define DWC3_DEPCMD 0x0c
+#define DWC3_DEV_IMOD(n) (0xca00 + (n * 0x4))
+
/* OTG Registers */
#define DWC3_OCFG 0xcc00
#define DWC3_OCTL 0xcc04
@@ -198,6 +203,9 @@
#define DWC3_GCTL_GBLHIBERNATIONEN (1 << 1)
#define DWC3_GCTL_DSBLCLKGTNG (1 << 0)
+/* Global User Control 1 Register */
+#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW (1 << 24)
+
/* Global USB2 PHY Configuration Register */
#define DWC3_GUSB2PHYCFG_PHYSOFTRST (1 << 31)
#define DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS (1 << 30)
@@ -450,6 +458,8 @@
#define DWC3_DEPCMD_SETTRANSFRESOURCE (0x02 << 0)
#define DWC3_DEPCMD_SETEPCONFIG (0x01 << 0)
+#define DWC3_DEPCMD_CMD(x) ((x) & 0xf)
+
/* The EP number goes 0..31 so ep0 is always out and ep1 is always in */
#define DWC3_DALEPENA_EP(n) (1 << n)
@@ -458,6 +468,11 @@
#define DWC3_DEPCMD_TYPE_BULK 2
#define DWC3_DEPCMD_TYPE_INTR 3
+#define DWC3_DEV_IMOD_COUNT_SHIFT 16
+#define DWC3_DEV_IMOD_COUNT_MASK (0xffff << 16)
+#define DWC3_DEV_IMOD_INTERVAL_SHIFT 0
+#define DWC3_DEV_IMOD_INTERVAL_MASK (0xffff << 0)
+
/* Structures */
struct dwc3_trb;
@@ -465,6 +480,7 @@ struct dwc3_trb;
/**
* struct dwc3_event_buffer - Software event buffer representation
* @buf: _THE_ buffer
+ * @cache: The buffer cache used in the threaded interrupt
* @length: size of this buffer
* @lpos: event offset
* @count: cache of last read event count register
@@ -474,6 +490,7 @@ struct dwc3_trb;
*/
struct dwc3_event_buffer {
void *buf;
+ void *cache;
unsigned length;
unsigned int lpos;
unsigned int count;
@@ -499,6 +516,7 @@ struct dwc3_event_buffer {
* @endpoint: usb endpoint
* @pending_list: list of pending requests for this endpoint
* @started_list: list of started requests on this endpoint
+ * @wait_end_transfer: wait_queue_head_t for waiting on End Transfer complete
* @lock: spinlock for endpoint request queue traversal
* @regs: pointer to first endpoint register
* @trb_pool: array of transaction buffers
@@ -524,12 +542,13 @@ struct dwc3_ep {
struct list_head pending_list;
struct list_head started_list;
+ wait_queue_head_t wait_end_transfer;
+
spinlock_t lock;
void __iomem *regs;
struct dwc3_trb *trb_pool;
dma_addr_t trb_pool_dma;
- const struct usb_ss_ep_comp_descriptor *comp_desc;
struct dwc3 *dwc;
u32 saved_state;
@@ -540,6 +559,8 @@ struct dwc3_ep {
#define DWC3_EP_BUSY (1 << 4)
#define DWC3_EP_PENDING_REQUEST (1 << 5)
#define DWC3_EP_MISSED_ISOC (1 << 6)
+#define DWC3_EP_END_TRANSFER_PENDING (1 << 7)
+#define DWC3_EP_TRANSFER_STARTED (1 << 8)
/* This last one is specific to EP0 */
#define DWC3_EP0_DIR_IN (1 << 31)
@@ -703,7 +724,7 @@ struct dwc3_hwparams {
* @dep: struct dwc3_ep owning this request
* @sg: pointer to first incomplete sg
* @num_pending_sgs: counter to pending sgs
- * @first_trb_index: index to first trb used by this request
+ * @remaining: amount of data remaining
* @epnum: endpoint number to which this request refers
* @trb: pointer to struct dwc3_trb
* @trb_dma: DMA address of @trb
@@ -718,7 +739,7 @@ struct dwc3_request {
struct scatterlist *sg;
unsigned num_pending_sgs;
- u8 first_trb_index;
+ unsigned remaining;
u8 epnum;
struct dwc3_trb *trb;
dma_addr_t trb_dma;
@@ -748,6 +769,7 @@ struct dwc3_scratchpad_array {
* @ep0_usb_req: dummy req used while handling STD USB requests
* @ep0_bounce_addr: dma address of ep0_bounce
* @scratch_addr: dma address of scratchbuf
+ * @ep0_in_setup: completed when a control transfer ends and ep0 re-enters the Setup phase
* @lock: for synchronizing
* @dev: pointer to our struct device
* @xhci: pointer to our xHCI child
@@ -784,7 +806,6 @@ struct dwc3_scratchpad_array {
* @ep0state: state of endpoint zero
* @link_state: link state
* @speed: device speed (super, high, full, low)
- * @mem: points to start of memory which is used for this struct.
* @hwparams: copy of hwparams registers
* @root: debugfs root folder pointer
* @regset: debugfs pointer to regdump file
@@ -798,6 +819,7 @@ struct dwc3_scratchpad_array {
* @ep0_bounced: true when we used bounce buffer
* @ep0_expect_in: true when we expect a DATA IN transfer
* @has_hibernation: true when dwc3 was configured with Hibernation
+ * @sysdev_is_parent: true when dwc3 device has a parent driver
* @has_lpm_erratum: true when core was configured with LPM Erratum. Note that
* there's no way for software to detect this at runtime.
* @is_utmi_l1_suspend: the core asserts output signal
@@ -833,6 +855,8 @@ struct dwc3_scratchpad_array {
* 1 - -3.5dB de-emphasis
* 2 - No de-emphasis
* 3 - Reserved
+ * @imod_interval: set the interrupt moderation interval in 250ns
+ * increments or 0 to disable.
*/
struct dwc3 {
struct usb_ctrlrequest *ctrl_req;
@@ -846,11 +870,13 @@ struct dwc3 {
dma_addr_t ep0_bounce_addr;
dma_addr_t scratch_addr;
struct dwc3_request ep0_usb_req;
+ struct completion ep0_in_setup;
/* device lock */
spinlock_t lock;
struct device *dev;
+ struct device *sysdev;
struct platform_device *xhci;
struct resource xhci_resources[DWC3_XHCI_RESOURCES_NUM];
@@ -909,6 +935,7 @@ struct dwc3 {
#define DWC3_REVISION_260A 0x5533260a
#define DWC3_REVISION_270A 0x5533270a
#define DWC3_REVISION_280A 0x5533280a
+#define DWC3_REVISION_290A 0x5533290a
#define DWC3_REVISION_300A 0x5533300a
#define DWC3_REVISION_310A 0x5533310a
@@ -918,6 +945,7 @@ struct dwc3 {
*/
#define DWC3_REVISION_IS_DWC31 0x80000000
#define DWC3_USB31_REVISION_110A (0x3131302a | DWC3_REVISION_IS_DWC31)
+#define DWC3_USB31_REVISION_120A (0x3132302a | DWC3_REVISION_IS_DWC31)
enum dwc3_ep0_next ep0_next_event;
enum dwc3_ep0_state ep0state;
@@ -934,8 +962,6 @@ struct dwc3 {
u8 num_out_eps;
u8 num_in_eps;
- void *mem;
-
struct dwc3_hwparams hwparams;
struct dentry *root;
struct debugfs_regset32 *regset;
@@ -952,6 +978,7 @@ struct dwc3 {
unsigned ep0_bounced:1;
unsigned ep0_expect_in:1;
unsigned has_hibernation:1;
+ unsigned sysdev_is_parent:1;
unsigned has_lpm_erratum:1;
unsigned is_utmi_l1_suspend:1;
unsigned is_fpga:1;
@@ -978,6 +1005,8 @@ struct dwc3 {
unsigned tx_de_emphasis_quirk:1;
unsigned tx_de_emphasis:2;
+
+ u16 imod_interval;
};
/* -------------------------------------------------------------------------- */
@@ -1039,12 +1068,16 @@ struct dwc3_event_depevt {
/* Control-only Status */
#define DEPEVT_STATUS_CONTROL_DATA 1
#define DEPEVT_STATUS_CONTROL_STATUS 2
+#define DEPEVT_STATUS_CONTROL_PHASE(n) ((n) & 3)
/* In response to Start Transfer */
#define DEPEVT_TRANSFER_NO_RESOURCE 1
#define DEPEVT_TRANSFER_BUS_EXPIRY 2
u32 parameters:16;
+
+/* For Command Complete Events */
+#define DEPEVT_PARAMETER_CMD(n) (((n) & (0xf << 8)) >> 8)
} __packed;
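For Endpoint Command Complete events, bits 11:8 of @parameters carry the type of command that finished, and DEPEVT_PARAMETER_CMD() extracts it. A sketch of how the gadget side can consume it to clear the new End Transfer bookkeeping; the surrounding handler context is assumed, only the macro and flags come from this patch:

	u8 cmd = DEPEVT_PARAMETER_CMD(event->parameters);

	if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
		dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
		wake_up(&dep->wait_end_transfer);
	}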
/**
@@ -1133,12 +1166,20 @@ struct dwc3_gadget_ep_cmd_params {
void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type);
+/* check whether we are on the DWC_usb3 core */
+static inline bool dwc3_is_usb3(struct dwc3 *dwc)
+{
+ return !(dwc->revision & DWC3_REVISION_IS_DWC31);
+}
+
/* check whether we are on the DWC_usb31 core */
static inline bool dwc3_is_usb31(struct dwc3 *dwc)
{
return !!(dwc->revision & DWC3_REVISION_IS_DWC31);
}
+bool dwc3_has_imod(struct dwc3 *dwc);
+
#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
int dwc3_host_init(struct dwc3 *dwc);
void dwc3_host_exit(struct dwc3 *dwc);
diff --git a/drivers/usb/dwc3/debug.c b/drivers/usb/dwc3/debug.c
deleted file mode 100644
index 0be6885bc370..000000000000
--- a/drivers/usb/dwc3/debug.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * debug.c - DesignWare USB3 DRD Controller Debug/Trace Support
- *
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
- *
- * Author: Felipe Balbi <balbi@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include "debug.h"
-
-void dwc3_trace(void (*trace)(struct va_format *), const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
-
- trace(&vaf);
-
- va_end(args);
-}
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 33ab2a203c1b..eeed4ffd8131 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -124,6 +124,22 @@ dwc3_gadget_link_string(enum dwc3_link_state link_state)
}
}
+static inline const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
+{
+ switch (state) {
+ case EP0_UNCONNECTED:
+ return "Unconnected";
+ case EP0_SETUP_PHASE:
+ return "Setup Phase";
+ case EP0_DATA_PHASE:
+ return "Data Phase";
+ case EP0_STATUS_PHASE:
+ return "Status Phase";
+ default:
+ return "UNKNOWN";
+ }
+}
+
/**
* dwc3_gadget_event_string - returns event name
* @event: the event code
@@ -184,10 +200,11 @@ dwc3_gadget_event_string(const struct dwc3_event_devt *event)
* @event: then event code
*/
static inline const char *
-dwc3_ep_event_string(const struct dwc3_event_depevt *event)
+dwc3_ep_event_string(const struct dwc3_event_depevt *event, u32 ep0state)
{
u8 epnum = event->endpoint_number;
static char str[256];
+ size_t len;
int status;
int ret;
@@ -199,6 +216,10 @@ dwc3_ep_event_string(const struct dwc3_event_depevt *event)
switch (event->endpoint_event) {
case DWC3_DEPEVT_XFERCOMPLETE:
strcat(str, "Transfer Complete");
+ len = strlen(str);
+
+ if (epnum <= 1)
+ sprintf(str + len, " [%s]", dwc3_ep0_state_string(ep0state));
break;
case DWC3_DEPEVT_XFERINPROGRESS:
strcat(str, "Transfer In-Progress");
@@ -207,6 +228,19 @@ dwc3_ep_event_string(const struct dwc3_event_depevt *event)
strcat(str, "Transfer Not Ready");
status = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
strcat(str, status ? " (Active)" : " (Not Active)");
+
+ /* Control Endpoints */
+ if (epnum <= 1) {
+ int phase = DEPEVT_STATUS_CONTROL_PHASE(event->status);
+
+ switch (phase) {
+ case DEPEVT_STATUS_CONTROL_DATA:
+ strcat(str, " [Data Phase]");
+ break;
+ case DEPEVT_STATUS_CONTROL_STATUS:
+ strcat(str, " [Status Phase]");
+ }
+ }
break;
case DWC3_DEPEVT_RXTXFIFOEVT:
strcat(str, "FIFO");
@@ -270,14 +304,14 @@ static inline const char *dwc3_gadget_event_type_string(u8 event)
}
}
-static inline const char *dwc3_decode_event(u32 event)
+static inline const char *dwc3_decode_event(u32 event, u32 ep0state)
{
const union dwc3_event evt = (union dwc3_event) event;
if (evt.type.is_devspec)
return dwc3_gadget_event_string(&evt.devt);
else
- return dwc3_ep_event_string(&evt.depevt);
+ return dwc3_ep_event_string(&evt.depevt, ep0state);
}
static inline const char *dwc3_ep_cmd_status_string(int status)
@@ -310,7 +344,6 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
}
}
-void dwc3_trace(void (*trace)(struct va_format *), const char *fmt, ...);
#ifdef CONFIG_DEBUG_FS
extern void dwc3_debugfs_init(struct dwc3 *);
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 2f1fb7e7aa54..e27899bb5706 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/usb/otg.h>
#include <linux/usb/usb_phy_generic.h>
@@ -117,15 +116,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
if (!exynos)
return -ENOMEM;
- /*
- * Right now device-tree probed devices don't get dma_mask set.
- * Since shared usb code relies on it, set it here for now.
- * Once we move to full device tree support this will vanish off.
- */
- ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
platform_set_drvdata(pdev, exynos);
exynos->dev = dev;
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 6df0f5dad9a4..2b73339f286b 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -39,6 +39,27 @@
#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
+#define PCI_INTEL_BXT_DSM_UUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
+#define PCI_INTEL_BXT_FUNC_PMU_PWR 4
+#define PCI_INTEL_BXT_STATE_D0 0
+#define PCI_INTEL_BXT_STATE_D3 3
+
+/**
+ * struct dwc3_pci - Driver private structure
+ * @dwc3: child dwc3 platform_device
+ * @pci: our link to PCI bus
+ * @uuid: _DSM UUID
+ * @has_dsm_for_pm: true for devices which need to run _DSM on runtime PM
+ */
+struct dwc3_pci {
+ struct platform_device *dwc3;
+ struct pci_dev *pci;
+
+ u8 uuid[16];
+
+ unsigned int has_dsm_for_pm:1;
+};
+
static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
static const struct acpi_gpio_params cs_gpios = { 1, 0, false };
@@ -48,8 +69,21 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = {
{ },
};
-static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
+static int dwc3_pci_quirks(struct dwc3_pci *dwc)
{
+ struct platform_device *dwc3 = dwc->dwc3;
+ struct pci_dev *pdev = dwc->pci;
+ int ret;
+
+ struct property_entry sysdev_property[] = {
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ { },
+ };
+
+ ret = platform_device_add_properties(dwc3, sysdev_property);
+ if (ret)
+ return ret;
+
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
pdev->device == PCI_DEVICE_ID_AMD_NL_USB) {
struct property_entry properties[] = {
@@ -89,6 +123,12 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
if (ret < 0)
return ret;
+ if (pdev->device == PCI_DEVICE_ID_INTEL_BXT ||
+ pdev->device == PCI_DEVICE_ID_INTEL_BXT_M) {
+ acpi_str_to_uuid(PCI_INTEL_BXT_DSM_UUID, dwc->uuid);
+ dwc->has_dsm_for_pm = true;
+ }
+
if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) {
struct gpio_desc *gpio;
@@ -139,8 +179,8 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
static int dwc3_pci_probe(struct pci_dev *pci,
const struct pci_device_id *id)
{
+ struct dwc3_pci *dwc;
struct resource res[2];
- struct platform_device *dwc3;
int ret;
struct device *dev = &pci->dev;
@@ -152,11 +192,13 @@ static int dwc3_pci_probe(struct pci_dev *pci,
pci_set_master(pci);
- dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
- if (!dwc3) {
- dev_err(dev, "couldn't allocate dwc3 device\n");
+ dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
+ if (!dwc)
+ return -ENOMEM;
+
+ dwc->dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
+ if (!dwc->dwc3)
return -ENOMEM;
- }
memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
@@ -169,20 +211,21 @@ static int dwc3_pci_probe(struct pci_dev *pci,
res[1].name = "dwc_usb3";
res[1].flags = IORESOURCE_IRQ;
- ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res));
+ ret = platform_device_add_resources(dwc->dwc3, res, ARRAY_SIZE(res));
if (ret) {
dev_err(dev, "couldn't add resources to dwc3 device\n");
return ret;
}
- dwc3->dev.parent = dev;
- ACPI_COMPANION_SET(&dwc3->dev, ACPI_COMPANION(dev));
+ dwc->pci = pci;
+ dwc->dwc3->dev.parent = dev;
+ ACPI_COMPANION_SET(&dwc->dwc3->dev, ACPI_COMPANION(dev));
- ret = dwc3_pci_quirks(pci, dwc3);
+ ret = dwc3_pci_quirks(dwc);
if (ret)
goto err;
- ret = platform_device_add(dwc3);
+ ret = platform_device_add(dwc->dwc3);
if (ret) {
dev_err(dev, "failed to register dwc3 device\n");
goto err;
@@ -190,21 +233,23 @@ static int dwc3_pci_probe(struct pci_dev *pci,
device_init_wakeup(dev, true);
device_set_run_wake(dev, true);
- pci_set_drvdata(pci, dwc3);
+ pci_set_drvdata(pci, dwc);
pm_runtime_put(dev);
return 0;
err:
- platform_device_put(dwc3);
+ platform_device_put(dwc->dwc3);
return ret;
}
static void dwc3_pci_remove(struct pci_dev *pci)
{
+ struct dwc3_pci *dwc = pci_get_drvdata(pci);
+
device_init_wakeup(&pci->dev, false);
pm_runtime_get(&pci->dev);
acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pci->dev));
- platform_device_unregister(pci_get_drvdata(pci));
+ platform_device_unregister(dwc->dwc3);
}
static const struct pci_device_id dwc3_pci_id_table[] = {
@@ -234,40 +279,75 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
};
MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
+#if defined(CONFIG_PM) || defined(CONFIG_PM_SLEEP)
+static int dwc3_pci_dsm(struct dwc3_pci *dwc, int param)
+{
+ union acpi_object *obj;
+ union acpi_object tmp;
+ union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(1, &tmp);
+
+ if (!dwc->has_dsm_for_pm)
+ return 0;
+
+ tmp.type = ACPI_TYPE_INTEGER;
+ tmp.integer.value = param;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(&dwc->pci->dev), dwc->uuid,
+ 1, PCI_INTEL_BXT_FUNC_PMU_PWR, &argv4);
+ if (!obj) {
+ dev_err(&dwc->pci->dev, "failed to evaluate _DSM\n");
+ return -EIO;
+ }
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+#endif /* CONFIG_PM || CONFIG_PM_SLEEP */
+
#ifdef CONFIG_PM
static int dwc3_pci_runtime_suspend(struct device *dev)
{
+ struct dwc3_pci *dwc = dev_get_drvdata(dev);
+
if (device_run_wake(dev))
- return 0;
+ return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D3);
return -EBUSY;
}
static int dwc3_pci_runtime_resume(struct device *dev)
{
- struct platform_device *dwc3 = dev_get_drvdata(dev);
+ struct dwc3_pci *dwc = dev_get_drvdata(dev);
+ struct platform_device *dwc3 = dwc->dwc3;
+ int ret;
+
+ ret = dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0);
+ if (ret)
+ return ret;
return pm_runtime_get(&dwc3->dev);
}
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
-static int dwc3_pci_pm_dummy(struct device *dev)
+static int dwc3_pci_suspend(struct device *dev)
{
- /*
- * There's nothing to do here. No, seriously. Everything is either taken
- * care either by PCI subsystem or dwc3/core.c, so we have nothing
- * missing here.
- *
- * So you'd think we didn't need this at all, but PCI subsystem will
- * bail out if we don't have a valid callback :-s
- */
- return 0;
+ struct dwc3_pci *dwc = dev_get_drvdata(dev);
+
+ return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D3);
+}
+
+static int dwc3_pci_resume(struct device *dev)
+{
+ struct dwc3_pci *dwc = dev_get_drvdata(dev);
+
+ return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0);
}
#endif /* CONFIG_PM_SLEEP */
static struct dev_pm_ops dwc3_pci_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_pm_dummy, dwc3_pci_pm_dummy)
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_suspend, dwc3_pci_resume)
SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_runtime_resume,
NULL)
};
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index aaaf256f71dd..dfbf464eb88c 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -219,7 +219,6 @@ static int st_dwc3_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- dma_set_coherent_mask(dev, dev->coherent_dma_mask);
dwc3_data->dev = dev;
dwc3_data->regmap = regmap;
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index fe79d771dee4..4878d187c7d4 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -39,22 +39,6 @@ static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
struct dwc3_ep *dep, struct dwc3_request *req);
-static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
-{
- switch (state) {
- case EP0_UNCONNECTED:
- return "Unconnected";
- case EP0_SETUP_PHASE:
- return "Setup Phase";
- case EP0_DATA_PHASE:
- return "Data Phase";
- case EP0_STATUS_PHASE:
- return "Status Phase";
- default:
- return "UNKNOWN";
- }
-}
-
static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
u32 len, u32 type, bool chain)
{
@@ -65,10 +49,8 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
int ret;
dep = dwc->eps[epnum];
- if (dep->flags & DWC3_EP_BUSY) {
- dwc3_trace(trace_dwc3_ep0, "%s still busy", dep->name);
+ if (dep->flags & DWC3_EP_BUSY)
return 0;
- }
trb = &dwc->ep0_trb[dep->trb_enqueue];
@@ -99,11 +81,8 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
trace_dwc3_prepare_trb(dep, trb);
ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, &params);
- if (ret < 0) {
- dwc3_trace(trace_dwc3_ep0, "%s STARTTRANSFER failed",
- dep->name);
+ if (ret < 0)
return ret;
- }
dep->flags |= DWC3_EP_BUSY;
dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
@@ -163,9 +142,6 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
if (dwc->ep0state == EP0_STATUS_PHASE)
__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
- else
- dwc3_trace(trace_dwc3_ep0,
- "too early for delayed status");
return 0;
}
@@ -229,9 +205,8 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
spin_lock_irqsave(&dwc->lock, flags);
if (!dep->endpoint.desc) {
- dwc3_trace(trace_dwc3_ep0,
- "trying to queue request %p to disabled %s",
- request, dep->name);
+ dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
+ dep->name);
ret = -ESHUTDOWN;
goto out;
}
@@ -242,11 +217,6 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
goto out;
}
- dwc3_trace(trace_dwc3_ep0,
- "queueing request %p to %s length %d state '%s'",
- request, dep->name, request->length,
- dwc3_ep0_state_string(dwc->ep0state));
-
ret = __dwc3_gadget_ep0_queue(dep, req);
out:
@@ -308,6 +278,8 @@ void dwc3_ep0_out_start(struct dwc3 *dwc)
{
int ret;
+ complete(&dwc->ep0_in_setup);
+
ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8,
DWC3_TRBCTL_CONTROL_SETUP, false);
WARN_ON(ret < 0);
@@ -395,126 +367,198 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc,
return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}
-static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
+static int dwc3_ep0_handle_u1(struct dwc3 *dwc, enum usb_device_state state,
+ int set)
+{
+ u32 reg;
+
+ if (state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+ if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
+ (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
+ return -EINVAL;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (set)
+ reg |= DWC3_DCTL_INITU1ENA;
+ else
+ reg &= ~DWC3_DCTL_INITU1ENA;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ return 0;
+}
+
+static int dwc3_ep0_handle_u2(struct dwc3 *dwc, enum usb_device_state state,
+ int set)
+{
+ u32 reg;
+
+ if (state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+ if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
+ (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
+ return -EINVAL;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (set)
+ reg |= DWC3_DCTL_INITU2ENA;
+ else
+ reg &= ~DWC3_DCTL_INITU2ENA;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ return 0;
+}
+
+static int dwc3_ep0_handle_test(struct dwc3 *dwc, enum usb_device_state state,
+ u32 wIndex, int set)
+{
+ if ((wIndex & 0xff) != 0)
+ return -EINVAL;
+ if (!set)
+ return -EINVAL;
+
+ switch (wIndex >> 8) {
+ case TEST_J:
+ case TEST_K:
+ case TEST_SE0_NAK:
+ case TEST_PACKET:
+ case TEST_FORCE_EN:
+ dwc->test_mode_nr = wIndex >> 8;
+ dwc->test_mode = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
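Per USB 2.0 section 9.4.9, the test selector travels in the high byte of wIndex and the low byte must be zero, which is exactly what the checks above enforce. For example, SetFeature(TEST_MODE) with wIndex = 0x0400 selects TEST_PACKET (illustrative values only):

	/* selector lives in bits 15:8 of wIndex */
	u16 wIndex = 0x0400;		/* TEST_PACKET */
	u8 selector = wIndex >> 8;	/* == TEST_PACKET == 4 */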
+
+static int dwc3_ep0_handle_device(struct dwc3 *dwc,
struct usb_ctrlrequest *ctrl, int set)
{
- struct dwc3_ep *dep;
- u32 recip;
+ enum usb_device_state state;
u32 wValue;
u32 wIndex;
- u32 reg;
- int ret;
- enum usb_device_state state;
+ int ret = 0;
wValue = le16_to_cpu(ctrl->wValue);
wIndex = le16_to_cpu(ctrl->wIndex);
- recip = ctrl->bRequestType & USB_RECIP_MASK;
state = dwc->gadget.state;
- switch (recip) {
- case USB_RECIP_DEVICE:
+ switch (wValue) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ break;
+ /*
+ * 9.4.1 says only for SS, in AddressState only for
+ * default control pipe
+ */
+ case USB_DEVICE_U1_ENABLE:
+ ret = dwc3_ep0_handle_u1(dwc, state, set);
+ break;
+ case USB_DEVICE_U2_ENABLE:
+ ret = dwc3_ep0_handle_u2(dwc, state, set);
+ break;
+ case USB_DEVICE_LTM_ENABLE:
+ ret = -EINVAL;
+ break;
+ case USB_DEVICE_TEST_MODE:
+ ret = dwc3_ep0_handle_test(dwc, state, wIndex, set);
+ break;
+ default:
+ ret = -EINVAL;
+ }
- switch (wValue) {
- case USB_DEVICE_REMOTE_WAKEUP:
- break;
+ return ret;
+}
+
+static int dwc3_ep0_handle_intf(struct dwc3 *dwc,
+ struct usb_ctrlrequest *ctrl, int set)
+{
+ enum usb_device_state state;
+ u32 wValue;
+ u32 wIndex;
+ int ret = 0;
+
+ wValue = le16_to_cpu(ctrl->wValue);
+ wIndex = le16_to_cpu(ctrl->wIndex);
+ state = dwc->gadget.state;
+
+ switch (wValue) {
+ case USB_INTRF_FUNC_SUSPEND:
/*
- * 9.4.1 says only only for SS, in AddressState only for
- * default control pipe
+ * REVISIT: Ideally we would enable some low power mode here,
+ * however it's unclear what we should be doing here.
+ *
+ * For now, we're not doing anything, just making sure we return
+ * 0 so USB Command Verifier tests pass without any errors.
*/
- case USB_DEVICE_U1_ENABLE:
- if (state != USB_STATE_CONFIGURED)
- return -EINVAL;
- if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
- (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
- return -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
- if (set)
- reg |= DWC3_DCTL_INITU1ENA;
- else
- reg &= ~DWC3_DCTL_INITU1ENA;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
- break;
+ return ret;
+}
- case USB_DEVICE_U2_ENABLE:
- if (state != USB_STATE_CONFIGURED)
- return -EINVAL;
- if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
- (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
- return -EINVAL;
+static int dwc3_ep0_handle_endpoint(struct dwc3 *dwc,
+ struct usb_ctrlrequest *ctrl, int set)
+{
+ struct dwc3_ep *dep;
+ enum usb_device_state state;
+ u32 wValue;
+ u32 wIndex;
+ int ret;
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
- if (set)
- reg |= DWC3_DCTL_INITU2ENA;
- else
- reg &= ~DWC3_DCTL_INITU2ENA;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
- break;
+ wValue = le16_to_cpu(ctrl->wValue);
+ wIndex = le16_to_cpu(ctrl->wIndex);
+ state = dwc->gadget.state;
- case USB_DEVICE_LTM_ENABLE:
+ switch (wValue) {
+ case USB_ENDPOINT_HALT:
+ dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
+ if (!dep)
return -EINVAL;
- case USB_DEVICE_TEST_MODE:
- if ((wIndex & 0xff) != 0)
- return -EINVAL;
- if (!set)
- return -EINVAL;
-
- switch (wIndex >> 8) {
- case TEST_J:
- case TEST_K:
- case TEST_SE0_NAK:
- case TEST_PACKET:
- case TEST_FORCE_EN:
- dwc->test_mode_nr = wIndex >> 8;
- dwc->test_mode = true;
- break;
- default:
- return -EINVAL;
- }
+ if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
break;
- default:
+
+ ret = __dwc3_gadget_ep_set_halt(dep, set, true);
+ if (ret)
return -EINVAL;
- }
break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
+ struct usb_ctrlrequest *ctrl, int set)
+{
+ u32 recip;
+ int ret;
+ enum usb_device_state state;
+
+ recip = ctrl->bRequestType & USB_RECIP_MASK;
+ state = dwc->gadget.state;
+
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ ret = dwc3_ep0_handle_device(dwc, ctrl, set);
+ break;
case USB_RECIP_INTERFACE:
- switch (wValue) {
- case USB_INTRF_FUNC_SUSPEND:
- if (wIndex & USB_INTRF_FUNC_SUSPEND_LP)
- /* XXX enable Low power suspend */
- ;
- if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
- /* XXX enable remote wakeup */
- ;
- break;
- default:
- return -EINVAL;
- }
+ ret = dwc3_ep0_handle_intf(dwc, ctrl, set);
break;
-
case USB_RECIP_ENDPOINT:
- switch (wValue) {
- case USB_ENDPOINT_HALT:
- dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
- if (!dep)
- return -EINVAL;
- if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
- break;
- ret = __dwc3_gadget_ep_set_halt(dep, set, true);
- if (ret)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
+ ret = dwc3_ep0_handle_endpoint(dwc, ctrl, set);
break;
-
default:
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+ return ret;
}
static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
@@ -525,13 +569,12 @@ static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
addr = le16_to_cpu(ctrl->wValue);
if (addr > 127) {
- dwc3_trace(trace_dwc3_ep0, "invalid device address %d", addr);
+ dev_err(dwc->dev, "invalid device address %d\n", addr);
return -EINVAL;
}
if (state == USB_STATE_CONFIGURED) {
- dwc3_trace(trace_dwc3_ep0,
- "trying to set address when configured");
+ dev_err(dwc->dev, "can't SetAddress() from Configured State\n");
return -EINVAL;
}
@@ -716,35 +759,27 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
switch (ctrl->bRequest) {
case USB_REQ_GET_STATUS:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_GET_STATUS");
ret = dwc3_ep0_handle_status(dwc, ctrl);
break;
case USB_REQ_CLEAR_FEATURE:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_CLEAR_FEATURE");
ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
break;
case USB_REQ_SET_FEATURE:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_FEATURE");
ret = dwc3_ep0_handle_feature(dwc, ctrl, 1);
break;
case USB_REQ_SET_ADDRESS:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ADDRESS");
ret = dwc3_ep0_set_address(dwc, ctrl);
break;
case USB_REQ_SET_CONFIGURATION:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_CONFIGURATION");
ret = dwc3_ep0_set_config(dwc, ctrl);
break;
case USB_REQ_SET_SEL:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_SEL");
ret = dwc3_ep0_set_sel(dwc, ctrl);
break;
case USB_REQ_SET_ISOCH_DELAY:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
break;
default:
- dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
ret = dwc3_ep0_delegate_req(dwc, ctrl);
break;
}
@@ -820,9 +855,6 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
status = DWC3_TRB_SIZE_TRBSTS(trb->size);
if (status == DWC3_TRBSTS_SETUP_PENDING) {
dwc->setup_packet_pending = true;
-
- dwc3_trace(trace_dwc3_ep0, "Setup Pending received");
-
if (r)
dwc3_gadget_giveback(ep0, r, -ECONNRESET);
@@ -912,7 +944,7 @@ static void dwc3_ep0_complete_status(struct dwc3 *dwc,
ret = dwc3_gadget_set_test_mode(dwc, dwc->test_mode_nr);
if (ret < 0) {
- dwc3_trace(trace_dwc3_ep0, "Invalid Test #%d",
+ dev_err(dwc->dev, "invalid test #%d\n",
dwc->test_mode_nr);
dwc3_ep0_stall_and_restart(dwc);
return;
@@ -920,10 +952,8 @@ static void dwc3_ep0_complete_status(struct dwc3 *dwc,
}
status = DWC3_TRB_SIZE_TRBSTS(trb->size);
- if (status == DWC3_TRBSTS_SETUP_PENDING) {
+ if (status == DWC3_TRBSTS_SETUP_PENDING)
dwc->setup_packet_pending = true;
- dwc3_trace(trace_dwc3_ep0, "Setup Pending received");
- }
dwc->ep0state = EP0_SETUP_PHASE;
dwc3_ep0_out_start(dwc);
@@ -940,17 +970,14 @@ static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
switch (dwc->ep0state) {
case EP0_SETUP_PHASE:
- dwc3_trace(trace_dwc3_ep0, "Setup Phase");
dwc3_ep0_inspect_setup(dwc, event);
break;
case EP0_DATA_PHASE:
- dwc3_trace(trace_dwc3_ep0, "Data Phase");
dwc3_ep0_complete_data(dwc, event);
break;
case EP0_STATUS_PHASE:
- dwc3_trace(trace_dwc3_ep0, "Status Phase");
dwc3_ep0_complete_status(dwc, event);
break;
default:
@@ -974,12 +1001,10 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
u32 transfer_size = 0;
u32 maxpacket;
- ret = usb_gadget_map_request(&dwc->gadget, &req->request,
- dep->number);
- if (ret) {
- dwc3_trace(trace_dwc3_ep0, "failed to map request");
+ ret = usb_gadget_map_request_by_dev(dwc->sysdev,
+ &req->request, dep->number);
+ if (ret)
return;
- }
maxpacket = dep->endpoint.maxpacket;
@@ -1002,12 +1027,10 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
dwc->ep0_bounce_addr, transfer_size,
DWC3_TRBCTL_CONTROL_DATA, false);
} else {
- ret = usb_gadget_map_request(&dwc->gadget, &req->request,
- dep->number);
- if (ret) {
- dwc3_trace(trace_dwc3_ep0, "failed to map request");
+ ret = usb_gadget_map_request_by_dev(dwc->sysdev,
+ &req->request, dep->number);
+ if (ret)
return;
- }
ret = dwc3_ep0_start_trans(dwc, dep->number, req->request.dma,
req->request.length, DWC3_TRBCTL_CONTROL_DATA,
@@ -1065,8 +1088,6 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
{
switch (event->status) {
case DEPEVT_STATUS_CONTROL_DATA:
- dwc3_trace(trace_dwc3_ep0, "Control Data");
-
/*
* We already have a DATA transfer in the controller's cache,
* if we receive a XferNotReady(DATA) we will ignore it, unless
@@ -1079,8 +1100,7 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
if (dwc->ep0_expect_in != event->endpoint_number) {
struct dwc3_ep *dep = dwc->eps[dwc->ep0_expect_in];
- dwc3_trace(trace_dwc3_ep0,
- "Wrong direction for Data phase");
+ dev_err(dwc->dev, "unexpected direction for Data Phase\n");
dwc3_ep0_end_control_data(dwc, dep);
dwc3_ep0_stall_and_restart(dwc);
return;
@@ -1092,13 +1112,10 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS)
return;
- dwc3_trace(trace_dwc3_ep0, "Control Status");
-
dwc->ep0state = EP0_STATUS_PHASE;
if (dwc->delayed_status) {
WARN_ON_ONCE(event->endpoint_number != 1);
- dwc3_trace(trace_dwc3_ep0, "Delayed Status");
return;
}
@@ -1109,10 +1126,6 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
void dwc3_ep0_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
- dwc3_trace(trace_dwc3_ep0, "%s: state '%s'",
- dwc3_ep_event_string(event),
- dwc3_ep0_state_string(dwc->ep0state));
-
switch (event->endpoint_event) {
case DWC3_DEPEVT_XFERCOMPLETE:
dwc3_ep0_xfer_complete(dwc, event);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 1dfa56a5f1c5..efddaf5d11d1 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -139,9 +139,6 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
udelay(5);
}
- dwc3_trace(trace_dwc3_gadget,
- "link state change request timed out");
-
return -ETIMEDOUT;
}
@@ -178,6 +175,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
req->started = false;
list_del(&req->list);
req->trb = NULL;
+ req->remaining = 0;
if (req->request.status == -EINPROGRESS)
req->request.status = status;
@@ -185,8 +183,8 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
if (dwc->ep0_bounced && dep->number == 0)
dwc->ep0_bounced = false;
else
- usb_gadget_unmap_request(&dwc->gadget, &req->request,
- req->direction);
+ usb_gadget_unmap_request_by_dev(dwc->sysdev,
+ &req->request, req->direction);
trace_dwc3_gadget_giveback(req);
@@ -216,7 +214,7 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
ret = -EINVAL;
break;
}
- } while (timeout--);
+ } while (--timeout);
if (!timeout) {
ret = -ETIMEDOUT;
@@ -233,6 +231,7 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
struct dwc3_gadget_ep_cmd_params *params)
{
+ const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
u32 timeout = 500;
u32 reg;
@@ -258,7 +257,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
}
}
- if (cmd == DWC3_DEPCMD_STARTTRANSFER) {
+ if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
int needs_wakeup;
needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
@@ -276,7 +275,28 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
- dwc3_writel(dep->regs, DWC3_DEPCMD, cmd | DWC3_DEPCMD_CMDACT);
+ /*
+ * Synopsys Databook 2.60a states, in section 6.3.2.5.6, that if we're
+ * not relying on XferNotReady, we can make use of a special "No
+ * Response Update Transfer" command where we should clear both CmdAct
+ * and CmdIOC bits.
+ *
+ * With this, we don't need to wait for command completion and can
+ * straight away issue further commands to the endpoint.
+ *
+ * NOTICE: We're making an assumption that control endpoints will never
+ * make use of Update Transfer command. This is a safe assumption
+ * because we can never have more than one request at a time with
+ * Control Endpoints. If anybody changes that assumption, this chunk
+ * needs to be updated accordingly.
+ */
+ if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER &&
+ !usb_endpoint_xfer_isoc(desc))
+ cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT);
+ else
+ cmd |= DWC3_DEPCMD_CMDACT;
+
+ dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
do {
reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
if (!(reg & DWC3_DEPCMD_CMDACT)) {
@@ -318,6 +338,20 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
+ if (ret == 0) {
+ switch (DWC3_DEPCMD_CMD(cmd)) {
+ case DWC3_DEPCMD_STARTTRANSFER:
+ dep->flags |= DWC3_EP_TRANSFER_STARTED;
+ break;
+ case DWC3_DEPCMD_ENDTRANSFER:
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+ break;
+ default:
+ /* nothing */
+ break;
+ }
+ }
+
if (unlikely(susphy)) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
reg |= DWC3_GUSB2PHYCFG_SUSPHY;
@@ -365,7 +399,7 @@ static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
if (dep->trb_pool)
return 0;
- dep->trb_pool = dma_alloc_coherent(dwc->dev,
+ dep->trb_pool = dma_alloc_coherent(dwc->sysdev,
sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
&dep->trb_pool_dma, GFP_KERNEL);
if (!dep->trb_pool) {
@@ -381,7 +415,7 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
- dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
+ dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
dep->trb_pool, dep->trb_pool_dma);
dep->trb_pool = NULL;
@@ -454,16 +488,19 @@ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
}
static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
- const struct usb_endpoint_descriptor *desc,
- const struct usb_ss_ep_comp_descriptor *comp_desc,
bool modify, bool restore)
{
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+ const struct usb_endpoint_descriptor *desc;
struct dwc3_gadget_ep_cmd_params params;
if (dev_WARN_ONCE(dwc->dev, modify && restore,
"Can't modify and restore\n"))
return -EINVAL;
+ comp_desc = dep->endpoint.comp_desc;
+ desc = dep->endpoint.desc;
+
memset(&params, 0x00, sizeof(params));
params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
@@ -542,24 +579,21 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
* Caller should take care of locking
*/
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
- const struct usb_endpoint_descriptor *desc,
- const struct usb_ss_ep_comp_descriptor *comp_desc,
bool modify, bool restore)
{
+ const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
+
u32 reg;
int ret;
- dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
-
if (!(dep->flags & DWC3_EP_ENABLED)) {
ret = dwc3_gadget_start_config(dwc, dep);
if (ret)
return ret;
}
- ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, modify,
- restore);
+ ret = dwc3_gadget_set_ep_config(dwc, dep, modify, restore);
if (ret)
return ret;
@@ -567,17 +601,18 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
struct dwc3_trb *trb_st_hw;
struct dwc3_trb *trb_link;
- dep->endpoint.desc = desc;
- dep->comp_desc = comp_desc;
dep->type = usb_endpoint_type(desc);
dep->flags |= DWC3_EP_ENABLED;
+ dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
reg |= DWC3_DALEPENA_EP(dep->number);
dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+ init_waitqueue_head(&dep->wait_end_transfer);
+
if (usb_endpoint_xfer_control(desc))
- return 0;
+ goto out;
/* Initialize the TRB ring */
dep->trb_dequeue = 0;
@@ -595,6 +630,39 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
}
+ /*
+ * Issue StartTransfer here with no-op TRB so we can always rely on No
+ * Response Update Transfer command.
+ */
+ if (usb_endpoint_xfer_bulk(desc)) {
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3_trb *trb;
+ dma_addr_t trb_dma;
+ u32 cmd;
+
+ memset(&params, 0, sizeof(params));
+ trb = &dep->trb_pool[0];
+ trb_dma = dwc3_trb_dma_offset(dep, trb);
+
+ params.param0 = upper_32_bits(trb_dma);
+ params.param1 = lower_32_bits(trb_dma);
+
+ cmd = DWC3_DEPCMD_STARTTRANSFER;
+
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+ if (ret < 0)
+ return ret;
+
+ dep->flags |= DWC3_EP_BUSY;
+
+ dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
+ WARN_ON_ONCE(!dep->resource_index);
+ }
+
+out:
+ trace_dwc3_gadget_ep_enable(dep);
+
return 0;
}
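With the no-op TRB Start Transfer issued at enable time, later requests on bulk endpoints can be handed over with an Update Transfer command that never waits for completion (the CmdAct/CmdIOC clearing added to dwc3_send_gadget_ep_cmd() above). A sketch of that kick path, assuming dep->resource_index still holds the value cached right after the Start Transfer:

	cmd = DWC3_DEPCMD_UPDATETRANSFER |
	      DWC3_DEPCMD_PARAM(dep->resource_index);
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);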
@@ -632,7 +700,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
struct dwc3 *dwc = dep->dwc;
u32 reg;
- dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);
+ trace_dwc3_gadget_ep_disable(dep);
dwc3_remove_requests(dwc, dep);
@@ -645,10 +713,14 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
dep->stream_capable = false;
- dep->endpoint.desc = NULL;
- dep->comp_desc = NULL;
dep->type = 0;
- dep->flags = 0;
+ dep->flags &= DWC3_EP_END_TRANSFER_PENDING;
+
+ /* Clear out the ep descriptors for non-ep0 */
+ if (dep->number > 1) {
+ dep->endpoint.comp_desc = NULL;
+ dep->endpoint.desc = NULL;
+ }
return 0;
}
@@ -695,7 +767,7 @@ static int dwc3_gadget_ep_enable(struct usb_ep *ep,
return 0;
spin_lock_irqsave(&dwc->lock, flags);
- ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
+ ret = __dwc3_gadget_ep_enable(dep, false, false);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
@@ -771,10 +843,9 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
unsigned length, unsigned chain, unsigned node)
{
struct dwc3_trb *trb;
-
- dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s",
- dep->name, req, (unsigned long long) dma,
- length, chain ? " chain" : "");
+ struct dwc3 *dwc = dep->dwc;
+ struct usb_gadget *gadget = &dwc->gadget;
+ enum usb_device_speed speed = gadget->speed;
trb = &dep->trb_pool[dep->trb_enqueue];
@@ -782,7 +853,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
dwc3_gadget_move_started_request(req);
req->trb = trb;
req->trb_dma = dwc3_trb_dma_offset(dep, trb);
- req->first_trb_index = dep->trb_enqueue;
dep->queued_requests++;
}
@@ -798,10 +868,16 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
break;
case USB_ENDPOINT_XFER_ISOC:
- if (!node)
+ if (!node) {
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
- else
+
+ if (speed == USB_SPEED_HIGH) {
+ struct usb_ep *ep = &dep->endpoint;
+ trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1);
+ }
+ } else {
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
+ }
/* always enable Interrupt on Missed ISOC */
trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
@@ -816,15 +892,21 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
* This is only possible with faulty memory because we
* checked it already :)
*/
- BUG();
+ dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
+ usb_endpoint_type(dep->endpoint.desc));
}
/* always enable Continue on Short Packet */
- trb->ctrl |= DWC3_TRB_CTRL_CSP;
+ if (usb_endpoint_dir_out(dep->endpoint.desc)) {
+ trb->ctrl |= DWC3_TRB_CTRL_CSP;
+
+ if (req->request.short_not_ok)
+ trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
+ }
if ((!req->request.no_interrupt && !chain) ||
(dwc3_calc_trbs_left(dep) == 0))
- trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
+ trb->ctrl |= DWC3_TRB_CTRL_IOC;
if (chain)
trb->ctrl |= DWC3_TRB_CTRL_CHN;
@@ -859,6 +941,7 @@ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
{
struct dwc3_trb *tmp;
+ struct dwc3 *dwc = dep->dwc;
u8 trbs_left;
/*
@@ -870,7 +953,8 @@ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
*/
if (dep->trb_enqueue == dep->trb_dequeue) {
tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
- if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
+ if (dev_WARN_ONCE(dwc->dev, tmp->ctrl & DWC3_TRB_CTRL_HWO,
+ "%s No TRBS left\n", dep->name))
return 0;
return DWC3_TRB_NUM - 1;
@@ -941,6 +1025,24 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
if (!dwc3_calc_trbs_left(dep))
return;
+ /*
+ * We can get in a situation where there's a request in the started list
+ * but there weren't enough TRBs to fully kick it in the first time
+ * around, so it has been waiting for more TRBs to be freed up.
+ *
+ * In that case, we should check if we have a request with pending_sgs
+ * in the started list and prepare TRBs for that request first,
+ * otherwise we will prepare TRBs completely out of order and that will
+ * break things.
+ */
+ list_for_each_entry(req, &dep->started_list, list) {
+ if (req->num_pending_sgs > 0)
+ dwc3_prepare_one_trb_sg(dep, req);
+
+ if (!dwc3_calc_trbs_left(dep))
+ return;
+ }
+
list_for_each_entry_safe(req, n, &dep->pending_list, list) {
if (req->num_pending_sgs > 0)
dwc3_prepare_one_trb_sg(dep, req);
@@ -956,7 +1058,6 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
{
struct dwc3_gadget_ep_cmd_params params;
struct dwc3_request *req;
- struct dwc3 *dwc = dep->dwc;
int starting;
int ret;
u32 cmd;
@@ -989,9 +1090,10 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
* here and stop, unmap, free and del each of the linked
* requests instead of what we do now.
*/
- usb_gadget_unmap_request(&dwc->gadget, &req->request,
- req->direction);
- list_del(&req->list);
+ if (req->trb)
+ memset(req->trb, 0, sizeof(struct dwc3_trb));
+ dep->queued_requests--;
+ dwc3_gadget_giveback(dep, req, ret);
return ret;
}
@@ -1005,14 +1107,21 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
return 0;
}
+static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ return DWC3_DSTS_SOFFN(reg);
+}
+
static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
struct dwc3_ep *dep, u32 cur_uf)
{
u32 uf;
if (list_empty(&dep->pending_list)) {
- dwc3_trace(trace_dwc3_gadget,
- "ISOC ep %s run out for requests",
+ dev_info(dwc->dev, "%s: ran out of requests\n",
dep->name);
dep->flags |= DWC3_EP_PENDING_REQUEST;
return;
@@ -1041,16 +1150,15 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
int ret;
if (!dep->endpoint.desc) {
- dwc3_trace(trace_dwc3_gadget,
- "trying to queue request %p to disabled %s",
- &req->request, dep->endpoint.name);
+ dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
+ dep->name);
return -ESHUTDOWN;
}
if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
&req->request, req->dep->name)) {
- dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'",
- &req->request, req->dep->name);
+ dev_err(dwc->dev, "%s: request %p belongs to '%s'\n",
+ dep->name, &req->request, req->dep->name);
return -EINVAL;
}
@@ -1063,8 +1171,8 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
trace_dwc3_ep_queue(req);
- ret = usb_gadget_map_request(&dwc->gadget, &req->request,
- dep->direction);
+ ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
+ dep->direction);
if (ret)
return ret;
@@ -1082,10 +1190,17 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
* errors which will force us issue EndTransfer command.
*/
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- if ((dep->flags & DWC3_EP_PENDING_REQUEST) &&
- list_empty(&dep->started_list)) {
- dwc3_stop_active_transfer(dwc, dep->number, true);
- dep->flags = DWC3_EP_ENABLED;
+ if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
+ if (dep->flags & DWC3_EP_TRANSFER_STARTED) {
+ dwc3_stop_active_transfer(dwc, dep->number, true);
+ dep->flags = DWC3_EP_ENABLED;
+ } else {
+ u32 cur_uf;
+
+ cur_uf = __dwc3_gadget_get_frame(dwc);
+ __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
+ dep->flags &= ~DWC3_EP_PENDING_REQUEST;
+ }
}
return 0;
}
@@ -1094,10 +1209,6 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
return 0;
ret = __dwc3_gadget_kick_transfer(dep, 0);
- if (ret && ret != -EBUSY)
- dwc3_trace(trace_dwc3_gadget,
- "%s: failed to kick transfers",
- dep->name);
if (ret == -EBUSY)
ret = 0;
@@ -1116,7 +1227,6 @@ static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
struct usb_request *request;
struct usb_ep *ep = &dep->endpoint;
- dwc3_trace(trace_dwc3_gadget, "queueing ZLP");
request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
if (!request)
return -ENOMEM;
@@ -1235,9 +1345,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
if (!protocol && ((dep->direction && transfer_in_flight) ||
(!dep->direction && started))) {
- dwc3_trace(trace_dwc3_gadget,
- "%s: pending request, cannot halt",
- dep->name);
return -EAGAIN;
}
@@ -1331,10 +1438,8 @@ static const struct usb_ep_ops dwc3_gadget_ep_ops = {
static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
struct dwc3 *dwc = gadget_to_dwc(g);
- u32 reg;
- reg = dwc3_readl(dwc->regs, DWC3_DSTS);
- return DWC3_DSTS_SOFFN(reg);
+ return __dwc3_gadget_get_frame(dwc);
}
static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
@@ -1357,10 +1462,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
speed = reg & DWC3_DSTS_CONNECTSPD;
if ((speed == DWC3_DSTS_SUPERSPEED) ||
- (speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
- dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed");
+ (speed == DWC3_DSTS_SUPERSPEED_PLUS))
return 0;
- }
link_state = DWC3_DSTS_USBLNKST(reg);
@@ -1369,9 +1472,6 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
break;
default:
- dwc3_trace(trace_dwc3_gadget,
- "can't wakeup from '%s'",
- dwc3_gadget_link_string(link_state));
return -EINVAL;
}
@@ -1476,11 +1576,6 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
if (!timeout)
return -ETIMEDOUT;
- dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
- dwc->gadget_driver
- ? dwc->gadget_driver->function : "no-function",
- is_on ? "connect" : "disconnect");
-
return 0;
}
@@ -1492,6 +1587,21 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
is_on = !!is_on;
+ /*
+ * Per databook, when we want to stop the gadget, if a control transfer
+ * is still in process, complete it and get the core into setup phase.
+ */
+ if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
+ reinit_completion(&dwc->ep0_in_setup);
+
+ ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
+ msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
+ if (ret == 0) {
+ dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
+ return -ETIMEDOUT;
+ }
+ }
+
spin_lock_irqsave(&dwc->lock, flags);
ret = dwc3_gadget_run_stop(dwc, is_on, false);
spin_unlock_irqrestore(&dwc->lock, flags);
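
The dwc->ep0_in_setup completion waited on above is signalled from the ep0
event handling code once the core reports that control traffic is back in the
SETUP phase; that side is not part of this hunk. A minimal sketch of the
signalling half (the helper name is hypothetical, only complete() and the
ep0state field are taken from this patch):

	/* Sketch only: wake dwc3_gadget_pullup() once ep0 reaches SETUP. */
	static void dwc3_ep0_signal_setup_phase(struct dwc3 *dwc)
	{
		dwc->ep0state = EP0_SETUP_PHASE;
		complete(&dwc->ep0_in_setup);
	}
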
@@ -1509,11 +1619,13 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
DWC3_DEVTEN_CMDCMPLTEN |
DWC3_DEVTEN_ERRTICERREN |
DWC3_DEVTEN_WKUPEVTEN |
- DWC3_DEVTEN_ULSTCNGEN |
DWC3_DEVTEN_CONNECTDONEEN |
DWC3_DEVTEN_USBRSTEN |
DWC3_DEVTEN_DISCONNEVTEN);
+ if (dwc->revision < DWC3_REVISION_250A)
+ reg |= DWC3_DEVTEN_ULSTCNGEN;
+
dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}
@@ -1573,6 +1685,17 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
int ret = 0;
u32 reg;
+ /*
+ * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
+ * the core supports IMOD, disable it.
+ */
+ if (dwc->imod_interval) {
+ dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
+ } else if (dwc3_has_imod(dwc)) {
+ dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
+ }
+
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg &= ~(DWC3_DCFG_SPEED_MASK);
@@ -1633,16 +1756,14 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
- false);
+ ret = __dwc3_gadget_ep_enable(dep, false, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
goto err0;
}
dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
- false);
+ ret = __dwc3_gadget_ep_enable(dep, false, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
goto err1;
@@ -1708,9 +1829,6 @@ err0:
static void __dwc3_gadget_stop(struct dwc3 *dwc)
{
- if (pm_runtime_suspended(dwc->dev))
- return;
-
dwc3_gadget_disable_irq(dwc);
__dwc3_gadget_ep_disable(dwc->eps[0]);
__dwc3_gadget_ep_disable(dwc->eps[1]);
@@ -1720,9 +1838,30 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
{
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
+ int epnum;
spin_lock_irqsave(&dwc->lock, flags);
+
+ if (pm_runtime_suspended(dwc->dev))
+ goto out;
+
__dwc3_gadget_stop(dwc);
+
+ for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ if (!dep)
+ continue;
+
+ if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
+ continue;
+
+ wait_event_lock_irq(dep->wait_end_transfer,
+ !(dep->flags & DWC3_EP_END_TRANSFER_PENDING),
+ dwc->lock);
+ }
+
+out:
dwc->gadget_driver = NULL;
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1765,9 +1904,13 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
(epnum & 1) ? "in" : "out");
dep->endpoint.name = dep->name;
- spin_lock_init(&dep->lock);
- dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
+ if (!(dep->number > 1)) {
+ dep->endpoint.desc = &dwc3_gadget_ep0_desc;
+ dep->endpoint.comp_desc = NULL;
+ }
+
+ spin_lock_init(&dep->lock);
if (epnum == 0 || epnum == 1) {
usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
@@ -1815,15 +1958,13 @@ static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
if (ret < 0) {
- dwc3_trace(trace_dwc3_gadget,
- "failed to allocate OUT endpoints");
+ dev_err(dwc->dev, "failed to initialize OUT endpoints\n");
return ret;
}
ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
if (ret < 0) {
- dwc3_trace(trace_dwc3_gadget,
- "failed to allocate IN endpoints");
+ dev_err(dwc->dev, "failed to initialize IN endpoints\n");
return ret;
}
@@ -1892,15 +2033,12 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
return 1;
count = trb->size & DWC3_TRB_SIZE_MASK;
- req->request.actual += count;
+ req->remaining += count;
if (dep->direction) {
if (count) {
trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
- dwc3_trace(trace_dwc3_gadget,
- "%s: incomplete IN transfer",
- dep->name);
/*
* If missed isoc occurred and there is
* no request queued then issue END
@@ -1946,11 +2084,10 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
struct dwc3_request *req, *n;
struct dwc3_trb *trb;
bool ioc = false;
- int ret;
+ int ret = 0;
list_for_each_entry_safe(req, n, &dep->started_list, list) {
unsigned length;
- unsigned actual;
int chain;
length = req->request.length;
@@ -1964,6 +2101,9 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
for_each_sg(sg, s, pending, i) {
trb = &dep->trb_pool[dep->trb_dequeue];
+ if (trb->ctrl & DWC3_TRB_CTRL_HWO)
+ break;
+
req->sg = sg_next(s);
req->num_pending_sgs--;
@@ -1978,17 +2118,9 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
event, status, chain);
}
- /*
- * We assume here we will always receive the entire data block
- * which we should receive. Meaning, if we program RX to
- * receive 4K but we receive only 2K, we assume that's all we
- * should receive and we simply bounce the request back to the
- * gadget driver for further processing.
- */
- actual = length - req->request.actual;
- req->request.actual = actual;
+ req->request.actual = length - req->remaining;
- if (ret && chain && (actual < length) && req->num_pending_sgs)
+ if ((req->request.actual < length) && req->num_pending_sgs)
return __dwc3_gadget_kick_transfer(dep, 0);
dwc3_gadget_giveback(dep, req, status);
@@ -2096,10 +2228,12 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
{
struct dwc3_ep *dep;
u8 epnum = event->endpoint_number;
+ u8 cmd;
dep = dwc->eps[epnum];
- if (!(dep->flags & DWC3_EP_ENABLED))
+ if (!(dep->flags & DWC3_EP_ENABLED) &&
+ !(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
return;
if (epnum == 0 || epnum == 1) {
@@ -2112,9 +2246,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dep->resource_index = 0;
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- dwc3_trace(trace_dwc3_gadget,
- "%s is an Isochronous endpoint",
- dep->name);
+ dev_err(dwc->dev, "XferComplete for Isochronous endpoint\n");
return;
}
@@ -2127,22 +2259,11 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
dwc3_gadget_start_isoc(dwc, dep, event);
} else {
- int active;
int ret;
- active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
-
- dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
- dep->name, active ? "Transfer Active"
- : "Transfer Not Active");
-
ret = __dwc3_gadget_kick_transfer(dep, 0);
if (!ret || ret == -EBUSY)
return;
-
- dwc3_trace(trace_dwc3_gadget,
- "%s: failed to kick transfers",
- dep->name);
}
break;
@@ -2152,26 +2273,16 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dep->name);
return;
}
+ break;
+ case DWC3_DEPEVT_EPCMDCMPLT:
+ cmd = DEPEVT_PARAMETER_CMD(event->parameters);
- switch (event->status) {
- case DEPEVT_STREAMEVT_FOUND:
- dwc3_trace(trace_dwc3_gadget,
- "Stream %d found and started",
- event->parameters);
-
- break;
- case DEPEVT_STREAMEVT_NOTFOUND:
- /* FALLTHROUGH */
- default:
- dwc3_trace(trace_dwc3_gadget,
- "unable to find suitable stream");
+ if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
+ dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
+ wake_up(&dep->wait_end_transfer);
}
break;
case DWC3_DEPEVT_RXTXFIFOEVT:
- dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun", dep->name);
- break;
- case DWC3_DEPEVT_EPCMDCMPLT:
- dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
break;
}
}
@@ -2224,7 +2335,8 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
dep = dwc->eps[epnum];
- if (!dep->resource_index)
+ if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
+ !dep->resource_index)
return;
/*
@@ -2268,25 +2380,9 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
dep->resource_index = 0;
dep->flags &= ~DWC3_EP_BUSY;
- if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A)
+ if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) {
+ dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
udelay(100);
-}
-
-static void dwc3_stop_active_transfers(struct dwc3 *dwc)
-{
- u32 epnum;
-
- for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
- struct dwc3_ep *dep;
-
- dep = dwc->eps[epnum];
- if (!dep)
- continue;
-
- if (!(dep->flags & DWC3_EP_ENABLED))
- continue;
-
- dwc3_remove_requests(dwc, dep);
}
}
@@ -2375,8 +2471,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
reg &= ~DWC3_DCTL_TSTCTRL_MASK;
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
dwc->test_mode = false;
-
- dwc3_stop_active_transfers(dwc);
dwc3_clear_stall_all_ep(dwc);
/* Reset device address to zero */
@@ -2385,32 +2479,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
-static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
-{
- u32 reg;
- u32 usb30_clock = DWC3_GCTL_CLK_BUS;
-
- /*
- * We change the clock only at SS but I dunno why I would want to do
- * this. Maybe it becomes part of the power saving plan.
- */
-
- if ((speed != DWC3_DSTS_SUPERSPEED) &&
- (speed != DWC3_DSTS_SUPERSPEED_PLUS))
- return;
-
- /*
- * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
- * each time on Connect Done.
- */
- if (!usb30_clock)
- return;
-
- reg = dwc3_readl(dwc->regs, DWC3_GCTL);
- reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
- dwc3_writel(dwc->regs, DWC3_GCTL, reg);
-}
-
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
struct dwc3_ep *dep;
@@ -2422,7 +2490,14 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
speed = reg & DWC3_DSTS_CONNECTSPD;
dwc->speed = speed;
- dwc3_update_ram_clk_sel(dwc, speed);
+ /*
+ * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
+ * each time on Connect Done.
+ *
+ * Currently we always use the reset value. If any platform
+ * wants to set this to a different value, we need to add a
+ * setting and update GCTL.RAMCLKSEL here.
+ */
switch (speed) {
case DWC3_DSTS_SUPERSPEED_PLUS:
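
If a platform ever does need a non-default RAM clock selection, the update
would mirror the dwc3_update_ram_clk_sel() helper removed above, for example
(sketch only; no such platform setting is added by this patch):

	/* Hypothetical platform override of GCTL.RAMCLKSEL on Connect Done. */
	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg |= DWC3_GCTL_RAMCLKSEL(DWC3_GCTL_CLK_BUS);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
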
@@ -2491,7 +2566,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
*/
WARN_ONCE(dwc->revision < DWC3_REVISION_240A
&& dwc->has_lpm_erratum,
- "LPM Erratum not available on dwc3 revisisions < 2.40a\n");
+ "LPM Erratum not available on dwc3 revisions < 2.40a\n");
if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
@@ -2504,16 +2579,14 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
}
dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
- false);
+ ret = __dwc3_gadget_ep_enable(dep, true, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
return;
}
dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
- false);
+ ret = __dwc3_gadget_ep_enable(dep, true, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
return;
@@ -2570,8 +2643,6 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
(next == DWC3_LINK_STATE_RESUME)) {
- dwc3_trace(trace_dwc3_gadget,
- "ignoring transition U3 -> Resume");
return;
}
}
@@ -2705,11 +2776,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
break;
case DWC3_DEVICE_EVENT_EOPF:
/* It changed to be suspend event for version 2.30a and above */
- if (dwc->revision < DWC3_REVISION_230A) {
- dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
- } else {
- dwc3_trace(trace_dwc3_gadget, "U3/L1-L2 Suspend Event");
-
+ if (dwc->revision >= DWC3_REVISION_230A) {
/*
* Ignore suspend event until the gadget enters into
* USB_STATE_CONFIGURED state.
@@ -2720,16 +2787,9 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
}
break;
case DWC3_DEVICE_EVENT_SOF:
- dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
- break;
case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
- dwc3_trace(trace_dwc3_gadget, "Erratic Error");
- break;
case DWC3_DEVICE_EVENT_CMD_CMPL:
- dwc3_trace(trace_dwc3_gadget, "Command Complete");
- break;
case DWC3_DEVICE_EVENT_OVERFLOW:
- dwc3_trace(trace_dwc3_gadget, "Overflow");
break;
default:
dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
@@ -2739,7 +2799,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
static void dwc3_process_event_entry(struct dwc3 *dwc,
const union dwc3_event *event)
{
- trace_dwc3_event(event->raw);
+ trace_dwc3_event(event->raw, dwc);
/* Endpoint IRQ, handle it and return early */
if (event->type.is_devspec == 0) {
@@ -2772,7 +2832,7 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
while (left > 0) {
union dwc3_event event;
- event.raw = *(u32 *) (evt->buf + evt->lpos);
+ event.raw = *(u32 *) (evt->cache + evt->lpos);
dwc3_process_event_entry(dwc, &event);
@@ -2785,10 +2845,8 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
* boundary so I worry about that once we try to handle
* that.
*/
- evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
+ evt->lpos = (evt->lpos + 4) % evt->length;
left -= 4;
-
- dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4);
}
evt->count = 0;
@@ -2800,6 +2858,11 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
reg &= ~DWC3_GEVNTSIZ_INTMASK;
dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
+ if (dwc->imod_interval) {
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
+ dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
+ }
+
return ret;
}
@@ -2820,6 +2883,7 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
{
struct dwc3 *dwc = evt->dwc;
+ u32 amount;
u32 count;
u32 reg;
@@ -2843,6 +2907,14 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
reg |= DWC3_GEVNTSIZ_INTMASK;
dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
+ amount = min(count, evt->length - evt->lpos);
+ memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount);
+
+ if (amount < count)
+ memcpy(evt->cache, evt->buf, count - amount);
+
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
+
return IRQ_WAKE_THREAD;
}
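
The two memcpy() calls above perform a wrap-around copy out of the circular
event buffer into the software cache before the hardware count is acked. The
same pattern in isolation, with illustrative names rather than the driver's:

	/* Copy 'count' bytes starting at 'pos' from a ring of size 'len',
	 * wrapping back to the start of both buffers when the end is hit.
	 */
	static void ring_copy(void *dst, const void *ring, size_t len,
			      size_t pos, size_t count)
	{
		size_t amount = min(count, len - pos);

		memcpy((char *)dst + pos, (const char *)ring + pos, amount);
		if (amount < count)
			memcpy(dst, ring, count - amount);
	}
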
@@ -2853,6 +2925,39 @@ static irqreturn_t dwc3_interrupt(int irq, void *_evt)
return dwc3_check_event_buf(evt);
}
+static int dwc3_gadget_get_irq(struct dwc3 *dwc)
+{
+ struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+ int irq;
+
+ irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq(dwc3_pdev, 0);
+ if (irq > 0)
+ goto out;
+
+ if (irq != -EPROBE_DEFER)
+ dev_err(dwc->dev, "missing peripheral IRQ\n");
+
+ if (!irq)
+ irq = -EINVAL;
+
+out:
+ return irq;
+}
+
/**
* dwc3_gadget_init - Initializes gadget related registers
* @dwc: pointer to our controller context structure
@@ -2861,35 +2966,18 @@ static irqreturn_t dwc3_interrupt(int irq, void *_evt)
*/
int dwc3_gadget_init(struct dwc3 *dwc)
{
- int ret, irq;
- struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+ int ret;
+ int irq;
- irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
- if (irq == -EPROBE_DEFER)
- return irq;
-
- if (irq <= 0) {
- irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
- if (irq == -EPROBE_DEFER)
- return irq;
-
- if (irq <= 0) {
- irq = platform_get_irq(dwc3_pdev, 0);
- if (irq <= 0) {
- if (irq != -EPROBE_DEFER) {
- dev_err(dwc->dev,
- "missing peripheral IRQ\n");
- }
- if (!irq)
- irq = -EINVAL;
- return irq;
- }
- }
+ irq = dwc3_gadget_get_irq(dwc);
+ if (irq < 0) {
+ ret = irq;
+ goto err0;
}
dwc->irq_gadget = irq;
- dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+ dwc->ctrl_req = dma_alloc_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
&dwc->ctrl_req_addr, GFP_KERNEL);
if (!dwc->ctrl_req) {
dev_err(dwc->dev, "failed to allocate ctrl request\n");
@@ -2897,8 +2985,9 @@ int dwc3_gadget_init(struct dwc3 *dwc)
goto err0;
}
- dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
- &dwc->ep0_trb_addr, GFP_KERNEL);
+ dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev,
+ sizeof(*dwc->ep0_trb) * 2,
+ &dwc->ep0_trb_addr, GFP_KERNEL);
if (!dwc->ep0_trb) {
dev_err(dwc->dev, "failed to allocate ep0 trb\n");
ret = -ENOMEM;
@@ -2911,7 +3000,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
goto err2;
}
- dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
+ dwc->ep0_bounce = dma_alloc_coherent(dwc->sysdev,
DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
GFP_KERNEL);
if (!dwc->ep0_bounce) {
@@ -2926,6 +3015,8 @@ int dwc3_gadget_init(struct dwc3 *dwc)
goto err4;
}
+ init_completion(&dwc->ep0_in_setup);
+
dwc->gadget.ops = &dwc3_gadget_ops;
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->gadget.sg_supported = true;
@@ -2949,8 +3040,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
* composite.c that we are USB 2.0 + LPM ECN.
*/
if (dwc->revision < DWC3_REVISION_220A)
- dwc3_trace(trace_dwc3_gadget,
- "Changing max_speed on rev %08x",
+ dev_info(dwc->dev, "changing max_speed on rev %08x\n",
dwc->revision);
dwc->gadget.max_speed = dwc->maximum_speed;
@@ -2983,18 +3073,18 @@ err5:
err4:
dwc3_gadget_free_endpoints(dwc);
- dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
+ dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE,
dwc->ep0_bounce, dwc->ep0_bounce_addr);
err3:
kfree(dwc->setup_buf);
err2:
- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
+ dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
dwc->ep0_trb, dwc->ep0_trb_addr);
err1:
- dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+ dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
dwc->ctrl_req, dwc->ctrl_req_addr);
err0:
@@ -3009,16 +3099,16 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
dwc3_gadget_free_endpoints(dwc);
- dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
+ dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE,
dwc->ep0_bounce, dwc->ep0_bounce_addr);
kfree(dwc->setup_buf);
kfree(dwc->zlp_buf);
- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
+ dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
dwc->ep0_trb, dwc->ep0_trb_addr);
- dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+ dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
dwc->ctrl_req, dwc->ctrl_req_addr);
}
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index e4a1d974a5ae..3129bcf74d7d 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -62,10 +62,7 @@ struct dwc3;
static inline struct dwc3_request *next_request(struct list_head *list)
{
- if (list_empty(list))
- return NULL;
-
- return list_first_entry(list, struct dwc3_request, list);
+ return list_first_entry_or_null(list, struct dwc3_request, list);
}
static inline void dwc3_gadget_move_started_request(struct dwc3_request *req)
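
list_first_entry_or_null() folds the empty-list check into the lookup, so
callers keep their existing NULL check, for example (caller shown only for
illustration, it is unchanged by this hunk):

	struct dwc3_request *req;

	req = next_request(&dep->pending_list);
	if (!req)
		return;
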
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index f6533c68fed1..487f0ff6ae25 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -19,6 +19,39 @@
#include "core.h"
+static int dwc3_host_get_irq(struct dwc3 *dwc)
+{
+ struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+ int irq;
+
+ irq = platform_get_irq_byname(dwc3_pdev, "host");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq(dwc3_pdev, 0);
+ if (irq > 0)
+ goto out;
+
+ if (irq != -EPROBE_DEFER)
+ dev_err(dwc->dev, "missing host IRQ\n");
+
+ if (!irq)
+ irq = -EINVAL;
+
+out:
+ return irq;
+}
+
int dwc3_host_init(struct dwc3 *dwc)
{
struct property_entry props[2];
@@ -27,39 +60,18 @@ int dwc3_host_init(struct dwc3 *dwc)
struct resource *res;
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
- irq = platform_get_irq_byname(dwc3_pdev, "host");
- if (irq == -EPROBE_DEFER)
+ irq = dwc3_host_get_irq(dwc);
+ if (irq < 0)
return irq;
- if (irq <= 0) {
- irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
- if (irq == -EPROBE_DEFER)
- return irq;
-
- if (irq <= 0) {
- irq = platform_get_irq(dwc3_pdev, 0);
- if (irq <= 0) {
- if (irq != -EPROBE_DEFER) {
- dev_err(dwc->dev,
- "missing host IRQ\n");
- }
- if (!irq)
- irq = -EINVAL;
- return irq;
- } else {
- res = platform_get_resource(dwc3_pdev,
- IORESOURCE_IRQ, 0);
- }
- } else {
- res = platform_get_resource_byname(dwc3_pdev,
- IORESOURCE_IRQ,
- "dwc_usb3");
- }
-
- } else {
+ res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ, "host");
+ if (!res)
res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ,
- "host");
- }
+ "dwc_usb3");
+ if (!res)
+ res = platform_get_resource(dwc3_pdev, IORESOURCE_IRQ, 0);
+ if (!res)
+ return -ENOMEM;
dwc->xhci_resources[1].start = irq;
dwc->xhci_resources[1].end = irq;
@@ -72,11 +84,7 @@ int dwc3_host_init(struct dwc3 *dwc)
return -ENOMEM;
}
- dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask);
-
xhci->dev.parent = dwc->dev;
- xhci->dev.dma_mask = dwc->dev->dma_mask;
- xhci->dev.dma_parms = dwc->dev->dma_parms;
dwc->xhci = xhci;
@@ -99,9 +107,9 @@ int dwc3_host_init(struct dwc3 *dwc)
}
phy_create_lookup(dwc->usb2_generic_phy, "usb2-phy",
- dev_name(&xhci->dev));
+ dev_name(dwc->dev));
phy_create_lookup(dwc->usb3_generic_phy, "usb3-phy",
- dev_name(&xhci->dev));
+ dev_name(dwc->dev));
ret = platform_device_add(xhci);
if (ret) {
@@ -112,9 +120,9 @@ int dwc3_host_init(struct dwc3 *dwc)
return 0;
err2:
phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy",
- dev_name(&xhci->dev));
+ dev_name(dwc->dev));
phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
- dev_name(&xhci->dev));
+ dev_name(dwc->dev));
err1:
platform_device_put(xhci);
return ret;
@@ -123,8 +131,8 @@ err1:
void dwc3_host_exit(struct dwc3 *dwc)
{
phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy",
- dev_name(&dwc->xhci->dev));
+ dev_name(dwc->dev));
phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
- dev_name(&dwc->xhci->dev));
+ dev_name(dwc->dev));
platform_device_unregister(dwc->xhci);
}
diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h
index a06f9a8fecc7..c69b06696824 100644
--- a/drivers/usb/dwc3/io.h
+++ b/drivers/usb/dwc3/io.h
@@ -40,8 +40,7 @@ static inline u32 dwc3_readl(void __iomem *base, u32 offset)
* documentation, so we revert it back to the proper addresses, the
* same way they are described on SNPS documentation
*/
- dwc3_trace(trace_dwc3_readl, "addr %p value %08x",
- base - DWC3_GLOBALS_REGS_START + offset, value);
+ trace_dwc3_readl(base - DWC3_GLOBALS_REGS_START, offset, value);
return value;
}
@@ -60,8 +59,7 @@ static inline void dwc3_writel(void __iomem *base, u32 offset, u32 value)
* documentation, so we revert it back to the proper addresses, the
* same way they are described on SNPS documentation
*/
- dwc3_trace(trace_dwc3_writel, "addr %p value %08x",
- base - DWC3_GLOBALS_REGS_START + offset, value);
+ trace_dwc3_writel(base - DWC3_GLOBALS_REGS_START, offset, value);
}
#endif /* __DRIVERS_USB_DWC3_IO_H */
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index d24cefd191b5..2b124f94d858 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -37,47 +37,66 @@ DECLARE_EVENT_CLASS(dwc3_log_msg,
TP_printk("%s", __get_str(msg))
);
-DEFINE_EVENT(dwc3_log_msg, dwc3_readl,
+DEFINE_EVENT(dwc3_log_msg, dwc3_gadget,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf)
);
-DEFINE_EVENT(dwc3_log_msg, dwc3_writel,
+DEFINE_EVENT(dwc3_log_msg, dwc3_core,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf)
);
-DEFINE_EVENT(dwc3_log_msg, dwc3_gadget,
+DEFINE_EVENT(dwc3_log_msg, dwc3_ep0,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf)
);
-DEFINE_EVENT(dwc3_log_msg, dwc3_core,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
+DECLARE_EVENT_CLASS(dwc3_log_io,
+ TP_PROTO(void *base, u32 offset, u32 value),
+ TP_ARGS(base, offset, value),
+ TP_STRUCT__entry(
+ __field(void *, base)
+ __field(u32, offset)
+ __field(u32, value)
+ ),
+ TP_fast_assign(
+ __entry->base = base;
+ __entry->offset = offset;
+ __entry->value = value;
+ ),
+ TP_printk("addr %p value %08x", __entry->base + __entry->offset,
+ __entry->value)
);
-DEFINE_EVENT(dwc3_log_msg, dwc3_ep0,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
+DEFINE_EVENT(dwc3_log_io, dwc3_readl,
+ TP_PROTO(void *base, u32 offset, u32 value),
+ TP_ARGS(base, offset, value)
+);
+
+DEFINE_EVENT(dwc3_log_io, dwc3_writel,
+ TP_PROTO(void *base, u32 offset, u32 value),
+ TP_ARGS(base, offset, value)
);
DECLARE_EVENT_CLASS(dwc3_log_event,
- TP_PROTO(u32 event),
- TP_ARGS(event),
+ TP_PROTO(u32 event, struct dwc3 *dwc),
+ TP_ARGS(event, dwc),
TP_STRUCT__entry(
__field(u32, event)
+ __field(u32, ep0state)
),
TP_fast_assign(
__entry->event = event;
+ __entry->ep0state = dwc->ep0state;
),
TP_printk("event (%08x): %s", __entry->event,
- dwc3_decode_event(__entry->event))
+ dwc3_decode_event(__entry->event, __entry->ep0state))
);
DEFINE_EVENT(dwc3_log_event, dwc3_event,
- TP_PROTO(u32 event),
- TP_ARGS(event)
+ TP_PROTO(u32 event, struct dwc3 *dwc),
+ TP_ARGS(event, dwc)
);
DECLARE_EVENT_CLASS(dwc3_log_ctrl,
@@ -237,6 +256,7 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
__field(u32, bph)
__field(u32, size)
__field(u32, ctrl)
+ __field(u32, type)
),
TP_fast_assign(
snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
@@ -247,11 +267,31 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
__entry->bph = trb->bph;
__entry->size = trb->size;
__entry->ctrl = trb->ctrl;
+ __entry->type = usb_endpoint_type(dep->endpoint.desc);
),
- TP_printk("%s: %d/%d trb %p buf %08x%08x size %d ctrl %08x (%c%c%c%c:%c%c:%s)",
+ TP_printk("%s: %d/%d trb %p buf %08x%08x size %s%d ctrl %08x (%c%c%c%c:%c%c:%s)",
__get_str(name), __entry->queued, __entry->allocated,
__entry->trb, __entry->bph, __entry->bpl,
- __entry->size, __entry->ctrl,
+ ({char *s;
+ int pcm = ((__entry->size >> 24) & 3) + 1;
+ switch (__entry->type) {
+ case USB_ENDPOINT_XFER_INT:
+ case USB_ENDPOINT_XFER_ISOC:
+ switch (pcm) {
+ case 1:
+ s = "1x ";
+ break;
+ case 2:
+ s = "2x ";
+ break;
+ case 3:
+ s = "3x ";
+ break;
+ }
+ default:
+ s = "";
+ } s; }),
+ DWC3_TRB_SIZE_LENGTH(__entry->size), __entry->ctrl,
__entry->ctrl & DWC3_TRB_CTRL_HWO ? 'H' : 'h',
__entry->ctrl & DWC3_TRB_CTRL_LST ? 'L' : 'l',
__entry->ctrl & DWC3_TRB_CTRL_CHN ? 'C' : 'c',
@@ -301,6 +341,57 @@ DEFINE_EVENT(dwc3_log_trb, dwc3_complete_trb,
TP_ARGS(dep, trb)
);
+DECLARE_EVENT_CLASS(dwc3_log_ep,
+ TP_PROTO(struct dwc3_ep *dep),
+ TP_ARGS(dep),
+ TP_STRUCT__entry(
+ __dynamic_array(char, name, DWC3_MSG_MAX)
+ __field(unsigned, maxpacket)
+ __field(unsigned, maxpacket_limit)
+ __field(unsigned, max_streams)
+ __field(unsigned, maxburst)
+ __field(unsigned, flags)
+ __field(unsigned, direction)
+ __field(u8, trb_enqueue)
+ __field(u8, trb_dequeue)
+ ),
+ TP_fast_assign(
+ snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
+ __entry->maxpacket = dep->endpoint.maxpacket;
+ __entry->maxpacket_limit = dep->endpoint.maxpacket_limit;
+ __entry->max_streams = dep->endpoint.max_streams;
+ __entry->maxburst = dep->endpoint.maxburst;
+ __entry->flags = dep->flags;
+ __entry->direction = dep->direction;
+ __entry->trb_enqueue = dep->trb_enqueue;
+ __entry->trb_dequeue = dep->trb_dequeue;
+ ),
+ TP_printk("%s: mps %d/%d streams %d burst %d ring %d/%d flags %c:%c%c%c%c%c:%c:%c",
+ __get_str(name), __entry->maxpacket,
+ __entry->maxpacket_limit, __entry->max_streams,
+ __entry->maxburst, __entry->trb_enqueue,
+ __entry->trb_dequeue,
+ __entry->flags & DWC3_EP_ENABLED ? 'E' : 'e',
+ __entry->flags & DWC3_EP_STALL ? 'S' : 's',
+ __entry->flags & DWC3_EP_WEDGE ? 'W' : 'w',
+ __entry->flags & DWC3_EP_BUSY ? 'B' : 'b',
+ __entry->flags & DWC3_EP_PENDING_REQUEST ? 'P' : 'p',
+ __entry->flags & DWC3_EP_MISSED_ISOC ? 'M' : 'm',
+ __entry->flags & DWC3_EP_END_TRANSFER_PENDING ? 'E' : 'e',
+ __entry->direction ? '<' : '>'
+ )
+);
+
+DEFINE_EVENT(dwc3_log_ep, dwc3_gadget_ep_enable,
+ TP_PROTO(struct dwc3_ep *dep),
+ TP_ARGS(dep)
+);
+
+DEFINE_EVENT(dwc3_log_ep, dwc3_gadget_ep_disable,
+ TP_PROTO(struct dwc3_ep *dep),
+ TP_ARGS(dep)
+);
+
#endif /* __DWC3_TRACE_H */
/* this part has to be here */
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 32176f779861..41ab61f9b6e0 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -201,7 +201,12 @@ ep_found:
_ep->desc = chosen_desc;
_ep->comp_desc = NULL;
_ep->maxburst = 0;
- _ep->mult = 0;
+ _ep->mult = 1;
+
+ if (g->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(_ep->desc) ||
+ usb_endpoint_xfer_int(_ep->desc)))
+ _ep->mult = usb_endpoint_maxp_mult(_ep->desc);
+
if (!want_comp_desc)
return 0;
@@ -218,7 +223,7 @@ ep_found:
switch (usb_endpoint_type(_ep->desc)) {
case USB_ENDPOINT_XFER_ISOC:
/* mult: bits 1:0 of bmAttributes */
- _ep->mult = comp_desc->bmAttributes & 0x3;
+ _ep->mult = (comp_desc->bmAttributes & 0x3) + 1;
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
_ep->maxburst = comp_desc->bMaxBurst + 1;
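
With these changes _ep->mult holds the 1-based transaction count rather than
the zero-based "additional transactions" field (note the matching uvc_video.c
change below dropping its "+ 1"). For high-speed periodic endpoints that count
is encoded in wMaxPacketSize; a small decoding sketch, assuming the helper
semantics after this series:

	/* wMaxPacketSize: bits 10:0 = packet size, bits 12:11 = additional
	 * transactions per microframe for HS periodic endpoints.
	 */
	u16 maxp = le16_to_cpu(desc->wMaxPacketSize);
	unsigned int size = maxp & 0x7ff;		/* usb_endpoint_maxp() */
	unsigned int mult = ((maxp >> 11) & 3) + 1;	/* usb_endpoint_maxp_mult() */
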
@@ -2382,18 +2387,8 @@ EXPORT_SYMBOL_GPL(usb_composite_setup_continue);
static char *composite_default_mfr(struct usb_gadget *gadget)
{
- char *mfr;
- int len;
-
- len = snprintf(NULL, 0, "%s %s with %s", init_utsname()->sysname,
- init_utsname()->release, gadget->name);
- len++;
- mfr = kmalloc(len, GFP_KERNEL);
- if (!mfr)
- return NULL;
- snprintf(mfr, len, "%s %s with %s", init_utsname()->sysname,
- init_utsname()->release, gadget->name);
- return mfr;
+ return kasprintf(GFP_KERNEL, "%s %s with %s", init_utsname()->sysname,
+ init_utsname()->release, gadget->name);
}
void usb_composite_overwrite_options(struct usb_composite_dev *cdev,
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 3984787f8e97..78c44979dde3 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -408,7 +408,7 @@ out:
return ret;
}
-static int config_usb_cfg_unlink(
+static void config_usb_cfg_unlink(
struct config_item *usb_cfg_ci,
struct config_item *usb_func_ci)
{
@@ -437,12 +437,11 @@ static int config_usb_cfg_unlink(
list_del(&f->list);
usb_put_function(f);
mutex_unlock(&gi->lock);
- return 0;
+ return;
}
}
mutex_unlock(&gi->lock);
WARN(1, "Unable to locate function to unbind\n");
- return 0;
}
static struct configfs_item_operations gadget_config_item_ops = {
@@ -865,7 +864,7 @@ out:
return ret;
}
-static int os_desc_unlink(struct config_item *os_desc_ci,
+static void os_desc_unlink(struct config_item *os_desc_ci,
struct config_item *usb_cfg_ci)
{
struct gadget_info *gi = container_of(to_config_group(os_desc_ci),
@@ -878,7 +877,6 @@ static int os_desc_unlink(struct config_item *os_desc_ci,
cdev->os_desc_config = NULL;
WARN_ON(gi->composite.gadget_driver.udc_name);
mutex_unlock(&gi->lock);
- return 0;
}
static struct configfs_item_operations os_desc_ops = {
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 17989b72cdae..0780d8311ec6 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -266,7 +266,7 @@ static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
{
struct ffs_data *ffs = req->context;
- complete_all(&ffs->ep0req_completion);
+ complete(&ffs->ep0req_completion);
}
static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index e2966f87c860..7abd70b2a588 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -98,6 +98,60 @@ static struct hid_descriptor hidg_desc = {
/*.desc[0].wDescriptorLenght = DYNAMIC */
};
+/* Super-Speed Support */
+
+static struct usb_endpoint_descriptor hidg_ss_in_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ /*.wMaxPacketSize = DYNAMIC */
+ .bInterval = 4, /* FIXME: Add this field in the
+ * HID gadget configuration?
+ * (struct hidg_func_descriptor)
+ */
+};
+
+static struct usb_ss_ep_comp_descriptor hidg_ss_in_comp_desc = {
+ .bLength = sizeof(hidg_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ /* .wBytesPerInterval = DYNAMIC */
+};
+
+static struct usb_endpoint_descriptor hidg_ss_out_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ /*.wMaxPacketSize = DYNAMIC */
+ .bInterval = 4, /* FIXME: Add this field in the
+ * HID gadget configuration?
+ * (struct hidg_func_descriptor)
+ */
+};
+
+static struct usb_ss_ep_comp_descriptor hidg_ss_out_comp_desc = {
+ .bLength = sizeof(hidg_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ /* .wBytesPerInterval = DYNAMIC */
+};
+
+static struct usb_descriptor_header *hidg_ss_descriptors[] = {
+ (struct usb_descriptor_header *)&hidg_interface_desc,
+ (struct usb_descriptor_header *)&hidg_desc,
+ (struct usb_descriptor_header *)&hidg_ss_in_ep_desc,
+ (struct usb_descriptor_header *)&hidg_ss_in_comp_desc,
+ (struct usb_descriptor_header *)&hidg_ss_out_ep_desc,
+ (struct usb_descriptor_header *)&hidg_ss_out_comp_desc,
+ NULL,
+};
+
/* High-Speed Support */
static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = {
@@ -624,8 +678,14 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
/* set descriptor dynamic values */
hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
+ hidg_ss_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+ hidg_ss_in_comp_desc.wBytesPerInterval =
+ cpu_to_le16(hidg->report_length);
hidg_hs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+ hidg_ss_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+ hidg_ss_out_comp_desc.wBytesPerInterval =
+ cpu_to_le16(hidg->report_length);
hidg_hs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_fs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
/*
@@ -641,8 +701,13 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
hidg_hs_out_ep_desc.bEndpointAddress =
hidg_fs_out_ep_desc.bEndpointAddress;
+ hidg_ss_in_ep_desc.bEndpointAddress =
+ hidg_fs_in_ep_desc.bEndpointAddress;
+ hidg_ss_out_ep_desc.bEndpointAddress =
+ hidg_fs_out_ep_desc.bEndpointAddress;
+
status = usb_assign_descriptors(f, hidg_fs_descriptors,
- hidg_hs_descriptors, NULL, NULL);
+ hidg_hs_descriptors, hidg_ss_descriptors, NULL);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 639603722709..e8008fa35e1e 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -998,7 +998,7 @@ static struct sk_buff *package_for_tx(struct f_ncm *ncm)
/* Merge the skbs */
swap(skb2, ncm->skb_tx_data);
if (ncm->skb_tx_data) {
- dev_kfree_skb_any(ncm->skb_tx_data);
+ dev_consume_skb_any(ncm->skb_tx_data);
ncm->skb_tx_data = NULL;
}
@@ -1009,7 +1009,7 @@ static struct sk_buff *package_for_tx(struct f_ncm *ncm)
/* Copy NTB across. */
ntb_iter = (void *) skb_put(skb2, ncm->skb_tx_ndp->len);
memcpy(ntb_iter, ncm->skb_tx_ndp->data, ncm->skb_tx_ndp->len);
- dev_kfree_skb_any(ncm->skb_tx_ndp);
+ dev_consume_skb_any(ncm->skb_tx_ndp);
ncm->skb_tx_ndp = NULL;
/* Insert zero'd datagram. */
@@ -1078,6 +1078,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
if (!ncm->skb_tx_data)
goto err;
+ ncm->skb_tx_data->dev = ncm->netdev;
ntb_data = (void *) skb_put(ncm->skb_tx_data, ncb_len);
memset(ntb_data, 0, ncb_len);
/* dwSignature */
@@ -1096,6 +1097,8 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
GFP_ATOMIC);
if (!ncm->skb_tx_ndp)
goto err;
+
+ ncm->skb_tx_ndp->dev = ncm->netdev;
ntb_ndp = (void *) skb_put(ncm->skb_tx_ndp,
opts->ndp_size);
memset(ntb_ndp, 0, ncb_len);
@@ -1133,7 +1136,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
memset(ntb_data, 0, dgram_pad);
ntb_data = (void *) skb_put(ncm->skb_tx_data, skb->len);
memcpy(ntb_data, skb->data, skb->len);
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
skb = NULL;
} else if (ncm->skb_tx_data && ncm->timer_force_tx) {
@@ -1329,7 +1332,7 @@ static int ncm_unwrap_ntb(struct gether *port,
} while (ndp_len > 2 * (opts->dgram_item_len * 2));
} while (ndp_index);
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
VDBG(port->func.config->cdev,
"Parsed NTB with %d frames\n", dgram_counter);
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index cd214ec8a601..969cfe741380 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1067,13 +1067,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
if (!agdev->out_ep) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- goto err;
+ return ret;
}
agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
if (!agdev->in_ep) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- goto err;
+ return ret;
}
uac2->p_prm.uac2 = uac2;
@@ -1091,7 +1091,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL,
NULL);
if (ret)
- goto err;
+ return ret;
prm = &agdev->uac2.c_prm;
prm->max_psize = hs_epout_desc.wMaxPacketSize;
@@ -1106,19 +1106,19 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL);
if (!prm->rbuf) {
prm->max_psize = 0;
- goto err_free_descs;
+ goto err;
}
ret = alsa_uac2_init(agdev);
if (ret)
- goto err_free_descs;
+ goto err;
return 0;
-err_free_descs:
- usb_free_all_descriptors(fn);
err:
kfree(agdev->uac2.p_prm.rbuf);
kfree(agdev->uac2.c_prm.rbuf);
+err_free_descs:
+ usb_free_all_descriptors(fn);
return -EINVAL;
}
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index ab6ac1b74ac0..a3b5e468b116 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -80,8 +80,7 @@ static const struct file_operations rndis_proc_fops;
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
/* supported OIDs */
-static const u32 oid_supported_list[] =
-{
+static const u32 oid_supported_list[] = {
/* the general stuff */
RNDIS_OID_GEN_SUPPORTED_LIST,
RNDIS_OID_GEN_HARDWARE_STATUS,
@@ -474,8 +473,7 @@ static int gen_ndis_query_resp(struct rndis_params *params, u32 OID, u8 *buf,
break;
default:
- pr_warning("%s: query unknown OID 0x%08X\n",
- __func__, OID);
+ pr_warn("%s: query unknown OID 0x%08X\n", __func__, OID);
}
if (retval < 0)
length = 0;
@@ -546,8 +544,8 @@ static int gen_ndis_set_resp(struct rndis_params *params, u32 OID,
break;
default:
- pr_warning("%s: set unknown OID 0x%08X, size %d\n",
- __func__, OID, buf_len);
+ pr_warn("%s: set unknown OID 0x%08X, size %d\n",
+ __func__, OID, buf_len);
}
return retval;
@@ -854,7 +852,7 @@ int rndis_msg_parser(struct rndis_params *params, u8 *buf)
* In one case those messages seemed to relate to the host
* suspending itself.
*/
- pr_warning("%s: unknown RNDIS message 0x%08X len %d\n",
+ pr_warn("%s: unknown RNDIS message 0x%08X len %d\n",
__func__, MsgType, MsgLength);
print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET,
buf, MsgLength);
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index ef92eb66d8ad..21e0430ffb98 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -22,8 +22,7 @@
#define RNDIS_MAXIMUM_FRAME_SIZE 1518
#define RNDIS_MAX_TOTAL_SIZE 1558
-typedef struct rndis_init_msg_type
-{
+typedef struct rndis_init_msg_type {
__le32 MessageType;
__le32 MessageLength;
__le32 RequestID;
@@ -32,8 +31,7 @@ typedef struct rndis_init_msg_type
__le32 MaxTransferSize;
} rndis_init_msg_type;
-typedef struct rndis_init_cmplt_type
-{
+typedef struct rndis_init_cmplt_type {
__le32 MessageType;
__le32 MessageLength;
__le32 RequestID;
@@ -49,15 +47,13 @@ typedef struct rndis_init_cmplt_type
__le32 AFListSize;
} rndis_init_cmplt_type;
-typedef struct rndis_halt_msg_type
-{
+typedef struct rndis_halt_msg_type {
__le32 MessageType;
__le32 MessageLength;
__le32 RequestID;
} rndis_halt_msg_type;
-typedef struct rndis_query_msg_type
-{
+typedef struct rndis_query_msg_type {
__le32 MessageType;
__le32 MessageLength;
__le32 RequestID;
@@ -67,8 +63,7 @@ typedef struct rndis_query_msg_type
__le32 DeviceVcHandle;
} rndis_query_msg_type;
-typedef struct rndis_query_cmplt_type
-{
+typedef struct rndis_query_cmplt_type {
__le32 MessageType;
__le32 MessageLength;
__le32 RequestID;
@@ -77,8 +72,7 @@ typedef struct rndis_query_cmplt_type
__le32 InformationBufferOffset;
} rndis_query_cmplt_type;
-typedef struct rndis_set_msg_type
-{
+typedef struct rndis_set_msg_type {
__le32 MessageType;
__le32 MessageLength;
__le32 RequestID;
@@ -88,31 +82,27 @@ typedef struct rndis_set_msg_type
__le32 DeviceVcHandle;
} rndis_set_msg_type;
-typedef struct rndis_set_cmplt_type
-{
+typedef struct rndis_set_cmplt_type {
__le32 MessageType;
__le32 MessageLength;
__le32 RequestID;
__le32 Status;
} rndis_set_cmplt_type;
-typedef struct rndis_reset_msg_type
-{
+typedef struct rndis_reset_msg_type {
__le32 MessageType;
__le32 MessageLength;
__le32 Reserved;
} rndis_reset_msg_type;
-typedef struct rndis_reset_cmplt_type
-{
+typedef struct rndis_reset_cmplt_type {
__le32 MessageType;
__le32 MessageLength;
__le32 Status;
__le32 AddressingReset;
} rndis_reset_cmplt_type;
-typedef struct rndis_indicate_status_msg_type
-{
+typedef struct rndis_indicate_status_msg_type {
__le32 MessageType;
__le32 MessageLength;
__le32 Status;
@@ -120,23 +110,20 @@ typedef struct rndis_indicate_status_msg_type
__le32 StatusBufferOffset;
} rndis_indicate_status_msg_type;
-typedef struct rndis_keepalive_msg_type
-{
+typedef struct rndis_keepalive_msg_type {
__le32 MessageType;
__le32 MessageLength;
__le32 RequestID;
} rndis_keepalive_msg_type;
-typedef struct rndis_keepalive_cmplt_type
-{
+typedef struct rndis_keepalive_cmplt_type {
__le32 MessageType;
__le32 MessageLength;
__le32 RequestID;
__le32 Status;
} rndis_keepalive_cmplt_type;
-struct rndis_packet_msg_type
-{
+struct rndis_packet_msg_type {
__le32 MessageType;
__le32 MessageLength;
__le32 DataOffset;
@@ -150,8 +137,7 @@ struct rndis_packet_msg_type
__le32 Reserved;
} __attribute__ ((packed));
-struct rndis_config_parameter
-{
+struct rndis_config_parameter {
__le32 ParameterNameOffset;
__le32 ParameterNameLength;
__le32 ParameterType;
@@ -160,23 +146,20 @@ struct rndis_config_parameter
};
/* implementation specific */
-enum rndis_state
-{
+enum rndis_state {
RNDIS_UNINITIALIZED,
RNDIS_INITIALIZED,
RNDIS_DATA_INITIALIZED,
};
-typedef struct rndis_resp_t
-{
+typedef struct rndis_resp_t {
struct list_head list;
u8 *buf;
u32 length;
int send;
} rndis_resp_t;
-typedef struct rndis_params
-{
+typedef struct rndis_params {
int confignr;
u8 used;
u16 saved_filter;
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 84a1709e0784..b4e5d6dfd549 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -215,7 +215,7 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
if (dev->port_usb->is_fixed)
size = max_t(size_t, size, dev->port_usb->fixed_out_len);
- skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
+ skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
if (skb == NULL) {
DBG(dev, "no rx skb\n");
goto enomem;
@@ -446,16 +446,17 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req)
/* FALLTHROUGH */
case -ECONNRESET: /* unlink */
case -ESHUTDOWN: /* disconnect etc */
+ dev_kfree_skb_any(skb);
break;
case 0:
dev->net->stats.tx_bytes += skb->len;
+ dev_consume_skb_any(skb);
}
dev->net->stats.tx_packets++;
spin_lock(&dev->req_lock);
list_add(&req->list, &dev->tx_reqs);
spin_unlock(&dev->req_lock);
- dev_kfree_skb_any(skb);
atomic_dec(&dev->tx_qlen);
if (netif_carrier_ok(dev->net))
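
dev_consume_skb_any() is used for skbs that were transmitted successfully,
while dev_kfree_skb_any() stays on the unlink/shutdown paths so drop monitors
only see real drops. The pattern in isolation (illustrative function, not part
of this driver):

	static void my_tx_complete(struct sk_buff *skb, int status)
	{
		if (status == 0)
			dev_consume_skb_any(skb);	/* delivered, not a drop */
		else
			dev_kfree_skb_any(skb);		/* counted as a drop */
	}
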
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index e0cd1e4c8892..000677c991b0 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -622,8 +622,8 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
switch (req->status) {
default:
/* presumably a transient fault */
- pr_warning("%s: unexpected %s status %d\n",
- __func__, ep->name, req->status);
+ pr_warn("%s: unexpected %s status %d\n",
+ __func__, ep->name, req->status);
/* FALL THROUGH */
case 0:
/* normal completion */
@@ -1256,7 +1256,8 @@ static void gserial_console_exit(void)
struct gscons_info *info = &gscons_info;
unregister_console(&gserial_cons);
- kthread_stop(info->console_thread);
+ if (info->console_thread != NULL)
+ kthread_stop(info->console_thread);
gs_buf_free(&info->con_buf);
}
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 7d3bb6272e06..11d70dead32b 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -26,14 +26,12 @@
#define UVC_EVENT_DATA (V4L2_EVENT_PRIVATE_START + 5)
#define UVC_EVENT_LAST (V4L2_EVENT_PRIVATE_START + 5)
-struct uvc_request_data
-{
+struct uvc_request_data {
__s32 length;
__u8 data[60];
};
-struct uvc_event
-{
+struct uvc_event {
union {
enum usb_device_speed speed;
struct usb_ctrlrequest req;
@@ -104,8 +102,7 @@ extern unsigned int uvc_gadget_trace_param;
* Structures
*/
-struct uvc_video
-{
+struct uvc_video {
struct usb_ep *ep;
/* Frame parameters */
@@ -134,15 +131,13 @@ struct uvc_video
unsigned int fid;
};
-enum uvc_state
-{
+enum uvc_state {
UVC_STATE_DISCONNECTED,
UVC_STATE_CONNECTED,
UVC_STATE_STREAMING,
};
-struct uvc_device
-{
+struct uvc_device {
struct video_device vdev;
struct v4l2_device v4l2_dev;
enum uvc_state state;
@@ -175,8 +170,7 @@ static inline struct uvc_device *to_uvc(struct usb_function *f)
return container_of(f, struct uvc_device, func);
}
-struct uvc_file_handle
-{
+struct uvc_file_handle {
struct v4l2_fh vfh;
struct uvc_video *device;
};
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 31125a4a2658..4e037d2a7a60 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -547,7 +547,7 @@ out:
return ret;
}
-static int uvcg_control_class_drop_link(struct config_item *src,
+static void uvcg_control_class_drop_link(struct config_item *src,
struct config_item *target)
{
struct config_item *control, *header;
@@ -555,7 +555,6 @@ static int uvcg_control_class_drop_link(struct config_item *src,
struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
struct uvc_descriptor_header **class_array;
struct uvcg_control_header *target_hdr;
- int ret = -EINVAL;
mutex_lock(su_mutex); /* for navigating configfs hierarchy */
@@ -569,23 +568,17 @@ static int uvcg_control_class_drop_link(struct config_item *src,
mutex_lock(&opts->lock);
class_array = uvcg_get_ctl_class_arr(src, opts);
- if (!class_array)
- goto unlock;
- if (opts->refcnt) {
- ret = -EBUSY;
+ if (!class_array || opts->refcnt)
goto unlock;
- }
target_hdr = to_uvcg_control_header(target);
--target_hdr->linked;
class_array[0] = NULL;
- ret = 0;
unlock:
mutex_unlock(&opts->lock);
out:
mutex_unlock(su_mutex);
- return ret;
}
static struct configfs_item_operations uvcg_control_class_item_ops = {
@@ -777,7 +770,7 @@ out:
return ret;
}
-static int uvcg_streaming_header_drop_link(struct config_item *src,
+static void uvcg_streaming_header_drop_link(struct config_item *src,
struct config_item *target)
{
struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
@@ -786,7 +779,6 @@ static int uvcg_streaming_header_drop_link(struct config_item *src,
struct uvcg_streaming_header *src_hdr;
struct uvcg_format *target_fmt = NULL;
struct uvcg_format_ptr *format_ptr, *tmp;
- int ret = -EINVAL;
src_hdr = to_uvcg_streaming_header(src);
mutex_lock(su_mutex); /* for navigating configfs hierarchy */
@@ -811,8 +803,6 @@ static int uvcg_streaming_header_drop_link(struct config_item *src,
out:
mutex_unlock(&opts->lock);
mutex_unlock(su_mutex);
- return ret;
-
}
static struct configfs_item_operations uvcg_streaming_header_item_ops = {
@@ -2051,7 +2041,7 @@ out:
return ret;
}
-static int uvcg_streaming_class_drop_link(struct config_item *src,
+static void uvcg_streaming_class_drop_link(struct config_item *src,
struct config_item *target)
{
struct config_item *streaming, *header;
@@ -2059,7 +2049,6 @@ static int uvcg_streaming_class_drop_link(struct config_item *src,
struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
struct uvc_descriptor_header ***class_array;
struct uvcg_streaming_header *target_hdr;
- int ret = -EINVAL;
mutex_lock(su_mutex); /* for navigating configfs hierarchy */
@@ -2076,23 +2065,19 @@ static int uvcg_streaming_class_drop_link(struct config_item *src,
if (!class_array || !*class_array)
goto unlock;
- if (opts->refcnt) {
- ret = -EBUSY;
+ if (opts->refcnt)
goto unlock;
- }
target_hdr = to_uvcg_streaming_header(target);
--target_hdr->linked;
kfree(**class_array);
kfree(*class_array);
*class_array = NULL;
- ret = 0;
unlock:
mutex_unlock(&opts->lock);
out:
mutex_unlock(su_mutex);
- return ret;
}
static struct configfs_item_operations uvcg_streaming_class_item_ops = {
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index f4ccbd56f4d2..3e22b45687d3 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -53,8 +53,7 @@ uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
* V4L2 ioctls
*/
-struct uvc_format
-{
+struct uvc_format {
u8 bpp;
u32 fcc;
};
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 3d0d5d94a62f..0f01c04d7cbd 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -243,7 +243,7 @@ uvc_video_alloc_requests(struct uvc_video *video)
req_size = video->ep->maxpacket
* max_t(unsigned int, video->ep->maxburst, 1)
- * (video->ep->mult + 1);
+ * (video->ep->mult);
for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
video->req_buffer[i] = kmalloc(req_size, GFP_KERNEL);
diff --git a/drivers/usb/gadget/udc/at91_udc.h b/drivers/usb/gadget/udc/at91_udc.h
index 0a433e6b346b..9bbe72764f31 100644
--- a/drivers/usb/gadget/udc/at91_udc.h
+++ b/drivers/usb/gadget/udc/at91_udc.h
@@ -175,7 +175,7 @@ struct at91_request {
#endif
#define ERR(stuff...) pr_err("udc: " stuff)
-#define WARNING(stuff...) pr_warning("udc: " stuff)
+#define WARNING(stuff...) pr_warn("udc: " stuff)
#define INFO(stuff...) pr_info("udc: " stuff)
#define DBG(stuff...) pr_debug("udc: " stuff)
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 45bc997d0711..f3212db9bc37 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -529,7 +529,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);
- maxpacket = usb_endpoint_maxp(desc) & 0x7ff;
+ maxpacket = usb_endpoint_maxp(desc);
if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index)
|| ep->index == 0
@@ -573,7 +573,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
* Bits 11:12 specify number of _additional_
* transactions per microframe.
*/
- nr_trans = ((usb_endpoint_maxp(desc) >> 11) & 3) + 1;
+ nr_trans = usb_endpoint_maxp_mult(desc);
if (nr_trans > 3)
return -EINVAL;
@@ -1464,8 +1464,8 @@ restart:
pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
DBG(DBG_HW, "Packet length: %u\n", pkt_len);
if (pkt_len != sizeof(crq)) {
- pr_warning("udc: Invalid packet length %u "
- "(expected %zu)\n", pkt_len, sizeof(crq));
+ pr_warn("udc: Invalid packet length %u (expected %zu)\n",
+ pkt_len, sizeof(crq));
set_protocol_stall(udc, ep);
return;
}
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.c b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
index 4d5e9188beae..6e920f1dce02 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_cmd.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
@@ -182,7 +182,7 @@ int bdc_config_ep(struct bdc *bdc, struct bdc_ep *ep)
usb_endpoint_xfer_int(desc)) {
param2 |= si;
- mbs = (usb_endpoint_maxp(desc) & 0x1800) >> 11;
+ mbs = usb_endpoint_maxp_mult(desc);
param2 |= mbs << MB_SHIFT;
}
break;
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index ccaa74ab6c0e..ff1ef24d1777 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -446,7 +446,7 @@ static int setup_bd_list_xfr(struct bdc *bdc, struct bdc_req *req, int num_bds)
bd_xfr->start_bdi = bd_list->eqp_bdi;
bd = bdi_to_bd(ep, bd_list->eqp_bdi);
req_len = req->usb_req.length;
- maxp = usb_endpoint_maxp(ep->desc) & 0x7ff;
+ maxp = usb_endpoint_maxp(ep->desc);
tfs = roundup(req->usb_req.length, maxp);
tfs = tfs/maxp;
dev_vdbg(bdc->dev, "%s ep:%s num_bds:%d tfs:%d r_len:%d bd:%p\n",
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 77d07904f932..02b14e91ae6c 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -503,7 +503,7 @@ static int dummy_enable(struct usb_ep *_ep,
* maximum packet size.
* For SS devices the wMaxPacketSize is limited by 1024.
*/
- max = usb_endpoint_maxp(desc) & 0x7ff;
+ max = usb_endpoint_maxp(desc);
/* drivers must not request bad settings, since lower levels
* (hardware or its drivers) may not check. some endpoints
@@ -1483,8 +1483,7 @@ static int periodic_bytes(struct dummy *dum, struct dummy_ep *ep)
int tmp;
/* high bandwidth mode */
- tmp = usb_endpoint_maxp(ep->desc);
- tmp = (tmp >> 11) & 0x03;
+ tmp = usb_endpoint_maxp_mult(ep->desc);
tmp *= 8 /* applies to entire frame */;
limit += limit * tmp;
}
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index aab5221d6c2e..71094e479a96 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -585,8 +585,7 @@ static int fsl_ep_enable(struct usb_ep *_ep,
break;
case USB_ENDPOINT_XFER_ISOC:
/* Calculate transactions needed for high bandwidth iso */
- mult = (unsigned char)(1 + ((max >> 11) & 0x03));
- max = max & 0x7ff; /* bit 0~10 */
+ mult = usb_endpoint_maxp_mult(desc);
/* 3 transactions at most */
if (mult > 3)
goto en_done;
diff --git a/drivers/usb/gadget/udc/fsl_usb2_udc.h b/drivers/usb/gadget/udc/fsl_usb2_udc.h
index 84715625b2b3..e92b8408b6f6 100644
--- a/drivers/usb/gadget/udc/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/udc/fsl_usb2_udc.h
@@ -554,7 +554,7 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
#endif
#define ERR(stuff...) pr_err("udc: " stuff)
-#define WARNING(stuff...) pr_warning("udc: " stuff)
+#define WARNING(stuff...) pr_warn("udc: " stuff)
#define INFO(stuff...) pr_info("udc: " stuff)
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index 948845c90e47..42ff308578df 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -218,7 +218,7 @@ static int config_ep(struct fusb300_ep *ep,
(info.type == USB_ENDPOINT_XFER_ISOC)) {
info.interval = desc->bInterval;
if (info.type == USB_ENDPOINT_XFER_ISOC)
- info.bw_num = ((desc->wMaxPacketSize & 0x1800) >> 11);
+ info.bw_num = usb_endpoint_maxp_mult(desc);
}
ep_fifo_setting(fusb300, info);
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 39b7136d31d9..b16f8af34050 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -1539,7 +1539,7 @@ static int gr_ep_enable(struct usb_ep *_ep,
* additional transactions.
*/
max = 0x7ff & usb_endpoint_maxp(desc);
- nt = 0x3 & (usb_endpoint_maxp(desc) >> 11);
+ nt = usb_endpoint_maxp_mult(desc) - 1;
buffer_size = GR_BUFFER_SIZE(epctrl);
if (nt && (mode == 0 || mode == 2)) {
dev_err(dev->dev,
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index 6e977dc22570..de3e03483659 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -637,7 +637,7 @@ static void init_controller(struct m66592 *m66592)
clock = M66592_XTAL48;
break;
default:
- pr_warning("m66592-udc: xtal configuration error\n");
+ pr_warn("m66592-udc: xtal configuration error\n");
clock = 0;
}
@@ -649,7 +649,7 @@ static void init_controller(struct m66592 *m66592)
irq_sense = 0;
break;
default:
- pr_warning("m66592-udc: irq trigger config error\n");
+ pr_warn("m66592-udc: irq trigger config error\n");
irq_sense = 0;
}
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index b9e19a591322..8d726bd767fd 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -462,6 +462,12 @@ static int mv_u3d_req_to_trb(struct mv_u3d_req *req)
req->trb_head->trb_hw,
trb_num * sizeof(*trb_hw),
DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(u3d->gadget.dev.parent,
+ req->trb_head->trb_dma)) {
+ kfree(req->trb_head->trb_hw);
+ kfree(req->trb_head);
+ return -EFAULT;
+ }
req->chain = 1;
}
@@ -487,30 +493,32 @@ mv_u3d_start_queue(struct mv_u3d_ep *ep)
ret = usb_gadget_map_request(&u3d->gadget, &req->req,
mv_u3d_ep_dir(ep));
if (ret)
- return ret;
+ goto break_processing;
req->req.status = -EINPROGRESS;
req->req.actual = 0;
req->trb_count = 0;
- /* build trbs and push them to device queue */
- if (!mv_u3d_req_to_trb(req)) {
- ret = mv_u3d_queue_trb(ep, req);
- if (ret) {
- ep->processing = 0;
- return ret;
- }
- } else {
- ep->processing = 0;
+ /* build trbs */
+ ret = mv_u3d_req_to_trb(req);
+ if (ret) {
dev_err(u3d->dev, "%s, mv_u3d_req_to_trb fail\n", __func__);
- return -ENOMEM;
+ goto break_processing;
}
+ /* and push them to device queue */
+ ret = mv_u3d_queue_trb(ep, req);
+ if (ret)
+ goto break_processing;
+
/* irq handler advances the queue */
- if (req)
- list_add_tail(&req->queue, &ep->queue);
+ list_add_tail(&req->queue, &ep->queue);
return 0;
+
+break_processing:
+ ep->processing = 0;
+ return ret;
}
static int mv_u3d_ep_enable(struct usb_ep *_ep,
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index ce73b3552269..d82a91bddbd9 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -494,8 +494,7 @@ static int mv_ep_enable(struct usb_ep *_ep,
break;
case USB_ENDPOINT_XFER_ISOC:
/* Calculate transactions needed for high bandwidth iso */
- mult = (unsigned char)(1 + ((max >> 11) & 0x03));
- max = max & 0x7ff; /* bit 0~10 */
+ mult = usb_endpoint_maxp_mult(desc);
/* 3 transactions at most */
if (mult > 3)
goto en_done;
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 7c6113432093..078c91d546e0 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -202,10 +202,10 @@ net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
- max = usb_endpoint_maxp(desc) & 0x1fff;
+ max = usb_endpoint_maxp(desc);
spin_lock_irqsave(&dev->lock, flags);
- _ep->maxpacket = max & 0x7fff;
+ _ep->maxpacket = max;
ep->desc = desc;
/* net2272_ep_reset() has already been called */
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 61c938c36d88..85504419ab31 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -224,14 +224,14 @@ net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
}
/* sanity check ep-e/ep-f since their fifos are small */
- max = usb_endpoint_maxp(desc) & 0x1fff;
+ max = usb_endpoint_maxp(desc);
if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY)) {
ret = -ERANGE;
goto print_err;
}
spin_lock_irqsave(&dev->lock, flags);
- _ep->maxpacket = max & 0x7ff;
+ _ep->maxpacket = max;
ep->desc = desc;
/* ep_reset() has already been called */
@@ -1839,7 +1839,7 @@ static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
(t & USB_DIR_IN) ? "in" : "out",
type_string(d->bmAttributes),
- usb_endpoint_maxp(d) & 0x1fff,
+ usb_endpoint_maxp(d),
ep->dma ? "dma" : "pio", ep->fifo_size
);
} else /* ep0 should only have one transfer queued */
diff --git a/drivers/usb/gadget/udc/omap_udc.h b/drivers/usb/gadget/udc/omap_udc.h
index cfadeb5fc5de..26974196cf44 100644
--- a/drivers/usb/gadget/udc/omap_udc.h
+++ b/drivers/usb/gadget/udc/omap_udc.h
@@ -187,7 +187,7 @@ struct omap_udc {
#endif
#define ERR(stuff...) pr_err("udc: " stuff)
-#define WARNING(stuff...) pr_warning("udc: " stuff)
+#define WARNING(stuff...) pr_warn("udc: " stuff)
#define INFO(stuff...) pr_info("udc: " stuff)
#define DBG(stuff...) pr_debug("udc: " stuff)
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.h b/drivers/usb/gadget/udc/pxa25x_udc.h
index 4b8b72d7ab37..a458bec2536d 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.h
+++ b/drivers/usb/gadget/udc/pxa25x_udc.h
@@ -248,7 +248,7 @@ dump_state(struct pxa25x_udc *dev)
#define DBG(lvl, stuff...) do{if ((lvl) <= UDC_DEBUG) DMSG(stuff);}while(0)
#define ERR(stuff...) pr_err("udc: " stuff)
-#define WARNING(stuff...) pr_warning("udc: " stuff)
+#define WARNING(stuff...) pr_warn("udc: " stuff)
#define INFO(stuff...) pr_info("udc: " stuff)
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index eb3571ee59e3..4643a01262b4 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -1047,10 +1047,10 @@ static int s3c2410_udc_ep_enable(struct usb_ep *_ep,
if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
- max = usb_endpoint_maxp(desc) & 0x1fff;
+ max = usb_endpoint_maxp(desc);
local_irq_save(flags);
- _ep->maxpacket = max & 0x7ff;
+ _ep->maxpacket = max;
ep->ep.desc = desc;
ep->halted = 0;
ep->bEndpointAddress = desc->bEndpointAddress;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 0b80cee30da4..6361fc739306 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -479,9 +479,10 @@ config USB_OHCI_HCD_OMAP3
OMAP3 and later chips.
config USB_OHCI_HCD_DAVINCI
- bool "OHCI support for TI DaVinci DA8xx"
+ tristate "OHCI support for TI DaVinci DA8xx"
depends on ARCH_DAVINCI_DA8XX
- depends on USB_OHCI_HCD=y
+ depends on USB_OHCI_HCD
+ select PHY_DA8XX_USB
default y
help
Enables support for the DaVinci DA8xx integrated OHCI
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 6ef785b0ea8f..2644537b7bcf 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_USB_OHCI_HCD_AT91) += ohci-at91.o
obj-$(CONFIG_USB_OHCI_HCD_S3C2410) += ohci-s3c2410.o
obj-$(CONFIG_USB_OHCI_HCD_LPC32XX) += ohci-nxp.o
obj-$(CONFIG_USB_OHCI_HCD_PXA27X) += ohci-pxa27x.o
+obj-$(CONFIG_USB_OHCI_HCD_DAVINCI) += ohci-da8xx.o
obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 9f5ffb629973..91701cc68082 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -286,6 +286,9 @@ static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
if (pdata->has_fsl_erratum_a005275 == 1)
ehci->has_fsl_hs_errata = 1;
+ if (pdata->has_fsl_erratum_a005697 == 1)
+ ehci->has_fsl_susp_errata = 1;
+
if ((pdata->operating_mode == FSL_USB2_DR_HOST) ||
(pdata->operating_mode == FSL_USB2_DR_OTG))
if (ehci_fsl_setup_phy(hcd, pdata->phy_mode, 0))
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 74f62d68f013..df169c8e7225 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -310,6 +310,14 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
}
spin_unlock_irq(&ehci->lock);
+ if (changed && ehci_has_fsl_susp_errata(ehci))
+ /*
+ * Wait for at least 10 milliseconds to ensure the controller
+ * enters the suspend state before initiating a port resume
+ * using the Force Port Resume bit (not EHCI-compatible).
+ */
+ usleep_range(10000, 20000);
+
if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) {
/*
* Wait for HCD to enter low-power mode or for the bus
@@ -1200,6 +1208,12 @@ int ehci_hub_control(
wIndex, (temp1 & HOSTPC_PHCD) ?
"succeeded" : "failed");
}
+ if (ehci_has_fsl_susp_errata(ehci)) {
+ /* allow 10 ms for the HCD to enter suspend */
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ usleep_range(10000, 20000);
+ spin_lock_irqsave(&ehci->lock, flags);
+ }
set_bit(wIndex, &ehci->suspended_ports);
break;
case USB_PORT_FEAT_POWER:
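
The A-005697 handling above releases ehci->lock around the delay because usleep_range() may sleep, and the lock is taken with interrupts disabled. A condensed sketch of that pattern, with placeholder names (example_lock, example_wait_for_suspend) and the protected work elided:

#include <linux/delay.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_wait_for_suspend(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        /* ... update controller state under the lock ... */
        spin_unlock_irqrestore(&example_lock, flags);

        /* erratum A-005697: give the bus at least 10 ms to reach suspend */
        usleep_range(10000, 20000);

        spin_lock_irqsave(&example_lock, flags);
        /* ... continue with the port resume under the lock ... */
        spin_unlock_irqrestore(&example_lock, flags);
}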
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 3b3649d88c5f..93326974ff4b 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -258,9 +258,8 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
/* These workarounds need to be applied after ehci_setup() */
switch (pdev->vendor) {
case PCI_VENDOR_ID_NEC:
- ehci->need_io_watchdog = 0;
- break;
case PCI_VENDOR_ID_INTEL:
+ case PCI_VENDOR_ID_AMD:
ehci->need_io_watchdog = 0;
break;
case PCI_VENDOR_ID_NVIDIA:
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index eca3710d8fc4..8f3f055c05fa 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -550,11 +550,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
/*-------------------------------------------------------------------------*/
-// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
-#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
-// ... and packet size, for any kind of endpoint descriptor
-#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
-
/*
* reverse of qh_urb_transaction: free a list of TDs.
* used for cleanup after errors, before HC sees an URB's TDs.
@@ -651,7 +646,7 @@ qh_urb_transaction (
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
- maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
+ maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input);
/*
* buffer gets wrapped in one or more qtds;
@@ -770,9 +765,11 @@ qh_make (
gfp_t flags
) {
struct ehci_qh *qh = ehci_qh_alloc (ehci, flags);
+ struct usb_host_endpoint *ep;
u32 info1 = 0, info2 = 0;
int is_input, type;
int maxp = 0;
+ int mult;
struct usb_tt *tt = urb->dev->tt;
struct ehci_qh_hw *hw;
@@ -787,13 +784,15 @@ qh_make (
is_input = usb_pipein (urb->pipe);
type = usb_pipetype (urb->pipe);
- maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);
+ ep = usb_pipe_endpoint (urb->dev, urb->pipe);
+ maxp = usb_endpoint_maxp (&ep->desc);
+ mult = usb_endpoint_maxp_mult (&ep->desc);
/* 1024 byte maxpacket is a hardware ceiling. High bandwidth
* acts like up to 3KB, but is built from smaller packets.
*/
- if (max_packet(maxp) > 1024) {
- ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp));
+ if (maxp > 1024) {
+ ehci_dbg(ehci, "bogus qh maxpacket %d\n", maxp);
goto done;
}
@@ -809,8 +808,7 @@ qh_make (
unsigned tmp;
qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
- is_input, 0,
- hb_mult(maxp) * max_packet(maxp)));
+ is_input, 0, mult * maxp));
qh->ps.phase = NO_FRAME;
if (urb->dev->speed == USB_SPEED_HIGH) {
@@ -854,7 +852,7 @@ qh_make (
think_time = tt ? tt->think_time : 0;
qh->ps.tt_usecs = NS_TO_US(think_time +
usb_calc_bus_time (urb->dev->speed,
- is_input, 0, max_packet (maxp)));
+ is_input, 0, maxp));
if (urb->interval > ehci->periodic_size)
urb->interval = ehci->periodic_size;
qh->ps.period = urb->interval;
@@ -925,11 +923,11 @@ qh_make (
* to help them do so. So now people expect to use
* such nonconformant devices with Linux too; sigh.
*/
- info1 |= max_packet(maxp) << 16;
+ info1 |= maxp << 16;
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else { /* PIPE_INTERRUPT */
- info1 |= max_packet (maxp) << 16;
- info2 |= hb_mult (maxp) << 30;
+ info1 |= maxp << 16;
+ info2 |= mult << 30;
}
break;
default:
@@ -1221,7 +1219,7 @@ static int submit_single_step_set_feature(
token |= (1 /* "in" */ << 8); /*This is IN stage*/
- maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, 0));
+ maxpacket = usb_maxpacket(urb->dev, urb->pipe, 0);
qtd_fill(ehci, qtd, buf, len, token, maxpacket);
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 1dfe54f14737..980a6b3b2da2 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1064,11 +1064,10 @@ iso_stream_init(
/* knows about ITD vs SITD */
if (dev->speed == USB_SPEED_HIGH) {
- unsigned multi = hb_mult(maxp);
+ unsigned multi = usb_endpoint_maxp_mult(&urb->ep->desc);
stream->highspeed = 1;
- maxp = max_packet(maxp);
buf1 |= maxp;
maxp *= multi;
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
index e42a29e8e229..63b9d0c67963 100644
--- a/drivers/usb/host/ehci-w90x900.c
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -33,8 +33,7 @@ static const char hcd_name[] = "ehci-w90x900 ";
static struct hc_driver __read_mostly ehci_w90x900_hc_driver;
-static int usb_w90x900_probe(const struct hc_driver *driver,
- struct platform_device *pdev)
+static int ehci_w90x900_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
@@ -42,7 +41,8 @@ static int usb_w90x900_probe(const struct hc_driver *driver,
int retval = 0, irq;
unsigned long val;
- hcd = usb_create_hcd(driver, &pdev->dev, "w90x900 EHCI");
+ hcd = usb_create_hcd(&ehci_w90x900_hc_driver,
+ &pdev->dev, "w90x900 EHCI");
if (!hcd) {
retval = -ENOMEM;
goto err1;
@@ -63,9 +63,9 @@ static int usb_w90x900_probe(const struct hc_driver *driver,
HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
/* enable PHY 0,1,the regs only apply to w90p910
- * 0xA4,0xA8 were offsets of PHY0 and PHY1 controller of
- * w90p910 IC relative to ehci->regs.
- */
+ * 0xA4 and 0xA8 are the offsets of the PHY0 and PHY1 controllers
+ * of the w90p910 IC relative to ehci->regs.
+ */
val = __raw_readl(ehci->regs+PHY0_CTR);
val |= ENPHY;
__raw_writel(val, ehci->regs+PHY0_CTR);
@@ -92,26 +92,12 @@ err1:
return retval;
}
-static void usb_w90x900_remove(struct usb_hcd *hcd,
- struct platform_device *pdev)
-{
- usb_remove_hcd(hcd);
- usb_put_hcd(hcd);
-}
-
-static int ehci_w90x900_probe(struct platform_device *pdev)
-{
- if (usb_disabled())
- return -ENODEV;
-
- return usb_w90x900_probe(&ehci_w90x900_hc_driver, pdev);
-}
-
static int ehci_w90x900_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
- usb_w90x900_remove(hcd, pdev);
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
return 0;
}
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 3f3b74aeca97..a8e36170d8b8 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -219,6 +219,7 @@ struct ehci_hcd { /* one per controller */
unsigned no_selective_suspend:1;
unsigned has_fsl_port_bug:1; /* FreeScale */
unsigned has_fsl_hs_errata:1; /* Freescale HS quirk */
+ unsigned has_fsl_susp_errata:1; /* NXP SUSP quirk */
unsigned big_endian_mmio:1;
unsigned big_endian_desc:1;
unsigned big_endian_capbase:1;
@@ -710,6 +711,13 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
#endif
/*
+ * Some Freescale/NXP processors have an erratum (USB A-005697)
+ * that requires waiting 10 ms for the bus to enter suspend mode
+ * after the SUSP bit is set.
+ */
+#define ehci_has_fsl_susp_errata(e) ((e)->has_fsl_susp_errata)
+
+/*
* While most USB host controllers implement their registers in
* little-endian format, a minority (celleb companion chip) implement
* them in big endian format.
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index f07ccb25bc24..e90ddb530765 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -226,6 +226,8 @@ static int fsl_usb2_mph_dr_of_probe(struct platform_device *ofdev)
of_property_read_bool(np, "fsl,usb-erratum-a007792");
pdata->has_fsl_erratum_a005275 =
of_property_read_bool(np, "fsl,usb-erratum-a005275");
+ pdata->has_fsl_erratum_a005697 =
+ of_property_read_bool(np, "fsl,usb_erratum-a005697");
/*
* Determine whether phy_clk_valid needs to be checked
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 6cf82ee460a6..0f2b4b358e1a 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -147,7 +147,7 @@ static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362
if (epq)
DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
else
- pr_warning("%s: invalid PTD $%04x\n", __func__, offset);
+ pr_warn("%s: invalid PTD $%04x\n", __func__, offset);
return epq;
}
@@ -157,8 +157,9 @@ static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
int offset;
if (index * epq->blk_size > epq->buf_size) {
- pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
- epq->buf_size / epq->blk_size);
+ pr_warn("%s: Bad %s index %d(%d)\n",
+ __func__, epq->name, index,
+ epq->buf_size / epq->blk_size);
return -EINVAL;
}
offset = epq->buf_start + index * epq->blk_size;
@@ -902,8 +903,8 @@ static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
ptd_offset = next_ptd(epq, ep);
if (ptd_offset < 0) {
- pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
- ep->num_req, epq->name);
+ pr_warn("%s: req %d No more %s PTD buffers available\n",
+ __func__, ep->num_req, epq->name);
break;
}
}
@@ -973,8 +974,8 @@ static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done
break;
}
if (done_map)
- pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
- epq->skip_map);
+ pr_warn("%s: done_map not clear: %08lx:%08lx\n",
+ __func__, done_map, epq->skip_map);
atomic_dec(&epq->finishing);
}
@@ -1433,7 +1434,7 @@ static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
} else
DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
} else {
- pr_warning("%s: No EP in URB %p\n", __func__, urb);
+ pr_warn("%s: No EP in URB %p\n", __func__, urb);
retval = -EINVAL;
}
done:
@@ -1748,10 +1749,10 @@ static int isp1362_bus_suspend(struct usb_hcd *hcd)
/* FALL THROUGH */
case OHCI_USB_RESET:
status = -EBUSY;
- pr_warning("%s: needs reinit!\n", __func__);
+ pr_warn("%s: needs reinit!\n", __func__);
goto done;
case OHCI_USB_SUSPEND:
- pr_warning("%s: already suspended?\n", __func__);
+ pr_warn("%s: already suspended?\n", __func__);
goto done;
}
DBG(0, "%s: suspend root hub\n", __func__);
@@ -1839,7 +1840,7 @@ static int isp1362_bus_resume(struct usb_hcd *hcd)
isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
if (hcd->state == HC_STATE_RESUMING) {
- pr_warning("%s: duplicate resume\n", __func__);
+ pr_warn("%s: duplicate resume\n", __func__);
status = 0;
} else
switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
@@ -2474,8 +2475,8 @@ static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
__func__, offset);
break;
}
- pr_warning("%s: memory check with offset %02x ok after second read\n",
- __func__, offset);
+ pr_warn("%s: memory check with offset %02x ok after second read\n",
+ __func__, offset);
}
}
kfree(ref);
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index b38a228134df..be9e63836881 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -14,8 +14,8 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/platform_data/atmel.h>
#include <linux/io.h>
@@ -39,8 +39,8 @@
#define AT91_MAX_USBH_PORTS 3
struct at91_usbh_data {
- int vbus_pin[AT91_MAX_USBH_PORTS]; /* port power-control pin */
- int overcurrent_pin[AT91_MAX_USBH_PORTS];
+ struct gpio_desc *vbus_pin[AT91_MAX_USBH_PORTS];
+ struct gpio_desc *overcurrent_pin[AT91_MAX_USBH_PORTS];
u8 ports; /* number of ports on root hub */
u8 overcurrent_supported;
u8 vbus_pin_active_low[AT91_MAX_USBH_PORTS];
@@ -68,8 +68,6 @@ static const struct ohci_driver_overrides ohci_at91_drv_overrides __initconst =
.extra_priv_size = sizeof(struct ohci_at91_priv),
};
-extern int usb_disabled(void);
-
/*-------------------------------------------------------------------------*/
static void at91_start_clock(struct ohci_at91_priv *ohci_at91)
@@ -268,11 +266,8 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int
if (!valid_port(port))
return;
- if (!gpio_is_valid(pdata->vbus_pin[port]))
- return;
-
- gpio_set_value(pdata->vbus_pin[port],
- pdata->vbus_pin_active_low[port] ^ enable);
+ gpiod_set_value(pdata->vbus_pin[port],
+ pdata->vbus_pin_active_low[port] ^ enable);
}
static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
@@ -280,11 +275,8 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
if (!valid_port(port))
return -EINVAL;
- if (!gpio_is_valid(pdata->vbus_pin[port]))
- return -EINVAL;
-
- return gpio_get_value(pdata->vbus_pin[port]) ^
- pdata->vbus_pin_active_low[port];
+ return gpiod_get_value(pdata->vbus_pin[port]) ^
+ pdata->vbus_pin_active_low[port];
}
/*
@@ -474,16 +466,13 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
{
struct platform_device *pdev = data;
struct at91_usbh_data *pdata = dev_get_platdata(&pdev->dev);
- int val, gpio, port;
+ int val, port;
/* From the GPIO notifying the over-current situation, find
* out the corresponding port */
at91_for_each_port(port) {
- if (gpio_is_valid(pdata->overcurrent_pin[port]) &&
- gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
- gpio = pdata->overcurrent_pin[port];
+ if (gpiod_to_irq(pdata->overcurrent_pin[port]) == irq)
break;
- }
}
if (port == AT91_MAX_USBH_PORTS) {
@@ -491,7 +480,7 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
return IRQ_HANDLED;
}
- val = gpio_get_value(gpio);
+ val = gpiod_get_value(pdata->overcurrent_pin[port]);
/* When notified of an over-current situation, disable power
on the corresponding port, and mark this port in
@@ -522,9 +511,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct at91_usbh_data *pdata;
int i;
- int gpio;
int ret;
- enum of_gpio_flags flags;
+ int err;
u32 ports;
/* Right now device-tree probed devices don't get dma_mask set.
@@ -545,38 +533,16 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
pdata->ports = ports;
at91_for_each_port(i) {
- /*
- * do not configure PIO if not in relation with
- * real USB port on board
- */
- if (i >= pdata->ports) {
- pdata->vbus_pin[i] = -EINVAL;
- pdata->overcurrent_pin[i] = -EINVAL;
+ pdata->vbus_pin[i] = devm_gpiod_get_optional(&pdev->dev,
+ "atmel,vbus-gpio",
+ GPIOD_IN);
+ if (IS_ERR(pdata->vbus_pin[i])) {
+ err = PTR_ERR(pdata->vbus_pin[i]);
+ dev_err(&pdev->dev, "unable to claim gpio \"vbus\": %d\n", err);
continue;
}
- gpio = of_get_named_gpio_flags(np, "atmel,vbus-gpio", i,
- &flags);
- pdata->vbus_pin[i] = gpio;
- if (!gpio_is_valid(gpio))
- continue;
- pdata->vbus_pin_active_low[i] = flags & OF_GPIO_ACTIVE_LOW;
-
- ret = gpio_request(gpio, "ohci_vbus");
- if (ret) {
- dev_err(&pdev->dev,
- "can't request vbus gpio %d\n", gpio);
- continue;
- }
- ret = gpio_direction_output(gpio,
- !pdata->vbus_pin_active_low[i]);
- if (ret) {
- dev_err(&pdev->dev,
- "can't put vbus gpio %d as output %d\n",
- gpio, !pdata->vbus_pin_active_low[i]);
- gpio_free(gpio);
- continue;
- }
+ pdata->vbus_pin_active_low[i] = gpiod_get_value(pdata->vbus_pin[i]);
ohci_at91_usb_set_power(pdata, i, 1);
}
@@ -586,37 +552,21 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
break;
pdata->overcurrent_pin[i] =
- of_get_named_gpio_flags(np, "atmel,oc-gpio", i, &flags);
-
- if (!gpio_is_valid(pdata->overcurrent_pin[i]))
- continue;
- gpio = pdata->overcurrent_pin[i];
-
- ret = gpio_request(gpio, "ohci_overcurrent");
- if (ret) {
- dev_err(&pdev->dev,
- "can't request overcurrent gpio %d\n",
- gpio);
+ devm_gpiod_get_optional(&pdev->dev,
+ "atmel,oc-gpio", GPIOD_IN);
+ if (IS_ERR(pdata->overcurrent_pin[i])) {
+ err = PTR_ERR(pdata->overcurrent_pin[i]);
+ dev_err(&pdev->dev, "unable to claim gpio \"overcurrent\": %d\n", err);
continue;
}
- ret = gpio_direction_input(gpio);
- if (ret) {
- dev_err(&pdev->dev,
- "can't configure overcurrent gpio %d as input\n",
- gpio);
- gpio_free(gpio);
- continue;
- }
-
- ret = request_irq(gpio_to_irq(gpio),
- ohci_hcd_at91_overcurrent_irq,
- IRQF_SHARED, "ohci_overcurrent", pdev);
- if (ret) {
- gpio_free(gpio);
- dev_err(&pdev->dev,
- "can't get gpio IRQ for overcurrent\n");
- }
+ ret = devm_request_irq(&pdev->dev,
+ gpiod_to_irq(pdata->overcurrent_pin[i]),
+ ohci_hcd_at91_overcurrent_irq,
+ IRQF_SHARED,
+ "ohci_overcurrent", pdev);
+ if (ret)
+ dev_info(&pdev->dev, "failed to request gpio \"overcurrent\" IRQ\n");
}
device_init_wakeup(&pdev->dev, 1);
@@ -629,19 +579,8 @@ static int ohci_hcd_at91_drv_remove(struct platform_device *pdev)
int i;
if (pdata) {
- at91_for_each_port(i) {
- if (!gpio_is_valid(pdata->vbus_pin[i]))
- continue;
+ at91_for_each_port(i)
ohci_at91_usb_set_power(pdata, i, 0);
- gpio_free(pdata->vbus_pin[i]);
- }
-
- at91_for_each_port(i) {
- if (!gpio_is_valid(pdata->overcurrent_pin[i]))
- continue;
- free_irq(gpio_to_irq(pdata->overcurrent_pin[i]), pdev);
- gpio_free(pdata->overcurrent_pin[i]);
- }
}
device_init_wakeup(&pdev->dev, 0);
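
The ohci-at91 conversion above moves from the integer GPIO calls to GPIO descriptors. A minimal probe-time sketch of that consumer pattern, assuming a single optional VBUS and over-current pin; the "vbus"/"overcurrent" connection ids, the example_* names and the trimmed error handling are placeholders, not the driver's real ones:

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_oc_irq(int irq, void *data)
{
        /* gpiod_get_value() returns the logical level; active-low is handled by the core */
        return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
        struct gpio_desc *vbus, *oc;
        int ret;

        /* Optional GPIOs: NULL (not an error) when the property is absent */
        vbus = devm_gpiod_get_optional(&pdev->dev, "vbus", GPIOD_OUT_LOW);
        if (IS_ERR(vbus))
                return PTR_ERR(vbus);

        oc = devm_gpiod_get_optional(&pdev->dev, "overcurrent", GPIOD_IN);
        if (IS_ERR(oc))
                return PTR_ERR(oc);

        if (oc) {
                ret = devm_request_irq(&pdev->dev, gpiod_to_irq(oc),
                                       example_oc_irq, IRQF_SHARED,
                                       "example_overcurrent", pdev);
                if (ret)
                        dev_info(&pdev->dev, "no over-current IRQ\n");
        }

        gpiod_set_value(vbus, 1);       /* power the port; no-op if vbus is NULL */
        return 0;
}

With devm_-managed descriptors and IRQs, the explicit gpio_free()/free_irq() calls removed from the remove() path above are no longer needed.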
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index e5c33bc98ea4..05da2cb59612 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -11,62 +11,192 @@
* kind, whether express or implied.
*/
+#include <linux/clk.h>
+#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/clk.h>
-
-#include <mach/da8xx.h>
+#include <linux/phy/phy.h>
#include <linux/platform_data/usb-davinci.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <asm/unaligned.h>
-#ifndef CONFIG_ARCH_DAVINCI_DA8XX
-#error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX."
-#endif
+#include "ohci.h"
-#define CFGCHIP2 DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG)
+#define DRIVER_DESC "DA8XX"
+#define DRV_NAME "ohci-da8xx"
-static struct clk *usb11_clk;
-static struct clk *usb20_clk;
+static struct hc_driver __read_mostly ohci_da8xx_hc_driver;
+
+static int (*orig_ohci_hub_control)(struct usb_hcd *hcd, u16 typeReq,
+ u16 wValue, u16 wIndex, char *buf, u16 wLength);
+static int (*orig_ohci_hub_status_data)(struct usb_hcd *hcd, char *buf);
+
+struct da8xx_ohci_hcd {
+ struct usb_hcd *hcd;
+ struct clk *usb11_clk;
+ struct phy *usb11_phy;
+ struct regulator *vbus_reg;
+ struct notifier_block nb;
+ unsigned int reg_enabled;
+};
+
+#define to_da8xx_ohci(hcd) (struct da8xx_ohci_hcd *)(hcd_to_ohci(hcd)->priv)
/* Over-current indicator change bitmask */
static volatile u16 ocic_mask;
-static void ohci_da8xx_clock(int on)
+static int ohci_da8xx_enable(struct usb_hcd *hcd)
{
- u32 cfgchip2;
-
- cfgchip2 = __raw_readl(CFGCHIP2);
- if (on) {
- clk_enable(usb11_clk);
-
- /*
- * If USB 1.1 reference clock is sourced from USB 2.0 PHY, we
- * need to enable the USB 2.0 module clocking, start its PHY,
- * and not allow it to stop the clock during USB 2.0 suspend.
- */
- if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX)) {
- clk_enable(usb20_clk);
-
- cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN);
- cfgchip2 |= CFGCHIP2_PHY_PLLON;
- __raw_writel(cfgchip2, CFGCHIP2);
-
- pr_info("Waiting for USB PHY clock good...\n");
- while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD))
- cpu_relax();
- }
+ struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+ int ret;
+
+ ret = clk_prepare_enable(da8xx_ohci->usb11_clk);
+ if (ret)
+ return ret;
+
+ ret = phy_init(da8xx_ohci->usb11_phy);
+ if (ret)
+ goto err_phy_init;
+
+ ret = phy_power_on(da8xx_ohci->usb11_phy);
+ if (ret)
+ goto err_phy_power_on;
+
+ return 0;
+
+err_phy_power_on:
+ phy_exit(da8xx_ohci->usb11_phy);
+err_phy_init:
+ clk_disable_unprepare(da8xx_ohci->usb11_clk);
+
+ return ret;
+}
+
+static void ohci_da8xx_disable(struct usb_hcd *hcd)
+{
+ struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+
+ phy_power_off(da8xx_ohci->usb11_phy);
+ phy_exit(da8xx_ohci->usb11_phy);
+ clk_disable_unprepare(da8xx_ohci->usb11_clk);
+}
- /* Enable USB 1.1 PHY */
- cfgchip2 |= CFGCHIP2_USB1SUSPENDM;
- } else {
- clk_disable(usb11_clk);
- if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX))
- clk_disable(usb20_clk);
+static int ohci_da8xx_set_power(struct usb_hcd *hcd, int on)
+{
+ struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+ int ret;
+
+ if (hub && hub->set_power)
+ return hub->set_power(1, on);
+
+ if (!da8xx_ohci->vbus_reg)
+ return 0;
- /* Disable USB 1.1 PHY */
- cfgchip2 &= ~CFGCHIP2_USB1SUSPENDM;
+ if (on && !da8xx_ohci->reg_enabled) {
+ ret = regulator_enable(da8xx_ohci->vbus_reg);
+ if (ret) {
+ dev_err(dev, "Failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+ da8xx_ohci->reg_enabled = 1;
+
+ } else if (!on && da8xx_ohci->reg_enabled) {
+ ret = regulator_disable(da8xx_ohci->vbus_reg);
+ if (ret) {
+ dev_err(dev, "Failed to disable regulator: %d\n", ret);
+ return ret;
+ }
+ da8xx_ohci->reg_enabled = 0;
}
- __raw_writel(cfgchip2, CFGCHIP2);
+
+ return 0;
+}
+
+static int ohci_da8xx_get_power(struct usb_hcd *hcd)
+{
+ struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+
+ if (hub && hub->get_power)
+ return hub->get_power(1);
+
+ if (da8xx_ohci->vbus_reg)
+ return regulator_is_enabled(da8xx_ohci->vbus_reg);
+
+ return 1;
+}
+
+static int ohci_da8xx_get_oci(struct usb_hcd *hcd)
+{
+ struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+ unsigned int flags;
+ int ret;
+
+ if (hub && hub->get_oci)
+ return hub->get_oci(1);
+
+ if (!da8xx_ohci->vbus_reg)
+ return 0;
+
+ ret = regulator_get_error_flags(da8xx_ohci->vbus_reg, &flags);
+ if (ret)
+ return ret;
+
+ if (flags & REGULATOR_ERROR_OVER_CURRENT)
+ return 1;
+
+ return 0;
+}
+
+static int ohci_da8xx_has_set_power(struct usb_hcd *hcd)
+{
+ struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+
+ if (hub && hub->set_power)
+ return 1;
+
+ if (da8xx_ohci->vbus_reg)
+ return 1;
+
+ return 0;
+}
+
+static int ohci_da8xx_has_oci(struct usb_hcd *hcd)
+{
+ struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+
+ if (hub && hub->get_oci)
+ return 1;
+
+ if (da8xx_ohci->vbus_reg)
+ return 1;
+
+ return 0;
+}
+
+static int ohci_da8xx_has_potpgt(struct usb_hcd *hcd)
+{
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+
+ if (hub && hub->potpgt)
+ return 1;
+
+ return 0;
}
/*
@@ -82,7 +212,51 @@ static void ohci_da8xx_ocic_handler(struct da8xx_ohci_root_hub *hub,
hub->set_power(port, 0);
}
-static int ohci_da8xx_init(struct usb_hcd *hcd)
+static int ohci_da8xx_regulator_event(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct da8xx_ohci_hcd *da8xx_ohci =
+ container_of(nb, struct da8xx_ohci_hcd, nb);
+
+ if (event & REGULATOR_EVENT_OVER_CURRENT) {
+ ocic_mask |= 1 << 1;
+ ohci_da8xx_set_power(da8xx_ohci->hcd, 0);
+ }
+
+ return 0;
+}
+
+static int ohci_da8xx_register_notify(struct usb_hcd *hcd)
+{
+ struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+ int ret = 0;
+
+ if (hub && hub->ocic_notify) {
+ ret = hub->ocic_notify(ohci_da8xx_ocic_handler);
+ } else if (da8xx_ohci->vbus_reg) {
+ da8xx_ohci->nb.notifier_call = ohci_da8xx_regulator_event;
+ ret = devm_regulator_register_notifier(da8xx_ohci->vbus_reg,
+ &da8xx_ohci->nb);
+ }
+
+ if (ret)
+ dev_err(dev, "Failed to register notifier: %d\n", ret);
+
+ return ret;
+}
+
+static void ohci_da8xx_unregister_notify(struct usb_hcd *hcd)
+{
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+
+ if (hub && hub->ocic_notify)
+ hub->ocic_notify(NULL);
+}
+
+static int ohci_da8xx_reset(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
@@ -92,7 +266,9 @@ static int ohci_da8xx_init(struct usb_hcd *hcd)
dev_dbg(dev, "starting USB controller\n");
- ohci_da8xx_clock(1);
+ result = ohci_da8xx_enable(hcd);
+ if (result < 0)
+ return result;
/*
* DA8xx only have 1 port connected to the pins but the HC root hub
@@ -100,9 +276,11 @@ static int ohci_da8xx_init(struct usb_hcd *hcd)
*/
ohci->num_ports = 1;
- result = ohci_init(ohci);
- if (result < 0)
+ result = ohci_setup(hcd);
+ if (result < 0) {
+ ohci_da8xx_disable(hcd);
return result;
+ }
/*
* Since we're providing a board-specific root hub port power control
@@ -111,45 +289,29 @@ static int ohci_da8xx_init(struct usb_hcd *hcd)
* the correct hub descriptor...
*/
rh_a = ohci_readl(ohci, &ohci->regs->roothub.a);
- if (hub->set_power) {
+ if (ohci_da8xx_has_set_power(hcd)) {
rh_a &= ~RH_A_NPS;
rh_a |= RH_A_PSM;
}
- if (hub->get_oci) {
+ if (ohci_da8xx_has_oci(hcd)) {
rh_a &= ~RH_A_NOCP;
rh_a |= RH_A_OCPM;
}
- rh_a &= ~RH_A_POTPGT;
- rh_a |= hub->potpgt << 24;
+ if (ohci_da8xx_has_potpgt(hcd)) {
+ rh_a &= ~RH_A_POTPGT;
+ rh_a |= hub->potpgt << 24;
+ }
ohci_writel(ohci, rh_a, &ohci->regs->roothub.a);
return result;
}
-static void ohci_da8xx_stop(struct usb_hcd *hcd)
-{
- ohci_stop(hcd);
- ohci_da8xx_clock(0);
-}
-
-static int ohci_da8xx_start(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int result;
-
- result = ohci_run(ohci);
- if (result < 0)
- ohci_da8xx_stop(hcd);
-
- return result;
-}
-
/*
* Update the status data from the hub with the over-current indicator change.
*/
static int ohci_da8xx_hub_status_data(struct usb_hcd *hcd, char *buf)
{
- int length = ohci_hub_status_data(hcd, buf);
+ int length = orig_ohci_hub_status_data(hcd, buf);
/* See if we have OCIC bit set on port 1 */
if (ocic_mask & (1 << 1)) {
@@ -171,7 +333,6 @@ static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
int temp;
switch (typeReq) {
@@ -185,11 +346,11 @@ static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp = roothub_portstatus(hcd_to_ohci(hcd), wIndex - 1);
/* The port power status (PPS) bit defaults to 1 */
- if (hub->get_power && hub->get_power(wIndex) == 0)
+ if (!ohci_da8xx_get_power(hcd))
temp &= ~RH_PS_PPS;
/* The port over-current indicator (POCI) bit is always 0 */
- if (hub->get_oci && hub->get_oci(wIndex) > 0)
+ if (ohci_da8xx_get_oci(hcd) > 0)
temp |= RH_PS_POCI;
/* The over-current indicator change (OCIC) bit is 0 too */
@@ -214,10 +375,7 @@ check_port:
dev_dbg(dev, "%sPortFeature(%u): %s\n",
temp ? "Set" : "Clear", wIndex, "POWER");
- if (!hub->set_power)
- return -EPIPE;
-
- return hub->set_power(wIndex, temp) ? -EPIPE : 0;
+ return ohci_da8xx_set_power(hcd, temp) ? -EPIPE : 0;
case USB_PORT_FEAT_C_OVER_CURRENT:
dev_dbg(dev, "%sPortFeature(%u): %s\n",
temp ? "Set" : "Clear", wIndex,
@@ -231,86 +389,61 @@ check_port:
}
}
- return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+ return orig_ohci_hub_control(hcd, typeReq, wValue,
+ wIndex, buf, wLength);
}
-static const struct hc_driver ohci_da8xx_hc_driver = {
- .description = hcd_name,
- .product_desc = "DA8xx OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .reset = ohci_da8xx_init,
- .start = ohci_da8xx_start,
- .stop = ohci_da8xx_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_da8xx_hub_status_data,
- .hub_control = ohci_da8xx_hub_control,
-
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_OF
+static const struct of_device_id da8xx_ohci_ids[] = {
+ { .compatible = "ti,da830-ohci" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, da8xx_ohci_ids);
+#endif
-
-/**
- * usb_hcd_da8xx_probe - initialize DA8xx-based HCDs
- * Context: !in_interrupt()
- *
- * Allocates basic resources for this USB host controller, and
- * then invokes the start() method for the HCD associated with it
- * through the hotplug entry's driver_data.
- */
-static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
- struct platform_device *pdev)
+static int ohci_da8xx_probe(struct platform_device *pdev)
{
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(&pdev->dev);
+ struct da8xx_ohci_hcd *da8xx_ohci;
struct usb_hcd *hcd;
struct resource *mem;
int error, irq;
+ hcd = usb_create_hcd(&ohci_da8xx_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd)
+ return -ENOMEM;
- if (hub == NULL)
- return -ENODEV;
+ da8xx_ohci = to_da8xx_ohci(hcd);
+ da8xx_ohci->hcd = hcd;
- usb11_clk = devm_clk_get(&pdev->dev, "usb11");
- if (IS_ERR(usb11_clk))
- return PTR_ERR(usb11_clk);
+ da8xx_ohci->usb11_clk = devm_clk_get(&pdev->dev, "usb11");
+ if (IS_ERR(da8xx_ohci->usb11_clk)) {
+ error = PTR_ERR(da8xx_ohci->usb11_clk);
+ if (error != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to get clock.\n");
+ goto err;
+ }
- usb20_clk = devm_clk_get(&pdev->dev, "usb20");
- if (IS_ERR(usb20_clk))
- return PTR_ERR(usb20_clk);
+ da8xx_ohci->usb11_phy = devm_phy_get(&pdev->dev, "usb-phy");
+ if (IS_ERR(da8xx_ohci->usb11_phy)) {
+ error = PTR_ERR(da8xx_ohci->usb11_phy);
+ if (error != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to get phy.\n");
+ goto err;
+ }
- hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
- if (!hcd)
- return -ENOMEM;
+ da8xx_ohci->vbus_reg = devm_regulator_get_optional(&pdev->dev, "vbus");
+ if (IS_ERR(da8xx_ohci->vbus_reg)) {
+ error = PTR_ERR(da8xx_ohci->vbus_reg);
+ if (error == -ENODEV) {
+ da8xx_ohci->vbus_reg = NULL;
+ } else if (error == -EPROBE_DEFER) {
+ goto err;
+ } else {
+ dev_err(&pdev->dev, "Failed to get regulator\n");
+ goto err;
+ }
+ }
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hcd->regs = devm_ioremap_resource(&pdev->dev, mem);
@@ -321,60 +454,38 @@ static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
hcd->rsrc_start = mem->start;
hcd->rsrc_len = resource_size(mem);
- ohci_hcd_init(hcd_to_ohci(hcd));
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
error = -ENODEV;
goto err;
}
+
error = usb_add_hcd(hcd, irq, 0);
if (error)
goto err;
device_wakeup_enable(hcd->self.controller);
- if (hub->ocic_notify) {
- error = hub->ocic_notify(ohci_da8xx_ocic_handler);
- if (!error)
- return 0;
- }
+ error = ohci_da8xx_register_notify(hcd);
+ if (error)
+ goto err_remove_hcd;
+
+ return 0;
+err_remove_hcd:
usb_remove_hcd(hcd);
err:
usb_put_hcd(hcd);
return error;
}
-/**
- * usb_hcd_da8xx_remove - shutdown processing for DA8xx-based HCDs
- * @dev: USB Host Controller being removed
- * Context: !in_interrupt()
- *
- * Reverses the effect of usb_hcd_da8xx_probe(), first invoking
- * the HCD's stop() method. It is always called from a thread
- * context, normally "rmmod", "apmd", or something similar.
- */
-static inline void
-usb_hcd_da8xx_remove(struct usb_hcd *hcd, struct platform_device *pdev)
+static int ohci_da8xx_remove(struct platform_device *pdev)
{
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(&pdev->dev);
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
- hub->ocic_notify(NULL);
+ ohci_da8xx_unregister_notify(hcd);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
-}
-
-static int ohci_hcd_da8xx_drv_probe(struct platform_device *dev)
-{
- return usb_hcd_da8xx_probe(&ohci_da8xx_hc_driver, dev);
-}
-
-static int ohci_hcd_da8xx_drv_remove(struct platform_device *dev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(dev);
-
- usb_hcd_da8xx_remove(hcd, dev);
return 0;
}
@@ -397,7 +508,7 @@ static int ohci_da8xx_suspend(struct platform_device *pdev,
if (ret)
return ret;
- ohci_da8xx_clock(0);
+ ohci_da8xx_disable(hcd);
hcd->state = HC_STATE_SUSPENDED;
return ret;
@@ -407,32 +518,77 @@ static int ohci_da8xx_resume(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int ret;
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
- ohci_da8xx_clock(1);
- dev->dev.power.power_state = PMSG_ON;
- usb_hcd_resume_root_hub(hcd);
+ ret = ohci_da8xx_enable(hcd);
+ if (ret)
+ return ret;
+
+ ohci_resume(hcd, false);
+
return 0;
}
#endif
+static const struct ohci_driver_overrides da8xx_overrides __initconst = {
+ .reset = ohci_da8xx_reset,
+ .extra_priv_size = sizeof(struct da8xx_ohci_hcd),
+};
+
/*
* Driver definition to register with platform structure.
*/
static struct platform_driver ohci_hcd_da8xx_driver = {
- .probe = ohci_hcd_da8xx_drv_probe,
- .remove = ohci_hcd_da8xx_drv_remove,
+ .probe = ohci_da8xx_probe,
+ .remove = ohci_da8xx_remove,
.shutdown = usb_hcd_platform_shutdown,
#ifdef CONFIG_PM
.suspend = ohci_da8xx_suspend,
.resume = ohci_da8xx_resume,
#endif
.driver = {
- .name = "ohci",
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(da8xx_ohci_ids),
},
};
-MODULE_ALIAS("platform:ohci");
+static int __init ohci_da8xx_init(void)
+{
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", DRV_NAME);
+ ohci_init_driver(&ohci_da8xx_hc_driver, &da8xx_overrides);
+
+ /*
+ * The Davinci da8xx HW has some unusual quirks, which require
+ * da8xx-specific workarounds. We override certain hc_driver
+ * functions here to achieve that. We explicitly do not enhance
+ * ohci_driver_overrides to allow this more easily, since this
+ * is an unusual case, and we don't want to encourage others to
+ * override these functions by making it too easy.
+ */
+
+ orig_ohci_hub_control = ohci_da8xx_hc_driver.hub_control;
+ orig_ohci_hub_status_data = ohci_da8xx_hc_driver.hub_status_data;
+
+ ohci_da8xx_hc_driver.hub_status_data = ohci_da8xx_hub_status_data;
+ ohci_da8xx_hc_driver.hub_control = ohci_da8xx_hub_control;
+
+ return platform_driver_register(&ohci_hcd_da8xx_driver);
+}
+module_init(ohci_da8xx_init);
+
+static void __exit ohci_da8xx_exit(void)
+{
+ platform_driver_unregister(&ohci_hcd_da8xx_driver);
+}
+module_exit(ohci_da8xx_exit);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
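
The reworked ohci-da8xx glue above models port power with an optional "vbus" regulator and derives over-current status from it. A condensed sketch of that supply handling, with example_* placeholder names and error handling trimmed; treating -ENODEV as "no regulator present" mirrors the hunk above:

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int example_oc_event(struct notifier_block *nb,
                            unsigned long event, void *data)
{
        if (event & REGULATOR_EVENT_OVER_CURRENT)
                pr_warn("example: vbus over-current\n");    /* cut port power here */
        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_oc_event,
};

static int example_vbus_setup(struct platform_device *pdev)
{
        struct regulator *vbus;
        unsigned int flags;
        int ret;

        vbus = devm_regulator_get_optional(&pdev->dev, "vbus");
        if (IS_ERR(vbus))
                return PTR_ERR(vbus) == -ENODEV ? 0 : PTR_ERR(vbus);

        ret = regulator_enable(vbus);
        if (ret)
                return ret;

        /* Either poll the error flags for the root-hub port status ... */
        if (!regulator_get_error_flags(vbus, &flags) &&
            (flags & REGULATOR_ERROR_OVER_CURRENT))
                pr_warn("example: vbus over-current at enable\n");

        /* ... or be notified asynchronously, as the driver above does. */
        return devm_regulator_register_notifier(vbus, &example_nb);
}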
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 86612ac3fda2..8685cf3e6292 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1219,11 +1219,6 @@ MODULE_LICENSE ("GPL");
#define SA1111_DRIVER ohci_hcd_sa1111_driver
#endif
-#ifdef CONFIG_USB_OHCI_HCD_DAVINCI
-#include "ohci-da8xx.c"
-#define DAVINCI_PLATFORM_DRIVER ohci_hcd_da8xx_driver
-#endif
-
#ifdef CONFIG_USB_OHCI_HCD_PPC_OF
#include "ohci-ppc-of.c"
#define OF_PLATFORM_DRIVER ohci_hcd_ppc_of_driver
@@ -1303,19 +1298,9 @@ static int __init ohci_hcd_mod_init(void)
goto error_tmio;
#endif
-#ifdef DAVINCI_PLATFORM_DRIVER
- retval = platform_driver_register(&DAVINCI_PLATFORM_DRIVER);
- if (retval < 0)
- goto error_davinci;
-#endif
-
return retval;
/* Error path */
-#ifdef DAVINCI_PLATFORM_DRIVER
- platform_driver_unregister(&DAVINCI_PLATFORM_DRIVER);
- error_davinci:
-#endif
#ifdef TMIO_OHCI_DRIVER
platform_driver_unregister(&TMIO_OHCI_DRIVER);
error_tmio:
@@ -1351,9 +1336,6 @@ module_init(ohci_hcd_mod_init);
static void __exit ohci_hcd_mod_exit(void)
{
-#ifdef DAVINCI_PLATFORM_DRIVER
- platform_driver_unregister(&DAVINCI_PLATFORM_DRIVER);
-#endif
#ifdef TMIO_OHCI_DRIVER
platform_driver_unregister(&TMIO_OHCI_DRIVER);
#endif
diff --git a/drivers/usb/host/ohci-mem.c b/drivers/usb/host/ohci-mem.c
index c9e315c6808a..ed8a762b8670 100644
--- a/drivers/usb/host/ohci-mem.c
+++ b/drivers/usb/host/ohci-mem.c
@@ -88,10 +88,9 @@ td_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
dma_addr_t dma;
struct td *td;
- td = dma_pool_alloc (hc->td_cache, mem_flags, &dma);
+ td = dma_pool_zalloc (hc->td_cache, mem_flags, &dma);
if (td) {
/* in case hc fetches it, make it look dead */
- memset (td, 0, sizeof *td);
td->hwNextTD = cpu_to_hc32 (hc, dma);
td->td_dma = dma;
/* hashed in td_fill */
@@ -122,9 +121,8 @@ ed_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
dma_addr_t dma;
struct ed *ed;
- ed = dma_pool_alloc (hc->ed_cache, mem_flags, &dma);
+ ed = dma_pool_zalloc (hc->ed_cache, mem_flags, &dma);
if (ed) {
- memset (ed, 0, sizeof (*ed));
INIT_LIST_HEAD (&ed->td_list);
ed->dma = dma;
}
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index b7d4756232ae..6df8e2ed40fd 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -56,8 +56,6 @@ static struct hc_driver __read_mostly ohci_nxp_hc_driver;
static struct i2c_client *isp1301_i2c_client;
-extern int usb_disabled(void);
-
static struct clk *usb_host_clk;
static void isp1301_configure_lpc32xx(void)
@@ -127,6 +125,7 @@ static inline void isp1301_vbus_off(void)
static void ohci_nxp_start_hc(void)
{
unsigned long tmp = __raw_readl(USB_OTG_STAT_CONTROL) | HOST_EN;
+
__raw_writel(tmp, USB_OTG_STAT_CONTROL);
isp1301_vbus_on();
}
@@ -134,6 +133,7 @@ static void ohci_nxp_start_hc(void)
static void ohci_nxp_stop_hc(void)
{
unsigned long tmp;
+
isp1301_vbus_off();
tmp = __raw_readl(USB_OTG_STAT_CONTROL) & ~HOST_EN;
__raw_writel(tmp, USB_OTG_STAT_CONTROL);
@@ -155,9 +155,8 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
}
isp1301_i2c_client = isp1301_get_client(isp1301_node);
- if (!isp1301_i2c_client) {
+ if (!isp1301_i2c_client)
return -EPROBE_DEFER;
- }
ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 495c1454b9e8..b08e385399b9 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -68,9 +68,6 @@ static inline int tps65010_set_gpio_out_value(unsigned gpio, unsigned value)
#endif
-extern int usb_disabled(void);
-extern int ocpi_enable(void);
-
static struct clk *usb_host_ck;
static struct clk *usb_dc_ck;
@@ -296,15 +293,14 @@ static int ohci_omap_reset(struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
/**
- * usb_hcd_omap_probe - initialize OMAP-based HCDs
+ * ohci_hcd_omap_probe - initialize OMAP-based HCDs
* Context: !in_interrupt()
*
* Allocates basic resources for this USB host controller, and
* then invokes the start() method for the HCD associated with it
* through the hotplug entry's driver_data.
*/
-static int usb_hcd_omap_probe (const struct hc_driver *driver,
- struct platform_device *pdev)
+static int ohci_hcd_omap_probe(struct platform_device *pdev)
{
int retval, irq;
struct usb_hcd *hcd = 0;
@@ -336,7 +332,8 @@ static int usb_hcd_omap_probe (const struct hc_driver *driver,
}
- hcd = usb_create_hcd (driver, &pdev->dev, dev_name(&pdev->dev));
+ hcd = usb_create_hcd(&ohci_omap_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
if (!hcd) {
retval = -ENOMEM;
goto err0;
@@ -384,17 +381,18 @@ err0:
/* may be called with controller, bus, and devices active */
/**
- * usb_hcd_omap_remove - shutdown processing for OMAP-based HCDs
+ * ohci_hcd_omap_remove - shutdown processing for OMAP-based HCDs
* @dev: USB Host Controller being removed
* Context: !in_interrupt()
*
- * Reverses the effect of usb_hcd_omap_probe(), first invoking
+ * Reverses the effect of ohci_hcd_omap_probe(), first invoking
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*/
-static inline void
-usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev)
+static int ohci_hcd_omap_remove(struct platform_device *pdev)
{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
dev_dbg(hcd->self.controller, "stopping USB Controller\n");
usb_remove_hcd(hcd);
omap_ohci_clock_power(0);
@@ -409,21 +407,6 @@ usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev)
usb_put_hcd(hcd);
clk_put(usb_dc_ck);
clk_put(usb_host_ck);
-}
-
-/*-------------------------------------------------------------------------*/
-
-static int ohci_hcd_omap_drv_probe(struct platform_device *dev)
-{
- return usb_hcd_omap_probe(&ohci_omap_hc_driver, dev);
-}
-
-static int ohci_hcd_omap_drv_remove(struct platform_device *dev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(dev);
-
- usb_hcd_omap_remove(hcd, dev);
-
return 0;
}
@@ -472,8 +455,8 @@ static int ohci_omap_resume(struct platform_device *dev)
* Driver definition to register with the OMAP bus
*/
static struct platform_driver ohci_hcd_omap_driver = {
- .probe = ohci_hcd_omap_drv_probe,
- .remove = ohci_hcd_omap_drv_remove,
+ .probe = ohci_hcd_omap_probe,
+ .remove = ohci_hcd_omap_remove,
.shutdown = usb_hcd_platform_shutdown,
#ifdef CONFIG_PM
.suspend = ohci_omap_suspend,
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index a667cf2d5788..79efde8f21e0 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -404,7 +404,7 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
/**
- * usb_hcd_pxa27x_probe - initialize pxa27x-based HCDs
+ * ohci_hcd_pxa27x_probe - initialize pxa27x-based HCDs
* Context: !in_interrupt()
*
* Allocates basic resources for this USB host controller, and
@@ -412,7 +412,7 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
* through the hotplug entry's driver_data.
*
*/
-int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device *pdev)
+static int ohci_hcd_pxa27x_probe(struct platform_device *pdev)
{
int retval, irq;
struct usb_hcd *hcd;
@@ -442,7 +442,7 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
if (IS_ERR(usb_clk))
return PTR_ERR(usb_clk);
- hcd = usb_create_hcd (driver, &pdev->dev, "pxa27x");
+ hcd = usb_create_hcd(&ohci_pxa27x_hc_driver, &pdev->dev, "pxa27x");
if (!hcd)
return -ENOMEM;
@@ -503,17 +503,18 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
/* may be called with controller, bus, and devices active */
/**
- * usb_hcd_pxa27x_remove - shutdown processing for pxa27x-based HCDs
+ * ohci_hcd_pxa27x_remove - shutdown processing for pxa27x-based HCDs
* @dev: USB Host Controller being removed
* Context: !in_interrupt()
*
- * Reverses the effect of usb_hcd_pxa27x_probe(), first invoking
+ * Reverses the effect of ohci_hcd_pxa27x_probe(), first invoking
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*
*/
-void usb_hcd_pxa27x_remove (struct usb_hcd *hcd, struct platform_device *pdev)
+static int ohci_hcd_pxa27x_remove(struct platform_device *pdev)
{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
unsigned int i;
@@ -524,28 +525,11 @@ void usb_hcd_pxa27x_remove (struct usb_hcd *hcd, struct platform_device *pdev)
pxa27x_ohci_set_vbus_power(pxa_ohci, i, false);
usb_put_hcd(hcd);
+ return 0;
}
/*-------------------------------------------------------------------------*/
-static int ohci_hcd_pxa27x_drv_probe(struct platform_device *pdev)
-{
- pr_debug ("In ohci_hcd_pxa27x_drv_probe");
-
- if (usb_disabled())
- return -ENODEV;
-
- return usb_hcd_pxa27x_probe(&ohci_pxa27x_hc_driver, pdev);
-}
-
-static int ohci_hcd_pxa27x_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_hcd_pxa27x_remove(hcd, pdev);
- return 0;
-}
-
#ifdef CONFIG_PM
static int ohci_hcd_pxa27x_drv_suspend(struct device *dev)
{
@@ -598,8 +582,8 @@ static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
#endif
static struct platform_driver ohci_hcd_pxa27x_driver = {
- .probe = ohci_hcd_pxa27x_drv_probe,
- .remove = ohci_hcd_pxa27x_drv_remove,
+ .probe = ohci_hcd_pxa27x_probe,
+ .remove = ohci_hcd_pxa27x_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "pxa27x-ohci",
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 7a1919ca543a..b006b93126f7 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -43,6 +43,8 @@ static const char hcd_name[] = "ohci-s3c2410";
static struct clk *clk;
static struct clk *usb_clk;
+static struct hc_driver __read_mostly ohci_s3c2410_hc_driver;
+
/* forward definitions */
static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc);
@@ -321,26 +323,29 @@ static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc)
/* may be called with controller, bus, and devices active */
/*
- * usb_hcd_s3c2410_remove - shutdown processing for HCD
+ * ohci_hcd_s3c2410_remove - shutdown processing for HCD
* @dev: USB Host Controller being removed
* Context: !in_interrupt()
*
- * Reverses the effect of usb_hcd_3c2410_probe(), first invoking
+ * Reverses the effect of ohci_hcd_s3c2410_probe(), first invoking
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*
*/
-static void
-usb_hcd_s3c2410_remove(struct usb_hcd *hcd, struct platform_device *dev)
+static int
+ohci_hcd_s3c2410_remove(struct platform_device *dev)
{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+
usb_remove_hcd(hcd);
s3c2410_stop_hc(dev);
usb_put_hcd(hcd);
+ return 0;
}
/**
- * usb_hcd_s3c2410_probe - initialize S3C2410-based HCDs
+ * ohci_hcd_s3c2410_probe - initialize S3C2410-based HCDs
* Context: !in_interrupt()
*
* Allocates basic resources for this USB host controller, and
@@ -348,8 +353,7 @@ usb_hcd_s3c2410_remove(struct usb_hcd *hcd, struct platform_device *dev)
* through the hotplug entry's driver_data.
*
*/
-static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
- struct platform_device *dev)
+static int ohci_hcd_s3c2410_probe(struct platform_device *dev)
{
struct usb_hcd *hcd = NULL;
struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev);
@@ -358,7 +362,7 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
s3c2410_usb_set_power(info, 1, 1);
s3c2410_usb_set_power(info, 2, 1);
- hcd = usb_create_hcd(driver, &dev->dev, "s3c24xx");
+ hcd = usb_create_hcd(&ohci_s3c2410_hc_driver, &dev->dev, "s3c24xx");
if (hcd == NULL)
return -ENOMEM;
@@ -404,21 +408,6 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
/*-------------------------------------------------------------------------*/
-static struct hc_driver __read_mostly ohci_s3c2410_hc_driver;
-
-static int ohci_hcd_s3c2410_drv_probe(struct platform_device *pdev)
-{
- return usb_hcd_s3c2410_probe(&ohci_s3c2410_hc_driver, pdev);
-}
-
-static int ohci_hcd_s3c2410_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_hcd_s3c2410_remove(hcd, pdev);
- return 0;
-}
-
#ifdef CONFIG_PM
static int ohci_hcd_s3c2410_drv_suspend(struct device *dev)
{
@@ -457,13 +446,21 @@ static const struct dev_pm_ops ohci_hcd_s3c2410_pm_ops = {
.resume = ohci_hcd_s3c2410_drv_resume,
};
+static const struct of_device_id ohci_hcd_s3c2410_dt_ids[] = {
+ { .compatible = "samsung,s3c2410-ohci" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, ohci_hcd_s3c2410_dt_ids);
+
static struct platform_driver ohci_hcd_s3c2410_driver = {
- .probe = ohci_hcd_s3c2410_drv_probe,
- .remove = ohci_hcd_s3c2410_drv_remove,
+ .probe = ohci_hcd_s3c2410_probe,
+ .remove = ohci_hcd_s3c2410_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "s3c2410-ohci",
.pm = &ohci_hcd_s3c2410_pm_ops,
+ .of_match_table = ohci_hcd_s3c2410_dt_ids,
},
};
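
The two OHCI conversions above drop the one-line _drv_probe()/_drv_remove() wrappers and let the platform_driver point straight at the probe/remove helpers, now that those helpers take only the platform_device. A standalone sketch of that pattern follows; the types and names are purely illustrative, not kernel code:

#include <stdio.h>

struct pdev { const char *name; };
struct pdrv { int (*probe)(struct pdev *); int (*remove)(struct pdev *); };

static int ohci_probe(struct pdev *dev)  { printf("probe %s\n", dev->name);  return 0; }
static int ohci_remove(struct pdev *dev) { printf("remove %s\n", dev->name); return 0; }

/* no intermediate _drv_probe()/_drv_remove() wrappers needed any more */
static struct pdrv driver = { .probe = ohci_probe, .remove = ohci_remove };

int main(void)
{
	struct pdev dev = { .name = "s3c2410-ohci" };

	driver.probe(&dev);
	driver.remove(&dev);
	return 0;
}
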
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 6afe32381209..321de2e0161b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1032,7 +1032,6 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
goto fail;
dev->num_rings_cached = 0;
- init_completion(&dev->cmd_completion);
dev->udev = udev;
/* Point to output device context in dcbaa. */
@@ -1370,7 +1369,7 @@ static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
if (udev->speed == USB_SPEED_HIGH &&
(usb_endpoint_xfer_isoc(&ep->desc) ||
usb_endpoint_xfer_int(&ep->desc)))
- return (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
+ return usb_endpoint_maxp_mult(&ep->desc) - 1;
return 0;
}
@@ -1415,10 +1414,10 @@ static u32 xhci_get_max_esit_payload(struct usb_device *udev,
else if (udev->speed >= USB_SPEED_SUPER)
return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
- max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
- max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
+ max_packet = usb_endpoint_maxp(&ep->desc);
+ max_burst = usb_endpoint_maxp_mult(&ep->desc);
/* A 0 in max burst means 1 transfer per ESIT */
- return max_packet * (max_burst + 1);
+ return max_packet * max_burst;
}
/* Set up an endpoint with one ring segment. Do not allocate stream rings.
@@ -1461,7 +1460,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
max_esit_payload = xhci_get_max_esit_payload(udev, ep);
interval = xhci_get_endpoint_interval(udev, ep);
mult = xhci_get_endpoint_mult(udev, ep);
- max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
+ max_packet = usb_endpoint_maxp(&ep->desc);
max_burst = xhci_get_endpoint_max_burst(udev, ep);
avg_trb_len = max_esit_payload;
@@ -2384,7 +2383,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* "physically contiguous and 64-byte (cache line) aligned".
*/
xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
- GFP_KERNEL);
+ flags);
if (!xhci->dcbaa)
goto fail;
memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
@@ -2480,7 +2479,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci->erst.entries = dma_alloc_coherent(dev,
sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
- GFP_KERNEL);
+ flags);
if (!xhci->erst.entries)
goto fail;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -2536,7 +2535,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* something other than the default (~1ms minimum between interrupts).
* See section 5.5.1.2.
*/
- init_completion(&xhci->addr_dev);
for (i = 0; i < MAX_HC_SLOTS; ++i)
xhci->devs[i] = NULL;
for (i = 0; i < USB_MAXCHILDREN; ++i) {
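
The xhci-mem.c hunks above replace the open-coded "(maxp & 0x1800) >> 11" shifts and the GET_MAX_PACKET() mask with usb_endpoint_maxp() and usb_endpoint_maxp_mult(). A standalone sketch of how a high-bandwidth high-speed wMaxPacketSize value decodes under that split; the helpers below are local stand-ins, not the kernel functions themselves:

#include <stdio.h>
#include <stdint.h>

/* bits 10:0 = max packet size, bits 12:11 = additional transactions per microframe */
static unsigned int maxp(uint16_t wMaxPacketSize)      { return wMaxPacketSize & 0x7ff; }
static unsigned int maxp_mult(uint16_t wMaxPacketSize) { return ((wMaxPacketSize >> 11) & 0x3) + 1; }

int main(void)
{
	uint16_t w = 0x1400;	/* 1024-byte packets, 2 extra transactions per microframe */

	printf("max packet       = %u\n", maxp(w));			/* 1024 */
	printf("mult             = %u\n", maxp_mult(w));		/* 3 */
	printf("max ESIT payload = %u\n", maxp(w) * maxp_mult(w));	/* 3072 */
	return 0;
}
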
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 73f763c4f5f5..6e7ddf6cafae 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -337,7 +337,7 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n",
__func__, usb_endpoint_type(&ep->desc), udev->speed,
- GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)),
+ usb_endpoint_maxp(&ep->desc),
usb_endpoint_dir_in(&ep->desc), ep);
if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT)) {
@@ -403,7 +403,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg(xhci, "%s() type:%d, speed:%d, mpks:%d, dir:%d, ep:%p\n",
__func__, usb_endpoint_type(&ep->desc), udev->speed,
- GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)),
+ usb_endpoint_maxp(&ep->desc),
usb_endpoint_dir_in(&ep->desc), ep);
if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT))
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 79959f17c38c..1094ebd2838f 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -94,6 +94,9 @@ static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
int ret;
int i;
+ if (!mtk->has_ippc)
+ return 0;
+
/* power on host ip */
value = readl(&ippc->ip_pw_ctr1);
value &= ~CTRL1_IP_HOST_PDN;
@@ -139,6 +142,9 @@ static int xhci_mtk_host_disable(struct xhci_hcd_mtk *mtk)
int ret;
int i;
+ if (!mtk->has_ippc)
+ return 0;
+
/* power down all u3 ports */
for (i = 0; i < mtk->num_u3_ports; i++) {
value = readl(&ippc->u3_ctrl_p[i]);
@@ -173,6 +179,9 @@ static int xhci_mtk_ssusb_config(struct xhci_hcd_mtk *mtk)
struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs;
u32 value;
+ if (!mtk->has_ippc)
+ return 0;
+
/* reset whole ip */
value = readl(&ippc->ip_pw_ctr0);
value |= CTRL0_IP_SW_RST;
@@ -475,6 +484,7 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
/* called during probe() after chip reset completes */
static int xhci_mtk_setup(struct usb_hcd *hcd)
{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
int ret;
@@ -482,12 +492,21 @@ static int xhci_mtk_setup(struct usb_hcd *hcd)
ret = xhci_mtk_ssusb_config(mtk);
if (ret)
return ret;
+ }
+
+ ret = xhci_gen_setup(hcd, xhci_mtk_quirks);
+ if (ret)
+ return ret;
+
+ if (usb_hcd_is_primary_hcd(hcd)) {
+ mtk->num_u3_ports = xhci->num_usb3_ports;
+ mtk->num_u2_ports = xhci->num_usb2_ports;
ret = xhci_mtk_sch_init(mtk);
if (ret)
return ret;
}
- return xhci_gen_setup(hcd, xhci_mtk_quirks);
+ return ret;
}
static int xhci_mtk_probe(struct platform_device *pdev)
@@ -586,7 +605,7 @@ static int xhci_mtk_probe(struct platform_device *pdev)
mtk->hcd = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, mtk);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac");
hcd->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
@@ -595,11 +614,16 @@ static int xhci_mtk_probe(struct platform_device *pdev)
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- mtk->ippc_regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(mtk->ippc_regs)) {
- ret = PTR_ERR(mtk->ippc_regs);
- goto put_usb2_hcd;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ippc");
+ if (res) { /* ippc register is optional */
+ mtk->ippc_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mtk->ippc_regs)) {
+ ret = PTR_ERR(mtk->ippc_regs);
+ goto put_usb2_hcd;
+ }
+ mtk->has_ippc = true;
+ } else {
+ mtk->has_ippc = false;
}
for (phy_num = 0; phy_num < mtk->num_phys; phy_num++) {
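
The MTK changes above make the IPPC register block optional: the resource is looked up by name, the result is recorded in has_ippc, and the host enable/disable/config paths return early when it is absent. A standalone sketch of that optional-resource pattern, with illustrative types and names only:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct resource { const char *name; };

static struct resource resources[] = { { "mac" } };	/* no "ippc" entry on this SoC */

static struct resource *get_resource_byname(const char *name)
{
	for (size_t i = 0; i < sizeof(resources) / sizeof(resources[0]); i++)
		if (!strcmp(resources[i].name, name))
			return &resources[i];
	return NULL;
}

int main(void)
{
	bool has_ippc = get_resource_byname("ippc") != NULL;

	if (!has_ippc) {
		printf("no ippc block: skip IP power/clock sequencing\n");
		return 0;	/* mirrors the early 'return 0' added to host_enable() */
	}
	printf("ippc present: run full power-on sequence\n");
	return 0;
}
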
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index 7da677c79ea8..2845c49efe1b 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -118,6 +118,7 @@ struct xhci_hcd_mtk {
struct usb_hcd *hcd;
struct mu3h_sch_bw_info *sch_array;
struct mu3c_ippc_regs __iomem *ippc_regs;
+ bool has_ippc;
int num_u2_ports;
int num_u3_ports;
struct regulator *vusb33;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index ed56bf9ed885..ddfab301e366 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -100,6 +100,12 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
.plat_start = xhci_rcar_start,
};
+static const struct xhci_plat_priv xhci_plat_renesas_rcar_r8a7796 = {
+ .firmware_name = XHCI_RCAR_FIRMWARE_NAME_V3,
+ .init_quirk = xhci_rcar_init_quirk,
+ .plat_start = xhci_rcar_start,
+};
+
static const struct of_device_id usb_xhci_of_match[] = {
{
.compatible = "generic-xhci",
@@ -124,6 +130,9 @@ static const struct of_device_id usb_xhci_of_match[] = {
.compatible = "renesas,xhci-r8a7795",
.data = &xhci_plat_renesas_rcar_gen3,
}, {
+ .compatible = "renesas,xhci-r8a7796",
+ .data = &xhci_plat_renesas_rcar_r8a7796,
+ }, {
.compatible = "renesas,rcar-gen2-xhci",
.data = &xhci_plat_renesas_rcar_gen2,
}, {
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index 0e4535e632ec..d28df386e780 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -19,6 +19,8 @@
#include "xhci-rcar.h"
/*
+* - The V3 firmware is for r8a7796 (with good performance).
+* - The V2 firmware can be used on both r8a7795 (es1.x) and r8a7796.
* - The V2 firmware is possible to use on R-Car Gen2. However, the V2 causes
* performance degradation. So, this driver continues to use the V1 if R-Car
* Gen2.
@@ -26,6 +28,7 @@
*/
MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V1);
MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V2);
+MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V3);
/*** Register Offset ***/
#define RCAR_USB3_INT_ENA 0x224 /* Interrupt Enable */
@@ -92,6 +95,7 @@ static int xhci_rcar_is_gen3(struct device *dev)
struct device_node *node = dev->of_node;
return of_device_is_compatible(node, "renesas,xhci-r8a7795") ||
+ of_device_is_compatible(node, "renesas,xhci-r8a7796") ||
of_device_is_compatible(node, "renesas,rcar-gen3-xhci");
}
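
The R-Car hunks add a V3 firmware name and teach the Gen3 check about r8a7796. The sketch below only illustrates the compatible-string-to-firmware idea; the r8a7796 -> V3 and Gen2 -> V1 associations come from the diff, while the default entry is an assumption and not the driver's actual selection logic:

#include <stdio.h>
#include <string.h>

static const struct {
	const char *compatible;
	const char *firmware;
} fw_map[] = {
	{ "renesas,xhci-r8a7796",   "r8a779x_usb3_v3.dlmem" },	/* from the diff */
	{ "renesas,rcar-gen2-xhci", "r8a779x_usb3_v1.dlmem" },	/* Gen2 keeps V1 */
};

static const char *firmware_for(const char *compatible)
{
	for (size_t i = 0; i < sizeof(fw_map) / sizeof(fw_map[0]); i++)
		if (!strcmp(fw_map[i].compatible, compatible))
			return fw_map[i].firmware;
	return "r8a779x_usb3_v2.dlmem";	/* assumed default for other Gen3 parts */
}

int main(void)
{
	printf("%s\n", firmware_for("renesas,xhci-r8a7796"));	/* v3 */
	return 0;
}
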
diff --git a/drivers/usb/host/xhci-rcar.h b/drivers/usb/host/xhci-rcar.h
index 2941a25cfe98..d2ffe20401cf 100644
--- a/drivers/usb/host/xhci-rcar.h
+++ b/drivers/usb/host/xhci-rcar.h
@@ -13,6 +13,7 @@
#define XHCI_RCAR_FIRMWARE_NAME_V1 "r8a779x_usb3_v1.dlmem"
#define XHCI_RCAR_FIRMWARE_NAME_V2 "r8a779x_usb3_v2.dlmem"
+#define XHCI_RCAR_FIRMWARE_NAME_V3 "r8a779x_usb3_v3.dlmem"
#if IS_ENABLED(CONFIG_USB_XHCI_RCAR)
void xhci_rcar_start(struct usb_hcd *hcd);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 797137e26549..bdf6b13d9b67 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -89,6 +89,11 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
return seg->dma + (segment_offset * sizeof(*trb));
}
+static bool trb_is_noop(union xhci_trb *trb)
+{
+ return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
+}
+
static bool trb_is_link(union xhci_trb *trb)
{
return TRB_TYPE_LINK_LE32(trb->link.control);
@@ -110,6 +115,20 @@ static bool link_trb_toggles_cycle(union xhci_trb *trb)
return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}
+static bool last_td_in_urb(struct xhci_td *td)
+{
+ struct urb_priv *urb_priv = td->urb->hcpriv;
+
+ return urb_priv->td_cnt == urb_priv->length;
+}
+
+static void inc_td_cnt(struct urb *urb)
+{
+ struct urb_priv *urb_priv = urb->hcpriv;
+
+ urb_priv->td_cnt++;
+}
+
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
* TRB is in a new segment. This does not skip over link TRBs, and it does not
* effect the ring dequeue or enqueue pointers.
@@ -303,7 +322,6 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
"maybe the host is dead\n");
del_timer(&xhci->cmd_timer);
xhci->xhc_state |= XHCI_STATE_DYING;
- xhci_quiesce(xhci);
xhci_halt(xhci);
return -ESHUTDOWN;
}
@@ -473,9 +491,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
if (new_deq == cur_td->last_trb)
td_last_trb_found = true;
- if (cycle_found &&
- TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) &&
- new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE))
+ if (cycle_found && trb_is_link(new_deq) &&
+ link_trb_toggles_cycle(new_deq))
state->new_cycle_state ^= 0x1;
next_trb(xhci, ep_ring, &new_seg, &new_deq);
@@ -511,54 +528,32 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
* of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
*/
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
- struct xhci_td *cur_td, bool flip_cycle)
+ struct xhci_td *td, bool flip_cycle)
{
- struct xhci_segment *cur_seg;
- union xhci_trb *cur_trb;
-
- for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
- true;
- next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
- if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
- /* Unchain any chained Link TRBs, but
- * leave the pointers intact.
- */
- cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
- /* Flip the cycle bit (link TRBs can't be the first
- * or last TRB).
- */
- if (flip_cycle)
- cur_trb->generic.field[3] ^=
- cpu_to_le32(TRB_CYCLE);
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Cancel (unchain) link TRB");
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Address = %p (0x%llx dma); "
- "in seg %p (0x%llx dma)",
- cur_trb,
- (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
- cur_seg,
- (unsigned long long)cur_seg->dma);
+ struct xhci_segment *seg = td->start_seg;
+ union xhci_trb *trb = td->first_trb;
+
+ while (1) {
+ if (trb_is_link(trb)) {
+ /* unchain chained link TRBs */
+ trb->link.control &= cpu_to_le32(~TRB_CHAIN);
} else {
- cur_trb->generic.field[0] = 0;
- cur_trb->generic.field[1] = 0;
- cur_trb->generic.field[2] = 0;
+ trb->generic.field[0] = 0;
+ trb->generic.field[1] = 0;
+ trb->generic.field[2] = 0;
/* Preserve only the cycle bit of this TRB */
- cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
- /* Flip the cycle bit except on the first or last TRB */
- if (flip_cycle && cur_trb != cur_td->first_trb &&
- cur_trb != cur_td->last_trb)
- cur_trb->generic.field[3] ^=
- cpu_to_le32(TRB_CYCLE);
- cur_trb->generic.field[3] |= cpu_to_le32(
+ trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+ trb->generic.field[3] |= cpu_to_le32(
TRB_TYPE(TRB_TR_NOOP));
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "TRB to noop at offset 0x%llx",
- (unsigned long long)
- xhci_trb_virt_to_dma(cur_seg, cur_trb));
}
- if (cur_trb == cur_td->last_trb)
+ /* flip cycle if asked to */
+ if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
+ trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
+
+ if (trb == td->last_trb)
break;
+
+ next_trb(xhci, ep_ring, &seg, &trb);
}
}
@@ -574,39 +569,33 @@ static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
ep->stop_cmds_pending--;
}
-/* Must be called with xhci->lock held in interrupt context */
+/*
+ * Must be called with xhci->lock held in interrupt context,
+ * releases and re-acquires xhci->lock
+ */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
- struct xhci_td *cur_td, int status)
+ struct xhci_td *cur_td, int status)
{
- struct usb_hcd *hcd;
- struct urb *urb;
- struct urb_priv *urb_priv;
+ struct urb *urb = cur_td->urb;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
- urb = cur_td->urb;
- urb_priv = urb->hcpriv;
- urb_priv->td_cnt++;
- hcd = bus_to_hcd(urb->dev->bus);
-
- /* Only giveback urb when this is the last td in urb */
- if (urb_priv->td_cnt == urb_priv->length) {
- if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
- xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
- if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
- if (xhci->quirks & XHCI_AMD_PLL_FIX)
- usb_amd_quirk_pll_enable();
- }
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+ if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
+ if (xhci->quirks & XHCI_AMD_PLL_FIX)
+ usb_amd_quirk_pll_enable();
}
- usb_hcd_unlink_urb_from_ep(hcd, urb);
-
- spin_unlock(&xhci->lock);
- usb_hcd_giveback_urb(hcd, urb, status);
- xhci_urb_free_priv(urb_priv);
- spin_lock(&xhci->lock);
}
+ xhci_urb_free_priv(urb_priv);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock(&xhci->lock);
+ usb_hcd_giveback_urb(hcd, urb, status);
+ spin_lock(&xhci->lock);
}
-void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring,
- struct xhci_td *td)
+static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
+ struct xhci_ring *ring, struct xhci_td *td)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
struct xhci_segment *seg = td->bounce_seg;
@@ -752,7 +741,9 @@ remove_finished_td:
ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
if (ep_ring && cur_td->bounce_seg)
xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
- xhci_giveback_urb_in_irq(xhci, cur_td, 0);
+ inc_td_cnt(cur_td->urb);
+ if (last_td_in_urb(cur_td))
+ xhci_giveback_urb_in_irq(xhci, cur_td, 0);
/* Stop processing the cancelled list if the watchdog timer is
* running.
@@ -777,7 +768,10 @@ static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
if (cur_td->bounce_seg)
xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
- xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+
+ inc_td_cnt(cur_td->urb);
+ if (last_td_in_urb(cur_td))
+ xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
}
}
@@ -814,7 +808,10 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
cur_td = list_first_entry(&ep->cancelled_td_list,
struct xhci_td, cancelled_td_list);
list_del_init(&cur_td->cancelled_td_list);
- xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+
+ inc_td_cnt(cur_td->urb);
+ if (last_td_in_urb(cur_td))
+ xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
}
}
@@ -1003,8 +1000,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
break;
case COMP_CTX_STATE:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
- ep_state = le32_to_cpu(ep_ctx->ep_info);
- ep_state &= EP_STATE_MASK;
+ ep_state = GET_EP_CTX_STATE(ep_ctx);
slot_state = le32_to_cpu(slot_ctx->dev_state);
slot_state = GET_SLOT_STATE(slot_state);
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -1096,12 +1092,12 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
}
static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
- u32 cmd_comp_code)
+ struct xhci_command *command, u32 cmd_comp_code)
{
if (cmd_comp_code == COMP_SUCCESS)
- xhci->slot_id = slot_id;
+ command->slot_id = slot_id;
else
- xhci->slot_id = 0;
+ command->slot_id = 0;
}
static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
@@ -1183,7 +1179,7 @@ static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
struct xhci_event_cmd *event)
{
if (!(xhci->quirks & XHCI_NEC_HOST)) {
- xhci->error_bitmask |= 1 << 6;
+ xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
return;
}
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
@@ -1325,14 +1321,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
cmd_trb = xhci->cmd_ring->dequeue;
cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
cmd_trb);
- /* Is the command ring deq ptr out of sync with the deq seg ptr? */
- if (cmd_dequeue_dma == 0) {
- xhci->error_bitmask |= 1 << 4;
- return;
- }
- /* Does the DMA address match our internal dequeue pointer address? */
- if (cmd_dma != (u64) cmd_dequeue_dma) {
- xhci->error_bitmask |= 1 << 5;
+ /*
+ * Check whether the completion event is for the command we are
+ * tracking internally.
+ */
+ if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
+ xhci_warn(xhci,
+ "ERROR mismatched command completion event\n");
return;
}
@@ -1371,7 +1366,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
switch (cmd_type) {
case TRB_ENABLE_SLOT:
- xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
+ xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
break;
case TRB_DISABLE_SLOT:
xhci_handle_cmd_disable_slot(xhci, slot_id);
@@ -1418,7 +1413,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
break;
default:
/* Skip over unknown commands on the event ring */
- xhci->error_bitmask |= 1 << 6;
+ xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
break;
}
@@ -1519,10 +1514,10 @@ static void handle_port_status(struct xhci_hcd *xhci,
bool bogus_port_status = false;
/* Port status change events always have a successful completion code */
- if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
- xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
- xhci->error_bitmask |= 1 << 8;
- }
+ if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
+ xhci_warn(xhci,
+ "WARN: xHC returned failed port status event\n");
+
port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
@@ -1759,7 +1754,7 @@ struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
unsigned int stream_id,
- struct xhci_td *td, union xhci_trb *event_trb)
+ struct xhci_td *td, union xhci_trb *ep_trb)
{
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
struct xhci_command *command;
@@ -1798,8 +1793,7 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
* endpoint anyway. Check if a babble halted the
* endpoint.
*/
- if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
- cpu_to_le32(EP_STATE_HALTED))
+ if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
return 1;
return 0;
@@ -1824,7 +1818,7 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
* Return 1 if the urb can be given back.
*/
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
- union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ union xhci_trb *ep_trb, struct xhci_transfer_event *event,
struct xhci_virt_ep *ep, int *status, bool skip)
{
struct xhci_virt_device *xdev;
@@ -1833,7 +1827,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
int ep_index;
struct urb *urb = NULL;
struct xhci_ep_ctx *ep_ctx;
- int ret = 0;
struct urb_priv *urb_priv;
u32 trb_comp_code;
@@ -1866,7 +1859,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
* The class driver clears the device side halt later.
*/
xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
- ep_ring->stream_id, td, event_trb);
+ ep_ring->stream_id, td, ep_trb);
} else {
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
@@ -1889,41 +1882,54 @@ td_cleanup:
* unsigned). Play it safe and say we didn't transfer anything.
*/
if (urb->actual_length > urb->transfer_buffer_length) {
- xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n",
- urb->transfer_buffer_length,
- urb->actual_length);
+ xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
+ urb->transfer_buffer_length, urb->actual_length);
urb->actual_length = 0;
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- *status = -EREMOTEIO;
- else
- *status = 0;
+ *status = 0;
}
list_del_init(&td->td_list);
/* Was this TD slated to be cancelled but completed anyway? */
if (!list_empty(&td->cancelled_td_list))
list_del_init(&td->cancelled_td_list);
- urb_priv->td_cnt++;
+ inc_td_cnt(urb);
/* Giveback the urb when all the tds are completed */
- if (urb_priv->td_cnt == urb_priv->length) {
- ret = 1;
- if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
- xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
- if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
- if (xhci->quirks & XHCI_AMD_PLL_FIX)
- usb_amd_quirk_pll_enable();
- }
- }
+ if (last_td_in_urb(td)) {
+ if ((urb->actual_length != urb->transfer_buffer_length &&
+ (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
+ (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
+ xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
+ urb, urb->actual_length,
+ urb->transfer_buffer_length, *status);
+
+ /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+ *status = 0;
+ xhci_giveback_urb_in_irq(xhci, td, *status);
}
+ return 0;
+}
- return ret;
+/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
+static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ union xhci_trb *stop_trb)
+{
+ u32 sum;
+ union xhci_trb *trb = ring->dequeue;
+ struct xhci_segment *seg = ring->deq_seg;
+
+ for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
+ if (!trb_is_noop(trb) && !trb_is_link(trb))
+ sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
+ }
+ return sum;
}
/*
* Process control tds, update urb status and actual_length.
*/
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
- union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ union xhci_trb *ep_trb, struct xhci_transfer_event *event,
struct xhci_virt_ep *ep, int *status)
{
struct xhci_virt_device *xdev;
@@ -1932,6 +1938,8 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
int ep_index;
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
+ u32 remaining, requested;
+ bool on_data_stage;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
xdev = xhci->devs[slot_id];
@@ -1939,195 +1947,161 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+ requested = td->urb->transfer_buffer_length;
+ remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+
+ /* not setup (dequeue), or status stage means we are at data stage */
+ on_data_stage = (ep_trb != ep_ring->dequeue && ep_trb != td->last_trb);
switch (trb_comp_code) {
case COMP_SUCCESS:
- if (event_trb == ep_ring->dequeue) {
- xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
- "without IOC set??\n");
- *status = -ESHUTDOWN;
- } else if (event_trb != td->last_trb) {
- xhci_warn(xhci, "WARN: Success on ctrl data TRB "
- "without IOC set??\n");
+ if (ep_trb != td->last_trb) {
+ xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
+ on_data_stage ? "data" : "setup");
*status = -ESHUTDOWN;
- } else {
- *status = 0;
+ break;
}
+ *status = 0;
break;
case COMP_SHORT_TX:
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- *status = -EREMOTEIO;
- else
- *status = 0;
+ *status = 0;
break;
case COMP_STOP_SHORT:
- if (event_trb == ep_ring->dequeue || event_trb == td->last_trb)
- xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
+ if (on_data_stage)
+ td->urb->actual_length = remaining;
else
- td->urb->actual_length =
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
-
- return finish_td(xhci, td, event_trb, event, ep, status, false);
+ xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
+ goto finish_td;
case COMP_STOP:
- /* Did we stop at data stage? */
- if (event_trb != ep_ring->dequeue && event_trb != td->last_trb)
- td->urb->actual_length =
- td->urb->transfer_buffer_length -
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
- /* fall through */
+ if (on_data_stage)
+ td->urb->actual_length = requested - remaining;
+ goto finish_td;
case COMP_STOP_INVAL:
- return finish_td(xhci, td, event_trb, event, ep, status, false);
+ goto finish_td;
default:
if (!xhci_requires_manual_halt_cleanup(xhci,
- ep_ctx, trb_comp_code))
+ ep_ctx, trb_comp_code))
break;
- xhci_dbg(xhci, "TRB error code %u, "
- "halted endpoint index = %u\n",
- trb_comp_code, ep_index);
+ xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
+ trb_comp_code, ep_index);
/* else fall through */
case COMP_STALL:
/* Did we transfer part of the data (middle) phase? */
- if (event_trb != ep_ring->dequeue &&
- event_trb != td->last_trb)
- td->urb->actual_length =
- td->urb->transfer_buffer_length -
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ if (on_data_stage)
+ td->urb->actual_length = requested - remaining;
else if (!td->urb_length_set)
td->urb->actual_length = 0;
-
- return finish_td(xhci, td, event_trb, event, ep, status, false);
+ goto finish_td;
}
+
+ /* stopped at setup stage, no data transferred */
+ if (ep_trb == ep_ring->dequeue)
+ goto finish_td;
+
/*
- * Did we transfer any data, despite the errors that might have
- * happened? I.e. did we get past the setup stage?
+ * if on data stage then update the actual_length of the URB and flag it
+ * as set, so it won't be overwritten in the event for the last TRB.
*/
- if (event_trb != ep_ring->dequeue) {
- /* The event was for the status stage */
- if (event_trb == td->last_trb) {
- if (td->urb_length_set) {
- /* Don't overwrite a previously set error code
- */
- if ((*status == -EINPROGRESS || *status == 0) &&
- (td->urb->transfer_flags
- & URB_SHORT_NOT_OK))
- /* Did we already see a short data
- * stage? */
- *status = -EREMOTEIO;
- } else {
- td->urb->actual_length =
- td->urb->transfer_buffer_length;
- }
- } else {
- /*
- * Maybe the event was for the data stage? If so, update
- * already the actual_length of the URB and flag it as
- * set, so that it is not overwritten in the event for
- * the last TRB.
- */
- td->urb_length_set = true;
- td->urb->actual_length =
- td->urb->transfer_buffer_length -
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
- xhci_dbg(xhci, "Waiting for status "
- "stage event\n");
- return 0;
- }
+ if (on_data_stage) {
+ td->urb_length_set = true;
+ td->urb->actual_length = requested - remaining;
+ xhci_dbg(xhci, "Waiting for status stage event\n");
+ return 0;
}
- return finish_td(xhci, td, event_trb, event, ep, status, false);
+ /* at status stage */
+ if (!td->urb_length_set)
+ td->urb->actual_length = requested;
+
+finish_td:
+ return finish_td(xhci, td, ep_trb, event, ep, status, false);
}
/*
* Process isochronous tds, update urb packet status and actual_length.
*/
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
- union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ union xhci_trb *ep_trb, struct xhci_transfer_event *event,
struct xhci_virt_ep *ep, int *status)
{
struct xhci_ring *ep_ring;
struct urb_priv *urb_priv;
int idx;
- int len = 0;
- union xhci_trb *cur_trb;
- struct xhci_segment *cur_seg;
struct usb_iso_packet_descriptor *frame;
u32 trb_comp_code;
- bool skip_td = false;
+ bool sum_trbs_for_length = false;
+ u32 remaining, requested, ep_trb_len;
+ int short_framestatus;
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
urb_priv = td->urb->hcpriv;
idx = urb_priv->td_cnt;
frame = &td->urb->iso_frame_desc[idx];
+ requested = frame->length;
+ remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
+ short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
+ -EREMOTEIO : 0;
/* handle completion code */
switch (trb_comp_code) {
case COMP_SUCCESS:
- if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
- frame->status = 0;
+ if (remaining) {
+ frame->status = short_framestatus;
+ if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
+ sum_trbs_for_length = true;
break;
}
- if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
- trb_comp_code = COMP_SHORT_TX;
- /* fallthrough */
- case COMP_STOP_SHORT:
+ frame->status = 0;
+ break;
case COMP_SHORT_TX:
- frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
- -EREMOTEIO : 0;
+ frame->status = short_framestatus;
+ sum_trbs_for_length = true;
break;
case COMP_BW_OVER:
frame->status = -ECOMM;
- skip_td = true;
break;
case COMP_BUFF_OVER:
case COMP_BABBLE:
frame->status = -EOVERFLOW;
- skip_td = true;
break;
case COMP_DEV_ERR:
case COMP_STALL:
frame->status = -EPROTO;
- skip_td = true;
break;
case COMP_TX_ERR:
frame->status = -EPROTO;
- if (event_trb != td->last_trb)
+ if (ep_trb != td->last_trb)
return 0;
- skip_td = true;
break;
case COMP_STOP:
+ sum_trbs_for_length = true;
+ break;
+ case COMP_STOP_SHORT:
+ /* field normally containing residue now contains transferred bytes */
+ frame->status = short_framestatus;
+ requested = remaining;
+ break;
case COMP_STOP_INVAL:
+ requested = 0;
+ remaining = 0;
break;
default:
+ sum_trbs_for_length = true;
frame->status = -1;
break;
}
- if (trb_comp_code == COMP_SUCCESS || skip_td) {
- frame->actual_length = frame->length;
- td->urb->actual_length += frame->length;
- } else if (trb_comp_code == COMP_STOP_SHORT) {
- frame->actual_length =
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
- td->urb->actual_length += frame->actual_length;
- } else {
- for (cur_trb = ep_ring->dequeue,
- cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
- next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
- if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
- !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
- len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
- }
- len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ if (sum_trbs_for_length)
+ frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) +
+ ep_trb_len - remaining;
+ else
+ frame->actual_length = requested;
- if (trb_comp_code != COMP_STOP_INVAL) {
- frame->actual_length = len;
- td->urb->actual_length += len;
- }
- }
+ td->urb->actual_length += frame->actual_length;
- return finish_td(xhci, td, event_trb, event, ep, status, false);
+ return finish_td(xhci, td, ep_trb, event, ep, status, false);
}
static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
@@ -2162,119 +2136,62 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
* Process bulk and interrupt tds, update urb status and actual_length.
*/
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
- union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ union xhci_trb *ep_trb, struct xhci_transfer_event *event,
struct xhci_virt_ep *ep, int *status)
{
struct xhci_ring *ep_ring;
- union xhci_trb *cur_trb;
- struct xhci_segment *cur_seg;
u32 trb_comp_code;
+ u32 remaining, requested, ep_trb_len;
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+ remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
+ requested = td->urb->transfer_buffer_length;
switch (trb_comp_code) {
case COMP_SUCCESS:
- /* Double check that the HW transferred everything. */
- if (event_trb != td->last_trb ||
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
- xhci_warn(xhci, "WARN Successful completion "
- "on short TX\n");
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- *status = -EREMOTEIO;
- else
- *status = 0;
- if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
- trb_comp_code = COMP_SHORT_TX;
- } else {
- *status = 0;
+ /* handle success with untransferred data as short packet */
+ if (ep_trb != td->last_trb || remaining) {
+ xhci_warn(xhci, "WARN Successful completion on short TX\n");
+ xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
+ td->urb->ep->desc.bEndpointAddress,
+ requested, remaining);
}
+ *status = 0;
break;
- case COMP_STOP_SHORT:
case COMP_SHORT_TX:
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- *status = -EREMOTEIO;
- else
- *status = 0;
+ xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
+ td->urb->ep->desc.bEndpointAddress,
+ requested, remaining);
+ *status = 0;
+ break;
+ case COMP_STOP_SHORT:
+ td->urb->actual_length = remaining;
+ goto finish_td;
+ case COMP_STOP_INVAL:
+ /* stopped on ep trb with invalid length, exclude it */
+ ep_trb_len = 0;
+ remaining = 0;
break;
default:
- /* Others already handled above */
+ /* do nothing */
break;
}
- if (trb_comp_code == COMP_SHORT_TX)
- xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
- "%d bytes untransferred\n",
- td->urb->ep->desc.bEndpointAddress,
- td->urb->transfer_buffer_length,
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
- /* Stopped - short packet completion */
- if (trb_comp_code == COMP_STOP_SHORT) {
- td->urb->actual_length =
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
- if (td->urb->transfer_buffer_length <
- td->urb->actual_length) {
- xhci_warn(xhci, "HC gave bad length of %d bytes txed\n",
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
- td->urb->actual_length = 0;
- /* status will be set by usb core for canceled urbs */
- }
- /* Fast path - was this the last TRB in the TD for this URB? */
- } else if (event_trb == td->last_trb) {
- if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
- td->urb->actual_length =
- td->urb->transfer_buffer_length -
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
- if (td->urb->transfer_buffer_length <
- td->urb->actual_length) {
- xhci_warn(xhci, "HC gave bad length "
- "of %d bytes left\n",
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
- td->urb->actual_length = 0;
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- *status = -EREMOTEIO;
- else
- *status = 0;
- }
- /* Don't overwrite a previously set error code */
- if (*status == -EINPROGRESS) {
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- *status = -EREMOTEIO;
- else
- *status = 0;
- }
- } else {
- td->urb->actual_length =
- td->urb->transfer_buffer_length;
- /* Ignore a short packet completion if the
- * untransferred length was zero.
- */
- if (*status == -EREMOTEIO)
- *status = 0;
- }
- } else {
- /* Slow path - walk the list, starting from the dequeue
- * pointer, to get the actual length transferred.
- */
+ if (ep_trb == td->last_trb)
+ td->urb->actual_length = requested - remaining;
+ else
+ td->urb->actual_length =
+ sum_trb_lengths(xhci, ep_ring, ep_trb) +
+ ep_trb_len - remaining;
+finish_td:
+ if (remaining > requested) {
+ xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
+ remaining);
td->urb->actual_length = 0;
- for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
- cur_trb != event_trb;
- next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
- if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
- !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
- td->urb->actual_length +=
- TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
- }
- /* If the ring didn't stop on a Link or No-op TRB, add
- * in the actual bytes transferred from the Normal TRB
- */
- if (trb_comp_code != COMP_STOP_INVAL)
- td->urb->actual_length +=
- TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
}
-
- return finish_td(xhci, td, event_trb, event, ep, status, false);
+ return finish_td(xhci, td, ep_trb, event, ep, status, false);
}
/*
@@ -2293,16 +2210,13 @@ static int handle_tx_event(struct xhci_hcd *xhci,
unsigned int slot_id;
int ep_index;
struct xhci_td *td = NULL;
- dma_addr_t event_dma;
- struct xhci_segment *event_seg;
- union xhci_trb *event_trb;
- struct urb *urb = NULL;
+ dma_addr_t ep_trb_dma;
+ struct xhci_segment *ep_seg;
+ union xhci_trb *ep_trb;
int status = -EINPROGRESS;
- struct urb_priv *urb_priv;
struct xhci_ep_ctx *ep_ctx;
struct list_head *tmp;
u32 trb_comp_code;
- int ret = 0;
int td_num = 0;
bool handling_skipped_tds = false;
@@ -2328,9 +2242,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep = &xdev->eps[ep_index];
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
- if (!ep_ring ||
- (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
- EP_STATE_DISABLED) {
+ if (!ep_ring || GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
"or incorrect stream ring\n");
xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
@@ -2352,7 +2264,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td_num++;
}
- event_dma = le64_to_cpu(event->buffer);
+ ep_trb_dma = le64_to_cpu(event->buffer);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
/* Look for common error cases */
switch (trb_comp_code) {
@@ -2480,7 +2392,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xhci_dbg(xhci, "td_list is empty while skip "
"flag set. Clear skip flag.\n");
}
- ret = 0;
goto cleanup;
}
@@ -2489,7 +2400,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep->skip = false;
xhci_dbg(xhci, "All tds on the ep_ring skipped. "
"Clear skip flag.\n");
- ret = 0;
goto cleanup;
}
@@ -2498,8 +2408,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td_num--;
/* Is this a TRB in the currently executing TD? */
- event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
- td->last_trb, event_dma, false);
+ ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
+ td->last_trb, ep_trb_dma, false);
/*
* Skip the Force Stopped Event. The event_trb(event_dma) of FSE
@@ -2509,13 +2419,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* last TRB of the previous TD. The command completion handle
* will take care the rest.
*/
- if (!event_seg && (trb_comp_code == COMP_STOP ||
+ if (!ep_seg && (trb_comp_code == COMP_STOP ||
trb_comp_code == COMP_STOP_INVAL)) {
- ret = 0;
goto cleanup;
}
- if (!event_seg) {
+ if (!ep_seg) {
if (!ep->skip ||
!usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
/* Some host controllers give a spurious
@@ -2525,7 +2434,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
ep_ring->last_td_was_short) {
ep_ring->last_td_was_short = false;
- ret = 0;
goto cleanup;
}
/* HC is busted, give up! */
@@ -2536,11 +2444,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
trb_comp_code);
trb_in_td(xhci, ep_ring->deq_seg,
ep_ring->dequeue, td->last_trb,
- event_dma, true);
+ ep_trb_dma, true);
return -ESHUTDOWN;
}
- ret = skip_isoc_td(xhci, td, event, ep, &status);
+ skip_isoc_td(xhci, td, event, ep, &status);
goto cleanup;
}
if (trb_comp_code == COMP_SHORT_TX)
@@ -2553,36 +2461,28 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep->skip = false;
}
- event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
- sizeof(*event_trb)];
+ ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
+ sizeof(*ep_trb)];
/*
* No-op TRB should not trigger interrupts.
- * If event_trb is a no-op TRB, it means the
+ * If ep_trb is a no-op TRB, it means the
* corresponding TD has been cancelled. Just ignore
* the TD.
*/
- if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
- xhci_dbg(xhci,
- "event_trb is a no-op TRB. Skip it\n");
+ if (trb_is_noop(ep_trb)) {
+ xhci_dbg(xhci, "ep_trb is a no-op TRB. Skip it\n");
goto cleanup;
}
- /* Now update the urb's actual_length and give back to
- * the core
- */
+ /* update the urb's actual_length and give back to the core */
if (usb_endpoint_xfer_control(&td->urb->ep->desc))
- ret = process_ctrl_td(xhci, td, event_trb, event, ep,
- &status);
+ process_ctrl_td(xhci, td, ep_trb, event, ep, &status);
else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
- ret = process_isoc_td(xhci, td, event_trb, event, ep,
- &status);
+ process_isoc_td(xhci, td, ep_trb, event, ep, &status);
else
- ret = process_bulk_intr_td(xhci, td, event_trb, event,
- ep, &status);
-
+ process_bulk_intr_td(xhci, td, ep_trb, event, ep,
+ &status);
cleanup:
-
-
handling_skipped_tds = ep->skip &&
trb_comp_code != COMP_MISSED_INT &&
trb_comp_code != COMP_PING_ERR;
@@ -2594,33 +2494,6 @@ cleanup:
if (!handling_skipped_tds)
inc_deq(xhci, xhci->event_ring);
- if (ret) {
- urb = td->urb;
- urb_priv = urb->hcpriv;
-
- xhci_urb_free_priv(urb_priv);
-
- usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
- if ((urb->actual_length != urb->transfer_buffer_length &&
- (urb->transfer_flags &
- URB_SHORT_NOT_OK)) ||
- (status != 0 &&
- !usb_endpoint_xfer_isoc(&urb->ep->desc)))
- xhci_dbg(xhci, "Giveback URB %p, len = %d, "
- "expected = %d, status = %d\n",
- urb, urb->actual_length,
- urb->transfer_buffer_length,
- status);
- spin_unlock(&xhci->lock);
- /* EHCI, UHCI, and OHCI always unconditionally set the
- * urb->status of an isochronous endpoint to 0.
- */
- if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
- status = 0;
- usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
- spin_lock(&xhci->lock);
- }
-
/*
* If ep->skip is set, it means there are missed tds on the
* endpoint ring need to take care of.
@@ -2644,18 +2517,17 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
int update_ptrs = 1;
int ret;
+ /* Event ring hasn't been allocated yet. */
if (!xhci->event_ring || !xhci->event_ring->dequeue) {
- xhci->error_bitmask |= 1 << 1;
- return 0;
+ xhci_err(xhci, "ERROR event ring not ready\n");
+ return -ENOMEM;
}
event = xhci->event_ring->dequeue;
/* Does the HC or OS own the TRB? */
if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
- xhci->event_ring->cycle_state) {
- xhci->error_bitmask |= 1 << 2;
+ xhci->event_ring->cycle_state)
return 0;
- }
/*
* Barrier between reading the TRB_CYCLE (valid) flag above and any
@@ -2663,7 +2535,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
*/
rmb();
/* FIXME: Handle more event types. */
- switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
+ switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) {
case TRB_TYPE(TRB_COMPLETION):
handle_cmd_completion(xhci, &event->event_cmd);
break;
@@ -2673,9 +2545,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
break;
case TRB_TYPE(TRB_TRANSFER):
ret = handle_tx_event(xhci, &event->trans_event);
- if (ret < 0)
- xhci->error_bitmask |= 1 << 9;
- else
+ if (ret >= 0)
update_ptrs = 0;
break;
case TRB_TYPE(TRB_DEV_NOTE):
@@ -2686,7 +2556,9 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
TRB_TYPE(48))
handle_vendor_event(xhci, event);
else
- xhci->error_bitmask |= 1 << 3;
+ xhci_warn(xhci, "ERROR unknown event type %d\n",
+ TRB_FIELD_TO_TYPE(
+ le32_to_cpu(event->event_cmd.flags)));
}
/* Any of the above functions may drop and re-acquire the lock, so check
* to make sure a watchdog timer didn't mark the host as non-responsive.
@@ -2931,8 +2803,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
return -EINVAL;
}
- ret = prepare_ring(xhci, ep_ring,
- le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
+ ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
num_trbs, mem_flags);
if (ret)
return ret;
@@ -3120,7 +2991,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
if (xhci->quirks & XHCI_MTK_HOST)
trb_buff_len = 0;
- maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+ maxp = usb_endpoint_maxp(&urb->ep->desc);
total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
/* Queueing functions don't count the current TRB into transferred */
@@ -3136,7 +3007,7 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
unsigned int max_pkt;
u32 new_buff_len;
- max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+ max_pkt = usb_endpoint_maxp(&urb->ep->desc);
unalign = (enqd_len + *trb_buff_len) % max_pkt;
/* we got lucky, last normal TRB data on segment is packet aligned */
@@ -3650,7 +3521,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
addr = start_addr + urb->iso_frame_desc[i].offset;
td_len = urb->iso_frame_desc[i].length;
td_remain_len = td_len;
- max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+ max_pkt = usb_endpoint_maxp(&urb->ep->desc);
total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
/* A zero-length transfer still involves at least one packet. */
@@ -3828,7 +3699,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Check the ring to guarantee there is enough room for the whole urb.
* Do not insert any td of the urb to the ring if the check failed.
*/
- ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
+ ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
num_trbs, mem_flags);
if (ret)
return ret;
@@ -3841,8 +3712,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Calculate the start frame and put it in urb->start_frame. */
if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
- if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
- EP_STATE_RUNNING) {
+ if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
urb->start_frame = xep->next_frame_id;
goto skip_start_over;
}
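
The xhci-ring.c rework above centralizes URB giveback: each finished TD bumps the per-URB counter via inc_td_cnt(), the URB is handed back only once last_td_in_urb() reports all TDs done, and sum_trb_lengths() covers partially transferred TDs. A standalone sketch of that counting scheme, with illustrative structures rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct urb_priv { unsigned int td_cnt, length; };

static void inc_td_cnt(struct urb_priv *p)           { p->td_cnt++; }
static bool last_td_in_urb(const struct urb_priv *p) { return p->td_cnt == p->length; }

int main(void)
{
	struct urb_priv priv = { .td_cnt = 0, .length = 3 };	/* URB split into 3 TDs */

	for (unsigned int td = 0; td < priv.length; td++) {
		inc_td_cnt(&priv);
		if (last_td_in_urb(&priv))
			printf("TD %u: give the URB back to the core\n", td);
		else
			printf("TD %u: wait for the remaining TDs\n", td);
	}
	return 0;
}
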
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1a4ca02729c2..1cd56417cbec 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -113,12 +113,12 @@ int xhci_halt(struct xhci_hcd *xhci)
ret = xhci_handshake(&xhci->op_regs->status,
STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
- if (!ret) {
- xhci->xhc_state |= XHCI_STATE_HALTED;
- xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
- } else
- xhci_warn(xhci, "Host not halted after %u microseconds.\n",
- XHCI_MAX_HALT_USEC);
+ if (ret) {
+ xhci_warn(xhci, "Host halt failed, %d\n", ret);
+ return ret;
+ }
+ xhci->xhc_state |= XHCI_STATE_HALTED;
+ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
return ret;
}
@@ -167,6 +167,12 @@ int xhci_reset(struct xhci_hcd *xhci)
int ret, i;
state = readl(&xhci->op_regs->status);
+
+ if (state == ~(u32)0) {
+ xhci_warn(xhci, "Host not accessible, reset failed.\n");
+ return -ENODEV;
+ }
+
if ((state & STS_HALT) == 0) {
xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
return 0;
@@ -690,7 +696,6 @@ void xhci_stop(struct usb_hcd *hcd)
xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
xhci_halt(xhci);
xhci_reset(xhci);
-
spin_unlock_irq(&xhci->lock);
}
@@ -1645,8 +1650,7 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
/* If the HC already knows the endpoint is disabled,
* or the HCD has noted it is disabled, ignore this request
*/
- if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
- cpu_to_le32(EP_STATE_DISABLED)) ||
+ if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
le32_to_cpu(ctrl_ctx->drop_flags) &
xhci_get_endpoint_flag(&ep->desc)) {
/* Do not warn when called after a usb_device_reset */
@@ -3209,7 +3213,7 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
for (i = 0; i < num_eps; i++) {
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
- max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&eps[i]->desc));
+ max_packet = usb_endpoint_maxp(&eps[i]->desc);
vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
num_stream_ctxs,
num_streams,
@@ -3683,27 +3687,26 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
int ret, slot_id;
struct xhci_command *command;
- command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+ command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
if (!command)
return 0;
/* xhci->slot_id and xhci->addr_dev are not thread-safe */
mutex_lock(&xhci->mutex);
spin_lock_irqsave(&xhci->lock, flags);
- command->completion = &xhci->addr_dev;
ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
mutex_unlock(&xhci->mutex);
xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
- kfree(command);
+ xhci_free_command(xhci, command);
return 0;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
wait_for_completion(command->completion);
- slot_id = xhci->slot_id;
+ slot_id = command->slot_id;
mutex_unlock(&xhci->mutex);
if (!slot_id || command->status != COMP_SUCCESS) {
@@ -3711,7 +3714,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
HCS_MAX_SLOTS(
readl(&xhci->cap_regs->hcs_params1)));
- kfree(command);
+ xhci_free_command(xhci, command);
return 0;
}
@@ -3747,7 +3750,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
#endif
- kfree(command);
+ xhci_free_command(xhci, command);
/* Is this a LS or FS device under a HS hub? */
/* Hub or peripherial? */
return 1;
@@ -3755,6 +3758,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
disable_slot:
/* Disable slot, if we can do it without mem alloc */
spin_lock_irqsave(&xhci->lock, flags);
+ kfree(command->completion);
command->completion = NULL;
command->status = 0;
if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
@@ -3816,14 +3820,13 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
}
}
- command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+ command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
if (!command) {
ret = -ENOMEM;
goto out;
}
command->in_ctx = virt_dev->in_ctx;
- command->completion = &xhci->addr_dev;
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
@@ -3941,7 +3944,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
out:
mutex_unlock(&xhci->mutex);
- kfree(command);
+ if (command) {
+ kfree(command->completion);
+ kfree(command);
+ }
return ret;
}
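
The xhci.c hunks move slot_id and the completion out of struct xhci_hcd and into the xhci_command itself, so the Enable Slot path no longer funnels results through shared global state. A standalone sketch of that ownership change, using stand-in types and a stand-in completion code:

#include <stdio.h>

#define COMP_SUCCESS	1	/* stand-in for the xHCI completion code */

struct command { int slot_id; int status; };

/* mirrors xhci_handle_cmd_enable_slot(): the result lands in the command */
static void handle_enable_slot(struct command *cmd, int slot_id, int comp_code)
{
	cmd->slot_id = (comp_code == COMP_SUCCESS) ? slot_id : 0;
	cmd->status = comp_code;
}

int main(void)
{
	struct command a = { 0 }, b = { 0 };

	handle_enable_slot(&a, 1, COMP_SUCCESS);
	handle_enable_slot(&b, 2, COMP_SUCCESS);
	printf("command A got slot %d, command B got slot %d\n", a.slot_id, b.slot_id);
	return 0;
}
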
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f945380035d0..8ccc11a974b8 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -709,6 +709,8 @@ struct xhci_ep_ctx {
#define EP_STATE_HALTED 2
#define EP_STATE_STOPPED 3
#define EP_STATE_ERROR 4
+#define GET_EP_CTX_STATE(ctx) (le32_to_cpu((ctx)->ep_info) & EP_STATE_MASK)
+
/* Mult - Max number of burtst within an interval, in EP companion desc. */
#define EP_MULT(p) (((p) & 0x3) << 8)
#define CTX_TO_EP_MULT(p) (((p) >> 8) & 0x3)
@@ -747,11 +749,6 @@ struct xhci_ep_ctx {
#define MAX_PACKET_MASK (0xffff << 16)
#define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff)
-/* Get max packet size from ep desc. Bit 10..0 specify the max packet size.
- * USB2.0 spec 9.6.6.
- */
-#define GET_MAX_PACKET(p) ((p) & 0x7ff)
-
/* tx_info bitmasks */
#define EP_AVG_TRB_LENGTH(p) ((p) & 0xffff)
#define EP_MAX_ESIT_PAYLOAD_LO(p) (((p) & 0xffff) << 16)
@@ -789,6 +786,7 @@ struct xhci_command {
/* Input context for changing device state */
struct xhci_container_ctx *in_ctx;
u32 status;
+ int slot_id;
/* If completion is null, no one is waiting on this command
* and the structure can be freed after the command completes.
*/
@@ -997,7 +995,6 @@ struct xhci_virt_device {
int num_rings_cached;
#define XHCI_MAX_RINGS_CACHED 31
struct xhci_virt_ep eps[31];
- struct completion cmd_completion;
u8 fake_port;
u8 real_port;
struct xhci_interval_bw_table *bw_table;
@@ -1583,8 +1580,6 @@ struct xhci_hcd {
/* slot enabling and address device helpers */
/* these are not thread safe so use mutex */
struct mutex mutex;
- struct completion addr_dev;
- int slot_id;
/* For USB 3.0 LPM enable/disable. */
struct xhci_command *lpm_command;
/* Internal mirror of the HW's dcbaa */
@@ -1618,8 +1613,6 @@ struct xhci_hcd {
#define XHCI_STATE_DYING (1 << 0)
#define XHCI_STATE_HALTED (1 << 1)
#define XHCI_STATE_REMOVING (1 << 2)
- /* Statistics */
- int error_bitmask;
unsigned int quirks;
#define XHCI_LINK_TRB_QUIRK (1 << 0)
#define XHCI_RESET_EP_QUIRK (1 << 1)
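
xhci.h gains GET_EP_CTX_STATE(), folding the le32_to_cpu() conversion and the EP_STATE_MASK into one accessor that the earlier hunks use in place of the open-coded checks. A minimal standalone mock of how callers compare against the EP_STATE_* values; the constants and the le32_to_cpu() stub below are local stand-ins assuming a little-endian host:

#include <stdio.h>
#include <stdint.h>

#define EP_STATE_MASK		0xf
#define EP_STATE_RUNNING	1
#define EP_STATE_HALTED		2

struct ep_ctx { uint32_t ep_info; };		/* stored little-endian by the HW */
#define le32_to_cpu(x)		(x)		/* little-endian host assumed */
#define GET_EP_CTX_STATE(ctx)	(le32_to_cpu((ctx)->ep_info) & EP_STATE_MASK)

int main(void)
{
	struct ep_ctx ctx = { .ep_info = EP_STATE_HALTED };

	if (GET_EP_CTX_STATE(&ctx) == EP_STATE_HALTED)
		printf("endpoint halted, needs cleanup\n");
	return 0;
}
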
diff --git a/drivers/usb/isp1760/isp1760-if.c b/drivers/usb/isp1760/isp1760-if.c
index 9535b2872183..79205b31e4a9 100644
--- a/drivers/usb/isp1760/isp1760-if.c
+++ b/drivers/usb/isp1760/isp1760-if.c
@@ -197,7 +197,7 @@ static int isp1760_plat_probe(struct platform_device *pdev)
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq_res) {
- pr_warning("isp1760: IRQ resource not available\n");
+ pr_warn("isp1760: IRQ resource not available\n");
return -ENODEV;
}
irqflags = irq_res->flags & IRQF_TRIGGER_MASK;
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index 6ddd08a32777..aa350dc9eb25 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -215,19 +215,7 @@ static int chaoskey_probe(struct usb_interface *interface,
dev->hwrng.name = dev->name ? dev->name : chaoskey_driver.name;
dev->hwrng.read = chaoskey_rng_read;
-
- /* Set the 'quality' metric. Quality is measured in units of
- * 1/1024's of a bit ("mills"). This should be set to 1024,
- * but there is a bug in the hwrng core which masks it with
- * 1023.
- *
- * The patch that has been merged to the crypto development
- * tree for that bug limits the value to 1024 at most, so by
- * setting this to 1024 + 1023, we get 1023 before the fix is
- * merged and 1024 afterwards. We'll patch this driver once
- * both bits of code are in the same tree.
- */
- dev->hwrng.quality = 1024 + 1023;
+ dev->hwrng.quality = 1024;
dev->hwrng_registered = (hwrng_register(&dev->hwrng) == 0);
if (!dev->hwrng_registered)
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index 13731d512624..fc329c98a6e8 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -421,7 +421,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
} else if (result != -EREMOTEIO) {
mutex_unlock(&(rio->lock));
dev_err(&rio->rio_dev->dev,
- "Read Whoops - result:%u partial:%u this_read:%u\n",
+ "Read Whoops - result:%d partial:%u this_read:%u\n",
result, partial, this_read);
return -EIO;
} else {
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index 460cebf322e3..4b5777ec1501 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -686,8 +686,6 @@ static void
sisusbcon_scrolldelta(struct vc_data *c, int lines)
{
struct sisusb_usb_data *sisusb;
- int margin = c->vc_size_row * 4;
- int ul, we, p, st;
sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
if (!sisusb)
@@ -700,39 +698,8 @@ sisusbcon_scrolldelta(struct vc_data *c, int lines)
return;
}
- if (!lines) /* Turn scrollback off */
- c->vc_visible_origin = c->vc_origin;
- else {
-
- if (sisusb->con_rolled_over >
- (c->vc_scr_end - sisusb->scrbuf) + margin) {
-
- ul = c->vc_scr_end - sisusb->scrbuf;
- we = sisusb->con_rolled_over + c->vc_size_row;
-
- } else {
-
- ul = 0;
- we = sisusb->scrbuf_size;
-
- }
-
- p = (c->vc_visible_origin - sisusb->scrbuf - ul + we) % we +
- lines * c->vc_size_row;
-
- st = (c->vc_origin - sisusb->scrbuf - ul + we) % we;
-
- if (st < 2 * margin)
- margin = 0;
-
- if (p < margin)
- p = 0;
-
- if (p > st - margin)
- p = st;
-
- c->vc_visible_origin = sisusb->scrbuf + (p + ul) % we;
- }
+ vc_scrolldelta_helper(c, lines, sisusb->con_rolled_over,
+ (void *)sisusb->scrbuf, sisusb->scrbuf_size);
sisusbcon_set_start_address(sisusb, c);
@@ -808,9 +775,10 @@ sisusbcon_cursor(struct vc_data *c, int mode)
mutex_unlock(&sisusb->lock);
}
-static int
+static bool
sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb,
- int t, int b, int dir, int lines)
+ unsigned int t, unsigned int b, enum con_scroll dir,
+ unsigned int lines)
{
int cols = sisusb->sisusb_num_columns;
int length = ((b - t) * cols) * 2;
@@ -852,8 +820,9 @@ sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb,
}
/* Interface routine */
-static int
-sisusbcon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
+static bool
+sisusbcon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
+ enum con_scroll dir, unsigned int lines)
{
struct sisusb_usb_data *sisusb;
u16 eattr = c->vc_video_erase_char;
@@ -870,17 +839,17 @@ sisusbcon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
*/
if (!lines)
- return 1;
+ return true;
sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
if (!sisusb)
- return 0;
+ return false;
/* sisusb->lock is down */
if (sisusb_is_inactive(c, sisusb)) {
mutex_unlock(&sisusb->lock);
- return 0;
+ return false;
}
/* Special case */
@@ -971,7 +940,7 @@ sisusbcon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
mutex_unlock(&sisusb->lock);
- return 1;
+ return true;
}
/* Interface routine */
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 5c8210dc6fd9..3525626bf086 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1915,7 +1915,7 @@ static struct urb *iso_alloc_urb(
if (bytes < 0 || !desc)
return NULL;
maxp = 0x7ff & usb_endpoint_maxp(desc);
- maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
+ maxp *= usb_endpoint_maxp_mult(desc);
packets = DIV_ROUND_UP(bytes, maxp);
urb = usb_alloc_urb(packets, GFP_KERNEL);
@@ -2001,8 +2001,8 @@ test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param,
"iso period %d %sframes, wMaxPacket %d, transactions: %d\n",
1 << (desc->bInterval - 1),
(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
- usb_endpoint_maxp(desc) & 0x7ff,
- 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11)));
+ usb_endpoint_maxp(desc),
+ usb_endpoint_maxp_mult(desc));
dev_info(&dev->intf->dev,
"total %lu msec (%lu packets)\n",
diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig
new file mode 100644
index 000000000000..25cd61947bee
--- /dev/null
+++ b/drivers/usb/mtu3/Kconfig
@@ -0,0 +1,54 @@
+# For MTK USB3.0 IP
+
+config USB_MTU3
+ tristate "MediaTek USB3 Dual Role controller"
+ depends on EXTCON && (USB || USB_GADGET) && HAS_DMA
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD
+ help
+ Say Y or M here if your system runs on a MediaTek SoC with a
+ Dual Role SuperSpeed USB controller. The controller can work in
+ peripheral (gadget) role, host role, or both.
+
+ If you don't know what this is, please say N.
+
+ Choose M here to compile this driver as a module, and it
+ will be called mtu3.ko.
+
+
+if USB_MTU3
+choice
+ bool "MTU3 Mode Selection"
+ default USB_MTU3_DUAL_ROLE if (USB && USB_GADGET)
+ default USB_MTU3_HOST if (USB && !USB_GADGET)
+ default USB_MTU3_GADGET if (!USB && USB_GADGET)
+
+config USB_MTU3_HOST
+ bool "Host only mode"
+ depends on USB=y || USB=USB_MTU3
+ help
+ Select this when you want to use MTU3 in host mode only;
+ the gadget feature will not be available.
+
+config USB_MTU3_GADGET
+ bool "Gadget only mode"
+ depends on USB_GADGET=y || USB_GADGET=USB_MTU3
+ help
+ Select this when you want to use MTU3 in gadget mode only;
+ the host feature will not be available.
+
+config USB_MTU3_DUAL_ROLE
+ bool "Dual Role mode"
+ depends on ((USB=y || USB=USB_MTU3) && (USB_GADGET=y || USB_GADGET=USB_MTU3))
+ help
+ This is the default operating mode of the MTU3 controller,
+ with both host and gadget features enabled.
+
+endchoice
+
+config USB_MTU3_DEBUG
+ bool "Enable Debugging Messages"
+ help
+ Say Y here to enable debugging messages in the MTU3 Driver.
+
+endif
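+
+# Example (hypothetical) defconfig fragment for dual-role operation on a
+# MediaTek platform; the exact symbols to set depend on the rest of the
+# USB configuration:
+#   CONFIG_USB_MTU3=m
+#   CONFIG_USB_MTU3_DUAL_ROLE=y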
diff --git a/drivers/usb/mtu3/Makefile b/drivers/usb/mtu3/Makefile
new file mode 100644
index 000000000000..60e0fff7a847
--- /dev/null
+++ b/drivers/usb/mtu3/Makefile
@@ -0,0 +1,18 @@
+
+ccflags-$(CONFIG_USB_MTU3_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_USB_MTU3) += mtu3.o
+
+mtu3-y := mtu3_plat.o
+
+ifneq ($(filter y,$(CONFIG_USB_MTU3_HOST) $(CONFIG_USB_MTU3_DUAL_ROLE)),)
+ mtu3-y += mtu3_host.o
+endif
+
+ifneq ($(filter y,$(CONFIG_USB_MTU3_GADGET) $(CONFIG_USB_MTU3_DUAL_ROLE)),)
+ mtu3-y += mtu3_core.o mtu3_gadget_ep0.o mtu3_gadget.o mtu3_qmu.o
+endif
+
+ifneq ($(CONFIG_USB_MTU3_DUAL_ROLE),)
+ mtu3-y += mtu3_dr.o
+endif
diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h
new file mode 100644
index 000000000000..ba9df719f363
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3.h
@@ -0,0 +1,417 @@
+/*
+ * mtu3.h - MediaTek USB3 DRD header
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MTU3_H__
+#define __MTU3_H__
+
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/extcon.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+
+struct mtu3;
+struct mtu3_ep;
+struct mtu3_request;
+
+#include "mtu3_hw_regs.h"
+#include "mtu3_qmu.h"
+
+#define MU3D_EP_TXCR0(epnum) (U3D_TX1CSR0 + (((epnum) - 1) * 0x10))
+#define MU3D_EP_TXCR1(epnum) (U3D_TX1CSR1 + (((epnum) - 1) * 0x10))
+#define MU3D_EP_TXCR2(epnum) (U3D_TX1CSR2 + (((epnum) - 1) * 0x10))
+
+#define MU3D_EP_RXCR0(epnum) (U3D_RX1CSR0 + (((epnum) - 1) * 0x10))
+#define MU3D_EP_RXCR1(epnum) (U3D_RX1CSR1 + (((epnum) - 1) * 0x10))
+#define MU3D_EP_RXCR2(epnum) (U3D_RX1CSR2 + (((epnum) - 1) * 0x10))
+
+#define USB_QMU_RQCSR(epnum) (U3D_RXQCSR1 + (((epnum) - 1) * 0x10))
+#define USB_QMU_RQSAR(epnum) (U3D_RXQSAR1 + (((epnum) - 1) * 0x10))
+#define USB_QMU_RQCPR(epnum) (U3D_RXQCPR1 + (((epnum) - 1) * 0x10))
+
+#define USB_QMU_TQCSR(epnum) (U3D_TXQCSR1 + (((epnum) - 1) * 0x10))
+#define USB_QMU_TQSAR(epnum) (U3D_TXQSAR1 + (((epnum) - 1) * 0x10))
+#define USB_QMU_TQCPR(epnum) (U3D_TXQCPR1 + (((epnum) - 1) * 0x10))
+
+#define SSUSB_U3_CTRL(p) (U3D_SSUSB_U3_CTRL_0P + ((p) * 0x08))
+#define SSUSB_U2_CTRL(p) (U3D_SSUSB_U2_CTRL_0P + ((p) * 0x08))
+
+#define MTU3_DRIVER_NAME "mtu3"
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+#define MTU3_EP_ENABLED BIT(0)
+#define MTU3_EP_STALL BIT(1)
+#define MTU3_EP_WEDGE BIT(2)
+#define MTU3_EP_BUSY BIT(3)
+
+#define MTU3_U3_IP_SLOT_DEFAULT 2
+#define MTU3_U2_IP_SLOT_DEFAULT 1
+
+/**
+ * Normally the device works at HS or SS. To simplify fifo management,
+ * the fifo is divided into 512B units and managed with a bitmap; a
+ * 128-bit bitmap is large enough to cover a fifo of up to 64KB.
+ * NOTE: MTU3_EP_FIFO_UNIT should be a power of two
+ */
+#define MTU3_EP_FIFO_UNIT (1 << 9)
+#define MTU3_FIFO_BIT_SIZE 128
+#define MTU3_U2_IP_EP0_FIFO_SIZE 64
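+
+/*
+ * Illustration (editorial, not from the hardware manual): with 512B
+ * units and a 128-bit bitmap, at most 128 * 512B = 64KB of fifo can be
+ * managed. For example, a bulk EP with maxp = 1024 needs 1024 / 512 = 2
+ * units per segment (fifo_seg_size = 1024B); with the default U3 slot
+ * of 2, ep_fifo_alloc() reserves (2 + 1) * 2 = 6 units = 3072B.
+ */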
+
+/**
+ * Maximum size of the ep0 response buffer for ch9 requests;
+ * the SET_SEL request needs 6 bytes so far, and GET_STATUS needs 2
+ */
+#define EP0_RESPONSE_BUF 6
+
+/* device operated link and speed got from DEVICE_CONF register */
+enum mtu3_speed {
+ MTU3_SPEED_INACTIVE = 0,
+ MTU3_SPEED_FULL = 1,
+ MTU3_SPEED_HIGH = 3,
+ MTU3_SPEED_SUPER = 4,
+};
+
+/**
+ * @MU3D_EP0_STATE_SETUP: waiting for a SETUP, or a SETUP without a
+ * data stage has been received
+ * @MU3D_EP0_STATE_TX: IN data stage
+ * @MU3D_EP0_STATE_RX: OUT data stage
+ * @MU3D_EP0_STATE_TX_END: the last IN data has been transferred;
+ * waiting for its completion interrupt
+ * @MU3D_EP0_STATE_STALL: ep0 is stalled; the stall is auto-cleared
+ * after a SETUP is received
+ */
+enum mtu3_g_ep0_state {
+ MU3D_EP0_STATE_SETUP = 1,
+ MU3D_EP0_STATE_TX,
+ MU3D_EP0_STATE_RX,
+ MU3D_EP0_STATE_TX_END,
+ MU3D_EP0_STATE_STALL,
+};
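+
+/*
+ * Editorial sketch of a typical control-IN transfer through these
+ * states (not taken from the hardware documentation):
+ * MU3D_EP0_STATE_SETUP (SETUP received) -> MU3D_EP0_STATE_TX (send IN
+ * data) -> MU3D_EP0_STATE_TX_END (wait for the completion interrupt)
+ * -> back to MU3D_EP0_STATE_SETUP for the status/next SETUP.
+ */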
+
+/**
+ * @base: the base address of fifo
+ * @limit: the bitmap size in bits
+ * @bitmap: fifo bitmap in unit of @MTU3_EP_FIFO_UNIT
+ */
+struct mtu3_fifo_info {
+ u32 base;
+ u32 limit;
+ DECLARE_BITMAP(bitmap, MTU3_FIFO_BIT_SIZE);
+};
+
+/**
+ * General Purpose Descriptor (GPD):
+ * The format of TX GPD is a little different from RX one.
+ * And the size of GPD is 16 bytes.
+ *
+ * @flag:
+ * bit0: Hardware Own (HWO)
+ * bit1: Buffer Descriptor Present (BDP), always 0, BD is not supported
+ * bit2: Bypass (BPS), 1: HW skips this GPD if HWO = 1
+ * bit7: Interrupt On Completion (IOC)
+ * @chksum: used to validate the contents of this GPD;
+ * if the TXQ_CS_EN / RXQ_CS_EN bit is set, an interrupt is issued
+ * when checksum validation fails;
+ * the checksum is calculated over the 16 bytes of the GPD by default;
+ * @data_buf_len (RX ONLY): This value indicates the length of
+ * the assigned data buffer
+ * @next_gpd: Physical address of the next GPD
+ * @buffer: Physical address of the data buffer
+ * @buf_len:
+ * (TX): This value indicates the length of the assigned data buffer
+ * (RX): The total length of data received
+ * @ext_len: reserved
+ * @ext_flag:
+ * bit5 (TX ONLY): Zero Length Packet (ZLP),
+ */
+struct qmu_gpd {
+ __u8 flag;
+ __u8 chksum;
+ __le16 data_buf_len;
+ __le32 next_gpd;
+ __le32 buffer;
+ __le16 buf_len;
+ __u8 ext_len;
+ __u8 ext_flag;
+} __packed;
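+
+/*
+ * Illustrative size check: the fields above add up to
+ * 1 + 1 + 2 + 4 + 4 + 2 + 1 + 1 = 16 bytes, matching the 16-byte GPD
+ * size stated above; a BUILD_BUG_ON(sizeof(struct qmu_gpd) != 16) in
+ * the C code would catch any accidental change to this layout.
+ */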
+
+/**
+ * @dma: physical base address of the GPD segment
+ * @start: virtual base address of the GPD segment
+ * @end: the last GPD element
+ * @enqueue: the first empty GPD to use
+ * @dequeue: the first completed GPD serviced by the ISR
+ * NOTE: the size of the GPD ring should be >= 2
+ */
+struct mtu3_gpd_ring {
+ dma_addr_t dma;
+ struct qmu_gpd *start;
+ struct qmu_gpd *end;
+ struct qmu_gpd *enqueue;
+ struct qmu_gpd *dequeue;
+};
+
+/**
+ * @vbus: vbus 5V used by host mode
+ * @edev: external connector used to detect vbus and iddig changes
+ * @vbus_nb: notifier for vbus detection
+ * @id_nb: notifier for iddig (idpin) detection
+ * @extcon_reg_dwork: delayed work to register the extcon notifiers; it waits
+ * for xHCI driver initialization, which is necessary when the system
+ * boots up as a device.
+ * @is_u3_drd: whether port0 supports usb3.0 dual-role device or not
+ * @id_*: used to manually switch between host and device modes via idpin
+ * @manual_drd_enabled: true when dual-role switching between host/device
+ * modes is driven from debugfs depending on user input.
+ */
+struct otg_switch_mtk {
+ struct regulator *vbus;
+ struct extcon_dev *edev;
+ struct notifier_block vbus_nb;
+ struct notifier_block id_nb;
+ struct delayed_work extcon_reg_dwork;
+ bool is_u3_drd;
+ /* dual-role switch by debugfs */
+ struct pinctrl *id_pinctrl;
+ struct pinctrl_state *id_float;
+ struct pinctrl_state *id_ground;
+ bool manual_drd_enabled;
+};
+
+/**
+ * @mac_base: register base address of device MAC, exclude xHCI's
+ * @ippc_base: register base address of IP Power and Clock interface (IPPC)
+ * @vusb33: usb3.3V shared by device/host IP
+ * @sys_clk: system clock of mtu3, shared by device/host IP
+ * @dr_mode: operating mode: host only, device only, or dual-role
+ * @u2_ports: number of usb2.0 host ports
+ * @u3_ports: number of usb3.0 host ports
+ * @dbgfs_root: only used when manual dual-role switching via debugfs
+ * is supported
+ * @wakeup_en: true when remote wakeup is supported in host mode
+ * @wk_deb_p0: port0's wakeup debounce clock
+ * @wk_deb_p1: optional; depends on whether port1 is supported
+ */
+struct ssusb_mtk {
+ struct device *dev;
+ struct mtu3 *u3d;
+ void __iomem *mac_base;
+ void __iomem *ippc_base;
+ struct phy **phys;
+ int num_phys;
+ /* common power & clock */
+ struct regulator *vusb33;
+ struct clk *sys_clk;
+ /* otg */
+ struct otg_switch_mtk otg_switch;
+ enum usb_dr_mode dr_mode;
+ bool is_host;
+ int u2_ports;
+ int u3_ports;
+ struct dentry *dbgfs_root;
+ /* usb wakeup for host mode */
+ bool wakeup_en;
+ struct clk *wk_deb_p0;
+ struct clk *wk_deb_p1;
+ struct regmap *pericfg;
+};
+
+/**
+ * @fifo_size: it is (@slot + 1) * @fifo_seg_size
+ * @fifo_seg_size: it is roundup_pow_of_two(@maxp)
+ */
+struct mtu3_ep {
+ struct usb_ep ep;
+ char name[12];
+ struct mtu3 *mtu;
+ u8 epnum;
+ u8 type;
+ u8 is_in;
+ u16 maxp;
+ int slot;
+ u32 fifo_size;
+ u32 fifo_addr;
+ u32 fifo_seg_size;
+ struct mtu3_fifo_info *fifo;
+
+ struct list_head req_list;
+ struct mtu3_gpd_ring gpd_ring;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+ const struct usb_endpoint_descriptor *desc;
+
+ int flags;
+ u8 wedged;
+ u8 busy;
+};
+
+struct mtu3_request {
+ struct usb_request request;
+ struct list_head list;
+ struct mtu3_ep *mep;
+ struct mtu3 *mtu;
+ struct qmu_gpd *gpd;
+ int epnum;
+};
+
+static inline struct ssusb_mtk *dev_to_ssusb(struct device *dev)
+{
+ return dev_get_drvdata(dev);
+}
+
+/**
+ * struct mtu3 - device driver instance data.
+ * @slot: MTU3_U2_IP_SLOT_DEFAULT for U2 IP only,
+ * MTU3_U3_IP_SLOT_DEFAULT for U3 IP
+ * @may_wakeup: means device's remote wakeup is enabled
+ * @is_self_powered: is reported in device status and the config descriptor
+ * @ep0_req: dummy request used while handling standard USB requests
+ * for GET_STATUS and SET_SEL
+ * @setup_buf: ep0 response buffer for GET_STATUS and SET_SEL requests
+ */
+struct mtu3 {
+ spinlock_t lock;
+ struct ssusb_mtk *ssusb;
+ struct device *dev;
+ void __iomem *mac_base;
+ void __iomem *ippc_base;
+ int irq;
+
+ struct mtu3_fifo_info tx_fifo;
+ struct mtu3_fifo_info rx_fifo;
+
+ struct mtu3_ep *ep_array;
+ struct mtu3_ep *in_eps;
+ struct mtu3_ep *out_eps;
+ struct mtu3_ep *ep0;
+ int num_eps;
+ int slot;
+ int active_ep;
+
+ struct dma_pool *qmu_gpd_pool;
+ enum mtu3_g_ep0_state ep0_state;
+ struct usb_gadget g; /* the gadget */
+ struct usb_gadget_driver *gadget_driver;
+ struct mtu3_request ep0_req;
+ u8 setup_buf[EP0_RESPONSE_BUF];
+ u32 max_speed;
+
+ unsigned is_active:1;
+ unsigned may_wakeup:1;
+ unsigned is_self_powered:1;
+ unsigned test_mode:1;
+ unsigned softconnect:1;
+ unsigned u1_enable:1;
+ unsigned u2_enable:1;
+ unsigned is_u3_ip:1;
+
+ u8 address;
+ u8 test_mode_nr;
+ u32 hw_version;
+};
+
+static inline struct mtu3 *gadget_to_mtu3(struct usb_gadget *g)
+{
+ return container_of(g, struct mtu3, g);
+}
+
+static inline int is_first_entry(const struct list_head *list,
+ const struct list_head *head)
+{
+ return list_is_last(head, list);
+}
+
+static inline struct mtu3_request *to_mtu3_request(struct usb_request *req)
+{
+ return req ? container_of(req, struct mtu3_request, request) : NULL;
+}
+
+static inline struct mtu3_ep *to_mtu3_ep(struct usb_ep *ep)
+{
+ return ep ? container_of(ep, struct mtu3_ep, ep) : NULL;
+}
+
+static inline struct mtu3_request *next_request(struct mtu3_ep *mep)
+{
+ struct list_head *queue = &mep->req_list;
+
+ if (list_empty(queue))
+ return NULL;
+
+ return list_first_entry(queue, struct mtu3_request, list);
+}
+
+static inline void mtu3_writel(void __iomem *base, u32 offset, u32 data)
+{
+ writel(data, base + offset);
+}
+
+static inline u32 mtu3_readl(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
+
+static inline void mtu3_setbits(void __iomem *base, u32 offset, u32 bits)
+{
+ void __iomem *addr = base + offset;
+ u32 tmp = readl(addr);
+
+ writel((tmp | (bits)), addr);
+}
+
+static inline void mtu3_clrbits(void __iomem *base, u32 offset, u32 bits)
+{
+ void __iomem *addr = base + offset;
+ u32 tmp = readl(addr);
+
+ writel((tmp & ~(bits)), addr);
+}
+
+int ssusb_check_clocks(struct ssusb_mtk *ssusb, u32 ex_clks);
+struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags);
+void mtu3_free_request(struct usb_ep *ep, struct usb_request *req);
+void mtu3_req_complete(struct mtu3_ep *mep,
+ struct usb_request *req, int status);
+
+int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
+ int interval, int burst, int mult);
+void mtu3_deconfig_ep(struct mtu3 *mtu, struct mtu3_ep *mep);
+void mtu3_ep_stall_set(struct mtu3_ep *mep, bool set);
+void mtu3_ep0_setup(struct mtu3 *mtu);
+void mtu3_start(struct mtu3 *mtu);
+void mtu3_stop(struct mtu3 *mtu);
+void mtu3_dev_on_off(struct mtu3 *mtu, int is_on);
+
+int mtu3_gadget_setup(struct mtu3 *mtu);
+void mtu3_gadget_cleanup(struct mtu3 *mtu);
+void mtu3_gadget_reset(struct mtu3 *mtu);
+void mtu3_gadget_suspend(struct mtu3 *mtu);
+void mtu3_gadget_resume(struct mtu3 *mtu);
+void mtu3_gadget_disconnect(struct mtu3 *mtu);
+
+irqreturn_t mtu3_ep0_isr(struct mtu3 *mtu);
+extern const struct usb_ep_ops mtu3_ep0_ops;
+
+#endif
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
new file mode 100644
index 000000000000..99c65b0788ff
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -0,0 +1,863 @@
+/*
+ * mtu3_core.c - hardware access layer and gadget init/exit of
+ * MediaTek usb3 Dual-Role Controller Driver
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+#include "mtu3.h"
+
+static int ep_fifo_alloc(struct mtu3_ep *mep, u32 seg_size)
+{
+ struct mtu3_fifo_info *fifo = mep->fifo;
+ u32 num_bits = DIV_ROUND_UP(seg_size, MTU3_EP_FIFO_UNIT);
+ u32 start_bit;
+
+ /* ensure that @mep->fifo_seg_size is power of two */
+ num_bits = roundup_pow_of_two(num_bits);
+ if (num_bits > fifo->limit)
+ return -EINVAL;
+
+ mep->fifo_seg_size = num_bits * MTU3_EP_FIFO_UNIT;
+ num_bits = num_bits * (mep->slot + 1);
+ start_bit = bitmap_find_next_zero_area(fifo->bitmap,
+ fifo->limit, 0, num_bits, 0);
+ if (start_bit >= fifo->limit)
+ return -EOVERFLOW;
+
+ bitmap_set(fifo->bitmap, start_bit, num_bits);
+ mep->fifo_size = num_bits * MTU3_EP_FIFO_UNIT;
+ mep->fifo_addr = fifo->base + MTU3_EP_FIFO_UNIT * start_bit;
+
+ dev_dbg(mep->mtu->dev, "%s fifo:%#x/%#x, start_bit: %d\n",
+ __func__, mep->fifo_seg_size, mep->fifo_size, start_bit);
+
+ return mep->fifo_addr;
+}
+
+static void ep_fifo_free(struct mtu3_ep *mep)
+{
+ struct mtu3_fifo_info *fifo = mep->fifo;
+ u32 addr = mep->fifo_addr;
+ u32 bits = mep->fifo_size / MTU3_EP_FIFO_UNIT;
+ u32 start_bit;
+
+ if (unlikely(addr < fifo->base || bits > fifo->limit))
+ return;
+
+ start_bit = (addr - fifo->base) / MTU3_EP_FIFO_UNIT;
+ bitmap_clear(fifo->bitmap, start_bit, bits);
+ mep->fifo_size = 0;
+ mep->fifo_seg_size = 0;
+
+ dev_dbg(mep->mtu->dev, "%s size:%#x/%#x, start_bit: %d\n",
+ __func__, mep->fifo_seg_size, mep->fifo_size, start_bit);
+}
+
+/* enable/disable U3D SS function */
+static inline void mtu3_ss_func_set(struct mtu3 *mtu, bool enable)
+{
+ /* If usb3_en==0, LTSSM will go to SS.Disable state */
+ if (enable)
+ mtu3_setbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN);
+ else
+ mtu3_clrbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN);
+
+ dev_dbg(mtu->dev, "USB3_EN = %d\n", !!enable);
+}
+
+/* set/clear U3D HS device soft connect */
+static inline void mtu3_hs_softconn_set(struct mtu3 *mtu, bool enable)
+{
+ if (enable) {
+ mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT,
+ SOFT_CONN | SUSPENDM_ENABLE);
+ } else {
+ mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT,
+ SOFT_CONN | SUSPENDM_ENABLE);
+ }
+ dev_dbg(mtu->dev, "SOFTCONN = %d\n", !!enable);
+}
+
+/* only port0 of U2/U3 supports device mode */
+static int mtu3_device_enable(struct mtu3 *mtu)
+{
+ void __iomem *ibase = mtu->ippc_base;
+ u32 check_clk = 0;
+
+ mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
+
+ if (mtu->is_u3_ip) {
+ check_clk = SSUSB_U3_MAC_RST_B_STS;
+ mtu3_clrbits(ibase, SSUSB_U3_CTRL(0),
+ (SSUSB_U3_PORT_DIS | SSUSB_U3_PORT_PDN |
+ SSUSB_U3_PORT_HOST_SEL));
+ }
+ mtu3_clrbits(ibase, SSUSB_U2_CTRL(0),
+ (SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN |
+ SSUSB_U2_PORT_HOST_SEL));
+ mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+
+ return ssusb_check_clocks(mtu->ssusb, check_clk);
+}
+
+static void mtu3_device_disable(struct mtu3 *mtu)
+{
+ void __iomem *ibase = mtu->ippc_base;
+
+ if (mtu->is_u3_ip)
+ mtu3_setbits(ibase, SSUSB_U3_CTRL(0),
+ (SSUSB_U3_PORT_DIS | SSUSB_U3_PORT_PDN));
+
+ mtu3_setbits(ibase, SSUSB_U2_CTRL(0),
+ SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN);
+ mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+ mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
+}
+
+/* reset U3D's device module. */
+static void mtu3_device_reset(struct mtu3 *mtu)
+{
+ void __iomem *ibase = mtu->ippc_base;
+
+ mtu3_setbits(ibase, U3D_SSUSB_DEV_RST_CTRL, SSUSB_DEV_SW_RST);
+ udelay(1);
+ mtu3_clrbits(ibase, U3D_SSUSB_DEV_RST_CTRL, SSUSB_DEV_SW_RST);
+}
+
+/* disable all interrupts */
+static void mtu3_intr_disable(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+
+ /* Disable level 1 interrupts */
+ mtu3_writel(mbase, U3D_LV1IECR, ~0x0);
+ /* Disable endpoint interrupts */
+ mtu3_writel(mbase, U3D_EPIECR, ~0x0);
+}
+
+static void mtu3_intr_status_clear(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+
+ /* Clear EP0 and Tx/Rx EPn interrupts status */
+ mtu3_writel(mbase, U3D_EPISR, ~0x0);
+ /* Clear U2 USB common interrupts status */
+ mtu3_writel(mbase, U3D_COMMON_USB_INTR, ~0x0);
+ /* Clear U3 LTSSM interrupts status */
+ mtu3_writel(mbase, U3D_LTSSM_INTR, ~0x0);
+ /* Clear speed change interrupt status */
+ mtu3_writel(mbase, U3D_DEV_LINK_INTR, ~0x0);
+}
+
+/* enable system global interrupt */
+static void mtu3_intr_enable(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ u32 value;
+
+ /* Enable level 1 interrupts (BMU, QMU, MAC3, DMA, MAC2, EPCTL) */
+ value = BMU_INTR | QMU_INTR | MAC3_INTR | MAC2_INTR | EP_CTRL_INTR;
+ mtu3_writel(mbase, U3D_LV1IESR, value);
+
+ /* Enable U2 common USB interrupts */
+ value = SUSPEND_INTR | RESUME_INTR | RESET_INTR;
+ mtu3_writel(mbase, U3D_COMMON_USB_INTR_ENABLE, value);
+
+ if (mtu->is_u3_ip) {
+ /* Enable U3 LTSSM interrupts */
+ value = HOT_RST_INTR | WARM_RST_INTR | VBUS_RISE_INTR |
+ VBUS_FALL_INTR | ENTER_U3_INTR | EXIT_U3_INTR;
+ mtu3_writel(mbase, U3D_LTSSM_INTR_ENABLE, value);
+ }
+
+ /* Enable QMU interrupts. */
+ value = TXQ_CSERR_INT | TXQ_LENERR_INT | RXQ_CSERR_INT |
+ RXQ_LENERR_INT | RXQ_ZLPERR_INT;
+ mtu3_writel(mbase, U3D_QIESR1, value);
+
+ /* Enable speed change interrupt */
+ mtu3_writel(mbase, U3D_DEV_LINK_INTR_ENABLE, SSUSB_DEV_SPEED_CHG_INTR);
+}
+
+/* set/clear the stall and toggle bits for non-ep0 */
+void mtu3_ep_stall_set(struct mtu3_ep *mep, bool set)
+{
+ struct mtu3 *mtu = mep->mtu;
+ void __iomem *mbase = mtu->mac_base;
+ u8 epnum = mep->epnum;
+ u32 csr;
+
+ if (mep->is_in) { /* TX */
+ csr = mtu3_readl(mbase, MU3D_EP_TXCR0(epnum)) & TX_W1C_BITS;
+ if (set)
+ csr |= TX_SENDSTALL;
+ else
+ csr = (csr & (~TX_SENDSTALL)) | TX_SENTSTALL;
+ mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), csr);
+ } else { /* RX */
+ csr = mtu3_readl(mbase, MU3D_EP_RXCR0(epnum)) & RX_W1C_BITS;
+ if (set)
+ csr |= RX_SENDSTALL;
+ else
+ csr = (csr & (~RX_SENDSTALL)) | RX_SENTSTALL;
+ mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), csr);
+ }
+
+ if (!set) {
+ mtu3_setbits(mbase, U3D_EP_RST, EP_RST(mep->is_in, epnum));
+ mtu3_clrbits(mbase, U3D_EP_RST, EP_RST(mep->is_in, epnum));
+ mep->flags &= ~MTU3_EP_STALL;
+ } else {
+ mep->flags |= MTU3_EP_STALL;
+ }
+
+ dev_dbg(mtu->dev, "%s: %s\n", mep->name,
+ set ? "SEND STALL" : "CLEAR STALL, with EP RESET");
+}
+
+void mtu3_dev_on_off(struct mtu3 *mtu, int is_on)
+{
+ if (mtu->is_u3_ip && (mtu->max_speed == USB_SPEED_SUPER))
+ mtu3_ss_func_set(mtu, is_on);
+ else
+ mtu3_hs_softconn_set(mtu, is_on);
+
+ dev_info(mtu->dev, "gadget (%s) pullup D%s\n",
+ usb_speed_string(mtu->max_speed), is_on ? "+" : "-");
+}
+
+void mtu3_start(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+
+ dev_dbg(mtu->dev, "%s devctl 0x%x\n", __func__,
+ mtu3_readl(mbase, U3D_DEVICE_CONTROL));
+
+ mtu3_clrbits(mtu->ippc_base, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
+
+ /*
+ * When the U2 port is disabled, the USB2_CSR register is reset to its
+ * default value after the port is re-enabled (HS is enabled by
+ * default). So if the MAC is forced to work at FS, disable the HS
+ * function.
+ */
+ if (mtu->max_speed == USB_SPEED_FULL)
+ mtu3_clrbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
+
+ /* Initialize the default interrupts */
+ mtu3_intr_enable(mtu);
+ mtu->is_active = 1;
+
+ if (mtu->softconnect)
+ mtu3_dev_on_off(mtu, 1);
+}
+
+void mtu3_stop(struct mtu3 *mtu)
+{
+ dev_dbg(mtu->dev, "%s\n", __func__);
+
+ mtu3_intr_disable(mtu);
+ mtu3_intr_status_clear(mtu);
+
+ if (mtu->softconnect)
+ mtu3_dev_on_off(mtu, 0);
+
+ mtu->is_active = 0;
+ mtu3_setbits(mtu->ippc_base, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
+}
+
+/* for non-ep0 */
+int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
+ int interval, int burst, int mult)
+{
+ void __iomem *mbase = mtu->mac_base;
+ int epnum = mep->epnum;
+ u32 csr0, csr1, csr2;
+ int fifo_sgsz, fifo_addr;
+ int num_pkts;
+
+ fifo_addr = ep_fifo_alloc(mep, mep->maxp);
+ if (fifo_addr < 0) {
+ dev_err(mtu->dev, "alloc ep fifo failed(%d)\n", mep->maxp);
+ return -ENOMEM;
+ }
+ fifo_sgsz = ilog2(mep->fifo_seg_size);
+ dev_dbg(mtu->dev, "%s fifosz: %x(%x/%x)\n", __func__, fifo_sgsz,
+ mep->fifo_seg_size, mep->fifo_size);
+
+ if (mep->is_in) {
+ csr0 = TX_TXMAXPKTSZ(mep->maxp);
+ csr0 |= TX_DMAREQEN;
+
+ num_pkts = (burst + 1) * (mult + 1) - 1;
+ csr1 = TX_SS_BURST(burst) | TX_SLOT(mep->slot);
+ csr1 |= TX_MAX_PKT(num_pkts) | TX_MULT(mult);
+
+ csr2 = TX_FIFOADDR(fifo_addr >> 4);
+ csr2 |= TX_FIFOSEGSIZE(fifo_sgsz);
+
+ switch (mep->type) {
+ case USB_ENDPOINT_XFER_BULK:
+ csr1 |= TX_TYPE(TYPE_BULK);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ csr1 |= TX_TYPE(TYPE_ISO);
+ csr2 |= TX_BINTERVAL(interval);
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ csr1 |= TX_TYPE(TYPE_INT);
+ csr2 |= TX_BINTERVAL(interval);
+ break;
+ }
+
+ /* Enable QMU Done interrupt */
+ mtu3_setbits(mbase, U3D_QIESR0, QMU_TX_DONE_INT(epnum));
+
+ mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), csr0);
+ mtu3_writel(mbase, MU3D_EP_TXCR1(epnum), csr1);
+ mtu3_writel(mbase, MU3D_EP_TXCR2(epnum), csr2);
+
+ dev_dbg(mtu->dev, "U3D_TX%d CSR0:%#x, CSR1:%#x, CSR2:%#x\n",
+ epnum, mtu3_readl(mbase, MU3D_EP_TXCR0(epnum)),
+ mtu3_readl(mbase, MU3D_EP_TXCR1(epnum)),
+ mtu3_readl(mbase, MU3D_EP_TXCR2(epnum)));
+ } else {
+ csr0 = RX_RXMAXPKTSZ(mep->maxp);
+ csr0 |= RX_DMAREQEN;
+
+ num_pkts = (burst + 1) * (mult + 1) - 1;
+ csr1 = RX_SS_BURST(burst) | RX_SLOT(mep->slot);
+ csr1 |= RX_MAX_PKT(num_pkts) | RX_MULT(mult);
+
+ csr2 = RX_FIFOADDR(fifo_addr >> 4);
+ csr2 |= RX_FIFOSEGSIZE(fifo_sgsz);
+
+ switch (mep->type) {
+ case USB_ENDPOINT_XFER_BULK:
+ csr1 |= RX_TYPE(TYPE_BULK);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ csr1 |= RX_TYPE(TYPE_ISO);
+ csr2 |= RX_BINTERVAL(interval);
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ csr1 |= RX_TYPE(TYPE_INT);
+ csr2 |= RX_BINTERVAL(interval);
+ break;
+ }
+
+ /* Enable QMU Done interrupt */
+ mtu3_setbits(mbase, U3D_QIESR0, QMU_RX_DONE_INT(epnum));
+
+ mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), csr0);
+ mtu3_writel(mbase, MU3D_EP_RXCR1(epnum), csr1);
+ mtu3_writel(mbase, MU3D_EP_RXCR2(epnum), csr2);
+
+ dev_dbg(mtu->dev, "U3D_RX%d CSR0:%#x, CSR1:%#x, CSR2:%#x\n",
+ epnum, mtu3_readl(mbase, MU3D_EP_RXCR0(epnum)),
+ mtu3_readl(mbase, MU3D_EP_RXCR1(epnum)),
+ mtu3_readl(mbase, MU3D_EP_RXCR2(epnum)));
+ }
+
+ dev_dbg(mtu->dev, "csr0:%#x, csr1:%#x, csr2:%#x\n", csr0, csr1, csr2);
+ dev_dbg(mtu->dev, "%s: %s, fifo-addr:%#x, fifo-size:%#x(%#x/%#x)\n",
+ __func__, mep->name, mep->fifo_addr, mep->fifo_size,
+ fifo_sgsz, mep->fifo_seg_size);
+
+ return 0;
+}
+
+/* for non-ep0 */
+void mtu3_deconfig_ep(struct mtu3 *mtu, struct mtu3_ep *mep)
+{
+ void __iomem *mbase = mtu->mac_base;
+ int epnum = mep->epnum;
+
+ if (mep->is_in) {
+ mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), 0);
+ mtu3_writel(mbase, MU3D_EP_TXCR1(epnum), 0);
+ mtu3_writel(mbase, MU3D_EP_TXCR2(epnum), 0);
+ mtu3_setbits(mbase, U3D_QIECR0, QMU_TX_DONE_INT(epnum));
+ } else {
+ mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), 0);
+ mtu3_writel(mbase, MU3D_EP_RXCR1(epnum), 0);
+ mtu3_writel(mbase, MU3D_EP_RXCR2(epnum), 0);
+ mtu3_setbits(mbase, U3D_QIECR0, QMU_RX_DONE_INT(epnum));
+ }
+
+ ep_fifo_free(mep);
+
+ dev_dbg(mtu->dev, "%s: %s\n", __func__, mep->name);
+}
+
+/*
+ * Two scenarios:
+ * 1. when the device IP supports SS, the fifos of EP0, the TX EPs and
+ *    the RX EPs are separate;
+ * 2. when it supports only HS, the fifo is shared by all EPs; the
+ *    capability registers @EPNTXFFSZ / @EPNRXFFSZ give the total fifo
+ *    size of the non-ep0 EPs, and ep0's fifo is fixed at 64B, so the
+ *    total fifo size is 64B + @EPNTXFFSZ.
+ *    Since the first 64B are reserved for EP0, the non-ep0 fifo starts
+ *    at offset 64 and is divided into two equal parts for the TX and
+ *    RX EPs for simplification.
+ */
+static void get_ep_fifo_config(struct mtu3 *mtu)
+{
+ struct mtu3_fifo_info *tx_fifo;
+ struct mtu3_fifo_info *rx_fifo;
+ u32 fifosize;
+
+ if (mtu->is_u3_ip) {
+ fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNTXFFSZ);
+ tx_fifo = &mtu->tx_fifo;
+ tx_fifo->base = 0;
+ tx_fifo->limit = fifosize / MTU3_EP_FIFO_UNIT;
+ bitmap_zero(tx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
+
+ fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNRXFFSZ);
+ rx_fifo = &mtu->rx_fifo;
+ rx_fifo->base = 0;
+ rx_fifo->limit = fifosize / MTU3_EP_FIFO_UNIT;
+ bitmap_zero(rx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
+ mtu->slot = MTU3_U3_IP_SLOT_DEFAULT;
+ } else {
+ fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNTXFFSZ);
+ tx_fifo = &mtu->tx_fifo;
+ tx_fifo->base = MTU3_U2_IP_EP0_FIFO_SIZE;
+ tx_fifo->limit = (fifosize / MTU3_EP_FIFO_UNIT) >> 1;
+ bitmap_zero(tx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
+
+ rx_fifo = &mtu->rx_fifo;
+ rx_fifo->base =
+ tx_fifo->base + tx_fifo->limit * MTU3_EP_FIFO_UNIT;
+ rx_fifo->limit = tx_fifo->limit;
+ bitmap_zero(rx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
+ mtu->slot = MTU3_U2_IP_SLOT_DEFAULT;
+ }
+
+ dev_dbg(mtu->dev, "%s, TX: base-%d, limit-%d; RX: base-%d, limit-%d\n",
+ __func__, tx_fifo->base, tx_fifo->limit,
+ rx_fifo->base, rx_fifo->limit);
+}
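+
+/*
+ * Worked example (editorial, assuming a U2-only IP whose EPNTXFFSZ
+ * capability reports an 8KB fifo): tx_fifo->base = 64 with limit
+ * (8192 / 512) / 2 = 8 units, and rx_fifo->base = 64 + 8 * 512 = 4160
+ * with the same limit, leaving the first 64B for EP0.
+ */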
+
+void mtu3_ep0_setup(struct mtu3 *mtu)
+{
+ u32 maxpacket = mtu->g.ep0->maxpacket;
+ u32 csr;
+
+ dev_dbg(mtu->dev, "%s maxpacket: %d\n", __func__, maxpacket);
+
+ csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR);
+ csr &= ~EP0_MAXPKTSZ_MSK;
+ csr |= EP0_MAXPKTSZ(maxpacket);
+ csr &= EP0_W1C_BITS;
+ mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr);
+
+ /* Enable EP0 interrupt */
+ mtu3_writel(mtu->mac_base, U3D_EPIESR, EP0ISR);
+}
+
+static int mtu3_mem_alloc(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ struct mtu3_ep *ep_array;
+ int in_ep_num, out_ep_num;
+ u32 cap_epinfo;
+ int ret;
+ int i;
+
+ cap_epinfo = mtu3_readl(mbase, U3D_CAP_EPINFO);
+ in_ep_num = CAP_TX_EP_NUM(cap_epinfo);
+ out_ep_num = CAP_RX_EP_NUM(cap_epinfo);
+
+ dev_info(mtu->dev, "fifosz/epnum: Tx=%#x/%d, Rx=%#x/%d\n",
+ mtu3_readl(mbase, U3D_CAP_EPNTXFFSZ), in_ep_num,
+ mtu3_readl(mbase, U3D_CAP_EPNRXFFSZ), out_ep_num);
+
+ /* one extra for ep0 (in_eps[0]); out_eps[0] is reserved */
+ mtu->num_eps = min(in_ep_num, out_ep_num) + 1;
+ ep_array = kcalloc(mtu->num_eps * 2, sizeof(*ep_array), GFP_KERNEL);
+ if (ep_array == NULL)
+ return -ENOMEM;
+
+ mtu->ep_array = ep_array;
+ mtu->in_eps = ep_array;
+ mtu->out_eps = &ep_array[mtu->num_eps];
+ /* ep0 uses in_eps[0], out_eps[0] is reserved */
+ mtu->ep0 = mtu->in_eps;
+ mtu->ep0->mtu = mtu;
+ mtu->ep0->epnum = 0;
+
+ for (i = 1; i < mtu->num_eps; i++) {
+ struct mtu3_ep *mep = mtu->in_eps + i;
+
+ mep->fifo = &mtu->tx_fifo;
+ mep = mtu->out_eps + i;
+ mep->fifo = &mtu->rx_fifo;
+ }
+
+ get_ep_fifo_config(mtu);
+
+ ret = mtu3_qmu_init(mtu);
+ if (ret)
+ kfree(mtu->ep_array);
+
+ return ret;
+}
+
+static void mtu3_mem_free(struct mtu3 *mtu)
+{
+ mtu3_qmu_exit(mtu);
+ kfree(mtu->ep_array);
+}
+
+static void mtu3_set_speed(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+
+ if (!mtu->is_u3_ip && (mtu->max_speed > USB_SPEED_HIGH))
+ mtu->max_speed = USB_SPEED_HIGH;
+
+ if (mtu->max_speed == USB_SPEED_FULL) {
+ /* disable U3 SS function */
+ mtu3_clrbits(mbase, U3D_USB3_CONFIG, USB3_EN);
+ /* disable HS function */
+ mtu3_clrbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
+ } else if (mtu->max_speed == USB_SPEED_HIGH) {
+ mtu3_clrbits(mbase, U3D_USB3_CONFIG, USB3_EN);
+ /* HS/FS detected by HW */
+ mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
+ }
+
+ dev_info(mtu->dev, "max_speed: %s\n",
+ usb_speed_string(mtu->max_speed));
+}
+
+static void mtu3_regs_init(struct mtu3 *mtu)
+{
+
+ void __iomem *mbase = mtu->mac_base;
+
+ /* be sure interrupts are disabled before registration of ISR */
+ mtu3_intr_disable(mtu);
+ mtu3_intr_status_clear(mtu);
+
+ if (mtu->is_u3_ip) {
+ /* disable LGO_U1/U2 by default */
+ mtu3_clrbits(mbase, U3D_LINK_POWER_CONTROL,
+ SW_U1_ACCEPT_ENABLE | SW_U2_ACCEPT_ENABLE |
+ SW_U1_REQUEST_ENABLE | SW_U2_REQUEST_ENABLE);
+ /* device responses to u3_exit from host automatically */
+ mtu3_clrbits(mbase, U3D_LTSSM_CTRL, SOFT_U3_EXIT_EN);
+ /* automatically build U2 link when U3 detect fail */
+ mtu3_setbits(mbase, U3D_USB2_TEST_MODE, U2U3_AUTO_SWITCH);
+ }
+
+ mtu3_set_speed(mtu);
+
+ /* delay about 0.1us from detecting reset to send chirp-K */
+ mtu3_clrbits(mbase, U3D_LINK_RESET_INFO, WTCHRP_MSK);
+ /* U2/U3 detected by HW */
+ mtu3_writel(mbase, U3D_DEVICE_CONF, 0);
+ /* enable QMU 16B checksum */
+ mtu3_setbits(mbase, U3D_QCR0, QMU_CS16B_EN);
+ /* vbus detected by HW */
+ mtu3_clrbits(mbase, U3D_MISC_CTRL, VBUS_FRC_EN | VBUS_ON);
+}
+
+static irqreturn_t mtu3_link_isr(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ enum usb_device_speed udev_speed;
+ u32 maxpkt = 64;
+ u32 link;
+ u32 speed;
+
+ link = mtu3_readl(mbase, U3D_DEV_LINK_INTR);
+ link &= mtu3_readl(mbase, U3D_DEV_LINK_INTR_ENABLE);
+ mtu3_writel(mbase, U3D_DEV_LINK_INTR, link); /* W1C */
+ dev_dbg(mtu->dev, "=== LINK[%x] ===\n", link);
+
+ if (!(link & SSUSB_DEV_SPEED_CHG_INTR))
+ return IRQ_NONE;
+
+ speed = SSUSB_DEV_SPEED(mtu3_readl(mbase, U3D_DEVICE_CONF));
+
+ switch (speed) {
+ case MTU3_SPEED_FULL:
+ udev_speed = USB_SPEED_FULL;
+ /* BESLCK = 4 < BESLCK_U3 = 10 < BESLDCK = 15 */
+ mtu3_writel(mbase, U3D_USB20_LPM_PARAMETER, LPM_BESLDCK(0xf)
+ | LPM_BESLCK(4) | LPM_BESLCK_U3(0xa));
+ mtu3_setbits(mbase, U3D_POWER_MANAGEMENT,
+ LPM_BESL_STALL | LPM_BESLD_STALL);
+ break;
+ case MTU3_SPEED_HIGH:
+ udev_speed = USB_SPEED_HIGH;
+ /* BESLCK = 4 < BESLCK_U3 = 10 < BESLDCK = 15 */
+ mtu3_writel(mbase, U3D_USB20_LPM_PARAMETER, LPM_BESLDCK(0xf)
+ | LPM_BESLCK(4) | LPM_BESLCK_U3(0xa));
+ mtu3_setbits(mbase, U3D_POWER_MANAGEMENT,
+ LPM_BESL_STALL | LPM_BESLD_STALL);
+ break;
+ case MTU3_SPEED_SUPER:
+ udev_speed = USB_SPEED_SUPER;
+ maxpkt = 512;
+ break;
+ default:
+ udev_speed = USB_SPEED_UNKNOWN;
+ break;
+ }
+ dev_dbg(mtu->dev, "%s: %s\n", __func__, usb_speed_string(udev_speed));
+
+ mtu->g.speed = udev_speed;
+ mtu->g.ep0->maxpacket = maxpkt;
+ mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+
+ if (udev_speed == USB_SPEED_UNKNOWN)
+ mtu3_gadget_disconnect(mtu);
+ else
+ mtu3_ep0_setup(mtu);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtu3_u3_ltssm_isr(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ u32 ltssm;
+
+ ltssm = mtu3_readl(mbase, U3D_LTSSM_INTR);
+ ltssm &= mtu3_readl(mbase, U3D_LTSSM_INTR_ENABLE);
+ mtu3_writel(mbase, U3D_LTSSM_INTR, ltssm); /* W1C */
+ dev_dbg(mtu->dev, "=== LTSSM[%x] ===\n", ltssm);
+
+ if (ltssm & (HOT_RST_INTR | WARM_RST_INTR))
+ mtu3_gadget_reset(mtu);
+
+ if (ltssm & VBUS_FALL_INTR)
+ mtu3_ss_func_set(mtu, false);
+
+ if (ltssm & VBUS_RISE_INTR)
+ mtu3_ss_func_set(mtu, true);
+
+ if (ltssm & EXIT_U3_INTR)
+ mtu3_gadget_resume(mtu);
+
+ if (ltssm & ENTER_U3_INTR)
+ mtu3_gadget_suspend(mtu);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtu3_u2_common_isr(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ u32 u2comm;
+
+ u2comm = mtu3_readl(mbase, U3D_COMMON_USB_INTR);
+ u2comm &= mtu3_readl(mbase, U3D_COMMON_USB_INTR_ENABLE);
+ mtu3_writel(mbase, U3D_COMMON_USB_INTR, u2comm); /* W1C */
+ dev_dbg(mtu->dev, "=== U2COMM[%x] ===\n", u2comm);
+
+ if (u2comm & SUSPEND_INTR)
+ mtu3_gadget_suspend(mtu);
+
+ if (u2comm & RESUME_INTR)
+ mtu3_gadget_resume(mtu);
+
+ if (u2comm & RESET_INTR)
+ mtu3_gadget_reset(mtu);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtu3_irq(int irq, void *data)
+{
+ struct mtu3 *mtu = (struct mtu3 *)data;
+ unsigned long flags;
+ u32 level1;
+
+ spin_lock_irqsave(&mtu->lock, flags);
+
+ /* U3D_LV1ISR is RU */
+ level1 = mtu3_readl(mtu->mac_base, U3D_LV1ISR);
+ level1 &= mtu3_readl(mtu->mac_base, U3D_LV1IER);
+
+ if (level1 & EP_CTRL_INTR)
+ mtu3_link_isr(mtu);
+
+ if (level1 & MAC2_INTR)
+ mtu3_u2_common_isr(mtu);
+
+ if (level1 & MAC3_INTR)
+ mtu3_u3_ltssm_isr(mtu);
+
+ if (level1 & BMU_INTR)
+ mtu3_ep0_isr(mtu);
+
+ if (level1 & QMU_INTR)
+ mtu3_qmu_isr(mtu);
+
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int mtu3_hw_init(struct mtu3 *mtu)
+{
+ u32 cap_dev;
+ int ret;
+
+ mtu->hw_version = mtu3_readl(mtu->ippc_base, U3D_SSUSB_HW_ID);
+
+ cap_dev = mtu3_readl(mtu->ippc_base, U3D_SSUSB_IP_DEV_CAP);
+ mtu->is_u3_ip = !!SSUSB_IP_DEV_U3_PORT_NUM(cap_dev);
+
+ dev_info(mtu->dev, "IP version 0x%x(%s IP)\n", mtu->hw_version,
+ mtu->is_u3_ip ? "U3" : "U2");
+
+ mtu3_device_reset(mtu);
+
+ ret = mtu3_device_enable(mtu);
+ if (ret) {
+ dev_err(mtu->dev, "device enable failed %d\n", ret);
+ return ret;
+ }
+
+ ret = mtu3_mem_alloc(mtu);
+ if (ret)
+ return -ENOMEM;
+
+ mtu3_regs_init(mtu);
+
+ return 0;
+}
+
+static void mtu3_hw_exit(struct mtu3 *mtu)
+{
+ mtu3_device_disable(mtu);
+ mtu3_mem_free(mtu);
+}
+
+/*-------------------------------------------------------------------------*/
+
+int ssusb_gadget_init(struct ssusb_mtk *ssusb)
+{
+ struct device *dev = ssusb->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mtu3 *mtu = NULL;
+ struct resource *res;
+ int ret = -ENOMEM;
+
+ mtu = devm_kzalloc(dev, sizeof(struct mtu3), GFP_KERNEL);
+ if (mtu == NULL)
+ return -ENOMEM;
+
+ mtu->irq = platform_get_irq(pdev, 0);
+ if (mtu->irq <= 0) {
+ dev_err(dev, "fail to get irq number\n");
+ return -ENODEV;
+ }
+ dev_info(dev, "irq %d\n", mtu->irq);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac");
+ mtu->mac_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mtu->mac_base)) {
+ dev_err(dev, "error mapping memory for dev mac\n");
+ return PTR_ERR(mtu->mac_base);
+ }
+
+ spin_lock_init(&mtu->lock);
+ mtu->dev = dev;
+ mtu->ippc_base = ssusb->ippc_base;
+ ssusb->mac_base = mtu->mac_base;
+ ssusb->u3d = mtu;
+ mtu->ssusb = ssusb;
+ mtu->max_speed = usb_get_maximum_speed(dev);
+
+ /* check the max_speed parameter */
+ switch (mtu->max_speed) {
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ case USB_SPEED_SUPER:
+ break;
+ default:
+ dev_err(dev, "invalid max_speed: %s\n",
+ usb_speed_string(mtu->max_speed));
+ /* fall through */
+ case USB_SPEED_UNKNOWN:
+ /* default as SS */
+ mtu->max_speed = USB_SPEED_SUPER;
+ break;
+ }
+
+ dev_dbg(dev, "mac_base=0x%p, ippc_base=0x%p\n",
+ mtu->mac_base, mtu->ippc_base);
+
+ ret = mtu3_hw_init(mtu);
+ if (ret) {
+ dev_err(dev, "mtu3 hw init failed:%d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_irq(dev, mtu->irq, mtu3_irq, 0, dev_name(dev), mtu);
+ if (ret) {
+ dev_err(dev, "request irq %d failed!\n", mtu->irq);
+ goto irq_err;
+ }
+
+ device_init_wakeup(dev, true);
+
+ ret = mtu3_gadget_setup(mtu);
+ if (ret) {
+ dev_err(dev, "mtu3 gadget init failed:%d\n", ret);
+ goto gadget_err;
+ }
+
+ /* init as host mode, power down device IP for power saving */
+ if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
+ mtu3_stop(mtu);
+
+ dev_dbg(dev, " %s() done...\n", __func__);
+
+ return 0;
+
+gadget_err:
+ device_init_wakeup(dev, false);
+
+irq_err:
+ mtu3_hw_exit(mtu);
+ ssusb->u3d = NULL;
+ dev_err(dev, " %s() fail...\n", __func__);
+
+ return ret;
+}
+
+void ssusb_gadget_exit(struct ssusb_mtk *ssusb)
+{
+ struct mtu3 *mtu = ssusb->u3d;
+
+ mtu3_gadget_cleanup(mtu);
+ device_init_wakeup(ssusb->dev, false);
+ mtu3_hw_exit(mtu);
+}
diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c
new file mode 100644
index 000000000000..1a8987e7c5b0
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_dr.c
@@ -0,0 +1,379 @@
+/*
+ * mtu3_dr.c - dual role switch and host glue layer
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#include "mtu3.h"
+#include "mtu3_dr.h"
+
+#define USB2_PORT 2
+#define USB3_PORT 3
+
+enum mtu3_vbus_id_state {
+ MTU3_ID_FLOAT = 1,
+ MTU3_ID_GROUND,
+ MTU3_VBUS_OFF,
+ MTU3_VBUS_VALID,
+};
+
+static void toggle_opstate(struct ssusb_mtk *ssusb)
+{
+ if (!ssusb->otg_switch.is_u3_drd) {
+ mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION);
+ mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN);
+ }
+}
+
+/* only port0 supports dual-role mode */
+static int ssusb_port0_switch(struct ssusb_mtk *ssusb,
+ int version, bool tohost)
+{
+ void __iomem *ibase = ssusb->ippc_base;
+ u32 value;
+
+ dev_dbg(ssusb->dev, "%s (switch u%d port0 to %s)\n", __func__,
+ version, tohost ? "host" : "device");
+
+ if (version == USB2_PORT) {
+ /* 1. power off and disable u2 port0 */
+ value = mtu3_readl(ibase, SSUSB_U2_CTRL(0));
+ value |= SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS;
+ mtu3_writel(ibase, SSUSB_U2_CTRL(0), value);
+
+ /* 2. power on, enable u2 port0 and select its mode */
+ value = mtu3_readl(ibase, SSUSB_U2_CTRL(0));
+ value &= ~(SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS);
+ value = tohost ? (value | SSUSB_U2_PORT_HOST_SEL) :
+ (value & (~SSUSB_U2_PORT_HOST_SEL));
+ mtu3_writel(ibase, SSUSB_U2_CTRL(0), value);
+ } else {
+ /* 1. power off and disable u3 port0 */
+ value = mtu3_readl(ibase, SSUSB_U3_CTRL(0));
+ value |= SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS;
+ mtu3_writel(ibase, SSUSB_U3_CTRL(0), value);
+
+ /* 2. power on, enable u3 port0 and select its mode */
+ value = mtu3_readl(ibase, SSUSB_U3_CTRL(0));
+ value &= ~(SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS);
+ value = tohost ? (value | SSUSB_U3_PORT_HOST_SEL) :
+ (value & (~SSUSB_U3_PORT_HOST_SEL));
+ mtu3_writel(ibase, SSUSB_U3_CTRL(0), value);
+ }
+
+ return 0;
+}
+
+static void switch_port_to_host(struct ssusb_mtk *ssusb)
+{
+ u32 check_clk = 0;
+
+ dev_dbg(ssusb->dev, "%s\n", __func__);
+
+ ssusb_port0_switch(ssusb, USB2_PORT, true);
+
+ if (ssusb->otg_switch.is_u3_drd) {
+ ssusb_port0_switch(ssusb, USB3_PORT, true);
+ check_clk = SSUSB_U3_MAC_RST_B_STS;
+ }
+
+ ssusb_check_clocks(ssusb, check_clk);
+
+ /* after all clocks are stable */
+ toggle_opstate(ssusb);
+}
+
+static void switch_port_to_device(struct ssusb_mtk *ssusb)
+{
+ u32 check_clk = 0;
+
+ dev_dbg(ssusb->dev, "%s\n", __func__);
+
+ ssusb_port0_switch(ssusb, USB2_PORT, false);
+
+ if (ssusb->otg_switch.is_u3_drd) {
+ ssusb_port0_switch(ssusb, USB3_PORT, false);
+ check_clk = SSUSB_U3_MAC_RST_B_STS;
+ }
+
+ ssusb_check_clocks(ssusb, check_clk);
+}
+
+int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on)
+{
+ struct ssusb_mtk *ssusb =
+ container_of(otg_sx, struct ssusb_mtk, otg_switch);
+ struct regulator *vbus = otg_sx->vbus;
+ int ret;
+
+ /* vbus is optional */
+ if (!vbus)
+ return 0;
+
+ dev_dbg(ssusb->dev, "%s: turn %s\n", __func__, is_on ? "on" : "off");
+
+ if (is_on) {
+ ret = regulator_enable(vbus);
+ if (ret) {
+ dev_err(ssusb->dev, "vbus regulator enable failed\n");
+ return ret;
+ }
+ } else {
+ regulator_disable(vbus);
+ }
+
+ return 0;
+}
+
+/*
+ * switch to host: -> MTU3_VBUS_OFF --> MTU3_ID_GROUND
+ * switch to device: -> MTU3_ID_FLOAT --> MTU3_VBUS_VALID
+ */
+static void ssusb_set_mailbox(struct otg_switch_mtk *otg_sx,
+ enum mtu3_vbus_id_state status)
+{
+ struct ssusb_mtk *ssusb =
+ container_of(otg_sx, struct ssusb_mtk, otg_switch);
+ struct mtu3 *mtu = ssusb->u3d;
+
+ dev_dbg(ssusb->dev, "mailbox state(%d)\n", status);
+
+ switch (status) {
+ case MTU3_ID_GROUND:
+ switch_port_to_host(ssusb);
+ ssusb_set_vbus(otg_sx, 1);
+ ssusb->is_host = true;
+ break;
+ case MTU3_ID_FLOAT:
+ ssusb->is_host = false;
+ ssusb_set_vbus(otg_sx, 0);
+ switch_port_to_device(ssusb);
+ break;
+ case MTU3_VBUS_OFF:
+ mtu3_stop(mtu);
+ pm_relax(ssusb->dev);
+ break;
+ case MTU3_VBUS_VALID:
+ /* avoid suspend while working as a device */
+ pm_stay_awake(ssusb->dev);
+ mtu3_start(mtu);
+ break;
+ default:
+ dev_err(ssusb->dev, "invalid state\n");
+ }
+}
+
+static int ssusb_id_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct otg_switch_mtk *otg_sx =
+ container_of(nb, struct otg_switch_mtk, id_nb);
+
+ if (event)
+ ssusb_set_mailbox(otg_sx, MTU3_ID_GROUND);
+ else
+ ssusb_set_mailbox(otg_sx, MTU3_ID_FLOAT);
+
+ return NOTIFY_DONE;
+}
+
+static int ssusb_vbus_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct otg_switch_mtk *otg_sx =
+ container_of(nb, struct otg_switch_mtk, vbus_nb);
+
+ if (event)
+ ssusb_set_mailbox(otg_sx, MTU3_VBUS_VALID);
+ else
+ ssusb_set_mailbox(otg_sx, MTU3_VBUS_OFF);
+
+ return NOTIFY_DONE;
+}
+
+static int ssusb_extcon_register(struct otg_switch_mtk *otg_sx)
+{
+ struct ssusb_mtk *ssusb =
+ container_of(otg_sx, struct ssusb_mtk, otg_switch);
+ struct extcon_dev *edev = otg_sx->edev;
+ int ret;
+
+ /* extcon is optional */
+ if (!edev)
+ return 0;
+
+ otg_sx->vbus_nb.notifier_call = ssusb_vbus_notifier;
+ ret = extcon_register_notifier(edev, EXTCON_USB,
+ &otg_sx->vbus_nb);
+ if (ret < 0)
+ dev_err(ssusb->dev, "failed to register notifier for USB\n");
+
+ otg_sx->id_nb.notifier_call = ssusb_id_notifier;
+ ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
+ &otg_sx->id_nb);
+ if (ret < 0)
+ dev_err(ssusb->dev, "failed to register notifier for USB-HOST\n");
+
+ dev_dbg(ssusb->dev, "EXTCON_USB: %d, EXTCON_USB_HOST: %d\n",
+ extcon_get_cable_state_(edev, EXTCON_USB),
+ extcon_get_cable_state_(edev, EXTCON_USB_HOST));
+
+ /* default as host, switch to device mode if needed */
+ if (extcon_get_cable_state_(edev, EXTCON_USB_HOST) == false)
+ ssusb_set_mailbox(otg_sx, MTU3_ID_FLOAT);
+ if (extcon_get_cable_state_(edev, EXTCON_USB) == true)
+ ssusb_set_mailbox(otg_sx, MTU3_VBUS_VALID);
+
+ return 0;
+}
+
+static void extcon_register_dwork(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct otg_switch_mtk *otg_sx =
+ container_of(dwork, struct otg_switch_mtk, extcon_reg_dwork);
+
+ ssusb_extcon_register(otg_sx);
+}
+
+/*
+ * We provide an interface via debugfs to switch between host and device modes
+ * depending on user input.
+ * This is useful in special cases, e.g. when a Type-A receptacle is used but
+ * dual-role mode should still be supported.
+ * It generates cable-state changes by pulling IDPIN up/down and notifies the
+ * driver to switch modes via "extcon-usb-gpio".
+ * NOTE: this interface should not be enabled when a Micro receptacle is used.
+ */
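+/*
+ * Usage sketch (editorial; the directory name depends on the device name
+ * used when creating the debugfs root under usb_debug_root):
+ *   echo host   > /sys/kernel/debug/usb/<dev-name>/mode
+ *   echo device > /sys/kernel/debug/usb/<dev-name>/mode
+ *   cat /sys/kernel/debug/usb/<dev-name>/mode
+ */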
+static void ssusb_mode_manual_switch(struct ssusb_mtk *ssusb, int to_host)
+{
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+
+ if (to_host)
+ pinctrl_select_state(otg_sx->id_pinctrl, otg_sx->id_ground);
+ else
+ pinctrl_select_state(otg_sx->id_pinctrl, otg_sx->id_float);
+}
+
+
+static int ssusb_mode_show(struct seq_file *sf, void *unused)
+{
+ struct ssusb_mtk *ssusb = sf->private;
+
+ seq_printf(sf, "current mode: %s(%s drd)\n(echo device/host)\n",
+ ssusb->is_host ? "host" : "device",
+ ssusb->otg_switch.manual_drd_enabled ? "manual" : "auto");
+
+ return 0;
+}
+
+static int ssusb_mode_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ssusb_mode_show, inode->i_private);
+}
+
+static ssize_t ssusb_mode_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct seq_file *sf = file->private_data;
+ struct ssusb_mtk *ssusb = sf->private;
+ char buf[16] = {0}; /* zero-fill so short writes stay NUL-terminated for strncmp() */
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "host", 4) && !ssusb->is_host) {
+ ssusb_mode_manual_switch(ssusb, 1);
+ } else if (!strncmp(buf, "device", 6) && ssusb->is_host) {
+ ssusb_mode_manual_switch(ssusb, 0);
+ } else {
+ dev_err(ssusb->dev, "wrong or duplicated setting\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations ssusb_mode_fops = {
+ .open = ssusb_mode_open,
+ .write = ssusb_mode_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void ssusb_debugfs_init(struct ssusb_mtk *ssusb)
+{
+ struct dentry *root;
+ struct dentry *file;
+
+ root = debugfs_create_dir(dev_name(ssusb->dev), usb_debug_root);
+ if (IS_ERR_OR_NULL(root)) {
+ if (!root)
+ dev_err(ssusb->dev, "create debugfs root failed\n");
+ return;
+ }
+ ssusb->dbgfs_root = root;
+
+ file = debugfs_create_file("mode", S_IRUGO | S_IWUSR, root,
+ ssusb, &ssusb_mode_fops);
+ if (!file)
+ dev_dbg(ssusb->dev, "create debugfs mode failed\n");
+}
+
+static void ssusb_debugfs_exit(struct ssusb_mtk *ssusb)
+{
+ debugfs_remove_recursive(ssusb->dbgfs_root);
+}
+
+int ssusb_otg_switch_init(struct ssusb_mtk *ssusb)
+{
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+
+ INIT_DELAYED_WORK(&otg_sx->extcon_reg_dwork, extcon_register_dwork);
+
+ if (otg_sx->manual_drd_enabled)
+ ssusb_debugfs_init(ssusb);
+
+ /* a 1s delay is enough to wait for host initialization */
+ schedule_delayed_work(&otg_sx->extcon_reg_dwork, HZ);
+
+ return 0;
+}
+
+void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb)
+{
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+
+ cancel_delayed_work(&otg_sx->extcon_reg_dwork);
+
+ if (otg_sx->edev) {
+ extcon_unregister_notifier(otg_sx->edev,
+ EXTCON_USB, &otg_sx->vbus_nb);
+ extcon_unregister_notifier(otg_sx->edev,
+ EXTCON_USB_HOST, &otg_sx->id_nb);
+ }
+
+ if (otg_sx->manual_drd_enabled)
+ ssusb_debugfs_exit(ssusb);
+}
diff --git a/drivers/usb/mtu3/mtu3_dr.h b/drivers/usb/mtu3/mtu3_dr.h
new file mode 100644
index 000000000000..9b228b5811b0
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_dr.h
@@ -0,0 +1,108 @@
+/*
+ * mtu3_dr.h - dual role switch and host glue layer header
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MTU3_DR_H_
+#define _MTU3_DR_H_
+
+#if IS_ENABLED(CONFIG_USB_MTU3_HOST) || IS_ENABLED(CONFIG_USB_MTU3_DUAL_ROLE)
+
+int ssusb_host_init(struct ssusb_mtk *ssusb, struct device_node *parent_dn);
+void ssusb_host_exit(struct ssusb_mtk *ssusb);
+int ssusb_wakeup_of_property_parse(struct ssusb_mtk *ssusb,
+ struct device_node *dn);
+int ssusb_host_enable(struct ssusb_mtk *ssusb);
+int ssusb_host_disable(struct ssusb_mtk *ssusb, bool suspend);
+int ssusb_wakeup_enable(struct ssusb_mtk *ssusb);
+void ssusb_wakeup_disable(struct ssusb_mtk *ssusb);
+
+#else
+
+static inline int ssusb_host_init(struct ssusb_mtk *ssusb,
+ struct device_node *parent_dn)
+{
+ return 0;
+}
+
+static inline void ssusb_host_exit(struct ssusb_mtk *ssusb)
+{}
+
+static inline int ssusb_wakeup_of_property_parse(
+ struct ssusb_mtk *ssusb, struct device_node *dn)
+{
+ return 0;
+}
+
+static inline int ssusb_host_enable(struct ssusb_mtk *ssusb)
+{
+ return 0;
+}
+
+static inline int ssusb_host_disable(struct ssusb_mtk *ssusb, bool suspend)
+{
+ return 0;
+}
+
+static inline int ssusb_wakeup_enable(struct ssusb_mtk *ssusb)
+{
+ return 0;
+}
+
+static inline void ssusb_wakeup_disable(struct ssusb_mtk *ssusb)
+{}
+
+#endif
+
+
+#if IS_ENABLED(CONFIG_USB_MTU3_GADGET) || IS_ENABLED(CONFIG_USB_MTU3_DUAL_ROLE)
+int ssusb_gadget_init(struct ssusb_mtk *ssusb);
+void ssusb_gadget_exit(struct ssusb_mtk *ssusb);
+#else
+static inline int ssusb_gadget_init(struct ssusb_mtk *ssusb)
+{
+ return 0;
+}
+
+static inline void ssusb_gadget_exit(struct ssusb_mtk *ssusb)
+{}
+#endif
+
+
+#if IS_ENABLED(CONFIG_USB_MTU3_DUAL_ROLE)
+int ssusb_otg_switch_init(struct ssusb_mtk *ssusb);
+void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb);
+int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on);
+
+#else
+
+static inline int ssusb_otg_switch_init(struct ssusb_mtk *ssusb)
+{
+ return 0;
+}
+
+static inline void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb)
+{}
+
+static inline int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* _MTU3_DR_H_ */
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
new file mode 100644
index 000000000000..9dd2441b4fa1
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -0,0 +1,730 @@
+/*
+ * mtu3_gadget.c - MediaTek usb3 DRD peripheral support
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "mtu3.h"
+
+void mtu3_req_complete(struct mtu3_ep *mep,
+ struct usb_request *req, int status)
+__releases(mep->mtu->lock)
+__acquires(mep->mtu->lock)
+{
+ struct mtu3_request *mreq;
+ struct mtu3 *mtu;
+ int busy = mep->busy;
+
+ mreq = to_mtu3_request(req);
+ list_del(&mreq->list);
+ if (mreq->request.status == -EINPROGRESS)
+ mreq->request.status = status;
+
+ mtu = mreq->mtu;
+ mep->busy = 1;
+ spin_unlock(&mtu->lock);
+
+ /* ep0 uses PIO, so its request does not need to be unmapped */
+ if (mep->epnum)
+ usb_gadget_unmap_request(&mtu->g, req, mep->is_in);
+
+ dev_dbg(mtu->dev, "%s complete req: %p, sts %d, %d/%d\n", mep->name,
+ req, req->status, mreq->request.actual, mreq->request.length);
+
+ usb_gadget_giveback_request(&mep->ep, &mreq->request);
+
+ spin_lock(&mtu->lock);
+ mep->busy = busy;
+}
+
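+/* abort all requests queued on an endpoint, completing them with 'status' */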
+static void nuke(struct mtu3_ep *mep, const int status)
+{
+ struct mtu3_request *mreq = NULL;
+
+ mep->busy = 1;
+ if (list_empty(&mep->req_list))
+ return;
+
+ dev_dbg(mep->mtu->dev, "abort %s's req: sts %d\n", mep->name, status);
+
+ /* exclude EP0 */
+ if (mep->epnum)
+ mtu3_qmu_flush(mep);
+
+ while (!list_empty(&mep->req_list)) {
+ mreq = list_first_entry(&mep->req_list,
+ struct mtu3_request, list);
+ mtu3_req_complete(mep, &mreq->request, status);
+ }
+}
+
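+/*
+ * Program the endpoint's maxpacket/interval/burst/mult from its
+ * descriptors, allocate its GPD ring and start the QMU for it.
+ */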
+static int mtu3_ep_enable(struct mtu3_ep *mep)
+{
+ const struct usb_endpoint_descriptor *desc;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+ struct mtu3 *mtu = mep->mtu;
+ u32 interval = 0;
+ u32 mult = 0;
+ u32 burst = 0;
+ int max_packet;
+ int ret;
+
+ desc = mep->desc;
+ comp_desc = mep->comp_desc;
+ mep->type = usb_endpoint_type(desc);
+ max_packet = usb_endpoint_maxp(desc);
+ mep->maxp = max_packet & GENMASK(10, 0);
+
+ switch (mtu->g.speed) {
+ case USB_SPEED_SUPER:
+ if (usb_endpoint_xfer_int(desc) ||
+ usb_endpoint_xfer_isoc(desc)) {
+ interval = desc->bInterval;
+ interval = clamp_val(interval, 1, 16) - 1;
+ if (usb_endpoint_xfer_isoc(desc) && comp_desc)
+ mult = comp_desc->bmAttributes;
+ }
+ if (comp_desc)
+ burst = comp_desc->bMaxBurst;
+
+ break;
+ case USB_SPEED_HIGH:
+ if (usb_endpoint_xfer_isoc(desc) ||
+ usb_endpoint_xfer_int(desc)) {
+ interval = desc->bInterval;
+ interval = clamp_val(interval, 1, 16) - 1;
+ burst = (max_packet & GENMASK(12, 11)) >> 11;
+ }
+ break;
+ default:
+ break; /* others are ignored */
+ }
+
+ dev_dbg(mtu->dev, "%s maxp:%d, interval:%d, burst:%d, mult:%d\n",
+ __func__, mep->maxp, interval, burst, mult);
+
+ mep->ep.maxpacket = mep->maxp;
+ mep->ep.desc = desc;
+ mep->ep.comp_desc = comp_desc;
+
+ /* slot mainly affects bulk/isoc transfer, so ignore int */
+ mep->slot = usb_endpoint_xfer_int(desc) ? 0 : mtu->slot;
+
+ ret = mtu3_config_ep(mtu, mep, interval, burst, mult);
+ if (ret < 0)
+ return ret;
+
+ ret = mtu3_gpd_ring_alloc(mep);
+ if (ret < 0) {
+ mtu3_deconfig_ep(mtu, mep);
+ return ret;
+ }
+
+ mtu3_qmu_start(mep);
+
+ return 0;
+}
+
+static int mtu3_ep_disable(struct mtu3_ep *mep)
+{
+ struct mtu3 *mtu = mep->mtu;
+
+ mtu3_qmu_stop(mep);
+
+ /* abort all pending requests */
+ nuke(mep, -ESHUTDOWN);
+ mtu3_deconfig_ep(mtu, mep);
+ mtu3_gpd_ring_free(mep);
+
+ mep->desc = NULL;
+ mep->ep.desc = NULL;
+ mep->comp_desc = NULL;
+ mep->type = 0;
+ mep->flags = 0;
+
+ return 0;
+}
+
+static int mtu3_gadget_ep_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct mtu3_ep *mep;
+ struct mtu3 *mtu;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
+ pr_debug("%s invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!desc->wMaxPacketSize) {
+ pr_debug("%s missing wMaxPacketSize\n", __func__);
+ return -EINVAL;
+ }
+ mep = to_mtu3_ep(ep);
+ mtu = mep->mtu;
+
+ /* check ep number and direction against endpoint */
+ if (usb_endpoint_num(desc) != mep->epnum)
+ return -EINVAL;
+
+ if (!!usb_endpoint_dir_in(desc) ^ !!mep->is_in)
+ return -EINVAL;
+
+ dev_dbg(mtu->dev, "%s %s\n", __func__, ep->name);
+
+ if (mep->flags & MTU3_EP_ENABLED) {
+ dev_WARN_ONCE(mtu->dev, true, "%s is already enabled\n",
+ mep->name);
+ return 0;
+ }
+
+ spin_lock_irqsave(&mtu->lock, flags);
+ mep->desc = desc;
+ mep->comp_desc = ep->comp_desc;
+
+ ret = mtu3_ep_enable(mep);
+ if (ret)
+ goto error;
+
+ mep->busy = 0;
+ mep->wedged = 0;
+ mep->flags |= MTU3_EP_ENABLED;
+ mtu->active_ep++;
+
+error:
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ dev_dbg(mtu->dev, "%s active_ep=%d\n", __func__, mtu->active_ep);
+
+ return ret;
+}
+
+static int mtu3_gadget_ep_disable(struct usb_ep *ep)
+{
+ struct mtu3_ep *mep = to_mtu3_ep(ep);
+ struct mtu3 *mtu = mep->mtu;
+ unsigned long flags;
+
+ dev_dbg(mtu->dev, "%s %s\n", __func__, mep->name);
+
+ if (!(mep->flags & MTU3_EP_ENABLED)) {
+ dev_warn(mtu->dev, "%s is already disabled\n", mep->name);
+ return 0;
+ }
+
+ spin_lock_irqsave(&mtu->lock, flags);
+ mtu3_ep_disable(mep);
+ mep->flags &= ~MTU3_EP_ENABLED;
+ mtu->active_ep--;
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ dev_dbg(mtu->dev, "%s active_ep=%d, mtu3 is_active=%d\n",
+ __func__, mtu->active_ep, mtu->is_active);
+
+ return 0;
+}
+
+struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
+{
+ struct mtu3_ep *mep = to_mtu3_ep(ep);
+ struct mtu3_request *mreq;
+
+ mreq = kzalloc(sizeof(*mreq), gfp_flags);
+ if (!mreq)
+ return NULL;
+
+ mreq->request.dma = DMA_ADDR_INVALID;
+ mreq->epnum = mep->epnum;
+ mreq->mep = mep;
+
+ return &mreq->request;
+}
+
+void mtu3_free_request(struct usb_ep *ep, struct usb_request *req)
+{
+ kfree(to_mtu3_request(req));
+}
+
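+/*
+ * Queue a request on a non-zero endpoint: map it for DMA, link a GPD
+ * for it into the endpoint's ring and kick the QMU to resume.
+ */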
+static int mtu3_gadget_queue(struct usb_ep *ep,
+ struct usb_request *req, gfp_t gfp_flags)
+{
+ struct mtu3_ep *mep;
+ struct mtu3_request *mreq;
+ struct mtu3 *mtu;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!ep || !req)
+ return -EINVAL;
+
+ if (!req->buf)
+ return -ENODATA;
+
+ mep = to_mtu3_ep(ep);
+ mtu = mep->mtu;
+ mreq = to_mtu3_request(req);
+ mreq->mtu = mtu;
+
+ if (mreq->mep != mep)
+ return -EINVAL;
+
+ dev_dbg(mtu->dev, "%s %s EP%d(%s), req=%p, maxp=%d, len#%d\n",
+ __func__, mep->is_in ? "TX" : "RX", mreq->epnum, ep->name,
+ mreq, ep->maxpacket, mreq->request.length);
+
+ if (req->length > GPD_BUF_SIZE) {
+ dev_warn(mtu->dev,
+ "req length > supported MAX:%d requested:%d\n",
+ GPD_BUF_SIZE, req->length);
+ return -EOPNOTSUPP;
+ }
+
+ /* don't queue if the ep is down */
+ if (!mep->desc) {
+ dev_dbg(mtu->dev, "req=%p queued to %s while it's disabled\n",
+ req, ep->name);
+ return -ESHUTDOWN;
+ }
+
+ mreq->request.actual = 0;
+ mreq->request.status = -EINPROGRESS;
+
+ ret = usb_gadget_map_request(&mtu->g, req, mep->is_in);
+ if (ret) {
+ dev_err(mtu->dev, "dma mapping failed\n");
+ return ret;
+ }
+
+ spin_lock_irqsave(&mtu->lock, flags);
+
+ if (mtu3_prepare_transfer(mep)) {
+ ret = -EAGAIN;
+ goto error;
+ }
+
+ list_add_tail(&mreq->list, &mep->req_list);
+ mtu3_insert_gpd(mep, mreq);
+ mtu3_qmu_resume(mep);
+
+error:
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return ret;
+}
+
+static int mtu3_gadget_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+ struct mtu3_ep *mep = to_mtu3_ep(ep);
+ struct mtu3_request *mreq = to_mtu3_request(req);
+ struct mtu3_request *r;
+ unsigned long flags;
+ int ret = 0;
+ struct mtu3 *mtu = mep->mtu;
+
+ if (!ep || !req || mreq->mep != mep)
+ return -EINVAL;
+
+ dev_dbg(mtu->dev, "%s : req=%p\n", __func__, req);
+
+ spin_lock_irqsave(&mtu->lock, flags);
+
+ list_for_each_entry(r, &mep->req_list, list) {
+ if (r == mreq)
+ break;
+ }
+ if (r != mreq) {
+ dev_dbg(mtu->dev, "req=%p not queued to %s\n", req, ep->name);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ mtu3_qmu_flush(mep); /* REVISIT: set BPS ?? */
+ mtu3_req_complete(mep, req, -ECONNRESET);
+ mtu3_qmu_start(mep);
+
+done:
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return ret;
+}
+
+/*
+ * Set or clear the halt bit of an EP.
+ * A halted EP won't TX/RX any data but will queue requests.
+ */
+static int mtu3_gadget_ep_set_halt(struct usb_ep *ep, int value)
+{
+ struct mtu3_ep *mep = to_mtu3_ep(ep);
+ struct mtu3 *mtu = mep->mtu;
+ struct mtu3_request *mreq;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!ep)
+ return -EINVAL;
+
+ dev_dbg(mtu->dev, "%s : %s...", __func__, ep->name);
+
+ spin_lock_irqsave(&mtu->lock, flags);
+
+ if (mep->type == USB_ENDPOINT_XFER_ISOC) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ mreq = next_request(mep);
+ if (value) {
+ /*
+ * If there is no request for a TX-EP, the QMU will not transfer
+ * data to the TX-FIFO, so there is no need to check here whether
+ * the TX-FIFO still holds any bytes
+ */
+ if (mreq) {
+ dev_dbg(mtu->dev, "req in progress, cannot halt %s\n",
+ ep->name);
+ ret = -EAGAIN;
+ goto done;
+ }
+ } else {
+ mep->wedged = 0;
+ }
+
+ dev_dbg(mtu->dev, "%s %s stall\n", ep->name, value ? "set" : "clear");
+
+ mtu3_ep_stall_set(mep, value);
+
+done:
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return ret;
+}
+
+/* Sets the halt feature with the clear requests ignored */
+static int mtu3_gadget_ep_set_wedge(struct usb_ep *ep)
+{
+ struct mtu3_ep *mep = to_mtu3_ep(ep);
+
+ if (!ep)
+ return -EINVAL;
+
+ mep->wedged = 1;
+
+ return usb_ep_set_halt(ep);
+}
+
+static const struct usb_ep_ops mtu3_ep_ops = {
+ .enable = mtu3_gadget_ep_enable,
+ .disable = mtu3_gadget_ep_disable,
+ .alloc_request = mtu3_alloc_request,
+ .free_request = mtu3_free_request,
+ .queue = mtu3_gadget_queue,
+ .dequeue = mtu3_gadget_dequeue,
+ .set_halt = mtu3_gadget_ep_set_halt,
+ .set_wedge = mtu3_gadget_ep_set_wedge,
+};
+
+static int mtu3_gadget_get_frame(struct usb_gadget *gadget)
+{
+ struct mtu3 *mtu = gadget_to_mtu3(gadget);
+
+ return (int)mtu3_readl(mtu->mac_base, U3D_USB20_FRAME_NUM);
+}
+
+static int mtu3_gadget_wakeup(struct usb_gadget *gadget)
+{
+ struct mtu3 *mtu = gadget_to_mtu3(gadget);
+ unsigned long flags;
+
+ dev_dbg(mtu->dev, "%s\n", __func__);
+
+ /* remote wakeup feature is not enabled by host */
+ if (!mtu->may_wakeup)
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(&mtu->lock, flags);
+ if (mtu->g.speed == USB_SPEED_SUPER) {
+ mtu3_setbits(mtu->mac_base, U3D_LINK_POWER_CONTROL, UX_EXIT);
+ } else {
+ mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME);
+ spin_unlock_irqrestore(&mtu->lock, flags);
+ usleep_range(10000, 11000);
+ spin_lock_irqsave(&mtu->lock, flags);
+ mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME);
+ }
+ spin_unlock_irqrestore(&mtu->lock, flags);
+ return 0;
+}
+
+static int mtu3_gadget_set_self_powered(struct usb_gadget *gadget,
+ int is_selfpowered)
+{
+ struct mtu3 *mtu = gadget_to_mtu3(gadget);
+
+ mtu->is_self_powered = !!is_selfpowered;
+ return 0;
+}
+
+static int mtu3_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct mtu3 *mtu = gadget_to_mtu3(gadget);
+ unsigned long flags;
+
+ dev_dbg(mtu->dev, "%s (%s) for %sactive device\n", __func__,
+ is_on ? "on" : "off", mtu->is_active ? "" : "in");
+
+ /* we'd rather not pullup unless the device is active. */
+ spin_lock_irqsave(&mtu->lock, flags);
+
+ is_on = !!is_on;
+ if (!mtu->is_active) {
+ /* save it for mtu3_start() to process the request */
+ mtu->softconnect = is_on;
+ } else if (is_on != mtu->softconnect) {
+ mtu->softconnect = is_on;
+ mtu3_dev_on_off(mtu, is_on);
+ }
+
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return 0;
+}
+
+static int mtu3_gadget_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct mtu3 *mtu = gadget_to_mtu3(gadget);
+ unsigned long flags;
+
+ if (mtu->gadget_driver) {
+ dev_err(mtu->dev, "%s is already bound to %s\n",
+ mtu->g.name, mtu->gadget_driver->driver.name);
+ return -EBUSY;
+ }
+
+ dev_dbg(mtu->dev, "bind driver %s\n", driver->function);
+
+ spin_lock_irqsave(&mtu->lock, flags);
+
+ mtu->softconnect = 0;
+ mtu->gadget_driver = driver;
+
+ if (mtu->ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
+ mtu3_start(mtu);
+
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return 0;
+}
+
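+/* soft-disconnect the device, flush all endpoints and notify the driver */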
+static void stop_activity(struct mtu3 *mtu)
+{
+ struct usb_gadget_driver *driver = mtu->gadget_driver;
+ int i;
+
+ /* don't disconnect if it's not connected */
+ if (mtu->g.speed == USB_SPEED_UNKNOWN)
+ driver = NULL;
+ else
+ mtu->g.speed = USB_SPEED_UNKNOWN;
+
+ /* deactivate the hardware */
+ if (mtu->softconnect) {
+ mtu->softconnect = 0;
+ mtu3_dev_on_off(mtu, 0);
+ }
+
+ /*
+ * killing any outstanding requests will quiesce the driver;
+ * then report disconnect
+ */
+ nuke(mtu->ep0, -ESHUTDOWN);
+ for (i = 1; i < mtu->num_eps; i++) {
+ nuke(mtu->in_eps + i, -ESHUTDOWN);
+ nuke(mtu->out_eps + i, -ESHUTDOWN);
+ }
+
+ if (driver) {
+ spin_unlock(&mtu->lock);
+ driver->disconnect(&mtu->g);
+ spin_lock(&mtu->lock);
+ }
+}
+
+static int mtu3_gadget_stop(struct usb_gadget *g)
+{
+ struct mtu3 *mtu = gadget_to_mtu3(g);
+ unsigned long flags;
+
+ dev_dbg(mtu->dev, "%s\n", __func__);
+
+ spin_lock_irqsave(&mtu->lock, flags);
+
+ stop_activity(mtu);
+ mtu->gadget_driver = NULL;
+
+ if (mtu->ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
+ mtu3_stop(mtu);
+
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return 0;
+}
+
+static const struct usb_gadget_ops mtu3_gadget_ops = {
+ .get_frame = mtu3_gadget_get_frame,
+ .wakeup = mtu3_gadget_wakeup,
+ .set_selfpowered = mtu3_gadget_set_self_powered,
+ .pullup = mtu3_gadget_pullup,
+ .udc_start = mtu3_gadget_start,
+ .udc_stop = mtu3_gadget_stop,
+};
+
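+/*
+ * Initialize one hardware endpoint; ep0 gets the ep0 ops, all other
+ * endpoints are added to the gadget's ep_list.
+ */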
+static void init_hw_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
+ u32 epnum, u32 is_in)
+{
+ mep->epnum = epnum;
+ mep->mtu = mtu;
+ mep->is_in = is_in;
+
+ INIT_LIST_HEAD(&mep->req_list);
+
+ sprintf(mep->name, "ep%d%s", epnum,
+ !epnum ? "" : (is_in ? "in" : "out"));
+
+ mep->ep.name = mep->name;
+ INIT_LIST_HEAD(&mep->ep.ep_list);
+
+ /* initialize maxpacket as SS */
+ if (!epnum) {
+ usb_ep_set_maxpacket_limit(&mep->ep, 512);
+ mep->ep.caps.type_control = true;
+ mep->ep.ops = &mtu3_ep0_ops;
+ mtu->g.ep0 = &mep->ep;
+ } else {
+ usb_ep_set_maxpacket_limit(&mep->ep, 1024);
+ mep->ep.caps.type_iso = true;
+ mep->ep.caps.type_bulk = true;
+ mep->ep.caps.type_int = true;
+ mep->ep.ops = &mtu3_ep_ops;
+ list_add_tail(&mep->ep.ep_list, &mtu->g.ep_list);
+ }
+
+ dev_dbg(mtu->dev, "%s, name=%s, maxp=%d\n", __func__, mep->ep.name,
+ mep->ep.maxpacket);
+
+ if (!epnum) {
+ mep->ep.caps.dir_in = true;
+ mep->ep.caps.dir_out = true;
+ } else if (is_in) {
+ mep->ep.caps.dir_in = true;
+ } else {
+ mep->ep.caps.dir_out = true;
+ }
+}
+
+static void mtu3_gadget_init_eps(struct mtu3 *mtu)
+{
+ u8 epnum;
+
+ /* initialize endpoint list just once */
+ INIT_LIST_HEAD(&(mtu->g.ep_list));
+
+ dev_dbg(mtu->dev, "%s num_eps(1 for a pair of tx&rx ep)=%d\n",
+ __func__, mtu->num_eps);
+
+ init_hw_ep(mtu, mtu->ep0, 0, 0);
+ for (epnum = 1; epnum < mtu->num_eps; epnum++) {
+ init_hw_ep(mtu, mtu->in_eps + epnum, epnum, 1);
+ init_hw_ep(mtu, mtu->out_eps + epnum, epnum, 0);
+ }
+}
+
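+/* initialize the endpoints and register the UDC with the gadget core */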
+int mtu3_gadget_setup(struct mtu3 *mtu)
+{
+ int ret;
+
+ mtu->g.ops = &mtu3_gadget_ops;
+ mtu->g.max_speed = mtu->max_speed;
+ mtu->g.speed = USB_SPEED_UNKNOWN;
+ mtu->g.sg_supported = 0;
+ mtu->g.name = MTU3_DRIVER_NAME;
+ mtu->is_active = 0;
+
+ mtu3_gadget_init_eps(mtu);
+
+ ret = usb_add_gadget_udc(mtu->dev, &mtu->g);
+ if (ret) {
+ dev_err(mtu->dev, "failed to register udc\n");
+ return ret;
+ }
+
+ usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED);
+
+ return 0;
+}
+
+void mtu3_gadget_cleanup(struct mtu3 *mtu)
+{
+ usb_del_gadget_udc(&mtu->g);
+}
+
+void mtu3_gadget_resume(struct mtu3 *mtu)
+{
+ dev_dbg(mtu->dev, "gadget RESUME\n");
+ if (mtu->gadget_driver && mtu->gadget_driver->resume) {
+ spin_unlock(&mtu->lock);
+ mtu->gadget_driver->resume(&mtu->g);
+ spin_lock(&mtu->lock);
+ }
+}
+
+/* called when SOF packets stop for 3+ msec or the link enters U3 */
+void mtu3_gadget_suspend(struct mtu3 *mtu)
+{
+ dev_dbg(mtu->dev, "gadget SUSPEND\n");
+ if (mtu->gadget_driver && mtu->gadget_driver->suspend) {
+ spin_unlock(&mtu->lock);
+ mtu->gadget_driver->suspend(&mtu->g);
+ spin_lock(&mtu->lock);
+ }
+}
+
+/* called when VBUS drops below session threshold, and in other cases */
+void mtu3_gadget_disconnect(struct mtu3 *mtu)
+{
+ dev_dbg(mtu->dev, "gadget DISCONNECT\n");
+ if (mtu->gadget_driver && mtu->gadget_driver->disconnect) {
+ spin_unlock(&mtu->lock);
+ mtu->gadget_driver->disconnect(&mtu->g);
+ spin_lock(&mtu->lock);
+ }
+
+ usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED);
+}
+
+void mtu3_gadget_reset(struct mtu3 *mtu)
+{
+ dev_dbg(mtu->dev, "gadget RESET\n");
+
+ /* report disconnect, if we didn't flush EP state */
+ if (mtu->g.speed != USB_SPEED_UNKNOWN)
+ mtu3_gadget_disconnect(mtu);
+
+ mtu->address = 0;
+ mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+ mtu->may_wakeup = 0;
+}
diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c
new file mode 100644
index 000000000000..2d7427b48775
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c
@@ -0,0 +1,881 @@
+/*
+ * mtu3_gadget_ep0.c - MediaTek USB3 DRD peripheral driver ep0 handling
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng.Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "mtu3.h"
+
+/* ep0 is always mtu3->in_eps[0] */
+#define next_ep0_request(mtu) next_request((mtu)->ep0)
+
+/* for high speed test mode; see USB 2.0 spec 7.1.20 */
+static const u8 mtu3_test_packet[53] = {
+ /* implicit SYNC then DATA0 to start */
+
+ /* JKJKJKJK x9 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* JJKKJJKK x8 */
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ /* JJJJKKKK x8 */
+ 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
+ /* JJJJJJJKKKKKKK x8 */
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ /* JJJJJJJK x8 */
+ 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
+ /* JKKKKKKK x10, JK */
+ 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e,
+ /* implicit CRC16 then EOP to end */
+};
+
+static char *decode_ep0_state(struct mtu3 *mtu)
+{
+ switch (mtu->ep0_state) {
+ case MU3D_EP0_STATE_SETUP:
+ return "SETUP";
+ case MU3D_EP0_STATE_TX:
+ return "IN";
+ case MU3D_EP0_STATE_RX:
+ return "OUT";
+ case MU3D_EP0_STATE_TX_END:
+ return "TX-END";
+ case MU3D_EP0_STATE_STALL:
+ return "STALL";
+ default:
+ return "??";
+ }
+}
+
+static void ep0_req_giveback(struct mtu3 *mtu, struct usb_request *req)
+{
+ mtu3_req_complete(mtu->ep0, req, 0);
+}
+
+static int
+forward_to_driver(struct mtu3 *mtu, const struct usb_ctrlrequest *setup)
+__releases(mtu->lock)
+__acquires(mtu->lock)
+{
+ int ret;
+
+ if (!mtu->gadget_driver)
+ return -EOPNOTSUPP;
+
+ spin_unlock(&mtu->lock);
+ ret = mtu->gadget_driver->setup(&mtu->g, setup);
+ spin_lock(&mtu->lock);
+
+ dev_dbg(mtu->dev, "%s ret %d\n", __func__, ret);
+ return ret;
+}
+
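+/* PIO write to the ep0 FIFO: 32-bit chunks first, then the 16/8-bit tail */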
+static void ep0_write_fifo(struct mtu3_ep *mep, const u8 *src, u16 len)
+{
+ void __iomem *fifo = mep->mtu->mac_base + U3D_FIFO0;
+ u16 index = 0;
+
+ dev_dbg(mep->mtu->dev, "%s: ep%din, len=%d, buf=%p\n",
+ __func__, mep->epnum, len, src);
+
+ if (len >= 4) {
+ iowrite32_rep(fifo, src, len >> 2);
+ index = len & ~0x03;
+ }
+ if (len & 0x02) {
+ writew(*(u16 *)&src[index], fifo);
+ index += 2;
+ }
+ if (len & 0x01)
+ writeb(src[index], fifo);
+}
+
+static void ep0_read_fifo(struct mtu3_ep *mep, u8 *dst, u16 len)
+{
+ void __iomem *fifo = mep->mtu->mac_base + U3D_FIFO0;
+ u32 value;
+ u16 index = 0;
+
+ dev_dbg(mep->mtu->dev, "%s: ep%dout len=%d buf=%p\n",
+ __func__, mep->epnum, len, dst);
+
+ if (len >= 4) {
+ ioread32_rep(fifo, dst, len >> 2);
+ index = len & ~0x03;
+ }
+ if (len & 0x3) {
+ value = readl(fifo);
+ memcpy(&dst[index], &value, len & 0x3);
+ }
+}
+
+static void ep0_load_test_packet(struct mtu3 *mtu)
+{
+ /*
+ * The test packet is shorter than ep0's high-speed max packet size,
+ * so it can be written into the FIFO directly.
+ */
+ ep0_write_fifo(mtu->ep0, mtu3_test_packet, sizeof(mtu3_test_packet));
+}
+
+/*
+ * A. send STALL for setup transfer without data stage:
+ * set SENDSTALL and SETUPPKTRDY at the same time;
+ * B. send STALL for other cases:
+ * set SENDSTALL only.
+ */
+static void ep0_stall_set(struct mtu3_ep *mep0, bool set, u32 pktrdy)
+{
+ struct mtu3 *mtu = mep0->mtu;
+ void __iomem *mbase = mtu->mac_base;
+ u32 csr;
+
+ /* EP0_SENTSTALL is W1C */
+ csr = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
+ if (set)
+ csr |= EP0_SENDSTALL | pktrdy;
+ else
+ csr = (csr & ~EP0_SENDSTALL) | EP0_SENTSTALL;
+ mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr);
+
+ mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+
+ dev_dbg(mtu->dev, "ep0: %s STALL, ep0_state: %s\n",
+ set ? "SEND" : "CLEAR", decode_ep0_state(mtu));
+}
+
+static int ep0_queue(struct mtu3_ep *mep0, struct mtu3_request *mreq);
+
+static void ep0_dummy_complete(struct usb_ep *ep, struct usb_request *req)
+{}
+
+static void ep0_set_sel_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct mtu3_request *mreq;
+ struct mtu3 *mtu;
+ struct usb_set_sel_req sel;
+
+ memcpy(&sel, req->buf, sizeof(sel));
+
+ mreq = to_mtu3_request(req);
+ mtu = mreq->mtu;
+ dev_dbg(mtu->dev, "u1sel:%d, u1pel:%d, u2sel:%d, u2pel:%d\n",
+ sel.u1_sel, sel.u1_pel, sel.u2_sel, sel.u2_pel);
+}
+
+/* queue data stage to handle 6 byte SET_SEL request */
+static int ep0_set_sel(struct mtu3 *mtu, struct usb_ctrlrequest *setup)
+{
+ int ret;
+ u16 length = le16_to_cpu(setup->wLength);
+
+ if (unlikely(length != 6)) {
+ dev_err(mtu->dev, "%s wrong wLength:%d\n",
+ __func__, length);
+ return -EINVAL;
+ }
+
+ mtu->ep0_req.mep = mtu->ep0;
+ mtu->ep0_req.request.length = 6;
+ mtu->ep0_req.request.buf = mtu->setup_buf;
+ mtu->ep0_req.request.complete = ep0_set_sel_complete;
+ ret = ep0_queue(mtu->ep0, &mtu->ep0_req);
+
+ return ret < 0 ? ret : 1;
+}
+
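+/* build the two-byte GET_STATUS reply and queue it as the data stage */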
+static int
+ep0_get_status(struct mtu3 *mtu, const struct usb_ctrlrequest *setup)
+{
+ struct mtu3_ep *mep = NULL;
+ int handled = 1;
+ u8 result[2] = {0, 0};
+ u8 epnum = 0;
+ int is_in;
+
+ switch (setup->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ result[0] = mtu->is_self_powered << USB_DEVICE_SELF_POWERED;
+ result[0] |= mtu->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+ /* superspeed only */
+ if (mtu->g.speed == USB_SPEED_SUPER) {
+ result[0] |= mtu->u1_enable << USB_DEV_STAT_U1_ENABLED;
+ result[0] |= mtu->u2_enable << USB_DEV_STAT_U2_ENABLED;
+ }
+
+ dev_dbg(mtu->dev, "%s result=%x, U1=%x, U2=%x\n", __func__,
+ result[0], mtu->u1_enable, mtu->u2_enable);
+
+ break;
+ case USB_RECIP_INTERFACE:
+ break;
+ case USB_RECIP_ENDPOINT:
+ epnum = (u8) le16_to_cpu(setup->wIndex);
+ is_in = epnum & USB_DIR_IN;
+ epnum &= USB_ENDPOINT_NUMBER_MASK;
+
+ if (epnum >= mtu->num_eps) {
+ handled = -EINVAL;
+ break;
+ }
+ if (!epnum)
+ break;
+
+ mep = (is_in ? mtu->in_eps : mtu->out_eps) + epnum;
+ if (!mep->desc) {
+ handled = -EINVAL;
+ break;
+ }
+ if (mep->flags & MTU3_EP_STALL)
+ result[0] |= 1 << USB_ENDPOINT_HALT;
+
+ break;
+ default:
+ /* class, vendor, etc ... delegate */
+ handled = 0;
+ break;
+ }
+
+ if (handled > 0) {
+ int ret;
+
+ /* prepare a data stage for GET_STATUS */
+ dev_dbg(mtu->dev, "get_status=%x\n", *(u16 *)result);
+ memcpy(mtu->setup_buf, result, sizeof(result));
+ mtu->ep0_req.mep = mtu->ep0;
+ mtu->ep0_req.request.length = 2;
+ mtu->ep0_req.request.buf = &mtu->setup_buf;
+ mtu->ep0_req.request.complete = ep0_dummy_complete;
+ ret = ep0_queue(mtu->ep0, &mtu->ep0_req);
+ if (ret < 0)
+ handled = ret;
+ }
+ return handled;
+}
+
+static int handle_test_mode(struct mtu3 *mtu, struct usb_ctrlrequest *setup)
+{
+ void __iomem *mbase = mtu->mac_base;
+ int handled = 1;
+
+ switch (le16_to_cpu(setup->wIndex) >> 8) {
+ case TEST_J:
+ dev_dbg(mtu->dev, "TEST_J\n");
+ mtu->test_mode_nr = TEST_J_MODE;
+ break;
+ case TEST_K:
+ dev_dbg(mtu->dev, "TEST_K\n");
+ mtu->test_mode_nr = TEST_K_MODE;
+ break;
+ case TEST_SE0_NAK:
+ dev_dbg(mtu->dev, "TEST_SE0_NAK\n");
+ mtu->test_mode_nr = TEST_SE0_NAK_MODE;
+ break;
+ case TEST_PACKET:
+ dev_dbg(mtu->dev, "TEST_PACKET\n");
+ mtu->test_mode_nr = TEST_PACKET_MODE;
+ break;
+ default:
+ handled = -EINVAL;
+ goto out;
+ }
+
+ mtu->test_mode = true;
+
+ /* no TX completion interrupt; restart the platform after the test */
+ if (mtu->test_mode_nr == TEST_PACKET_MODE)
+ ep0_load_test_packet(mtu);
+
+ mtu3_writel(mbase, U3D_USB2_TEST_MODE, mtu->test_mode_nr);
+
+ mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+
+out:
+ return handled;
+}
+
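+/* handle SET/CLEAR_FEATURE requests addressed to the device itself */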
+static int ep0_handle_feature_dev(struct mtu3 *mtu,
+ struct usb_ctrlrequest *setup, bool set)
+{
+ void __iomem *mbase = mtu->mac_base;
+ int handled = -EINVAL;
+ u32 lpc;
+
+ switch (le16_to_cpu(setup->wValue)) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ mtu->may_wakeup = !!set;
+ handled = 1;
+ break;
+ case USB_DEVICE_TEST_MODE:
+ if (!set || (mtu->g.speed != USB_SPEED_HIGH) ||
+ (le16_to_cpu(setup->wIndex) & 0xff))
+ break;
+
+ handled = handle_test_mode(mtu, setup);
+ break;
+ case USB_DEVICE_U1_ENABLE:
+ if (mtu->g.speed != USB_SPEED_SUPER ||
+ mtu->g.state != USB_STATE_CONFIGURED)
+ break;
+
+ lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
+ if (set)
+ lpc |= SW_U1_ACCEPT_ENABLE;
+ else
+ lpc &= ~SW_U1_ACCEPT_ENABLE;
+ mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc);
+
+ mtu->u1_enable = !!set;
+ handled = 1;
+ break;
+ case USB_DEVICE_U2_ENABLE:
+ if (mtu->g.speed != USB_SPEED_SUPER ||
+ mtu->g.state != USB_STATE_CONFIGURED)
+ break;
+
+ lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
+ if (set)
+ lpc |= SW_U2_ACCEPT_ENABLE;
+ else
+ lpc &= ~SW_U2_ACCEPT_ENABLE;
+ mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc);
+
+ mtu->u2_enable = !!set;
+ handled = 1;
+ break;
+ default:
+ handled = -EINVAL;
+ break;
+ }
+ return handled;
+}
+
+static int ep0_handle_feature(struct mtu3 *mtu,
+ struct usb_ctrlrequest *setup, bool set)
+{
+ struct mtu3_ep *mep;
+ int handled = -EINVAL;
+ int is_in;
+ u16 value;
+ u16 index;
+ u8 epnum;
+
+ value = le16_to_cpu(setup->wValue);
+ index = le16_to_cpu(setup->wIndex);
+
+ switch (setup->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ handled = ep0_handle_feature_dev(mtu, setup, set);
+ break;
+ case USB_RECIP_INTERFACE:
+ /* superspeed only */
+ if ((value == USB_INTRF_FUNC_SUSPEND)
+ && (mtu->g.speed == USB_SPEED_SUPER)) {
+ /*
+ * forward the request because function drivers
+ * should handle it
+ */
+ handled = 0;
+ }
+ break;
+ case USB_RECIP_ENDPOINT:
+ epnum = index & USB_ENDPOINT_NUMBER_MASK;
+ if (epnum == 0 || epnum >= mtu->num_eps ||
+ value != USB_ENDPOINT_HALT)
+ break;
+
+ is_in = index & USB_DIR_IN;
+ mep = (is_in ? mtu->in_eps : mtu->out_eps) + epnum;
+ if (!mep->desc)
+ break;
+
+ handled = 1;
+ /* ignore request if endpoint is wedged */
+ if (mep->wedged)
+ break;
+
+ mtu3_ep_stall_set(mep, set);
+ break;
+ default:
+ /* class, vendor, etc ... delegate */
+ handled = 0;
+ break;
+ }
+ return handled;
+}
+
+/*
+ * handle all control requests that can be handled here
+ * returns:
+ * negative errno - error happened
+ * zero - need delegate SETUP to gadget driver
+ * positive - already handled
+ */
+static int handle_standard_request(struct mtu3 *mtu,
+ struct usb_ctrlrequest *setup)
+{
+ void __iomem *mbase = mtu->mac_base;
+ enum usb_device_state state = mtu->g.state;
+ int handled = -EINVAL;
+ u32 dev_conf;
+ u16 value;
+
+ value = le16_to_cpu(setup->wValue);
+
+ /* the gadget driver handles everything except what we must handle */
+ switch (setup->bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ /* change it after the status stage */
+ mtu->address = (u8) (value & 0x7f);
+ dev_dbg(mtu->dev, "set address to 0x%x\n", mtu->address);
+
+ dev_conf = mtu3_readl(mbase, U3D_DEVICE_CONF);
+ dev_conf &= ~DEV_ADDR_MSK;
+ dev_conf |= DEV_ADDR(mtu->address);
+ mtu3_writel(mbase, U3D_DEVICE_CONF, dev_conf);
+
+ if (mtu->address)
+ usb_gadget_set_state(&mtu->g, USB_STATE_ADDRESS);
+ else
+ usb_gadget_set_state(&mtu->g, USB_STATE_DEFAULT);
+
+ handled = 1;
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ if (state == USB_STATE_ADDRESS) {
+ usb_gadget_set_state(&mtu->g,
+ USB_STATE_CONFIGURED);
+ } else if (state == USB_STATE_CONFIGURED) {
+ /*
+ * USB2 spec sec 9.4.7, if wValue is 0 then dev
+ * is moved to addressed state
+ */
+ if (!value)
+ usb_gadget_set_state(&mtu->g,
+ USB_STATE_ADDRESS);
+ }
+ handled = 0;
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ handled = ep0_handle_feature(mtu, setup, 0);
+ break;
+ case USB_REQ_SET_FEATURE:
+ handled = ep0_handle_feature(mtu, setup, 1);
+ break;
+ case USB_REQ_GET_STATUS:
+ handled = ep0_get_status(mtu, setup);
+ break;
+ case USB_REQ_SET_SEL:
+ handled = ep0_set_sel(mtu, setup);
+ break;
+ case USB_REQ_SET_ISOCH_DELAY:
+ handled = 1;
+ break;
+ default:
+ /* delegate SET_CONFIGURATION, etc */
+ handled = 0;
+ }
+
+ return handled;
+}
+
+/* receive a data packet (OUT) */
+static void ep0_rx_state(struct mtu3 *mtu)
+{
+ struct mtu3_request *mreq;
+ struct usb_request *req;
+ void __iomem *mbase = mtu->mac_base;
+ u32 maxp;
+ u32 csr;
+ u16 count = 0;
+
+ dev_dbg(mtu->dev, "%s\n", __func__);
+
+ csr = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
+ mreq = next_ep0_request(mtu);
+ req = &mreq->request;
+
+ /* read packet and ack; or stall because of gadget driver bug */
+ if (req) {
+ void *buf = req->buf + req->actual;
+ unsigned int len = req->length - req->actual;
+
+ /* read the buffer */
+ count = mtu3_readl(mbase, U3D_RXCOUNT0);
+ if (count > len) {
+ req->status = -EOVERFLOW;
+ count = len;
+ }
+ ep0_read_fifo(mtu->ep0, buf, count);
+ req->actual += count;
+ csr |= EP0_RXPKTRDY;
+
+ maxp = mtu->g.ep0->maxpacket;
+ if (count < maxp || req->actual == req->length) {
+ mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+ dev_dbg(mtu->dev, "ep0 state: %s\n",
+ decode_ep0_state(mtu));
+
+ csr |= EP0_DATAEND;
+ } else {
+ req = NULL;
+ }
+ } else {
+ csr |= EP0_RXPKTRDY | EP0_SENDSTALL;
+ dev_dbg(mtu->dev, "%s: SENDSTALL\n", __func__);
+ }
+
+ mtu3_writel(mbase, U3D_EP0CSR, csr);
+
+ /* give back the request if all data has been received */
+ if (req)
+ ep0_req_giveback(mtu, req);
+}
+
+/* transmitting to the host (IN) */
+static void ep0_tx_state(struct mtu3 *mtu)
+{
+ struct mtu3_request *mreq = next_ep0_request(mtu);
+ struct usb_request *req;
+ u32 csr;
+ u8 *src;
+ u8 count;
+ u32 maxp;
+
+ dev_dbg(mtu->dev, "%s\n", __func__);
+
+ if (!mreq)
+ return;
+
+ maxp = mtu->g.ep0->maxpacket;
+ req = &mreq->request;
+
+ /* load the data */
+ src = (u8 *)req->buf + req->actual;
+ count = min(maxp, req->length - req->actual);
+ if (count)
+ ep0_write_fifo(mtu->ep0, src, count);
+
+ dev_dbg(mtu->dev, "%s act=%d, len=%d, cnt=%d, maxp=%d zero=%d\n",
+ __func__, req->actual, req->length, count, maxp, req->zero);
+
+ req->actual += count;
+
+ if ((count < maxp)
+ || ((req->actual == req->length) && !req->zero))
+ mtu->ep0_state = MU3D_EP0_STATE_TX_END;
+
+ /* send it out, triggering a "txpktrdy cleared" irq */
+ csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS;
+ mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr | EP0_TXPKTRDY);
+
+ dev_dbg(mtu->dev, "%s ep0csr=0x%x\n", __func__,
+ mtu3_readl(mtu->mac_base, U3D_EP0CSR));
+}
+
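+/*
+ * Read the SETUP packet out of the FIFO, complete any stale ep0 request,
+ * and arm the data-stage direction (TX for IN, RX for OUT).
+ */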
+static void ep0_read_setup(struct mtu3 *mtu, struct usb_ctrlrequest *setup)
+{
+ struct mtu3_request *mreq;
+ u32 count;
+ u32 csr;
+
+ csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS;
+ count = mtu3_readl(mtu->mac_base, U3D_RXCOUNT0);
+
+ ep0_read_fifo(mtu->ep0, (u8 *)setup, count);
+
+ dev_dbg(mtu->dev, "SETUP req%02x.%02x v%04x i%04x l%04x\n",
+ setup->bRequestType, setup->bRequest,
+ le16_to_cpu(setup->wValue), le16_to_cpu(setup->wIndex),
+ le16_to_cpu(setup->wLength));
+
+ /* clean up any leftover transfers */
+ mreq = next_ep0_request(mtu);
+ if (mreq)
+ ep0_req_giveback(mtu, &mreq->request);
+
+ if (le16_to_cpu(setup->wLength) == 0) {
+ ; /* no data stage, nothing to do */
+ } else if (setup->bRequestType & USB_DIR_IN) {
+ mtu3_writel(mtu->mac_base, U3D_EP0CSR,
+ csr | EP0_SETUPPKTRDY | EP0_DPHTX);
+ mtu->ep0_state = MU3D_EP0_STATE_TX;
+ } else {
+ mtu3_writel(mtu->mac_base, U3D_EP0CSR,
+ (csr | EP0_SETUPPKTRDY) & (~EP0_DPHTX));
+ mtu->ep0_state = MU3D_EP0_STATE_RX;
+ }
+}
+
+static int ep0_handle_setup(struct mtu3 *mtu)
+__releases(mtu->lock)
+__acquires(mtu->lock)
+{
+ struct usb_ctrlrequest setup;
+ struct mtu3_request *mreq;
+ void __iomem *mbase = mtu->mac_base;
+ int handled = 0;
+
+ ep0_read_setup(mtu, &setup);
+
+ if ((setup.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+ handled = handle_standard_request(mtu, &setup);
+
+ dev_dbg(mtu->dev, "handled %d, ep0_state: %s\n",
+ handled, decode_ep0_state(mtu));
+
+ if (handled < 0)
+ goto stall;
+ else if (handled > 0)
+ goto finish;
+
+ handled = forward_to_driver(mtu, &setup);
+ if (handled < 0) {
+stall:
+ dev_dbg(mtu->dev, "%s stall (%d)\n", __func__, handled);
+
+ ep0_stall_set(mtu->ep0, true,
+ le16_to_cpu(setup.wLength) ? 0 : EP0_SETUPPKTRDY);
+
+ return 0;
+ }
+
+finish:
+ if (mtu->test_mode) {
+ ; /* nothing to do */
+ } else if (le16_to_cpu(setup.wLength) == 0) { /* no data stage */
+
+ mtu3_writel(mbase, U3D_EP0CSR,
+ (mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS)
+ | EP0_SETUPPKTRDY | EP0_DATAEND);
+
+ /* complete zlp request directly */
+ mreq = next_ep0_request(mtu);
+ if (mreq && !mreq->request.length)
+ ep0_req_giveback(mtu, &mreq->request);
+ }
+
+ return 0;
+}
+
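+/* ep0 interrupt handler: advances the ep0 state machine per EP0CSR events */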
+irqreturn_t mtu3_ep0_isr(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ struct mtu3_request *mreq;
+ u32 int_status;
+ irqreturn_t ret = IRQ_NONE;
+ u32 csr;
+ u32 len;
+
+ int_status = mtu3_readl(mbase, U3D_EPISR);
+ int_status &= mtu3_readl(mbase, U3D_EPIER);
+ mtu3_writel(mbase, U3D_EPISR, int_status); /* W1C */
+
+ /* only handle ep0 interrupts */
+ if (!(int_status & EP0ISR))
+ return IRQ_NONE;
+
+ csr = mtu3_readl(mbase, U3D_EP0CSR);
+
+ dev_dbg(mtu->dev, "%s csr=0x%x\n", __func__, csr);
+
+ /* we sent a stall; need to clear it now */
+ if (csr & EP0_SENTSTALL) {
+ ep0_stall_set(mtu->ep0, false, 0);
+ csr = mtu3_readl(mbase, U3D_EP0CSR);
+ ret = IRQ_HANDLED;
+ }
+ dev_dbg(mtu->dev, "ep0_state: %s\n", decode_ep0_state(mtu));
+
+ switch (mtu->ep0_state) {
+ case MU3D_EP0_STATE_TX:
+ /* irq on clearing txpktrdy */
+ if ((csr & EP0_FIFOFULL) == 0) {
+ ep0_tx_state(mtu);
+ ret = IRQ_HANDLED;
+ }
+ break;
+ case MU3D_EP0_STATE_RX:
+ /* irq on set rxpktrdy */
+ if (csr & EP0_RXPKTRDY) {
+ ep0_rx_state(mtu);
+ ret = IRQ_HANDLED;
+ }
+ break;
+ case MU3D_EP0_STATE_TX_END:
+ mtu3_writel(mbase, U3D_EP0CSR,
+ (csr & EP0_W1C_BITS) | EP0_DATAEND);
+
+ mreq = next_ep0_request(mtu);
+ if (mreq)
+ ep0_req_giveback(mtu, &mreq->request);
+
+ mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+ ret = IRQ_HANDLED;
+ dev_dbg(mtu->dev, "ep0_state: %s\n", decode_ep0_state(mtu));
+ break;
+ case MU3D_EP0_STATE_SETUP:
+ if (!(csr & EP0_SETUPPKTRDY))
+ break;
+
+ len = mtu3_readl(mbase, U3D_RXCOUNT0);
+ if (len != 8) {
+ dev_err(mtu->dev, "SETUP packet len %d != 8 ?\n", len);
+ break;
+ }
+
+ ep0_handle_setup(mtu);
+ ret = IRQ_HANDLED;
+ break;
+ default:
+ /* can't happen */
+ ep0_stall_set(mtu->ep0, true, 0);
+ WARN_ON(1);
+ break;
+ }
+
+ return ret;
+}
+
+
+static int mtu3_ep0_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ /* always enabled */
+ return -EINVAL;
+}
+
+static int mtu3_ep0_disable(struct usb_ep *ep)
+{
+ /* always enabled */
+ return -EINVAL;
+}
+
+static int ep0_queue(struct mtu3_ep *mep, struct mtu3_request *mreq)
+{
+ struct mtu3 *mtu = mep->mtu;
+
+ mreq->mtu = mtu;
+ mreq->request.actual = 0;
+ mreq->request.status = -EINPROGRESS;
+
+ dev_dbg(mtu->dev, "%s %s (ep0_state: %s), len#%d\n", __func__,
+ mep->name, decode_ep0_state(mtu), mreq->request.length);
+
+ if (!list_empty(&mep->req_list))
+ return -EBUSY;
+
+ switch (mtu->ep0_state) {
+ case MU3D_EP0_STATE_SETUP:
+ case MU3D_EP0_STATE_RX: /* control-OUT data */
+ case MU3D_EP0_STATE_TX: /* control-IN data */
+ break;
+ default:
+ dev_err(mtu->dev, "%s, error in ep0 state %s\n", __func__,
+ decode_ep0_state(mtu));
+ return -EINVAL;
+ }
+
+ list_add_tail(&mreq->list, &mep->req_list);
+
+ /* sequence #1, IN ... start writing the data */
+ if (mtu->ep0_state == MU3D_EP0_STATE_TX)
+ ep0_tx_state(mtu);
+
+ return 0;
+}
+
+static int mtu3_ep0_queue(struct usb_ep *ep,
+ struct usb_request *req, gfp_t gfp)
+{
+ struct mtu3_ep *mep;
+ struct mtu3_request *mreq;
+ struct mtu3 *mtu;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!ep || !req)
+ return -EINVAL;
+
+ mep = to_mtu3_ep(ep);
+ mtu = mep->mtu;
+ mreq = to_mtu3_request(req);
+
+ spin_lock_irqsave(&mtu->lock, flags);
+ ret = ep0_queue(mep, mreq);
+ spin_unlock_irqrestore(&mtu->lock, flags);
+ return ret;
+}
+
+static int mtu3_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+ /* we just won't support this */
+ return -EINVAL;
+}
+
+static int mtu3_ep0_halt(struct usb_ep *ep, int value)
+{
+ struct mtu3_ep *mep;
+ struct mtu3 *mtu;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!ep || !value)
+ return -EINVAL;
+
+ mep = to_mtu3_ep(ep);
+ mtu = mep->mtu;
+
+ dev_dbg(mtu->dev, "%s\n", __func__);
+
+ spin_lock_irqsave(&mtu->lock, flags);
+
+ if (!list_empty(&mep->req_list)) {
+ ret = -EBUSY;
+ goto cleanup;
+ }
+
+ switch (mtu->ep0_state) {
+ /*
+ * stalls are usually issued after parsing SETUP packet, either
+ * directly in irq context from setup() or else later.
+ */
+ case MU3D_EP0_STATE_TX:
+ case MU3D_EP0_STATE_TX_END:
+ case MU3D_EP0_STATE_RX:
+ case MU3D_EP0_STATE_SETUP:
+ ep0_stall_set(mtu->ep0, true, 0);
+ break;
+ default:
+ dev_dbg(mtu->dev, "ep0 can't halt in state %s\n",
+ decode_ep0_state(mtu));
+ ret = -EINVAL;
+ }
+
+cleanup:
+ spin_unlock_irqrestore(&mtu->lock, flags);
+ return ret;
+}
+
+const struct usb_ep_ops mtu3_ep0_ops = {
+ .enable = mtu3_ep0_enable,
+ .disable = mtu3_ep0_disable,
+ .alloc_request = mtu3_alloc_request,
+ .free_request = mtu3_free_request,
+ .queue = mtu3_ep0_queue,
+ .dequeue = mtu3_ep0_dequeue,
+ .set_halt = mtu3_ep0_halt,
+};
diff --git a/drivers/usb/mtu3/mtu3_host.c b/drivers/usb/mtu3/mtu3_host.c
new file mode 100644
index 000000000000..cd4d01087855
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_host.c
@@ -0,0 +1,294 @@
+/*
+ * mtu3_dr.c - dual role switch and host glue layer
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include "mtu3.h"
+#include "mtu3_dr.h"
+
+#define PERI_WK_CTRL1 0x404
+#define UWK_CTL1_IS_C(x) (((x) & 0xf) << 26)
+#define UWK_CTL1_IS_E BIT(25)
+#define UWK_CTL1_IDDIG_C(x) (((x) & 0xf) << 11) /* cycle debounce */
+#define UWK_CTL1_IDDIG_E BIT(10) /* enable debounce */
+#define UWK_CTL1_IDDIG_P BIT(9) /* polarity */
+#define UWK_CTL1_IS_P BIT(6) /* polarity for ip sleep */
+
+/*
+ * ip-sleep wakeup mode:
+ * all clocks can be turned off, but the power domain should be kept on
+ */
+static void ssusb_wakeup_ip_sleep_en(struct ssusb_mtk *ssusb)
+{
+ u32 tmp;
+ struct regmap *pericfg = ssusb->pericfg;
+
+ regmap_read(pericfg, PERI_WK_CTRL1, &tmp);
+ tmp &= ~UWK_CTL1_IS_P;
+ tmp &= ~(UWK_CTL1_IS_C(0xf));
+ tmp |= UWK_CTL1_IS_C(0x8);
+ regmap_write(pericfg, PERI_WK_CTRL1, tmp);
+ regmap_write(pericfg, PERI_WK_CTRL1, tmp | UWK_CTL1_IS_E);
+
+ regmap_read(pericfg, PERI_WK_CTRL1, &tmp);
+ dev_dbg(ssusb->dev, "%s(): WK_CTRL1[P6,E25,C26:29]=%#x\n",
+ __func__, tmp);
+}
+
+static void ssusb_wakeup_ip_sleep_dis(struct ssusb_mtk *ssusb)
+{
+ u32 tmp;
+
+ regmap_read(ssusb->pericfg, PERI_WK_CTRL1, &tmp);
+ tmp &= ~UWK_CTL1_IS_E;
+ regmap_write(ssusb->pericfg, PERI_WK_CTRL1, tmp);
+}
+
+int ssusb_wakeup_of_property_parse(struct ssusb_mtk *ssusb,
+ struct device_node *dn)
+{
+ struct device *dev = ssusb->dev;
+
+ /*
+ * Wakeup function is optional, so it is not an error if this property
+ * does not exist; in that case there is no need to read the
+ * related properties.
+ */
+ ssusb->wakeup_en = of_property_read_bool(dn, "mediatek,enable-wakeup");
+ if (!ssusb->wakeup_en)
+ return 0;
+
+ ssusb->wk_deb_p0 = devm_clk_get(dev, "wakeup_deb_p0");
+ if (IS_ERR(ssusb->wk_deb_p0)) {
+ dev_err(dev, "fail to get wakeup_deb_p0\n");
+ return PTR_ERR(ssusb->wk_deb_p0);
+ }
+
+ if (of_property_read_bool(dn, "wakeup_deb_p1")) {
+ ssusb->wk_deb_p1 = devm_clk_get(dev, "wakeup_deb_p1");
+ if (IS_ERR(ssusb->wk_deb_p1)) {
+ dev_err(dev, "fail to get wakeup_deb_p1\n");
+ return PTR_ERR(ssusb->wk_deb_p1);
+ }
+ }
+
+ ssusb->pericfg = syscon_regmap_lookup_by_phandle(dn,
+ "mediatek,syscon-wakeup");
+ if (IS_ERR(ssusb->pericfg)) {
+ dev_err(dev, "fail to get pericfg regs\n");
+ return PTR_ERR(ssusb->pericfg);
+ }
+
+ return 0;
+}
+
+static int ssusb_wakeup_clks_enable(struct ssusb_mtk *ssusb)
+{
+ int ret;
+
+ ret = clk_prepare_enable(ssusb->wk_deb_p0);
+ if (ret) {
+ dev_err(ssusb->dev, "failed to enable wk_deb_p0\n");
+ goto usb_p0_err;
+ }
+
+ ret = clk_prepare_enable(ssusb->wk_deb_p1);
+ if (ret) {
+ dev_err(ssusb->dev, "failed to enable wk_deb_p1\n");
+ goto usb_p1_err;
+ }
+
+ return 0;
+
+usb_p1_err:
+ clk_disable_unprepare(ssusb->wk_deb_p0);
+usb_p0_err:
+ return ret;
+}
+
+static void ssusb_wakeup_clks_disable(struct ssusb_mtk *ssusb)
+{
+ clk_disable_unprepare(ssusb->wk_deb_p1);
+ clk_disable_unprepare(ssusb->wk_deb_p0);
+}
+
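+/* read the xHCI capability register to learn how many U2/U3 ports exist */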
+static void host_ports_num_get(struct ssusb_mtk *ssusb)
+{
+ u32 xhci_cap;
+
+ xhci_cap = mtu3_readl(ssusb->ippc_base, U3D_SSUSB_IP_XHCI_CAP);
+ ssusb->u2_ports = SSUSB_IP_XHCI_U2_PORT_NUM(xhci_cap);
+ ssusb->u3_ports = SSUSB_IP_XHCI_U3_PORT_NUM(xhci_cap);
+
+ dev_dbg(ssusb->dev, "host - u2_ports:%d, u3_ports:%d\n",
+ ssusb->u2_ports, ssusb->u3_ports);
+}
+
+/* only configure the ports that will be used later */
+int ssusb_host_enable(struct ssusb_mtk *ssusb)
+{
+ void __iomem *ibase = ssusb->ippc_base;
+ int num_u3p = ssusb->u3_ports;
+ int num_u2p = ssusb->u2_ports;
+ u32 check_clk;
+ u32 value;
+ int i;
+
+ /* power on host ip */
+ mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL1, SSUSB_IP_HOST_PDN);
+
+ /* power on and enable all u3 ports */
+ for (i = 0; i < num_u3p; i++) {
+ value = mtu3_readl(ibase, SSUSB_U3_CTRL(i));
+ value &= ~(SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS);
+ value |= SSUSB_U3_PORT_HOST_SEL;
+ mtu3_writel(ibase, SSUSB_U3_CTRL(i), value);
+ }
+
+ /* power on and enable all u2 ports */
+ for (i = 0; i < num_u2p; i++) {
+ value = mtu3_readl(ibase, SSUSB_U2_CTRL(i));
+ value &= ~(SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS);
+ value |= SSUSB_U2_PORT_HOST_SEL;
+ mtu3_writel(ibase, SSUSB_U2_CTRL(i), value);
+ }
+
+ check_clk = SSUSB_XHCI_RST_B_STS;
+ if (num_u3p)
+ check_clk = SSUSB_U3_MAC_RST_B_STS;
+
+ return ssusb_check_clocks(ssusb, check_clk);
+}
+
+int ssusb_host_disable(struct ssusb_mtk *ssusb, bool suspend)
+{
+ void __iomem *ibase = ssusb->ippc_base;
+ int num_u3p = ssusb->u3_ports;
+ int num_u2p = ssusb->u2_ports;
+ u32 value;
+ int ret;
+ int i;
+
+ /* power down and disable all u3 ports */
+ for (i = 0; i < num_u3p; i++) {
+ value = mtu3_readl(ibase, SSUSB_U3_CTRL(i));
+ value |= SSUSB_U3_PORT_PDN;
+ value |= suspend ? 0 : SSUSB_U3_PORT_DIS;
+ mtu3_writel(ibase, SSUSB_U3_CTRL(i), value);
+ }
+
+ /* power down and disable all u2 ports */
+ for (i = 0; i < num_u2p; i++) {
+ value = mtu3_readl(ibase, SSUSB_U2_CTRL(i));
+ value |= SSUSB_U2_PORT_PDN;
+ value |= suspend ? 0 : SSUSB_U2_PORT_DIS;
+ mtu3_writel(ibase, SSUSB_U2_CTRL(i), value);
+ }
+
+ /* power down host ip */
+ mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL1, SSUSB_IP_HOST_PDN);
+
+ if (!suspend)
+ return 0;
+
+ /* wait for host ip to sleep */
+ ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS1, value,
+ (value & SSUSB_IP_SLEEP_STS), 100, 100000);
+ if (ret)
+ dev_err(ssusb->dev, "ip sleep failed!!!\n");
+
+ return ret;
+}
+
+static void ssusb_host_setup(struct ssusb_mtk *ssusb)
+{
+ host_ports_num_get(ssusb);
+
+ /*
+ * power on the host IP and power on/enable all ports;
+ * if OTG is supported, the gadget driver will switch port0 to device mode
+ */
+ ssusb_host_enable(ssusb);
+
+ /* if port0 supports dual-role, it works in host mode by default */
+ ssusb_set_vbus(&ssusb->otg_switch, 1);
+}
+
+static void ssusb_host_cleanup(struct ssusb_mtk *ssusb)
+{
+ if (ssusb->is_host)
+ ssusb_set_vbus(&ssusb->otg_switch, 0);
+
+ ssusb_host_disable(ssusb, false);
+}
+
+/*
+ * If the host supports multiple ports, the VBUSes (5V) of all ports except
+ * port0 (which supports OTG) should be enabled by default in the DTS,
+ * because the host driver keeps the link with attached devices when the
+ * system enters suspend mode, so there is no need to control the VBUSes
+ * after initialization.
+ */
+int ssusb_host_init(struct ssusb_mtk *ssusb, struct device_node *parent_dn)
+{
+ struct device *parent_dev = ssusb->dev;
+ int ret;
+
+ ssusb_host_setup(ssusb);
+
+ ret = of_platform_populate(parent_dn, NULL, NULL, parent_dev);
+ if (ret) {
+ dev_dbg(parent_dev, "failed to create child devices at %s\n",
+ parent_dn->full_name);
+ return ret;
+ }
+
+ dev_info(parent_dev, "xHCI platform device register success...\n");
+
+ return 0;
+}
+
+void ssusb_host_exit(struct ssusb_mtk *ssusb)
+{
+ of_platform_depopulate(ssusb->dev);
+ ssusb_host_cleanup(ssusb);
+}
+
+int ssusb_wakeup_enable(struct ssusb_mtk *ssusb)
+{
+ int ret = 0;
+
+ if (ssusb->wakeup_en) {
+ ret = ssusb_wakeup_clks_enable(ssusb);
+ ssusb_wakeup_ip_sleep_en(ssusb);
+ }
+ return ret;
+}
+
+void ssusb_wakeup_disable(struct ssusb_mtk *ssusb)
+{
+ if (ssusb->wakeup_en) {
+ ssusb_wakeup_ip_sleep_dis(ssusb);
+ ssusb_wakeup_clks_disable(ssusb);
+ }
+}
diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h
new file mode 100644
index 000000000000..212367295276
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_hw_regs.h
@@ -0,0 +1,473 @@
+/*
+ * mtu3_hw_regs.h - MediaTek USB3 DRD register and field definitions
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SSUSB_HW_REGS_H_
+#define _SSUSB_HW_REGS_H_
+
+/* segment offset of MAC register */
+#define SSUSB_DEV_BASE 0x0000
+#define SSUSB_EPCTL_CSR_BASE 0x0800
+#define SSUSB_USB3_MAC_CSR_BASE 0x1400
+#define SSUSB_USB3_SYS_CSR_BASE 0x1400
+#define SSUSB_USB2_CSR_BASE 0x2400
+
+/* IPPC register in Infra */
+#define SSUSB_SIFSLV_IPPC_BASE 0x0000
+
+/* --------------- SSUSB_DEV REGISTER DEFINITION --------------- */
+
+#define U3D_LV1ISR (SSUSB_DEV_BASE + 0x0000)
+#define U3D_LV1IER (SSUSB_DEV_BASE + 0x0004)
+#define U3D_LV1IESR (SSUSB_DEV_BASE + 0x0008)
+#define U3D_LV1IECR (SSUSB_DEV_BASE + 0x000C)
+
+#define U3D_EPISR (SSUSB_DEV_BASE + 0x0080)
+#define U3D_EPIER (SSUSB_DEV_BASE + 0x0084)
+#define U3D_EPIESR (SSUSB_DEV_BASE + 0x0088)
+#define U3D_EPIECR (SSUSB_DEV_BASE + 0x008C)
+
+#define U3D_EP0CSR (SSUSB_DEV_BASE + 0x0100)
+#define U3D_RXCOUNT0 (SSUSB_DEV_BASE + 0x0108)
+#define U3D_RESERVED (SSUSB_DEV_BASE + 0x010C)
+#define U3D_TX1CSR0 (SSUSB_DEV_BASE + 0x0110)
+#define U3D_TX1CSR1 (SSUSB_DEV_BASE + 0x0114)
+#define U3D_TX1CSR2 (SSUSB_DEV_BASE + 0x0118)
+
+#define U3D_RX1CSR0 (SSUSB_DEV_BASE + 0x0210)
+#define U3D_RX1CSR1 (SSUSB_DEV_BASE + 0x0214)
+#define U3D_RX1CSR2 (SSUSB_DEV_BASE + 0x0218)
+
+#define U3D_FIFO0 (SSUSB_DEV_BASE + 0x0300)
+
+#define U3D_QCR0 (SSUSB_DEV_BASE + 0x0400)
+#define U3D_QCR1 (SSUSB_DEV_BASE + 0x0404)
+#define U3D_QCR2 (SSUSB_DEV_BASE + 0x0408)
+#define U3D_QCR3 (SSUSB_DEV_BASE + 0x040C)
+
+#define U3D_TXQCSR1 (SSUSB_DEV_BASE + 0x0510)
+#define U3D_TXQSAR1 (SSUSB_DEV_BASE + 0x0514)
+#define U3D_TXQCPR1 (SSUSB_DEV_BASE + 0x0518)
+
+#define U3D_RXQCSR1 (SSUSB_DEV_BASE + 0x0610)
+#define U3D_RXQSAR1 (SSUSB_DEV_BASE + 0x0614)
+#define U3D_RXQCPR1 (SSUSB_DEV_BASE + 0x0618)
+#define U3D_RXQLDPR1 (SSUSB_DEV_BASE + 0x061C)
+
+#define U3D_QISAR0 (SSUSB_DEV_BASE + 0x0700)
+#define U3D_QIER0 (SSUSB_DEV_BASE + 0x0704)
+#define U3D_QIESR0 (SSUSB_DEV_BASE + 0x0708)
+#define U3D_QIECR0 (SSUSB_DEV_BASE + 0x070C)
+#define U3D_QISAR1 (SSUSB_DEV_BASE + 0x0710)
+#define U3D_QIER1 (SSUSB_DEV_BASE + 0x0714)
+#define U3D_QIESR1 (SSUSB_DEV_BASE + 0x0718)
+#define U3D_QIECR1 (SSUSB_DEV_BASE + 0x071C)
+
+#define U3D_TQERRIR0 (SSUSB_DEV_BASE + 0x0780)
+#define U3D_TQERRIER0 (SSUSB_DEV_BASE + 0x0784)
+#define U3D_TQERRIESR0 (SSUSB_DEV_BASE + 0x0788)
+#define U3D_TQERRIECR0 (SSUSB_DEV_BASE + 0x078C)
+#define U3D_RQERRIR0 (SSUSB_DEV_BASE + 0x07C0)
+#define U3D_RQERRIER0 (SSUSB_DEV_BASE + 0x07C4)
+#define U3D_RQERRIESR0 (SSUSB_DEV_BASE + 0x07C8)
+#define U3D_RQERRIECR0 (SSUSB_DEV_BASE + 0x07CC)
+#define U3D_RQERRIR1 (SSUSB_DEV_BASE + 0x07D0)
+#define U3D_RQERRIER1 (SSUSB_DEV_BASE + 0x07D4)
+#define U3D_RQERRIESR1 (SSUSB_DEV_BASE + 0x07D8)
+#define U3D_RQERRIECR1 (SSUSB_DEV_BASE + 0x07DC)
+
+#define U3D_CAP_EP0FFSZ (SSUSB_DEV_BASE + 0x0C04)
+#define U3D_CAP_EPNTXFFSZ (SSUSB_DEV_BASE + 0x0C08)
+#define U3D_CAP_EPNRXFFSZ (SSUSB_DEV_BASE + 0x0C0C)
+#define U3D_CAP_EPINFO (SSUSB_DEV_BASE + 0x0C10)
+#define U3D_MISC_CTRL (SSUSB_DEV_BASE + 0x0C84)
+
+/*---------------- SSUSB_DEV FIELD DEFINITION ---------------*/
+
+/* U3D_LV1ISR */
+#define EP_CTRL_INTR BIT(5)
+#define MAC2_INTR BIT(4)
+#define DMA_INTR BIT(3)
+#define MAC3_INTR BIT(2)
+#define QMU_INTR BIT(1)
+#define BMU_INTR BIT(0)
+
+/* U3D_LV1IECR */
+#define LV1IECR_MSK GENMASK(31, 0)
+
+/* U3D_EPISR */
+#define EPRISR(x) (BIT(16) << (x))
+#define EPTISR(x) (BIT(0) << (x))
+#define EP0ISR BIT(0)
+
+/* U3D_EP0CSR */
+#define EP0_SENDSTALL BIT(25)
+#define EP0_FIFOFULL BIT(23)
+#define EP0_SENTSTALL BIT(22)
+#define EP0_DPHTX BIT(20)
+#define EP0_DATAEND BIT(19)
+#define EP0_TXPKTRDY BIT(18)
+#define EP0_SETUPPKTRDY BIT(17)
+#define EP0_RXPKTRDY BIT(16)
+#define EP0_MAXPKTSZ_MSK GENMASK(9, 0)
+#define EP0_MAXPKTSZ(x) ((x) & EP0_MAXPKTSZ_MSK)
+#define EP0_W1C_BITS (~(EP0_RXPKTRDY | EP0_SETUPPKTRDY | EP0_SENTSTALL))
+
+/* U3D_TX1CSR0 */
+#define TX_DMAREQEN BIT(29)
+#define TX_FIFOFULL BIT(25)
+#define TX_FIFOEMPTY BIT(24)
+#define TX_SENTSTALL BIT(22)
+#define TX_SENDSTALL BIT(21)
+#define TX_TXPKTRDY BIT(16)
+#define TX_TXMAXPKTSZ_MSK GENMASK(10, 0)
+#define TX_TXMAXPKTSZ(x) ((x) & TX_TXMAXPKTSZ_MSK)
+#define TX_W1C_BITS (~(TX_SENTSTALL))
+
+/* U3D_TX1CSR1 */
+#define TX_MULT(x) (((x) & 0x3) << 22)
+#define TX_MAX_PKT(x) (((x) & 0x3f) << 16)
+#define TX_SLOT(x) (((x) & 0x3f) << 8)
+#define TX_TYPE(x) (((x) & 0x3) << 4)
+#define TX_SS_BURST(x) (((x) & 0xf) << 0)
+
+/* for TX_TYPE & RX_TYPE */
+#define TYPE_BULK (0x0)
+#define TYPE_INT (0x1)
+#define TYPE_ISO (0x2)
+#define TYPE_MASK (0x3)
+
+/* U3D_TX1CSR2 */
+#define TX_BINTERVAL(x) (((x) & 0xff) << 24)
+#define TX_FIFOSEGSIZE(x) (((x) & 0xf) << 16)
+#define TX_FIFOADDR(x) (((x) & 0x1fff) << 0)
+
+/* U3D_RX1CSR0 */
+#define RX_DMAREQEN BIT(29)
+#define RX_SENTSTALL BIT(22)
+#define RX_SENDSTALL BIT(21)
+#define RX_RXPKTRDY BIT(16)
+#define RX_RXMAXPKTSZ_MSK GENMASK(10, 0)
+#define RX_RXMAXPKTSZ(x) ((x) & RX_RXMAXPKTSZ_MSK)
+#define RX_W1C_BITS (~(RX_SENTSTALL | RX_RXPKTRDY))
+
+/* U3D_RX1CSR1 */
+#define RX_MULT(x) (((x) & 0x3) << 22)
+#define RX_MAX_PKT(x) (((x) & 0x3f) << 16)
+#define RX_SLOT(x) (((x) & 0x3f) << 8)
+#define RX_TYPE(x) (((x) & 0x3) << 4)
+#define RX_SS_BURST(x) (((x) & 0xf) << 0)
+
+/* U3D_RX1CSR2 */
+#define RX_BINTERVAL(x) (((x) & 0xff) << 24)
+#define RX_FIFOSEGSIZE(x) (((x) & 0xf) << 16)
+#define RX_FIFOADDR(x) (((x) & 0x1fff) << 0)
+
+/* U3D_QCR0 */
+#define QMU_RX_CS_EN(x) (BIT(16) << (x))
+#define QMU_TX_CS_EN(x) (BIT(0) << (x))
+#define QMU_CS16B_EN BIT(0)
+
+/* U3D_QCR1 */
+#define QMU_TX_ZLP(x) (BIT(0) << (x))
+
+/* U3D_QCR3 */
+#define QMU_RX_COZ(x) (BIT(16) << (x))
+#define QMU_RX_ZLP(x) (BIT(0) << (x))
+
+/* U3D_TXQCSR1 */
+/* U3D_RXQCSR1 */
+#define QMU_Q_ACTIVE BIT(15)
+#define QMU_Q_STOP BIT(2)
+#define QMU_Q_RESUME BIT(1)
+#define QMU_Q_START BIT(0)
+
+/* U3D_QISAR0, U3D_QIER0, U3D_QIESR0, U3D_QIECR0 */
+#define QMU_RX_DONE_INT(x) (BIT(16) << (x))
+#define QMU_TX_DONE_INT(x) (BIT(0) << (x))
+
+/* U3D_QISAR1, U3D_QIER1, U3D_QIESR1, U3D_QIECR1 */
+#define RXQ_ZLPERR_INT BIT(20)
+#define RXQ_LENERR_INT BIT(18)
+#define RXQ_CSERR_INT BIT(17)
+#define RXQ_EMPTY_INT BIT(16)
+#define TXQ_LENERR_INT BIT(2)
+#define TXQ_CSERR_INT BIT(1)
+#define TXQ_EMPTY_INT BIT(0)
+
+/* U3D_TQERRIR0, U3D_TQERRIER0, U3D_TQERRIESR0, U3D_TQERRIECR0 */
+#define QMU_TX_LEN_ERR(x) (BIT(16) << (x))
+#define QMU_TX_CS_ERR(x) (BIT(0) << (x))
+
+/* U3D_RQERRIR0, U3D_RQERRIER0, U3D_RQERRIESR0, U3D_RQERRIECR0 */
+#define QMU_RX_LEN_ERR(x) (BIT(16) << (x))
+#define QMU_RX_CS_ERR(x) (BIT(0) << (x))
+
+/* U3D_RQERRIR1, U3D_RQERRIER1, U3D_RQERRIESR1, U3D_RQERRIECR1 */
+#define QMU_RX_ZLP_ERR(n) (BIT(16) << (n))
+
+/* U3D_CAP_EPINFO */
+#define CAP_RX_EP_NUM(x) (((x) >> 8) & 0x1f)
+#define CAP_TX_EP_NUM(x) ((x) & 0x1f)
+
+/* U3D_MISC_CTRL */
+#define VBUS_ON BIT(1)
+#define VBUS_FRC_EN BIT(0)
+
+
+/*---------------- SSUSB_EPCTL_CSR REGISTER DEFINITION ----------------*/
+
+#define U3D_DEVICE_CONF (SSUSB_EPCTL_CSR_BASE + 0x0000)
+#define U3D_EP_RST (SSUSB_EPCTL_CSR_BASE + 0x0004)
+
+#define U3D_DEV_LINK_INTR_ENABLE (SSUSB_EPCTL_CSR_BASE + 0x0050)
+#define U3D_DEV_LINK_INTR (SSUSB_EPCTL_CSR_BASE + 0x0054)
+
+/*---------------- SSUSB_EPCTL_CSR FIELD DEFINITION ----------------*/
+
+/* U3D_DEVICE_CONF */
+#define DEV_ADDR_MSK GENMASK(30, 24)
+#define DEV_ADDR(x) ((0x7f & (x)) << 24)
+#define HW_USB2_3_SEL BIT(18)
+#define SW_USB2_3_SEL_EN BIT(17)
+#define SW_USB2_3_SEL BIT(16)
+#define SSUSB_DEV_SPEED(x) ((x) & 0x7)
+
+/* U3D_EP_RST */
+#define EP1_IN_RST BIT(17)
+#define EP1_OUT_RST BIT(1)
+#define EP_RST(is_in, epnum) (((is_in) ? BIT(16) : BIT(0)) << (epnum))
+#define EP0_RST BIT(0)
+
+/* U3D_DEV_LINK_INTR_ENABLE */
+/* U3D_DEV_LINK_INTR */
+#define SSUSB_DEV_SPEED_CHG_INTR BIT(0)
+
+
+/*---------------- SSUSB_USB3_MAC_CSR REGISTER DEFINITION ----------------*/
+
+#define U3D_LTSSM_CTRL (SSUSB_USB3_MAC_CSR_BASE + 0x0010)
+#define U3D_USB3_CONFIG (SSUSB_USB3_MAC_CSR_BASE + 0x001C)
+
+#define U3D_LTSSM_INTR_ENABLE (SSUSB_USB3_MAC_CSR_BASE + 0x013C)
+#define U3D_LTSSM_INTR (SSUSB_USB3_MAC_CSR_BASE + 0x0140)
+
+/*---------------- SSUSB_USB3_MAC_CSR FIELD DEFINITION ----------------*/
+
+/* U3D_LTSSM_CTRL */
+#define FORCE_POLLING_FAIL BIT(4)
+#define FORCE_RXDETECT_FAIL BIT(3)
+#define SOFT_U3_EXIT_EN BIT(2)
+#define COMPLIANCE_EN BIT(1)
+#define U1_GO_U2_EN BIT(0)
+
+/* U3D_USB3_CONFIG */
+#define USB3_EN BIT(0)
+
+/* U3D_LTSSM_INTR_ENABLE */
+/* U3D_LTSSM_INTR */
+#define U3_RESUME_INTR BIT(18)
+#define U3_LFPS_TMOUT_INTR BIT(17)
+#define VBUS_FALL_INTR BIT(16)
+#define VBUS_RISE_INTR BIT(15)
+#define RXDET_SUCCESS_INTR BIT(14)
+#define EXIT_U3_INTR BIT(13)
+#define EXIT_U2_INTR BIT(12)
+#define EXIT_U1_INTR BIT(11)
+#define ENTER_U3_INTR BIT(10)
+#define ENTER_U2_INTR BIT(9)
+#define ENTER_U1_INTR BIT(8)
+#define ENTER_U0_INTR BIT(7)
+#define RECOVERY_INTR BIT(6)
+#define WARM_RST_INTR BIT(5)
+#define HOT_RST_INTR BIT(4)
+#define LOOPBACK_INTR BIT(3)
+#define COMPLIANCE_INTR BIT(2)
+#define SS_DISABLE_INTR BIT(1)
+#define SS_INACTIVE_INTR BIT(0)
+
+/*---------------- SSUSB_USB3_SYS_CSR REGISTER DEFINITION ----------------*/
+
+#define U3D_LINK_UX_INACT_TIMER (SSUSB_USB3_SYS_CSR_BASE + 0x020C)
+#define U3D_LINK_POWER_CONTROL (SSUSB_USB3_SYS_CSR_BASE + 0x0210)
+#define U3D_LINK_ERR_COUNT (SSUSB_USB3_SYS_CSR_BASE + 0x0214)
+
+/*---------------- SSUSB_USB3_SYS_CSR FIELD DEFINITION ----------------*/
+
+/* U3D_LINK_UX_INACT_TIMER */
+#define DEV_U2_INACT_TIMEOUT_MSK GENMASK(23, 16)
+#define DEV_U2_INACT_TIMEOUT_VALUE(x) (((x) & 0xff) << 16)
+#define U2_INACT_TIMEOUT_MSK GENMASK(15, 8)
+#define U1_INACT_TIMEOUT_MSK GENMASK(7, 0)
+#define U1_INACT_TIMEOUT_VALUE(x) ((x) & 0xff)
+
+/* U3D_LINK_POWER_CONTROL */
+#define SW_U2_ACCEPT_ENABLE BIT(9)
+#define SW_U1_ACCEPT_ENABLE BIT(8)
+#define UX_EXIT BIT(5)
+#define LGO_U3 BIT(4)
+#define LGO_U2 BIT(3)
+#define LGO_U1 BIT(2)
+#define SW_U2_REQUEST_ENABLE BIT(1)
+#define SW_U1_REQUEST_ENABLE BIT(0)
+
+/* U3D_LINK_ERR_COUNT */
+#define CLR_LINK_ERR_CNT BIT(16)
+#define LINK_ERROR_COUNT GENMASK(15, 0)
+
+/*---------------- SSUSB_USB2_CSR REGISTER DEFINITION ----------------*/
+
+#define U3D_POWER_MANAGEMENT (SSUSB_USB2_CSR_BASE + 0x0004)
+#define U3D_DEVICE_CONTROL (SSUSB_USB2_CSR_BASE + 0x000C)
+#define U3D_USB2_TEST_MODE (SSUSB_USB2_CSR_BASE + 0x0014)
+#define U3D_COMMON_USB_INTR_ENABLE (SSUSB_USB2_CSR_BASE + 0x0018)
+#define U3D_COMMON_USB_INTR (SSUSB_USB2_CSR_BASE + 0x001C)
+#define U3D_LINK_RESET_INFO (SSUSB_USB2_CSR_BASE + 0x0024)
+#define U3D_USB20_FRAME_NUM (SSUSB_USB2_CSR_BASE + 0x003C)
+#define U3D_USB20_LPM_PARAMETER (SSUSB_USB2_CSR_BASE + 0x0044)
+#define U3D_USB20_MISC_CONTROL (SSUSB_USB2_CSR_BASE + 0x004C)
+
+/*---------------- SSUSB_USB2_CSR FIELD DEFINITION ----------------*/
+
+/* U3D_POWER_MANAGEMENT */
+#define LPM_BESL_STALL BIT(14)
+#define LPM_BESLD_STALL BIT(13)
+#define LPM_RWP BIT(11)
+#define LPM_HRWE BIT(10)
+#define LPM_MODE(x) (((x) & 0x3) << 8)
+#define ISO_UPDATE BIT(7)
+#define SOFT_CONN BIT(6)
+#define HS_ENABLE BIT(5)
+#define RESUME BIT(2)
+#define SUSPENDM_ENABLE BIT(0)
+
+/* U3D_DEVICE_CONTROL */
+#define DC_HOSTREQ BIT(1)
+#define DC_SESSION BIT(0)
+
+/* U3D_USB2_TEST_MODE */
+#define U2U3_AUTO_SWITCH BIT(10)
+#define LPM_FORCE_STALL BIT(8)
+#define FIFO_ACCESS BIT(6)
+#define FORCE_FS BIT(5)
+#define FORCE_HS BIT(4)
+#define TEST_PACKET_MODE BIT(3)
+#define TEST_K_MODE BIT(2)
+#define TEST_J_MODE BIT(1)
+#define TEST_SE0_NAK_MODE BIT(0)
+
+/* U3D_COMMON_USB_INTR_ENABLE */
+/* U3D_COMMON_USB_INTR */
+#define LPM_RESUME_INTR BIT(9)
+#define LPM_INTR BIT(8)
+#define DISCONN_INTR BIT(5)
+#define CONN_INTR BIT(4)
+#define SOF_INTR BIT(3)
+#define RESET_INTR BIT(2)
+#define RESUME_INTR BIT(1)
+#define SUSPEND_INTR BIT(0)
+
+/* U3D_LINK_RESET_INFO */
+#define WTCHRP_MSK GENMASK(19, 16)
+
+/* U3D_USB20_LPM_PARAMETER */
+#define LPM_BESLCK_U3(x) (((x) & 0xf) << 12)
+#define LPM_BESLCK(x) (((x) & 0xf) << 8)
+#define LPM_BESLDCK(x) (((x) & 0xf) << 4)
+#define LPM_BESL GENMASK(3, 0)
+
+/* U3D_USB20_MISC_CONTROL */
+#define LPM_U3_ACK_EN BIT(0)
+
+/*---------------- SSUSB_SIFSLV_IPPC REGISTER DEFINITION ----------------*/
+
+#define U3D_SSUSB_IP_PW_CTRL0 (SSUSB_SIFSLV_IPPC_BASE + 0x0000)
+#define U3D_SSUSB_IP_PW_CTRL1 (SSUSB_SIFSLV_IPPC_BASE + 0x0004)
+#define U3D_SSUSB_IP_PW_CTRL2 (SSUSB_SIFSLV_IPPC_BASE + 0x0008)
+#define U3D_SSUSB_IP_PW_CTRL3 (SSUSB_SIFSLV_IPPC_BASE + 0x000C)
+#define U3D_SSUSB_IP_PW_STS1 (SSUSB_SIFSLV_IPPC_BASE + 0x0010)
+#define U3D_SSUSB_IP_PW_STS2 (SSUSB_SIFSLV_IPPC_BASE + 0x0014)
+#define U3D_SSUSB_OTG_STS (SSUSB_SIFSLV_IPPC_BASE + 0x0018)
+#define U3D_SSUSB_OTG_STS_CLR (SSUSB_SIFSLV_IPPC_BASE + 0x001C)
+#define U3D_SSUSB_IP_XHCI_CAP (SSUSB_SIFSLV_IPPC_BASE + 0x0024)
+#define U3D_SSUSB_IP_DEV_CAP (SSUSB_SIFSLV_IPPC_BASE + 0x0028)
+#define U3D_SSUSB_OTG_INT_EN (SSUSB_SIFSLV_IPPC_BASE + 0x002C)
+#define U3D_SSUSB_U3_CTRL_0P (SSUSB_SIFSLV_IPPC_BASE + 0x0030)
+#define U3D_SSUSB_U2_CTRL_0P (SSUSB_SIFSLV_IPPC_BASE + 0x0050)
+#define U3D_SSUSB_REF_CK_CTRL (SSUSB_SIFSLV_IPPC_BASE + 0x008C)
+#define U3D_SSUSB_DEV_RST_CTRL (SSUSB_SIFSLV_IPPC_BASE + 0x0098)
+#define U3D_SSUSB_HW_ID (SSUSB_SIFSLV_IPPC_BASE + 0x00A0)
+#define U3D_SSUSB_HW_SUB_ID (SSUSB_SIFSLV_IPPC_BASE + 0x00A4)
+#define U3D_SSUSB_IP_SPARE0 (SSUSB_SIFSLV_IPPC_BASE + 0x00C8)
+
+/*---------------- SSUSB_SIFSLV_IPPC FIELD DEFINITION ----------------*/
+
+/* U3D_SSUSB_IP_PW_CTRL0 */
+#define SSUSB_IP_SW_RST BIT(0)
+
+/* U3D_SSUSB_IP_PW_CTRL1 */
+#define SSUSB_IP_HOST_PDN BIT(0)
+
+/* U3D_SSUSB_IP_PW_CTRL2 */
+#define SSUSB_IP_DEV_PDN BIT(0)
+
+/* U3D_SSUSB_IP_PW_CTRL3 */
+#define SSUSB_IP_PCIE_PDN BIT(0)
+
+/* U3D_SSUSB_IP_PW_STS1 */
+#define SSUSB_IP_SLEEP_STS BIT(30)
+#define SSUSB_U3_MAC_RST_B_STS BIT(16)
+#define SSUSB_XHCI_RST_B_STS BIT(11)
+#define SSUSB_SYS125_RST_B_STS BIT(10)
+#define SSUSB_REF_RST_B_STS BIT(8)
+#define SSUSB_SYSPLL_STABLE BIT(0)
+
+/* U3D_SSUSB_IP_PW_STS2 */
+#define SSUSB_U2_MAC_SYS_RST_B_STS BIT(0)
+
+/* U3D_SSUSB_OTG_STS */
+#define SSUSB_VBUS_VALID BIT(9)
+
+/* U3D_SSUSB_OTG_STS_CLR */
+#define SSUSB_VBUS_INTR_CLR BIT(6)
+
+/* U3D_SSUSB_IP_XHCI_CAP */
+#define SSUSB_IP_XHCI_U2_PORT_NUM(x) (((x) >> 8) & 0xff)
+#define SSUSB_IP_XHCI_U3_PORT_NUM(x) ((x) & 0xff)
+
+/* U3D_SSUSB_IP_DEV_CAP */
+#define SSUSB_IP_DEV_U3_PORT_NUM(x) ((x) & 0xff)
+
+/* U3D_SSUSB_OTG_INT_EN */
+#define SSUSB_VBUS_CHG_INT_A_EN BIT(7)
+#define SSUSB_VBUS_CHG_INT_B_EN BIT(6)
+
+/* U3D_SSUSB_U3_CTRL_0P */
+#define SSUSB_U3_PORT_HOST_SEL BIT(2)
+#define SSUSB_U3_PORT_PDN BIT(1)
+#define SSUSB_U3_PORT_DIS BIT(0)
+
+/* U3D_SSUSB_U2_CTRL_0P */
+#define SSUSB_U2_PORT_OTG_SEL BIT(7)
+#define SSUSB_U2_PORT_HOST_SEL BIT(2)
+#define SSUSB_U2_PORT_PDN BIT(1)
+#define SSUSB_U2_PORT_DIS BIT(0)
+
+/* U3D_SSUSB_DEV_RST_CTRL */
+#define SSUSB_DEV_SW_RST BIT(0)
+
+#endif /* _SSUSB_HW_REGS_H_ */
diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c
new file mode 100644
index 000000000000..783367805c99
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_plat.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+
+#include "mtu3.h"
+#include "mtu3_dr.h"
+
+/* u2-port0 should be powered on and enabled; */
+int ssusb_check_clocks(struct ssusb_mtk *ssusb, u32 ex_clks)
+{
+ void __iomem *ibase = ssusb->ippc_base;
+ u32 value, check_val;
+ int ret;
+
+ check_val = ex_clks | SSUSB_SYS125_RST_B_STS | SSUSB_SYSPLL_STABLE |
+ SSUSB_REF_RST_B_STS;
+
+ ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS1, value,
+ (check_val == (value & check_val)), 100, 20000);
+ if (ret) {
+ dev_err(ssusb->dev, "clks of sts1 are not stable!\n");
+ return ret;
+ }
+
+ ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS2, value,
+ (value & SSUSB_U2_MAC_SYS_RST_B_STS), 100, 10000);
+ if (ret) {
+ dev_err(ssusb->dev, "mac2 clock is not stable\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ssusb_phy_init(struct ssusb_mtk *ssusb)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ssusb->num_phys; i++) {
+ ret = phy_init(ssusb->phys[i]);
+ if (ret)
+ goto exit_phy;
+ }
+ return 0;
+
+exit_phy:
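+	/* roll back the PHYs that were already initialized */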
+ for (; i > 0; i--)
+ phy_exit(ssusb->phys[i - 1]);
+
+ return ret;
+}
+
+static int ssusb_phy_exit(struct ssusb_mtk *ssusb)
+{
+ int i;
+
+ for (i = 0; i < ssusb->num_phys; i++)
+ phy_exit(ssusb->phys[i]);
+
+ return 0;
+}
+
+static int ssusb_phy_power_on(struct ssusb_mtk *ssusb)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ssusb->num_phys; i++) {
+ ret = phy_power_on(ssusb->phys[i]);
+ if (ret)
+ goto power_off_phy;
+ }
+ return 0;
+
+power_off_phy:
+ for (; i > 0; i--)
+ phy_power_off(ssusb->phys[i - 1]);
+
+ return ret;
+}
+
+static void ssusb_phy_power_off(struct ssusb_mtk *ssusb)
+{
+ unsigned int i;
+
+ for (i = 0; i < ssusb->num_phys; i++)
+ phy_power_off(ssusb->phys[i]);
+}
+
+static int ssusb_rscs_init(struct ssusb_mtk *ssusb)
+{
+ int ret = 0;
+
+ ret = regulator_enable(ssusb->vusb33);
+ if (ret) {
+ dev_err(ssusb->dev, "failed to enable vusb33\n");
+ goto vusb33_err;
+ }
+
+ ret = clk_prepare_enable(ssusb->sys_clk);
+ if (ret) {
+ dev_err(ssusb->dev, "failed to enable sys_clk\n");
+ goto clk_err;
+ }
+
+ ret = ssusb_phy_init(ssusb);
+ if (ret) {
+ dev_err(ssusb->dev, "failed to init phy\n");
+ goto phy_init_err;
+ }
+
+ ret = ssusb_phy_power_on(ssusb);
+ if (ret) {
+ dev_err(ssusb->dev, "failed to power on phy\n");
+ goto phy_err;
+ }
+
+ return 0;
+
+phy_err:
+ ssusb_phy_exit(ssusb);
+phy_init_err:
+ clk_disable_unprepare(ssusb->sys_clk);
+clk_err:
+ regulator_disable(ssusb->vusb33);
+vusb33_err:
+
+ return ret;
+}
+
+static void ssusb_rscs_exit(struct ssusb_mtk *ssusb)
+{
+ clk_disable_unprepare(ssusb->sys_clk);
+ regulator_disable(ssusb->vusb33);
+ ssusb_phy_power_off(ssusb);
+ ssusb_phy_exit(ssusb);
+}
+
+static void ssusb_ip_sw_reset(struct ssusb_mtk *ssusb)
+{
+ /* reset whole ip (xhci & u3d) */
+ mtu3_setbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL0, SSUSB_IP_SW_RST);
+ udelay(1);
+ mtu3_clrbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL0, SSUSB_IP_SW_RST);
+}
+
+static int get_iddig_pinctrl(struct ssusb_mtk *ssusb)
+{
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+
+ otg_sx->id_pinctrl = devm_pinctrl_get(ssusb->dev);
+ if (IS_ERR(otg_sx->id_pinctrl)) {
+ dev_err(ssusb->dev, "Cannot find id pinctrl!\n");
+ return PTR_ERR(otg_sx->id_pinctrl);
+ }
+
+ otg_sx->id_float =
+ pinctrl_lookup_state(otg_sx->id_pinctrl, "id_float");
+ if (IS_ERR(otg_sx->id_float)) {
+ dev_err(ssusb->dev, "Cannot find pinctrl id_float!\n");
+ return PTR_ERR(otg_sx->id_float);
+ }
+
+ otg_sx->id_ground =
+ pinctrl_lookup_state(otg_sx->id_pinctrl, "id_ground");
+ if (IS_ERR(otg_sx->id_ground)) {
+ dev_err(ssusb->dev, "Cannot find pinctrl id_ground!\n");
+ return PTR_ERR(otg_sx->id_ground);
+ }
+
+ return 0;
+}
+
+static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+ struct device *dev = &pdev->dev;
+ struct regulator *vbus;
+ struct resource *res;
+ int i;
+ int ret;
+
+ ssusb->num_phys = of_count_phandle_with_args(node,
+ "phys", "#phy-cells");
+ if (ssusb->num_phys > 0) {
+ ssusb->phys = devm_kcalloc(dev, ssusb->num_phys,
+ sizeof(*ssusb->phys), GFP_KERNEL);
+ if (!ssusb->phys)
+ return -ENOMEM;
+ } else {
+ ssusb->num_phys = 0;
+ }
+
+ for (i = 0; i < ssusb->num_phys; i++) {
+ ssusb->phys[i] = devm_of_phy_get_by_index(dev, node, i);
+ if (IS_ERR(ssusb->phys[i])) {
+ dev_err(dev, "failed to get phy-%d\n", i);
+ return PTR_ERR(ssusb->phys[i]);
+ }
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ippc");
+ ssusb->ippc_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ssusb->ippc_base)) {
+ dev_err(dev, "failed to map memory for ippc\n");
+ return PTR_ERR(ssusb->ippc_base);
+ }
+
+ ssusb->vusb33 = devm_regulator_get(&pdev->dev, "vusb33");
+ if (IS_ERR(ssusb->vusb33)) {
+ dev_err(dev, "failed to get vusb33\n");
+ return PTR_ERR(ssusb->vusb33);
+ }
+
+ ssusb->sys_clk = devm_clk_get(dev, "sys_ck");
+ if (IS_ERR(ssusb->sys_clk)) {
+ dev_err(dev, "failed to get sys clock\n");
+ return PTR_ERR(ssusb->sys_clk);
+ }
+
+ ssusb->dr_mode = usb_get_dr_mode(dev);
+ if (ssusb->dr_mode == USB_DR_MODE_UNKNOWN) {
+ dev_err(dev, "dr_mode is error\n");
+ return -EINVAL;
+ }
+
+ if (ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
+ return 0;
+
+ /* if host role is supported */
+ ret = ssusb_wakeup_of_property_parse(ssusb, node);
+ if (ret)
+ return ret;
+
+ if (ssusb->dr_mode != USB_DR_MODE_OTG)
+ return 0;
+
+ /* if dual-role mode is supported */
+ vbus = devm_regulator_get(&pdev->dev, "vbus");
+ if (IS_ERR(vbus)) {
+ dev_err(dev, "failed to get vbus\n");
+ return PTR_ERR(vbus);
+ }
+ otg_sx->vbus = vbus;
+
+ otg_sx->is_u3_drd = of_property_read_bool(node, "mediatek,usb3-drd");
+ otg_sx->manual_drd_enabled =
+ of_property_read_bool(node, "enable-manual-drd");
+
+ if (of_property_read_bool(node, "extcon")) {
+ otg_sx->edev = extcon_get_edev_by_phandle(ssusb->dev, 0);
+ if (IS_ERR(otg_sx->edev)) {
+ dev_err(ssusb->dev, "couldn't get extcon device\n");
+ return -EPROBE_DEFER;
+ }
+ if (otg_sx->manual_drd_enabled) {
+ ret = get_iddig_pinctrl(ssusb);
+ if (ret)
+ return ret;
+ }
+ }
+
+ dev_info(dev, "dr_mode: %d, is_u3_dr: %d\n",
+ ssusb->dr_mode, otg_sx->is_u3_drd);
+
+ return 0;
+}
+
+static int mtu3_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct ssusb_mtk *ssusb;
+ int ret = -ENOMEM;
+
+	/* all fields are zero-initialized by default */
+ ssusb = devm_kzalloc(dev, sizeof(*ssusb), GFP_KERNEL);
+ if (!ssusb)
+ return -ENOMEM;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "No suitable DMA config available\n");
+ return -ENOTSUPP;
+ }
+
+ platform_set_drvdata(pdev, ssusb);
+ ssusb->dev = dev;
+
+ ret = get_ssusb_rscs(pdev, ssusb);
+ if (ret)
+ return ret;
+
+ /* enable power domain */
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+ device_enable_async_suspend(dev);
+
+ ret = ssusb_rscs_init(ssusb);
+ if (ret)
+ goto comm_init_err;
+
+ ssusb_ip_sw_reset(ssusb);
+
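+	/* a single-role build overrides the dr_mode obtained from DT */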
+ if (IS_ENABLED(CONFIG_USB_MTU3_HOST))
+ ssusb->dr_mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_MTU3_GADGET))
+ ssusb->dr_mode = USB_DR_MODE_PERIPHERAL;
+
+ /* default as host */
+ ssusb->is_host = !(ssusb->dr_mode == USB_DR_MODE_PERIPHERAL);
+
+ switch (ssusb->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ ret = ssusb_gadget_init(ssusb);
+ if (ret) {
+ dev_err(dev, "failed to initialize gadget\n");
+ goto comm_exit;
+ }
+ break;
+ case USB_DR_MODE_HOST:
+ ret = ssusb_host_init(ssusb, node);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ goto comm_exit;
+ }
+ break;
+ case USB_DR_MODE_OTG:
+ ret = ssusb_gadget_init(ssusb);
+ if (ret) {
+ dev_err(dev, "failed to initialize gadget\n");
+ goto comm_exit;
+ }
+
+ ret = ssusb_host_init(ssusb, node);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ goto gadget_exit;
+ }
+
+ ssusb_otg_switch_init(ssusb);
+ break;
+ default:
+ dev_err(dev, "unsupported mode: %d\n", ssusb->dr_mode);
+ ret = -EINVAL;
+ goto comm_exit;
+ }
+
+ return 0;
+
+gadget_exit:
+ ssusb_gadget_exit(ssusb);
+comm_exit:
+ ssusb_rscs_exit(ssusb);
+comm_init_err:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int mtu3_remove(struct platform_device *pdev)
+{
+ struct ssusb_mtk *ssusb = platform_get_drvdata(pdev);
+
+ switch (ssusb->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ ssusb_gadget_exit(ssusb);
+ break;
+ case USB_DR_MODE_HOST:
+ ssusb_host_exit(ssusb);
+ break;
+ case USB_DR_MODE_OTG:
+ ssusb_otg_switch_exit(ssusb);
+ ssusb_gadget_exit(ssusb);
+ ssusb_host_exit(ssusb);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ssusb_rscs_exit(ssusb);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+/*
+ * When dual-role mode is supported, reject suspend while the controller
+ * works in device mode.
+ */
+static int __maybe_unused mtu3_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ssusb_mtk *ssusb = platform_get_drvdata(pdev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /* REVISIT: disconnect it for only device mode? */
+ if (!ssusb->is_host)
+ return 0;
+
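+	/* host mode: quiesce the host, gate the PHYs and clock, then arm wakeup */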
+ ssusb_host_disable(ssusb, true);
+ ssusb_phy_power_off(ssusb);
+ clk_disable_unprepare(ssusb->sys_clk);
+ ssusb_wakeup_enable(ssusb);
+
+ return 0;
+}
+
+static int __maybe_unused mtu3_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ssusb_mtk *ssusb = platform_get_drvdata(pdev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (!ssusb->is_host)
+ return 0;
+
+ ssusb_wakeup_disable(ssusb);
+ clk_prepare_enable(ssusb->sys_clk);
+ ssusb_phy_power_on(ssusb);
+ ssusb_host_enable(ssusb);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mtu3_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mtu3_suspend, mtu3_resume)
+};
+
+#define DEV_PM_OPS (IS_ENABLED(CONFIG_PM) ? &mtu3_pm_ops : NULL)
+
+#ifdef CONFIG_OF
+
+static const struct of_device_id mtu3_of_match[] = {
+ {.compatible = "mediatek,mt8173-mtu3",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mtu3_of_match);
+
+#endif
+
+static struct platform_driver mtu3_driver = {
+ .probe = mtu3_probe,
+ .remove = mtu3_remove,
+ .driver = {
+ .name = MTU3_DRIVER_NAME,
+ .pm = DEV_PM_OPS,
+ .of_match_table = of_match_ptr(mtu3_of_match),
+ },
+};
+module_platform_driver(mtu3_driver);
+
+MODULE_AUTHOR("Chunfeng Yun <chunfeng.yun@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek USB3 DRD Controller Driver");
diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
new file mode 100644
index 000000000000..7d9ba8a52368
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_qmu.c
@@ -0,0 +1,573 @@
+/*
+ * mtu3_qmu.c - Queue Management Unit driver for device controller
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * The Queue Management Unit (QMU) offloads the software effort of
+ * servicing DMA interrupts.
+ * Software links the data buffers by preparing General Purpose
+ * Descriptors (GPD) and Buffer Descriptors (BD), then triggers the QMU
+ * to send data to the host or receive data from the host in one go.
+ * Currently only GPDs are supported.
+ *
+ * For more details, please refer to the QMU Programming Guide.
+ */
+
+#include <linux/dmapool.h>
+#include <linux/iopoll.h>
+
+#include "mtu3.h"
+
+#define QMU_CHECKSUM_LEN 16
+
+#define GPD_FLAGS_HWO BIT(0)
+#define GPD_FLAGS_BDP BIT(1)
+#define GPD_FLAGS_BPS BIT(2)
+#define GPD_FLAGS_IOC BIT(7)
+
+#define GPD_EXT_FLAG_ZLP BIT(5)
+
+
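+/* map a GPD's DMA address back to its virtual address within the ring */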
+static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
+ dma_addr_t dma_addr)
+{
+ dma_addr_t dma_base = ring->dma;
+ struct qmu_gpd *gpd_head = ring->start;
+ u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head);
+
+ if (offset >= MAX_GPD_NUM)
+ return NULL;
+
+ return gpd_head + offset;
+}
+
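+/* map a GPD's virtual address to its DMA address within the ring */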
+static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
+ struct qmu_gpd *gpd)
+{
+ dma_addr_t dma_base = ring->dma;
+ struct qmu_gpd *gpd_head = ring->start;
+ u32 offset;
+
+ offset = gpd - gpd_head;
+ if (offset >= MAX_GPD_NUM)
+ return 0;
+
+ return dma_base + (offset * sizeof(*gpd));
+}
+
+static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
+{
+ ring->start = gpd;
+ ring->enqueue = gpd;
+ ring->dequeue = gpd;
+ ring->end = gpd + MAX_GPD_NUM - 1;
+}
+
+static void reset_gpd_list(struct mtu3_ep *mep)
+{
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ struct qmu_gpd *gpd = ring->start;
+
+ if (gpd) {
+ gpd->flag &= ~GPD_FLAGS_HWO;
+ gpd_ring_init(ring, gpd);
+ }
+}
+
+int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
+{
+ struct qmu_gpd *gpd;
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+
+	/* software owns all GPDs by default */
+ gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
+ if (gpd == NULL)
+ return -ENOMEM;
+
+ gpd_ring_init(ring, gpd);
+
+ return 0;
+}
+
+void mtu3_gpd_ring_free(struct mtu3_ep *mep)
+{
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+
+ dma_pool_free(mep->mtu->qmu_gpd_pool,
+ ring->start, ring->dma);
+ memset(ring, 0, sizeof(*ring));
+}
+
+/*
+ * Calculate the checksum of a GPD or BD.
+ * "noinline" and the memory barrier guard against the checksum being
+ * computed on stale data.
+ */
+static noinline u8 qmu_calc_checksum(u8 *data)
+{
+ u8 chksum = 0;
+ int i;
+
+ data[1] = 0x0; /* set checksum to 0 */
+
+ mb(); /* ensure the gpd/bd is really up-to-date */
+ for (i = 0; i < QMU_CHECKSUM_LEN; i++)
+ chksum += data[i];
+
+ /* Default: HWO=1, @flag[bit0] */
+ chksum += 1;
+
+ return 0xFF - chksum;
+}
+
+void mtu3_qmu_resume(struct mtu3_ep *mep)
+{
+ struct mtu3 *mtu = mep->mtu;
+ void __iomem *mbase = mtu->mac_base;
+ int epnum = mep->epnum;
+ u32 offset;
+
+ offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);
+
+ mtu3_writel(mbase, offset, QMU_Q_RESUME);
+ if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE))
+ mtu3_writel(mbase, offset, QMU_Q_RESUME);
+}
+
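+/* advance the enqueue pointer, wrapping around to the ring start */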
+static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
+{
+ if (ring->enqueue < ring->end)
+ ring->enqueue++;
+ else
+ ring->enqueue = ring->start;
+
+ return ring->enqueue;
+}
+
+static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
+{
+ if (ring->dequeue < ring->end)
+ ring->dequeue++;
+ else
+ ring->dequeue = ring->start;
+
+ return ring->dequeue;
+}
+
+/* check if a ring is empty */
+static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
+{
+ struct qmu_gpd *enq = ring->enqueue;
+ struct qmu_gpd *next;
+
+ if (ring->enqueue < ring->end)
+ next = enq + 1;
+ else
+ next = ring->start;
+
+ /* one gpd is reserved to simplify gpd preparation */
+ return next == ring->dequeue;
+}
+
+int mtu3_prepare_transfer(struct mtu3_ep *mep)
+{
+ return gpd_ring_empty(&mep->gpd_ring);
+}
+
+static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
+{
+ struct qmu_gpd *enq;
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ struct qmu_gpd *gpd = ring->enqueue;
+ struct usb_request *req = &mreq->request;
+
+ /* set all fields to zero as default value */
+ memset(gpd, 0, sizeof(*gpd));
+
+ gpd->buffer = cpu_to_le32((u32)req->dma);
+ gpd->buf_len = cpu_to_le16(req->length);
+ gpd->flag |= GPD_FLAGS_IOC;
+
+ /* get the next GPD */
+ enq = advance_enq_gpd(ring);
+ dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p\n",
+ mep->epnum, gpd, enq);
+
+ enq->flag &= ~GPD_FLAGS_HWO;
+ gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq));
+
+ if (req->zero)
+ gpd->ext_flag |= GPD_EXT_FLAG_ZLP;
+
+ gpd->chksum = qmu_calc_checksum((u8 *)gpd);
+ gpd->flag |= GPD_FLAGS_HWO;
+
+ mreq->gpd = gpd;
+
+ return 0;
+}
+
+static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
+{
+ struct qmu_gpd *enq;
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ struct qmu_gpd *gpd = ring->enqueue;
+ struct usb_request *req = &mreq->request;
+
+ /* set all fields to zero as default value */
+ memset(gpd, 0, sizeof(*gpd));
+
+ gpd->buffer = cpu_to_le32((u32)req->dma);
+ gpd->data_buf_len = cpu_to_le16(req->length);
+ gpd->flag |= GPD_FLAGS_IOC;
+
+ /* get the next GPD */
+ enq = advance_enq_gpd(ring);
+ dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p\n",
+ mep->epnum, gpd, enq);
+
+ enq->flag &= ~GPD_FLAGS_HWO;
+ gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq));
+ gpd->chksum = qmu_calc_checksum((u8 *)gpd);
+ gpd->flag |= GPD_FLAGS_HWO;
+
+ mreq->gpd = gpd;
+
+ return 0;
+}
+
+void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
+{
+
+ if (mep->is_in)
+ mtu3_prepare_tx_gpd(mep, mreq);
+ else
+ mtu3_prepare_rx_gpd(mep, mreq);
+}
+
+int mtu3_qmu_start(struct mtu3_ep *mep)
+{
+ struct mtu3 *mtu = mep->mtu;
+ void __iomem *mbase = mtu->mac_base;
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ u8 epnum = mep->epnum;
+
+ if (mep->is_in) {
+ /* set QMU start address */
+ mtu3_writel(mbase, USB_QMU_TQSAR(mep->epnum), ring->dma);
+ mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
+ mtu3_setbits(mbase, U3D_QCR0, QMU_TX_CS_EN(epnum));
+ /* send zero length packet according to ZLP flag in GPD */
+ mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
+ mtu3_writel(mbase, U3D_TQERRIESR0,
+ QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum));
+
+ if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
+ dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
+ return 0;
+ }
+ mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);
+
+ } else {
+ mtu3_writel(mbase, USB_QMU_RQSAR(mep->epnum), ring->dma);
+ mtu3_setbits(mbase, MU3D_EP_RXCR0(mep->epnum), RX_DMAREQEN);
+ mtu3_setbits(mbase, U3D_QCR0, QMU_RX_CS_EN(epnum));
+ /* don't expect ZLP */
+ mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
+		/* move to the next GPD when a ZLP is received */
+ mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum));
+ mtu3_writel(mbase, U3D_RQERRIESR0,
+ QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum));
+ mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum));
+
+ if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
+ dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum);
+ return 0;
+ }
+ mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START);
+ }
+
+ return 0;
+}
+
+/* may be called in atomic context */
+void mtu3_qmu_stop(struct mtu3_ep *mep)
+{
+ struct mtu3 *mtu = mep->mtu;
+ void __iomem *mbase = mtu->mac_base;
+ int epnum = mep->epnum;
+ u32 value = 0;
+ u32 qcsr;
+ int ret;
+
+ qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);
+
+ if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) {
+ dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name);
+ return;
+ }
+ mtu3_writel(mbase, qcsr, QMU_Q_STOP);
+
+ ret = readl_poll_timeout_atomic(mbase + qcsr, value,
+ !(value & QMU_Q_ACTIVE), 1, 1000);
+ if (ret) {
+ dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name);
+ return;
+ }
+
+ dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
+}
+
+void mtu3_qmu_flush(struct mtu3_ep *mep)
+{
+
+ dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__,
+ ((mep->is_in) ? "TX" : "RX"));
+
+	/* stop QMU */
+ mtu3_qmu_stop(mep);
+ reset_gpd_list(mep);
+}
+
+/*
+ * The QMU can't transfer a zero-length packet directly (a hardware limit
+ * on old SoCs), so when a ZLP needs to be sent, we intentionally trigger
+ * a length error interrupt and send the ZLP from the ISR via the BMU.
+ */
+static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
+{
+ struct mtu3_ep *mep = mtu->in_eps + epnum;
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ void __iomem *mbase = mtu->mac_base;
+ struct qmu_gpd *gpd_current = NULL;
+ dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
+ struct usb_request *req = NULL;
+ struct mtu3_request *mreq;
+ u32 txcsr = 0;
+ int ret;
+
+ mreq = next_request(mep);
+ if (mreq && mreq->request.length == 0)
+ req = &mreq->request;
+ else
+ return;
+
+ gpd_current = gpd_dma_to_virt(ring, gpd_dma);
+
+ if (le16_to_cpu(gpd_current->buf_len) != 0) {
+ dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
+ return;
+ }
+
+ dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);
+
+ mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
+
+ ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum),
+ txcsr, !(txcsr & TX_FIFOFULL), 1, 1000);
+ if (ret) {
+ dev_err(mtu->dev, "%s wait for fifo empty fail\n", __func__);
+ return;
+ }
+ mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
+
+	/* bypass the current GPD */
+ gpd_current->flag |= GPD_FLAGS_BPS;
+ gpd_current->chksum = qmu_calc_checksum((u8 *)gpd_current);
+ gpd_current->flag |= GPD_FLAGS_HWO;
+
+	/* enable DMAREQEN and switch back to QMU mode */
+ mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
+ mtu3_qmu_resume(mep);
+}
+
+/*
+ * NOTE: the request list may already be empty in the following case:
+ * queue_tx --> qmu_interrupt (clear interrupt pending, schedule tasklet) -->
+ * queue_tx --> process_tasklet (meanwhile the second request has been
+ * transferred, so the tasklet completes both of them) --> qmu_interrupt for
+ * the second one.
+ * To avoid this case, call qmu_done_tx() directly from the ISR.
+ */
+static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
+{
+ struct mtu3_ep *mep = mtu->in_eps + epnum;
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ void __iomem *mbase = mtu->mac_base;
+ struct qmu_gpd *gpd = ring->dequeue;
+ struct qmu_gpd *gpd_current = NULL;
+ dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
+ struct usb_request *request = NULL;
+ struct mtu3_request *mreq;
+
+	/* translate the DMA address read from the QMU register into a virtual address */
+ gpd_current = gpd_dma_to_virt(ring, gpd_dma);
+
+ dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
+ __func__, epnum, gpd, gpd_current, ring->enqueue);
+
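+	/* complete every request whose GPD the hardware has handed back */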
+ while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
+
+ mreq = next_request(mep);
+
+ if (mreq == NULL || mreq->gpd != gpd) {
+ dev_err(mtu->dev, "no correct TX req is found\n");
+ break;
+ }
+
+ request = &mreq->request;
+ request->actual = le16_to_cpu(gpd->buf_len);
+ mtu3_req_complete(mep, request, 0);
+
+ gpd = advance_deq_gpd(ring);
+ }
+
+ dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
+ __func__, epnum, ring->dequeue, ring->enqueue);
+
+}
+
+static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
+{
+ struct mtu3_ep *mep = mtu->out_eps + epnum;
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ void __iomem *mbase = mtu->mac_base;
+ struct qmu_gpd *gpd = ring->dequeue;
+ struct qmu_gpd *gpd_current = NULL;
+ dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
+ struct usb_request *req = NULL;
+ struct mtu3_request *mreq;
+
+ gpd_current = gpd_dma_to_virt(ring, gpd_dma);
+
+ dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
+ __func__, epnum, gpd, gpd_current, ring->enqueue);
+
+ while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
+
+ mreq = next_request(mep);
+
+ if (mreq == NULL || mreq->gpd != gpd) {
+ dev_err(mtu->dev, "no correct RX req is found\n");
+ break;
+ }
+ req = &mreq->request;
+
+ req->actual = le16_to_cpu(gpd->buf_len);
+ mtu3_req_complete(mep, req, 0);
+
+ gpd = advance_deq_gpd(ring);
+ }
+
+ dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
+ __func__, epnum, ring->dequeue, ring->enqueue);
+}
+
+static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
+{
+ int i;
+
+ for (i = 1; i < mtu->num_eps; i++) {
+ if (done_status & QMU_RX_DONE_INT(i))
+ qmu_done_rx(mtu, i);
+ if (done_status & QMU_TX_DONE_INT(i))
+ qmu_done_tx(mtu, i);
+ }
+}
+
+static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
+{
+ void __iomem *mbase = mtu->mac_base;
+ u32 errval;
+ int i;
+
+ if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
+ errval = mtu3_readl(mbase, U3D_RQERRIR0);
+ for (i = 1; i < mtu->num_eps; i++) {
+ if (errval & QMU_RX_CS_ERR(i))
+ dev_err(mtu->dev, "Rx %d CS error!\n", i);
+
+ if (errval & QMU_RX_LEN_ERR(i))
+ dev_err(mtu->dev, "RX %d Length error\n", i);
+ }
+ mtu3_writel(mbase, U3D_RQERRIR0, errval);
+ }
+
+ if (qmu_status & RXQ_ZLPERR_INT) {
+ errval = mtu3_readl(mbase, U3D_RQERRIR1);
+ for (i = 1; i < mtu->num_eps; i++) {
+ if (errval & QMU_RX_ZLP_ERR(i))
+ dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i);
+ }
+ mtu3_writel(mbase, U3D_RQERRIR1, errval);
+ }
+
+ if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
+ errval = mtu3_readl(mbase, U3D_TQERRIR0);
+ for (i = 1; i < mtu->num_eps; i++) {
+ if (errval & QMU_TX_CS_ERR(i))
+ dev_err(mtu->dev, "Tx %d checksum error!\n", i);
+
+ if (errval & QMU_TX_LEN_ERR(i))
+ qmu_tx_zlp_error_handler(mtu, i);
+ }
+ mtu3_writel(mbase, U3D_TQERRIR0, errval);
+ }
+}
+
+irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ u32 qmu_status;
+ u32 qmu_done_status;
+
+	/* U3D_QISAR1 is update-on-read */
+ qmu_status = mtu3_readl(mbase, U3D_QISAR1);
+ qmu_status &= mtu3_readl(mbase, U3D_QIER1);
+
+ qmu_done_status = mtu3_readl(mbase, U3D_QISAR0);
+ qmu_done_status &= mtu3_readl(mbase, U3D_QIER0);
+ mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */
+ dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
+ (qmu_done_status & 0xFFFF), qmu_done_status >> 16,
+ qmu_status);
+
+ if (qmu_done_status)
+ qmu_done_isr(mtu, qmu_done_status);
+
+ if (qmu_status)
+ qmu_exception_isr(mtu, qmu_status);
+
+ return IRQ_HANDLED;
+}
+
+int mtu3_qmu_init(struct mtu3 *mtu)
+{
+
+ compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B");
+
+ mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev,
+ QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0);
+
+ if (!mtu->qmu_gpd_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mtu3_qmu_exit(struct mtu3 *mtu)
+{
+ dma_pool_destroy(mtu->qmu_gpd_pool);
+}
diff --git a/drivers/usb/mtu3/mtu3_qmu.h b/drivers/usb/mtu3/mtu3_qmu.h
new file mode 100644
index 000000000000..4dafa16bf120
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_qmu.h
@@ -0,0 +1,43 @@
+/*
+ * mtu3_qmu.h - Queue Management Unit driver header
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MTK_QMU_H__
+#define __MTK_QMU_H__
+
+#define MAX_GPD_NUM 64
+#define QMU_GPD_SIZE (sizeof(struct qmu_gpd))
+#define QMU_GPD_RING_SIZE (MAX_GPD_NUM * QMU_GPD_SIZE)
+
+#define GPD_BUF_SIZE 65532
+
+void mtu3_qmu_stop(struct mtu3_ep *mep);
+int mtu3_qmu_start(struct mtu3_ep *mep);
+void mtu3_qmu_resume(struct mtu3_ep *mep);
+void mtu3_qmu_flush(struct mtu3_ep *mep);
+
+void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq);
+int mtu3_prepare_transfer(struct mtu3_ep *mep);
+
+int mtu3_gpd_ring_alloc(struct mtu3_ep *mep);
+void mtu3_gpd_ring_free(struct mtu3_ep *mep);
+
+irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu);
+int mtu3_qmu_init(struct mtu3 *mtu);
+void mtu3_qmu_exit(struct mtu3 *mtu);
+
+#endif
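
A rough sketch of the order in which the gadget side is expected to drive
these entry points; the surrounding endpoint and request setup is assumed
and not shown here:

	/* one-time endpoint setup */
	mtu3_gpd_ring_alloc(mep);
	mtu3_qmu_start(mep);

	/* per request, from the gadget queue path */
	if (mtu3_prepare_transfer(mep))	/* nonzero: no free GPD besides the reserved one */
		return -EAGAIN;
	mtu3_insert_gpd(mep, mreq);
	mtu3_qmu_resume(mep);

	/* completions arrive through mtu3_qmu_isr(); teardown mirrors setup */
	mtu3_qmu_flush(mep);
	mtu3_gpd_ring_free(mep);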
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 2440f88e07a3..e89708d839e5 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -6,6 +6,9 @@
* Based on the DaVinci "glue layer" code.
* Copyright (C) 2005-2006 by Texas Instruments
*
+ * DT support
+ * Copyright (c) 2016 Petr Kulhavy <petr@barix.com>
+ *
* This file is part of the Inventra Controller Driver for Linux.
*
* The Inventra Controller Driver for Linux is free software; you
@@ -340,6 +343,13 @@ static int da8xx_musb_set_mode(struct musb *musb, u8 musb_mode)
struct da8xx_glue *glue = dev_get_drvdata(musb->controller->parent);
enum phy_mode phy_mode;
+ /*
+	 * The PHY has some issues when it is forced into device or host mode.
+	 * Unless the user requests another mode, configure the PHY in OTG mode.
+ */
+ if (!musb->is_initialized)
+ return phy_set_mode(glue->phy, PHY_MODE_USB_OTG);
+
switch (musb_mode) {
case MUSB_HOST: /* Force VBUS valid, ID = 0 */
phy_mode = PHY_MODE_USB_HOST;
@@ -366,6 +376,12 @@ static int da8xx_musb_init(struct musb *musb)
musb->mregs += DA8XX_MENTOR_CORE_OFFSET;
+ ret = clk_prepare_enable(glue->clk);
+ if (ret) {
+ dev_err(glue->dev, "failed to enable clock\n");
+ return ret;
+ }
+
/* Returns zero if e.g. not clocked */
rev = musb_readl(reg_base, DA8XX_USB_REVISION_REG);
if (!rev)
@@ -377,12 +393,6 @@ static int da8xx_musb_init(struct musb *musb)
goto fail;
}
- ret = clk_prepare_enable(glue->clk);
- if (ret) {
- dev_err(glue->dev, "failed to enable clock\n");
- goto fail;
- }
-
setup_timer(&otg_workaround, otg_timer, (unsigned long)musb);
/* Reset the controller */
@@ -392,7 +402,7 @@ static int da8xx_musb_init(struct musb *musb)
ret = phy_init(glue->phy);
if (ret) {
dev_err(glue->dev, "Failed to init phy.\n");
- goto err_phy_init;
+ goto fail;
}
ret = phy_power_on(glue->phy);
@@ -412,9 +422,8 @@ static int da8xx_musb_init(struct musb *musb)
err_phy_power_on:
phy_exit(glue->phy);
-err_phy_init:
- clk_disable_unprepare(glue->clk);
fail:
+ clk_disable_unprepare(glue->clk);
return ret;
}
@@ -433,6 +442,21 @@ static int da8xx_musb_exit(struct musb *musb)
return 0;
}
+static inline u8 get_vbus_power(struct device *dev)
+{
+ struct regulator *vbus_supply;
+ int current_uA;
+
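+	/* musb expects .power in units of 2 mA; 255 (510 mA) is the default maximum */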
+ vbus_supply = regulator_get_optional(dev, "vbus");
+ if (IS_ERR(vbus_supply))
+ return 255;
+ current_uA = regulator_get_current_limit(vbus_supply);
+ regulator_put(vbus_supply);
+ if (current_uA <= 0 || current_uA > 510000)
+ return 255;
+ return current_uA / 1000 / 2;
+}
+
static const struct musb_platform_ops da8xx_ops = {
.quirks = MUSB_DMA_CPPI | MUSB_INDEXED_EP,
.init = da8xx_musb_init,
@@ -458,6 +482,12 @@ static const struct platform_device_info da8xx_dev_info = {
.dma_mask = DMA_BIT_MASK(32),
};
+static const struct musb_hdrc_config da8xx_config = {
+ .ram_bits = 10,
+ .num_eps = 5,
+ .multipoint = 1,
+};
+
static int da8xx_probe(struct platform_device *pdev)
{
struct resource musb_resources[2];
@@ -465,6 +495,7 @@ static int da8xx_probe(struct platform_device *pdev)
struct da8xx_glue *glue;
struct platform_device_info pinfo;
struct clk *clk;
+ struct device_node *np = pdev->dev.of_node;
int ret;
glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
@@ -487,6 +518,16 @@ static int da8xx_probe(struct platform_device *pdev)
glue->dev = &pdev->dev;
glue->clk = clk;
+ if (IS_ENABLED(CONFIG_OF) && np) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->config = &da8xx_config;
+ pdata->mode = musb_get_mode(&pdev->dev);
+ pdata->power = get_vbus_power(&pdev->dev);
+ }
+
pdata->platform_ops = &da8xx_ops;
glue->usb_phy = usb_phy_generic_register();
@@ -537,11 +578,22 @@ static int da8xx_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id da8xx_id_table[] = {
+ {
+ .compatible = "ti,da830-musb",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, da8xx_id_table);
+#endif
+
static struct platform_driver da8xx_driver = {
.probe = da8xx_probe,
.remove = da8xx_remove,
.driver = {
.name = "musb-da8xx",
+ .of_match_table = of_match_ptr(da8xx_id_table),
},
};
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c3e172e15ec3..9e226468a13e 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -100,6 +100,7 @@
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>
+#include <linux/usb/of.h>
#include "musb_core.h"
#include "musb_trace.h"
@@ -130,6 +131,24 @@ static inline struct musb *dev_to_musb(struct device *dev)
return dev_get_drvdata(dev);
}
+enum musb_mode musb_get_mode(struct device *dev)
+{
+ enum usb_dr_mode mode;
+
+ mode = usb_get_dr_mode(dev);
+ switch (mode) {
+ case USB_DR_MODE_HOST:
+ return MUSB_HOST;
+ case USB_DR_MODE_PERIPHERAL:
+ return MUSB_PERIPHERAL;
+ case USB_DR_MODE_OTG:
+ case USB_DR_MODE_UNKNOWN:
+ default:
+ return MUSB_OTG;
+ }
+}
+EXPORT_SYMBOL_GPL(musb_get_mode);
+
/*-------------------------------------------------------------------------*/
#ifndef CONFIG_BLACKFIN
@@ -569,10 +588,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
if (devctl & MUSB_DEVCTL_HM) {
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_SUSPEND:
- /* remote wakeup? later, GetPortStatus
- * will stop RESUME signaling
- */
-
+ /* remote wakeup? */
musb->port1_status |=
(USB_PORT_STAT_C_SUSPEND << 16)
| MUSB_PORT_STAT_RESUME;
@@ -2414,8 +2430,9 @@ fail2:
musb_platform_exit(musb);
fail1:
- dev_err(musb->controller,
- "musb_init_controller failed with status %d\n", status);
+ if (status != -EPROBE_DEFER)
+ dev_err(musb->controller,
+ "%s failed with status %d\n", __func__, status);
musb_free(musb);
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 91817d77d59c..a611e2f67bdc 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -626,4 +626,10 @@ static inline void musb_platform_post_root_reset_end(struct musb *musb)
musb->ops->post_root_reset_end(musb);
}
+/*
+ * gets the "dr_mode" property from DT and converts it into musb_mode
+ * if the property is not found or not recognized returns MUSB_OTG
+ */
+extern enum musb_mode musb_get_mode(struct device *dev);
+
#endif /* __MUSB_CORE_H__ */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index a55173c9e564..1acc4864f9f6 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -974,8 +974,8 @@ static int musb_gadget_enable(struct usb_ep *ep,
goto fail;
/* REVISIT this rules out high bandwidth periodic transfers */
- tmp = usb_endpoint_maxp(desc);
- if (tmp & ~0x07ff) {
+ tmp = usb_endpoint_maxp_mult(desc) - 1;
+ if (tmp) {
int ok;
if (usb_endpoint_dir_in(desc))
@@ -987,12 +987,12 @@ static int musb_gadget_enable(struct usb_ep *ep,
musb_dbg(musb, "no support for high bandwidth ISO");
goto fail;
}
- musb_ep->hb_mult = (tmp >> 11) & 3;
+ musb_ep->hb_mult = tmp;
} else {
musb_ep->hb_mult = 0;
}
- musb_ep->packet_sz = tmp & 0x7ff;
+ musb_ep->packet_sz = usb_endpoint_maxp(desc);
tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
/* enable the interrupts for the endpoint, set the endpoint
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 53bc4ceefe89..f6cdbad00dac 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2237,7 +2237,7 @@ static int musb_urb_enqueue(
* Some musb cores don't support high bandwidth ISO transfers; and
* we don't (yet!) support high bandwidth interrupt transfers.
*/
- qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
+ qh->hb_mult = usb_endpoint_maxp_mult(epd);
if (qh->hb_mult > 1) {
int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 61b5f1c3c5bc..0b4595439d51 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -132,7 +132,6 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
musb_dbg(musb, "Root port resuming, power %02x", power);
- /* later, GetPortStatus will stop RESUME signaling */
musb->port1_status |= MUSB_PORT_STAT_RESUME;
schedule_delayed_work(&musb->finish_resume_work,
msecs_to_jiffies(USB_RESUME_TIMEOUT));
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index e8be8e39ab8f..8b73214a9ea3 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -277,12 +277,12 @@ static int omap2430_musb_init(struct musb *musb)
if (status == -ENXIO)
return status;
- pr_err("HS USB OTG: no transceiver configured\n");
+ dev_dbg(dev, "HS USB OTG: no transceiver configured\n");
return -EPROBE_DEFER;
}
if (IS_ERR(musb->phy)) {
- pr_err("HS USB OTG: no PHY configured\n");
+ dev_err(dev, "HS USB OTG: no PHY configured\n");
return PTR_ERR(musb->phy);
}
musb->isr = omap2430_musb_interrupt;
@@ -301,7 +301,7 @@ static int omap2430_musb_init(struct musb *musb)
musb_writel(musb->mregs, OTG_INTERFSEL, l);
- pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
+ dev_dbg(dev, "HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
"sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n",
musb_readl(musb->mregs, OTG_REVISION),
musb_readl(musb->mregs, OTG_SYSCONFIG),
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index 1408245be18e..d0be0eadd0d9 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -186,16 +186,6 @@ static irqreturn_t sunxi_musb_interrupt(int irq, void *__hci)
if (musb->int_usb)
writeb(musb->int_usb, musb->mregs + SUNXI_MUSB_INTRUSB);
- /*
- * sunxi musb often signals babble on low / full speed device
- * disconnect, without ever raising MUSB_INTR_DISCONNECT, since
- * normally babble never happens treat it as disconnect.
- */
- if ((musb->int_usb & MUSB_INTR_BABBLE) && is_host_active(musb)) {
- musb->int_usb &= ~MUSB_INTR_BABBLE;
- musb->int_usb |= MUSB_INTR_DISCONNECT;
- }
-
if ((musb->int_usb & MUSB_INTR_RESET) && !is_host_active(musb)) {
/* ep0 FADDR must be 0 when (re)entering peripheral mode */
musb_ep_select(musb->mregs, 0);
@@ -390,6 +380,20 @@ static int sunxi_musb_set_mode(struct musb *musb, u8 mode)
return 0;
}
+static int sunxi_musb_recover(struct musb *musb)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ /*
+ * Schedule a phy_set_mode with the current glue->phy_mode value,
+ * this will force end the current session.
+ */
+ set_bit(SUNXI_MUSB_FL_PHY_MODE_PEND, &glue->flags);
+ schedule_work(&glue->work);
+
+ return 0;
+}
+
/*
* sunxi musb register layout
* 0x00 - 0x17 fifo regs, 1 long per fifo
@@ -618,6 +622,7 @@ static const struct musb_platform_ops sunxi_musb_ops = {
.dma_init = sunxi_musb_dma_controller_create,
.dma_exit = sunxi_musb_dma_controller_destroy,
.set_mode = sunxi_musb_set_mode,
+ .recover = sunxi_musb_recover,
.set_vbus = sunxi_musb_set_vbus,
.pre_root_reset_end = sunxi_musb_pre_root_reset_end,
.post_root_reset_end = sunxi_musb_post_root_reset_end,
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index b9c409a18faa..61cef7511a50 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -84,6 +84,7 @@ config SAMSUNG_USBPHY
config TWL6030_USB
tristate "TWL6030 USB Transceiver Driver"
depends on TWL4030_CORE && OMAP_USB2 && USB_MUSB_OMAP2PLUS
+ depends on OF
help
Enable this to support the USB OTG transceiver on TWL6030
family chips. This TWL6030 transceiver has the VBUS and ID GND
diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
index 42a1afe36a90..5f5f19813fde 100644
--- a/drivers/usb/phy/phy-am335x-control.c
+++ b/drivers/usb/phy/phy-am335x-control.c
@@ -134,10 +134,12 @@ struct phy_control *am335x_get_phy_control(struct device *dev)
return NULL;
dev = bus_find_device(&platform_bus_type, NULL, node, match);
+ of_node_put(node);
if (!dev)
return NULL;
ctrl_usb = dev_get_drvdata(dev);
+ put_device(dev);
if (!ctrl_usb)
return NULL;
return &ctrl_usb->phy_ctrl;
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 8311ba2968cd..89d6e7a5fdb7 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -59,6 +59,15 @@ EXPORT_SYMBOL_GPL(usb_phy_generic_unregister);
static int nop_set_suspend(struct usb_phy *x, int suspend)
{
+ struct usb_phy_generic *nop = dev_get_drvdata(x->dev);
+
+ if (!IS_ERR(nop->clk)) {
+ if (suspend)
+ clk_disable_unprepare(nop->clk);
+ else
+ clk_prepare_enable(nop->clk);
+ }
+
return 0;
}
diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
index 8d111ec653e4..042c5a8fd423 100644
--- a/drivers/usb/phy/phy-isp1301-omap.c
+++ b/drivers/usb/phy/phy-isp1301-omap.c
@@ -94,7 +94,7 @@ struct isp1301 {
#if defined(CONFIG_MACH_OMAP_H2) || defined(CONFIG_MACH_OMAP_H3)
-#if defined(CONFIG_TPS65010) || (defined(CONFIG_TPS65010_MODULE) && defined(MODULE))
+#if IS_REACHABLE(CONFIG_TPS65010)
#include <linux/i2c/tps65010.h>
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index a72e8d670adc..628b600b02b1 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -108,7 +108,6 @@ struct twl6030_usb {
enum musb_vbus_id_status linkstat;
u8 asleep;
bool vbus_enable;
- const char *regulator;
};
#define comparator_to_twl(x) container_of((x), struct twl6030_usb, comparator)
@@ -166,7 +165,7 @@ static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
/* Program MISC2 register and set bit VUSB_IN_VBAT */
twl6030_writeb(twl, TWL6030_MODULE_ID0, 0x10, TWL6030_MISC2);
- twl->usb3v3 = regulator_get(twl->dev, twl->regulator);
+ twl->usb3v3 = regulator_get(twl->dev, "usb");
if (IS_ERR(twl->usb3v3))
return -ENODEV;
@@ -341,7 +340,11 @@ static int twl6030_usb_probe(struct platform_device *pdev)
int status, err;
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- struct twl4030_usb_data *pdata = dev_get_platdata(dev);
+
+ if (!np) {
+ dev_err(dev, "no DT info\n");
+ return -EINVAL;
+ }
twl = devm_kzalloc(dev, sizeof(*twl), GFP_KERNEL);
if (!twl)
@@ -361,18 +364,6 @@ static int twl6030_usb_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- if (np) {
- twl->regulator = "usb";
- } else if (pdata) {
- if (pdata->features & TWL6032_SUBCLASS)
- twl->regulator = "ldousb";
- else
- twl->regulator = "vusb";
- } else {
- dev_err(&pdev->dev, "twl6030 initialized without pdata\n");
- return -EINVAL;
- }
-
/* init spinlock for workqueue */
spin_lock_init(&twl->lock);
@@ -436,13 +427,11 @@ static int twl6030_usb_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
static const struct of_device_id twl6030_usb_id_table[] = {
{ .compatible = "ti,twl6030-usb" },
{}
};
MODULE_DEVICE_TABLE(of, twl6030_usb_id_table);
-#endif
static struct platform_driver twl6030_usb_driver = {
.probe = twl6030_usb_probe,
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 857e78337324..d1af831f43eb 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -100,10 +100,7 @@ static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
{
- if (list_empty(&pipe->list))
- return NULL;
-
- return list_first_entry(&pipe->list, struct usbhs_pkt, node);
+ return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node);
}
static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 56ecb8b5115d..d9bc8dafe000 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -255,6 +255,16 @@ config USB_SERIAL_F81232
To compile this driver as a module, choose M here: the
module will be called f81232.
+config USB_SERIAL_F8153X
+ tristate "USB Fintek F81532/534 Multi-Ports Serial Driver"
+ help
+ Say Y here if you want to use the Fintek F81532/534 Multi-Ports
+ USB to serial adapter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called f81534.
+
+
config USB_SERIAL_GARMIN
tristate "USB Garmin GPS driver"
help
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 349d9df0895f..9e43b7b002eb 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_USB_SERIAL_EDGEPORT) += io_edgeport.o
obj-$(CONFIG_USB_SERIAL_EDGEPORT_TI) += io_ti.o
obj-$(CONFIG_USB_SERIAL_EMPEG) += empeg.o
obj-$(CONFIG_USB_SERIAL_F81232) += f81232.o
+obj-$(CONFIG_USB_SERIAL_F8153X) += f81534.o
obj-$(CONFIG_USB_SERIAL_FTDI_SIO) += ftdi_sio.o
obj-$(CONFIG_USB_SERIAL_GARMIN) += garmin_gps.o
obj-$(CONFIG_USB_SERIAL_IPAQ) += ipaq.o
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index f139488d0816..2597b83a8ae2 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -61,13 +61,26 @@
* the Net/FreeBSD uchcom.c driver by Takanori Watanabe. Domo arigato.
*/
+#define CH341_REQ_READ_VERSION 0x5F
#define CH341_REQ_WRITE_REG 0x9A
#define CH341_REQ_READ_REG 0x95
-#define CH341_REG_BREAK1 0x05
-#define CH341_REG_BREAK2 0x18
-#define CH341_NBREAK_BITS_REG1 0x01
-#define CH341_NBREAK_BITS_REG2 0x40
-
+#define CH341_REQ_SERIAL_INIT 0xA1
+#define CH341_REQ_MODEM_CTRL 0xA4
+
+#define CH341_REG_BREAK 0x05
+#define CH341_REG_LCR 0x18
+#define CH341_NBREAK_BITS 0x01
+
+#define CH341_LCR_ENABLE_RX 0x80
+#define CH341_LCR_ENABLE_TX 0x40
+#define CH341_LCR_MARK_SPACE 0x20
+#define CH341_LCR_PAR_EVEN 0x10
+#define CH341_LCR_ENABLE_PAR 0x08
+#define CH341_LCR_STOP_BITS_2 0x04
+#define CH341_LCR_CS8 0x03
+#define CH341_LCR_CS7 0x02
+#define CH341_LCR_CS6 0x01
+#define CH341_LCR_CS5 0x00
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x4348, 0x5523) },
@@ -119,10 +132,10 @@ static int ch341_control_in(struct usb_device *dev,
return r;
}
-static int ch341_set_baudrate(struct usb_device *dev,
- struct ch341_private *priv)
+static int ch341_init_set_baudrate(struct usb_device *dev,
+ struct ch341_private *priv, unsigned ctrl)
{
- short a, b;
+ short a;
int r;
unsigned long factor;
short divisor;
@@ -142,18 +155,17 @@ static int ch341_set_baudrate(struct usb_device *dev,
factor = 0x10000 - factor;
a = (factor & 0xff00) | divisor;
- b = factor & 0xff;
- r = ch341_control_out(dev, 0x9a, 0x1312, a);
- if (!r)
- r = ch341_control_out(dev, 0x9a, 0x0f2c, b);
+ /* 0x9c is "enable SFR_UART Control register and timer" */
+ r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT,
+ 0x9c | (ctrl << 8), a | 0x80);
return r;
}
static int ch341_set_handshake(struct usb_device *dev, u8 control)
{
- return ch341_control_out(dev, 0xa4, ~control, 0);
+ return ch341_control_out(dev, CH341_REQ_MODEM_CTRL, ~control, 0);
}
static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
@@ -167,7 +179,7 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
if (!buffer)
return -ENOMEM;
- r = ch341_control_in(dev, 0x95, 0x0706, 0, buffer, size);
+ r = ch341_control_in(dev, CH341_REQ_READ_REG, 0x0706, 0, buffer, size);
if (r < 0)
goto out;
@@ -197,24 +209,21 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
return -ENOMEM;
/* expect two bytes 0x27 0x00 */
- r = ch341_control_in(dev, 0x5f, 0, 0, buffer, size);
+ r = ch341_control_in(dev, CH341_REQ_READ_VERSION, 0, 0, buffer, size);
if (r < 0)
goto out;
+ dev_dbg(&dev->dev, "Chip version: 0x%02x\n", buffer[0]);
- r = ch341_control_out(dev, 0xa1, 0, 0);
- if (r < 0)
- goto out;
-
- r = ch341_set_baudrate(dev, priv);
+ r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, 0, 0);
if (r < 0)
goto out;
/* expect two bytes 0x56 0x00 */
- r = ch341_control_in(dev, 0x95, 0x2518, 0, buffer, size);
+ r = ch341_control_in(dev, CH341_REQ_READ_REG, 0x2518, 0, buffer, size);
if (r < 0)
goto out;
- r = ch341_control_out(dev, 0x9a, 0x2518, 0x0050);
+ r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x2518, 0x0050);
if (r < 0)
goto out;
@@ -223,11 +232,7 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
if (r < 0)
goto out;
- r = ch341_control_out(dev, 0xa1, 0x501f, 0xd90a);
- if (r < 0)
- goto out;
-
- r = ch341_set_baudrate(dev, priv);
+ r = ch341_init_set_baudrate(dev, priv, 0);
if (r < 0)
goto out;
@@ -342,16 +347,53 @@ static void ch341_set_termios(struct tty_struct *tty,
struct ch341_private *priv = usb_get_serial_port_data(port);
unsigned baud_rate;
unsigned long flags;
+ unsigned char ctrl;
+ int r;
+
+ /* redundant changes may cause the chip to lose bytes */
+ if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
+ return;
baud_rate = tty_get_baud_rate(tty);
priv->baud_rate = baud_rate;
+ ctrl = CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX;
+
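+	/* assemble the LCR value: word size, parity and stop bits */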
+ switch (C_CSIZE(tty)) {
+ case CS5:
+ ctrl |= CH341_LCR_CS5;
+ break;
+ case CS6:
+ ctrl |= CH341_LCR_CS6;
+ break;
+ case CS7:
+ ctrl |= CH341_LCR_CS7;
+ break;
+ case CS8:
+ ctrl |= CH341_LCR_CS8;
+ break;
+ }
+
+ if (C_PARENB(tty)) {
+ ctrl |= CH341_LCR_ENABLE_PAR;
+ if (C_PARODD(tty) == 0)
+ ctrl |= CH341_LCR_PAR_EVEN;
+ if (C_CMSPAR(tty))
+ ctrl |= CH341_LCR_MARK_SPACE;
+ }
+
+ if (C_CSTOPB(tty))
+ ctrl |= CH341_LCR_STOP_BITS_2;
if (baud_rate) {
spin_lock_irqsave(&priv->lock, flags);
priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
spin_unlock_irqrestore(&priv->lock, flags);
- ch341_set_baudrate(port->serial->dev, priv);
+ r = ch341_init_set_baudrate(port->serial->dev, priv, ctrl);
+ if (r < 0 && old_termios) {
+ priv->baud_rate = tty_termios_baud_rate(old_termios);
+ tty_termios_copy_hw(&tty->termios, old_termios);
+ }
} else {
spin_lock_irqsave(&priv->lock, flags);
priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
@@ -360,17 +402,12 @@ static void ch341_set_termios(struct tty_struct *tty,
ch341_set_handshake(port->serial->dev, priv->line_control);
- /* Unimplemented:
- * (cflag & CSIZE) : data bits [5, 8]
- * (cflag & PARENB) : parity {NONE, EVEN, ODD}
- * (cflag & CSTOPB) : stop bits [1, 2]
- */
}
static void ch341_break_ctl(struct tty_struct *tty, int break_state)
{
const uint16_t ch341_break_reg =
- ((uint16_t) CH341_REG_BREAK2 << 8) | CH341_REG_BREAK1;
+ ((uint16_t) CH341_REG_LCR << 8) | CH341_REG_BREAK;
struct usb_serial_port *port = tty->driver_data;
int r;
uint16_t reg_contents;
@@ -391,12 +428,12 @@ static void ch341_break_ctl(struct tty_struct *tty, int break_state)
__func__, break_reg[0], break_reg[1]);
if (break_state != 0) {
dev_dbg(&port->dev, "%s - Enter break state requested\n", __func__);
- break_reg[0] &= ~CH341_NBREAK_BITS_REG1;
- break_reg[1] &= ~CH341_NBREAK_BITS_REG2;
+ break_reg[0] &= ~CH341_NBREAK_BITS;
+ break_reg[1] &= ~CH341_LCR_ENABLE_TX;
} else {
dev_dbg(&port->dev, "%s - Leave break state requested\n", __func__);
- break_reg[0] |= CH341_NBREAK_BITS_REG1;
- break_reg[1] |= CH341_NBREAK_BITS_REG2;
+ break_reg[0] |= CH341_NBREAK_BITS;
+ break_reg[1] |= CH341_LCR_ENABLE_TX;
}
dev_dbg(&port->dev, "%s - New ch341 break register contents - reg1: %x, reg2: %x\n",
__func__, break_reg[0], break_reg[1]);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 243ac5ebe46a..fff718352e0c 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -23,6 +23,9 @@
#include <linux/usb.h>
#include <linux/uaccess.h>
#include <linux/usb/serial.h>
+#include <linux/gpio/driver.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
#define DRIVER_DESC "Silicon Labs CP210x RS232 serial adaptor driver"
@@ -33,7 +36,7 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *);
static void cp210x_close(struct usb_serial_port *);
static void cp210x_get_termios(struct tty_struct *, struct usb_serial_port *);
static void cp210x_get_termios_port(struct usb_serial_port *port,
- unsigned int *cflagp, unsigned int *baudp);
+ tcflag_t *cflagp, unsigned int *baudp);
static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *,
struct ktermios *);
static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *,
@@ -44,6 +47,9 @@ static int cp210x_tiocmset(struct tty_struct *, unsigned int, unsigned int);
static int cp210x_tiocmset_port(struct usb_serial_port *port,
unsigned int, unsigned int);
static void cp210x_break_ctl(struct tty_struct *, int);
+static int cp210x_attach(struct usb_serial *);
+static void cp210x_disconnect(struct usb_serial *);
+static void cp210x_release(struct usb_serial *);
static int cp210x_port_probe(struct usb_serial_port *);
static int cp210x_port_remove(struct usb_serial_port *);
static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
@@ -209,6 +215,16 @@ static const struct usb_device_id id_table[] = {
MODULE_DEVICE_TABLE(usb, id_table);
+struct cp210x_serial_private {
+#ifdef CONFIG_GPIOLIB
+ struct gpio_chip gc;
+ u8 config;
+ u8 gpio_mode;
+ bool gpio_registered;
+#endif
+ u8 partnum;
+};
+
struct cp210x_port_private {
__u8 bInterfaceNumber;
bool has_swapped_line_ctl;
@@ -230,6 +246,9 @@ static struct usb_serial_driver cp210x_device = {
.tx_empty = cp210x_tx_empty,
.tiocmget = cp210x_tiocmget,
.tiocmset = cp210x_tiocmset,
+ .attach = cp210x_attach,
+ .disconnect = cp210x_disconnect,
+ .release = cp210x_release,
.port_probe = cp210x_port_probe,
.port_remove = cp210x_port_remove,
.dtr_rts = cp210x_dtr_rts
@@ -272,6 +291,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
#define CP210X_SET_CHARS 0x19
#define CP210X_GET_BAUDRATE 0x1D
#define CP210X_SET_BAUDRATE 0x1E
+#define CP210X_VENDOR_SPECIFIC 0xFF
/* CP210X_IFC_ENABLE */
#define UART_ENABLE 0x0001
@@ -314,6 +334,21 @@ static struct usb_serial_driver * const serial_drivers[] = {
#define CONTROL_WRITE_DTR 0x0100
#define CONTROL_WRITE_RTS 0x0200
+/* CP210X_VENDOR_SPECIFIC values */
+#define CP210X_READ_LATCH 0x00C2
+#define CP210X_GET_PARTNUM 0x370B
+#define CP210X_GET_PORTCONFIG 0x370C
+#define CP210X_GET_DEVICEMODE 0x3711
+#define CP210X_WRITE_LATCH 0x37E1
+
+/* Part number definitions */
+#define CP210X_PARTNUM_CP2101 0x01
+#define CP210X_PARTNUM_CP2102 0x02
+#define CP210X_PARTNUM_CP2103 0x03
+#define CP210X_PARTNUM_CP2104 0x04
+#define CP210X_PARTNUM_CP2105 0x05
+#define CP210X_PARTNUM_CP2108 0x08
+
/* CP210X_GET_COMM_STATUS returns these 0x13 bytes */
struct cp210x_comm_status {
__le32 ulErrors;
@@ -369,6 +404,60 @@ struct cp210x_flow_ctl {
#define CP210X_SERIAL_RTS_ACTIVE 1
#define CP210X_SERIAL_RTS_FLOW_CTL 2
+/* CP210X_VENDOR_SPECIFIC, CP210X_GET_DEVICEMODE call reads these 0x2 bytes. */
+struct cp210x_pin_mode {
+ u8 eci;
+ u8 sci;
+} __packed;
+
+#define CP210X_PIN_MODE_MODEM 0
+#define CP210X_PIN_MODE_GPIO BIT(0)
+
+/*
+ * CP210X_VENDOR_SPECIFIC, CP210X_GET_PORTCONFIG call reads these 0xf bytes.
+ * Structure needs padding due to unused/unspecified bytes.
+ */
+struct cp210x_config {
+ __le16 gpio_mode;
+ u8 __pad0[2];
+ __le16 reset_state;
+ u8 __pad1[4];
+ __le16 suspend_state;
+ u8 sci_cfg;
+ u8 eci_cfg;
+ u8 device_cfg;
+} __packed;
+
+/* GPIO modes */
+#define CP210X_SCI_GPIO_MODE_OFFSET 9
+#define CP210X_SCI_GPIO_MODE_MASK GENMASK(11, 9)
+
+#define CP210X_ECI_GPIO_MODE_OFFSET 2
+#define CP210X_ECI_GPIO_MODE_MASK GENMASK(3, 2)
+
+/* CP2105 port configuration values */
+#define CP2105_GPIO0_TXLED_MODE BIT(0)
+#define CP2105_GPIO1_RXLED_MODE BIT(1)
+#define CP2105_GPIO1_RS485_MODE BIT(2)
+
+/* CP210X_VENDOR_SPECIFIC, CP210X_WRITE_LATCH call writes these 0x2 bytes. */
+struct cp210x_gpio_write {
+ u8 mask;
+ u8 state;
+} __packed;
+
+/*
+ * Helper to get interface number when we only have struct usb_serial.
+ */
+static u8 cp210x_interface_num(struct usb_serial *serial)
+{
+ struct usb_host_interface *cur_altsetting;
+
+ cur_altsetting = serial->interface->cur_altsetting;
+
+ return cur_altsetting->desc.bInterfaceNumber;
+}
+
/*
* Reads a variable-sized block of CP210X_ registers, identified by req.
* Returns data into buf in native USB byte order.
@@ -402,7 +491,7 @@ static int cp210x_read_reg_block(struct usb_serial_port *port, u8 req,
dev_err(&port->dev, "failed get req 0x%x size %d status: %d\n",
req, bufsize, result);
if (result >= 0)
- result = -EPROTO;
+ result = -EIO;
/*
* FIXME Some callers don't bother to check for error,
@@ -465,6 +554,40 @@ static int cp210x_read_u8_reg(struct usb_serial_port *port, u8 req, u8 *val)
}
/*
+ * Reads a variable-sized vendor block of CP210X_ registers, identified by val.
+ * Returns data into buf in native USB byte order.
+ */
+static int cp210x_read_vendor_block(struct usb_serial *serial, u8 type, u16 val,
+ void *buf, int bufsize)
+{
+ void *dmabuf;
+ int result;
+
+ dmabuf = kmalloc(bufsize, GFP_KERNEL);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ CP210X_VENDOR_SPECIFIC, type, val,
+ cp210x_interface_num(serial), dmabuf, bufsize,
+ USB_CTRL_GET_TIMEOUT);
+ if (result == bufsize) {
+ memcpy(buf, dmabuf, bufsize);
+ result = 0;
+ } else {
+ dev_err(&serial->interface->dev,
+ "failed to get vendor val 0x%04x size %d: %d\n", val,
+ bufsize, result);
+ if (result >= 0)
+ result = -EIO;
+ }
+
+ kfree(dmabuf);
+
+ return result;
+}
+
+/*
* Writes any 16-bit CP210X_ register (req) whose value is passed
* entirely in the wValue field of the USB request.
*/
@@ -515,7 +638,7 @@ static int cp210x_write_reg_block(struct usb_serial_port *port, u8 req,
dev_err(&port->dev, "failed set req 0x%x size %d status: %d\n",
req, bufsize, result);
if (result >= 0)
- result = -EPROTO;
+ result = -EIO;
}
return result;
@@ -533,6 +656,42 @@ static int cp210x_write_u32_reg(struct usb_serial_port *port, u8 req, u32 val)
return cp210x_write_reg_block(port, req, &le32_val, sizeof(le32_val));
}
+#ifdef CONFIG_GPIOLIB
+/*
+ * Writes a variable-sized vendor block of CP210X_ registers, identified by val.
+ * Data in buf must be in native USB byte order.
+ */
+static int cp210x_write_vendor_block(struct usb_serial *serial, u8 type,
+ u16 val, void *buf, int bufsize)
+{
+ void *dmabuf;
+ int result;
+
+ dmabuf = kmemdup(buf, bufsize, GFP_KERNEL);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
+ CP210X_VENDOR_SPECIFIC, type, val,
+ cp210x_interface_num(serial), dmabuf, bufsize,
+ USB_CTRL_SET_TIMEOUT);
+
+ kfree(dmabuf);
+
+ if (result == bufsize) {
+ result = 0;
+ } else {
+ dev_err(&serial->interface->dev,
+ "failed to set vendor val 0x%04x size %d: %d\n", val,
+ bufsize, result);
+ if (result >= 0)
+ result = -EIO;
+ }
+
+ return result;
+}
+#endif
+
/*
* Detect CP2108 GET_LINE_CTL bug and activate workaround.
* Write a known good value 0x800, read it back.
@@ -683,7 +842,7 @@ static int cp210x_get_tx_queue_byte_count(struct usb_serial_port *port,
} else {
dev_err(&port->dev, "failed to get comm status: %d\n", result);
if (result >= 0)
- result = -EPROTO;
+ result = -EIO;
}
kfree(sts);
@@ -719,7 +878,7 @@ static void cp210x_get_termios(struct tty_struct *tty,
&tty->termios.c_cflag, &baud);
tty_encode_baud_rate(tty, baud, baud);
} else {
- unsigned int cflag;
+ tcflag_t cflag;
cflag = 0;
cp210x_get_termios_port(port, &cflag, &baud);
}
@@ -730,10 +889,10 @@ static void cp210x_get_termios(struct tty_struct *tty,
* This is the heart of cp210x_get_termios which always uses a &usb_serial_port.
*/
static void cp210x_get_termios_port(struct usb_serial_port *port,
- unsigned int *cflagp, unsigned int *baudp)
+ tcflag_t *cflagp, unsigned int *baudp)
{
struct device *dev = &port->dev;
- unsigned int cflag;
+ tcflag_t cflag;
struct cp210x_flow_ctl flow_ctl;
u32 baud;
u16 bits;
@@ -930,16 +1089,9 @@ static void cp210x_set_termios(struct tty_struct *tty,
dev_dbg(dev, "%s - data bits = 7\n", __func__);
break;
case CS8:
- bits |= BITS_DATA_8;
- dev_dbg(dev, "%s - data bits = 8\n", __func__);
- break;
- /*case CS9:
- bits |= BITS_DATA_9;
- dev_dbg(dev, "%s - data bits = 9\n", __func__);
- break;*/
default:
- dev_dbg(dev, "cp210x driver does not support the number of bits requested, using 8 bit mode\n");
bits |= BITS_DATA_8;
+ dev_dbg(dev, "%s - data bits = 8\n", __func__);
break;
}
if (cp210x_write_u16_reg(port, CP210X_SET_LINE_CTL, bits))
@@ -1108,10 +1260,188 @@ static void cp210x_break_ctl(struct tty_struct *tty, int break_state)
cp210x_write_u16_reg(port, CP210X_SET_BREAK, state);
}
+#ifdef CONFIG_GPIOLIB
+static int cp210x_gpio_request(struct gpio_chip *gc, unsigned int offset)
+{
+ struct usb_serial *serial = gpiochip_get_data(gc);
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+
+ switch (offset) {
+ case 0:
+ if (priv->config & CP2105_GPIO0_TXLED_MODE)
+ return -ENODEV;
+ break;
+ case 1:
+ if (priv->config & (CP2105_GPIO1_RXLED_MODE |
+ CP2105_GPIO1_RS485_MODE))
+ return -ENODEV;
+ break;
+ }
+
+ return 0;
+}
+
+static int cp210x_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct usb_serial *serial = gpiochip_get_data(gc);
+ int result;
+ u8 buf;
+
+ result = cp210x_read_vendor_block(serial, REQTYPE_INTERFACE_TO_HOST,
+ CP210X_READ_LATCH, &buf, sizeof(buf));
+ if (result < 0)
+ return result;
+
+ return !!(buf & BIT(gpio));
+}
+
+static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+{
+ struct usb_serial *serial = gpiochip_get_data(gc);
+ struct cp210x_gpio_write buf;
+
+ if (value == 1)
+ buf.state = BIT(gpio);
+ else
+ buf.state = 0;
+
+ buf.mask = BIT(gpio);
+
+ cp210x_write_vendor_block(serial, REQTYPE_HOST_TO_INTERFACE,
+ CP210X_WRITE_LATCH, &buf, sizeof(buf));
+}
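+
+/*
+ * Worked example (illustrative only, not part of the patch): driving GPIO 1
+ * high with cp210x_gpio_set() yields buf.mask = BIT(1) = 0x02 and
+ * buf.state = 0x02; driving it low keeps buf.mask = 0x02 with
+ * buf.state = 0x00.
+ */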
+
+static int cp210x_gpio_direction_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ /* Hardware does not support an input mode */
+ return 0;
+}
+
+static int cp210x_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio)
+{
+ /* Hardware does not support an input mode */
+ return -ENOTSUPP;
+}
+
+static int cp210x_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio,
+ int value)
+{
+ return 0;
+}
+
+static int cp210x_gpio_set_single_ended(struct gpio_chip *gc, unsigned int gpio,
+ enum single_ended_mode mode)
+{
+ struct usb_serial *serial = gpiochip_get_data(gc);
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+
+ /* Succeed only if in correct mode (this can't be set at runtime) */
+ if ((mode == LINE_MODE_PUSH_PULL) && (priv->gpio_mode & BIT(gpio)))
+ return 0;
+
+ if ((mode == LINE_MODE_OPEN_DRAIN) && !(priv->gpio_mode & BIT(gpio)))
+ return 0;
+
+ return -ENOTSUPP;
+}
+
+/*
+ * This function configures GPIO on pins that are shared with other signals;
+ * enabling GPIO makes those signals unavailable. This is believed to apply
+ * only to the CP2105 at this point: the other GPIO-capable devices supported
+ * by this driver do not impact other signals and are thus expected to need
+ * very different initialisation.
+ */
+static int cp2105_shared_gpio_init(struct usb_serial *serial)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ struct cp210x_pin_mode mode;
+ struct cp210x_config config;
+ u8 intf_num = cp210x_interface_num(serial);
+ int result;
+
+ result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
+ CP210X_GET_DEVICEMODE, &mode,
+ sizeof(mode));
+ if (result < 0)
+ return result;
+
+ result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
+ CP210X_GET_PORTCONFIG, &config,
+ sizeof(config));
+ if (result < 0)
+ return result;
+
+ /* Two banks of GPIO - one bank for the pins taken from each serial port */
+ if (intf_num == 0) {
+ if (mode.eci == CP210X_PIN_MODE_MODEM)
+ return 0;
+
+ priv->config = config.eci_cfg;
+ priv->gpio_mode = (u8)((le16_to_cpu(config.gpio_mode) &
+ CP210X_ECI_GPIO_MODE_MASK) >>
+ CP210X_ECI_GPIO_MODE_OFFSET);
+ priv->gc.ngpio = 2;
+ } else if (intf_num == 1) {
+ if (mode.sci == CP210X_PIN_MODE_MODEM)
+ return 0;
+
+ priv->config = config.sci_cfg;
+ priv->gpio_mode = (u8)((le16_to_cpu(config.gpio_mode) &
+ CP210X_SCI_GPIO_MODE_MASK) >>
+ CP210X_SCI_GPIO_MODE_OFFSET);
+ priv->gc.ngpio = 3;
+ } else {
+ return -ENODEV;
+ }
+
+ priv->gc.label = "cp210x";
+ priv->gc.request = cp210x_gpio_request;
+ priv->gc.get_direction = cp210x_gpio_direction_get;
+ priv->gc.direction_input = cp210x_gpio_direction_input;
+ priv->gc.direction_output = cp210x_gpio_direction_output;
+ priv->gc.get = cp210x_gpio_get;
+ priv->gc.set = cp210x_gpio_set;
+ priv->gc.set_single_ended = cp210x_gpio_set_single_ended;
+ priv->gc.owner = THIS_MODULE;
+ priv->gc.parent = &serial->interface->dev;
+ priv->gc.base = -1;
+ priv->gc.can_sleep = true;
+
+ result = gpiochip_add_data(&priv->gc, serial);
+ if (!result)
+ priv->gpio_registered = true;
+
+ return result;
+}
+
+static void cp210x_gpio_remove(struct usb_serial *serial)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+
+ if (priv->gpio_registered) {
+ gpiochip_remove(&priv->gc);
+ priv->gpio_registered = false;
+ }
+}
+
+#else
+
+static int cp2105_shared_gpio_init(struct usb_serial *serial)
+{
+ return 0;
+}
+
+static void cp210x_gpio_remove(struct usb_serial *serial)
+{
+ /* Nothing to do */
+}
+
+#endif
+
static int cp210x_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
- struct usb_host_interface *cur_altsetting;
struct cp210x_port_private *port_priv;
int ret;
@@ -1119,8 +1449,7 @@ static int cp210x_port_probe(struct usb_serial_port *port)
if (!port_priv)
return -ENOMEM;
- cur_altsetting = serial->interface->cur_altsetting;
- port_priv->bInterfaceNumber = cur_altsetting->desc.bInterfaceNumber;
+ port_priv->bInterfaceNumber = cp210x_interface_num(serial);
usb_set_serial_port_data(port, port_priv);
@@ -1143,6 +1472,52 @@ static int cp210x_port_remove(struct usb_serial_port *port)
return 0;
}
+static int cp210x_attach(struct usb_serial *serial)
+{
+ int result;
+ struct cp210x_serial_private *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
+ CP210X_GET_PARTNUM, &priv->partnum,
+ sizeof(priv->partnum));
+ if (result < 0)
+ goto err_free_priv;
+
+ usb_set_serial_data(serial, priv);
+
+ if (priv->partnum == CP210X_PARTNUM_CP2105) {
+ result = cp2105_shared_gpio_init(serial);
+ if (result < 0) {
+ dev_err(&serial->interface->dev,
+ "GPIO initialisation failed, continuing without GPIO support\n");
+ }
+ }
+
+ return 0;
+err_free_priv:
+ kfree(priv);
+
+ return result;
+}
+
+static void cp210x_disconnect(struct usb_serial *serial)
+{
+ cp210x_gpio_remove(serial);
+}
+
+static void cp210x_release(struct usb_serial *serial)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+
+ cp210x_gpio_remove(serial);
+
+ kfree(priv);
+}
+
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
new file mode 100644
index 000000000000..8282a6a18fee
--- /dev/null
+++ b/drivers/usb/serial/f81534.c
@@ -0,0 +1,1409 @@
+/*
+ * F81532/F81534 USB to Serial Ports Bridge
+ *
+ * F81532 => 2 Serial Ports
+ * F81534 => 4 Serial Ports
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Copyright (C) 2016 Feature Integration Technology Inc., (Fintek)
+ * Copyright (C) 2016 Tom Tsai (Tom_Tsai@fintek.com.tw)
+ * Copyright (C) 2016 Peter Hong (Peter_Hong@fintek.com.tw)
+ *
+ * The F81532/F81534 has 1 control endpoint for settings, 1 bulk-out endpoint
+ * for TX of all serial ports and 1 bulk-in endpoint for RX of all serial
+ * ports (read data/MSR/LSR).
+ *
+ * The write URB is fixed at 512 bytes, with 128 bytes used per serial port.
+ * Its layout is described by f81534_prepare_write_buffer().
+ *
+ * The read URB is at most 512 bytes, with 128 bytes used per serial port.
+ * It is handled by f81534_process_read_urb() and may arrive as 1, 2, 3 or 4
+ * blocks of 128 bytes.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+#include <linux/serial_reg.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+/* Serial Port register Address */
+#define F81534_UART_BASE_ADDRESS 0x1200
+#define F81534_UART_OFFSET 0x10
+#define F81534_DIVISOR_LSB_REG (0x00 + F81534_UART_BASE_ADDRESS)
+#define F81534_DIVISOR_MSB_REG (0x01 + F81534_UART_BASE_ADDRESS)
+#define F81534_FIFO_CONTROL_REG (0x02 + F81534_UART_BASE_ADDRESS)
+#define F81534_LINE_CONTROL_REG (0x03 + F81534_UART_BASE_ADDRESS)
+#define F81534_MODEM_CONTROL_REG (0x04 + F81534_UART_BASE_ADDRESS)
+#define F81534_MODEM_STATUS_REG (0x06 + F81534_UART_BASE_ADDRESS)
+#define F81534_CONFIG1_REG (0x09 + F81534_UART_BASE_ADDRESS)
+
+#define F81534_DEF_CONF_ADDRESS_START 0x3000
+#define F81534_DEF_CONF_SIZE 8
+
+#define F81534_CUSTOM_ADDRESS_START 0x2f00
+#define F81534_CUSTOM_DATA_SIZE 0x10
+#define F81534_CUSTOM_NO_CUSTOM_DATA 0xff
+#define F81534_CUSTOM_VALID_TOKEN 0xf0
+#define F81534_CONF_OFFSET 1
+
+#define F81534_MAX_DATA_BLOCK 64
+#define F81534_MAX_BUS_RETRY 20
+
+/* Default URB timeout for USB operations */
+#define F81534_USB_MAX_RETRY 10
+#define F81534_USB_TIMEOUT 1000
+#define F81534_SET_GET_REGISTER 0xA0
+
+#define F81534_NUM_PORT 4
+#define F81534_UNUSED_PORT 0xff
+#define F81534_WRITE_BUFFER_SIZE 512
+
+#define DRIVER_DESC "Fintek F81532/F81534"
+#define FINTEK_VENDOR_ID_1 0x1934
+#define FINTEK_VENDOR_ID_2 0x2C42
+#define FINTEK_DEVICE_ID 0x1202
+#define F81534_MAX_TX_SIZE 124
+#define F81534_MAX_RX_SIZE 124
+#define F81534_RECEIVE_BLOCK_SIZE 128
+#define F81534_MAX_RECEIVE_BLOCK_SIZE 512
+
+#define F81534_TOKEN_RECEIVE 0x01
+#define F81534_TOKEN_WRITE 0x02
+#define F81534_TOKEN_TX_EMPTY 0x03
+#define F81534_TOKEN_MSR_CHANGE 0x04
+
+/*
+ * The internal SPI bus is used to access the flash section. We must wait for
+ * the SPI bus to become idle after every command.
+ *
+ * SPI Bus status register: F81534_BUS_REG_STATUS
+ * Bit 0/1 : BUSY
+ * Bit 2 : IDLE
+ */
+#define F81534_BUS_BUSY (BIT(0) | BIT(1))
+#define F81534_BUS_IDLE BIT(2)
+#define F81534_BUS_READ_DATA 0x1004
+#define F81534_BUS_REG_STATUS 0x1003
+#define F81534_BUS_REG_START 0x1002
+#define F81534_BUS_REG_END 0x1001
+
+#define F81534_CMD_READ 0x03
+
+#define F81534_DEFAULT_BAUD_RATE 9600
+#define F81534_MAX_BAUDRATE 115200
+
+#define F81534_PORT_CONF_DISABLE_PORT BIT(3)
+#define F81534_PORT_CONF_NOT_EXIST_PORT BIT(7)
+#define F81534_PORT_UNAVAILABLE \
+ (F81534_PORT_CONF_DISABLE_PORT | F81534_PORT_CONF_NOT_EXIST_PORT)
+
+#define F81534_1X_RXTRIGGER 0xc3
+#define F81534_8X_RXTRIGGER 0xcf
+
+static const struct usb_device_id f81534_id_table[] = {
+ { USB_DEVICE(FINTEK_VENDOR_ID_1, FINTEK_DEVICE_ID) },
+ { USB_DEVICE(FINTEK_VENDOR_ID_2, FINTEK_DEVICE_ID) },
+ {} /* Terminating entry */
+};
+
+#define F81534_TX_EMPTY_BIT 0
+
+struct f81534_serial_private {
+ u8 conf_data[F81534_DEF_CONF_SIZE];
+ int tty_idx[F81534_NUM_PORT];
+ u8 setting_idx;
+ int opened_port;
+ struct mutex urb_mutex;
+};
+
+struct f81534_port_private {
+ struct mutex mcr_mutex;
+ unsigned long tx_empty;
+ spinlock_t msr_lock;
+ u8 shadow_mcr;
+ u8 shadow_msr;
+ u8 phy_num;
+};
+
+static int f81534_logic_to_phy_port(struct usb_serial *serial,
+ struct usb_serial_port *port)
+{
+ struct f81534_serial_private *serial_priv =
+ usb_get_serial_data(port->serial);
+ int count = 0;
+ int i;
+
+ for (i = 0; i < F81534_NUM_PORT; ++i) {
+ if (serial_priv->conf_data[i] & F81534_PORT_UNAVAILABLE)
+ continue;
+
+ if (port->port_number == count)
+ return i;
+
+ ++count;
+ }
+
+ return -ENODEV;
+}
+
+static int f81534_set_register(struct usb_serial *serial, u16 reg, u8 data)
+{
+ struct usb_interface *interface = serial->interface;
+ struct usb_device *dev = serial->dev;
+ size_t count = F81534_USB_MAX_RETRY;
+ int status;
+ u8 *tmp;
+
+ tmp = kmalloc(sizeof(u8), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ *tmp = data;
+
+ /*
+ * The device may not reply when heavily loaded, so we retry up to
+ * F81534_USB_MAX_RETRY times.
+ */
+ while (count--) {
+ status = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ F81534_SET_GET_REGISTER,
+ USB_TYPE_VENDOR | USB_DIR_OUT,
+ reg, 0, tmp, sizeof(u8),
+ F81534_USB_TIMEOUT);
+ if (status > 0) {
+ status = 0;
+ break;
+ } else if (status == 0) {
+ status = -EIO;
+ }
+ }
+
+ if (status < 0) {
+ dev_err(&interface->dev, "%s: reg: %x data: %x failed: %d\n",
+ __func__, reg, data, status);
+ }
+
+ kfree(tmp);
+ return status;
+}
+
+static int f81534_get_register(struct usb_serial *serial, u16 reg, u8 *data)
+{
+ struct usb_interface *interface = serial->interface;
+ struct usb_device *dev = serial->dev;
+ size_t count = F81534_USB_MAX_RETRY;
+ int status;
+ u8 *tmp;
+
+ tmp = kmalloc(sizeof(u8), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ /*
+ * The device may not reply when heavily loaded, so we retry up to
+ * F81534_USB_MAX_RETRY times.
+ */
+ while (count--) {
+ status = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ F81534_SET_GET_REGISTER,
+ USB_TYPE_VENDOR | USB_DIR_IN,
+ reg, 0, tmp, sizeof(u8),
+ F81534_USB_TIMEOUT);
+ if (status > 0) {
+ status = 0;
+ break;
+ } else if (status == 0) {
+ status = -EIO;
+ }
+ }
+
+ if (status < 0) {
+ dev_err(&interface->dev, "%s: reg: %x failed: %d\n", __func__,
+ reg, status);
+ goto end;
+ }
+
+ *data = *tmp;
+
+end:
+ kfree(tmp);
+ return status;
+}
+
+static int f81534_set_port_register(struct usb_serial_port *port, u16 reg,
+ u8 data)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+
+ return f81534_set_register(port->serial,
+ reg + port_priv->phy_num * F81534_UART_OFFSET, data);
+}
+
+static int f81534_get_port_register(struct usb_serial_port *port, u16 reg,
+ u8 *data)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+
+ return f81534_get_register(port->serial,
+ reg + port_priv->phy_num * F81534_UART_OFFSET, data);
+}
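+
+/*
+ * Worked example (illustrative): the MSR of physical port 2 is read from
+ * F81534_MODEM_STATUS_REG + 2 * F81534_UART_OFFSET = 0x1206 + 0x20 = 0x1226.
+ */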
+
+/*
+ * When accessing the internal flash via the SPI bus, the bus status must be
+ * checked after every command, e.g. writes to
+ * F81534_BUS_REG_START/F81534_BUS_REG_END.
+ */
+static int f81534_wait_for_spi_idle(struct usb_serial *serial)
+{
+ size_t count = F81534_MAX_BUS_RETRY;
+ u8 tmp;
+ int status;
+
+ do {
+ status = f81534_get_register(serial, F81534_BUS_REG_STATUS,
+ &tmp);
+ if (status)
+ return status;
+
+ if (tmp & F81534_BUS_BUSY)
+ continue;
+
+ if (tmp & F81534_BUS_IDLE)
+ break;
+
+ } while (--count);
+
+ if (!count) {
+ dev_err(&serial->interface->dev,
+ "%s: timed out waiting for idle SPI bus\n",
+ __func__);
+ return -EIO;
+ }
+
+ return f81534_set_register(serial, F81534_BUS_REG_STATUS,
+ tmp & ~F81534_BUS_IDLE);
+}
+
+static int f81534_get_spi_register(struct usb_serial *serial, u16 reg,
+ u8 *data)
+{
+ int status;
+
+ status = f81534_get_register(serial, reg, data);
+ if (status)
+ return status;
+
+ return f81534_wait_for_spi_idle(serial);
+}
+
+static int f81534_set_spi_register(struct usb_serial *serial, u16 reg, u8 data)
+{
+ int status;
+
+ status = f81534_set_register(serial, reg, data);
+ if (status)
+ return status;
+
+ return f81534_wait_for_spi_idle(serial);
+}
+
+static int f81534_read_flash(struct usb_serial *serial, u32 address,
+ size_t size, u8 *buf)
+{
+ u8 tmp_buf[F81534_MAX_DATA_BLOCK];
+ size_t block = 0;
+ size_t read_size;
+ size_t count;
+ int status;
+ int offset;
+ u16 reg_tmp;
+
+ status = f81534_set_spi_register(serial, F81534_BUS_REG_START,
+ F81534_CMD_READ);
+ if (status)
+ return status;
+
+ status = f81534_set_spi_register(serial, F81534_BUS_REG_START,
+ (address >> 16) & 0xff);
+ if (status)
+ return status;
+
+ status = f81534_set_spi_register(serial, F81534_BUS_REG_START,
+ (address >> 8) & 0xff);
+ if (status)
+ return status;
+
+ status = f81534_set_spi_register(serial, F81534_BUS_REG_START,
+ (address >> 0) & 0xff);
+ if (status)
+ return status;
+
+ /* Continuous read mode */
+ do {
+ read_size = min_t(size_t, F81534_MAX_DATA_BLOCK, size);
+
+ for (count = 0; count < read_size; ++count) {
+ /* Write F81534_BUS_REG_END for the final byte */
+ if (size <= F81534_MAX_DATA_BLOCK &&
+ read_size == count + 1)
+ reg_tmp = F81534_BUS_REG_END;
+ else
+ reg_tmp = F81534_BUS_REG_START;
+
+ /*
+ * Dummy write to force the IC to generate a read pulse; the
+ * value 0xf1 is a don't-care (any value is OK).
+ */
+ status = f81534_set_spi_register(serial, reg_tmp,
+ 0xf1);
+ if (status)
+ return status;
+
+ status = f81534_get_spi_register(serial,
+ F81534_BUS_READ_DATA,
+ &tmp_buf[count]);
+ if (status)
+ return status;
+
+ offset = count + block * F81534_MAX_DATA_BLOCK;
+ buf[offset] = tmp_buf[count];
+ }
+
+ size -= read_size;
+ ++block;
+ } while (size);
+
+ return 0;
+}
+
+static void f81534_prepare_write_buffer(struct usb_serial_port *port, u8 *buf)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+ int phy_num = port_priv->phy_num;
+ u8 tx_len;
+ int i;
+
+ /*
+ * The block layout is fixed at 4 x 128 bytes, one 128-byte block per port.
+ * index 0: port phy idx (e.g., 0,1,2,3)
+ * index 1: only F81534_TOKEN_WRITE
+ * index 2: serial TX out length
+ * index 3: fixed to 0
+ * index 4~127: serial out data block
+ */
+ for (i = 0; i < F81534_NUM_PORT; ++i) {
+ buf[i * F81534_RECEIVE_BLOCK_SIZE] = i;
+ buf[i * F81534_RECEIVE_BLOCK_SIZE + 1] = F81534_TOKEN_WRITE;
+ buf[i * F81534_RECEIVE_BLOCK_SIZE + 2] = 0;
+ buf[i * F81534_RECEIVE_BLOCK_SIZE + 3] = 0;
+ }
+
+ tx_len = kfifo_out_locked(&port->write_fifo,
+ &buf[phy_num * F81534_RECEIVE_BLOCK_SIZE + 4],
+ F81534_MAX_TX_SIZE, &port->lock);
+
+ buf[phy_num * F81534_RECEIVE_BLOCK_SIZE + 2] = tx_len;
+}
+
+static int f81534_submit_writer(struct usb_serial_port *port, gfp_t mem_flags)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+ struct urb *urb;
+ unsigned long flags;
+ int result;
+
+ /* Check whether there is any data in the write_fifo */
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (kfifo_is_empty(&port->write_fifo)) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ return 0;
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ /* Check whether the hardware reports TX empty */
+ if (!test_and_clear_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty))
+ return 0;
+
+ urb = port->write_urbs[0];
+ f81534_prepare_write_buffer(port, port->bulk_out_buffers[0]);
+ urb->transfer_buffer_length = F81534_WRITE_BUFFER_SIZE;
+
+ result = usb_submit_urb(urb, mem_flags);
+ if (result) {
+ set_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty);
+ dev_err(&port->dev, "%s: submit failed: %d\n", __func__,
+ result);
+ return result;
+ }
+
+ usb_serial_port_softint(port);
+ return 0;
+}
+
+static u32 f81534_calc_baud_divisor(u32 baudrate, u32 clockrate)
+{
+ if (!baudrate)
+ return 0;
+
+ /* Round to nearest divisor */
+ return DIV_ROUND_CLOSEST(clockrate, baudrate);
+}
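+
+/*
+ * Worked example (illustrative): with the 115200 baud reference clock used
+ * below (F81534_MAX_BAUDRATE) and a requested rate of 9600 baud, the divisor
+ * is DIV_ROUND_CLOSEST(115200, 9600) = 12.
+ */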
+
+static int f81534_set_port_config(struct usb_serial_port *port, u32 baudrate,
+ u8 lcr)
+{
+ u32 divisor;
+ int status;
+ u8 value;
+
+ if (baudrate <= 1200)
+ value = F81534_1X_RXTRIGGER; /* 128 FIFO & TL: 1x */
+ else
+ value = F81534_8X_RXTRIGGER; /* 128 FIFO & TL: 8x */
+
+ status = f81534_set_port_register(port, F81534_CONFIG1_REG, value);
+ if (status) {
+ dev_err(&port->dev, "%s: CONFIG1 setting failed\n", __func__);
+ return status;
+ }
+
+ if (baudrate <= 1200)
+ value = UART_FCR_TRIGGER_1 | UART_FCR_ENABLE_FIFO; /* TL: 1 */
+ else
+ value = UART_FCR_R_TRIG_11 | UART_FCR_ENABLE_FIFO; /* TL: 14 */
+
+ status = f81534_set_port_register(port, F81534_FIFO_CONTROL_REG,
+ value);
+ if (status) {
+ dev_err(&port->dev, "%s: FCR setting failed\n", __func__);
+ return status;
+ }
+
+ divisor = f81534_calc_baud_divisor(baudrate, F81534_MAX_BAUDRATE);
+ value = UART_LCR_DLAB;
+ status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG,
+ value);
+ if (status) {
+ dev_err(&port->dev, "%s: set LCR failed\n", __func__);
+ return status;
+ }
+
+ value = divisor & 0xff;
+ status = f81534_set_port_register(port, F81534_DIVISOR_LSB_REG, value);
+ if (status) {
+ dev_err(&port->dev, "%s: set DLAB LSB failed\n", __func__);
+ return status;
+ }
+
+ value = (divisor >> 8) & 0xff;
+ status = f81534_set_port_register(port, F81534_DIVISOR_MSB_REG, value);
+ if (status) {
+ dev_err(&port->dev, "%s: set DLAB MSB failed\n", __func__);
+ return status;
+ }
+
+ status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG, lcr);
+ if (status) {
+ dev_err(&port->dev, "%s: set LCR failed\n", __func__);
+ return status;
+ }
+
+ return 0;
+}
+
+static int f81534_update_mctrl(struct usb_serial_port *port, unsigned int set,
+ unsigned int clear)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+ int status;
+ u8 tmp;
+
+ if (((set | clear) & (TIOCM_DTR | TIOCM_RTS)) == 0)
+ return 0; /* no change */
+
+ mutex_lock(&port_priv->mcr_mutex);
+
+ /* 'Set' takes precedence over 'Clear' */
+ clear &= ~set;
+
+ /* Always enable UART_MCR_OUT2 */
+ tmp = UART_MCR_OUT2 | port_priv->shadow_mcr;
+
+ if (clear & TIOCM_DTR)
+ tmp &= ~UART_MCR_DTR;
+
+ if (clear & TIOCM_RTS)
+ tmp &= ~UART_MCR_RTS;
+
+ if (set & TIOCM_DTR)
+ tmp |= UART_MCR_DTR;
+
+ if (set & TIOCM_RTS)
+ tmp |= UART_MCR_RTS;
+
+ status = f81534_set_port_register(port, F81534_MODEM_CONTROL_REG, tmp);
+ if (status < 0) {
+ dev_err(&port->dev, "%s: MCR write failed\n", __func__);
+ mutex_unlock(&port_priv->mcr_mutex);
+ return status;
+ }
+
+ port_priv->shadow_mcr = tmp;
+ mutex_unlock(&port_priv->mcr_mutex);
+ return 0;
+}
+
+/*
+ * This function searches the data area for the F81534_CUSTOM_VALID_TOKEN
+ * token to find the latest configuration index. If nothing is found
+ * (*index = F81534_CUSTOM_NO_CUSTOM_DATA), the default configuration from
+ * the F81534_DEF_CONF_ADDRESS_START section is loaded instead.
+ *
+ * Since only block 0 is used to save data, *index is either 0 or
+ * F81534_CUSTOM_NO_CUSTOM_DATA.
+ */
+static int f81534_find_config_idx(struct usb_serial *serial, u8 *index)
+{
+ u8 tmp;
+ int status;
+
+ status = f81534_read_flash(serial, F81534_CUSTOM_ADDRESS_START, 1,
+ &tmp);
+ if (status) {
+ dev_err(&serial->interface->dev, "%s: read failed: %d\n",
+ __func__, status);
+ return status;
+ }
+
+ /* We'll use the custom data when the data is valid. */
+ if (tmp == F81534_CUSTOM_VALID_TOKEN)
+ *index = 0;
+ else
+ *index = F81534_CUSTOM_NO_CUSTOM_DATA;
+
+ return 0;
+}
+
+/*
+ * There are two generations of the F81532/534 IC, both with internal storage.
+ *
+ * The 1st generation is a pure USB-to-TTL RS232 IC designed for 4 ports only;
+ * none of the internal data is used. All mode and GPIO control must be set
+ * manually by the application or the driver, and the whole storage space
+ * reads 0xff. f81534_calc_num_ports() falls through to the case marked as
+ * "oldest version" for this IC.
+ *
+ * The 2nd generation is more generic and can drive any transceiver; this is
+ * the mass-production type. Nine bytes are saved at
+ * F81534_CUSTOM_ADDRESS_START (0x2f00). The 1st byte is an indicator: if it
+ * equals F81534_CUSTOM_VALID_TOKEN (0xf0), the IC is 2nd generation. The
+ * following 4 bytes store the port modes (0: RS232, 1: RS485 inverted,
+ * 2: RS485), and the last 4 bytes store the GPIO state (values 0~7
+ * representing 3 GPIO output pins). f81534_calc_num_ports() takes the
+ * "new style" path, checking for F81534_PORT_UNAVAILABLE.
+ */
+static int f81534_calc_num_ports(struct usb_serial *serial)
+{
+ u8 setting[F81534_CUSTOM_DATA_SIZE];
+ u8 setting_idx;
+ u8 num_port = 0;
+ int status;
+ size_t i;
+
+ /* Check whether a custom setting exists */
+ status = f81534_find_config_idx(serial, &setting_idx);
+ if (status) {
+ dev_err(&serial->interface->dev, "%s: find idx failed: %d\n",
+ __func__, status);
+ return 0;
+ }
+
+ /*
+ * Read the custom data only when it is available; otherwise read the
+ * default values instead.
+ */
+ if (setting_idx != F81534_CUSTOM_NO_CUSTOM_DATA) {
+ status = f81534_read_flash(serial,
+ F81534_CUSTOM_ADDRESS_START +
+ F81534_CONF_OFFSET,
+ sizeof(setting), setting);
+ if (status) {
+ dev_err(&serial->interface->dev,
+ "%s: get custom data failed: %d\n",
+ __func__, status);
+ return 0;
+ }
+
+ dev_dbg(&serial->interface->dev,
+ "%s: read config from block: %d\n", __func__,
+ setting_idx);
+ } else {
+ /* Read default board setting */
+ status = f81534_read_flash(serial,
+ F81534_DEF_CONF_ADDRESS_START, F81534_NUM_PORT,
+ setting);
+
+ if (status) {
+ dev_err(&serial->interface->dev,
+ "%s: read failed: %d\n", __func__,
+ status);
+ return 0;
+ }
+
+ dev_dbg(&serial->interface->dev, "%s: read default config\n",
+ __func__);
+ }
+
+ /* New style, find all possible ports */
+ for (i = 0; i < F81534_NUM_PORT; ++i) {
+ if (setting[i] & F81534_PORT_UNAVAILABLE)
+ continue;
+
+ ++num_port;
+ }
+
+ if (num_port)
+ return num_port;
+
+ dev_warn(&serial->interface->dev, "%s: Read Failed. default 4 ports\n",
+ __func__);
+ return 4; /* Nothing found, oldest version IC */
+}
+
+static void f81534_set_termios(struct tty_struct *tty,
+ struct usb_serial_port *port,
+ struct ktermios *old_termios)
+{
+ u8 new_lcr = 0;
+ int status;
+ u32 baud;
+
+ if (C_BAUD(tty) == B0)
+ f81534_update_mctrl(port, 0, TIOCM_DTR | TIOCM_RTS);
+ else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
+ f81534_update_mctrl(port, TIOCM_DTR | TIOCM_RTS, 0);
+
+ if (C_PARENB(tty)) {
+ new_lcr |= UART_LCR_PARITY;
+
+ if (!C_PARODD(tty))
+ new_lcr |= UART_LCR_EPAR;
+
+ if (C_CMSPAR(tty))
+ new_lcr |= UART_LCR_SPAR;
+ }
+
+ if (C_CSTOPB(tty))
+ new_lcr |= UART_LCR_STOP;
+
+ switch (C_CSIZE(tty)) {
+ case CS5:
+ new_lcr |= UART_LCR_WLEN5;
+ break;
+ case CS6:
+ new_lcr |= UART_LCR_WLEN6;
+ break;
+ case CS7:
+ new_lcr |= UART_LCR_WLEN7;
+ break;
+ default:
+ case CS8:
+ new_lcr |= UART_LCR_WLEN8;
+ break;
+ }
+
+ baud = tty_get_baud_rate(tty);
+ if (!baud)
+ return;
+
+ if (baud > F81534_MAX_BAUDRATE) {
+ if (old_termios)
+ baud = tty_termios_baud_rate(old_termios);
+ else
+ baud = F81534_DEFAULT_BAUD_RATE;
+
+ tty_encode_baud_rate(tty, baud, baud);
+ }
+
+ dev_dbg(&port->dev, "%s: baud: %d\n", __func__, baud);
+
+ status = f81534_set_port_config(port, baud, new_lcr);
+ if (status < 0) {
+ dev_err(&port->dev, "%s: set port config failed: %d\n",
+ __func__, status);
+ }
+}
+
+static int f81534_submit_read_urb(struct usb_serial *serial, gfp_t flags)
+{
+ return usb_serial_generic_submit_read_urbs(serial->port[0], flags);
+}
+
+static void f81534_msr_changed(struct usb_serial_port *port, u8 msr)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+ struct tty_struct *tty;
+ unsigned long flags;
+ u8 old_msr;
+
+ if (!(msr & UART_MSR_ANY_DELTA))
+ return;
+
+ spin_lock_irqsave(&port_priv->msr_lock, flags);
+ old_msr = port_priv->shadow_msr;
+ port_priv->shadow_msr = msr;
+ spin_unlock_irqrestore(&port_priv->msr_lock, flags);
+
+ dev_dbg(&port->dev, "%s: MSR from %02x to %02x\n", __func__, old_msr,
+ msr);
+
+ /* Update input line counters */
+ if (msr & UART_MSR_DCTS)
+ port->icount.cts++;
+ if (msr & UART_MSR_DDSR)
+ port->icount.dsr++;
+ if (msr & UART_MSR_DDCD)
+ port->icount.dcd++;
+ if (msr & UART_MSR_TERI)
+ port->icount.rng++;
+
+ wake_up_interruptible(&port->port.delta_msr_wait);
+
+ if (!(msr & UART_MSR_DDCD))
+ return;
+
+ dev_dbg(&port->dev, "%s: DCD Changed: phy_num: %d from %x to %x\n",
+ __func__, port_priv->phy_num, old_msr, msr);
+
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+ return;
+
+ usb_serial_handle_dcd_change(port, tty, msr & UART_MSR_DCD);
+ tty_kref_put(tty);
+}
+
+static int f81534_read_msr(struct usb_serial_port *port)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ int status;
+ u8 msr;
+
+ /* Get MSR initial value */
+ status = f81534_get_port_register(port, F81534_MODEM_STATUS_REG, &msr);
+ if (status)
+ return status;
+
+ /* Force update current state */
+ spin_lock_irqsave(&port_priv->msr_lock, flags);
+ port_priv->shadow_msr = msr;
+ spin_unlock_irqrestore(&port_priv->msr_lock, flags);
+
+ return 0;
+}
+
+static int f81534_open(struct tty_struct *tty, struct usb_serial_port *port)
+{
+ struct f81534_serial_private *serial_priv =
+ usb_get_serial_data(port->serial);
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+ int status;
+
+ status = f81534_set_port_register(port,
+ F81534_FIFO_CONTROL_REG, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ if (status) {
+ dev_err(&port->dev, "%s: Clear FIFO failed: %d\n", __func__,
+ status);
+ return status;
+ }
+
+ if (tty)
+ f81534_set_termios(tty, port, NULL);
+
+ status = f81534_read_msr(port);
+ if (status)
+ return status;
+
+ mutex_lock(&serial_priv->urb_mutex);
+
+ /* Submit Read URBs for first port opened */
+ if (!serial_priv->opened_port) {
+ status = f81534_submit_read_urb(port->serial, GFP_KERNEL);
+ if (status)
+ goto exit;
+ }
+
+ serial_priv->opened_port++;
+
+exit:
+ mutex_unlock(&serial_priv->urb_mutex);
+
+ set_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty);
+ return status;
+}
+
+static void f81534_close(struct usb_serial_port *port)
+{
+ struct f81534_serial_private *serial_priv =
+ usb_get_serial_data(port->serial);
+ struct usb_serial_port *port0 = port->serial->port[0];
+ unsigned long flags;
+ size_t i;
+
+ usb_kill_urb(port->write_urbs[0]);
+
+ spin_lock_irqsave(&port->lock, flags);
+ kfifo_reset_out(&port->write_fifo);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ /* Kill Read URBs when final port closed */
+ mutex_lock(&serial_priv->urb_mutex);
+ serial_priv->opened_port--;
+
+ if (!serial_priv->opened_port) {
+ for (i = 0; i < ARRAY_SIZE(port0->read_urbs); ++i)
+ usb_kill_urb(port0->read_urbs[i]);
+ }
+
+ mutex_unlock(&serial_priv->urb_mutex);
+}
+
+static int f81534_get_serial_info(struct usb_serial_port *port,
+ struct serial_struct __user *retinfo)
+{
+ struct f81534_port_private *port_priv;
+ struct serial_struct tmp;
+
+ port_priv = usb_get_serial_port_data(port);
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ tmp.type = PORT_16550A;
+ tmp.port = port->port_number;
+ tmp.line = port->minor;
+ tmp.baud_base = F81534_MAX_BAUDRATE;
+
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int f81534_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct serial_struct __user *buf = (struct serial_struct __user *)arg;
+
+ switch (cmd) {
+ case TIOCGSERIAL:
+ return f81534_get_serial_info(port, buf);
+ default:
+ break;
+ }
+
+ return -ENOIOCTLCMD;
+}
+
+static void f81534_process_per_serial_block(struct usb_serial_port *port,
+ u8 *data)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+ int phy_num = data[0];
+ size_t read_size = 0;
+ size_t i;
+ char tty_flag;
+ int status;
+ u8 lsr;
+
+ /*
+ * The block layout is 128 bytes:
+ * index 0: port phy idx (e.g., 0,1,2,3)
+ * index 1: one of
+ * F81534_TOKEN_RECEIVE
+ * F81534_TOKEN_TX_EMPTY
+ * F81534_TOKEN_MSR_CHANGE
+ * index 2: serial-in size (data+lsr, must be even),
+ * meaningful for F81534_TOKEN_RECEIVE only
+ * index 3: current MSR for this port
+ * index 4~127: serial in data block (data+lsr, must be even)
+ */
+ switch (data[1]) {
+ case F81534_TOKEN_TX_EMPTY:
+ set_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty);
+
+ /* Try to submit writer */
+ status = f81534_submit_writer(port, GFP_ATOMIC);
+ if (status)
+ dev_err(&port->dev, "%s: submit failed\n", __func__);
+ return;
+
+ case F81534_TOKEN_MSR_CHANGE:
+ f81534_msr_changed(port, data[3]);
+ return;
+
+ case F81534_TOKEN_RECEIVE:
+ read_size = data[2];
+ if (read_size > F81534_MAX_RX_SIZE) {
+ dev_err(&port->dev,
+ "%s: phy: %d read_size: %zu larger than: %d\n",
+ __func__, phy_num, read_size,
+ F81534_MAX_RX_SIZE);
+ return;
+ }
+
+ break;
+
+ default:
+ dev_warn(&port->dev, "%s: unknown token: %02x\n", __func__,
+ data[1]);
+ return;
+ }
+
+ for (i = 4; i < 4 + read_size; i += 2) {
+ tty_flag = TTY_NORMAL;
+ lsr = data[i + 1];
+
+ if (lsr & UART_LSR_BRK_ERROR_BITS) {
+ if (lsr & UART_LSR_BI) {
+ tty_flag = TTY_BREAK;
+ port->icount.brk++;
+ usb_serial_handle_break(port);
+ } else if (lsr & UART_LSR_PE) {
+ tty_flag = TTY_PARITY;
+ port->icount.parity++;
+ } else if (lsr & UART_LSR_FE) {
+ tty_flag = TTY_FRAME;
+ port->icount.frame++;
+ }
+
+ if (lsr & UART_LSR_OE) {
+ port->icount.overrun++;
+ tty_insert_flip_char(&port->port, 0,
+ TTY_OVERRUN);
+ }
+ }
+
+ if (port->port.console && port->sysrq) {
+ if (usb_serial_handle_sysrq_char(port, data[i]))
+ continue;
+ }
+
+ tty_insert_flip_char(&port->port, data[i], tty_flag);
+ }
+
+ tty_flip_buffer_push(&port->port);
+}
+
+static void f81534_process_read_urb(struct urb *urb)
+{
+ struct f81534_serial_private *serial_priv;
+ struct usb_serial_port *port;
+ struct usb_serial *serial;
+ u8 *buf;
+ int phy_port_num;
+ int tty_port_num;
+ size_t i;
+
+ if (!urb->actual_length ||
+ urb->actual_length % F81534_RECEIVE_BLOCK_SIZE) {
+ return;
+ }
+
+ port = urb->context;
+ serial = port->serial;
+ buf = urb->transfer_buffer;
+ serial_priv = usb_get_serial_data(serial);
+
+ for (i = 0; i < urb->actual_length; i += F81534_RECEIVE_BLOCK_SIZE) {
+ phy_port_num = buf[i];
+ if (phy_port_num >= F81534_NUM_PORT) {
+ dev_err(&port->dev,
+ "%s: phy_port_num: %d larger than: %d\n",
+ __func__, phy_port_num, F81534_NUM_PORT);
+ continue;
+ }
+
+ tty_port_num = serial_priv->tty_idx[phy_port_num];
+ port = serial->port[tty_port_num];
+
+ if (tty_port_initialized(&port->port))
+ f81534_process_per_serial_block(port, &buf[i]);
+ }
+}
+
+static void f81534_write_usb_callback(struct urb *urb)
+{
+ struct usb_serial_port *port = urb->context;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ dev_dbg(&port->dev, "%s - urb stopped: %d\n",
+ __func__, urb->status);
+ return;
+ case -EPIPE:
+ dev_err(&port->dev, "%s - urb stopped: %d\n",
+ __func__, urb->status);
+ return;
+ default:
+ dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
+ __func__, urb->status);
+ break;
+ }
+}
+
+static int f81534_setup_ports(struct usb_serial *serial)
+{
+ struct usb_serial_port *port;
+ u8 port0_out_address;
+ int buffer_size;
+ size_t i;
+
+ /*
+ * The device has 2 or 4 serial ports but only one set of bulk
+ * in/out endpoints.
+ *
+ * The usb-serial core only allocates write URBs and buffers for
+ * port 0, so the code below allocates them for ports 1/2/3; all
+ * ports share port 0's read URBs for read operations.
+ */
+ for (i = 1; i < serial->num_ports; ++i) {
+ port0_out_address = serial->port[0]->bulk_out_endpointAddress;
+ buffer_size = serial->port[0]->bulk_out_size;
+ port = serial->port[i];
+
+ if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
+ return -ENOMEM;
+
+ port->bulk_out_size = buffer_size;
+ port->bulk_out_endpointAddress = port0_out_address;
+
+ port->write_urbs[0] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!port->write_urbs[0])
+ return -ENOMEM;
+
+ port->bulk_out_buffers[0] = kzalloc(buffer_size, GFP_KERNEL);
+ if (!port->bulk_out_buffers[0])
+ return -ENOMEM;
+
+ usb_fill_bulk_urb(port->write_urbs[0], serial->dev,
+ usb_sndbulkpipe(serial->dev,
+ port0_out_address),
+ port->bulk_out_buffers[0], buffer_size,
+ serial->type->write_bulk_callback, port);
+
+ port->write_urb = port->write_urbs[0];
+ port->bulk_out_buffer = port->bulk_out_buffers[0];
+ }
+
+ return 0;
+}
+
+static int f81534_probe(struct usb_serial *serial,
+ const struct usb_device_id *id)
+{
+ struct usb_endpoint_descriptor *endpoint;
+ struct usb_host_interface *iface_desc;
+ struct device *dev;
+ int num_bulk_in = 0;
+ int num_bulk_out = 0;
+ int size_bulk_in = 0;
+ int size_bulk_out = 0;
+ int i;
+
+ dev = &serial->interface->dev;
+ iface_desc = serial->interface->cur_altsetting;
+
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ endpoint = &iface_desc->endpoint[i].desc;
+
+ if (usb_endpoint_is_bulk_in(endpoint)) {
+ ++num_bulk_in;
+ size_bulk_in = usb_endpoint_maxp(endpoint);
+ }
+
+ if (usb_endpoint_is_bulk_out(endpoint)) {
+ ++num_bulk_out;
+ size_bulk_out = usb_endpoint_maxp(endpoint);
+ }
+ }
+
+ if (num_bulk_in != 1 || num_bulk_out != 1) {
+ dev_err(dev, "expected endpoints not found\n");
+ return -ENODEV;
+ }
+
+ if (size_bulk_out != F81534_WRITE_BUFFER_SIZE ||
+ size_bulk_in != F81534_MAX_RECEIVE_BLOCK_SIZE) {
+ dev_err(dev, "unsupported endpoint max packet size\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int f81534_attach(struct usb_serial *serial)
+{
+ struct f81534_serial_private *serial_priv;
+ int index = 0;
+ int status;
+ int i;
+
+ serial_priv = devm_kzalloc(&serial->interface->dev,
+ sizeof(*serial_priv), GFP_KERNEL);
+ if (!serial_priv)
+ return -ENOMEM;
+
+ usb_set_serial_data(serial, serial_priv);
+
+ mutex_init(&serial_priv->urb_mutex);
+
+ status = f81534_setup_ports(serial);
+ if (status)
+ return status;
+
+ /* Check whether a custom setting exists */
+ status = f81534_find_config_idx(serial, &serial_priv->setting_idx);
+ if (status) {
+ dev_err(&serial->interface->dev, "%s: find idx failed: %d\n",
+ __func__, status);
+ return status;
+ }
+
+ /*
+ * Read the custom data only when it is available; otherwise read the
+ * default values instead.
+ */
+ if (serial_priv->setting_idx == F81534_CUSTOM_NO_CUSTOM_DATA) {
+ /*
+ * The default configuration layout:
+ * byte 0/1/2/3: uart setting
+ */
+ status = f81534_read_flash(serial,
+ F81534_DEF_CONF_ADDRESS_START,
+ F81534_DEF_CONF_SIZE,
+ serial_priv->conf_data);
+ if (status) {
+ dev_err(&serial->interface->dev,
+ "%s: read reserve data failed: %d\n",
+ __func__, status);
+ return status;
+ }
+ } else {
+ /* Only read 8 bytes for mode & GPIO */
+ status = f81534_read_flash(serial,
+ F81534_CUSTOM_ADDRESS_START +
+ F81534_CONF_OFFSET,
+ sizeof(serial_priv->conf_data),
+ serial_priv->conf_data);
+ if (status) {
+ dev_err(&serial->interface->dev,
+ "%s: idx: %d get data failed: %d\n",
+ __func__, serial_priv->setting_idx,
+ status);
+ return status;
+ }
+ }
+
+ /* Assign phy-to-logic mapping */
+ for (i = 0; i < F81534_NUM_PORT; ++i) {
+ if (serial_priv->conf_data[i] & F81534_PORT_UNAVAILABLE)
+ continue;
+
+ serial_priv->tty_idx[i] = index++;
+ dev_dbg(&serial->interface->dev,
+ "%s: phy_num: %d, tty_idx: %d\n", __func__, i,
+ serial_priv->tty_idx[i]);
+ }
+
+ return 0;
+}
+
+static int f81534_port_probe(struct usb_serial_port *port)
+{
+ struct f81534_port_private *port_priv;
+
+ port_priv = devm_kzalloc(&port->dev, sizeof(*port_priv), GFP_KERNEL);
+ if (!port_priv)
+ return -ENOMEM;
+
+ spin_lock_init(&port_priv->msr_lock);
+ mutex_init(&port_priv->mcr_mutex);
+
+ /* Assign logic-to-phy mapping */
+ port_priv->phy_num = f81534_logic_to_phy_port(port->serial, port);
+ if (port_priv->phy_num < 0 || port_priv->phy_num >= F81534_NUM_PORT)
+ return -ENODEV;
+
+ usb_set_serial_port_data(port, port_priv);
+ dev_dbg(&port->dev, "%s: port_number: %d, phy_num: %d\n", __func__,
+ port->port_number, port_priv->phy_num);
+
+ return 0;
+}
+
+static int f81534_tiocmget(struct tty_struct *tty)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+ int status;
+ int r;
+ u8 msr;
+ u8 mcr;
+
+ /* Read current MSR from device */
+ status = f81534_get_port_register(port, F81534_MODEM_STATUS_REG, &msr);
+ if (status)
+ return status;
+
+ mutex_lock(&port_priv->mcr_mutex);
+ mcr = port_priv->shadow_mcr;
+ mutex_unlock(&port_priv->mcr_mutex);
+
+ r = (mcr & UART_MCR_DTR ? TIOCM_DTR : 0) |
+ (mcr & UART_MCR_RTS ? TIOCM_RTS : 0) |
+ (msr & UART_MSR_CTS ? TIOCM_CTS : 0) |
+ (msr & UART_MSR_DCD ? TIOCM_CAR : 0) |
+ (msr & UART_MSR_RI ? TIOCM_RI : 0) |
+ (msr & UART_MSR_DSR ? TIOCM_DSR : 0);
+
+ return r;
+}
+
+static int f81534_tiocmset(struct tty_struct *tty, unsigned int set,
+ unsigned int clear)
+{
+ struct usb_serial_port *port = tty->driver_data;
+
+ return f81534_update_mctrl(port, set, clear);
+}
+
+static void f81534_dtr_rts(struct usb_serial_port *port, int on)
+{
+ if (on)
+ f81534_update_mctrl(port, TIOCM_DTR | TIOCM_RTS, 0);
+ else
+ f81534_update_mctrl(port, 0, TIOCM_DTR | TIOCM_RTS);
+}
+
+static int f81534_write(struct tty_struct *tty, struct usb_serial_port *port,
+ const u8 *buf, int count)
+{
+ int bytes_out, status;
+
+ if (!count)
+ return 0;
+
+ bytes_out = kfifo_in_locked(&port->write_fifo, buf, count,
+ &port->lock);
+
+ status = f81534_submit_writer(port, GFP_ATOMIC);
+ if (status) {
+ dev_err(&port->dev, "%s: submit failed\n", __func__);
+ return status;
+ }
+
+ return bytes_out;
+}
+
+static bool f81534_tx_empty(struct usb_serial_port *port)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+
+ return test_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty);
+}
+
+static int f81534_resume(struct usb_serial *serial)
+{
+ struct f81534_serial_private *serial_priv =
+ usb_get_serial_data(serial);
+ struct usb_serial_port *port;
+ int error = 0;
+ int status;
+ size_t i;
+
+ /*
+ * Port 0's bulk-in URBs are registered while any port is open; they carry
+ * received data, MSR changes and TX_EMPTY events for all ports.
+ */
+ mutex_lock(&serial_priv->urb_mutex);
+
+ if (serial_priv->opened_port) {
+ status = f81534_submit_read_urb(serial, GFP_NOIO);
+ if (status) {
+ mutex_unlock(&serial_priv->urb_mutex);
+ return status;
+ }
+ }
+
+ mutex_unlock(&serial_priv->urb_mutex);
+
+ for (i = 0; i < serial->num_ports; i++) {
+ port = serial->port[i];
+ if (!tty_port_initialized(&port->port))
+ continue;
+
+ status = f81534_submit_writer(port, GFP_NOIO);
+ if (status) {
+ dev_err(&port->dev, "%s: submit failed\n", __func__);
+ ++error;
+ }
+ }
+
+ if (error)
+ return -EIO;
+
+ return 0;
+}
+
+static struct usb_serial_driver f81534_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "f81534",
+ },
+ .description = DRIVER_DESC,
+ .id_table = f81534_id_table,
+ .open = f81534_open,
+ .close = f81534_close,
+ .write = f81534_write,
+ .tx_empty = f81534_tx_empty,
+ .calc_num_ports = f81534_calc_num_ports,
+ .probe = f81534_probe,
+ .attach = f81534_attach,
+ .port_probe = f81534_port_probe,
+ .dtr_rts = f81534_dtr_rts,
+ .process_read_urb = f81534_process_read_urb,
+ .ioctl = f81534_ioctl,
+ .tiocmget = f81534_tiocmget,
+ .tiocmset = f81534_tiocmset,
+ .write_bulk_callback = f81534_write_usb_callback,
+ .set_termios = f81534_set_termios,
+ .resume = f81534_resume,
+};
+
+static struct usb_serial_driver *const serial_drivers[] = {
+ &f81534_device, NULL
+};
+
+module_usb_serial_driver(serial_drivers, f81534_id_table);
+
+MODULE_DEVICE_TABLE(usb, f81534_id_table);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Peter Hong <Peter_Hong@fintek.com.tw>");
+MODULE_AUTHOR("Tom Tsai <Tom_Tsai@fintek.com.tw>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 6e9fc8bcc285..23d14b98ae2a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1455,8 +1455,6 @@ static int get_serial_info(struct usb_serial_port *port,
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct serial_struct tmp;
- if (!retinfo)
- return -EFAULT;
memset(&tmp, 0, sizeof(tmp));
tmp.flags = priv->flags;
tmp.baud_base = priv->baud_base;
@@ -1538,9 +1536,6 @@ static int get_lsr_info(struct usb_serial_port *port,
struct ftdi_private *priv = usb_get_serial_port_data(port);
unsigned int result = 0;
- if (!retinfo)
- return -EFAULT;
-
if (priv->transmit_empty)
result = TIOCSER_TEMT;
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 11c05ce2f35f..dcc0c58aaad5 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -1554,9 +1554,6 @@ static int get_serial_info(struct edgeport_port *edge_port,
{
struct serial_struct tmp;
- if (!retinfo)
- return -EFAULT;
-
memset(&tmp, 0, sizeof(tmp));
tmp.type = PORT_16550A;
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index fce82fd79f77..c339163698eb 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2459,9 +2459,6 @@ static int get_serial_info(struct edgeport_port *edge_port,
struct serial_struct tmp;
unsigned cwait;
- if (!retinfo)
- return -EFAULT;
-
cwait = edge_port->port->port.closing_wait;
if (cwait != ASYNC_CLOSING_WAIT_NONE)
cwait = jiffies_to_msecs(cwait) / 10;
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index fc5d3a791e08..0ee190fc1bf8 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -296,7 +296,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
rc = usb_serial_generic_open(tty, port);
if (rc) {
retval = rc;
- goto exit;
+ goto err_free_cfg;
}
rc = usb_control_msg(port->serial->dev,
@@ -311,21 +311,38 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
if (rc < 0) {
dev_err(&port->dev, "Enabling read failed (error = %d)\n", rc);
retval = rc;
+ goto err_generic_close;
} else
dev_dbg(&port->dev, "%s - enabled reading\n", __func__);
rc = klsi_105_get_line_state(port, &line_state);
- if (rc >= 0) {
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_state = line_state;
- spin_unlock_irqrestore(&priv->lock, flags);
- dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, line_state);
- retval = 0;
- } else
+ if (rc < 0) {
retval = rc;
+ goto err_disable_read;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->line_state = line_state;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__,
+ line_state);
+
+ return 0;
-exit:
+err_disable_read:
+ usb_control_msg(port->serial->dev,
+ usb_sndctrlpipe(port->serial->dev, 0),
+ KL5KUSB105A_SIO_CONFIGURE,
+ USB_TYPE_VENDOR | USB_DIR_OUT,
+ KL5KUSB105A_SIO_CONFIGURE_READ_OFF,
+ 0, /* index */
+ NULL, 0,
+ KLSI_TIMEOUT);
+err_generic_close:
+ usb_serial_generic_close(port);
+err_free_cfg:
kfree(cfg);
+
return retval;
}
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index de9992b492b0..d52caa03679c 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1861,9 +1861,6 @@ static int get_serial_info(struct moschip_port *mos7720_port,
{
struct serial_struct tmp;
- if (!retinfo)
- return -EFAULT;
-
memset(&tmp, 0, sizeof(tmp));
tmp.type = PORT_16550A;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 57426d703a09..9a220b8e810f 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1956,9 +1956,6 @@ static int mos7840_get_serial_info(struct moschip_port *mos7840_port,
if (mos7840_port == NULL)
return -1;
- if (!retinfo)
- return -EFAULT;
-
memset(&tmp, 0, sizeof(tmp));
tmp.type = PORT_16550A;
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index 4b7bfb394a32..5ded6f524d59 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -336,9 +336,6 @@ static int get_serial_info(struct usb_serial_port *port,
{
struct serial_struct tmp;
- if (!serial)
- return -EFAULT;
-
memset(&tmp, 0x00, sizeof(tmp));
/* fake emulate a 16550 uart to make userspace code happy */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 9894e341c6ac..7ce31a4c7e7f 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -268,6 +268,8 @@ static void option_instat_callback(struct urb *urb);
#define TELIT_PRODUCT_CC864_SINGLE 0x1006
#define TELIT_PRODUCT_DE910_DUAL 0x1010
#define TELIT_PRODUCT_UE910_V2 0x1012
+#define TELIT_PRODUCT_LE922_USBCFG1 0x1040
+#define TELIT_PRODUCT_LE922_USBCFG2 0x1041
#define TELIT_PRODUCT_LE922_USBCFG0 0x1042
#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
@@ -1210,6 +1212,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
+ .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
@@ -1989,6 +1995,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 85acb50a7ee2..659cb8606bd9 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -463,9 +463,6 @@ static int get_serial_info(struct usb_serial_port *port,
{
struct serial_struct tmp;
- if (!retinfo)
- return -EFAULT;
-
memset(&tmp, 0, sizeof(tmp));
tmp.line = port->minor;
tmp.port = 0;
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 70a098de429f..2a156144c76c 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -318,9 +318,6 @@ static int get_serial_info(struct usb_serial_port *port,
{
struct serial_struct tmp;
- if (!retinfo)
- return -EFAULT;
-
memset(&tmp, 0, sizeof(tmp));
tmp.line = port->minor;
tmp.port = 0;
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index a8b9bdba314f..8db9d071d940 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1426,9 +1426,6 @@ static int ti_get_serial_info(struct ti_port *tport,
struct serial_struct ret_serial;
unsigned cwait;
- if (!ret_arg)
- return -EFAULT;
-
cwait = port->port.closing_wait;
if (cwait != ASYNC_CLOSING_WAIT_NONE)
cwait = jiffies_to_msecs(cwait) / 10;
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 3dfdfc81254b..59bfcb3da116 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -140,9 +140,6 @@ static int get_serial_info(struct usb_serial_port *port,
{
struct serial_struct tmp;
- if (!retinfo)
- return -EFAULT;
-
memset(&tmp, 0, sizeof(tmp));
tmp.line = port->minor;
tmp.port = port->port_number;
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 2cba13a532cd..615bea08ec0a 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -52,7 +52,6 @@
#include <linux/sched.h>
#include <linux/errno.h>
-#include <linux/freezer.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 03eccf29ace0..c4724fb3a691 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -460,13 +460,14 @@ static void vhci_tx_urb(struct urb *urb)
{
struct vhci_device *vdev = get_vdev(urb->dev);
struct vhci_priv *priv;
- struct vhci_hcd *vhci = vdev_to_vhci(vdev);
+ struct vhci_hcd *vhci;
unsigned long flags;
if (!vdev) {
pr_err("could not get virtual device");
return;
}
+ vhci = vdev_to_vhci(vdev);
priv = kzalloc(sizeof(struct vhci_priv), GFP_ATOMIC);
if (!priv) {
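
Note on the vhci_tx_urb() hunk above: it fixes a dereference-before-NULL-check by no longer evaluating vdev_to_vhci(vdev) in the declaration, moving it after the !vdev test instead. A minimal sketch of the corrected ordering; the vdev/vhci/to_vhci names below are illustrative stand-ins, not the driver's real definitions:

struct vdev { int id; };
struct vhci { struct vdev dev; };

/* hypothetical helper mirroring vdev_to_vhci() */
static struct vhci *to_vhci(struct vdev *vdev)
{
	return (struct vhci *)vdev;	/* vdev is the first member */
}

static int tx_one(struct vdev *vdev)
{
	struct vhci *vhci;		/* note: not initialised from vdev here */

	if (!vdev)
		return -1;		/* bail out before any use of vdev */

	vhci = to_vhci(vdev);		/* safe: vdev checked above */
	return vhci->dev.id;
}
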
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index c404017c1b5a..b96e5b189269 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -361,6 +361,7 @@ static void set_status_attr(int id)
status->attr.attr.name = status->name;
status->attr.attr.mode = S_IRUGO;
status->attr.show = status_show;
+ sysfs_attr_init(&status->attr.attr);
}
static int init_status_attrs(void)
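
The one-line addition above covers the kernel rule that dynamically allocated sysfs attributes must be initialised with sysfs_attr_init() before registration (with lockdep enabled it supplies the attribute's lock class; otherwise it is a no-op). A sketch of the pattern, kernel context assumed and with a hypothetical "port_status" attribute:

struct port_status {
	struct device_attribute attr;
	char name[16];
};

static void port_status_setup(struct port_status *st, int id,
			      ssize_t (*show)(struct device *,
					      struct device_attribute *, char *))
{
	snprintf(st->name, sizeof(st->name), "status.%d", id);
	sysfs_attr_init(&st->attr.attr);	/* must precede registration */
	st->attr.attr.name = st->name;
	st->attr.attr.mode = 0444;		/* read-only (S_IRUGO) */
	st->attr.show = show;
}
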
diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c
index 7091848df6c8..968471b62cbc 100644
--- a/drivers/usb/usbip/vudc_dev.c
+++ b/drivers/usb/usbip/vudc_dev.c
@@ -242,10 +242,10 @@ static const struct usb_gadget_ops vgadget_ops = {
static int vep_enable(struct usb_ep *_ep,
const struct usb_endpoint_descriptor *desc)
{
- struct vep *ep;
- struct vudc *udc;
- unsigned maxp;
- unsigned long flags;
+ struct vep *ep;
+ struct vudc *udc;
+ unsigned int maxp;
+ unsigned long flags;
ep = to_vep(_ep);
udc = ep_to_vudc(ep);
@@ -259,7 +259,7 @@ static int vep_enable(struct usb_ep *_ep,
spin_lock_irqsave(&udc->lock, flags);
- maxp = usb_endpoint_maxp(desc) & 0x7ff;
+ maxp = usb_endpoint_maxp(desc);
_ep->maxpacket = maxp;
ep->desc = desc;
ep->type = usb_endpoint_type(desc);
@@ -549,30 +549,34 @@ static int init_vudc_hw(struct vudc *udc)
sprintf(ep->name, "ep%d%s", num,
i ? (is_out ? "out" : "in") : "");
ep->ep.name = ep->name;
+
+ ep->ep.ops = &vep_ops;
+
+ usb_ep_set_maxpacket_limit(&ep->ep, ~0);
+ ep->ep.max_streams = 16;
+ ep->gadget = &udc->gadget;
+ INIT_LIST_HEAD(&ep->req_queue);
+
if (i == 0) {
+ /* ep0 */
ep->ep.caps.type_control = true;
ep->ep.caps.dir_out = true;
ep->ep.caps.dir_in = true;
+
+ udc->gadget.ep0 = &ep->ep;
} else {
+ /* All other eps */
ep->ep.caps.type_iso = true;
ep->ep.caps.type_int = true;
ep->ep.caps.type_bulk = true;
- }
- if (is_out)
- ep->ep.caps.dir_out = true;
- else
- ep->ep.caps.dir_in = true;
+ if (is_out)
+ ep->ep.caps.dir_out = true;
+ else
+ ep->ep.caps.dir_in = true;
- ep->ep.ops = &vep_ops;
- list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
- ep->halted = ep->wedged = ep->already_seen =
- ep->setup_stage = 0;
- usb_ep_set_maxpacket_limit(&ep->ep, ~0);
- ep->ep.max_streams = 16;
- ep->gadget = &udc->gadget;
- ep->desc = NULL;
- INIT_LIST_HEAD(&ep->req_queue);
+ list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+ }
}
spin_lock_init(&udc->lock);
@@ -589,9 +593,6 @@ static int init_vudc_hw(struct vudc *udc)
ud->eh_ops.reset = vudc_device_reset;
ud->eh_ops.unusable = vudc_device_unusable;
- udc->gadget.ep0 = &udc->ep[0].ep;
- list_del_init(&udc->ep[0].ep.ep_list);
-
v_init_timer(udc);
return 0;
diff --git a/drivers/usb/usbip/vudc_transfer.c b/drivers/usb/usbip/vudc_transfer.c
index aba6bd478045..4cfd475ee865 100644
--- a/drivers/usb/usbip/vudc_transfer.c
+++ b/drivers/usb/usbip/vudc_transfer.c
@@ -73,8 +73,8 @@ static int handle_control_request(struct vudc *udc, struct urb *urb,
{
struct vep *ep2;
int ret_val = 1;
- unsigned w_index;
- unsigned w_value;
+ unsigned int w_index;
+ unsigned int w_value;
w_index = le16_to_cpu(setup->wIndex);
w_value = le16_to_cpu(setup->wValue);
@@ -200,7 +200,7 @@ static int transfer(struct vudc *udc,
top:
/* if there's no request queued, the device is NAKing; return */
list_for_each_entry(req, &ep->req_queue, req_entry) {
- unsigned host_len, dev_len, len;
+ unsigned int host_len, dev_len, len;
void *ubuf_pos, *rbuf_pos;
int is_short, to_host;
int rescan = 0;
@@ -339,6 +339,8 @@ static void v_timer(unsigned long _vudc)
total = timer->frame_limit;
}
+ /* We have to clear ep0 flags separately as it's not on the list */
+ udc->ep[0].already_seen = 0;
list_for_each_entry(_ep, &udc->gadget.ep_list, ep_list) {
ep = to_vep(_ep);
ep->already_seen = 0;
diff --git a/drivers/usb/wusbcore/dev-sysfs.c b/drivers/usb/wusbcore/dev-sysfs.c
index 415b14002a61..d4de56b93d68 100644
--- a/drivers/usb/wusbcore/dev-sysfs.c
+++ b/drivers/usb/wusbcore/dev-sysfs.c
@@ -53,7 +53,7 @@ static ssize_t wusb_disconnect_store(struct device *dev,
wusbhc_put(wusbhc);
return size;
}
-static DEVICE_ATTR(wusb_disconnect, 0200, NULL, wusb_disconnect_store);
+static DEVICE_ATTR_WO(wusb_disconnect);
static ssize_t wusb_cdid_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -69,7 +69,7 @@ static ssize_t wusb_cdid_show(struct device *dev,
wusb_dev_put(wusb_dev);
return result + 1;
}
-static DEVICE_ATTR(wusb_cdid, 0444, wusb_cdid_show, NULL);
+static DEVICE_ATTR_RO(wusb_cdid);
static ssize_t wusb_ck_store(struct device *dev,
struct device_attribute *attr,
@@ -105,7 +105,7 @@ static ssize_t wusb_ck_store(struct device *dev,
wusbhc_put(wusbhc);
return result < 0 ? result : size;
}
-static DEVICE_ATTR(wusb_ck, 0200, NULL, wusb_ck_store);
+static DEVICE_ATTR_WO(wusb_ck);
static struct attribute *wusb_dev_attrs[] = {
&dev_attr_wusb_disconnect.attr,
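
The DEVICE_ATTR_WO/RO/RW conversions above (and in wusbhc.c further down) rely on the helpers deriving both the callback names and the default permissions (0200, 0444, 0644) from the attribute name, while still defining the same dev_attr_<name> symbol. A short sketch with a hypothetical "foo" attribute:

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 42);
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return count;
}
/* expects foo_show()/foo_store(); defines dev_attr_foo with mode 0644 */
static DEVICE_ATTR_RW(foo);
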
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index 8c9421b69da0..170f2c38de9b 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -240,6 +240,7 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc,
if (new_secd == NULL) {
dev_err(dev,
"Can't allocate space for security descriptors\n");
+ result = -ENOMEM;
goto out;
}
secd = new_secd;
diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/usb/wusbcore/wa-nep.c
index ed4622279c63..e3819fc182b0 100644
--- a/drivers/usb/wusbcore/wa-nep.c
+++ b/drivers/usb/wusbcore/wa-nep.c
@@ -198,6 +198,7 @@ static int wa_nep_queue(struct wahc *wa, size_t size)
if (nw == NULL) {
if (printk_ratelimit())
dev_err(dev, "No memory to queue notification\n");
+ result = -ENOMEM;
goto out;
}
INIT_WORK(&nw->work, wa_notif_dispatch);
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 167fcc71f5f6..e70322b1dd02 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -1203,6 +1203,7 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
sizeof(struct wa_xfer_packet_info_hwaiso) +
(seg_isoc_frame_count * sizeof(__le16));
}
+ result = -ENOMEM;
seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
GFP_ATOMIC);
if (seg == NULL)
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c
index 94f401ab859f..a273a91cf667 100644
--- a/drivers/usb/wusbcore/wusbhc.c
+++ b/drivers/usb/wusbcore/wusbhc.c
@@ -84,8 +84,7 @@ static ssize_t wusb_trust_timeout_store(struct device *dev,
out:
return result < 0 ? result : size;
}
-static DEVICE_ATTR(wusb_trust_timeout, 0644, wusb_trust_timeout_show,
- wusb_trust_timeout_store);
+static DEVICE_ATTR_RW(wusb_trust_timeout);
/*
* Show the current WUSB CHID.
@@ -145,7 +144,7 @@ static ssize_t wusb_chid_store(struct device *dev,
result = wusbhc_chid_set(wusbhc, &chid);
return result < 0 ? result : size;
}
-static DEVICE_ATTR(wusb_chid, 0644, wusb_chid_show, wusb_chid_store);
+static DEVICE_ATTR_RW(wusb_chid);
static ssize_t wusb_phy_rate_show(struct device *dev,
@@ -174,8 +173,7 @@ static ssize_t wusb_phy_rate_store(struct device *dev,
wusbhc->phy_rate = phy_rate;
return size;
}
-static DEVICE_ATTR(wusb_phy_rate, 0644, wusb_phy_rate_show,
- wusb_phy_rate_store);
+static DEVICE_ATTR_RW(wusb_phy_rate);
static ssize_t wusb_dnts_show(struct device *dev,
struct device_attribute *attr,
@@ -205,7 +203,7 @@ static ssize_t wusb_dnts_store(struct device *dev,
return size;
}
-static DEVICE_ATTR(wusb_dnts, 0644, wusb_dnts_show, wusb_dnts_store);
+static DEVICE_ATTR_RW(wusb_dnts);
static ssize_t wusb_retry_count_show(struct device *dev,
struct device_attribute *attr,
@@ -234,8 +232,7 @@ static ssize_t wusb_retry_count_store(struct device *dev,
return size;
}
-static DEVICE_ATTR(wusb_retry_count, 0644, wusb_retry_count_show,
- wusb_retry_count_store);
+static DEVICE_ATTR_RW(wusb_retry_count);
/* Group all the WUSBHC attributes */
static struct attribute *wusbhc_attrs[] = {
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index b87f5cfdaea5..a44f5627b82a 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -164,8 +164,6 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
int count, int ypos, int xpos);
static void fbcon_clear_margins(struct vc_data *vc, int bottom_only);
static void fbcon_cursor(struct vc_data *vc, int mode);
-static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
- int count);
static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
int height, int width);
static int fbcon_switch(struct vc_data *vc);
@@ -1795,15 +1793,15 @@ static inline void fbcon_softback_note(struct vc_data *vc, int t,
softback_curr = softback_in;
}
-static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
- int count)
+static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
+ enum con_scroll dir, unsigned int count)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct display *p = &fb_display[vc->vc_num];
int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;
if (fbcon_is_inactive(vc, info))
- return -EINVAL;
+ return true;
fbcon_cursor(vc, CM_ERASE);
@@ -1831,7 +1829,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
(b - count)),
vc->vc_video_erase_char,
vc->vc_size_row * count);
- return 1;
+ return true;
break;
case SCROLL_WRAP_MOVE:
@@ -1903,7 +1901,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
(b - count)),
vc->vc_video_erase_char,
vc->vc_size_row * count);
- return 1;
+ return true;
}
break;
@@ -1922,7 +1920,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
t),
vc->vc_video_erase_char,
vc->vc_size_row * count);
- return 1;
+ return true;
break;
case SCROLL_WRAP_MOVE:
@@ -1992,10 +1990,10 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
t),
vc->vc_video_erase_char,
vc->vc_size_row * count);
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
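
The fbcon change above (and the mdacon, newport, sticon and vgacon conversions that follow) switch the console scroll hook to a bool with unsigned parameters and an enum con_scroll direction: returning true means the driver scrolled the region itself, false lets the VT core fall back to its software scroll. A sketch of a callback under the new prototype; kernel context assumed, and the SM_UP direction value plus the "dummycon" naming are assumptions rather than taken from this patch:

static bool dummycon_scroll(struct vc_data *vc, unsigned int top,
			    unsigned int bottom, enum con_scroll dir,
			    unsigned int lines)
{
	if (!lines || dir != SM_UP)
		return false;	/* not handled; VT core does a soft scroll */

	/* ...program the hardware to move rows [top, bottom) up by 'lines'... */

	return true;		/* region scrolled entirely in the driver */
}
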
diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c
index bacbb044d77c..ec192a1bf297 100644
--- a/drivers/video/console/mdacon.c
+++ b/drivers/video/console/mdacon.c
@@ -488,12 +488,13 @@ static void mdacon_cursor(struct vc_data *c, int mode)
}
}
-static int mdacon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
+static bool mdacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
+ enum con_scroll dir, unsigned int lines)
{
u16 eattr = mda_convert_attr(c->vc_video_erase_char);
if (!lines)
- return 0;
+ return false;
if (lines > c->vc_rows) /* maximum realistic size */
lines = c->vc_rows;
@@ -514,7 +515,7 @@ static int mdacon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
break;
}
- return 0;
+ return false;
}
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index e3b9521e4ec3..1e11614322fe 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -574,8 +574,8 @@ static int newport_font_set(struct vc_data *vc, struct console_font *font, unsig
return newport_set_font(vc->vc_num, font);
}
-static int newport_scroll(struct vc_data *vc, int t, int b, int dir,
- int lines)
+static bool newport_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
+ enum con_scroll dir, unsigned int lines)
{
int count, x, y;
unsigned short *s, *d;
@@ -595,7 +595,7 @@ static int newport_scroll(struct vc_data *vc, int t, int b, int dir,
(vc->vc_color & 0xf0) >> 4);
}
npregs->cset.topscan = (topscan - 1) & 0x3ff;
- return 0;
+ return false;
}
count = (b - t - lines) * vc->vc_cols;
@@ -670,7 +670,7 @@ static int newport_scroll(struct vc_data *vc, int t, int b, int dir,
}
}
}
- return 1;
+ return true;
}
static int newport_dummy(struct vc_data *c)
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index 3a10ac19598f..79c9bd8d3025 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -153,12 +153,13 @@ static void sticon_cursor(struct vc_data *conp, int mode)
}
}
-static int sticon_scroll(struct vc_data *conp, int t, int b, int dir, int count)
+static bool sticon_scroll(struct vc_data *conp, unsigned int t,
+ unsigned int b, enum con_scroll dir, unsigned int count)
{
struct sti_struct *sti = sticon_sti;
if (vga_is_gfx)
- return 0;
+ return false;
sticon_cursor(conp, CM_ERASE);
@@ -174,7 +175,7 @@ static int sticon_scroll(struct vc_data *conp, int t, int b, int dir, int count)
break;
}
- return 0;
+ return false;
}
static void sticon_init(struct vc_data *c, int init)
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 11576611a974..c22a56232b7c 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -60,15 +60,6 @@ static struct vgastate vgastate;
#define BLANK 0x0020
-#define CAN_LOAD_EGA_FONTS /* undefine if the user must not do this */
-#define CAN_LOAD_PALETTE /* undefine if the user must not do this */
-
-/* You really do _NOT_ want to define this, unless you have buggy
- * Trident VGA which will resize cursor when moving it between column
- * 15 & 16. If you define this and your VGA is OK, inverse bug will
- * appear.
- */
-#undef TRIDENT_GLITCH
#define VGA_FONTWIDTH 8 /* VGA does not support fontwidths != 8 */
/*
* Interface used by the world
@@ -83,14 +74,12 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch);
static void vgacon_scrolldelta(struct vc_data *c, int lines);
static int vgacon_set_origin(struct vc_data *c);
static void vgacon_save_screen(struct vc_data *c);
-static int vgacon_scroll(struct vc_data *c, int t, int b, int dir,
- int lines);
static void vgacon_invert_region(struct vc_data *c, u16 * p, int count);
static struct uni_pagedir *vgacon_uni_pagedir;
static int vgacon_refcount;
/* Description of the hardware situation */
-static int vga_init_done __read_mostly;
+static bool vga_init_done;
static unsigned long vga_vram_base __read_mostly; /* Base of video memory */
static unsigned long vga_vram_end __read_mostly; /* End of video memory */
static unsigned int vga_vram_size __read_mostly; /* Size of video memory */
@@ -98,31 +87,31 @@ static u16 vga_video_port_reg __read_mostly; /* Video register select port */
static u16 vga_video_port_val __read_mostly; /* Video register value port */
static unsigned int vga_video_num_columns; /* Number of text columns */
static unsigned int vga_video_num_lines; /* Number of text lines */
-static int vga_can_do_color __read_mostly; /* Do we support colors? */
+static bool vga_can_do_color; /* Do we support colors? */
static unsigned int vga_default_font_height __read_mostly; /* Height of default screen font */
static unsigned char vga_video_type __read_mostly; /* Card type */
-static unsigned char vga_hardscroll_enabled __read_mostly;
-static unsigned char vga_hardscroll_user_enable __read_mostly = 1;
-static unsigned char vga_font_is_default = 1;
+static bool vga_font_is_default = true;
static int vga_vesa_blanked;
-static int vga_palette_blanked;
-static int vga_is_gfx;
-static int vga_512_chars;
+static bool vga_palette_blanked;
+static bool vga_is_gfx;
+static bool vga_512_chars;
static int vga_video_font_height;
static int vga_scan_lines __read_mostly;
static unsigned int vga_rolled_over;
-static int vgacon_text_mode_force;
+static bool vgacon_text_mode_force;
+static bool vga_hardscroll_enabled;
+static bool vga_hardscroll_user_enable = true;
bool vgacon_text_force(void)
{
- return vgacon_text_mode_force ? true : false;
+ return vgacon_text_mode_force;
}
EXPORT_SYMBOL(vgacon_text_force);
static int __init text_mode(char *str)
{
- vgacon_text_mode_force = 1;
+ vgacon_text_mode_force = true;
return 1;
}
@@ -136,7 +125,7 @@ static int __init no_scroll(char *str)
* Braille reader made by F.H. Papenmeier (Germany).
* Use the "no-scroll" bootflag.
*/
- vga_hardscroll_user_enable = vga_hardscroll_enabled = 0;
+ vga_hardscroll_user_enable = vga_hardscroll_enabled = false;
return 1;
}
@@ -159,18 +148,10 @@ static inline void write_vga(unsigned char reg, unsigned int val)
* handlers, thus the write has to be IRQ-atomic.
*/
raw_spin_lock_irqsave(&vga_lock, flags);
-
-#ifndef SLOW_VGA
v1 = reg + (val & 0xff00);
v2 = reg + 1 + ((val << 8) & 0xff00);
outw(v1, vga_video_port_reg);
outw(v2, vga_video_port_reg);
-#else
- outb_p(reg, vga_video_port_reg);
- outb_p(val >> 8, vga_video_port_val);
- outb_p(reg + 1, vga_video_port_reg);
- outb_p(val & 0xff, vga_video_port_val);
-#endif
raw_spin_unlock_irqrestore(&vga_lock, flags);
}
@@ -334,31 +315,8 @@ static void vgacon_restore_screen(struct vc_data *c)
static void vgacon_scrolldelta(struct vc_data *c, int lines)
{
- if (!lines) /* Turn scrollback off */
- c->vc_visible_origin = c->vc_origin;
- else {
- int margin = c->vc_size_row * 4;
- int ul, we, p, st;
-
- if (vga_rolled_over >
- (c->vc_scr_end - vga_vram_base) + margin) {
- ul = c->vc_scr_end - vga_vram_base;
- we = vga_rolled_over + c->vc_size_row;
- } else {
- ul = 0;
- we = vga_vram_size;
- }
- p = (c->vc_visible_origin - vga_vram_base - ul + we) % we +
- lines * c->vc_size_row;
- st = (c->vc_origin - vga_vram_base - ul + we) % we;
- if (st < 2 * margin)
- margin = 0;
- if (p < margin)
- p = 0;
- if (p > st - margin)
- p = st;
- c->vc_visible_origin = vga_vram_base + (p + ul) % we;
- }
+ vc_scrolldelta_helper(c, lines, vga_rolled_over, (void *)vga_vram_base,
+ vga_vram_size);
vga_set_mem_top(c);
}
#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */
@@ -427,7 +385,7 @@ static const char *vgacon_startup(void)
}
} else {
/* If not, it is color. */
- vga_can_do_color = 1;
+ vga_can_do_color = true;
vga_vram_base = 0xb8000;
vga_video_port_reg = VGA_CRT_IC;
vga_video_port_val = VGA_CRT_DC;
@@ -451,18 +409,6 @@ static const char *vgacon_startup(void)
request_resource(&ioport_resource,
&vga_console_resource);
-#ifdef VGA_CAN_DO_64KB
- /*
- * get 64K rather than 32K of video RAM.
- * This doesn't actually work on all "VGA"
- * controllers (it seems like setting MM=01
- * and COE=1 isn't necessarily a good idea)
- */
- vga_vram_base = 0xa0000;
- vga_vram_size = 0x10000;
- outb_p(6, VGA_GFX_I);
- outb_p(6, VGA_GFX_D);
-#endif
/*
* Normalise the palette registers, to point
* the 16 screen colours to the first 16
@@ -542,7 +488,7 @@ static const char *vgacon_startup(void)
if (!vga_init_done) {
vgacon_scrollback_startup();
- vga_init_done = 1;
+ vga_init_done = true;
}
return display_desc;
@@ -634,7 +580,7 @@ static u8 vgacon_build_attr(struct vc_data *c, u8 color, u8 intensity,
static void vgacon_invert_region(struct vc_data *c, u16 * p, int count)
{
- int col = vga_can_do_color;
+ const bool col = vga_can_do_color;
while (count--) {
u16 a = scr_readw(p);
@@ -652,11 +598,6 @@ static void vgacon_set_cursor_size(int xpos, int from, int to)
unsigned long flags;
int curs, cure;
-#ifdef TRIDENT_GLITCH
- if (xpos < 16)
- from--, to--;
-#endif
-
if ((from == cursor_size_lastfrom) && (to == cursor_size_lastto))
return;
cursor_size_lastfrom = from;
@@ -858,12 +799,10 @@ static void vga_set_palette(struct vc_data *vc, const unsigned char *table)
static void vgacon_set_palette(struct vc_data *vc, const unsigned char *table)
{
-#ifdef CAN_LOAD_PALETTE
if (vga_video_type != VIDEO_TYPE_VGAC || vga_palette_blanked
|| !con_is_visible(vc))
return;
vga_set_palette(vc, table);
-#endif
}
/* structure holding original VGA register settings */
@@ -1006,24 +945,24 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch)
}
if (vga_palette_blanked) {
vga_set_palette(c, color_table);
- vga_palette_blanked = 0;
+ vga_palette_blanked = false;
return 0;
}
- vga_is_gfx = 0;
+ vga_is_gfx = false;
/* Tell console.c that it has to restore the screen itself */
return 1;
case 1: /* Normal blanking */
case -1: /* Obsolete */
if (!mode_switch && vga_video_type == VIDEO_TYPE_VGAC) {
vga_pal_blank(&vgastate);
- vga_palette_blanked = 1;
+ vga_palette_blanked = true;
return 0;
}
vgacon_set_origin(c);
scr_memsetw((void *) vga_vram_base, BLANK,
c->vc_screenbuf_size);
if (mode_switch)
- vga_is_gfx = 1;
+ vga_is_gfx = true;
return 1;
default: /* VESA blanking */
if (vga_video_type == VIDEO_TYPE_VGAC) {
@@ -1046,15 +985,14 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch)
* (sizif@botik.yaroslavl.su).
*/
-#ifdef CAN_LOAD_EGA_FONTS
-
#define colourmap 0xa0000
/* Pauline Middelink <middelin@polyware.iaf.nl> reports that we
should use 0xA0000 for the bwmap as well.. */
#define blackwmap 0xa0000
#define cmapsz 8192
-static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+static int vgacon_do_font_op(struct vgastate *state, char *arg, int set,
+ bool ch512)
{
unsigned short video_port_status = vga_video_port_reg + 6;
int font_select = 0x00, beg, i;
@@ -1063,10 +1001,6 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
if (vga_video_type != VIDEO_TYPE_EGAM) {
charmap = (char *) VGA_MAP_MEM(colourmap, 0);
beg = 0x0e;
-#ifdef VGA_CAN_DO_64KB
- if (vga_video_type == VIDEO_TYPE_VGAC)
- beg = 0x06;
-#endif
} else {
charmap = (char *) VGA_MAP_MEM(blackwmap, 0);
beg = 0x0a;
@@ -1080,7 +1014,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
if (!arg)
return -EINVAL; /* Return to default font not supported */
- vga_font_is_default = 0;
+ vga_font_is_default = false;
font_select = ch512 ? 0x04 : 0x00;
#else
/*
@@ -1091,7 +1025,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
if (set) {
vga_font_is_default = !arg;
if (!arg)
- ch512 = 0; /* Default font is always 256 */
+ ch512 = false; /* Default font is always 256 */
font_select = arg ? (ch512 ? 0x0e : 0x0a) : 0x00;
}
@@ -1295,13 +1229,6 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font)
return vgacon_do_font_op(&vgastate, font->data, 0, vga_512_chars);
}
-#else
-
-#define vgacon_font_set NULL
-#define vgacon_font_get NULL
-
-#endif
-
static int vgacon_resize(struct vc_data *c, unsigned int width,
unsigned int height, unsigned int user)
{
@@ -1350,17 +1277,17 @@ static void vgacon_save_screen(struct vc_data *c)
c->vc_screenbuf_size > vga_vram_size ? vga_vram_size : c->vc_screenbuf_size);
}
-static int vgacon_scroll(struct vc_data *c, int t, int b, int dir,
- int lines)
+static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
+ enum con_scroll dir, unsigned int lines)
{
unsigned long oldo;
unsigned int delta;
if (t || b != c->vc_rows || vga_is_gfx || c->vc_mode != KD_TEXT)
- return 0;
+ return false;
if (!vga_hardscroll_enabled || lines >= c->vc_rows / 2)
- return 0;
+ return false;
vgacon_restore_screen(c);
oldo = c->vc_origin;
@@ -1396,7 +1323,7 @@ static int vgacon_scroll(struct vc_data *c, int t, int b, int dir,
c->vc_visible_origin = c->vc_origin;
vga_set_mem_top(c);
c->vc_pos = (c->vc_pos - oldo) + c->vc_origin;
- return 1;
+ return true;
}
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 0567d517eed3..d0115a7af0a9 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -633,7 +633,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
struct xenfb_info *info = dev_get_drvdata(&dev->dev);
- int val;
switch (backend_state) {
case XenbusStateInitialising:
@@ -657,16 +656,12 @@ InitWait:
if (dev->state != XenbusStateConnected)
goto InitWait; /* no InitWait seen yet, fudge it */
- if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "request-update", "%d", &val) < 0)
- val = 0;
- if (val)
+ if (xenbus_read_unsigned(info->xbdev->otherend,
+ "request-update", 0))
info->update_wanted = 1;
- if (xenbus_scanf(XBT_NIL, dev->otherend,
- "feature-resize", "%d", &val) < 0)
- val = 0;
- info->feature_resize = val;
+ info->feature_resize = xenbus_read_unsigned(dev->otherend,
+ "feature-resize", 0);
break;
case XenbusStateClosed:
diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c
index 630bd189f167..79b35515904e 100644
--- a/drivers/watchdog/mei_wdt.c
+++ b/drivers/watchdog/mei_wdt.c
@@ -410,11 +410,11 @@ static void mei_wdt_unregister_work(struct work_struct *work)
}
/**
- * mei_wdt_event_rx - callback for data receive
+ * mei_wdt_rx - callback for data receive
*
* @cldev: bus device
*/
-static void mei_wdt_event_rx(struct mei_cl_device *cldev)
+static void mei_wdt_rx(struct mei_cl_device *cldev)
{
struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
struct mei_wdt_start_response res;
@@ -482,11 +482,11 @@ out:
}
/*
- * mei_wdt_notify_event - callback for event notification
+ * mei_wdt_notif - callback for event notification
*
* @cldev: bus device
*/
-static void mei_wdt_notify_event(struct mei_cl_device *cldev)
+static void mei_wdt_notif(struct mei_cl_device *cldev)
{
struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
@@ -496,23 +496,6 @@ static void mei_wdt_notify_event(struct mei_cl_device *cldev)
mei_wdt_register(wdt);
}
-/**
- * mei_wdt_event - callback for event receive
- *
- * @cldev: bus device
- * @events: event mask
- * @context: callback context
- */
-static void mei_wdt_event(struct mei_cl_device *cldev,
- u32 events, void *context)
-{
- if (events & BIT(MEI_CL_EVENT_RX))
- mei_wdt_event_rx(cldev);
-
- if (events & BIT(MEI_CL_EVENT_NOTIF))
- mei_wdt_notify_event(cldev);
-}
-
#if IS_ENABLED(CONFIG_DEBUG_FS)
static ssize_t mei_dbgfs_read_activation(struct file *file, char __user *ubuf,
@@ -623,16 +606,17 @@ static int mei_wdt_probe(struct mei_cl_device *cldev,
goto err_out;
}
- ret = mei_cldev_register_event_cb(wdt->cldev,
- BIT(MEI_CL_EVENT_RX) |
- BIT(MEI_CL_EVENT_NOTIF),
- mei_wdt_event, NULL);
+ ret = mei_cldev_register_rx_cb(wdt->cldev, mei_wdt_rx);
+ if (ret) {
+ dev_err(&cldev->dev, "Could not reg rx event ret=%d\n", ret);
+ goto err_disable;
+ }
+ ret = mei_cldev_register_notif_cb(wdt->cldev, mei_wdt_notif);
/* on legacy devices notification is not supported
- * this doesn't fail the registration for RX event
*/
if (ret && ret != -EOPNOTSUPP) {
- dev_err(&cldev->dev, "Could not register event ret=%d\n", ret);
+ dev_err(&cldev->dev, "Could not reg notif event ret=%d\n", ret);
goto err_disable;
}
@@ -699,25 +683,7 @@ static struct mei_cl_driver mei_wdt_driver = {
.remove = mei_wdt_remove,
};
-static int __init mei_wdt_init(void)
-{
- int ret;
-
- ret = mei_cldev_driver_register(&mei_wdt_driver);
- if (ret) {
- pr_err(KBUILD_MODNAME ": module registration failed\n");
- return ret;
- }
- return 0;
-}
-
-static void __exit mei_wdt_exit(void)
-{
- mei_cldev_driver_unregister(&mei_wdt_driver);
-}
-
-module_init(mei_wdt_init);
-module_exit(mei_wdt_exit);
+module_mei_cl_driver(mei_wdt_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
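
The hand-rolled init/exit pair removed above is replaced by module_mei_cl_driver(), which, like other module_*_driver() wrappers, presumably expands to module_driver() around mei_cldev_driver_register()/mei_cldev_driver_unregister(); the only visible behavioural difference is losing the custom pr_err() on registration failure. Assumed expansion, not copied from the tree:

static int __init mei_wdt_driver_init(void)
{
	return mei_cldev_driver_register(&mei_wdt_driver);
}
module_init(mei_wdt_driver_init);

static void __exit mei_wdt_driver_exit(void)
{
	mei_cldev_driver_unregister(&mei_wdt_driver);
}
module_exit(mei_wdt_driver_exit);
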
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index e4db19e88ab1..db107fa50ca1 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -180,7 +180,6 @@ static void __balloon_append(struct page *page)
static void balloon_append(struct page *page)
{
__balloon_append(page);
- adjust_managed_page_count(page, -1);
}
/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
@@ -201,8 +200,6 @@ static struct page *balloon_retrieve(bool require_lowmem)
else
balloon_stats.balloon_low--;
- adjust_managed_page_count(page, 1);
-
return page;
}
@@ -478,7 +475,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
#endif
/* Relinquish the page back to the allocator. */
- __free_reserved_page(page);
+ free_reserved_page(page);
}
balloon_stats.current_pages += rc;
@@ -509,6 +506,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
state = BP_EAGAIN;
break;
}
+ adjust_managed_page_count(page, -1);
scrub_page(page);
list_add(&page->lru, &pages);
}
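
The balloon.c hunks relocate the managed-page accounting: balloon_append()/balloon_retrieve() no longer touch the counter, decrease_reservation() subtracts explicitly, and increase_reservation() switches from __free_reserved_page() to free_reserved_page(), whose non-underscored form also credits the page back to the managed count. A sketch of that helper as I recall it from include/linux/mm.h, shown only for orientation:

static inline void free_reserved_page(struct page *page)
{
	__free_reserved_page(page);		/* clear Reserved, free the page */
	adjust_managed_page_count(page, 1);	/* credit it as managed again */
}
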
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index adc19ce3cc66..fd8e872d2943 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -947,7 +947,7 @@ static int find_virq(unsigned int virq, unsigned int cpu)
continue;
if (status.status != EVTCHNSTAT_virq)
continue;
- if (status.u.virq == virq && status.vcpu == cpu) {
+ if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
rc = port;
break;
}
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 7a47c4c9fb1b..1bf55a32a4b3 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -127,18 +127,21 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
struct gntalloc_gref *gref, *next;
readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE);
- rc = -ENOMEM;
for (i = 0; i < op->count; i++) {
gref = kzalloc(sizeof(*gref), GFP_KERNEL);
- if (!gref)
+ if (!gref) {
+ rc = -ENOMEM;
goto undo;
+ }
list_add_tail(&gref->next_gref, &queue_gref);
list_add_tail(&gref->next_file, &queue_file);
gref->users = 1;
gref->file_index = op->index + i * PAGE_SIZE;
gref->page = alloc_page(GFP_KERNEL|__GFP_ZERO);
- if (!gref->page)
+ if (!gref->page) {
+ rc = -ENOMEM;
goto undo;
+ }
/* Grant foreign access to the page. */
rc = gnttab_grant_foreign_access(op->domid,
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index bb952121ea94..2ef2b61b69df 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1007,7 +1007,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
vma->vm_ops = &gntdev_vmops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
if (use_ptemod)
vma->vm_flags |= VM_DONTCOPY;
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index b59c9455aae1..112ce422dc22 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -125,8 +125,4 @@ static struct pci_driver platform_driver = {
.id_table = platform_pci_tbl,
};
-static int __init platform_pci_init(void)
-{
- return pci_register_driver(&platform_driver);
-}
-device_initcall(platform_pci_init);
+builtin_pci_driver(platform_driver);
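
builtin_pci_driver() is the non-modular counterpart of module_pci_driver(); judging by the removed lines above, it should expand to the same device_initcall-based registration with no unregister path, which suits this always-built-in driver. Assumed equivalence:

static int __init platform_driver_init(void)
{
	return pci_register_driver(&platform_driver);
}
device_initcall(platform_driver_init);
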
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 87e6035c9e81..478fb91e3df2 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -405,7 +405,8 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
*/
trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
- map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
+ map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
+ attrs);
if (map == SWIOTLB_MAP_ERROR)
return DMA_ERROR_CODE;
@@ -416,11 +417,13 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
/*
* Ensure that the address returned is DMA'ble
*/
- if (!dma_capable(dev, dev_addr, size)) {
- swiotlb_tbl_unmap_single(dev, map, size, dir);
- dev_addr = 0;
- }
- return dev_addr;
+ if (dma_capable(dev, dev_addr, size))
+ return dev_addr;
+
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+ swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+
+ return DMA_ERROR_CODE;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
@@ -444,7 +447,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr)) {
- swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
+ swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
return;
}
@@ -557,11 +560,12 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
start_dma_addr,
sg_phys(sg),
sg->length,
- dir);
+ dir, attrs);
if (map == SWIOTLB_MAP_ERROR) {
dev_warn(hwdev, "swiotlb buffer is full\n");
/* Don't panic here, we expect map_sg users
to do proper error handling. */
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
attrs);
sg_dma_len(sgl) = 0;
@@ -648,13 +652,6 @@ xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
-int
-xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
- return !dma_addr;
-}
-EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);
-
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 5ce878c51d03..3f0aee0a068b 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -362,7 +362,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
int err = 0;
int num_devs;
int domain, bus, slot, func;
- int substate;
+ unsigned int substate;
int i, len;
char state_str[64];
char dev_str[64];
@@ -395,10 +395,8 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
"configuration");
goto out;
}
- err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, state_str,
- "%d", &substate);
- if (err != 1)
- substate = XenbusStateUnknown;
+ substate = xenbus_read_unsigned(pdev->xdev->nodename, state_str,
+ XenbusStateUnknown);
switch (substate) {
case XenbusStateInitialising:
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 1e8be12ebb55..6c0ead4be784 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -538,6 +538,8 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
nonseekable_open(inode, filp);
+ filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
+
u = kzalloc(sizeof(*u), GFP_KERNEL);
if (u == NULL)
return -ENOMEM;
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 33a31cfef55d..4bdf654041e9 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -702,7 +702,7 @@ device_initcall(xenbus_probe_initcall);
*/
static int __init xenstored_local_init(void)
{
- int err = 0;
+ int err = -ENOMEM;
unsigned long page = 0;
struct evtchn_alloc_unbound alloc_unbound;
@@ -826,7 +826,7 @@ static int __init xenbus_init(void)
* Create xenfs mountpoint in /proc for compatibility with
* utilities that expect to find "xenbus" under "/proc/xen".
*/
- proc_mkdir("xen", NULL);
+ proc_create_mount_point("xen");
#endif
out_error:
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 04f7f85a5edf..37929df829a3 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -224,13 +224,7 @@ static int read_frontend_details(struct xenbus_device *xendev)
int xenbus_dev_is_online(struct xenbus_device *dev)
{
- int rc, val;
-
- rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val);
- if (rc != 1)
- val = 0; /* no online node present */
-
- return val;
+ return !!xenbus_read_unsigned(dev->nodename, "online", 0);
}
EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 22f7cd711c57..6afb993c5809 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -559,6 +559,21 @@ int xenbus_scanf(struct xenbus_transaction t,
}
EXPORT_SYMBOL_GPL(xenbus_scanf);
+/* Read an (optional) unsigned value. */
+unsigned int xenbus_read_unsigned(const char *dir, const char *node,
+ unsigned int default_val)
+{
+ unsigned int val;
+ int ret;
+
+ ret = xenbus_scanf(XBT_NIL, dir, node, "%u", &val);
+ if (ret <= 0)
+ val = default_val;
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(xenbus_read_unsigned);
+
/* Single printf and write: returns -errno or 0. */
int xenbus_printf(struct xenbus_transaction t,
const char *dir, const char *node, const char *fmt, ...)
@@ -672,7 +687,7 @@ static bool xen_strict_xenbus_quirk(void)
}
static void xs_reset_watches(void)
{
- int err, supported = 0;
+ int err;
if (!xen_hvm_domain() || xen_initial_domain())
return;
@@ -680,9 +695,8 @@ static void xs_reset_watches(void)
if (xen_strict_xenbus_quirk())
return;
- err = xenbus_scanf(XBT_NIL, "control",
- "platform-feature-xs_reset_watches", "%d", &supported);
- if (err != 1 || !supported)
+ if (!xenbus_read_unsigned("control",
+ "platform-feature-xs_reset_watches", 0))
return;
err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
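
The new xenbus_read_unsigned() helper added above wraps the recurring "xenbus_scanf() or fall back to a default" pattern seen at the converted call sites: it returns the parsed value, or default_val when the node is absent or unreadable. Minimal usage sketch; the "feature-foo" node and the surrounding names are hypothetical:

struct xenfoo_info { unsigned int feature_foo; };

static void xenfoo_read_features(struct xenbus_device *dev,
				 struct xenfoo_info *info)
{
	/* 0 is returned when "feature-foo" is missing or cannot be parsed */
	info->feature_foo = xenbus_read_unsigned(dev->otherend,
						 "feature-foo", 0);
}
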
diff --git a/fs/Kconfig b/fs/Kconfig
index 4bd03a2b0518..8e9e5f4104f4 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -55,7 +55,6 @@ config FS_DAX_PMD
depends on FS_DAX
depends on ZONE_DEVICE
depends on TRANSPARENT_HUGEPAGE
- depends on BROKEN
endif # BLOCK
diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig
index 92348faf9865..f514978f6688 100644
--- a/fs/crypto/Kconfig
+++ b/fs/crypto/Kconfig
@@ -8,9 +8,7 @@ config FS_ENCRYPTION
select CRYPTO_XTS
select CRYPTO_CTS
select CRYPTO_CTR
- select CRYPTO_SHA256
select KEYS
- select ENCRYPTED_KEYS
help
Enable encryption of files and directories. This
feature is similar to ecryptfs, but it is more memory
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 98f87fe8f186..ac8e4f6a3773 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -27,7 +27,7 @@
#include <linux/bio.h>
#include <linux/dcache.h>
#include <linux/namei.h>
-#include <linux/fscrypto.h>
+#include "fscrypt_private.h"
static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;
@@ -63,7 +63,7 @@ void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
unsigned long flags;
- if (ctx->flags & FS_WRITE_PATH_FL && ctx->w.bounce_page) {
+ if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
ctx->w.bounce_page = NULL;
}
@@ -88,7 +88,7 @@ EXPORT_SYMBOL(fscrypt_release_ctx);
* Return: An allocated and initialized encryption context on success; error
* value or NULL otherwise.
*/
-struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
+struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
{
struct fscrypt_ctx *ctx = NULL;
struct fscrypt_info *ci = inode->i_crypt_info;
@@ -121,7 +121,7 @@ struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
} else {
ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
}
- ctx->flags &= ~FS_WRITE_PATH_FL;
+ ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);
@@ -146,9 +146,10 @@ typedef enum {
FS_ENCRYPT,
} fscrypt_direction_t;
-static int do_page_crypto(struct inode *inode,
- fscrypt_direction_t rw, pgoff_t index,
+static int do_page_crypto(const struct inode *inode,
+ fscrypt_direction_t rw, u64 lblk_num,
struct page *src_page, struct page *dest_page,
+ unsigned int len, unsigned int offs,
gfp_t gfp_flags)
{
struct {
@@ -162,6 +163,8 @@ static int do_page_crypto(struct inode *inode,
struct crypto_skcipher *tfm = ci->ci_ctfm;
int res = 0;
+ BUG_ON(len == 0);
+
req = skcipher_request_alloc(tfm, gfp_flags);
if (!req) {
printk_ratelimited(KERN_ERR
@@ -175,14 +178,14 @@ static int do_page_crypto(struct inode *inode,
page_crypt_complete, &ecr);
BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
- xts_tweak.index = cpu_to_le64(index);
+ xts_tweak.index = cpu_to_le64(lblk_num);
memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));
sg_init_table(&dst, 1);
- sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
+ sg_set_page(&dst, dest_page, len, offs);
sg_init_table(&src, 1);
- sg_set_page(&src, src_page, PAGE_SIZE, 0);
- skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, &xts_tweak);
+ sg_set_page(&src, src_page, len, offs);
+ skcipher_request_set_crypt(req, &src, &dst, len, &xts_tweak);
if (rw == FS_DECRYPT)
res = crypto_skcipher_decrypt(req);
else
@@ -207,34 +210,66 @@ static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
if (ctx->w.bounce_page == NULL)
return ERR_PTR(-ENOMEM);
- ctx->flags |= FS_WRITE_PATH_FL;
+ ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
return ctx->w.bounce_page;
}
/**
* fscrypt_encrypt_page() - Encrypts a page
- * @inode: The inode for which the encryption should take place
- * @plaintext_page: The page to encrypt. Must be locked.
- * @gfp_flags: The gfp flag for memory allocation
+ * @inode: The inode for which the encryption should take place
+ * @page: The page to encrypt. Must be locked for bounce-page
+ * encryption.
+ * @len: Length of data to encrypt in @page and encrypted
+ * data in returned page.
+ * @offs: Offset of data within @page and returned
+ * page holding encrypted data.
+ * @lblk_num: Logical block number. This must be unique for multiple
+ * calls with same inode, except when overwriting
+ * previously written data.
+ * @gfp_flags: The gfp flag for memory allocation
*
- * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
- * encryption context.
+ * Encrypts @page using the ctx encryption context. Performs encryption
+ * either in-place or into a newly allocated bounce page.
+ * Called on the page write path.
*
- * Called on the page write path. The caller must call
+ * Bounce page allocation is the default.
+ * In this case, the contents of @page are encrypted and stored in an
+ * allocated bounce page. @page has to be locked and the caller must call
* fscrypt_restore_control_page() on the returned ciphertext page to
* release the bounce buffer and the encryption context.
*
- * Return: An allocated page with the encrypted content on success. Else, an
+ * In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
+ * fscrypt_operations. Here, the input-page is returned with its content
+ * encrypted.
+ *
+ * Return: A page with the encrypted content on success. Else, an
* error value or NULL.
*/
-struct page *fscrypt_encrypt_page(struct inode *inode,
- struct page *plaintext_page, gfp_t gfp_flags)
+struct page *fscrypt_encrypt_page(const struct inode *inode,
+ struct page *page,
+ unsigned int len,
+ unsigned int offs,
+ u64 lblk_num, gfp_t gfp_flags)
+
{
struct fscrypt_ctx *ctx;
- struct page *ciphertext_page = NULL;
+ struct page *ciphertext_page = page;
int err;
- BUG_ON(!PageLocked(plaintext_page));
+ BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);
+
+ if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
+ /* with inplace-encryption we just encrypt the page */
+ err = do_page_crypto(inode, FS_ENCRYPT, lblk_num,
+ page, ciphertext_page,
+ len, offs, gfp_flags);
+ if (err)
+ return ERR_PTR(err);
+
+ return ciphertext_page;
+ }
+
+ BUG_ON(!PageLocked(page));
ctx = fscrypt_get_ctx(inode, gfp_flags);
if (IS_ERR(ctx))
@@ -245,10 +280,10 @@ struct page *fscrypt_encrypt_page(struct inode *inode,
if (IS_ERR(ciphertext_page))
goto errout;
- ctx->w.control_page = plaintext_page;
- err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
- plaintext_page, ciphertext_page,
- gfp_flags);
+ ctx->w.control_page = page;
+ err = do_page_crypto(inode, FS_ENCRYPT, lblk_num,
+ page, ciphertext_page,
+ len, offs, gfp_flags);
if (err) {
ciphertext_page = ERR_PTR(err);
goto errout;
@@ -265,8 +300,13 @@ errout:
EXPORT_SYMBOL(fscrypt_encrypt_page);
/**
- * f2crypt_decrypt_page() - Decrypts a page in-place
- * @page: The page to decrypt. Must be locked.
+ * fscrypt_decrypt_page() - Decrypts a page in-place
+ * @inode: The corresponding inode for the page to decrypt.
+ * @page: The page to decrypt. Must be locked in case
+ * it is a writeback page (FS_CFLG_OWN_PAGES unset).
+ * @len: Number of bytes in @page to be decrypted.
+ * @offs: Start of data in @page.
+ * @lblk_num: Logical block number.
*
* Decrypts page in-place using the ctx encryption context.
*
@@ -274,16 +314,18 @@ EXPORT_SYMBOL(fscrypt_encrypt_page);
*
* Return: Zero on success, non-zero otherwise.
*/
-int fscrypt_decrypt_page(struct page *page)
+int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
+ unsigned int len, unsigned int offs, u64 lblk_num)
{
- BUG_ON(!PageLocked(page));
+ if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
+ BUG_ON(!PageLocked(page));
- return do_page_crypto(page->mapping->host,
- FS_DECRYPT, page->index, page, page, GFP_NOFS);
+ return do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page, len,
+ offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);
-int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
+int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
{
struct fscrypt_ctx *ctx;
@@ -306,7 +348,7 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
while (len--) {
err = do_page_crypto(inode, FS_ENCRYPT, lblk,
ZERO_PAGE(0), ciphertext_page,
- GFP_NOFS);
+ PAGE_SIZE, 0, GFP_NOFS);
if (err)
goto errout;
@@ -414,7 +456,8 @@ static void completion_pages(struct work_struct *work)
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
- int ret = fscrypt_decrypt_page(page);
+ int ret = fscrypt_decrypt_page(page->mapping->host, page,
+ PAGE_SIZE, 0, page->index);
if (ret) {
WARN_ON_ONCE(1);
@@ -482,17 +525,22 @@ static void fscrypt_destroy(void)
/**
* fscrypt_initialize() - allocate major buffers for fs encryption.
+ * @cop_flags: fscrypt operations flags
*
* We only call this when we start accessing encrypted files, since it
* results in memory getting allocated that wouldn't otherwise be used.
*
* Return: Zero on success, non-zero otherwise.
*/
-int fscrypt_initialize(void)
+int fscrypt_initialize(unsigned int cop_flags)
{
int i, res = -ENOMEM;
- if (fscrypt_bounce_page_pool)
+ /*
+ * No need to allocate a bounce page pool if there already is one or
+ * this FS won't use it.
+ */
+ if (cop_flags & FS_CFLG_OWN_PAGES || fscrypt_bounce_page_pool)
return 0;
mutex_lock(&fscrypt_init_mutex);
@@ -521,7 +569,6 @@ fail:
mutex_unlock(&fscrypt_init_mutex);
return res;
}
-EXPORT_SYMBOL(fscrypt_initialize);
/**
* fscrypt_init() - Set up for fs encryption.
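
The rewritten kernel-doc above describes the two modes of the extended fscrypt_encrypt_page(): the default bounce-page path, where the caller later calls fscrypt_restore_control_page() on the returned page, and in-place encryption when the filesystem sets FS_CFLG_OWN_PAGES. A sketch of the default path using the new length/offset/logical-block arguments; the surrounding writeback context is hypothetical and a block size of PAGE_SIZE is assumed:

static struct page *encrypt_for_writeback(struct inode *inode,
					  struct page *page)
{
	struct page *ciphertext_page;

	ciphertext_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
					       page->index, GFP_NOFS);
	if (IS_ERR(ciphertext_page))
		return ciphertext_page;

	/*
	 * Submit I/O against ciphertext_page; once the write completes,
	 * fscrypt_restore_control_page(ciphertext_page) releases the
	 * bounce buffer and its encryption context.
	 */
	return ciphertext_page;
}
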
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 9b774f4b50c8..56ad9d195f18 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -12,7 +12,7 @@
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
-#include <linux/fscrypto.h>
+#include "fscrypt_private.h"
/**
* fname_crypt_complete() - completion callback for filename crypto
@@ -209,7 +209,7 @@ static int digest_decode(const char *src, int len, char *dst)
return cp - dst;
}
-u32 fscrypt_fname_encrypted_size(struct inode *inode, u32 ilen)
+u32 fscrypt_fname_encrypted_size(const struct inode *inode, u32 ilen)
{
int padding = 32;
struct fscrypt_info *ci = inode->i_crypt_info;
@@ -227,7 +227,7 @@ EXPORT_SYMBOL(fscrypt_fname_encrypted_size);
* Allocates an output buffer that is sufficient for the crypto operation
* specified by the context and the direction.
*/
-int fscrypt_fname_alloc_buffer(struct inode *inode,
+int fscrypt_fname_alloc_buffer(const struct inode *inode,
u32 ilen, struct fscrypt_str *crypto_str)
{
unsigned int olen = fscrypt_fname_encrypted_size(inode, ilen);
@@ -350,7 +350,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
fname->disk_name.len = iname->len;
return 0;
}
- ret = get_crypt_info(dir);
+ ret = fscrypt_get_crypt_info(dir);
if (ret && ret != -EOPNOTSUPP)
return ret;
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
new file mode 100644
index 000000000000..aeab032d7d35
--- /dev/null
+++ b/fs/crypto/fscrypt_private.h
@@ -0,0 +1,93 @@
+/*
+ * fscrypt_private.h
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains encryption key functions.
+ *
+ * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
+ */
+
+#ifndef _FSCRYPT_PRIVATE_H
+#define _FSCRYPT_PRIVATE_H
+
+#include <linux/fscrypto.h>
+
+#define FS_FNAME_CRYPTO_DIGEST_SIZE 32
+
+/* Encryption parameters */
+#define FS_XTS_TWEAK_SIZE 16
+#define FS_AES_128_ECB_KEY_SIZE 16
+#define FS_AES_256_GCM_KEY_SIZE 32
+#define FS_AES_256_CBC_KEY_SIZE 32
+#define FS_AES_256_CTS_KEY_SIZE 32
+#define FS_AES_256_XTS_KEY_SIZE 64
+#define FS_MAX_KEY_SIZE 64
+
+#define FS_KEY_DESC_PREFIX "fscrypt:"
+#define FS_KEY_DESC_PREFIX_SIZE 8
+
+#define FS_KEY_DERIVATION_NONCE_SIZE 16
+
+/**
+ * Encryption context for inode
+ *
+ * Protector format:
+ * 1 byte: Protector format (1 = this version)
+ * 1 byte: File contents encryption mode
+ * 1 byte: File names encryption mode
+ * 1 byte: Flags
+ * 8 bytes: Master Key descriptor
+ * 16 bytes: Encryption Key derivation nonce
+ */
+struct fscrypt_context {
+ u8 format;
+ u8 contents_encryption_mode;
+ u8 filenames_encryption_mode;
+ u8 flags;
+ u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+ u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
+} __packed;
+
+#define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1
+
+/* This is passed in from userspace into the kernel keyring */
+struct fscrypt_key {
+ u32 mode;
+ u8 raw[FS_MAX_KEY_SIZE];
+ u32 size;
+} __packed;
+
+/*
+ * A pointer to this structure is stored in the file system's in-core
+ * representation of an inode.
+ */
+struct fscrypt_info {
+ u8 ci_data_mode;
+ u8 ci_filename_mode;
+ u8 ci_flags;
+ struct crypto_skcipher *ci_ctfm;
+ struct key *ci_keyring_key;
+ u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
+};
+
+#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
+#define FS_CTX_HAS_BOUNCE_BUFFER_FL 0x00000002
+
+struct fscrypt_completion_result {
+ struct completion completion;
+ int res;
+};
+
+#define DECLARE_FS_COMPLETION_RESULT(ecr) \
+ struct fscrypt_completion_result ecr = { \
+ COMPLETION_INITIALIZER((ecr).completion), 0 }
+
+
+/* crypto.c */
+int fscrypt_initialize(unsigned int cop_flags);
+
+/* keyinfo.c */
+extern int fscrypt_get_crypt_info(struct inode *);
+
+#endif /* _FSCRYPT_PRIVATE_H */
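
The on-disk layout documented in the new header adds up to 28 bytes (1 + 1 + 1 + 1 + 8 + 16), taking FS_KEY_DESCRIPTOR_SIZE to be the 8-byte "Master Key descriptor" the comment describes, so a filesystem embedding the context can assert the packed size at build time. One-line sketch:

static inline void fscrypt_context_size_check(void)
{
	/* 1 + 1 + 1 + 1 + 8 + 16 bytes, matching the layout comment above */
	BUILD_BUG_ON(sizeof(struct fscrypt_context) != 28);
}
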
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 67fb6d8876d0..6eeea1dcba41 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -10,7 +10,7 @@
#include <keys/user-type.h>
#include <linux/scatterlist.h>
-#include <linux/fscrypto.h>
+#include "fscrypt_private.h"
static void derive_crypt_complete(struct crypto_async_request *req, int rc)
{
@@ -178,7 +178,7 @@ static void put_crypt_info(struct fscrypt_info *ci)
kmem_cache_free(fscrypt_info_cachep, ci);
}
-int get_crypt_info(struct inode *inode)
+int fscrypt_get_crypt_info(struct inode *inode)
{
struct fscrypt_info *crypt_info;
struct fscrypt_context ctx;
@@ -188,7 +188,7 @@ int get_crypt_info(struct inode *inode)
u8 *raw_key = NULL;
int res;
- res = fscrypt_initialize();
+ res = fscrypt_initialize(inode->i_sb->s_cop->flags);
if (res)
return res;
@@ -327,7 +327,7 @@ int fscrypt_get_encryption_info(struct inode *inode)
(ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
(1 << KEY_FLAG_REVOKED) |
(1 << KEY_FLAG_DEAD)))))
- return get_crypt_info(inode);
+ return fscrypt_get_crypt_info(inode);
return 0;
}
EXPORT_SYMBOL(fscrypt_get_encryption_info);
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 6865663aac69..6ed7c2eebeec 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -10,8 +10,8 @@
#include <linux/random.h>
#include <linux/string.h>
-#include <linux/fscrypto.h>
#include <linux/mount.h>
+#include "fscrypt_private.h"
static int inode_has_encryption_context(struct inode *inode)
{
@@ -93,16 +93,19 @@ static int create_encryption_context_from_policy(struct inode *inode,
return inode->i_sb->s_cop->set_context(inode, &ctx, sizeof(ctx), NULL);
}
-int fscrypt_process_policy(struct file *filp,
- const struct fscrypt_policy *policy)
+int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg)
{
+ struct fscrypt_policy policy;
struct inode *inode = file_inode(filp);
int ret;
+ if (copy_from_user(&policy, arg, sizeof(policy)))
+ return -EFAULT;
+
if (!inode_owner_or_capable(inode))
return -EACCES;
- if (policy->version != 0)
+ if (policy.version != 0)
return -EINVAL;
ret = mnt_want_write_file(filp);
@@ -120,9 +123,9 @@ int fscrypt_process_policy(struct file *filp,
ret = -ENOTEMPTY;
else
ret = create_encryption_context_from_policy(inode,
- policy);
+ &policy);
} else if (!is_encryption_context_consistent_with_policy(inode,
- policy)) {
+ &policy)) {
printk(KERN_WARNING
"%s: Policy inconsistent with encryption context\n",
__func__);
@@ -134,11 +137,13 @@ int fscrypt_process_policy(struct file *filp,
mnt_drop_write_file(filp);
return ret;
}
-EXPORT_SYMBOL(fscrypt_process_policy);
+EXPORT_SYMBOL(fscrypt_ioctl_set_policy);
-int fscrypt_get_policy(struct inode *inode, struct fscrypt_policy *policy)
+int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
{
+ struct inode *inode = file_inode(filp);
struct fscrypt_context ctx;
+ struct fscrypt_policy policy;
int res;
if (!inode->i_sb->s_cop->get_context ||
@@ -151,15 +156,18 @@ int fscrypt_get_policy(struct inode *inode, struct fscrypt_policy *policy)
if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
return -EINVAL;
- policy->version = 0;
- policy->contents_encryption_mode = ctx.contents_encryption_mode;
- policy->filenames_encryption_mode = ctx.filenames_encryption_mode;
- policy->flags = ctx.flags;
- memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor,
+ policy.version = 0;
+ policy.contents_encryption_mode = ctx.contents_encryption_mode;
+ policy.filenames_encryption_mode = ctx.filenames_encryption_mode;
+ policy.flags = ctx.flags;
+ memcpy(policy.master_key_descriptor, ctx.master_key_descriptor,
FS_KEY_DESCRIPTOR_SIZE);
+
+ if (copy_to_user(arg, &policy, sizeof(policy)))
+ return -EFAULT;
return 0;
}
-EXPORT_SYMBOL(fscrypt_get_policy);
+EXPORT_SYMBOL(fscrypt_ioctl_get_policy);
int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
{
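
With the renamed entry points above taking the __user pointer directly (the copy_from_user/copy_to_user now live inside fscrypt), a filesystem's ioctl handler reduces to forwarding the argument. A hedged sketch of such a caller; the wrapper name is hypothetical, while FS_IOC_SET/GET_ENCRYPTION_POLICY are the standard encryption-policy ioctls:

static long myfs_crypt_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_SET_ENCRYPTION_POLICY:
		return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
	case FS_IOC_GET_ENCRYPTION_POLICY:
		return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}
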
diff --git a/fs/dax.c b/fs/dax.c
index 6916ed37d463..5ae8e11ad786 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -34,25 +34,11 @@
#include <linux/iomap.h>
#include "internal.h"
-/*
- * We use lowest available bit in exceptional entry for locking, other two
- * bits to determine entry type. In total 3 special bits.
- */
-#define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 3)
-#define RADIX_DAX_PTE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
-#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
-#define RADIX_DAX_TYPE_MASK (RADIX_DAX_PTE | RADIX_DAX_PMD)
-#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_TYPE_MASK)
-#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT))
-#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
- RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE) | \
- RADIX_TREE_EXCEPTIONAL_ENTRY))
-
/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
-wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
+static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
static int __init init_dax_wait_table(void)
{
@@ -64,14 +50,6 @@ static int __init init_dax_wait_table(void)
}
fs_initcall(init_dax_wait_table);
-static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
- pgoff_t index)
-{
- unsigned long hash = hash_long((unsigned long)mapping ^ index,
- DAX_WAIT_TABLE_BITS);
- return wait_table + hash;
-}
-
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
struct request_queue *q = bdev->bd_queue;
@@ -98,209 +76,52 @@ static void dax_unmap_atomic(struct block_device *bdev,
blk_queue_exit(bdev->bd_queue);
}
-struct page *read_dax_sector(struct block_device *bdev, sector_t n)
+static int dax_is_pmd_entry(void *entry)
{
- struct page *page = alloc_pages(GFP_KERNEL, 0);
- struct blk_dax_ctl dax = {
- .size = PAGE_SIZE,
- .sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
- };
- long rc;
-
- if (!page)
- return ERR_PTR(-ENOMEM);
-
- rc = dax_map_atomic(bdev, &dax);
- if (rc < 0)
- return ERR_PTR(rc);
- memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
- dax_unmap_atomic(bdev, &dax);
- return page;
+ return (unsigned long)entry & RADIX_DAX_PMD;
}
-static bool buffer_written(struct buffer_head *bh)
+static int dax_is_pte_entry(void *entry)
{
- return buffer_mapped(bh) && !buffer_unwritten(bh);
+ return !((unsigned long)entry & RADIX_DAX_PMD);
}
-/*
- * When ext4 encounters a hole, it returns without modifying the buffer_head
- * which means that we can't trust b_size. To cope with this, we set b_state
- * to 0 before calling get_block and, if any bit is set, we know we can trust
- * b_size. Unfortunate, really, since ext4 knows precisely how long a hole is
- * and would save us time calling get_block repeatedly.
- */
-static bool buffer_size_valid(struct buffer_head *bh)
+static int dax_is_zero_entry(void *entry)
{
- return bh->b_state != 0;
+ return (unsigned long)entry & RADIX_DAX_HZP;
}
-
-static sector_t to_sector(const struct buffer_head *bh,
- const struct inode *inode)
+static int dax_is_empty_entry(void *entry)
{
- sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
-
- return sector;
+ return (unsigned long)entry & RADIX_DAX_EMPTY;
}
-static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
- loff_t start, loff_t end, get_block_t get_block,
- struct buffer_head *bh)
+struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
- loff_t pos = start, max = start, bh_max = start;
- bool hole = false;
- struct block_device *bdev = NULL;
- int rw = iov_iter_rw(iter), rc;
- long map_len = 0;
+ struct page *page = alloc_pages(GFP_KERNEL, 0);
struct blk_dax_ctl dax = {
- .addr = ERR_PTR(-EIO),
+ .size = PAGE_SIZE,
+ .sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
};
- unsigned blkbits = inode->i_blkbits;
- sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
- >> blkbits;
-
- if (rw == READ)
- end = min(end, i_size_read(inode));
-
- while (pos < end) {
- size_t len;
- if (pos == max) {
- long page = pos >> PAGE_SHIFT;
- sector_t block = page << (PAGE_SHIFT - blkbits);
- unsigned first = pos - (block << blkbits);
- long size;
-
- if (pos == bh_max) {
- bh->b_size = PAGE_ALIGN(end - pos);
- bh->b_state = 0;
- rc = get_block(inode, block, bh, rw == WRITE);
- if (rc)
- break;
- if (!buffer_size_valid(bh))
- bh->b_size = 1 << blkbits;
- bh_max = pos - first + bh->b_size;
- bdev = bh->b_bdev;
- /*
- * We allow uninitialized buffers for writes
- * beyond EOF as those cannot race with faults
- */
- WARN_ON_ONCE(
- (buffer_new(bh) && block < file_blks) ||
- (rw == WRITE && buffer_unwritten(bh)));
- } else {
- unsigned done = bh->b_size -
- (bh_max - (pos - first));
- bh->b_blocknr += done >> blkbits;
- bh->b_size -= done;
- }
-
- hole = rw == READ && !buffer_written(bh);
- if (hole) {
- size = bh->b_size - first;
- } else {
- dax_unmap_atomic(bdev, &dax);
- dax.sector = to_sector(bh, inode);
- dax.size = bh->b_size;
- map_len = dax_map_atomic(bdev, &dax);
- if (map_len < 0) {
- rc = map_len;
- break;
- }
- dax.addr += first;
- size = map_len - first;
- }
- /*
- * pos + size is one past the last offset for IO,
- * so pos + size can overflow loff_t at extreme offsets.
- * Cast to u64 to catch this and get the true minimum.
- */
- max = min_t(u64, pos + size, end);
- }
-
- if (iov_iter_rw(iter) == WRITE) {
- len = copy_from_iter_pmem(dax.addr, max - pos, iter);
- } else if (!hole)
- len = copy_to_iter((void __force *) dax.addr, max - pos,
- iter);
- else
- len = iov_iter_zero(max - pos, iter);
-
- if (!len) {
- rc = -EFAULT;
- break;
- }
+ long rc;
- pos += len;
- if (!IS_ERR(dax.addr))
- dax.addr += len;
- }
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+ rc = dax_map_atomic(bdev, &dax);
+ if (rc < 0)
+ return ERR_PTR(rc);
+ memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
dax_unmap_atomic(bdev, &dax);
-
- return (pos == start) ? rc : pos - start;
-}
-
-/**
- * dax_do_io - Perform I/O to a DAX file
- * @iocb: The control block for this I/O
- * @inode: The file which the I/O is directed at
- * @iter: The addresses to do I/O from or to
- * @get_block: The filesystem method used to translate file offsets to blocks
- * @end_io: A filesystem callback for I/O completion
- * @flags: See below
- *
- * This function uses the same locking scheme as do_blockdev_direct_IO:
- * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
- * caller for writes. For reads, we take and release the i_mutex ourselves.
- * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
- * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
- * is in progress.
- */
-ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
- struct iov_iter *iter, get_block_t get_block,
- dio_iodone_t end_io, int flags)
-{
- struct buffer_head bh;
- ssize_t retval = -EINVAL;
- loff_t pos = iocb->ki_pos;
- loff_t end = pos + iov_iter_count(iter);
-
- memset(&bh, 0, sizeof(bh));
- bh.b_bdev = inode->i_sb->s_bdev;
-
- if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
- inode_lock(inode);
-
- /* Protects against truncate */
- if (!(flags & DIO_SKIP_DIO_COUNT))
- inode_dio_begin(inode);
-
- retval = dax_io(inode, iter, pos, end, get_block, &bh);
-
- if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
- inode_unlock(inode);
-
- if (end_io) {
- int err;
-
- err = end_io(iocb, pos, retval, bh.b_private);
- if (err)
- retval = err;
- }
-
- if (!(flags & DIO_SKIP_DIO_COUNT))
- inode_dio_end(inode);
- return retval;
+ return page;
}
-EXPORT_SYMBOL_GPL(dax_do_io);
/*
* DAX radix tree locking
*/
struct exceptional_entry_key {
struct address_space *mapping;
- unsigned long index;
+ pgoff_t entry_start;
};
struct wait_exceptional_entry_queue {
@@ -308,6 +129,26 @@ struct wait_exceptional_entry_queue {
struct exceptional_entry_key key;
};
+static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
+ pgoff_t index, void *entry, struct exceptional_entry_key *key)
+{
+ unsigned long hash;
+
+ /*
+ * If 'entry' is a PMD, align the 'index' that we use for the wait
+ * queue to the start of that PMD. This ensures that all offsets in
+ * the range covered by the PMD map to the same bit lock.
+ */
+ if (dax_is_pmd_entry(entry))
+ index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);
+
+ key->mapping = mapping;
+ key->entry_start = index;
+
+ hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
+ return wait_table + hash;
+}
+
static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
int sync, void *keyp)
{
@@ -316,7 +157,7 @@ static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
container_of(wait, struct wait_exceptional_entry_queue, wait);
if (key->mapping != ewait->key.mapping ||
- key->index != ewait->key.index)
+ key->entry_start != ewait->key.entry_start)
return 0;
return autoremove_wake_function(wait, mode, sync, NULL);
}
@@ -372,24 +213,24 @@ static inline void *unlock_slot(struct address_space *mapping, void **slot)
static void *get_unlocked_mapping_entry(struct address_space *mapping,
pgoff_t index, void ***slotp)
{
- void *ret, **slot;
+ void *entry, **slot;
struct wait_exceptional_entry_queue ewait;
- wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);
+ wait_queue_head_t *wq;
init_wait(&ewait.wait);
ewait.wait.func = wake_exceptional_entry_func;
- ewait.key.mapping = mapping;
- ewait.key.index = index;
for (;;) {
- ret = __radix_tree_lookup(&mapping->page_tree, index, NULL,
+ entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
&slot);
- if (!ret || !radix_tree_exceptional_entry(ret) ||
+ if (!entry || !radix_tree_exceptional_entry(entry) ||
!slot_locked(mapping, slot)) {
if (slotp)
*slotp = slot;
- return ret;
+ return entry;
}
+
+ wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
prepare_to_wait_exclusive(wq, &ewait.wait,
TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&mapping->tree_lock);
@@ -399,52 +240,156 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping,
}
}
+static void put_locked_mapping_entry(struct address_space *mapping,
+ pgoff_t index, void *entry)
+{
+ if (!radix_tree_exceptional_entry(entry)) {
+ unlock_page(entry);
+ put_page(entry);
+ } else {
+ dax_unlock_mapping_entry(mapping, index);
+ }
+}
+
+/*
+ * Called when we are done with radix tree entry we looked up via
+ * get_unlocked_mapping_entry() and which we didn't lock in the end.
+ */
+static void put_unlocked_mapping_entry(struct address_space *mapping,
+ pgoff_t index, void *entry)
+{
+ if (!radix_tree_exceptional_entry(entry))
+ return;
+
+ /* We have to wake up next waiter for the radix tree entry lock */
+ dax_wake_mapping_entry_waiter(mapping, index, entry, false);
+}
+
/*
* Find radix tree entry at given index. If it points to a page, return with
* the page locked. If it points to the exceptional entry, return with the
* radix tree entry locked. If the radix tree doesn't contain given index,
* create empty exceptional entry for the index and return with it locked.
*
+ * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
+ * either return that locked entry or will return an error. This error will
+ * happen if there are any 4k entries (either zero pages or DAX entries)
+ * within the 2MiB range that we are requesting.
+ *
+ * We always favor 4k entries over 2MiB entries. There isn't a flow where we
+ * evict 4k entries in order to 'upgrade' them to a 2MiB entry. A 2MiB
+ * insertion will fail if it finds any 4k entries already in the tree, and a
+ * 4k insertion will cause an existing 2MiB entry to be unmapped and
+ * downgraded to 4k entries. This happens for both 2MiB huge zero pages as
+ * well as 2MiB empty entries.
+ *
+ * The exception to this downgrade path is for 2MiB DAX PMD entries that have
+ * real storage backing them. We will leave these real 2MiB DAX entries in
+ * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
+ *
* Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
* persistent memory the benefit is doubtful. We can add that later if we can
* show it helps.
*/
-static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index)
+static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
+ unsigned long size_flag)
{
- void *ret, **slot;
+ bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
+ void *entry, **slot;
restart:
spin_lock_irq(&mapping->tree_lock);
- ret = get_unlocked_mapping_entry(mapping, index, &slot);
+ entry = get_unlocked_mapping_entry(mapping, index, &slot);
+
+ if (entry) {
+ if (size_flag & RADIX_DAX_PMD) {
+ if (!radix_tree_exceptional_entry(entry) ||
+ dax_is_pte_entry(entry)) {
+ put_unlocked_mapping_entry(mapping, index,
+ entry);
+ entry = ERR_PTR(-EEXIST);
+ goto out_unlock;
+ }
+ } else { /* trying to grab a PTE entry */
+ if (radix_tree_exceptional_entry(entry) &&
+ dax_is_pmd_entry(entry) &&
+ (dax_is_zero_entry(entry) ||
+ dax_is_empty_entry(entry))) {
+ pmd_downgrade = true;
+ }
+ }
+ }
+
/* No entry for given index? Make sure radix tree is big enough. */
- if (!ret) {
+ if (!entry || pmd_downgrade) {
int err;
+ if (pmd_downgrade) {
+ /*
+ * Make sure 'entry' remains valid while we drop
+ * mapping->tree_lock.
+ */
+ entry = lock_slot(mapping, slot);
+ }
+
spin_unlock_irq(&mapping->tree_lock);
+ /*
+ * Besides huge zero pages the only other thing that gets
+ * downgraded are empty entries which don't need to be
+ * unmapped.
+ */
+ if (pmd_downgrade && dax_is_zero_entry(entry))
+ unmap_mapping_range(mapping,
+ (index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
+
err = radix_tree_preload(
mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
- if (err)
+ if (err) {
+ if (pmd_downgrade)
+ put_locked_mapping_entry(mapping, index, entry);
return ERR_PTR(err);
- ret = (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
- RADIX_DAX_ENTRY_LOCK);
+ }
spin_lock_irq(&mapping->tree_lock);
- err = radix_tree_insert(&mapping->page_tree, index, ret);
+
+ if (pmd_downgrade) {
+ radix_tree_delete(&mapping->page_tree, index);
+ mapping->nrexceptional--;
+ dax_wake_mapping_entry_waiter(mapping, index, entry,
+ true);
+ }
+
+ entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
+
+ err = __radix_tree_insert(&mapping->page_tree, index,
+ dax_radix_order(entry), entry);
radix_tree_preload_end();
if (err) {
spin_unlock_irq(&mapping->tree_lock);
- /* Someone already created the entry? */
- if (err == -EEXIST)
+ /*
+ * Someone already created the entry? This is a
+ * normal failure when inserting PMDs in a range
+ * that already contains PTEs. In that case we want
+ * to return -EEXIST immediately.
+ */
+ if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
goto restart;
+ /*
+ * Our insertion of a DAX PMD entry failed, most
+ * likely because it collided with a PTE sized entry
+ * at a different index in the PMD range. We haven't
+ * inserted anything into the radix tree and have no
+ * waiters to wake.
+ */
return ERR_PTR(err);
}
/* Good, we have inserted empty locked entry into the tree. */
mapping->nrexceptional++;
spin_unlock_irq(&mapping->tree_lock);
- return ret;
+ return entry;
}
/* Normal page in radix tree? */
- if (!radix_tree_exceptional_entry(ret)) {
- struct page *page = ret;
+ if (!radix_tree_exceptional_entry(entry)) {
+ struct page *page = entry;
get_page(page);
spin_unlock_irq(&mapping->tree_lock);
@@ -457,15 +402,26 @@ restart:
}
return page;
}
- ret = lock_slot(mapping, slot);
+ entry = lock_slot(mapping, slot);
+ out_unlock:
spin_unlock_irq(&mapping->tree_lock);
- return ret;
+ return entry;
}
+/*
+ * We do not necessarily hold the mapping->tree_lock when we call this
+ * function so it is possible that 'entry' is no longer a valid item in the
+ * radix tree. This is okay because all we really need to do is to find the
+ * correct waitqueue where tasks might be waiting for that old 'entry' and
+ * wake them.
+ */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
- pgoff_t index, bool wake_all)
+ pgoff_t index, void *entry, bool wake_all)
{
- wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);
+ struct exceptional_entry_key key;
+ wait_queue_head_t *wq;
+
+ wq = dax_entry_waitqueue(mapping, index, entry, &key);
/*
* Checking for locked entry and prepare_to_wait_exclusive() happens
@@ -473,54 +429,24 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping,
* So at this point all tasks that could have seen our entry locked
* must be in the waitqueue and the following check will see them.
*/
- if (waitqueue_active(wq)) {
- struct exceptional_entry_key key;
-
- key.mapping = mapping;
- key.index = index;
+ if (waitqueue_active(wq))
__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
- }
}
void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
{
- void *ret, **slot;
+ void *entry, **slot;
spin_lock_irq(&mapping->tree_lock);
- ret = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
- if (WARN_ON_ONCE(!ret || !radix_tree_exceptional_entry(ret) ||
+ entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
+ if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
!slot_locked(mapping, slot))) {
spin_unlock_irq(&mapping->tree_lock);
return;
}
unlock_slot(mapping, slot);
spin_unlock_irq(&mapping->tree_lock);
- dax_wake_mapping_entry_waiter(mapping, index, false);
-}
-
-static void put_locked_mapping_entry(struct address_space *mapping,
- pgoff_t index, void *entry)
-{
- if (!radix_tree_exceptional_entry(entry)) {
- unlock_page(entry);
- put_page(entry);
- } else {
- dax_unlock_mapping_entry(mapping, index);
- }
-}
-
-/*
- * Called when we are done with radix tree entry we looked up via
- * get_unlocked_mapping_entry() and which we didn't lock in the end.
- */
-static void put_unlocked_mapping_entry(struct address_space *mapping,
- pgoff_t index, void *entry)
-{
- if (!radix_tree_exceptional_entry(entry))
- return;
-
- /* We have to wake up next waiter for the radix tree entry lock */
- dax_wake_mapping_entry_waiter(mapping, index, false);
+ dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}
/*
@@ -547,7 +473,7 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
radix_tree_delete(&mapping->page_tree, index);
mapping->nrexceptional--;
spin_unlock_irq(&mapping->tree_lock);
- dax_wake_mapping_entry_waiter(mapping, index, true);
+ dax_wake_mapping_entry_waiter(mapping, index, entry, true);
return 1;
}
@@ -600,11 +526,17 @@ static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size
return 0;
}
-#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))
-
+/*
+ * By this point grab_mapping_entry() has ensured that we have a locked entry
+ * of the appropriate size so we don't have to worry about downgrading PMDs to
+ * PTEs. If we happen to be trying to insert a PTE and there is a PMD
+ * already in the tree, we will skip the insertion and just dirty the PMD as
+ * appropriate.
+ */
static void *dax_insert_mapping_entry(struct address_space *mapping,
struct vm_fault *vmf,
- void *entry, sector_t sector)
+ void *entry, sector_t sector,
+ unsigned long flags)
{
struct radix_tree_root *page_tree = &mapping->page_tree;
int error = 0;
@@ -627,22 +559,35 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
if (error)
return ERR_PTR(error);
+ } else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
+ /* replacing huge zero page with PMD block mapping */
+ unmap_mapping_range(mapping,
+ (vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
}
spin_lock_irq(&mapping->tree_lock);
- new_entry = (void *)((unsigned long)RADIX_DAX_ENTRY(sector, false) |
- RADIX_DAX_ENTRY_LOCK);
+ new_entry = dax_radix_locked_entry(sector, flags);
+
if (hole_fill) {
__delete_from_page_cache(entry, NULL);
/* Drop pagecache reference */
put_page(entry);
- error = radix_tree_insert(page_tree, index, new_entry);
+ error = __radix_tree_insert(page_tree, index,
+ dax_radix_order(new_entry), new_entry);
if (error) {
new_entry = ERR_PTR(error);
goto unlock;
}
mapping->nrexceptional++;
- } else {
+ } else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
+ /*
+ * Only swap our new entry into the radix tree if the current
+ * entry is a zero page or an empty entry. If a normal PTE or
+ * PMD entry is already in the tree, we leave it alone. This
+ * means that if we are trying to insert a PTE and the
+ * existing entry is a PMD, we will just leave the PMD in the
+ * tree and dirty it if necessary.
+ */
struct radix_tree_node *node;
void **slot;
void *ret;
@@ -674,7 +619,6 @@ static int dax_writeback_one(struct block_device *bdev,
struct address_space *mapping, pgoff_t index, void *entry)
{
struct radix_tree_root *page_tree = &mapping->page_tree;
- int type = RADIX_DAX_TYPE(entry);
struct radix_tree_node *node;
struct blk_dax_ctl dax;
void **slot;
@@ -695,13 +639,21 @@ static int dax_writeback_one(struct block_device *bdev,
if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
goto unlock;
- if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
+ if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
+ dax_is_zero_entry(entry))) {
ret = -EIO;
goto unlock;
}
- dax.sector = RADIX_DAX_SECTOR(entry);
- dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
+ /*
+ * Even if dax_writeback_mapping_range() was given a wbc->range_start
+ * in the middle of a PMD, the 'index' we are given will be aligned to
+ * the start index of the PMD, as will the sector we pull from
+ * 'entry'. This allows us to flush for PMD_SIZE and not have to
+ * worry about partial PMD writebacks.
+ */
+ dax.sector = dax_radix_sector(entry);
+ dax.size = PAGE_SIZE << dax_radix_order(entry);
spin_unlock_irq(&mapping->tree_lock);
/*
@@ -740,12 +692,11 @@ int dax_writeback_mapping_range(struct address_space *mapping,
struct block_device *bdev, struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
- pgoff_t start_index, end_index, pmd_index;
+ pgoff_t start_index, end_index;
pgoff_t indices[PAGEVEC_SIZE];
struct pagevec pvec;
bool done = false;
int i, ret = 0;
- void *entry;
if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
return -EIO;
@@ -755,15 +706,6 @@ int dax_writeback_mapping_range(struct address_space *mapping,
start_index = wbc->range_start >> PAGE_SHIFT;
end_index = wbc->range_end >> PAGE_SHIFT;
- pmd_index = DAX_PMD_INDEX(start_index);
-
- rcu_read_lock();
- entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
- rcu_read_unlock();
-
- /* see if the start of our range is covered by a PMD entry */
- if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
- start_index = pmd_index;
tag_pages_for_writeback(mapping, start_index, end_index);
@@ -808,7 +750,7 @@ static int dax_insert_mapping(struct address_space *mapping,
return PTR_ERR(dax.addr);
dax_unmap_atomic(bdev, &dax);
- ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
+ ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
if (IS_ERR(ret))
return PTR_ERR(ret);
*entryp = ret;
@@ -817,323 +759,6 @@ static int dax_insert_mapping(struct address_space *mapping,
}
/**
- * dax_fault - handle a page fault on a DAX file
- * @vma: The virtual memory area where the fault occurred
- * @vmf: The description of the fault
- * @get_block: The filesystem method used to translate file offsets to blocks
- *
- * When a page fault occurs, filesystems may call this helper in their
- * fault handler for DAX files. dax_fault() assumes the caller has done all
- * the necessary locking for the page fault to proceed successfully.
- */
-int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
- get_block_t get_block)
-{
- struct file *file = vma->vm_file;
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
- void *entry;
- struct buffer_head bh;
- unsigned long vaddr = (unsigned long)vmf->virtual_address;
- unsigned blkbits = inode->i_blkbits;
- sector_t block;
- pgoff_t size;
- int error;
- int major = 0;
-
- /*
- * Check whether offset isn't beyond end of file now. Caller is supposed
- * to hold locks serializing us with truncate / punch hole so this is
- * a reliable test.
- */
- size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (vmf->pgoff >= size)
- return VM_FAULT_SIGBUS;
-
- memset(&bh, 0, sizeof(bh));
- block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
- bh.b_bdev = inode->i_sb->s_bdev;
- bh.b_size = PAGE_SIZE;
-
- entry = grab_mapping_entry(mapping, vmf->pgoff);
- if (IS_ERR(entry)) {
- error = PTR_ERR(entry);
- goto out;
- }
-
- error = get_block(inode, block, &bh, 0);
- if (!error && (bh.b_size < PAGE_SIZE))
- error = -EIO; /* fs corruption? */
- if (error)
- goto unlock_entry;
-
- if (vmf->cow_page) {
- struct page *new_page = vmf->cow_page;
- if (buffer_written(&bh))
- error = copy_user_dax(bh.b_bdev, to_sector(&bh, inode),
- bh.b_size, new_page, vaddr);
- else
- clear_user_highpage(new_page, vaddr);
- if (error)
- goto unlock_entry;
- if (!radix_tree_exceptional_entry(entry)) {
- vmf->page = entry;
- return VM_FAULT_LOCKED;
- }
- vmf->entry = entry;
- return VM_FAULT_DAX_LOCKED;
- }
-
- if (!buffer_mapped(&bh)) {
- if (vmf->flags & FAULT_FLAG_WRITE) {
- error = get_block(inode, block, &bh, 1);
- count_vm_event(PGMAJFAULT);
- mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
- major = VM_FAULT_MAJOR;
- if (!error && (bh.b_size < PAGE_SIZE))
- error = -EIO;
- if (error)
- goto unlock_entry;
- } else {
- return dax_load_hole(mapping, entry, vmf);
- }
- }
-
- /* Filesystem should not return unwritten buffers to us! */
- WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
- error = dax_insert_mapping(mapping, bh.b_bdev, to_sector(&bh, inode),
- bh.b_size, &entry, vma, vmf);
- unlock_entry:
- put_locked_mapping_entry(mapping, vmf->pgoff, entry);
- out:
- if (error == -ENOMEM)
- return VM_FAULT_OOM | major;
- /* -EBUSY is fine, somebody else faulted on the same PTE */
- if ((error < 0) && (error != -EBUSY))
- return VM_FAULT_SIGBUS | major;
- return VM_FAULT_NOPAGE | major;
-}
-EXPORT_SYMBOL_GPL(dax_fault);
-
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
-/*
- * The 'colour' (ie low bits) within a PMD of a page offset. This comes up
- * more often than one might expect in the below function.
- */
-#define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
-
-static void __dax_dbg(struct buffer_head *bh, unsigned long address,
- const char *reason, const char *fn)
-{
- if (bh) {
- char bname[BDEVNAME_SIZE];
- bdevname(bh->b_bdev, bname);
- pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
- "length %zd fallback: %s\n", fn, current->comm,
- address, bname, bh->b_state, (u64)bh->b_blocknr,
- bh->b_size, reason);
- } else {
- pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
- current->comm, address, reason);
- }
-}
-
-#define dax_pmd_dbg(bh, address, reason) __dax_dbg(bh, address, reason, "dax_pmd")
-
-/**
- * dax_pmd_fault - handle a PMD fault on a DAX file
- * @vma: The virtual memory area where the fault occurred
- * @vmf: The description of the fault
- * @get_block: The filesystem method used to translate file offsets to blocks
- *
- * When a page fault occurs, filesystems may call this helper in their
- * pmd_fault handler for DAX files.
- */
-int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
- pmd_t *pmd, unsigned int flags, get_block_t get_block)
-{
- struct file *file = vma->vm_file;
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
- struct buffer_head bh;
- unsigned blkbits = inode->i_blkbits;
- unsigned long pmd_addr = address & PMD_MASK;
- bool write = flags & FAULT_FLAG_WRITE;
- struct block_device *bdev;
- pgoff_t size, pgoff;
- sector_t block;
- int result = 0;
- bool alloc = false;
-
- /* dax pmd mappings require pfn_t_devmap() */
- if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
- return VM_FAULT_FALLBACK;
-
- /* Fall back to PTEs if we're going to COW */
- if (write && !(vma->vm_flags & VM_SHARED)) {
- split_huge_pmd(vma, pmd, address);
- dax_pmd_dbg(NULL, address, "cow write");
- return VM_FAULT_FALLBACK;
- }
- /* If the PMD would extend outside the VMA */
- if (pmd_addr < vma->vm_start) {
- dax_pmd_dbg(NULL, address, "vma start unaligned");
- return VM_FAULT_FALLBACK;
- }
- if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
- dax_pmd_dbg(NULL, address, "vma end unaligned");
- return VM_FAULT_FALLBACK;
- }
-
- pgoff = linear_page_index(vma, pmd_addr);
- size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (pgoff >= size)
- return VM_FAULT_SIGBUS;
- /* If the PMD would cover blocks out of the file */
- if ((pgoff | PG_PMD_COLOUR) >= size) {
- dax_pmd_dbg(NULL, address,
- "offset + huge page size > file size");
- return VM_FAULT_FALLBACK;
- }
-
- memset(&bh, 0, sizeof(bh));
- bh.b_bdev = inode->i_sb->s_bdev;
- block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
-
- bh.b_size = PMD_SIZE;
-
- if (get_block(inode, block, &bh, 0) != 0)
- return VM_FAULT_SIGBUS;
-
- if (!buffer_mapped(&bh) && write) {
- if (get_block(inode, block, &bh, 1) != 0)
- return VM_FAULT_SIGBUS;
- alloc = true;
- WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
- }
-
- bdev = bh.b_bdev;
-
- /*
- * If the filesystem isn't willing to tell us the length of a hole,
- * just fall back to PTEs. Calling get_block 512 times in a loop
- * would be silly.
- */
- if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
- dax_pmd_dbg(&bh, address, "allocated block too small");
- return VM_FAULT_FALLBACK;
- }
-
- /*
- * If we allocated new storage, make sure no process has any
- * zero pages covering this hole
- */
- if (alloc) {
- loff_t lstart = pgoff << PAGE_SHIFT;
- loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */
-
- truncate_pagecache_range(inode, lstart, lend);
- }
-
- if (!write && !buffer_mapped(&bh)) {
- spinlock_t *ptl;
- pmd_t entry;
- struct page *zero_page = mm_get_huge_zero_page(vma->vm_mm);
-
- if (unlikely(!zero_page)) {
- dax_pmd_dbg(&bh, address, "no zero page");
- goto fallback;
- }
-
- ptl = pmd_lock(vma->vm_mm, pmd);
- if (!pmd_none(*pmd)) {
- spin_unlock(ptl);
- dax_pmd_dbg(&bh, address, "pmd already present");
- goto fallback;
- }
-
- dev_dbg(part_to_dev(bdev->bd_part),
- "%s: %s addr: %lx pfn: <zero> sect: %llx\n",
- __func__, current->comm, address,
- (unsigned long long) to_sector(&bh, inode));
-
- entry = mk_pmd(zero_page, vma->vm_page_prot);
- entry = pmd_mkhuge(entry);
- set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
- result = VM_FAULT_NOPAGE;
- spin_unlock(ptl);
- } else {
- struct blk_dax_ctl dax = {
- .sector = to_sector(&bh, inode),
- .size = PMD_SIZE,
- };
- long length = dax_map_atomic(bdev, &dax);
-
- if (length < 0) {
- dax_pmd_dbg(&bh, address, "dax-error fallback");
- goto fallback;
- }
- if (length < PMD_SIZE) {
- dax_pmd_dbg(&bh, address, "dax-length too small");
- dax_unmap_atomic(bdev, &dax);
- goto fallback;
- }
- if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
- dax_pmd_dbg(&bh, address, "pfn unaligned");
- dax_unmap_atomic(bdev, &dax);
- goto fallback;
- }
-
- if (!pfn_t_devmap(dax.pfn)) {
- dax_unmap_atomic(bdev, &dax);
- dax_pmd_dbg(&bh, address, "pfn not in memmap");
- goto fallback;
- }
- dax_unmap_atomic(bdev, &dax);
-
- /*
- * For PTE faults we insert a radix tree entry for reads, and
- * leave it clean. Then on the first write we dirty the radix
- * tree entry via the dax_pfn_mkwrite() path. This sequence
- * allows the dax_pfn_mkwrite() call to be simpler and avoid a
- * call into get_block() to translate the pgoff to a sector in
- * order to be able to create a new radix tree entry.
- *
- * The PMD path doesn't have an equivalent to
- * dax_pfn_mkwrite(), though, so for a read followed by a
- * write we traverse all the way through dax_pmd_fault()
- * twice. This means we can just skip inserting a radix tree
- * entry completely on the initial read and just wait until
- * the write to insert a dirty entry.
- */
- if (write) {
- /*
- * We should insert radix-tree entry and dirty it here.
- * For now this is broken...
- */
- }
-
- dev_dbg(part_to_dev(bdev->bd_part),
- "%s: %s addr: %lx pfn: %lx sect: %llx\n",
- __func__, current->comm, address,
- pfn_t_to_pfn(dax.pfn),
- (unsigned long long) dax.sector);
- result |= vmf_insert_pfn_pmd(vma, address, pmd,
- dax.pfn, write);
- }
-
- out:
- return result;
-
- fallback:
- count_vm_event(THP_FAULT_FALLBACK);
- result = VM_FAULT_FALLBACK;
- goto out;
-}
-EXPORT_SYMBOL_GPL(dax_pmd_fault);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
-/**
* dax_pfn_mkwrite - handle first write to DAX page
* @vma: The virtual memory area where the fault occurred
* @vmf: The description of the fault
@@ -1193,62 +818,14 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);
-/**
- * dax_zero_page_range - zero a range within a page of a DAX file
- * @inode: The file being truncated
- * @from: The file offset that is being truncated to
- * @length: The number of bytes to zero
- * @get_block: The filesystem method used to translate file offsets to blocks
- *
- * This function can be called by a filesystem when it is zeroing part of a
- * page in a DAX file. This is intended for hole-punch operations. If
- * you are truncating a file, the helper function dax_truncate_page() may be
- * more convenient.
- */
-int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
- get_block_t get_block)
-{
- struct buffer_head bh;
- pgoff_t index = from >> PAGE_SHIFT;
- unsigned offset = from & (PAGE_SIZE-1);
- int err;
-
- /* Block boundary? Nothing to do */
- if (!length)
- return 0;
- BUG_ON((offset + length) > PAGE_SIZE);
-
- memset(&bh, 0, sizeof(bh));
- bh.b_bdev = inode->i_sb->s_bdev;
- bh.b_size = PAGE_SIZE;
- err = get_block(inode, index, &bh, 0);
- if (err < 0 || !buffer_written(&bh))
- return err;
-
- return __dax_zero_page_range(bh.b_bdev, to_sector(&bh, inode),
- offset, length);
-}
-EXPORT_SYMBOL_GPL(dax_zero_page_range);
-
-/**
- * dax_truncate_page - handle a partial page being truncated in a DAX file
- * @inode: The file being truncated
- * @from: The file offset that is being truncated to
- * @get_block: The filesystem method used to translate file offsets to blocks
- *
- * Similar to block_truncate_page(), this function can be called by a
- * filesystem when it is truncating a DAX file to handle the partial page.
- */
-int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
+#ifdef CONFIG_FS_IOMAP
+static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
- unsigned length = PAGE_ALIGN(from) - from;
- return dax_zero_page_range(inode, from, length, get_block);
+ return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}
-EXPORT_SYMBOL_GPL(dax_truncate_page);
-#ifdef CONFIG_FS_IOMAP
static loff_t
-iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
struct iomap *iomap)
{
struct iov_iter *iter = data;
@@ -1272,8 +849,7 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
struct blk_dax_ctl dax = { 0 };
ssize_t map_len;
- dax.sector = iomap->blkno +
- (((pos & PAGE_MASK) - iomap->offset) >> 9);
+ dax.sector = dax_iomap_sector(iomap, pos);
dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
map_len = dax_map_atomic(iomap->bdev, &dax);
if (map_len < 0) {
@@ -1305,7 +881,7 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
}
/**
- * iomap_dax_rw - Perform I/O to a DAX file
+ * dax_iomap_rw - Perform I/O to a DAX file
* @iocb: The control block for this I/O
* @iter: The addresses to do I/O from or to
* @ops: iomap ops passed from the file system
@@ -1315,7 +891,7 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
* and evicting any page cache pages in the region under I/O.
*/
ssize_t
-iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
+dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
struct iomap_ops *ops)
{
struct address_space *mapping = iocb->ki_filp->f_mapping;
@@ -1345,7 +921,7 @@ iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
while (iov_iter_count(iter)) {
ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
- iter, iomap_dax_actor);
+ iter, dax_iomap_actor);
if (ret <= 0)
break;
pos += ret;
@@ -1355,10 +931,10 @@ iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
iocb->ki_pos += done;
return done ? done : ret;
}
-EXPORT_SYMBOL_GPL(iomap_dax_rw);
+EXPORT_SYMBOL_GPL(dax_iomap_rw);
/**
- * iomap_dax_fault - handle a page fault on a DAX file
+ * dax_iomap_fault - handle a page fault on a DAX file
* @vma: The virtual memory area where the fault occurred
* @vmf: The description of the fault
* @ops: iomap ops passed from the file system
@@ -1367,7 +943,7 @@ EXPORT_SYMBOL_GPL(iomap_dax_rw);
* or mkwrite handler for DAX files. Assumes the caller has done all the
* necessary locking for the page fault to proceed successfully.
*/
-int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
struct iomap_ops *ops)
{
struct address_space *mapping = vma->vm_file->f_mapping;
@@ -1376,8 +952,9 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
sector_t sector;
struct iomap iomap = { 0 };
- unsigned flags = 0;
+ unsigned flags = IOMAP_FAULT;
int error, major = 0;
+ int locked_status = 0;
void *entry;
/*
@@ -1388,7 +965,7 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
if (pos >= i_size_read(inode))
return VM_FAULT_SIGBUS;
- entry = grab_mapping_entry(mapping, vmf->pgoff);
+ entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
if (IS_ERR(entry)) {
error = PTR_ERR(entry);
goto out;
@@ -1407,10 +984,10 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
goto unlock_entry;
if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
error = -EIO; /* fs corruption? */
- goto unlock_entry;
+ goto finish_iomap;
}
- sector = iomap.blkno + (((pos & PAGE_MASK) - iomap.offset) >> 9);
+ sector = dax_iomap_sector(&iomap, pos);
if (vmf->cow_page) {
switch (iomap.type) {
@@ -1429,13 +1006,15 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
}
if (error)
- goto unlock_entry;
+ goto finish_iomap;
if (!radix_tree_exceptional_entry(entry)) {
vmf->page = entry;
- return VM_FAULT_LOCKED;
+ locked_status = VM_FAULT_LOCKED;
+ } else {
+ vmf->entry = entry;
+ locked_status = VM_FAULT_DAX_LOCKED;
}
- vmf->entry = entry;
- return VM_FAULT_DAX_LOCKED;
+ goto finish_iomap;
}
switch (iomap.type) {
@@ -1450,8 +1029,10 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
break;
case IOMAP_UNWRITTEN:
case IOMAP_HOLE:
- if (!(vmf->flags & FAULT_FLAG_WRITE))
- return dax_load_hole(mapping, entry, vmf);
+ if (!(vmf->flags & FAULT_FLAG_WRITE)) {
+ locked_status = dax_load_hole(mapping, entry, vmf);
+ break;
+ }
/*FALLTHRU*/
default:
WARN_ON_ONCE(1);
@@ -1459,15 +1040,218 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
break;
}
+ finish_iomap:
+ if (ops->iomap_end) {
+ if (error) {
+ /* keep previous error */
+ ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags,
+ &iomap);
+ } else {
+ error = ops->iomap_end(inode, pos, PAGE_SIZE,
+ PAGE_SIZE, flags, &iomap);
+ }
+ }
unlock_entry:
- put_locked_mapping_entry(mapping, vmf->pgoff, entry);
+ if (!locked_status || error)
+ put_locked_mapping_entry(mapping, vmf->pgoff, entry);
out:
if (error == -ENOMEM)
return VM_FAULT_OOM | major;
/* -EBUSY is fine, somebody else faulted on the same PTE */
if (error < 0 && error != -EBUSY)
return VM_FAULT_SIGBUS | major;
+ if (locked_status) {
+ WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */
+ return locked_status;
+ }
return VM_FAULT_NOPAGE | major;
}
-EXPORT_SYMBOL_GPL(iomap_dax_fault);
+EXPORT_SYMBOL_GPL(dax_iomap_fault);
+
+#ifdef CONFIG_FS_DAX_PMD
+/*
+ * The 'colour' (ie low bits) within a PMD of a page offset. This comes up
+ * more often than one might expect in the below functions.
+ */
+#define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
+
+static int dax_pmd_insert_mapping(struct vm_area_struct *vma, pmd_t *pmd,
+ struct vm_fault *vmf, unsigned long address,
+ struct iomap *iomap, loff_t pos, bool write, void **entryp)
+{
+ struct address_space *mapping = vma->vm_file->f_mapping;
+ struct block_device *bdev = iomap->bdev;
+ struct blk_dax_ctl dax = {
+ .sector = dax_iomap_sector(iomap, pos),
+ .size = PMD_SIZE,
+ };
+ long length = dax_map_atomic(bdev, &dax);
+ void *ret;
+
+ if (length < 0) /* dax_map_atomic() failed */
+ return VM_FAULT_FALLBACK;
+ if (length < PMD_SIZE)
+ goto unmap_fallback;
+ if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
+ goto unmap_fallback;
+ if (!pfn_t_devmap(dax.pfn))
+ goto unmap_fallback;
+
+ dax_unmap_atomic(bdev, &dax);
+
+ ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
+ RADIX_DAX_PMD);
+ if (IS_ERR(ret))
+ return VM_FAULT_FALLBACK;
+ *entryp = ret;
+
+ return vmf_insert_pfn_pmd(vma, address, pmd, dax.pfn, write);
+
+ unmap_fallback:
+ dax_unmap_atomic(bdev, &dax);
+ return VM_FAULT_FALLBACK;
+}
+
+static int dax_pmd_load_hole(struct vm_area_struct *vma, pmd_t *pmd,
+ struct vm_fault *vmf, unsigned long address,
+ struct iomap *iomap, void **entryp)
+{
+ struct address_space *mapping = vma->vm_file->f_mapping;
+ unsigned long pmd_addr = address & PMD_MASK;
+ struct page *zero_page;
+ spinlock_t *ptl;
+ pmd_t pmd_entry;
+ void *ret;
+
+ zero_page = mm_get_huge_zero_page(vma->vm_mm);
+
+ if (unlikely(!zero_page))
+ return VM_FAULT_FALLBACK;
+
+ ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
+ RADIX_DAX_PMD | RADIX_DAX_HZP);
+ if (IS_ERR(ret))
+ return VM_FAULT_FALLBACK;
+ *entryp = ret;
+
+ ptl = pmd_lock(vma->vm_mm, pmd);
+ if (!pmd_none(*pmd)) {
+ spin_unlock(ptl);
+ return VM_FAULT_FALLBACK;
+ }
+
+ pmd_entry = mk_pmd(zero_page, vma->vm_page_prot);
+ pmd_entry = pmd_mkhuge(pmd_entry);
+ set_pmd_at(vma->vm_mm, pmd_addr, pmd, pmd_entry);
+ spin_unlock(ptl);
+ return VM_FAULT_NOPAGE;
+}
+
+int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmd, unsigned int flags, struct iomap_ops *ops)
+{
+ struct address_space *mapping = vma->vm_file->f_mapping;
+ unsigned long pmd_addr = address & PMD_MASK;
+ bool write = flags & FAULT_FLAG_WRITE;
+ unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
+ struct inode *inode = mapping->host;
+ int result = VM_FAULT_FALLBACK;
+ struct iomap iomap = { 0 };
+ pgoff_t max_pgoff, pgoff;
+ struct vm_fault vmf;
+ void *entry;
+ loff_t pos;
+ int error;
+
+ /* Fall back to PTEs if we're going to COW */
+ if (write && !(vma->vm_flags & VM_SHARED))
+ goto fallback;
+
+ /* If the PMD would extend outside the VMA */
+ if (pmd_addr < vma->vm_start)
+ goto fallback;
+ if ((pmd_addr + PMD_SIZE) > vma->vm_end)
+ goto fallback;
+
+ /*
+ * Check whether offset isn't beyond end of file now. Caller is
+ * supposed to hold locks serializing us with truncate / punch hole so
+ * this is a reliable test.
+ */
+ pgoff = linear_page_index(vma, pmd_addr);
+ max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
+
+ if (pgoff > max_pgoff)
+ return VM_FAULT_SIGBUS;
+
+ /* If the PMD would extend beyond the file size */
+ if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
+ goto fallback;
+
+ /*
+ * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
+ * PMD or a HZP entry. If it can't (because a 4k page is already in
+ * the tree, for instance), it will return -EEXIST and we just fall
+ * back to 4k entries.
+ */
+ entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
+ if (IS_ERR(entry))
+ goto fallback;
+
+ /*
+ * Note that we don't use iomap_apply here. We aren't doing I/O, only
+ * setting up a mapping, so really we're using iomap_begin() as a way
+ * to look up our filesystem block.
+ */
+ pos = (loff_t)pgoff << PAGE_SHIFT;
+ error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
+ if (error)
+ goto unlock_entry;
+ if (iomap.offset + iomap.length < pos + PMD_SIZE)
+ goto finish_iomap;
+
+ vmf.pgoff = pgoff;
+ vmf.flags = flags;
+ vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO;
+
+ switch (iomap.type) {
+ case IOMAP_MAPPED:
+ result = dax_pmd_insert_mapping(vma, pmd, &vmf, address,
+ &iomap, pos, write, &entry);
+ break;
+ case IOMAP_UNWRITTEN:
+ case IOMAP_HOLE:
+ if (WARN_ON_ONCE(write))
+ goto finish_iomap;
+ result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap,
+ &entry);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ finish_iomap:
+ if (ops->iomap_end) {
+ if (result == VM_FAULT_FALLBACK) {
+ ops->iomap_end(inode, pos, PMD_SIZE, 0, iomap_flags,
+ &iomap);
+ } else {
+ error = ops->iomap_end(inode, pos, PMD_SIZE, PMD_SIZE,
+ iomap_flags, &iomap);
+ if (error)
+ result = VM_FAULT_FALLBACK;
+ }
+ }
+ unlock_entry:
+ put_locked_mapping_entry(mapping, pgoff, entry);
+ fallback:
+ if (result == VM_FAULT_FALLBACK) {
+ split_huge_pmd(vma, pmd, address);
+ count_vm_event(THP_FAULT_FALLBACK);
+ }
+ return result;
+}
+EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
+#endif /* CONFIG_FS_DAX_PMD */
#endif /* CONFIG_FS_IOMAP */
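
The fs/dax.c hunks above lean on helpers whose definitions are not part of the hunks shown here: RADIX_DAX_PMD, RADIX_DAX_HZP, RADIX_DAX_EMPTY, dax_radix_locked_entry(), dax_radix_order() and dax_radix_sector() replace the old in-file RADIX_DAX_* macros with an encoding that also distinguishes huge-zero-page and empty placeholder entries. A plausible sketch of that encoding follows, assuming it lives in include/linux/dax.h next to the existing exceptional-entry bits; the exact bit layout is illustrative, not quoted from the patch:

/*
 * Sketch of the radix tree entry encoding used by the fs/dax.c code above.
 * Assumption: defined in include/linux/dax.h; illustrative only.
 *
 * Low bits of an exceptional entry:
 *   ENTRY_LOCK - per-entry bit lock
 *   PMD        - entry covers a 2MiB range (otherwise PTE sized)
 *   HZP        - range is backed by the huge zero page
 *   EMPTY      - placeholder inserted by grab_mapping_entry()
 * The remaining bits hold the sector number.
 */
#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_HZP		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))

static inline unsigned long dax_radix_sector(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			((unsigned long)sector << RADIX_DAX_SHIFT) |
			RADIX_DAX_ENTRY_LOCK);
}

static inline unsigned int dax_radix_order(void *entry)
{
	if ((unsigned long)entry & RADIX_DAX_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	return 0;
}

This is what lets dax_writeback_one() compute the flush size as PAGE_SIZE << dax_radix_order(entry) and lets grab_mapping_entry() insert a locked empty placeholder with dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY).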
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index dcea1e37a1b7..07fed838d8fd 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -268,7 +268,7 @@ void dlm_callback_work(struct work_struct *work)
int dlm_callback_start(struct dlm_ls *ls)
{
ls->ls_callback_wq = alloc_workqueue("dlm_callback",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+ WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
if (!ls->ls_callback_wq) {
log_print("can't start dlm_callback workqueue");
return -ENOMEM;
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index df955d2209ce..7211e826d90d 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -12,7 +12,7 @@
******************************************************************************/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/configfs.h>
#include <linux/slab.h>
#include <linux/in.h>
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index 466f7d60edc2..ca7089aeadab 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -12,7 +12,7 @@
#include <linux/pagemap.h>
#include <linux/seq_file.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 216b61604ef9..b670f5601fbb 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -18,7 +18,6 @@
* This is the main header file to be included in each DLM source file.
*/
-#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index f3e72787e7f9..91592b75c309 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -11,6 +11,8 @@
*******************************************************************************
******************************************************************************/
+#include <linux/module.h>
+
#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 609998de533e..7d398d300e97 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -519,29 +519,25 @@ out:
/* Note: sk_callback_lock must be locked before calling this function. */
static void save_callbacks(struct connection *con, struct sock *sk)
{
- lock_sock(sk);
con->orig_data_ready = sk->sk_data_ready;
con->orig_state_change = sk->sk_state_change;
con->orig_write_space = sk->sk_write_space;
con->orig_error_report = sk->sk_error_report;
- release_sock(sk);
}
static void restore_callbacks(struct connection *con, struct sock *sk)
{
write_lock_bh(&sk->sk_callback_lock);
- lock_sock(sk);
sk->sk_user_data = NULL;
sk->sk_data_ready = con->orig_data_ready;
sk->sk_state_change = con->orig_state_change;
sk->sk_write_space = con->orig_write_space;
sk->sk_error_report = con->orig_error_report;
- release_sock(sk);
write_unlock_bh(&sk->sk_callback_lock);
}
/* Make a socket active */
-static void add_sock(struct socket *sock, struct connection *con)
+static void add_sock(struct socket *sock, struct connection *con, bool save_cb)
{
struct sock *sk = sock->sk;
@@ -549,7 +545,7 @@ static void add_sock(struct socket *sock, struct connection *con)
con->sock = sock;
sk->sk_user_data = con;
- if (!test_bit(CF_IS_OTHERCON, &con->flags))
+ if (save_cb)
save_callbacks(con, sk);
/* Install a data_ready callback */
sk->sk_data_ready = lowcomms_data_ready;
@@ -806,7 +802,7 @@ static int tcp_accept_from_sock(struct connection *con)
newcon->othercon = othercon;
othercon->sock = newsock;
newsock->sk->sk_user_data = othercon;
- add_sock(newsock, othercon);
+ add_sock(newsock, othercon, false);
addcon = othercon;
}
else {
@@ -819,7 +815,10 @@ static int tcp_accept_from_sock(struct connection *con)
else {
newsock->sk->sk_user_data = newcon;
newcon->rx_action = receive_from_sock;
- add_sock(newsock, newcon);
+ /* accept copies the sk after we've saved the callbacks, so we
+ don't want to save them a second time or comm errors will
+ result in calling sk_error_report recursively. */
+ add_sock(newsock, newcon, false);
addcon = newcon;
}
@@ -880,7 +879,8 @@ static int sctp_accept_from_sock(struct connection *con)
}
make_sockaddr(&prim.ssp_addr, 0, &addr_len);
- if (addr_to_nodeid(&prim.ssp_addr, &nodeid)) {
+ ret = addr_to_nodeid(&prim.ssp_addr, &nodeid);
+ if (ret) {
unsigned char *b = (unsigned char *)&prim.ssp_addr;
log_print("reject connect from unknown addr");
@@ -919,7 +919,7 @@ static int sctp_accept_from_sock(struct connection *con)
newcon->othercon = othercon;
othercon->sock = newsock;
newsock->sk->sk_user_data = othercon;
- add_sock(newsock, othercon);
+ add_sock(newsock, othercon, false);
addcon = othercon;
} else {
printk("Extra connection from node %d attempted\n", nodeid);
@@ -930,7 +930,7 @@ static int sctp_accept_from_sock(struct connection *con)
} else {
newsock->sk->sk_user_data = newcon;
newcon->rx_action = receive_from_sock;
- add_sock(newsock, newcon);
+ add_sock(newsock, newcon, false);
addcon = newcon;
}
@@ -1058,7 +1058,7 @@ static void sctp_connect_to_sock(struct connection *con)
sock->sk->sk_user_data = con;
con->rx_action = receive_from_sock;
con->connect_action = sctp_connect_to_sock;
- add_sock(sock, con);
+ add_sock(sock, con, true);
/* Bind to all addresses. */
if (sctp_bind_addrs(con, 0))
@@ -1146,7 +1146,7 @@ static void tcp_connect_to_sock(struct connection *con)
sock->sk->sk_user_data = con;
con->rx_action = receive_from_sock;
con->connect_action = tcp_connect_to_sock;
- add_sock(sock, con);
+ add_sock(sock, con, true);
/* Bind to our cluster-known address connecting to avoid
routing problems */
@@ -1366,7 +1366,7 @@ static int tcp_listen_for_all(void)
sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
if (sock) {
- add_sock(sock, con);
+ add_sock(sock, con, true);
result = 0;
}
else {
diff --git a/fs/dlm/main.c b/fs/dlm/main.c
index 079c0bd71ab7..8e1b618891be 100644
--- a/fs/dlm/main.c
+++ b/fs/dlm/main.c
@@ -11,6 +11,8 @@
*******************************************************************************
******************************************************************************/
+#include <linux/module.h>
+
#include "dlm_internal.h"
#include "lockspace.h"
#include "lock.h"
diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c
index 0643ae44f342..43a96c330570 100644
--- a/fs/dlm/netlink.c
+++ b/fs/dlm/netlink.c
@@ -65,7 +65,7 @@ static int user_cmd(struct sk_buff *skb, struct genl_info *info)
return 0;
}
-static struct genl_ops dlm_nl_ops[] = {
+static const struct genl_ops dlm_nl_ops[] = {
{
.cmd = DLM_CMD_HELLO,
.doit = user_cmd,
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 58c2f4a21b7f..1ce908c2232c 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -9,7 +9,6 @@
#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/wait.h>
-#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/poll.h>
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index a0e1478dfd04..b0f241528a30 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -38,7 +38,7 @@ static ssize_t ext2_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
return 0; /* skip atime */
inode_lock_shared(inode);
- ret = iomap_dax_rw(iocb, to, &ext2_iomap_ops);
+ ret = dax_iomap_rw(iocb, to, &ext2_iomap_ops);
inode_unlock_shared(inode);
file_accessed(iocb->ki_filp);
@@ -62,7 +62,7 @@ static ssize_t ext2_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (ret)
goto out_unlock;
- ret = iomap_dax_rw(iocb, from, &ext2_iomap_ops);
+ ret = dax_iomap_rw(iocb, from, &ext2_iomap_ops);
if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
i_size_write(inode, iocb->ki_pos);
mark_inode_dirty(inode);
@@ -99,7 +99,7 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
down_read(&ei->dax_sem);
- ret = iomap_dax_fault(vma, vmf, &ext2_iomap_ops);
+ ret = dax_iomap_fault(vma, vmf, &ext2_iomap_ops);
up_read(&ei->dax_sem);
if (vmf->flags & FAULT_FLAG_WRITE)
@@ -107,27 +107,6 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return ret;
}
-static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, unsigned int flags)
-{
- struct inode *inode = file_inode(vma->vm_file);
- struct ext2_inode_info *ei = EXT2_I(inode);
- int ret;
-
- if (flags & FAULT_FLAG_WRITE) {
- sb_start_pagefault(inode->i_sb);
- file_update_time(vma->vm_file);
- }
- down_read(&ei->dax_sem);
-
- ret = dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block);
-
- up_read(&ei->dax_sem);
- if (flags & FAULT_FLAG_WRITE)
- sb_end_pagefault(inode->i_sb);
- return ret;
-}
-
static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
struct vm_fault *vmf)
{
@@ -154,7 +133,11 @@ static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
static const struct vm_operations_struct ext2_dax_vm_ops = {
.fault = ext2_dax_fault,
- .pmd_fault = ext2_dax_pmd_fault,
+ /*
+ * .pmd_fault is not supported for DAX because allocation in ext2
+ * cannot be reliably aligned to huge page sizes and so pmd faults
+ * will always fail and fall back to regular faults.
+ */
.page_mkwrite = ext2_dax_fault,
.pfn_mkwrite = ext2_dax_pfn_mkwrite,
};
@@ -166,7 +149,7 @@ static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
file_accessed(file);
vma->vm_ops = &ext2_dax_vm_ops;
- vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+ vma->vm_flags |= VM_MIXEDMAP;
return 0;
}
#else
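
ext2 drops its .pmd_fault handler outright rather than converting it (see the comment added to ext2_dax_vm_ops above). A filesystem whose allocator can hand out huge-page-aligned extents would instead wire the new dax_iomap_pmd_fault() into its vm_operations. A rough sketch under stated assumptions, where example_iomap_ops and the pagefault accounting are placeholders and not taken from this patch:

/* Hypothetical .pmd_fault handler built on the new helper; the iomap_ops
 * instance and the surrounding accounting are illustrative only. */
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

extern struct iomap_ops example_iomap_ops;	/* assumed filesystem ops */

static int example_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd, unsigned int flags)
{
	struct inode *inode = file_inode(vma->vm_file);
	int ret;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vma->vm_file);
	}

	/* dax_iomap_pmd_fault() splits the PMD and falls back to PTEs itself */
	ret = dax_iomap_pmd_fault(vma, addr, pmd, flags, &example_iomap_ops);

	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

Such a handler would then be installed as .pmd_fault in the filesystem's vm_operations_struct, next to a .fault handler that calls dax_iomap_fault() as ext2 does above.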
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 41b8b44a391c..046b642f3585 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -850,6 +850,9 @@ struct iomap_ops ext2_iomap_ops = {
.iomap_begin = ext2_iomap_begin,
.iomap_end = ext2_iomap_end,
};
+#else
+/* Define empty ops for !CONFIG_FS_DAX case to avoid ugly ifdefs */
+struct iomap_ops ext2_iomap_ops;
#endif /* CONFIG_FS_DAX */
int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
@@ -1293,9 +1296,11 @@ static int ext2_setsize(struct inode *inode, loff_t newsize)
inode_dio_wait(inode);
- if (IS_DAX(inode))
- error = dax_truncate_page(inode, newsize, ext2_get_block);
- else if (test_opt(inode->i_sb, NOBH))
+ if (IS_DAX(inode)) {
+ error = iomap_zero_range(inode, newsize,
+ PAGE_ALIGN(newsize) - newsize, NULL,
+ &ext2_iomap_ops);
+ } else if (test_opt(inode->i_sb, NOBH))
error = nobh_truncate_page(inode->i_mapping,
newsize, ext2_get_block);
else
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index e38039fd96ff..7b90691e98c4 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -37,6 +37,7 @@ config EXT4_FS
select CRC16
select CRYPTO
select CRYPTO_CRC32C
+ select FS_IOMAP if FS_DAX
help
This is the next generation of the ext3 filesystem.
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index dfa519979038..fd389935ecd1 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -196,7 +196,7 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
if (error)
return error;
- inode->i_ctime = ext4_current_time(inode);
+ inode->i_ctime = current_time(inode);
ext4_mark_inode_dirty(handle, inode);
}
break;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index a8a750f59621..2163c1e69f2a 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -397,8 +397,9 @@ struct flex_groups {
#define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */
#define EXT4_FL_USER_VISIBLE 0x304BDFFF /* User visible flags */
-#define EXT4_FL_USER_MODIFIABLE 0x204380FF /* User modifiable flags */
+#define EXT4_FL_USER_MODIFIABLE 0x204BC0FF /* User modifiable flags */
+/* Flags we can manipulate through EXT4_IOC_FSSETXATTR */
#define EXT4_FL_XFLAG_VISIBLE (EXT4_SYNC_FL | \
EXT4_IMMUTABLE_FL | \
EXT4_APPEND_FL | \
@@ -1533,12 +1534,6 @@ static inline struct ext4_inode_info *EXT4_I(struct inode *inode)
return container_of(inode, struct ext4_inode_info, vfs_inode);
}
-static inline struct timespec ext4_current_time(struct inode *inode)
-{
- return (inode->i_sb->s_time_gran < NSEC_PER_SEC) ?
- current_fs_time(inode->i_sb) : CURRENT_TIME_SEC;
-}
-
static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
{
return ino == EXT4_ROOT_INO ||
@@ -2277,11 +2272,6 @@ extern unsigned ext4_free_clusters_after_init(struct super_block *sb,
struct ext4_group_desc *gdp);
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
-static inline int ext4_sb_has_crypto(struct super_block *sb)
-{
- return ext4_has_feature_encrypt(sb);
-}
-
static inline bool ext4_encrypted_inode(struct inode *inode)
{
return ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT);
@@ -2339,8 +2329,8 @@ static inline void ext4_fname_free_filename(struct ext4_filename *fname) { }
#define fscrypt_pullback_bio_page fscrypt_notsupp_pullback_bio_page
#define fscrypt_restore_control_page fscrypt_notsupp_restore_control_page
#define fscrypt_zeroout_range fscrypt_notsupp_zeroout_range
-#define fscrypt_process_policy fscrypt_notsupp_process_policy
-#define fscrypt_get_policy fscrypt_notsupp_get_policy
+#define fscrypt_ioctl_set_policy fscrypt_notsupp_ioctl_set_policy
+#define fscrypt_ioctl_get_policy fscrypt_notsupp_ioctl_get_policy
#define fscrypt_has_permitted_context fscrypt_notsupp_has_permitted_context
#define fscrypt_inherit_context fscrypt_notsupp_inherit_context
#define fscrypt_get_encryption_info fscrypt_notsupp_get_encryption_info
@@ -2458,8 +2448,6 @@ struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int);
struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
-int ext4_dax_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create);
int ext4_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
int ext4_dio_get_block(struct inode *inode, sector_t iblock,
@@ -2492,7 +2480,7 @@ extern int ext4_change_inode_journal_flag(struct inode *, int);
extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
extern int ext4_inode_attach_jinode(struct inode *inode);
extern int ext4_can_truncate(struct inode *inode);
-extern void ext4_truncate(struct inode *);
+extern int ext4_truncate(struct inode *);
extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
extern int ext4_truncate_restart_trans(handle_t *, struct inode *, int nblocks);
extern void ext4_set_inode_flags(struct inode *);
@@ -3129,7 +3117,7 @@ extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents);
extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags);
-extern void ext4_ext_truncate(handle_t *, struct inode *);
+extern int ext4_ext_truncate(handle_t *, struct inode *);
extern int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
ext4_lblk_t end);
extern void ext4_ext_init(struct super_block *);
@@ -3265,12 +3253,7 @@ static inline void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
}
}
-static inline bool ext4_aligned_io(struct inode *inode, loff_t off, loff_t len)
-{
- int blksize = 1 << inode->i_blkbits;
-
- return IS_ALIGNED(off, blksize) && IS_ALIGNED(len, blksize);
-}
+extern struct iomap_ops ext4_iomap_ops;
#endif /* __KERNEL__ */
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index b1d52c14098e..f97611171023 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -414,17 +414,19 @@ static inline int ext4_inode_journal_mode(struct inode *inode)
return EXT4_INODE_WRITEBACK_DATA_MODE; /* writeback */
/* We do not support data journalling with delayed allocation */
if (!S_ISREG(inode->i_mode) ||
- test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
- return EXT4_INODE_JOURNAL_DATA_MODE; /* journal data */
- if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
- !test_opt(inode->i_sb, DELALLOC))
+ test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
+ (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
+ !test_opt(inode->i_sb, DELALLOC))) {
+ /* We do not support data journalling for encrypted data */
+ if (S_ISREG(inode->i_mode) && ext4_encrypted_inode(inode))
+ return EXT4_INODE_ORDERED_DATA_MODE; /* ordered */
return EXT4_INODE_JOURNAL_DATA_MODE; /* journal data */
+ }
if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
return EXT4_INODE_ORDERED_DATA_MODE; /* ordered */
if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
return EXT4_INODE_WRITEBACK_DATA_MODE; /* writeback */
- else
- BUG();
+ BUG();
}
static inline int ext4_should_journal_data(struct inode *inode)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index c930a0110fb4..3e1014fe835e 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4631,7 +4631,7 @@ out2:
return err ? err : allocated;
}
-void ext4_ext_truncate(handle_t *handle, struct inode *inode)
+int ext4_ext_truncate(handle_t *handle, struct inode *inode)
{
struct super_block *sb = inode->i_sb;
ext4_lblk_t last_block;
@@ -4645,7 +4645,9 @@ void ext4_ext_truncate(handle_t *handle, struct inode *inode)
/* we have to know where to truncate from in crash case */
EXT4_I(inode)->i_disksize = inode->i_size;
- ext4_mark_inode_dirty(handle, inode);
+ err = ext4_mark_inode_dirty(handle, inode);
+ if (err)
+ return err;
last_block = (inode->i_size + sb->s_blocksize - 1)
>> EXT4_BLOCK_SIZE_BITS(sb);
@@ -4657,12 +4659,9 @@ retry:
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry;
}
- if (err) {
- ext4_std_error(inode->i_sb, err);
- return;
- }
- err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
- ext4_std_error(inode->i_sb, err);
+ if (err)
+ return err;
+ return ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
}
static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
@@ -4701,7 +4700,7 @@ retry:
/*
* Recalculate credits when extent tree depth changes.
*/
- if (depth >= 0 && depth != ext_depth(inode)) {
+ if (depth != ext_depth(inode)) {
credits = ext4_chunk_trans_blocks(inode, len);
depth = ext_depth(inode);
}
@@ -4725,7 +4724,7 @@ retry:
map.m_lblk += ret;
map.m_len = len = len - ret;
epos = (loff_t)map.m_lblk << inode->i_blkbits;
- inode->i_ctime = ext4_current_time(inode);
+ inode->i_ctime = current_time(inode);
if (new_size) {
if (epos > new_size)
epos = new_size;
@@ -4853,7 +4852,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
}
/* Now release the pages and zero block aligned part of pages */
truncate_pagecache_range(inode, start, end - 1);
- inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
flags, mode);
@@ -4878,7 +4877,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
goto out_dio;
}
- inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
if (new_size) {
ext4_update_inode_size(inode, new_size);
} else {
@@ -5568,7 +5567,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
up_write(&EXT4_I(inode)->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
- inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
ext4_mark_inode_dirty(handle, inode);
out_stop:
@@ -5678,7 +5677,7 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
/* Expand file to avoid data loss if there is error while shifting */
inode->i_size += len;
EXT4_I(inode)->i_disksize += len;
- inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
ret = ext4_mark_inode_dirty(handle, inode);
if (ret)
goto out_stop;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 2a822d30e73f..b5f184493c57 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -31,6 +31,42 @@
#include "xattr.h"
#include "acl.h"
+#ifdef CONFIG_FS_DAX
+static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ ssize_t ret;
+
+ inode_lock_shared(inode);
+ /*
+ * Recheck under inode lock - at this point we are sure it cannot
+ * change anymore
+ */
+ if (!IS_DAX(inode)) {
+ inode_unlock_shared(inode);
+ /* Fall back to buffered IO in case we cannot support DAX */
+ return generic_file_read_iter(iocb, to);
+ }
+ ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
+ inode_unlock_shared(inode);
+
+ file_accessed(iocb->ki_filp);
+ return ret;
+}
+#endif
+
+static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ if (!iov_iter_count(to))
+ return 0; /* skip atime */
+
+#ifdef CONFIG_FS_DAX
+ if (IS_DAX(file_inode(iocb->ki_filp)))
+ return ext4_dax_read_iter(iocb, to);
+#endif
+ return generic_file_read_iter(iocb, to);
+}
+
/*
* Called when an inode is released. Note that this is different
* from ext4_file_open: open gets called at every open, but release
@@ -88,6 +124,86 @@ ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
return 0;
}
+/* Is IO overwriting allocated and initialized blocks? */
+static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
+{
+ struct ext4_map_blocks map;
+ unsigned int blkbits = inode->i_blkbits;
+ int err, blklen;
+
+ if (pos + len > i_size_read(inode))
+ return false;
+
+ map.m_lblk = pos >> blkbits;
+ map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
+ blklen = map.m_len;
+
+ err = ext4_map_blocks(NULL, inode, &map, 0);
+ /*
+ * 'err==len' means that all of the blocks have been preallocated,
+ * regardless of whether they have been initialized or not. To exclude
+ * unwritten extents, we need to check m_flags.
+ */
+ return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
+}
+
+static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ ssize_t ret;
+
+ ret = generic_write_checks(iocb, from);
+ if (ret <= 0)
+ return ret;
+ /*
+ * If we have encountered a bitmap-format file, the size limit
+ * is smaller than s_maxbytes, which is for extent-mapped files.
+ */
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+
+ if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
+ return -EFBIG;
+ iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
+ }
+ return iov_iter_count(from);
+}
+
+#ifdef CONFIG_FS_DAX
+static ssize_t
+ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ ssize_t ret;
+ bool overwrite = false;
+
+ inode_lock(inode);
+ ret = ext4_write_checks(iocb, from);
+ if (ret <= 0)
+ goto out;
+ ret = file_remove_privs(iocb->ki_filp);
+ if (ret)
+ goto out;
+ ret = file_update_time(iocb->ki_filp);
+ if (ret)
+ goto out;
+
+ if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
+ overwrite = true;
+ downgrade_write(&inode->i_rwsem);
+ }
+ ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
+out:
+ if (!overwrite)
+ inode_unlock(inode);
+ else
+ inode_unlock_shared(inode);
+ if (ret > 0)
+ ret = generic_write_sync(iocb, ret);
+ return ret;
+}
+#endif
+
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
@@ -97,8 +213,13 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
int overwrite = 0;
ssize_t ret;
+#ifdef CONFIG_FS_DAX
+ if (IS_DAX(inode))
+ return ext4_dax_write_iter(iocb, from);
+#endif
+
inode_lock(inode);
- ret = generic_write_checks(iocb, from);
+ ret = ext4_write_checks(iocb, from);
if (ret <= 0)
goto out;
@@ -114,53 +235,11 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
ext4_unwritten_wait(inode);
}
- /*
- * If we have encountered a bitmap-format file, the size limit
- * is smaller than s_maxbytes, which is for extent-mapped files.
- */
- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-
- if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) {
- ret = -EFBIG;
- goto out;
- }
- iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
- }
-
iocb->private = &overwrite;
- if (o_direct) {
- size_t length = iov_iter_count(from);
- loff_t pos = iocb->ki_pos;
-
- /* check whether we do a DIO overwrite or not */
- if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
- pos + length <= i_size_read(inode)) {
- struct ext4_map_blocks map;
- unsigned int blkbits = inode->i_blkbits;
- int err, len;
-
- map.m_lblk = pos >> blkbits;
- map.m_len = EXT4_MAX_BLOCKS(length, pos, blkbits);
- len = map.m_len;
-
- err = ext4_map_blocks(NULL, inode, &map, 0);
- /*
- * 'err==len' means that all of blocks has
- * been preallocated no matter they are
- * initialized or not. For excluding
- * unwritten extents, we need to check
- * m_flags. There are two conditions that
- * indicate for initialized extents. 1) If we
- * hit extent cache, EXT4_MAP_MAPPED flag is
- * returned; 2) If we do a real lookup,
- * non-flags are returned. So we should check
- * these two conditions.
- */
- if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
- overwrite = 1;
- }
- }
+ /* Check whether we do a DIO overwrite or not */
+ if (o_direct && ext4_should_dioread_nolock(inode) && !unaligned_aio &&
+ ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from)))
+ overwrite = 1;
ret = __generic_file_write_iter(iocb, from);
inode_unlock(inode);
@@ -196,7 +275,7 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (IS_ERR(handle))
result = VM_FAULT_SIGBUS;
else
- result = dax_fault(vma, vmf, ext4_dax_get_block);
+ result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
if (write) {
if (!IS_ERR(handle))
@@ -230,9 +309,10 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
if (IS_ERR(handle))
result = VM_FAULT_SIGBUS;
- else
- result = dax_pmd_fault(vma, addr, pmd, flags,
- ext4_dax_get_block);
+ else {
+ result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
+ &ext4_iomap_ops);
+ }
if (write) {
if (!IS_ERR(handle))
@@ -687,7 +767,7 @@ loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
const struct file_operations ext4_file_operations = {
.llseek = ext4_llseek,
- .read_iter = generic_file_read_iter,
+ .read_iter = ext4_file_read_iter,
.write_iter = ext4_file_write_iter,
.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
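Taken together, the file.c changes split the DAX I/O paths off from the generic ones: reads take the shared inode lock and go through dax_iomap_rw(), while writes take the exclusive lock and downgrade it to shared once ext4_overwrite_io() has shown the range only overwrites already written, initialized blocks. A condensed sketch of that locking pattern follows; dax_write_sketch is an illustrative name, not a function in the patch, and the write checks plus error handling are omitted:

static ssize_t dax_write_sketch(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	bool shared = false;
	ssize_t ret;

	inode_lock(inode);
	if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
		/* Pure overwrite: concurrent readers remain allowed. */
		downgrade_write(&inode->i_rwsem);
		shared = true;
	}
	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
	if (shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);
	return ret;
}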
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 170421edfdfe..e57e8d90ea54 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1039,7 +1039,7 @@ got:
/* This is the optimal IO size (for stat), not the fs block size */
inode->i_blocks = 0;
inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
- ext4_current_time(inode);
+ current_time(inode);
memset(ei->i_data, 0, sizeof(ei->i_data));
ei->i_dir_start_lookup = 0;
@@ -1115,8 +1115,7 @@ got:
}
if (encrypt) {
- /* give pointer to avoid set_context with journal ops. */
- err = fscrypt_inherit_context(dir, inode, &encrypt, true);
+ err = fscrypt_inherit_context(dir, inode, handle, true);
if (err)
goto fail_free_drop;
}
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index f74d5ee2cdec..437df6a1a841 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -299,6 +299,11 @@ static int ext4_create_inline_data(handle_t *handle,
EXT4_I(inode)->i_inline_size = len + EXT4_MIN_INLINE_DATA_SIZE;
ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
ext4_set_inode_flag(inode, EXT4_INODE_INLINE_DATA);
+ /*
+ * Propagate changes to inode->i_flags as well - e.g. S_DAX may
+ * get cleared
+ */
+ ext4_set_inode_flags(inode);
get_bh(is.iloc.bh);
error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
@@ -336,8 +341,10 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
len -= EXT4_MIN_INLINE_DATA_SIZE;
value = kzalloc(len, GFP_NOFS);
- if (!value)
+ if (!value) {
+ error = -ENOMEM;
goto out;
+ }
error = ext4_xattr_ibody_get(inode, i.name_index, i.name,
value, len);
@@ -442,6 +449,11 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle,
}
}
ext4_clear_inode_flag(inode, EXT4_INODE_INLINE_DATA);
+ /*
+ * Propagate changes to inode->i_flags as well - e.g. S_DAX may
+ * get set.
+ */
+ ext4_set_inode_flags(inode);
get_bh(is.iloc.bh);
error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
@@ -1028,7 +1040,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
* happen is that the times are slightly out of date
* and/or different from the directory change time.
*/
- dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
+ dir->i_mtime = dir->i_ctime = current_time(dir);
ext4_update_dx_flag(dir);
dir->i_version++;
ext4_mark_inode_dirty(handle, dir);
@@ -1971,7 +1983,7 @@ out:
if (inode->i_nlink)
ext4_orphan_del(handle, inode);
- inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
ext4_mark_inode_dirty(handle, inode);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9c064727ed62..72d593fa690d 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -37,6 +37,7 @@
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
+#include <linux/iomap.h>
#include "ext4_jbd2.h"
#include "xattr.h"
@@ -71,10 +72,9 @@ static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
csum_size);
offset += csum_size;
- csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
- EXT4_INODE_SIZE(inode->i_sb) -
- offset);
}
+ csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+ EXT4_INODE_SIZE(inode->i_sb) - offset);
}
return csum;
@@ -261,8 +261,15 @@ void ext4_evict_inode(struct inode *inode)
"couldn't mark inode dirty (err %d)", err);
goto stop_handle;
}
- if (inode->i_blocks)
- ext4_truncate(inode);
+ if (inode->i_blocks) {
+ err = ext4_truncate(inode);
+ if (err) {
+ ext4_error(inode->i_sb,
+ "couldn't truncate inode %lu (err %d)",
+ inode->i_ino, err);
+ goto stop_handle;
+ }
+ }
/*
* ext4_ext_truncate() doesn't reserve any slop when it
@@ -767,6 +774,9 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
ext4_update_bh_state(bh, map.m_flags);
bh->b_size = inode->i_sb->s_blocksize * map.m_len;
ret = 0;
+ } else if (ret == 0) {
+ /* hole case, need to fill in bh->b_size */
+ bh->b_size = inode->i_sb->s_blocksize * map.m_len;
}
return ret;
}
@@ -1166,7 +1176,8 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
if (unlikely(err))
page_zero_new_buffers(page, from, to);
else if (decrypt)
- err = fscrypt_decrypt_page(page);
+ err = fscrypt_decrypt_page(page->mapping->host, page,
+ PAGE_SIZE, 0, page->index);
return err;
}
#endif
@@ -2891,7 +2902,8 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
index = pos >> PAGE_SHIFT;
- if (ext4_nonda_switch(inode->i_sb)) {
+ if (ext4_nonda_switch(inode->i_sb) ||
+ S_ISLNK(inode->i_mode)) {
*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
return ext4_write_begin(file, mapping, pos,
len, flags, pagep, fsdata);
@@ -3268,53 +3280,159 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
}
#ifdef CONFIG_FS_DAX
-/*
- * Get block function for DAX IO and mmap faults. It takes care of converting
- * unwritten extents to written ones and initializes new / converted blocks
- * to zeros.
- */
-int ext4_dax_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
+static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ unsigned flags, struct iomap *iomap)
{
+ unsigned int blkbits = inode->i_blkbits;
+ unsigned long first_block = offset >> blkbits;
+ unsigned long last_block = (offset + length - 1) >> blkbits;
+ struct ext4_map_blocks map;
int ret;
- ext4_debug("inode %lu, create flag %d\n", inode->i_ino, create);
- if (!create)
- return _ext4_get_block(inode, iblock, bh_result, 0);
+ if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
+ return -ERANGE;
- ret = ext4_get_block_trans(inode, iblock, bh_result,
- EXT4_GET_BLOCKS_PRE_IO |
- EXT4_GET_BLOCKS_CREATE_ZERO);
- if (ret < 0)
- return ret;
+ map.m_lblk = first_block;
+ map.m_len = last_block - first_block + 1;
+
+ if (!(flags & IOMAP_WRITE)) {
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+ } else {
+ int dio_credits;
+ handle_t *handle;
+ int retries = 0;
- if (buffer_unwritten(bh_result)) {
+ /* Trim mapping request to maximum we can map at once for DIO */
+ if (map.m_len > DIO_MAX_BLOCKS)
+ map.m_len = DIO_MAX_BLOCKS;
+ dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
+retry:
/*
- * We are protected by i_mmap_sem or i_mutex so we know block
- * cannot go away from under us even though we dropped
- * i_data_sem. Convert extent to written and write zeros there.
+ * Either we allocate blocks and then we don't get an unwritten
+ * extent so we have reserved enough credits, or the blocks
+ * are already allocated and unwritten and in that case
+ * extent conversion fits in the credits as well.
*/
- ret = ext4_get_block_trans(inode, iblock, bh_result,
- EXT4_GET_BLOCKS_CONVERT |
- EXT4_GET_BLOCKS_CREATE_ZERO);
- if (ret < 0)
+ handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
+ dio_credits);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ ret = ext4_map_blocks(handle, inode, &map,
+ EXT4_GET_BLOCKS_CREATE_ZERO);
+ if (ret < 0) {
+ ext4_journal_stop(handle);
+ if (ret == -ENOSPC &&
+ ext4_should_retry_alloc(inode->i_sb, &retries))
+ goto retry;
return ret;
+ }
+
+ /*
+ * If we added blocks beyond i_size, we need to make sure they
+ * will get truncated if we crash before updating i_size in
+ * ext4_iomap_end(). For faults we don't need to do that (and
+ * even cannot because for orphan list operations inode_lock is
+ * required) - if we happen to instantiate a block beyond i_size,
+ * it is because we race with truncate which has already added
+ * the inode to the orphan list.
+ */
+ if (!(flags & IOMAP_FAULT) && first_block + map.m_len >
+ (i_size_read(inode) + (1 << blkbits) - 1) >> blkbits) {
+ int err;
+
+ err = ext4_orphan_add(handle, inode);
+ if (err < 0) {
+ ext4_journal_stop(handle);
+ return err;
+ }
+ }
+ ext4_journal_stop(handle);
}
- /*
- * At least for now we have to clear BH_New so that DAX code
- * doesn't attempt to zero blocks again in a racy way.
- */
- clear_buffer_new(bh_result);
+
+ iomap->flags = 0;
+ iomap->bdev = inode->i_sb->s_bdev;
+ iomap->offset = first_block << blkbits;
+
+ if (ret == 0) {
+ iomap->type = IOMAP_HOLE;
+ iomap->blkno = IOMAP_NULL_BLOCK;
+ iomap->length = (u64)map.m_len << blkbits;
+ } else {
+ if (map.m_flags & EXT4_MAP_MAPPED) {
+ iomap->type = IOMAP_MAPPED;
+ } else if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+ iomap->type = IOMAP_UNWRITTEN;
+ } else {
+ WARN_ON_ONCE(1);
+ return -EIO;
+ }
+ iomap->blkno = (sector_t)map.m_pblk << (blkbits - 9);
+ iomap->length = (u64)map.m_len << blkbits;
+ }
+
+ if (map.m_flags & EXT4_MAP_NEW)
+ iomap->flags |= IOMAP_F_NEW;
return 0;
}
-#else
-/* Just define empty function, it will never get called. */
-int ext4_dax_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
+
+static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
+ ssize_t written, unsigned flags, struct iomap *iomap)
{
- BUG();
- return 0;
+ int ret = 0;
+ handle_t *handle;
+ int blkbits = inode->i_blkbits;
+ bool truncate = false;
+
+ if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT))
+ return 0;
+
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ goto orphan_del;
+ }
+ if (ext4_update_inode_size(inode, offset + written))
+ ext4_mark_inode_dirty(handle, inode);
+ /*
+ * We may need to truncate allocated but not written blocks beyond EOF.
+ */
+ if (iomap->offset + iomap->length >
+ ALIGN(inode->i_size, 1 << blkbits)) {
+ ext4_lblk_t written_blk, end_blk;
+
+ written_blk = (offset + written) >> blkbits;
+ end_blk = (offset + length) >> blkbits;
+ if (written_blk < end_blk && ext4_can_truncate(inode))
+ truncate = true;
+ }
+ /*
+ * Remove inode from orphan list if we were extending an inode and
+ * everything went fine.
+ */
+ if (!truncate && inode->i_nlink &&
+ !list_empty(&EXT4_I(inode)->i_orphan))
+ ext4_orphan_del(handle, inode);
+ ext4_journal_stop(handle);
+ if (truncate) {
+ ext4_truncate_failed_write(inode);
+orphan_del:
+ /*
+ * If truncate failed early the inode might still be on the
+ * orphan list; we need to make sure the inode is removed from
+ * the orphan list in that case.
+ */
+ if (inode->i_nlink)
+ ext4_orphan_del(NULL, inode);
+ }
+ return ret;
}
+
+struct iomap_ops ext4_iomap_ops = {
+ .iomap_begin = ext4_iomap_begin,
+ .iomap_end = ext4_iomap_end,
+};
+
#endif
static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
@@ -3436,19 +3554,7 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
iocb->private = NULL;
if (overwrite)
get_block_func = ext4_dio_get_block_overwrite;
- else if (IS_DAX(inode)) {
- /*
- * We can avoid zeroing for aligned DAX writes beyond EOF. Other
- * writes need zeroing either because they can race with page
- * faults or because they use partial blocks.
- */
- if (round_down(offset, 1<<inode->i_blkbits) >= inode->i_size &&
- ext4_aligned_io(inode, offset, count))
- get_block_func = ext4_dio_get_block;
- else
- get_block_func = ext4_dax_get_block;
- dio_flags = DIO_LOCKING;
- } else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
+ else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
round_down(offset, 1 << inode->i_blkbits) >= inode->i_size) {
get_block_func = ext4_dio_get_block;
dio_flags = DIO_LOCKING | DIO_SKIP_HOLES;
@@ -3462,14 +3568,9 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
#ifdef CONFIG_EXT4_FS_ENCRYPTION
BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
#endif
- if (IS_DAX(inode)) {
- ret = dax_do_io(iocb, inode, iter, get_block_func,
- ext4_end_io_dio, dio_flags);
- } else
- ret = __blockdev_direct_IO(iocb, inode,
- inode->i_sb->s_bdev, iter,
- get_block_func,
- ext4_end_io_dio, NULL, dio_flags);
+ ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
+ get_block_func, ext4_end_io_dio, NULL,
+ dio_flags);
if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
EXT4_STATE_DIO_UNWRITTEN)) {
@@ -3538,6 +3639,7 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
{
struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = mapping->host;
+ size_t count = iov_iter_count(iter);
ssize_t ret;
/*
@@ -3546,19 +3648,12 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
* we are protected against page writeback as well.
*/
inode_lock_shared(inode);
- if (IS_DAX(inode)) {
- ret = dax_do_io(iocb, inode, iter, ext4_dio_get_block, NULL, 0);
- } else {
- size_t count = iov_iter_count(iter);
-
- ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
- iocb->ki_pos + count);
- if (ret)
- goto out_unlock;
- ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
- iter, ext4_dio_get_block,
- NULL, NULL, 0);
- }
+ ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
+ iocb->ki_pos + count);
+ if (ret)
+ goto out_unlock;
+ ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
+ iter, ext4_dio_get_block, NULL, NULL, 0);
out_unlock:
inode_unlock_shared(inode);
return ret;
@@ -3587,6 +3682,10 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
if (ext4_has_inline_data(inode))
return 0;
+ /* DAX uses iomap path now */
+ if (WARN_ON_ONCE(IS_DAX(inode)))
+ return 0;
+
trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
if (iov_iter_rw(iter) == READ)
ret = ext4_direct_IO_read(iocb, iter);
@@ -3615,6 +3714,13 @@ static int ext4_journalled_set_page_dirty(struct page *page)
return __set_page_dirty_nobuffers(page);
}
+static int ext4_set_page_dirty(struct page *page)
+{
+ WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page));
+ WARN_ON_ONCE(!page_has_buffers(page));
+ return __set_page_dirty_buffers(page);
+}
+
static const struct address_space_operations ext4_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
@@ -3622,6 +3728,7 @@ static const struct address_space_operations ext4_aops = {
.writepages = ext4_writepages,
.write_begin = ext4_write_begin,
.write_end = ext4_write_end,
+ .set_page_dirty = ext4_set_page_dirty,
.bmap = ext4_bmap,
.invalidatepage = ext4_invalidatepage,
.releasepage = ext4_releasepage,
@@ -3654,6 +3761,7 @@ static const struct address_space_operations ext4_da_aops = {
.writepages = ext4_writepages,
.write_begin = ext4_da_write_begin,
.write_end = ext4_da_write_end,
+ .set_page_dirty = ext4_set_page_dirty,
.bmap = ext4_bmap,
.invalidatepage = ext4_da_invalidatepage,
.releasepage = ext4_releasepage,
@@ -3743,7 +3851,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
/* We expect the key to be set. */
BUG_ON(!fscrypt_has_encryption_key(inode));
BUG_ON(blocksize != PAGE_SIZE);
- WARN_ON_ONCE(fscrypt_decrypt_page(page));
+ WARN_ON_ONCE(fscrypt_decrypt_page(page->mapping->host,
+ page, PAGE_SIZE, 0, page->index));
}
}
if (ext4_should_journal_data(inode)) {
@@ -3792,8 +3901,10 @@ static int ext4_block_zero_page_range(handle_t *handle,
if (length > max || length < 0)
length = max;
- if (IS_DAX(inode))
- return dax_zero_page_range(inode, from, length, ext4_get_block);
+ if (IS_DAX(inode)) {
+ return iomap_zero_range(inode, from, length, NULL,
+ &ext4_iomap_ops);
+ }
return __ext4_block_zero_page_range(handle, mapping, from, length);
}
@@ -4026,7 +4137,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
if (IS_SYNC(inode))
ext4_handle_sync(handle);
- inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
ext4_mark_inode_dirty(handle, inode);
out_stop:
ext4_journal_stop(handle);
@@ -4091,10 +4202,11 @@ int ext4_inode_attach_jinode(struct inode *inode)
* that's fine - as long as they are linked from the inode, the post-crash
* ext4_truncate() run will find them and release them.
*/
-void ext4_truncate(struct inode *inode)
+int ext4_truncate(struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned int credits;
+ int err = 0;
handle_t *handle;
struct address_space *mapping = inode->i_mapping;
@@ -4108,7 +4220,7 @@ void ext4_truncate(struct inode *inode)
trace_ext4_truncate_enter(inode);
if (!ext4_can_truncate(inode))
- return;
+ return 0;
ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
@@ -4120,13 +4232,13 @@ void ext4_truncate(struct inode *inode)
ext4_inline_data_truncate(inode, &has_inline);
if (has_inline)
- return;
+ return 0;
}
/* If we zero-out tail of the page, we have to create jinode for jbd2 */
if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
if (ext4_inode_attach_jinode(inode) < 0)
- return;
+ return 0;
}
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
@@ -4135,10 +4247,8 @@ void ext4_truncate(struct inode *inode)
credits = ext4_blocks_for_truncate(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
- if (IS_ERR(handle)) {
- ext4_std_error(inode->i_sb, PTR_ERR(handle));
- return;
- }
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
if (inode->i_size & (inode->i_sb->s_blocksize - 1))
ext4_block_truncate_page(handle, mapping, inode->i_size);
@@ -4152,7 +4262,8 @@ void ext4_truncate(struct inode *inode)
* Implication: the file must always be in a sane, consistent
* truncatable state while each transaction commits.
*/
- if (ext4_orphan_add(handle, inode))
+ err = ext4_orphan_add(handle, inode);
+ if (err)
goto out_stop;
down_write(&EXT4_I(inode)->i_data_sem);
@@ -4160,11 +4271,13 @@ void ext4_truncate(struct inode *inode)
ext4_discard_preallocations(inode);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- ext4_ext_truncate(handle, inode);
+ err = ext4_ext_truncate(handle, inode);
else
ext4_ind_truncate(handle, inode);
up_write(&ei->i_data_sem);
+ if (err)
+ goto out_stop;
if (IS_SYNC(inode))
ext4_handle_sync(handle);
@@ -4180,11 +4293,12 @@ out_stop:
if (inode->i_nlink)
ext4_orphan_del(handle, inode);
- inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
trace_ext4_truncate_exit(inode);
+ return err;
}
/*
@@ -4352,7 +4466,9 @@ void ext4_set_inode_flags(struct inode *inode)
new_fl |= S_NOATIME;
if (flags & EXT4_DIRSYNC_FL)
new_fl |= S_DIRSYNC;
- if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
+ if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode) &&
+ !ext4_should_journal_data(inode) && !ext4_has_inline_data(inode) &&
+ !ext4_encrypted_inode(inode))
new_fl |= S_DAX;
inode_set_flags(inode, new_fl,
S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX);
@@ -4411,7 +4527,9 @@ static inline void ext4_iget_extra_inode(struct inode *inode,
{
__le32 *magic = (void *)raw_inode +
EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
- if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
+ if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <=
+ EXT4_INODE_SIZE(inode->i_sb) &&
+ *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
ext4_set_inode_state(inode, EXT4_STATE_XATTR);
ext4_find_inline_data_nolock(inode);
} else
@@ -4434,6 +4552,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
struct inode *inode;
journal_t *journal = EXT4_SB(sb)->s_journal;
long ret;
+ loff_t size;
int block;
uid_t i_uid;
gid_t i_gid;
@@ -4456,10 +4575,12 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
- EXT4_INODE_SIZE(inode->i_sb)) {
- EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
- EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
- EXT4_INODE_SIZE(inode->i_sb));
+ EXT4_INODE_SIZE(inode->i_sb) ||
+ (ei->i_extra_isize & 3)) {
+ EXT4_ERROR_INODE(inode,
+ "bad extra_isize %u (inode size %u)",
+ ei->i_extra_isize,
+ EXT4_INODE_SIZE(inode->i_sb));
ret = -EFSCORRUPTED;
goto bad_inode;
}
@@ -4534,6 +4655,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
ei->i_file_acl |=
((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
inode->i_size = ext4_isize(raw_inode);
+ if ((size = i_size_read(inode)) < 0) {
+ EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
+ ret = -EFSCORRUPTED;
+ goto bad_inode;
+ }
ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
ei->i_reserved_quota = 0;
@@ -4577,6 +4703,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
if (ei->i_extra_isize == 0) {
/* The extra space is currently unused. Use it. */
+ BUILD_BUG_ON(sizeof(struct ext4_inode) & 3);
ei->i_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
} else {
@@ -5154,7 +5281,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
* update c/mtime in shrink case below
*/
if (!shrink) {
- inode->i_mtime = ext4_current_time(inode);
+ inode->i_mtime = current_time(inode);
inode->i_ctime = inode->i_mtime;
}
down_write(&EXT4_I(inode)->i_data_sem);
@@ -5199,12 +5326,15 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
* in data=journal mode to make pages freeable.
*/
truncate_pagecache(inode, inode->i_size);
- if (shrink)
- ext4_truncate(inode);
+ if (shrink) {
+ rc = ext4_truncate(inode);
+ if (rc)
+ error = rc;
+ }
up_write(&EXT4_I(inode)->i_mmap_sem);
}
- if (!rc) {
+ if (!error) {
setattr_copy(inode, attr);
mark_inode_dirty(inode);
}
@@ -5216,7 +5346,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
if (orphan && inode->i_nlink)
ext4_orphan_del(NULL, inode);
- if (!rc && (ia_valid & ATTR_MODE))
+ if (!error && (ia_valid & ATTR_MODE))
rc = posix_acl_chmod(inode, inode->i_mode);
err_out:
@@ -5455,18 +5585,20 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err)
return err;
- if (ext4_handle_valid(handle) &&
- EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
+ if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
!ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
/*
- * We need extra buffer credits since we may write into EA block
+ * In nojournal mode, we can immediately attempt to expand
+ * the inode. When journaled, we first need to obtain extra
+ * buffer credits since we may write into the EA block
* with this same handle. If journal_extend fails, then it will
* only result in a minor loss of functionality for that inode.
* If this is felt to be critical, then e2fsck should be run to
* force a large enough s_min_extra_isize.
*/
- if ((jbd2_journal_extend(handle,
- EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
+ if (!ext4_handle_valid(handle) ||
+ jbd2_journal_extend(handle,
+ EXT4_DATA_TRANS_BLOCKS(inode->i_sb)) == 0) {
ret = ext4_expand_extra_isize(inode,
sbi->s_want_extra_isize,
iloc, handle);
@@ -5620,6 +5752,11 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
}
ext4_set_aops(inode);
+ /*
+ * Update inode->i_flags after EXT4_INODE_JOURNAL_DATA was updated.
+ * E.g. S_DAX may get cleared / set.
+ */
+ ext4_set_inode_flags(inode);
jbd2_journal_unlock_updates(journal);
percpu_up_write(&sbi->s_journal_flag_rwsem);
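The heart of the new ext4_iomap_begin() above is the translation of an ext4_map_blocks() result into a struct iomap. Stripped of the journalling, orphan-list and ENOSPC-retry logic, the mapping step amounts to the sketch below; ext4_fill_iomap_sketch is an illustrative name, and the field names are the ones the hunk itself uses:

static void ext4_fill_iomap_sketch(struct inode *inode,
				   struct ext4_map_blocks *map,
				   int nr_blocks_mapped, struct iomap *iomap)
{
	unsigned int blkbits = inode->i_blkbits;

	iomap->flags = 0;
	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = (loff_t)map->m_lblk << blkbits;
	iomap->length = (u64)map->m_len << blkbits;

	if (nr_blocks_mapped == 0) {
		/* ext4_map_blocks() returned 0: the range is a hole. */
		iomap->type = IOMAP_HOLE;
		iomap->blkno = IOMAP_NULL_BLOCK;
	} else {
		iomap->type = (map->m_flags & EXT4_MAP_UNWRITTEN) ?
				IOMAP_UNWRITTEN : IOMAP_MAPPED;
		iomap->blkno = (sector_t)map->m_pblk << (blkbits - 9);
	}
	if (map->m_flags & EXT4_MAP_NEW)
		iomap->flags |= IOMAP_F_NEW;
}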
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index bf5ae8ebbc97..49fd1371bfa2 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -153,7 +153,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
swap_inode_data(inode, inode_bl);
- inode->i_ctime = inode_bl->i_ctime = ext4_current_time(inode);
+ inode->i_ctime = inode_bl->i_ctime = current_time(inode);
spin_lock(&sbi->s_next_gen_lock);
inode->i_generation = sbi->s_next_generation++;
@@ -191,6 +191,7 @@ journal_err_out:
return err;
}
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
static int uuid_is_zero(__u8 u[16])
{
int i;
@@ -200,6 +201,7 @@ static int uuid_is_zero(__u8 u[16])
return 0;
return 1;
}
+#endif
static int ext4_ioctl_setflags(struct inode *inode,
unsigned int flags)
@@ -248,8 +250,11 @@ static int ext4_ioctl_setflags(struct inode *inode,
err = -EOPNOTSUPP;
goto flags_out;
}
- } else if (oldflags & EXT4_EOFBLOCKS_FL)
- ext4_truncate(inode);
+ } else if (oldflags & EXT4_EOFBLOCKS_FL) {
+ err = ext4_truncate(inode);
+ if (err)
+ goto flags_out;
+ }
handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
if (IS_ERR(handle)) {
@@ -265,6 +270,9 @@ static int ext4_ioctl_setflags(struct inode *inode,
for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
if (!(mask & EXT4_FL_USER_MODIFIABLE))
continue;
+ /* These flags get special treatment later */
+ if (mask == EXT4_JOURNAL_DATA_FL || mask == EXT4_EXTENTS_FL)
+ continue;
if (mask & flags)
ext4_set_inode_flag(inode, i);
else
@@ -272,7 +280,7 @@ static int ext4_ioctl_setflags(struct inode *inode,
}
ext4_set_inode_flags(inode);
- inode->i_ctime = ext4_current_time(inode);
+ inode->i_ctime = current_time(inode);
err = ext4_mark_iloc_dirty(handle, inode, &iloc);
flags_err:
@@ -368,7 +376,7 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
}
EXT4_I(inode)->i_projid = kprojid;
- inode->i_ctime = ext4_current_time(inode);
+ inode->i_ctime = current_time(inode);
out_dirty:
rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
if (!err)
@@ -409,6 +417,10 @@ static inline __u32 ext4_iflags_to_xflags(unsigned long iflags)
return xflags;
}
+#define EXT4_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
+ FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
+ FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)
+
/* Transfer xflags flags to internal */
static inline unsigned long ext4_xflags_to_iflags(__u32 xflags)
{
@@ -453,12 +465,22 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (get_user(flags, (int __user *) arg))
return -EFAULT;
+ if (flags & ~EXT4_FL_USER_VISIBLE)
+ return -EOPNOTSUPP;
+ /*
+ * chattr(1) grabs flags via GETFLAGS, modifies the result and
+ * passes that to SETFLAGS. So we cannot easily make SETFLAGS
+ * more restrictive than just silently masking off visible but
+ * not settable flags as we always did.
+ */
+ flags &= EXT4_FL_USER_MODIFIABLE;
+ if (ext4_mask_flags(inode->i_mode, flags) != flags)
+ return -EOPNOTSUPP;
+
err = mnt_want_write_file(filp);
if (err)
return err;
- flags = ext4_mask_flags(inode->i_mode, flags);
-
inode_lock(inode);
err = ext4_ioctl_setflags(inode, flags);
inode_unlock(inode);
@@ -500,7 +522,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err == 0) {
- inode->i_ctime = ext4_current_time(inode);
+ inode->i_ctime = current_time(inode);
inode->i_generation = generation;
err = ext4_mark_iloc_dirty(handle, inode, &iloc);
}
@@ -765,28 +787,19 @@ resizefs_out:
}
case EXT4_IOC_PRECACHE_EXTENTS:
return ext4_ext_precache(inode);
- case EXT4_IOC_SET_ENCRYPTION_POLICY: {
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
- struct fscrypt_policy policy;
+ case EXT4_IOC_SET_ENCRYPTION_POLICY:
if (!ext4_has_feature_encrypt(sb))
return -EOPNOTSUPP;
+ return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
- if (copy_from_user(&policy,
- (struct fscrypt_policy __user *)arg,
- sizeof(policy)))
- return -EFAULT;
- return fscrypt_process_policy(filp, &policy);
-#else
- return -EOPNOTSUPP;
-#endif
- }
case EXT4_IOC_GET_ENCRYPTION_PWSALT: {
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
int err, err2;
struct ext4_sb_info *sbi = EXT4_SB(sb);
handle_t *handle;
- if (!ext4_sb_has_crypto(sb))
+ if (!ext4_has_feature_encrypt(sb))
return -EOPNOTSUPP;
if (uuid_is_zero(sbi->s_es->s_encrypt_pw_salt)) {
err = mnt_want_write_file(filp);
@@ -816,24 +829,13 @@ resizefs_out:
sbi->s_es->s_encrypt_pw_salt, 16))
return -EFAULT;
return 0;
- }
- case EXT4_IOC_GET_ENCRYPTION_POLICY: {
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
- struct fscrypt_policy policy;
- int err = 0;
-
- if (!ext4_encrypted_inode(inode))
- return -ENOENT;
- err = fscrypt_get_policy(inode, &policy);
- if (err)
- return err;
- if (copy_to_user((void __user *)arg, &policy, sizeof(policy)))
- return -EFAULT;
- return 0;
#else
return -EOPNOTSUPP;
#endif
}
+ case EXT4_IOC_GET_ENCRYPTION_POLICY:
+ return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
+
case EXT4_IOC_FSGETXATTR:
{
struct fsxattr fa;
@@ -865,13 +867,17 @@ resizefs_out:
if (!inode_owner_or_capable(inode))
return -EACCES;
+ if (fa.fsx_xflags & ~EXT4_SUPPORTED_FS_XFLAGS)
+ return -EOPNOTSUPP;
+
+ flags = ext4_xflags_to_iflags(fa.fsx_xflags);
+ if (ext4_mask_flags(inode->i_mode, flags) != flags)
+ return -EOPNOTSUPP;
+
err = mnt_want_write_file(filp);
if (err)
return err;
- flags = ext4_xflags_to_iflags(fa.fsx_xflags);
- flags = ext4_mask_flags(inode->i_mode, flags);
-
inode_lock(inode);
flags = (ei->i_flags & ~EXT4_FL_XFLAG_VISIBLE) |
(flags & EXT4_FL_XFLAG_VISIBLE);
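The net effect of the ioctl changes is that user-supplied flags are validated before any locks are taken or writes are started: unknown bits are rejected with -EOPNOTSUPP, bits that are visible but not settable are silently masked off (so chattr(1), which feeds GETFLAGS output back into SETFLAGS, keeps working), and flags invalid for the file type are refused. A condensed restatement of the SETFLAGS ordering:

	if (flags & ~EXT4_FL_USER_VISIBLE)
		return -EOPNOTSUPP;		/* unknown bits */
	flags &= EXT4_FL_USER_MODIFIABLE;	/* drop visible-only bits */
	if (ext4_mask_flags(inode->i_mode, flags) != flags)
		return -EOPNOTSUPP;		/* invalid for this inode type */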
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index f418f55c2bbe..7ae43c59bc79 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -669,7 +669,7 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
ext4_grpblk_t min;
ext4_grpblk_t max;
ext4_grpblk_t chunk;
- unsigned short border;
+ unsigned int border;
BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
@@ -2287,7 +2287,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
struct ext4_group_info *grinfo;
struct sg {
struct ext4_group_info info;
- ext4_grpblk_t counters[16];
+ ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
} sg;
group--;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 104f8bfba718..eadba919f26b 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1941,7 +1941,7 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
* happen is that the times are slightly out of date
* and/or different from the directory change time.
*/
- dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
+ dir->i_mtime = dir->i_ctime = current_time(dir);
ext4_update_dx_flag(dir);
dir->i_version++;
ext4_mark_inode_dirty(handle, dir);
@@ -2987,7 +2987,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
* recovery. */
inode->i_size = 0;
ext4_orphan_add(handle, inode);
- inode->i_ctime = dir->i_ctime = dir->i_mtime = ext4_current_time(inode);
+ inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
ext4_mark_inode_dirty(handle, inode);
ext4_dec_count(handle, dir);
ext4_update_dx_flag(dir);
@@ -3050,13 +3050,13 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
retval = ext4_delete_entry(handle, dir, de, bh);
if (retval)
goto end_unlink;
- dir->i_ctime = dir->i_mtime = ext4_current_time(dir);
+ dir->i_ctime = dir->i_mtime = current_time(dir);
ext4_update_dx_flag(dir);
ext4_mark_inode_dirty(handle, dir);
drop_nlink(inode);
if (!inode->i_nlink)
ext4_orphan_add(handle, inode);
- inode->i_ctime = ext4_current_time(inode);
+ inode->i_ctime = current_time(inode);
ext4_mark_inode_dirty(handle, inode);
end_unlink:
@@ -3254,7 +3254,7 @@ retry:
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
- inode->i_ctime = ext4_current_time(inode);
+ inode->i_ctime = current_time(inode);
ext4_inc_count(handle, inode);
ihold(inode);
@@ -3381,7 +3381,7 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
ent->de->file_type = file_type;
ent->dir->i_version++;
ent->dir->i_ctime = ent->dir->i_mtime =
- ext4_current_time(ent->dir);
+ current_time(ent->dir);
ext4_mark_inode_dirty(handle, ent->dir);
BUFFER_TRACE(ent->bh, "call ext4_handle_dirty_metadata");
if (!ent->inlined) {
@@ -3651,7 +3651,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
* Like most other Unix systems, set the ctime for inodes on a
* rename.
*/
- old.inode->i_ctime = ext4_current_time(old.inode);
+ old.inode->i_ctime = current_time(old.inode);
ext4_mark_inode_dirty(handle, old.inode);
if (!whiteout) {
@@ -3663,9 +3663,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
if (new.inode) {
ext4_dec_count(handle, new.inode);
- new.inode->i_ctime = ext4_current_time(new.inode);
+ new.inode->i_ctime = current_time(new.inode);
}
- old.dir->i_ctime = old.dir->i_mtime = ext4_current_time(old.dir);
+ old.dir->i_ctime = old.dir->i_mtime = current_time(old.dir);
ext4_update_dx_flag(old.dir);
if (old.dir_bh) {
retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
@@ -3723,6 +3723,7 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
};
u8 new_file_type;
int retval;
+ struct timespec ctime;
if ((ext4_encrypted_inode(old_dir) ||
ext4_encrypted_inode(new_dir)) &&
@@ -3823,8 +3824,9 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
* Like most other Unix systems, set the ctime for inodes on a
* rename.
*/
- old.inode->i_ctime = ext4_current_time(old.inode);
- new.inode->i_ctime = ext4_current_time(new.inode);
+ ctime = current_time(old.inode);
+ old.inode->i_ctime = ctime;
+ new.inode->i_ctime = ctime;
ext4_mark_inode_dirty(handle, old.inode);
ext4_mark_inode_dirty(handle, new.inode);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index e0b3b54cdef3..e2332a65e8fb 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -470,7 +470,8 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
gfp_t gfp_flags = GFP_NOFS;
retry_encrypt:
- data_page = fscrypt_encrypt_page(inode, page, gfp_flags);
+ data_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
+ page->index, gfp_flags);
if (IS_ERR(data_page)) {
ret = PTR_ERR(data_page);
if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index caa4147cda47..dfc8309d7755 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -863,7 +863,6 @@ static void ext4_put_super(struct super_block *sb)
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
- brelse(sbi->s_sbh);
#ifdef CONFIG_QUOTA
for (i = 0; i < EXT4_MAXQUOTAS; i++)
kfree(sbi->s_qf_names[i]);
@@ -895,6 +894,7 @@ static void ext4_put_super(struct super_block *sb)
}
if (sbi->s_mmp_tsk)
kthread_stop(sbi->s_mmp_tsk);
+ brelse(sbi->s_sbh);
sb->s_fs_info = NULL;
/*
* Now that we are completely done shutting down the
@@ -1114,37 +1114,55 @@ static int ext4_prepare_context(struct inode *inode)
static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
void *fs_data)
{
- handle_t *handle;
- int res, res2;
+ handle_t *handle = fs_data;
+ int res, res2, retries = 0;
+
+ /*
+ * If a journal handle was specified, then the encryption context is
+ * being set on a new inode via inheritance and is part of a larger
+ * transaction to create the inode. Otherwise the encryption context is
+ * being set on an existing inode in its own transaction. Only in the
+ * latter case should the "retry on ENOSPC" logic be used.
+ */
- /* fs_data is null when internally used. */
- if (fs_data) {
- res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
- EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx,
- len, 0);
+ if (handle) {
+ res = ext4_xattr_set_handle(handle, inode,
+ EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+ ctx, len, 0);
if (!res) {
ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
ext4_clear_inode_state(inode,
EXT4_STATE_MAY_INLINE_DATA);
+ /*
+ * Update inode->i_flags - e.g. S_DAX may get disabled
+ */
+ ext4_set_inode_flags(inode);
}
return res;
}
+retry:
handle = ext4_journal_start(inode, EXT4_HT_MISC,
ext4_jbd2_credits_xattr(inode));
if (IS_ERR(handle))
return PTR_ERR(handle);
- res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
- EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx,
- len, 0);
+ res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+ ctx, len, 0);
if (!res) {
ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
+ /* Update inode->i_flags - e.g. S_DAX may get disabled */
+ ext4_set_inode_flags(inode);
res = ext4_mark_inode_dirty(handle, inode);
if (res)
EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
}
res2 = ext4_journal_stop(handle);
+
+ if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+ goto retry;
if (!res)
res = res2;
return res;
@@ -1883,12 +1901,6 @@ static int parse_options(char *options, struct super_block *sb,
return 0;
}
}
- if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
- test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
- ext4_msg(sb, KERN_ERR, "can't mount with journal_async_commit "
- "in data=ordered mode");
- return 0;
- }
return 1;
}
@@ -2330,7 +2342,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
struct ext4_super_block *es)
{
unsigned int s_flags = sb->s_flags;
- int nr_orphans = 0, nr_truncates = 0;
+ int ret, nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
int i;
#endif
@@ -2412,7 +2424,9 @@ static void ext4_orphan_cleanup(struct super_block *sb,
inode->i_ino, inode->i_size);
inode_lock(inode);
truncate_inode_pages(inode->i_mapping, inode->i_size);
- ext4_truncate(inode);
+ ret = ext4_truncate(inode);
+ if (ret)
+ ext4_std_error(inode->i_sb, ret);
inode_unlock(inode);
nr_truncates++;
} else {
@@ -3193,10 +3207,15 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
ext4_set_bit(s++, buf);
count++;
}
- for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
- ext4_set_bit(EXT4_B2C(sbi, s++), buf);
- count++;
+ j = ext4_bg_num_gdb(sb, grp);
+ if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
+ ext4_error(sb, "Invalid number of block group "
+ "descriptor blocks: %d", j);
+ j = EXT4_BLOCKS_PER_GROUP(sb) - s;
}
+ count += j;
+ for (; j > 0; j--)
+ ext4_set_bit(EXT4_B2C(sbi, s++), buf);
}
if (!count)
return 0;
@@ -3301,7 +3320,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
char *orig_data = kstrdup(data, GFP_KERNEL);
struct buffer_head *bh;
struct ext4_super_block *es = NULL;
- struct ext4_sb_info *sbi;
+ struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
ext4_fsblk_t block;
ext4_fsblk_t sb_block = get_sb_block(&data);
ext4_fsblk_t logical_sb_block;
@@ -3320,16 +3339,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
ext4_group_t first_not_zeroed;
- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
- if (!sbi)
- goto out_free_orig;
+ if ((data && !orig_data) || !sbi)
+ goto out_free_base;
sbi->s_blockgroup_lock =
kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
- if (!sbi->s_blockgroup_lock) {
- kfree(sbi);
- goto out_free_orig;
- }
+ if (!sbi->s_blockgroup_lock)
+ goto out_free_base;
+
sb->s_fs_info = sbi;
sbi->s_sb = sb;
sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
@@ -3475,11 +3492,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
*/
sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
- if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
- &journal_devnum, &journal_ioprio, 0)) {
- ext4_msg(sb, KERN_WARNING,
- "failed to parse options in superblock: %s",
- sbi->s_es->s_mount_opts);
+ if (sbi->s_es->s_mount_opts[0]) {
+ char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
+ sizeof(sbi->s_es->s_mount_opts),
+ GFP_KERNEL);
+ if (!s_mount_opts)
+ goto failed_mount;
+ if (!parse_options(s_mount_opts, sb, &journal_devnum,
+ &journal_ioprio, 0)) {
+ ext4_msg(sb, KERN_WARNING,
+ "failed to parse options in superblock: %s",
+ s_mount_opts);
+ }
+ kfree(s_mount_opts);
}
sbi->s_def_mount_opt = sbi->s_mount_opt;
if (!parse_options((char *) data, sb, &journal_devnum,
@@ -3505,6 +3530,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
"both data=journal and dax");
goto failed_mount;
}
+ if (ext4_has_feature_encrypt(sb)) {
+ ext4_msg(sb, KERN_WARNING,
+ "encrypted files will use data=ordered "
+ "instead of data journaling mode");
+ }
if (test_opt(sb, DELALLOC))
clear_opt(sb, DELALLOC);
} else {
@@ -3660,12 +3690,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
- if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
- goto cantfind_ext4;
sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
if (sbi->s_inodes_per_block == 0)
goto cantfind_ext4;
+ if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
+ sbi->s_inodes_per_group > blocksize * 8) {
+ ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
+ sbi->s_inodes_per_group);
+ goto failed_mount;
+ }
sbi->s_itb_per_group = sbi->s_inodes_per_group /
sbi->s_inodes_per_block;
sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
@@ -3748,13 +3782,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
sbi->s_cluster_ratio = clustersize / blocksize;
- if (sbi->s_inodes_per_group > blocksize * 8) {
- ext4_msg(sb, KERN_ERR,
- "#inodes per group too big: %lu",
- sbi->s_inodes_per_group);
- goto failed_mount;
- }
-
/* Do we have standard group size of clustersize * 8 blocks ? */
if (sbi->s_blocks_per_group == clustersize << 3)
set_opt2(sb, STD_GROUP_SIZE);
@@ -3814,6 +3841,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
EXT4_DESC_PER_BLOCK(sb);
+ if (ext4_has_feature_meta_bg(sb)) {
+ if (le32_to_cpu(es->s_first_meta_bg) >= db_count) {
+ ext4_msg(sb, KERN_WARNING,
+ "first meta block group too large: %u "
+ "(group descriptor block count %u)",
+ le32_to_cpu(es->s_first_meta_bg), db_count);
+ goto failed_mount;
+ }
+ }
sbi->s_group_desc = ext4_kvmalloc(db_count *
sizeof(struct buffer_head *),
GFP_KERNEL);
@@ -3967,6 +4003,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
default:
break;
}
+
+ if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
+ test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "journal_async_commit in data=ordered mode");
+ goto failed_mount_wq;
+ }
+
set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
@@ -4160,7 +4204,9 @@ no_journal:
if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
- "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
+ "Opts: %.*s%s%s", descr,
+ (int) sizeof(sbi->s_es->s_mount_opts),
+ sbi->s_es->s_mount_opts,
*sbi->s_es->s_mount_opts ? "; " : "", orig_data);
if (es->s_error_count)
@@ -4239,8 +4285,8 @@ failed_mount:
out_fail:
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
+out_free_base:
kfree(sbi);
-out_free_orig:
kfree(orig_data);
return err ? err : ret;
}
@@ -4550,7 +4596,8 @@ static int ext4_commit_super(struct super_block *sb, int sync)
&EXT4_SB(sb)->s_freeinodes_counter));
BUFFER_TRACE(sbh, "marking dirty");
ext4_superblock_csum_set(sb);
- lock_buffer(sbh);
+ if (sync)
+ lock_buffer(sbh);
if (buffer_write_io_error(sbh)) {
/*
* Oh, dear. A previous attempt to write the
@@ -4566,8 +4613,8 @@ static int ext4_commit_super(struct super_block *sb, int sync)
set_buffer_uptodate(sbh);
}
mark_buffer_dirty(sbh);
- unlock_buffer(sbh);
if (sync) {
+ unlock_buffer(sbh);
error = __sync_dirty_buffer(sbh,
test_opt(sb, BARRIER) ? REQ_FUA : REQ_SYNC);
if (error)
@@ -4857,6 +4904,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
err = -EINVAL;
goto restore_opts;
}
+ } else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
+ if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "journal_async_commit in data=ordered mode");
+ err = -EINVAL;
+ goto restore_opts;
+ }
}
if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
@@ -5366,7 +5420,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
if (IS_ERR(handle))
goto out;
- inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ inode->i_mtime = inode->i_ctime = current_time(inode);
ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
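One subtle point in the super.c changes: s_mount_opts is a fixed-size on-disk field that is not guaranteed to be NUL-terminated, so the options string is now copied with a bounded kstrndup() before parsing and printed with a width-limited "%.*s". A sketch of the copy-before-parse pattern, with error handling reduced to the essentials:

	char *s_mount_opts = kstrndup(es->s_mount_opts,
				      sizeof(es->s_mount_opts), GFP_KERNEL);

	if (!s_mount_opts)
		goto failed_mount;	/* treated like any other ENOMEM */
	if (!parse_options(s_mount_opts, sb, &journal_devnum,
			   &journal_ioprio, 0))
		ext4_msg(sb, KERN_WARNING,
			 "failed to parse options in superblock: %s",
			 s_mount_opts);
	kfree(s_mount_opts);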
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index d77be9e9f535..5a94fa52b74f 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -185,6 +185,7 @@ ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
{
struct ext4_xattr_entry *e = entry;
+ /* Find the end of the names list */
while (!IS_LAST_ENTRY(e)) {
struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
if ((void *)next >= end)
@@ -192,15 +193,29 @@ ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
e = next;
}
+ /* Check the values */
while (!IS_LAST_ENTRY(entry)) {
if (entry->e_value_block != 0)
return -EFSCORRUPTED;
- if (entry->e_value_size != 0 &&
- (value_start + le16_to_cpu(entry->e_value_offs) <
- (void *)e + sizeof(__u32) ||
- value_start + le16_to_cpu(entry->e_value_offs) +
- le32_to_cpu(entry->e_value_size) > end))
- return -EFSCORRUPTED;
+ if (entry->e_value_size != 0) {
+ u16 offs = le16_to_cpu(entry->e_value_offs);
+ u32 size = le32_to_cpu(entry->e_value_size);
+ void *value;
+
+ /*
+ * The value cannot overlap the names, and the value
+ * with padding cannot extend beyond 'end'. Check both
+ * the padded and unpadded sizes, since the size may
+ * overflow to 0 when adding padding.
+ */
+ if (offs > end - value_start)
+ return -EFSCORRUPTED;
+ value = value_start + offs;
+ if (value < (void *)e + sizeof(u32) ||
+ size > end - value ||
+ EXT4_XATTR_SIZE(size) > end - value)
+ return -EFSCORRUPTED;
+ }
entry = EXT4_XATTR_NEXT(entry);
}
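The rewritten check validates the untrusted offset against end - value_start before ever forming a pointer from it, then tests both the raw and the padded value size so that a wrap during padding cannot slip past the check. A userspace sketch of the same defensive pattern, with a simplified buffer layout and 4-byte padding rule standing in for the real on-disk format:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define PAD4(sz) (((sz) + 3) & ~3U)   /* 32-bit padded size, can wrap for huge sz */

/*
 * Validate that a value described by (offs, size) lies entirely inside
 * [value_start, end) without computing an out-of-range pointer.
 */
static int value_in_bounds(const uint8_t *value_start, const uint8_t *end,
                           uint16_t offs, uint32_t size)
{
    const uint8_t *value;

    if (offs > (size_t)(end - value_start))   /* offset itself out of range */
        return 0;
    value = value_start + offs;               /* only now is this safe to form */
    if (size > (size_t)(end - value))         /* unpadded size check */
        return 0;
    if (PAD4(size) > (size_t)(end - value))   /* padded size; the unpadded check
                                                 above already rejects sizes large
                                                 enough to make PAD4() wrap */
        return 0;
    return 1;
}

int main(void)
{
    uint8_t buf[32];

    printf("%d\n", value_in_bounds(buf, buf + sizeof(buf), 8, 20));  /* 1: fits */
    printf("%d\n", value_in_bounds(buf, buf + sizeof(buf), 8, 25));  /* 0: spills */
    printf("%d\n", value_in_bounds(buf, buf + sizeof(buf), 40, 1));  /* 0: bad offset */
    return 0;
}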
@@ -231,13 +246,12 @@ static int
__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
void *end, const char *function, unsigned int line)
{
- struct ext4_xattr_entry *entry = IFIRST(header);
int error = -EFSCORRUPTED;
- if (((void *) header >= end) ||
+ if (end - (void *)header < sizeof(*header) + sizeof(u32) ||
(header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)))
goto errout;
- error = ext4_xattr_check_names(entry, end, entry);
+ error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
errout:
if (error)
__ext4_error_inode(inode, function, line, 0,
@@ -1109,7 +1123,7 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
return 0;
}
-static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
+static int ext4_xattr_ibody_set(struct inode *inode,
struct ext4_xattr_info *i,
struct ext4_xattr_ibody_find *is)
{
@@ -1216,7 +1230,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
}
if (!value) {
if (!is.s.not_found)
- error = ext4_xattr_ibody_set(handle, inode, &i, &is);
+ error = ext4_xattr_ibody_set(inode, &i, &is);
else if (!bs.s.not_found)
error = ext4_xattr_block_set(handle, inode, &i, &bs);
} else {
@@ -1227,7 +1241,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
if (!bs.s.not_found && ext4_xattr_value_same(&bs.s, &i))
goto cleanup;
- error = ext4_xattr_ibody_set(handle, inode, &i, &is);
+ error = ext4_xattr_ibody_set(inode, &i, &is);
if (!error && !bs.s.not_found) {
i.value = NULL;
error = ext4_xattr_block_set(handle, inode, &i, &bs);
@@ -1242,14 +1256,13 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
goto cleanup;
if (!is.s.not_found) {
i.value = NULL;
- error = ext4_xattr_ibody_set(handle, inode, &i,
- &is);
+ error = ext4_xattr_ibody_set(inode, &i, &is);
}
}
}
if (!error) {
ext4_xattr_update_super_block(handle, inode->i_sb);
- inode->i_ctime = ext4_current_time(inode);
+ inode->i_ctime = current_time(inode);
if (!value)
ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
@@ -1384,7 +1397,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
goto out;
/* Remove the chosen entry from the inode */
- error = ext4_xattr_ibody_set(handle, inode, &i, is);
+ error = ext4_xattr_ibody_set(inode, &i, is);
if (error)
goto out;
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 6fe23af509e1..8f487692c21f 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -384,7 +384,7 @@ int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
if (error)
return error;
- f2fs_mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
if (default_acl) {
error = __f2fs_set_acl(inode, ACL_TYPE_DEFAULT, default_acl,
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index d935c06a84f0..f73ee9534d83 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -228,7 +228,7 @@ void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
f2fs_put_page(page, 0);
if (readahead)
- ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR, true);
+ ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true);
}
static int f2fs_write_meta_page(struct page *page,
@@ -770,7 +770,12 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
/* Sanity checking of checkpoint */
if (sanity_check_ckpt(sbi))
- goto fail_no_cp;
+ goto free_fail_no_cp;
+
+ if (cur_page == cp1)
+ sbi->cur_cp_pack = 1;
+ else
+ sbi->cur_cp_pack = 2;
if (cp_blks <= 1)
goto done;
@@ -793,6 +798,9 @@ done:
f2fs_put_page(cp2, 1);
return 0;
+free_fail_no_cp:
+ f2fs_put_page(cp1, 1);
+ f2fs_put_page(cp2, 1);
fail_no_cp:
kfree(sbi->ckpt);
return -EINVAL;
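The new free_fail_no_cp label makes sure both checkpoint pages are released when sanity_check_ckpt() fails, rather than leaking them on the old fail_no_cp path. A generic sketch of that staged goto-based cleanup idiom, with placeholder resource types instead of the f2fs structures:

#include <stdlib.h>

struct page_example { char data[16]; };   /* placeholder "page" */

static struct page_example *get_page_example(void)
{
    return malloc(sizeof(struct page_example));
}
static void put_page_example(struct page_example *p) { free(p); }
static int sanity_check_example(void) { return -1; }   /* pretend the check fails */

static int get_valid_checkpoint_example(void)
{
    struct page_example *cp1 = NULL, *cp2 = NULL;

    cp1 = get_page_example();
    cp2 = get_page_example();
    if (!cp1 || !cp2)
        goto free_fail;

    if (sanity_check_example())
        goto free_fail;          /* release everything acquired so far */

    /* ... use cp1/cp2 ... */
    put_page_example(cp2);
    put_page_example(cp1);
    return 0;

free_fail:
    put_page_example(cp2);       /* free(NULL) is a no-op, so this is safe */
    put_page_example(cp1);
    return -1;
}

int main(void) { return get_valid_checkpoint_example() ? 1 : 0; }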
@@ -921,7 +929,11 @@ int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
inode = igrab(&fi->vfs_inode);
spin_unlock(&sbi->inode_lock[DIRTY_META]);
if (inode) {
- update_inode_page(inode);
+ sync_inode_metadata(inode, 0);
+
+ /* it's on eviction */
+ if (is_inode_flag_set(inode, FI_DIRTY_INODE))
+ update_inode_page(inode);
iput(inode);
}
};
@@ -987,7 +999,7 @@ static void unblock_operations(struct f2fs_sb_info *sbi)
{
up_write(&sbi->node_write);
- build_free_nids(sbi);
+ build_free_nids(sbi, false);
f2fs_unlock_all(sbi);
}
@@ -998,7 +1010,7 @@ static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
for (;;) {
prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
- if (!atomic_read(&sbi->nr_wb_bios))
+ if (!get_pages(sbi, F2FS_WB_CP_DATA))
break;
io_schedule_timeout(5*HZ);
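The loop above now waits until the count of checkpoint-guaranteed writeback pages drains to zero instead of tracking a bio counter. A small pthread sketch of the same "wait until the in-flight counter reaches zero" pattern; this only illustrates the idea and is not the kernel waitqueue mechanism:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int in_flight;                 /* pages still under writeback */

static void page_submitted(void)
{
    pthread_mutex_lock(&lock);
    in_flight++;
    pthread_mutex_unlock(&lock);
}

static void page_completed(void)
{
    pthread_mutex_lock(&lock);
    if (--in_flight == 0)
        pthread_cond_broadcast(&drained);   /* wake the checkpoint waiter */
    pthread_mutex_unlock(&lock);
}

static void *io_worker(void *arg)
{
    (void)arg;
    usleep(1000);                     /* simulate I/O completion latency */
    page_completed();
    return NULL;
}

int main(void)
{
    pthread_t t;

    page_submitted();
    pthread_create(&t, NULL, io_worker, NULL);

    pthread_mutex_lock(&lock);
    while (in_flight)                 /* wait for all writeback to finish */
        pthread_cond_wait(&drained, &lock);
    pthread_mutex_unlock(&lock);

    pthread_join(t, NULL);
    puts("all writeback drained");
    return 0;
}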
@@ -1123,7 +1135,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
le32_to_cpu(ckpt->checksum_offset)))
= cpu_to_le32(crc32);
- start_blk = __start_cp_addr(sbi);
+ start_blk = __start_cp_next_addr(sbi);
/* need to wait for end_io results */
wait_on_all_pages_writeback(sbi);
@@ -1184,9 +1196,9 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- clear_prefree_segments(sbi, cpc);
clear_sbi_flag(sbi, SBI_IS_DIRTY);
clear_sbi_flag(sbi, SBI_NEED_CP);
+ __set_cp_next_pack(sbi);
/*
* redirty superblock if metadata like node page or inode cache is
@@ -1261,8 +1273,12 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* unlock all the fs_lock[] in do_checkpoint() */
err = do_checkpoint(sbi, cpc);
-
- f2fs_wait_all_discard_bio(sbi);
+ if (err) {
+ release_discard_addrs(sbi);
+ } else {
+ clear_prefree_segments(sbi, cpc);
+ f2fs_wait_all_discard_bio(sbi);
+ }
unblock_operations(sbi);
stat_inc_cp_count(sbi->stat_info);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 9e5561fa4cb6..9ac262564fa6 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -29,6 +29,26 @@
#include "trace.h"
#include <trace/events/f2fs.h>
+static bool __is_cp_guaranteed(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ struct inode *inode;
+ struct f2fs_sb_info *sbi;
+
+ if (!mapping)
+ return false;
+
+ inode = mapping->host;
+ sbi = F2FS_I_SB(inode);
+
+ if (inode->i_ino == F2FS_META_INO(sbi) ||
+ inode->i_ino == F2FS_NODE_INO(sbi) ||
+ S_ISDIR(inode->i_mode) ||
+ is_cold_data(page))
+ return true;
+ return false;
+}
+
static void f2fs_read_end_io(struct bio *bio)
{
struct bio_vec *bvec;
@@ -71,6 +91,7 @@ static void f2fs_write_end_io(struct bio *bio)
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
+ enum count_type type = WB_DATA_TYPE(page);
fscrypt_pullback_bio_page(&page, true);
@@ -78,9 +99,11 @@ static void f2fs_write_end_io(struct bio *bio)
mapping_set_error(page->mapping, -EIO);
f2fs_stop_checkpoint(sbi, true);
}
+ dec_page_count(sbi, type);
+ clear_cold_data(page);
end_page_writeback(page);
}
- if (atomic_dec_and_test(&sbi->nr_wb_bios) &&
+ if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
wq_has_sleeper(&sbi->cp_wait))
wake_up(&sbi->cp_wait);
@@ -88,6 +111,46 @@ static void f2fs_write_end_io(struct bio *bio)
}
/*
+ * Return the block device that backs blk_addr; if a bio is given, point
+ * it at that device and at the device-relative sector.
+ */
+struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
+ block_t blk_addr, struct bio *bio)
+{
+ struct block_device *bdev = sbi->sb->s_bdev;
+ int i;
+
+ for (i = 0; i < sbi->s_ndevs; i++) {
+ if (FDEV(i).start_blk <= blk_addr &&
+ FDEV(i).end_blk >= blk_addr) {
+ blk_addr -= FDEV(i).start_blk;
+ bdev = FDEV(i).bdev;
+ break;
+ }
+ }
+ if (bio) {
+ bio->bi_bdev = bdev;
+ bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
+ }
+ return bdev;
+}
+
+int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
+{
+ int i;
+
+ for (i = 0; i < sbi->s_ndevs; i++)
+ if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
+ return i;
+ return 0;
+}
+
+static bool __same_bdev(struct f2fs_sb_info *sbi,
+ block_t blk_addr, struct bio *bio)
+{
+ return f2fs_target_device(sbi, blk_addr, NULL) == bio->bi_bdev;
+}
+
+/*
* Low-level block read/write IO operations.
*/
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
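f2fs_target_device() walks the device table to find which member device covers a global block address and rebases the address so it is relative to that device, falling back to the first device when nothing matches. A userspace sketch of that lookup over a concatenated device table, with struct and field names invented for the example:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t block_t;

struct dev_range {
    const char *path;
    block_t start_blk;   /* first global block covered by this device */
    block_t end_blk;     /* last global block covered by this device */
};

/*
 * Find the device that covers 'blkaddr' and rebase the address so it is
 * relative to that device; fall back to device 0 if nothing matches.
 */
static int target_device(const struct dev_range *devs, int ndevs,
                         block_t blkaddr, block_t *local)
{
    int i;

    for (i = 0; i < ndevs; i++) {
        if (devs[i].start_blk <= blkaddr && blkaddr <= devs[i].end_blk) {
            *local = blkaddr - devs[i].start_blk;
            return i;
        }
    }
    *local = blkaddr;
    return 0;
}

int main(void)
{
    struct dev_range devs[] = {
        { "/dev/sdb", 0,    999  },
        { "/dev/sdc", 1000, 2999 },
    };
    block_t local;
    int idx = target_device(devs, 2, 1500, &local);

    printf("block 1500 -> %s, local block %u\n", devs[idx].path, local);
    return 0;
}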
@@ -97,8 +160,7 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
bio = f2fs_bio_alloc(npages);
- bio->bi_bdev = sbi->sb->s_bdev;
- bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
+ f2fs_target_device(sbi, blk_addr, bio);
bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
bio->bi_private = is_read ? NULL : sbi;
@@ -109,8 +171,7 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
struct bio *bio, enum page_type type)
{
if (!is_read_io(bio_op(bio))) {
- atomic_inc(&sbi->nr_wb_bios);
- if (f2fs_sb_mounted_hmsmr(sbi->sb) &&
+ if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
current->plug && (type == DATA || type == NODE))
blk_finish_plug(current->plug);
}
@@ -268,22 +329,24 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
verify_block_addr(sbi, fio->old_blkaddr);
verify_block_addr(sbi, fio->new_blkaddr);
+ bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+
+ if (!is_read)
+ inc_page_count(sbi, WB_DATA_TYPE(bio_page));
+
down_write(&io->io_rwsem);
if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
- (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags)))
+ (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
+ !__same_bdev(sbi, fio->new_blkaddr, io->bio)))
__submit_merged_bio(io);
alloc_new:
if (io->bio == NULL) {
- int bio_blocks = MAX_BIO_BLOCKS(sbi);
-
io->bio = __bio_alloc(sbi, fio->new_blkaddr,
- bio_blocks, is_read);
+ BIO_MAX_PAGES, is_read);
io->fio = *fio;
}
- bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
-
if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
PAGE_SIZE) {
__submit_merged_bio(io);
@@ -588,7 +651,6 @@ static int __allocate_data_block(struct dnode_of_data *dn)
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct f2fs_summary sum;
struct node_info ni;
- int seg = CURSEG_WARM_DATA;
pgoff_t fofs;
blkcnt_t count = 1;
@@ -606,11 +668,8 @@ alloc:
get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
- if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
- seg = CURSEG_DIRECT_IO;
-
allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
- &sum, seg);
+ &sum, CURSEG_WARM_DATA);
set_data_blkaddr(dn);
/* update i_size */
@@ -622,11 +681,18 @@ alloc:
return 0;
}
-ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
+static inline bool __force_buffered_io(struct inode *inode, int rw)
+{
+ return ((f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) ||
+ (rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
+ F2FS_I_SB(inode)->s_ndevs);
+}
+
+int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct f2fs_map_blocks map;
- ssize_t ret = 0;
+ int err = 0;
map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
@@ -638,19 +704,22 @@ ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
map.m_next_pgofs = NULL;
if (iocb->ki_flags & IOCB_DIRECT) {
- ret = f2fs_convert_inline_inode(inode);
- if (ret)
- return ret;
- return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
+ return f2fs_map_blocks(inode, &map, 1,
+ __force_buffered_io(inode, WRITE) ?
+ F2FS_GET_BLOCK_PRE_AIO :
+ F2FS_GET_BLOCK_PRE_DIO);
}
if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
- ret = f2fs_convert_inline_inode(inode);
- if (ret)
- return ret;
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
}
if (!f2fs_has_inline_data(inode))
return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
- return ret;
+ return err;
}
/*
@@ -674,7 +743,6 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
unsigned int ofs_in_node, last_ofs_in_node;
blkcnt_t prealloc;
struct extent_info ei;
- bool allocated = false;
block_t blkaddr;
if (!maxblocks)
@@ -714,7 +782,7 @@ next_dnode:
}
prealloc = 0;
- ofs_in_node = dn.ofs_in_node;
+ last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
next_block:
@@ -733,10 +801,8 @@ next_block:
}
} else {
err = __allocate_data_block(&dn);
- if (!err) {
+ if (!err)
set_inode_flag(inode, FI_APPEND_WRITE);
- allocated = true;
- }
}
if (err)
goto sync_out;
@@ -791,7 +857,6 @@ skip:
err = reserve_new_blocks(&dn, prealloc);
if (err)
goto sync_out;
- allocated = dn.node_changed;
map->m_len += dn.ofs_in_node - ofs_in_node;
if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
@@ -810,9 +875,8 @@ skip:
if (create) {
f2fs_unlock_op(sbi);
- f2fs_balance_fs(sbi, allocated);
+ f2fs_balance_fs(sbi, dn.node_changed);
}
- allocated = false;
goto next_dnode;
sync_out:
@@ -820,7 +884,7 @@ sync_out:
unlock_out:
if (create) {
f2fs_unlock_op(sbi);
- f2fs_balance_fs(sbi, allocated);
+ f2fs_balance_fs(sbi, dn.node_changed);
}
out:
trace_f2fs_map_blocks(inode, map, err);
@@ -832,19 +896,19 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
pgoff_t *next_pgofs)
{
struct f2fs_map_blocks map;
- int ret;
+ int err;
map.m_lblk = iblock;
map.m_len = bh->b_size >> inode->i_blkbits;
map.m_next_pgofs = next_pgofs;
- ret = f2fs_map_blocks(inode, &map, create, flag);
- if (!ret) {
+ err = f2fs_map_blocks(inode, &map, create, flag);
+ if (!err) {
map_bh(bh, inode->i_sb, map.m_pblk);
bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
bh->b_size = map.m_len << inode->i_blkbits;
}
- return ret;
+ return err;
}
static int get_data_block(struct inode *inode, sector_t iblock,
@@ -889,7 +953,6 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
struct buffer_head map_bh;
sector_t start_blk, last_blk;
pgoff_t next_pgofs;
- loff_t isize;
u64 logical = 0, phys = 0, size = 0;
u32 flags = 0;
int ret = 0;
@@ -906,13 +969,6 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
inode_lock(inode);
- isize = i_size_read(inode);
- if (start >= isize)
- goto out;
-
- if (start + len > isize)
- len = isize - start;
-
if (logical_to_blk(inode, len) == 0)
len = blk_to_logical(inode, 1);
@@ -931,13 +987,11 @@ next:
/* HOLE */
if (!buffer_mapped(&map_bh)) {
start_blk = next_pgofs;
- /* Go through holes util pass the EOF */
- if (blk_to_logical(inode, start_blk) < isize)
+
+ if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
+ F2FS_I_SB(inode)->max_file_blocks))
goto prep_next;
- /* Found a hole beyond isize means no more extents.
- * Note that the premise is that filesystems don't
- * punch holes beyond isize and keep size unchanged.
- */
+
flags |= FIEMAP_EXTENT_LAST;
}
@@ -980,7 +1034,6 @@ static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct fscrypt_ctx *ctx = NULL;
- struct block_device *bdev = sbi->sb->s_bdev;
struct bio *bio;
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
@@ -998,8 +1051,7 @@ static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
fscrypt_release_ctx(ctx);
return ERR_PTR(-ENOMEM);
}
- bio->bi_bdev = bdev;
- bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blkaddr);
+ f2fs_target_device(sbi, blkaddr, bio);
bio->bi_end_io = f2fs_read_end_io;
bio->bi_private = ctx;
@@ -1094,7 +1146,8 @@ got_it:
* This page will go to BIO. Do we need to send this
* BIO off first?
*/
- if (bio && (last_block_in_bio != block_nr - 1)) {
+ if (bio && (last_block_in_bio != block_nr - 1 ||
+ !__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
__submit_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
@@ -1193,7 +1246,9 @@ int do_write_data_page(struct f2fs_io_info *fio)
fio->old_blkaddr);
retry_encrypt:
fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
- gfp_flags);
+ PAGE_SIZE, 0,
+ fio->page->index,
+ gfp_flags);
if (IS_ERR(fio->encrypted_page)) {
err = PTR_ERR(fio->encrypted_page);
if (err == -ENOMEM) {
@@ -1309,7 +1364,6 @@ done:
if (err && err != -ENOENT)
goto redirty_out;
- clear_cold_data(page);
out:
inode_dec_dirty_pages(inode);
if (err)
@@ -1330,6 +1384,8 @@ out:
redirty_out:
redirty_page_for_writepage(wbc, page);
+ if (!err)
+ return AOP_WRITEPAGE_ACTIVATE;
unlock_page(page);
return err;
}
@@ -1425,6 +1481,15 @@ continue_unlock:
ret = mapping->a_ops->writepage(page, wbc);
if (unlikely(ret)) {
+ /*
+ * keep nr_to_write, since vfs uses this to
+ * get # of written pages.
+ */
+ if (ret == AOP_WRITEPAGE_ACTIVATE) {
+ unlock_page(page);
+ ret = 0;
+ continue;
+ }
done_index = page->index + 1;
done = 1;
break;
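With this change ->writepage can return AOP_WRITEPAGE_ACTIVATE to say "skip this page for now", and the writeback loop treats that as success without consuming nr_to_write. A compact sketch of a loop distinguishing the three outcomes; the constant and callback below are stand-ins, not the VFS interfaces:

#include <stdio.h>

#define WRITE_SKIP  1        /* stand-in for AOP_WRITEPAGE_ACTIVATE */

/* Pretend pages 0..4; page 2 cannot be written right now. */
static int writepage_example(int page)
{
    if (page == 2)
        return WRITE_SKIP;   /* caller should just unlock and move on */
    return 0;                /* written */
}

int main(void)
{
    int nr_to_write = 5, written = 0;

    for (int page = 0; page < 5 && nr_to_write > 0; page++) {
        int ret = writepage_example(page);

        if (ret == WRITE_SKIP)
            continue;        /* neither an error nor counted as written */
        if (ret < 0) {
            fprintf(stderr, "write error on page %d\n", page);
            break;
        }
        written++;
        nr_to_write--;       /* only real writes consume the budget */
    }
    printf("wrote %d pages, budget left %d\n", written, nr_to_write);
    return 0;
}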
@@ -1712,7 +1777,6 @@ static int f2fs_write_end(struct file *file,
goto unlock_out;
set_page_dirty(page);
- clear_cold_data(page);
if (pos + copied > i_size_read(inode))
f2fs_i_size_write(inode, pos + copied);
@@ -1749,9 +1813,7 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
if (err)
return err;
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
- return 0;
- if (test_opt(F2FS_I_SB(inode), LFS))
+ if (__force_buffered_io(inode, rw))
return 0;
trace_f2fs_direct_IO_enter(inode, offset, count, rw);
@@ -1783,12 +1845,14 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
return;
if (PageDirty(page)) {
- if (inode->i_ino == F2FS_META_INO(sbi))
+ if (inode->i_ino == F2FS_META_INO(sbi)) {
dec_page_count(sbi, F2FS_DIRTY_META);
- else if (inode->i_ino == F2FS_NODE_INO(sbi))
+ } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
dec_page_count(sbi, F2FS_DIRTY_NODES);
- else
+ } else {
inode_dec_dirty_pages(inode);
+ remove_dirty_inode(inode);
+ }
}
/* This is atomic written page, keep Private */
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index fb245bd302e4..fbd5184140d0 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -50,7 +50,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
- si->wb_bios = atomic_read(&sbi->nr_wb_bios);
+ si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA);
+ si->nr_wb_data = get_pages(sbi, F2FS_WB_DATA);
si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
si->rsvd_segs = reserved_segments(sbi);
si->overp_segs = overprovision_segments(sbi);
@@ -74,7 +75,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
si->sits = MAIN_SEGS(sbi);
si->dirty_sits = SIT_I(sbi)->dirty_sentries;
- si->fnids = NM_I(sbi)->fcnt;
+ si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID_LIST];
+ si->alloc_nids = NM_I(sbi)->nid_cnt[ALLOC_NID_LIST];
si->bg_gc = sbi->bg_gc;
si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
@@ -194,7 +196,9 @@ get_cache:
si->cache_mem += sizeof(struct flush_cmd_control);
/* free nids */
- si->cache_mem += NM_I(sbi)->fcnt * sizeof(struct free_nid);
+ si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID_LIST] +
+ NM_I(sbi)->nid_cnt[ALLOC_NID_LIST]) *
+ sizeof(struct free_nid);
si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry);
si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
sizeof(struct nat_entry_set);
@@ -310,22 +314,22 @@ static int stat_show(struct seq_file *s, void *v)
seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n",
si->ext_tree, si->zombie_tree, si->ext_node);
seq_puts(s, "\nBalancing F2FS Async:\n");
- seq_printf(s, " - inmem: %4lld, wb_bios: %4d\n",
- si->inmem_pages, si->wb_bios);
- seq_printf(s, " - nodes: %4lld in %4d\n",
+ seq_printf(s, " - inmem: %4d, wb_cp_data: %4d, wb_data: %4d\n",
+ si->inmem_pages, si->nr_wb_cp_data, si->nr_wb_data);
+ seq_printf(s, " - nodes: %4d in %4d\n",
si->ndirty_node, si->node_pages);
- seq_printf(s, " - dents: %4lld in dirs:%4d (%4d)\n",
+ seq_printf(s, " - dents: %4d in dirs:%4d (%4d)\n",
si->ndirty_dent, si->ndirty_dirs, si->ndirty_all);
- seq_printf(s, " - datas: %4lld in files:%4d\n",
+ seq_printf(s, " - datas: %4d in files:%4d\n",
si->ndirty_data, si->ndirty_files);
- seq_printf(s, " - meta: %4lld in %4d\n",
+ seq_printf(s, " - meta: %4d in %4d\n",
si->ndirty_meta, si->meta_pages);
- seq_printf(s, " - imeta: %4lld\n",
+ seq_printf(s, " - imeta: %4d\n",
si->ndirty_imeta);
seq_printf(s, " - NATs: %9d/%9d\n - SITs: %9d/%9d\n",
si->dirty_nats, si->nats, si->dirty_sits, si->sits);
- seq_printf(s, " - free_nids: %9d\n",
- si->fnids);
+ seq_printf(s, " - free_nids: %9d, alloc_nids: %9d\n",
+ si->free_nids, si->alloc_nids);
seq_puts(s, "\nDistribution of User Blocks:");
seq_puts(s, " [ valid | invalid | free ]\n");
seq_puts(s, " [");
@@ -373,6 +377,7 @@ static int stat_open(struct inode *inode, struct file *file)
}
static const struct file_operations stat_fops = {
+ .owner = THIS_MODULE,
.open = stat_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 369f4513be37..827c5daef4fc 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -136,7 +136,7 @@ struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname,
/* show encrypted name */
if (fname->hash) {
- if (de->hash_code == fname->hash)
+ if (de->hash_code == cpu_to_le32(fname->hash))
goto found;
} else if (de_name.len == name->len &&
de->hash_code == namehash &&
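The fix above converts the CPU-order hash to the on-disk little-endian representation before comparing, instead of comparing raw bytes against a CPU-order value. A hedged sketch of why that matters, using the glibc/BSD endian.h helpers as stand-ins for the kernel's cpu_to_le32()/le32_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct dirent_example {
    uint32_t hash_code;      /* stored little-endian on disk */
};

int main(void)
{
    uint32_t cpu_hash = 0x12345678;
    struct dirent_example de;

    de.hash_code = htole32(cpu_hash);            /* how it lands on disk */

    /* Wrong on big-endian hosts: raw on-disk field vs CPU-order value. */
    int naive = (de.hash_code == cpu_hash);

    /* Correct everywhere: convert one side before comparing. */
    int safe = (le32toh(de.hash_code) == cpu_hash);

    printf("naive=%d safe=%d\n", naive, safe);   /* naive may be 0 on BE */
    return 0;
}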
@@ -313,7 +313,7 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
set_page_dirty(page);
dir->i_mtime = dir->i_ctime = current_time(dir);
- f2fs_mark_inode_dirty_sync(dir);
+ f2fs_mark_inode_dirty_sync(dir, false);
f2fs_put_page(page, 1);
}
@@ -466,7 +466,7 @@ void update_parent_metadata(struct inode *dir, struct inode *inode,
clear_inode_flag(inode, FI_NEW_INODE);
}
dir->i_mtime = dir->i_ctime = current_time(dir);
- f2fs_mark_inode_dirty_sync(dir);
+ f2fs_mark_inode_dirty_sync(dir, false);
if (F2FS_I(dir)->i_current_depth != current_depth)
f2fs_i_depth_write(dir, current_depth);
@@ -731,7 +731,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
set_page_dirty(page);
dir->i_ctime = dir->i_mtime = current_time(dir);
- f2fs_mark_inode_dirty_sync(dir);
+ f2fs_mark_inode_dirty_sync(dir, false);
if (inode)
f2fs_drop_nlink(dir, inode);
@@ -742,6 +742,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
ClearPagePrivate(page);
ClearPageUptodate(page);
inode_dec_dirty_pages(dir);
+ remove_dirty_inode(dir);
}
f2fs_put_page(page, 1);
}
@@ -784,7 +785,7 @@ bool f2fs_empty_dir(struct inode *dir)
return true;
}
-bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
+int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
unsigned int start_pos, struct fscrypt_str *fstr)
{
unsigned char d_type = DT_UNKNOWN;
@@ -819,7 +820,7 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
(u32)de->hash_code, 0,
&de_name, fstr);
if (err)
- return true;
+ return err;
de_name = *fstr;
fstr->len = save_len;
@@ -827,12 +828,12 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
if (!dir_emit(ctx, de_name.name, de_name.len,
le32_to_cpu(de->ino), d_type))
- return true;
+ return 1;
bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
ctx->pos = start_pos + bit_pos;
}
- return false;
+ return 0;
}
static int f2fs_readdir(struct file *file, struct dir_context *ctx)
@@ -871,17 +872,21 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
dentry_page = get_lock_data_page(inode, n, false);
if (IS_ERR(dentry_page)) {
err = PTR_ERR(dentry_page);
- if (err == -ENOENT)
+ if (err == -ENOENT) {
+ err = 0;
continue;
- else
+ } else {
goto out;
+ }
}
dentry_blk = kmap(dentry_page);
make_dentry_ptr(inode, &d, (void *)dentry_blk, 1);
- if (f2fs_fill_dentries(ctx, &d, n * NR_DENTRY_IN_BLOCK, &fstr)) {
+ err = f2fs_fill_dentries(ctx, &d,
+ n * NR_DENTRY_IN_BLOCK, &fstr);
+ if (err) {
kunmap(dentry_page);
f2fs_put_page(dentry_page, 1);
break;
@@ -891,10 +896,9 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
kunmap(dentry_page);
f2fs_put_page(dentry_page, 1);
}
- err = 0;
out:
fscrypt_fname_free_buffer(&fstr);
- return err;
+ return err < 0 ? err : 0;
}
static int f2fs_dir_open(struct inode *inode, struct file *filp)
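After this change f2fs_fill_dentries() returns a negative errno on failure, a positive value when the consumer's buffer is full, and 0 when the block is exhausted; f2fs_readdir() keeps iterating on 0, stops on anything else, and surfaces only negative values to the caller. A minimal sketch of that tri-state convention, with illustrative helper names:

#include <stdio.h>
#include <errno.h>

/* Return <0 on error, >0 to stop early, 0 to keep going. */
static int fill_block_example(int block)
{
    if (block == 3)
        return 1;            /* consumer is full: stop, but not an error */
    if (block == 7)
        return -EIO;         /* real failure */
    return 0;
}

static int readdir_example(void)
{
    int err = 0;

    for (int block = 0; block < 10; block++) {
        err = fill_block_example(block);
        if (err)
            break;           /* stop on either "full" or error */
    }
    return err < 0 ? err : 0;   /* only negative values are errors */
}

int main(void)
{
    printf("readdir_example() = %d\n", readdir_example());   /* 0: stopped at block 3 */
    return 0;
}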
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 2b06d4fcd954..4db44da7ef69 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -172,7 +172,7 @@ static void __drop_largest_extent(struct inode *inode,
if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs) {
largest->len = 0;
- f2fs_mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
}
}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 2cf4f7f09e32..2da8c3aa0ce5 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -103,7 +103,7 @@ struct f2fs_mount_info {
};
#define F2FS_FEATURE_ENCRYPT 0x0001
-#define F2FS_FEATURE_HMSMR 0x0002
+#define F2FS_FEATURE_BLKZONED 0x0002
#define F2FS_HAS_FEATURE(sb, mask) \
((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -401,6 +401,7 @@ struct f2fs_map_blocks {
#define FADVISE_LOST_PINO_BIT 0x02
#define FADVISE_ENCRYPT_BIT 0x04
#define FADVISE_ENC_NAME_BIT 0x08
+#define FADVISE_KEEP_SIZE_BIT 0x10
#define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT)
#define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT)
@@ -413,6 +414,8 @@ struct f2fs_map_blocks {
#define file_clear_encrypt(inode) clear_file(inode, FADVISE_ENCRYPT_BIT)
#define file_enc_name(inode) is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
+#define file_keep_isize(inode) is_file(inode, FADVISE_KEEP_SIZE_BIT)
+#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
#define DEF_DIR_LEVEL 0
@@ -428,7 +431,7 @@ struct f2fs_inode_info {
/* Use below internally in f2fs*/
unsigned long flags; /* use to pass per-file flags */
struct rw_semaphore i_sem; /* protect fi info */
- struct percpu_counter dirty_pages; /* # of dirty pages */
+ atomic_t dirty_pages; /* # of dirty pages */
f2fs_hash_t chash; /* hash value of given file name */
unsigned int clevel; /* maximum level of given file name */
nid_t i_xattr_nid; /* node id that contains xattrs */
@@ -493,20 +496,26 @@ static inline bool __is_front_mergeable(struct extent_info *cur,
return __is_extent_mergeable(cur, front);
}
-extern void f2fs_mark_inode_dirty_sync(struct inode *);
+extern void f2fs_mark_inode_dirty_sync(struct inode *, bool);
static inline void __try_update_largest_extent(struct inode *inode,
struct extent_tree *et, struct extent_node *en)
{
if (en->ei.len > et->largest.len) {
et->largest = en->ei;
- f2fs_mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
}
}
+enum nid_list {
+ FREE_NID_LIST,
+ ALLOC_NID_LIST,
+ MAX_NID_LIST,
+};
+
struct f2fs_nm_info {
block_t nat_blkaddr; /* base disk address of NAT */
nid_t max_nid; /* maximum possible node ids */
- nid_t available_nids; /* maximum available node ids */
+ nid_t available_nids; /* # of available node ids */
nid_t next_scan_nid; /* the next nid to be scanned */
unsigned int ram_thresh; /* control the memory footprint */
unsigned int ra_nid_pages; /* # of nid pages to be readaheaded */
@@ -522,9 +531,9 @@ struct f2fs_nm_info {
/* free node ids management */
struct radix_tree_root free_nid_root;/* root of the free_nid cache */
- struct list_head free_nid_list; /* a list for free nids */
- spinlock_t free_nid_list_lock; /* protect free nid list */
- unsigned int fcnt; /* the number of free node id */
+ struct list_head nid_list[MAX_NID_LIST];/* lists for free nids */
+ unsigned int nid_cnt[MAX_NID_LIST]; /* the number of free node id */
+ spinlock_t nid_list_lock; /* protect nid lists ops */
struct mutex build_lock; /* lock for build free nids */
/* for checkpoint */
@@ -585,7 +594,6 @@ enum {
CURSEG_WARM_NODE, /* direct node blocks of normal files */
CURSEG_COLD_NODE, /* indirect node blocks */
NO_CHECK_TYPE,
- CURSEG_DIRECT_IO, /* to use for the direct IO path */
};
struct flush_cmd {
@@ -649,6 +657,7 @@ struct f2fs_sm_info {
* f2fs monitors the number of several block types such as on-writeback,
* dirty dentry blocks, dirty node blocks, and dirty meta blocks.
*/
+#define WB_DATA_TYPE(p) (__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
F2FS_DIRTY_DENTS,
F2FS_DIRTY_DATA,
@@ -656,6 +665,8 @@ enum count_type {
F2FS_DIRTY_META,
F2FS_INMEM_PAGES,
F2FS_DIRTY_IMETA,
+ F2FS_WB_CP_DATA,
+ F2FS_WB_DATA,
NR_COUNT_TYPE,
};
@@ -704,6 +715,20 @@ struct f2fs_bio_info {
struct rw_semaphore io_rwsem; /* blocking op for bio */
};
+#define FDEV(i) (sbi->devs[i])
+#define RDEV(i) (raw_super->devs[i])
+struct f2fs_dev_info {
+ struct block_device *bdev;
+ char path[MAX_PATH_LEN];
+ unsigned int total_segments;
+ block_t start_blk;
+ block_t end_blk;
+#ifdef CONFIG_BLK_DEV_ZONED
+ unsigned int nr_blkz; /* Total number of zones */
+ u8 *blkz_type; /* Array of zones type */
+#endif
+};
+
enum inode_type {
DIR_INODE, /* for dirty dir inode */
FILE_INODE, /* for dirty regular/symlink inode */
@@ -750,6 +775,12 @@ struct f2fs_sb_info {
u8 key_prefix[F2FS_KEY_DESC_PREFIX_SIZE];
u8 key_prefix_size;
#endif
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ unsigned int blocks_per_blkz; /* F2FS blocks per zone */
+ unsigned int log_blocks_per_blkz; /* log2 F2FS blocks per zone */
+#endif
+
/* for node-related operations */
struct f2fs_nm_info *nm_info; /* node manager */
struct inode *node_inode; /* cache node blocks */
@@ -764,6 +795,7 @@ struct f2fs_sb_info {
/* for checkpoint */
struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
+ int cur_cp_pack; /* remain current cp pack */
spinlock_t cp_lock; /* for flag in ckpt */
struct inode *meta_inode; /* cache meta blocks */
struct mutex cp_mutex; /* checkpoint procedure lock */
@@ -815,10 +847,9 @@ struct f2fs_sb_info {
block_t discard_blks; /* discard command candidats */
block_t last_valid_block_count; /* for recovery */
u32 s_next_generation; /* for NFS support */
- atomic_t nr_wb_bios; /* # of writeback bios */
/* # of pages, see count_type */
- struct percpu_counter nr_pages[NR_COUNT_TYPE];
+ atomic_t nr_pages[NR_COUNT_TYPE];
/* # of allocated blocks */
struct percpu_counter alloc_valid_block_count;
@@ -863,6 +894,8 @@ struct f2fs_sb_info {
/* For shrinker support */
struct list_head s_list;
+ int s_ndevs; /* number of devices */
+ struct f2fs_dev_info *devs; /* for device list */
struct mutex umount_mutex;
unsigned int shrinker_run_no;
@@ -1105,13 +1138,6 @@ static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
spin_unlock(&sbi->cp_lock);
}
-static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi)
-{
- struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
-
- return blk_queue_discard(q);
-}
-
static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
down_read(&sbi->cp_rwsem);
@@ -1232,9 +1258,10 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
- percpu_counter_inc(&sbi->nr_pages[count_type]);
+ atomic_inc(&sbi->nr_pages[count_type]);
- if (count_type == F2FS_DIRTY_DATA || count_type == F2FS_INMEM_PAGES)
+ if (count_type == F2FS_DIRTY_DATA || count_type == F2FS_INMEM_PAGES ||
+ count_type == F2FS_WB_CP_DATA || count_type == F2FS_WB_DATA)
return;
set_sbi_flag(sbi, SBI_IS_DIRTY);
@@ -1242,14 +1269,14 @@ static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
static inline void inode_inc_dirty_pages(struct inode *inode)
{
- percpu_counter_inc(&F2FS_I(inode)->dirty_pages);
+ atomic_inc(&F2FS_I(inode)->dirty_pages);
inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
}
static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
- percpu_counter_dec(&sbi->nr_pages[count_type]);
+ atomic_dec(&sbi->nr_pages[count_type]);
}
static inline void inode_dec_dirty_pages(struct inode *inode)
@@ -1258,19 +1285,19 @@ static inline void inode_dec_dirty_pages(struct inode *inode)
!S_ISLNK(inode->i_mode))
return;
- percpu_counter_dec(&F2FS_I(inode)->dirty_pages);
+ atomic_dec(&F2FS_I(inode)->dirty_pages);
dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
}
static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
{
- return percpu_counter_sum_positive(&sbi->nr_pages[count_type]);
+ return atomic_read(&sbi->nr_pages[count_type]);
}
-static inline s64 get_dirty_pages(struct inode *inode)
+static inline int get_dirty_pages(struct inode *inode)
{
- return percpu_counter_sum_positive(&F2FS_I(inode)->dirty_pages);
+ return atomic_read(&F2FS_I(inode)->dirty_pages);
}
static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
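These hunks replace per-cpu counters with plain atomics for the page counts, trading per-cpu write scalability for exact and cheap reads. A small C11 sketch of the counter interface being swapped in; the enum and helper names are illustrative, not the f2fs ones:

#include <stdatomic.h>
#include <stdio.h>

enum count_type_example { DIRTY_DATA, WB_DATA, NR_COUNT };

static atomic_long nr_pages[NR_COUNT];   /* zero-initialized at startup */

static void inc_page_count_example(enum count_type_example t)
{
    atomic_fetch_add(&nr_pages[t], 1);
}

static void dec_page_count_example(enum count_type_example t)
{
    atomic_fetch_sub(&nr_pages[t], 1);
}

static long get_pages_example(enum count_type_example t)
{
    /* A single atomic read is exact, unlike a summed per-cpu estimate. */
    return atomic_load(&nr_pages[t]);
}

int main(void)
{
    inc_page_count_example(WB_DATA);
    inc_page_count_example(WB_DATA);
    dec_page_count_example(WB_DATA);
    printf("WB_DATA = %ld\n", get_pages_example(WB_DATA));   /* 1 */
    return 0;
}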
@@ -1329,22 +1356,27 @@ static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
- block_t start_addr;
- struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
- unsigned long long ckpt_version = cur_cp_version(ckpt);
-
- start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
+ block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
- /*
- * odd numbered checkpoint should at cp segment 0
- * and even segment must be at cp segment 1
- */
- if (!(ckpt_version & 1))
+ if (sbi->cur_cp_pack == 2)
start_addr += sbi->blocks_per_seg;
+ return start_addr;
+}
+
+static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
+{
+ block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
+ if (sbi->cur_cp_pack == 1)
+ start_addr += sbi->blocks_per_seg;
return start_addr;
}
+static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
+{
+ sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
+}
+
static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
@@ -1621,7 +1653,7 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
return;
case FI_DATA_EXIST:
case FI_INLINE_DOTS:
- f2fs_mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
}
}
@@ -1648,7 +1680,7 @@ static inline void set_acl_inode(struct inode *inode, umode_t mode)
{
F2FS_I(inode)->i_acl_mode = mode;
set_inode_flag(inode, FI_ACL_MODE);
- f2fs_mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, false);
}
static inline void f2fs_i_links_write(struct inode *inode, bool inc)
@@ -1657,7 +1689,7 @@ static inline void f2fs_i_links_write(struct inode *inode, bool inc)
inc_nlink(inode);
else
drop_nlink(inode);
- f2fs_mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
}
static inline void f2fs_i_blocks_write(struct inode *inode,
@@ -1668,7 +1700,7 @@ static inline void f2fs_i_blocks_write(struct inode *inode,
inode->i_blocks = add ? inode->i_blocks + diff :
inode->i_blocks - diff;
- f2fs_mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
if (clean || recover)
set_inode_flag(inode, FI_AUTO_RECOVER);
}
@@ -1682,34 +1714,27 @@ static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
return;
i_size_write(inode, i_size);
- f2fs_mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
if (clean || recover)
set_inode_flag(inode, FI_AUTO_RECOVER);
}
-static inline bool f2fs_skip_inode_update(struct inode *inode)
-{
- if (!is_inode_flag_set(inode, FI_AUTO_RECOVER))
- return false;
- return F2FS_I(inode)->last_disk_size == i_size_read(inode);
-}
-
static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
{
F2FS_I(inode)->i_current_depth = depth;
- f2fs_mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
}
static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
{
F2FS_I(inode)->i_xattr_nid = xnid;
- f2fs_mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
}
static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
{
F2FS_I(inode)->i_pino = pino;
- f2fs_mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
}
static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
@@ -1837,13 +1862,31 @@ static inline int is_file(struct inode *inode, int type)
static inline void set_file(struct inode *inode, int type)
{
F2FS_I(inode)->i_advise |= type;
- f2fs_mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
}
static inline void clear_file(struct inode *inode, int type)
{
F2FS_I(inode)->i_advise &= ~type;
- f2fs_mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
+}
+
+static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
+{
+ if (dsync) {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ bool ret;
+
+ spin_lock(&sbi->inode_lock[DIRTY_META]);
+ ret = list_empty(&F2FS_I(inode)->gdirty_list);
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ return ret;
+ }
+ if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
+ file_keep_isize(inode) ||
+ i_size_read(inode) & PAGE_MASK)
+ return false;
+ return F2FS_I(inode)->last_disk_size == i_size_read(inode);
}
static inline int f2fs_readonly(struct super_block *sb)
@@ -1955,7 +1998,7 @@ void set_de_type(struct f2fs_dir_entry *, umode_t);
unsigned char get_de_type(struct f2fs_dir_entry *);
struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *,
f2fs_hash_t, int *, struct f2fs_dentry_ptr *);
-bool f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *,
+int f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *,
unsigned int, struct fscrypt_str *);
void do_make_empty_dir(struct inode *, struct inode *,
struct f2fs_dentry_ptr *);
@@ -1995,7 +2038,7 @@ static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
/*
* super.c
*/
-int f2fs_inode_dirtied(struct inode *);
+int f2fs_inode_dirtied(struct inode *, bool);
void f2fs_inode_synced(struct inode *);
int f2fs_commit_super(struct f2fs_sb_info *, bool);
int f2fs_sync_fs(struct super_block *, int);
@@ -2034,7 +2077,7 @@ void move_node_page(struct page *, int);
int fsync_node_pages(struct f2fs_sb_info *, struct inode *,
struct writeback_control *, bool);
int sync_node_pages(struct f2fs_sb_info *, struct writeback_control *);
-void build_free_nids(struct f2fs_sb_info *);
+void build_free_nids(struct f2fs_sb_info *, bool);
bool alloc_nid(struct f2fs_sb_info *, nid_t *);
void alloc_nid_done(struct f2fs_sb_info *, nid_t);
void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
@@ -2060,7 +2103,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *, bool);
void f2fs_balance_fs_bg(struct f2fs_sb_info *);
int f2fs_issue_flush(struct f2fs_sb_info *);
int create_flush_cmd_control(struct f2fs_sb_info *);
-void destroy_flush_cmd_control(struct f2fs_sb_info *);
+void destroy_flush_cmd_control(struct f2fs_sb_info *, bool);
void invalidate_blocks(struct f2fs_sb_info *, block_t);
bool is_checkpointed_data(struct f2fs_sb_info *, block_t);
void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
@@ -2132,12 +2175,15 @@ void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *, struct inode *,
void f2fs_flush_merged_bios(struct f2fs_sb_info *);
int f2fs_submit_page_bio(struct f2fs_io_info *);
void f2fs_submit_page_mbio(struct f2fs_io_info *);
+struct block_device *f2fs_target_device(struct f2fs_sb_info *,
+ block_t, struct bio *);
+int f2fs_target_device_index(struct f2fs_sb_info *, block_t);
void set_data_blkaddr(struct dnode_of_data *);
void f2fs_update_data_blkaddr(struct dnode_of_data *, block_t);
int reserve_new_blocks(struct dnode_of_data *, blkcnt_t);
int reserve_new_block(struct dnode_of_data *);
int f2fs_get_block(struct dnode_of_data *, pgoff_t);
-ssize_t f2fs_preallocate_blocks(struct kiocb *, struct iov_iter *);
+int f2fs_preallocate_blocks(struct kiocb *, struct iov_iter *);
int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
struct page *get_read_data_page(struct inode *, pgoff_t, int, bool);
struct page *find_data_page(struct inode *, pgoff_t);
@@ -2160,7 +2206,7 @@ int f2fs_migrate_page(struct address_space *, struct page *, struct page *,
int start_gc_thread(struct f2fs_sb_info *);
void stop_gc_thread(struct f2fs_sb_info *);
block_t start_bidx_of_node(unsigned int, struct inode *);
-int f2fs_gc(struct f2fs_sb_info *, bool);
+int f2fs_gc(struct f2fs_sb_info *, bool, bool);
void build_gc_manager(struct f2fs_sb_info *);
/*
@@ -2181,12 +2227,12 @@ struct f2fs_stat_info {
unsigned long long hit_largest, hit_cached, hit_rbtree;
unsigned long long hit_total, total_ext;
int ext_tree, zombie_tree, ext_node;
- s64 ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, ndirty_imeta;
- s64 inmem_pages;
+ int ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, ndirty_imeta;
+ int inmem_pages;
unsigned int ndirty_dirs, ndirty_files, ndirty_all;
- int nats, dirty_nats, sits, dirty_sits, fnids;
+ int nats, dirty_nats, sits, dirty_sits, free_nids, alloc_nids;
int total_count, utilization;
- int bg_gc, wb_bios;
+ int bg_gc, nr_wb_cp_data, nr_wb_data;
int inline_xattr, inline_inode, inline_dir, orphans;
unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
unsigned int bimodal, avg_vblocks;
@@ -2412,9 +2458,30 @@ static inline int f2fs_sb_has_crypto(struct super_block *sb)
return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_ENCRYPT);
}
-static inline int f2fs_sb_mounted_hmsmr(struct super_block *sb)
+static inline int f2fs_sb_mounted_blkzoned(struct super_block *sb)
+{
+ return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_BLKZONED);
+}
+
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline int get_blkz_type(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t blkaddr)
+{
+ unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;
+ int i;
+
+ for (i = 0; i < sbi->s_ndevs; i++)
+ if (FDEV(i).bdev == bdev)
+ return FDEV(i).blkz_type[zno];
+ return -EINVAL;
+}
+#endif
+
+static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi)
{
- return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_HMSMR);
+ struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
+
+ return blk_queue_discard(q) || f2fs_sb_mounted_blkzoned(sbi->sb);
}
static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
@@ -2453,8 +2520,8 @@ static inline bool f2fs_may_encrypt(struct inode *inode)
#define fscrypt_pullback_bio_page fscrypt_notsupp_pullback_bio_page
#define fscrypt_restore_control_page fscrypt_notsupp_restore_control_page
#define fscrypt_zeroout_range fscrypt_notsupp_zeroout_range
-#define fscrypt_process_policy fscrypt_notsupp_process_policy
-#define fscrypt_get_policy fscrypt_notsupp_get_policy
+#define fscrypt_ioctl_set_policy fscrypt_notsupp_ioctl_set_policy
+#define fscrypt_ioctl_get_policy fscrypt_notsupp_ioctl_get_policy
#define fscrypt_has_permitted_context fscrypt_notsupp_has_permitted_context
#define fscrypt_inherit_context fscrypt_notsupp_inherit_context
#define fscrypt_get_encryption_info fscrypt_notsupp_get_encryption_info
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index c7865073cd26..49f10dce817d 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -94,8 +94,6 @@ mapped:
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);
- /* if gced page is attached, don't write to cold segment */
- clear_cold_data(page);
out:
sb_end_pagefault(inode->i_sb);
f2fs_update_time(sbi, REQ_TIME);
@@ -210,7 +208,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
}
/* if the inode is dirty, let's recover all the time */
- if (!datasync && !f2fs_skip_inode_update(inode)) {
+ if (!f2fs_skip_inode_update(inode, datasync)) {
f2fs_write_inode(inode, NULL);
goto go_write;
}
@@ -264,7 +262,7 @@ sync_nodes:
}
if (need_inode_block_update(sbi, ino)) {
- f2fs_mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
f2fs_write_inode(inode, NULL);
goto sync_nodes;
}
@@ -632,7 +630,7 @@ int f2fs_truncate(struct inode *inode)
return err;
inode->i_mtime = inode->i_ctime = current_time(inode);
- f2fs_mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, false);
return 0;
}
@@ -679,6 +677,7 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
int err;
+ bool size_changed = false;
err = setattr_prepare(dentry, attr);
if (err)
@@ -694,7 +693,6 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
err = f2fs_truncate(inode);
if (err)
return err;
- f2fs_balance_fs(F2FS_I_SB(inode), true);
} else {
/*
* do not trim all blocks after i_size if target size is
@@ -710,6 +708,8 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
}
inode->i_mtime = inode->i_ctime = current_time(inode);
}
+
+ size_changed = true;
}
__setattr_copy(inode, attr);
@@ -722,7 +722,12 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
}
}
- f2fs_mark_inode_dirty_sync(inode);
+ /* file size may have changed here */
+ f2fs_mark_inode_dirty_sync(inode, size_changed);
+
+ /* inode change will produce dirty node pages flushed by checkpoint */
+ f2fs_balance_fs(F2FS_I_SB(inode), true);
+
return err;
}
@@ -967,7 +972,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
new_size = (dst + i) << PAGE_SHIFT;
if (dst_inode->i_size < new_size)
f2fs_i_size_write(dst_inode, new_size);
- } while ((do_replace[i] || blkaddr[i] == NULL_ADDR) && --ilen);
+ } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
f2fs_put_dnode(&dn);
} else {
@@ -1218,6 +1223,9 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
ret = f2fs_do_zero_range(&dn, index, end);
f2fs_put_dnode(&dn);
f2fs_unlock_op(sbi);
+
+ f2fs_balance_fs(sbi, dn.node_changed);
+
if (ret)
goto out;
@@ -1313,15 +1321,15 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
pgoff_t pg_end;
loff_t new_size = i_size_read(inode);
loff_t off_end;
- int ret;
+ int err;
- ret = inode_newsize_ok(inode, (len + offset));
- if (ret)
- return ret;
+ err = inode_newsize_ok(inode, (len + offset));
+ if (err)
+ return err;
- ret = f2fs_convert_inline_inode(inode);
- if (ret)
- return ret;
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
f2fs_balance_fs(sbi, true);
@@ -1333,12 +1341,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
if (off_end)
map.m_len++;
- ret = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
- if (ret) {
+ err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+ if (err) {
pgoff_t last_off;
if (!map.m_len)
- return ret;
+ return err;
last_off = map.m_lblk + map.m_len - 1;
@@ -1352,7 +1360,7 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
f2fs_i_size_write(inode, new_size);
- return ret;
+ return err;
}
static long f2fs_fallocate(struct file *file, int mode,
@@ -1393,7 +1401,9 @@ static long f2fs_fallocate(struct file *file, int mode,
if (!ret) {
inode->i_mtime = inode->i_ctime = current_time(inode);
- f2fs_mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, false);
+ if (mode & FALLOC_FL_KEEP_SIZE)
+ file_set_keep_isize(inode);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
}
@@ -1526,7 +1536,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
goto out;
f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
- "Unexpected flush for atomic writes: ino=%lu, npages=%lld",
+ "Unexpected flush for atomic writes: ino=%lu, npages=%u",
inode->i_ino, get_dirty_pages(inode));
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
if (ret)
@@ -1752,31 +1762,16 @@ static bool uuid_is_nonzero(__u8 u[16])
static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
- struct fscrypt_policy policy;
struct inode *inode = file_inode(filp);
- if (copy_from_user(&policy, (struct fscrypt_policy __user *)arg,
- sizeof(policy)))
- return -EFAULT;
-
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
- return fscrypt_process_policy(filp, &policy);
+ return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}
static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
- struct fscrypt_policy policy;
- struct inode *inode = file_inode(filp);
- int err;
-
- err = fscrypt_get_policy(inode, &policy);
- if (err)
- return err;
-
- if (copy_to_user((struct fscrypt_policy __user *)arg, &policy, sizeof(policy)))
- return -EFAULT;
- return 0;
+ return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
@@ -1842,7 +1837,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
mutex_lock(&sbi->gc_mutex);
}
- ret = f2fs_gc(sbi, sync);
+ ret = f2fs_gc(sbi, sync, true);
out:
mnt_drop_write_file(filp);
return ret;
@@ -2256,12 +2251,15 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
inode_lock(inode);
ret = generic_write_checks(iocb, from);
if (ret > 0) {
- ret = f2fs_preallocate_blocks(iocb, from);
- if (!ret) {
- blk_start_plug(&plug);
- ret = __generic_file_write_iter(iocb, from);
- blk_finish_plug(&plug);
+ int err = f2fs_preallocate_blocks(iocb, from);
+
+ if (err) {
+ inode_unlock(inode);
+ return err;
}
+ blk_start_plug(&plug);
+ ret = __generic_file_write_iter(iocb, from);
+ blk_finish_plug(&plug);
}
inode_unlock(inode);
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index fcca12b97a2a..88bfc3dff496 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -82,7 +82,7 @@ static int gc_thread_func(void *data)
stat_inc_bggc_count(sbi);
/* if return value is not zero, no victim was selected */
- if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC)))
+ if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true))
wait_ms = gc_th->no_gc_sleep_time;
trace_f2fs_background_gc(sbi->sb, wait_ms,
@@ -544,7 +544,8 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
return true;
}
-static void move_encrypted_block(struct inode *inode, block_t bidx)
+static void move_encrypted_block(struct inode *inode, block_t bidx,
+ unsigned int segno, int off)
{
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
@@ -565,6 +566,9 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
if (!page)
return;
+ if (!check_valid_map(F2FS_I_SB(inode), segno, off))
+ goto out;
+
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
if (err)
@@ -645,7 +649,8 @@ out:
f2fs_put_page(page, 1);
}
-static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
+static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
+ unsigned int segno, int off)
{
struct page *page;
@@ -653,6 +658,9 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
if (IS_ERR(page))
return;
+ if (!check_valid_map(F2FS_I_SB(inode), segno, off))
+ goto out;
+
if (gc_type == BG_GC) {
if (PageWriteback(page))
goto out;
@@ -673,8 +681,10 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
retry:
set_page_dirty(page);
f2fs_wait_on_page_writeback(page, DATA, true);
- if (clear_page_dirty_for_io(page))
+ if (clear_page_dirty_for_io(page)) {
inode_dec_dirty_pages(inode);
+ remove_dirty_inode(inode);
+ }
set_cold_data(page);
@@ -683,8 +693,6 @@ retry:
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry;
}
-
- clear_cold_data(page);
}
out:
f2fs_put_page(page, 1);
@@ -794,9 +802,9 @@ next_step:
start_bidx = start_bidx_of_node(nofs, inode)
+ ofs_in_node;
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
- move_encrypted_block(inode, start_bidx);
+ move_encrypted_block(inode, start_bidx, segno, off);
else
- move_data_page(inode, start_bidx, gc_type);
+ move_data_page(inode, start_bidx, gc_type, segno, off);
if (locked) {
up_write(&fi->dio_rwsem[WRITE]);
@@ -899,7 +907,7 @@ next:
return sec_freed;
}
-int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
+int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background)
{
unsigned int segno;
int gc_type = sync ? FG_GC : BG_GC;
@@ -940,6 +948,9 @@ gc_more:
if (ret)
goto stop;
}
+ } else if (gc_type == BG_GC && !background) {
+ /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
+ goto stop;
}
if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 2e7f54c191b4..e32a9e527968 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -137,8 +137,10 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
fio.old_blkaddr = dn->data_blkaddr;
write_data_page(dn, &fio);
f2fs_wait_on_page_writeback(page, DATA, true);
- if (dirty)
+ if (dirty) {
inode_dec_dirty_pages(dn->inode);
+ remove_dirty_inode(dn->inode);
+ }
/* this converted inline_data should be recovered. */
set_inode_flag(dn->inode, FI_APPEND_WRITE);
@@ -419,7 +421,7 @@ static int f2fs_add_inline_entries(struct inode *dir,
}
new_name.name = d.filename[bit_pos];
- new_name.len = de->name_len;
+ new_name.len = le16_to_cpu(de->name_len);
ino = le32_to_cpu(de->ino);
fake_mode = get_de_type(de) << S_SHIFT;
@@ -573,7 +575,7 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
f2fs_put_page(page, 1);
dir->i_ctime = dir->i_mtime = current_time(dir);
- f2fs_mark_inode_dirty_sync(dir);
+ f2fs_mark_inode_dirty_sync(dir, false);
if (inode)
f2fs_drop_nlink(dir, inode);
@@ -610,6 +612,7 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
struct f2fs_inline_dentry *inline_dentry = NULL;
struct page *ipage = NULL;
struct f2fs_dentry_ptr d;
+ int err;
if (ctx->pos == NR_INLINE_DENTRY)
return 0;
@@ -622,11 +625,12 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
make_dentry_ptr(inode, &d, (void *)inline_dentry, 2);
- if (!f2fs_fill_dentries(ctx, &d, 0, fstr))
+ err = f2fs_fill_dentries(ctx, &d, 0, fstr);
+ if (!err)
ctx->pos = NR_INLINE_DENTRY;
f2fs_put_page(ipage, 1);
- return 0;
+ return err < 0 ? err : 0;
}
int f2fs_inline_data_fiemap(struct inode *inode,
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index d7369895a78a..af06bda51a54 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -19,10 +19,11 @@
#include <trace/events/f2fs.h>
-void f2fs_mark_inode_dirty_sync(struct inode *inode)
+void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
- if (f2fs_inode_dirtied(inode))
+ if (f2fs_inode_dirtied(inode, sync))
return;
+
mark_inode_dirty_sync(inode);
}
@@ -43,7 +44,7 @@ void f2fs_set_inode_flags(struct inode *inode)
new_fl |= S_DIRSYNC;
inode_set_flags(inode, new_fl,
S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
- f2fs_mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, false);
}
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
@@ -252,6 +253,7 @@ retry:
int update_inode(struct inode *inode, struct page *node_page)
{
struct f2fs_inode *ri;
+ struct extent_tree *et = F2FS_I(inode)->extent_tree;
f2fs_inode_synced(inode);
@@ -267,11 +269,13 @@ int update_inode(struct inode *inode, struct page *node_page)
ri->i_size = cpu_to_le64(i_size_read(inode));
ri->i_blocks = cpu_to_le64(inode->i_blocks);
- if (F2FS_I(inode)->extent_tree)
- set_raw_extent(&F2FS_I(inode)->extent_tree->largest,
- &ri->i_ext);
- else
+ if (et) {
+ read_lock(&et->lock);
+ set_raw_extent(&et->largest, &ri->i_ext);
+ read_unlock(&et->lock);
+ } else {
memset(&ri->i_ext, 0, sizeof(ri->i_ext));
+ }
set_raw_inline(inode, ri);
ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
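update_inode() now copies the largest-extent info while holding the extent tree's read lock, so a concurrent update cannot be observed half-written. A pthread rwlock sketch of the same "snapshot under read lock" idea, with simplified placeholder types:

#include <pthread.h>
#include <stdio.h>
#include <stdint.h>

struct extent_example {
    uint32_t fofs, blk, len;
};

static struct extent_example largest;
static pthread_rwlock_t et_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Writer side: update the shared extent under the write lock. */
static void update_largest(uint32_t fofs, uint32_t blk, uint32_t len)
{
    pthread_rwlock_wrlock(&et_lock);
    largest.fofs = fofs;
    largest.blk = blk;
    largest.len = len;
    pthread_rwlock_unlock(&et_lock);
}

/* Reader side: take a consistent copy under the read lock. */
static struct extent_example snapshot_largest(void)
{
    struct extent_example copy;

    pthread_rwlock_rdlock(&et_lock);
    copy = largest;
    pthread_rwlock_unlock(&et_lock);
    return copy;
}

int main(void)
{
    update_largest(0, 100, 8);
    struct extent_example e = snapshot_largest();

    printf("largest extent: fofs=%u blk=%u len=%u\n", e.fofs, e.blk, e.len);
    return 0;
}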
@@ -335,7 +339,7 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
* We need to balance fs here to keep it from producing dirty node pages
* during the urgent cleaning time when running out of free sections.
*/
- if (update_inode_page(inode))
+ if (update_inode_page(inode) && wbc && wbc->nr_to_write)
f2fs_balance_fs(sbi, true);
return 0;
}
@@ -373,6 +377,9 @@ void f2fs_evict_inode(struct inode *inode)
goto no_delete;
#endif
+ remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
+ remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
+
sb_start_intwrite(inode->i_sb);
set_inode_flag(inode, FI_NO_ALLOC);
i_size_write(inode, 0);
@@ -384,6 +391,8 @@ retry:
f2fs_lock_op(sbi);
err = remove_inode_page(inode);
f2fs_unlock_op(sbi);
+ if (err == -ENOENT)
+ err = 0;
}
/* give more chances, if ENOMEM case */
@@ -403,10 +412,12 @@ no_delete:
invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
if (xnid)
invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
- if (is_inode_flag_set(inode, FI_APPEND_WRITE))
- add_ino_entry(sbi, inode->i_ino, APPEND_INO);
- if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
- add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
+ if (inode->i_nlink) {
+ if (is_inode_flag_set(inode, FI_APPEND_WRITE))
+ add_ino_entry(sbi, inode->i_ino, APPEND_INO);
+ if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
+ add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
+ }
if (is_inode_flag_set(inode, FI_FREE_NID)) {
alloc_nid_failed(sbi, inode->i_ino);
clear_inode_flag(inode, FI_FREE_NID);
@@ -424,6 +435,18 @@ void handle_failed_inode(struct inode *inode)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct node_info ni;
+ /*
+ * clear nlink of the inode in order to release its resources
+ * immediately.
+ */
+ clear_nlink(inode);
+
+ /*
+ * we must call this to avoid the inode remaining dirty, resulting
+ * in a panic when flushing dirty inodes in gdirty_list.
+ */
+ update_inode_page(inode);
+
/* don't make bad inode, since it becomes a regular file. */
unlock_new_inode(inode);
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 489fa0d5f914..db33b5631dc8 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -778,7 +778,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
up_write(&F2FS_I(old_inode)->i_sem);
old_inode->i_ctime = current_time(old_inode);
- f2fs_mark_inode_dirty_sync(old_inode);
+ f2fs_mark_inode_dirty_sync(old_inode, false);
f2fs_delete_entry(old_entry, old_page, old_dir, NULL);
@@ -938,7 +938,7 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
f2fs_i_links_write(old_dir, old_nlink > 0);
up_write(&F2FS_I(old_dir)->i_sem);
}
- f2fs_mark_inode_dirty_sync(old_dir);
+ f2fs_mark_inode_dirty_sync(old_dir, false);
/* update directory entry info of new dir inode */
f2fs_set_link(new_dir, new_entry, new_page, old_inode);
@@ -953,7 +953,7 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
f2fs_i_links_write(new_dir, new_nlink > 0);
up_write(&F2FS_I(new_dir)->i_sem);
}
- f2fs_mark_inode_dirty_sync(new_dir);
+ f2fs_mark_inode_dirty_sync(new_dir, false);
f2fs_unlock_op(sbi);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index d1e29deb4598..b9078fdb3743 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -45,8 +45,8 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
* give 25%, 25%, 50%, 50%, 50% memory for each component respectively
*/
if (type == FREE_NIDS) {
- mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
- PAGE_SHIFT;
+ mem_size = (nm_i->nid_cnt[FREE_NID_LIST] *
+ sizeof(struct free_nid)) >> PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
} else if (type == NAT_ENTRIES) {
mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
@@ -270,8 +270,9 @@ static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
e = grab_nat_entry(nm_i, nid);
node_info_from_raw_nat(&e->ni, ne);
} else {
- f2fs_bug_on(sbi, nat_get_ino(e) != ne->ino ||
- nat_get_blkaddr(e) != ne->block_addr ||
+ f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
+ nat_get_blkaddr(e) !=
+ le32_to_cpu(ne->block_addr) ||
nat_get_version(e) != ne->version);
}
}
@@ -1204,6 +1205,7 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
ret = f2fs_write_inline_data(inode, page);
inode_dec_dirty_pages(inode);
+ remove_dirty_inode(inode);
if (ret)
set_page_dirty(page);
page_out:
@@ -1338,7 +1340,8 @@ retry:
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_put_page(last_page, 0);
pagevec_release(&pvec);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
if (!IS_DNODE(page) || !is_cold_node(page))
@@ -1407,11 +1410,12 @@ continue_unlock:
"Retry to write fsync mark: ino=%u, idx=%lx",
ino, last_page->index);
lock_page(last_page);
+ f2fs_wait_on_page_writeback(last_page, NODE, true);
set_page_dirty(last_page);
unlock_page(last_page);
goto retry;
}
-
+out:
if (nwritten)
f2fs_submit_merged_bio_cond(sbi, NULL, NULL, ino, NODE, WRITE);
return ret ? -EIO: 0;
@@ -1692,11 +1696,35 @@ static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
return radix_tree_lookup(&nm_i->free_nid_root, n);
}
-static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
- struct free_nid *i)
+static int __insert_nid_to_list(struct f2fs_sb_info *sbi,
+ struct free_nid *i, enum nid_list list, bool new)
{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+ if (new) {
+ int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
+ if (err)
+ return err;
+ }
+
+ f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
+ i->state != NID_ALLOC);
+ nm_i->nid_cnt[list]++;
+ list_add_tail(&i->list, &nm_i->nid_list[list]);
+ return 0;
+}
+
+static void __remove_nid_from_list(struct f2fs_sb_info *sbi,
+ struct free_nid *i, enum nid_list list, bool reuse)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+ f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
+ i->state != NID_ALLOC);
+ nm_i->nid_cnt[list]--;
list_del(&i->list);
- radix_tree_delete(&nm_i->free_nid_root, i->nid);
+ if (!reuse)
+ radix_tree_delete(&nm_i->free_nid_root, i->nid);
}
static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
@@ -1704,9 +1732,7 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i;
struct nat_entry *ne;
-
- if (!available_free_memory(sbi, FREE_NIDS))
- return -1;
+ int err;
/* 0 nid should not be used */
if (unlikely(nid == 0))
@@ -1729,33 +1755,30 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
return 0;
}
- spin_lock(&nm_i->free_nid_list_lock);
- if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
- spin_unlock(&nm_i->free_nid_list_lock);
- radix_tree_preload_end();
+ spin_lock(&nm_i->nid_list_lock);
+ err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true);
+ spin_unlock(&nm_i->nid_list_lock);
+ radix_tree_preload_end();
+ if (err) {
kmem_cache_free(free_nid_slab, i);
return 0;
}
- list_add_tail(&i->list, &nm_i->free_nid_list);
- nm_i->fcnt++;
- spin_unlock(&nm_i->free_nid_list_lock);
- radix_tree_preload_end();
return 1;
}
-static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
+static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i;
bool need_free = false;
- spin_lock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
i = __lookup_free_nid_list(nm_i, nid);
if (i && i->state == NID_NEW) {
- __del_from_free_nid_list(nm_i, i);
- nm_i->fcnt--;
+ __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
need_free = true;
}
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_unlock(&nm_i->nid_list_lock);
if (need_free)
kmem_cache_free(free_nid_slab, i);
@@ -1778,14 +1801,12 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
- if (blk_addr == NULL_ADDR) {
- if (add_free_nid(sbi, start_nid, true) < 0)
- break;
- }
+ if (blk_addr == NULL_ADDR)
+ add_free_nid(sbi, start_nid, true);
}
}
-void build_free_nids(struct f2fs_sb_info *sbi)
+static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -1794,7 +1815,10 @@ void build_free_nids(struct f2fs_sb_info *sbi)
nid_t nid = nm_i->next_scan_nid;
/* Enough entries */
- if (nm_i->fcnt >= NAT_ENTRY_PER_BLOCK)
+ if (nm_i->nid_cnt[FREE_NID_LIST] >= NAT_ENTRY_PER_BLOCK)
+ return;
+
+ if (!sync && !available_free_memory(sbi, FREE_NIDS))
return;
/* readahead nat pages to be scanned */
@@ -1830,7 +1854,7 @@ void build_free_nids(struct f2fs_sb_info *sbi)
if (addr == NULL_ADDR)
add_free_nid(sbi, nid, true);
else
- remove_free_nid(nm_i, nid);
+ remove_free_nid(sbi, nid);
}
up_read(&curseg->journal_rwsem);
up_read(&nm_i->nat_tree_lock);
@@ -1839,6 +1863,13 @@ void build_free_nids(struct f2fs_sb_info *sbi)
nm_i->ra_nid_pages, META_NAT, false);
}
+void build_free_nids(struct f2fs_sb_info *sbi, bool sync)
+{
+ mutex_lock(&NM_I(sbi)->build_lock);
+ __build_free_nids(sbi, sync);
+ mutex_unlock(&NM_I(sbi)->build_lock);
+}
+
/*
* If this function returns success, caller can obtain a new nid
* from second parameter of this function.
@@ -1853,31 +1884,31 @@ retry:
if (time_to_inject(sbi, FAULT_ALLOC_NID))
return false;
#endif
- if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
- return false;
+ spin_lock(&nm_i->nid_list_lock);
- spin_lock(&nm_i->free_nid_list_lock);
+ if (unlikely(nm_i->available_nids == 0)) {
+ spin_unlock(&nm_i->nid_list_lock);
+ return false;
+ }
/* We should not use stale free nids created by build_free_nids */
- if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
- f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
- list_for_each_entry(i, &nm_i->free_nid_list, list)
- if (i->state == NID_NEW)
- break;
-
- f2fs_bug_on(sbi, i->state != NID_NEW);
+ if (nm_i->nid_cnt[FREE_NID_LIST] && !on_build_free_nids(nm_i)) {
+ f2fs_bug_on(sbi, list_empty(&nm_i->nid_list[FREE_NID_LIST]));
+ i = list_first_entry(&nm_i->nid_list[FREE_NID_LIST],
+ struct free_nid, list);
*nid = i->nid;
+
+ __remove_nid_from_list(sbi, i, FREE_NID_LIST, true);
i->state = NID_ALLOC;
- nm_i->fcnt--;
- spin_unlock(&nm_i->free_nid_list_lock);
+ __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
+ nm_i->available_nids--;
+ spin_unlock(&nm_i->nid_list_lock);
return true;
}
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_unlock(&nm_i->nid_list_lock);
/* Let's scan nat pages and their caches to get free nids */
- mutex_lock(&nm_i->build_lock);
- build_free_nids(sbi);
- mutex_unlock(&nm_i->build_lock);
+ build_free_nids(sbi, true);
goto retry;
}
@@ -1889,11 +1920,11 @@ void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i;
- spin_lock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
i = __lookup_free_nid_list(nm_i, nid);
- f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
- __del_from_free_nid_list(nm_i, i);
- spin_unlock(&nm_i->free_nid_list_lock);
+ f2fs_bug_on(sbi, !i);
+ __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
+ spin_unlock(&nm_i->nid_list_lock);
kmem_cache_free(free_nid_slab, i);
}
@@ -1910,17 +1941,22 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
if (!nid)
return;
- spin_lock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
i = __lookup_free_nid_list(nm_i, nid);
- f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
+ f2fs_bug_on(sbi, !i);
+
if (!available_free_memory(sbi, FREE_NIDS)) {
- __del_from_free_nid_list(nm_i, i);
+ __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
need_free = true;
} else {
+ __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, true);
i->state = NID_NEW;
- nm_i->fcnt++;
+ __insert_nid_to_list(sbi, i, FREE_NID_LIST, false);
}
- spin_unlock(&nm_i->free_nid_list_lock);
+
+ nm_i->available_nids++;
+
+ spin_unlock(&nm_i->nid_list_lock);
if (need_free)
kmem_cache_free(free_nid_slab, i);
@@ -1932,24 +1968,24 @@ int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
struct free_nid *i, *next;
int nr = nr_shrink;
- if (nm_i->fcnt <= MAX_FREE_NIDS)
+ if (nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
return 0;
if (!mutex_trylock(&nm_i->build_lock))
return 0;
- spin_lock(&nm_i->free_nid_list_lock);
- list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
- if (nr_shrink <= 0 || nm_i->fcnt <= MAX_FREE_NIDS)
+ spin_lock(&nm_i->nid_list_lock);
+ list_for_each_entry_safe(i, next, &nm_i->nid_list[FREE_NID_LIST],
+ list) {
+ if (nr_shrink <= 0 ||
+ nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
break;
- if (i->state == NID_ALLOC)
- continue;
- __del_from_free_nid_list(nm_i, i);
+
+ __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
kmem_cache_free(free_nid_slab, i);
- nm_i->fcnt--;
nr_shrink--;
}
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_unlock(&nm_i->nid_list_lock);
mutex_unlock(&nm_i->build_lock);
return nr - nr_shrink;
@@ -2005,7 +2041,7 @@ recover_xnid:
if (unlikely(!inc_valid_node_count(sbi, inode)))
f2fs_bug_on(sbi, 1);
- remove_free_nid(NM_I(sbi), new_xnid);
+ remove_free_nid(sbi, new_xnid);
get_node_info(sbi, new_xnid, &ni);
ni.ino = inode->i_ino;
set_node_addr(sbi, &ni, NEW_ADDR, false);
@@ -2035,7 +2071,7 @@ retry:
}
/* Should not use this inode from free nid list */
- remove_free_nid(NM_I(sbi), ino);
+ remove_free_nid(sbi, ino);
if (!PageUptodate(ipage))
SetPageUptodate(ipage);
@@ -2069,7 +2105,6 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
struct f2fs_node *rn;
struct f2fs_summary *sum_entry;
block_t addr;
- int bio_blocks = MAX_BIO_BLOCKS(sbi);
int i, idx, last_offset, nrpages;
/* scan the node segment */
@@ -2078,7 +2113,7 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
sum_entry = &sum->entries[0];
for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
- nrpages = min(last_offset - i, bio_blocks);
+ nrpages = min(last_offset - i, BIO_MAX_PAGES);
/* readahead node pages */
ra_meta_pages(sbi, addr, nrpages, META_POR, true);
@@ -2120,6 +2155,19 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
ne = grab_nat_entry(nm_i, nid);
node_info_from_raw_nat(&ne->ni, &raw_ne);
}
+
+ /*
+ * if a free nat in the journal has not been used after the last
+ * checkpoint, we should remove it from the available nids,
+ * since we will add it back later.
+ */
+ if (!get_nat_flag(ne, IS_DIRTY) &&
+ le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
+ spin_lock(&nm_i->nid_list_lock);
+ nm_i->available_nids--;
+ spin_unlock(&nm_i->nid_list_lock);
+ }
+
__set_nat_cache_dirty(nm_i, ne);
}
update_nats_in_cursum(journal, -i);
@@ -2192,8 +2240,12 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
raw_nat_from_node_info(raw_ne, &ne->ni);
nat_reset_flag(ne);
__clear_nat_cache_dirty(NM_I(sbi), ne);
- if (nat_get_blkaddr(ne) == NULL_ADDR)
+ if (nat_get_blkaddr(ne) == NULL_ADDR) {
add_free_nid(sbi, nid, false);
+ spin_lock(&NM_I(sbi)->nid_list_lock);
+ NM_I(sbi)->available_nids++;
+ spin_unlock(&NM_I(sbi)->nid_list_lock);
+ }
}
if (to_journal)
@@ -2268,21 +2320,24 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
/* not used nids: 0, node, meta, (and root counted as valid node) */
- nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
- nm_i->fcnt = 0;
+ nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
+ F2FS_RESERVED_NODE_NUM;
+ nm_i->nid_cnt[FREE_NID_LIST] = 0;
+ nm_i->nid_cnt[ALLOC_NID_LIST] = 0;
nm_i->nat_cnt = 0;
nm_i->ram_thresh = DEF_RAM_THRESHOLD;
nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
- INIT_LIST_HEAD(&nm_i->free_nid_list);
+ INIT_LIST_HEAD(&nm_i->nid_list[FREE_NID_LIST]);
+ INIT_LIST_HEAD(&nm_i->nid_list[ALLOC_NID_LIST]);
INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
INIT_LIST_HEAD(&nm_i->nat_entries);
mutex_init(&nm_i->build_lock);
- spin_lock_init(&nm_i->free_nid_list_lock);
+ spin_lock_init(&nm_i->nid_list_lock);
init_rwsem(&nm_i->nat_tree_lock);
nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
@@ -2310,7 +2365,7 @@ int build_node_manager(struct f2fs_sb_info *sbi)
if (err)
return err;
- build_free_nids(sbi);
+ build_free_nids(sbi, true);
return 0;
}
@@ -2327,17 +2382,18 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
return;
/* destroy free nid list */
- spin_lock(&nm_i->free_nid_list_lock);
- list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
- f2fs_bug_on(sbi, i->state == NID_ALLOC);
- __del_from_free_nid_list(nm_i, i);
- nm_i->fcnt--;
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
+ list_for_each_entry_safe(i, next_i, &nm_i->nid_list[FREE_NID_LIST],
+ list) {
+ __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
+ spin_unlock(&nm_i->nid_list_lock);
kmem_cache_free(free_nid_slab, i);
- spin_lock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
}
- f2fs_bug_on(sbi, nm_i->fcnt);
- spin_unlock(&nm_i->free_nid_list_lock);
+ f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID_LIST]);
+ f2fs_bug_on(sbi, nm_i->nid_cnt[ALLOC_NID_LIST]);
+ f2fs_bug_on(sbi, !list_empty(&nm_i->nid_list[ALLOC_NID_LIST]));
+ spin_unlock(&nm_i->nid_list_lock);
/* destroy nat cache */
down_write(&nm_i->nat_tree_lock);
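The node.c rework above replaces the single free_nid_list (with an fcnt counter and per-entry state checks) by two lists, FREE_NID_LIST and ALLOC_NID_LIST, each with its own nid_cnt[] counter under one nid_list_lock. The userspace sketch below shows only that list-and-counter shape; the names (nid_pool, pool_alloc, ...) are illustrative, it omits locking and the radix tree, and it is not the kernel API:

#include <stdio.h>

enum nid_state { NID_FREE, NID_ALLOC };
enum nid_list  { FREE_LIST, ALLOC_LIST, NR_LISTS };

struct nid_entry {
	unsigned int nid;
	enum nid_state state;
	struct nid_entry *prev, *next;		/* intrusive list node */
};

struct nid_pool {
	struct nid_entry head[NR_LISTS];	/* circular list sentinels */
	unsigned long cnt[NR_LISTS];		/* per-list counters */
};

static void pool_init(struct nid_pool *p)
{
	for (int i = 0; i < NR_LISTS; i++) {
		p->head[i].prev = p->head[i].next = &p->head[i];
		p->cnt[i] = 0;
	}
}

/* mirrors list_add_tail() followed by nid_cnt[list]++ */
static void pool_add(struct nid_pool *p, struct nid_entry *e, enum nid_list l)
{
	e->prev = p->head[l].prev;
	e->next = &p->head[l];
	e->prev->next = e;
	e->next->prev = e;
	p->cnt[l]++;
}

/* mirrors list_del() followed by nid_cnt[list]-- */
static void pool_del(struct nid_pool *p, struct nid_entry *e, enum nid_list l)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	p->cnt[l]--;
}

/* allocation: take the first free entry and move it to the alloc list */
static struct nid_entry *pool_alloc(struct nid_pool *p)
{
	struct nid_entry *e = p->head[FREE_LIST].next;

	if (e == &p->head[FREE_LIST])
		return NULL;			/* free list is empty */
	pool_del(p, e, FREE_LIST);
	e->state = NID_ALLOC;
	pool_add(p, e, ALLOC_LIST);
	return e;
}

int main(void)
{
	struct nid_pool pool;
	struct nid_entry e1 = { .nid = 4, .state = NID_FREE };
	struct nid_entry e2 = { .nid = 5, .state = NID_FREE };

	pool_init(&pool);
	pool_add(&pool, &e1, FREE_LIST);
	pool_add(&pool, &e2, FREE_LIST);

	struct nid_entry *got = pool_alloc(&pool);

	printf("allocated nid %u, free=%lu alloc=%lu\n",
	       got->nid, pool.cnt[FREE_LIST], pool.cnt[ALLOC_LIST]);
	return 0;
}

Keeping one counter per list lets the shrinker and the "enough entries" checks read nid_cnt[] directly instead of walking the list and testing each entry's state.
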
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 868bec65e51c..e7997e240366 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -169,14 +169,15 @@ static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *fnid;
- spin_lock(&nm_i->free_nid_list_lock);
- if (nm_i->fcnt <= 0) {
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
+ if (nm_i->nid_cnt[FREE_NID_LIST] <= 0) {
+ spin_unlock(&nm_i->nid_list_lock);
return;
}
- fnid = list_entry(nm_i->free_nid_list.next, struct free_nid, list);
+ fnid = list_entry(nm_i->nid_list[FREE_NID_LIST].next,
+ struct free_nid, list);
*nid = fnid->nid;
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_unlock(&nm_i->nid_list_lock);
}
/*
@@ -313,7 +314,7 @@ static inline bool is_recoverable_dnode(struct page *page)
((unsigned char *)ckpt + crc_offset)));
cp_ver |= (crc << 32);
}
- return cpu_to_le64(cp_ver) == cpver_of_node(page);
+ return cp_ver == cpver_of_node(page);
}
/*
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 2fc84a991325..981a9584b62f 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -180,13 +180,15 @@ static void recover_inode(struct inode *inode, struct page *page)
inode->i_mode = le16_to_cpu(raw->i_mode);
f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
- inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime);
+ inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
- inode->i_atime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
+ inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
+ F2FS_I(inode)->i_advise = raw->i_advise;
+
if (file_enc_name(inode))
name = "<encrypted>";
else
@@ -196,32 +198,6 @@ static void recover_inode(struct inode *inode, struct page *page)
ino_of_node(page), name);
}
-static bool is_same_inode(struct inode *inode, struct page *ipage)
-{
- struct f2fs_inode *ri = F2FS_INODE(ipage);
- struct timespec disk;
-
- if (!IS_INODE(ipage))
- return true;
-
- disk.tv_sec = le64_to_cpu(ri->i_ctime);
- disk.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
- if (timespec_compare(&inode->i_ctime, &disk) > 0)
- return false;
-
- disk.tv_sec = le64_to_cpu(ri->i_atime);
- disk.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
- if (timespec_compare(&inode->i_atime, &disk) > 0)
- return false;
-
- disk.tv_sec = le64_to_cpu(ri->i_mtime);
- disk.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
- if (timespec_compare(&inode->i_mtime, &disk) > 0)
- return false;
-
- return true;
-}
-
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
struct curseg_info *curseg;
@@ -248,10 +224,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
goto next;
entry = get_fsync_inode(head, ino_of_node(page));
- if (entry) {
- if (!is_same_inode(entry->inode, page))
- goto next;
- } else {
+ if (!entry) {
if (IS_INODE(page) && is_dent_dnode(page)) {
err = recover_inode_page(sbi, page);
if (err)
@@ -454,7 +427,8 @@ retry_dn:
continue;
}
- if ((start + 1) << PAGE_SHIFT > i_size_read(inode))
+ if (!file_keep_isize(inode) &&
+ (i_size_read(inode) <= (start << PAGE_SHIFT)))
f2fs_i_size_write(inode, (start + 1) << PAGE_SHIFT);
/*
@@ -507,8 +481,10 @@ err:
f2fs_put_dnode(&dn);
out:
f2fs_msg(sbi->sb, KERN_NOTICE,
- "recover_data: ino = %lx, recovered = %d blocks, err = %d",
- inode->i_ino, recovered, err);
+ "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
+ inode->i_ino,
+ file_keep_isize(inode) ? "keep" : "recover",
+ recovered, err);
return err;
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index f1b4a1775ebe..0738f48293cc 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -274,8 +274,10 @@ static int __commit_inmem_pages(struct inode *inode,
set_page_dirty(page);
f2fs_wait_on_page_writeback(page, DATA, true);
- if (clear_page_dirty_for_io(page))
+ if (clear_page_dirty_for_io(page)) {
inode_dec_dirty_pages(inode);
+ remove_dirty_inode(inode);
+ }
fio.page = page;
err = do_write_data_page(&fio);
@@ -287,7 +289,6 @@ static int __commit_inmem_pages(struct inode *inode,
/* record old blkaddr for revoking */
cur->old_addr = fio.old_blkaddr;
- clear_cold_data(page);
submit_bio = true;
}
unlock_page(page);
@@ -363,7 +364,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
*/
if (has_not_enough_free_secs(sbi, 0, 0)) {
mutex_lock(&sbi->gc_mutex);
- f2fs_gc(sbi, false);
+ f2fs_gc(sbi, false, false);
}
}
@@ -380,14 +381,17 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
if (!available_free_memory(sbi, FREE_NIDS))
try_to_free_nids(sbi, MAX_FREE_NIDS);
else
- build_free_nids(sbi);
+ build_free_nids(sbi, false);
+
+ if (!is_idle(sbi))
+ return;
/* checkpoint is the only way to shrink partial cached entries */
if (!available_free_memory(sbi, NAT_ENTRIES) ||
!available_free_memory(sbi, INO_ENTRIES) ||
excess_prefree_segs(sbi) ||
excess_dirty_nats(sbi) ||
- (is_idle(sbi) && f2fs_time_over(sbi, CP_TIME))) {
+ f2fs_time_over(sbi, CP_TIME)) {
if (test_opt(sbi, DATA_FLUSH)) {
struct blk_plug plug;
@@ -400,6 +404,33 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
}
}
+static int __submit_flush_wait(struct block_device *bdev)
+{
+ struct bio *bio = f2fs_bio_alloc(0);
+ int ret;
+
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+ bio->bi_bdev = bdev;
+ ret = submit_bio_wait(bio);
+ bio_put(bio);
+ return ret;
+}
+
+static int submit_flush_wait(struct f2fs_sb_info *sbi)
+{
+ int ret = __submit_flush_wait(sbi->sb->s_bdev);
+ int i;
+
+ if (sbi->s_ndevs && !ret) {
+ for (i = 1; i < sbi->s_ndevs; i++) {
+ ret = __submit_flush_wait(FDEV(i).bdev);
+ if (ret)
+ break;
+ }
+ }
+ return ret;
+}
+
static int issue_flush_thread(void *data)
{
struct f2fs_sb_info *sbi = data;
@@ -410,25 +441,18 @@ repeat:
return 0;
if (!llist_empty(&fcc->issue_list)) {
- struct bio *bio;
struct flush_cmd *cmd, *next;
int ret;
- bio = f2fs_bio_alloc(0);
-
fcc->dispatch_list = llist_del_all(&fcc->issue_list);
fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
- bio->bi_bdev = sbi->sb->s_bdev;
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
- ret = submit_bio_wait(bio);
-
+ ret = submit_flush_wait(sbi);
llist_for_each_entry_safe(cmd, next,
fcc->dispatch_list, llnode) {
cmd->ret = ret;
complete(&cmd->wait);
}
- bio_put(bio);
fcc->dispatch_list = NULL;
}
@@ -449,15 +473,11 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
return 0;
if (!test_opt(sbi, FLUSH_MERGE) || !atomic_read(&fcc->submit_flush)) {
- struct bio *bio = f2fs_bio_alloc(0);
int ret;
atomic_inc(&fcc->submit_flush);
- bio->bi_bdev = sbi->sb->s_bdev;
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
- ret = submit_bio_wait(bio);
+ ret = submit_flush_wait(sbi);
atomic_dec(&fcc->submit_flush);
- bio_put(bio);
return ret;
}
@@ -469,8 +489,13 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
if (!fcc->dispatch_list)
wake_up(&fcc->flush_wait_queue);
- wait_for_completion(&cmd.wait);
- atomic_dec(&fcc->submit_flush);
+ if (fcc->f2fs_issue_flush) {
+ wait_for_completion(&cmd.wait);
+ atomic_dec(&fcc->submit_flush);
+ } else {
+ llist_del_all(&fcc->issue_list);
+ atomic_set(&fcc->submit_flush, 0);
+ }
return cmd.ret;
}
@@ -481,6 +506,11 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
struct flush_cmd_control *fcc;
int err = 0;
+ if (SM_I(sbi)->cmd_control_info) {
+ fcc = SM_I(sbi)->cmd_control_info;
+ goto init_thread;
+ }
+
fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
if (!fcc)
return -ENOMEM;
@@ -488,6 +518,7 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
init_waitqueue_head(&fcc->flush_wait_queue);
init_llist_head(&fcc->issue_list);
SM_I(sbi)->cmd_control_info = fcc;
+init_thread:
fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
if (IS_ERR(fcc->f2fs_issue_flush)) {
@@ -500,14 +531,20 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
return err;
}
-void destroy_flush_cmd_control(struct f2fs_sb_info *sbi)
+void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
- if (fcc && fcc->f2fs_issue_flush)
- kthread_stop(fcc->f2fs_issue_flush);
- kfree(fcc);
- SM_I(sbi)->cmd_control_info = NULL;
+ if (fcc && fcc->f2fs_issue_flush) {
+ struct task_struct *flush_thread = fcc->f2fs_issue_flush;
+
+ fcc->f2fs_issue_flush = NULL;
+ kthread_stop(flush_thread);
+ }
+ if (free) {
+ kfree(fcc);
+ SM_I(sbi)->cmd_control_info = NULL;
+ }
}
static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
@@ -633,15 +670,23 @@ static void f2fs_submit_bio_wait_endio(struct bio *bio)
}
/* this function is copied from blkdev_issue_discard in block/blk-lib.c */
-int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+static int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t blkstart, block_t blklen)
{
- struct block_device *bdev = sbi->sb->s_bdev;
struct bio *bio = NULL;
int err;
- err = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
- &bio);
+ trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
+
+ if (sbi->s_ndevs) {
+ int devi = f2fs_target_device_index(sbi, blkstart);
+
+ blkstart -= FDEV(devi).start_blk;
+ }
+ err = __blkdev_issue_discard(bdev,
+ SECTOR_FROM_BLOCK(blkstart),
+ SECTOR_FROM_BLOCK(blklen),
+ GFP_NOFS, 0, &bio);
if (!err && bio) {
struct bio_entry *be = __add_bio_entry(sbi, bio);
@@ -654,24 +699,101 @@ int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi, sector_t sector,
return err;
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t blkstart, block_t blklen)
+{
+ sector_t nr_sects = SECTOR_FROM_BLOCK(blklen);
+ sector_t sector;
+ int devi = 0;
+
+ if (sbi->s_ndevs) {
+ devi = f2fs_target_device_index(sbi, blkstart);
+ blkstart -= FDEV(devi).start_blk;
+ }
+ sector = SECTOR_FROM_BLOCK(blkstart);
+
+ if (sector & (bdev_zone_size(bdev) - 1) ||
+ nr_sects != bdev_zone_size(bdev)) {
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "(%d) %s: Unaligned discard attempted (block %x + %x)",
+ devi, sbi->s_ndevs ? FDEV(devi).path: "",
+ blkstart, blklen);
+ return -EIO;
+ }
+
+ /*
+ * We need to know the type of the zone: for conventional zones,
+ * use regular discard if the drive supports it. For sequential
+ * zones, reset the zone write pointer.
+ */
+ switch (get_blkz_type(sbi, bdev, blkstart)) {
+
+ case BLK_ZONE_TYPE_CONVENTIONAL:
+ if (!blk_queue_discard(bdev_get_queue(bdev)))
+ return 0;
+ return __f2fs_issue_discard_async(sbi, bdev, blkstart, blklen);
+ case BLK_ZONE_TYPE_SEQWRITE_REQ:
+ case BLK_ZONE_TYPE_SEQWRITE_PREF:
+ trace_f2fs_issue_reset_zone(sbi->sb, blkstart);
+ return blkdev_reset_zones(bdev, sector,
+ nr_sects, GFP_NOFS);
+ default:
+ /* Unknown zone type: broken device ? */
+ return -EIO;
+ }
+}
+#endif
+
+static int __issue_discard_async(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t blkstart, block_t blklen)
+{
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
+ bdev_zoned_model(bdev) != BLK_ZONED_NONE)
+ return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
+#endif
+ return __f2fs_issue_discard_async(sbi, bdev, blkstart, blklen);
+}
+
static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
block_t blkstart, block_t blklen)
{
- sector_t start = SECTOR_FROM_BLOCK(blkstart);
- sector_t len = SECTOR_FROM_BLOCK(blklen);
+ sector_t start = blkstart, len = 0;
+ struct block_device *bdev;
struct seg_entry *se;
unsigned int offset;
block_t i;
+ int err = 0;
+
+ bdev = f2fs_target_device(sbi, blkstart, NULL);
+
+ for (i = blkstart; i < blkstart + blklen; i++, len++) {
+ if (i != start) {
+ struct block_device *bdev2 =
+ f2fs_target_device(sbi, i, NULL);
+
+ if (bdev2 != bdev) {
+ err = __issue_discard_async(sbi, bdev,
+ start, len);
+ if (err)
+ return err;
+ bdev = bdev2;
+ start = i;
+ len = 0;
+ }
+ }
- for (i = blkstart; i < blkstart + blklen; i++) {
se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
offset = GET_BLKOFF_FROM_SEG0(sbi, i);
if (!f2fs_test_and_set_bit(offset, se->discard_map))
sbi->discard_blks--;
}
- trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
- return __f2fs_issue_discard_async(sbi, start, len, GFP_NOFS, 0);
+
+ if (len)
+ err = __issue_discard_async(sbi, bdev, start, len);
+ return err;
}
static void __add_discard_entry(struct f2fs_sb_info *sbi,
@@ -1296,25 +1418,21 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
stat_inc_seg_type(sbi, curseg);
}
-static void __allocate_new_segments(struct f2fs_sb_info *sbi, int type)
-{
- struct curseg_info *curseg = CURSEG_I(sbi, type);
- unsigned int old_segno;
-
- old_segno = curseg->segno;
- SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
- locate_dirty_segment(sbi, old_segno);
-}
-
void allocate_new_segments(struct f2fs_sb_info *sbi)
{
+ struct curseg_info *curseg;
+ unsigned int old_segno;
int i;
if (test_opt(sbi, LFS))
return;
- for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
- __allocate_new_segments(sbi, i);
+ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
+ curseg = CURSEG_I(sbi, i);
+ old_segno = curseg->segno;
+ SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
+ locate_dirty_segment(sbi, old_segno);
+ }
}
static const struct segment_allocation default_salloc_ops = {
@@ -1448,21 +1566,11 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
struct f2fs_summary *sum, int type)
{
struct sit_info *sit_i = SIT_I(sbi);
- struct curseg_info *curseg;
- bool direct_io = (type == CURSEG_DIRECT_IO);
-
- type = direct_io ? CURSEG_WARM_DATA : type;
-
- curseg = CURSEG_I(sbi, type);
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
mutex_lock(&curseg->curseg_mutex);
mutex_lock(&sit_i->sentry_lock);
- /* direct_io'ed data is aligned to the segment for better performance */
- if (direct_io && curseg->next_blkoff &&
- !has_not_enough_free_secs(sbi, 0, 0))
- __allocate_new_segments(sbi, type);
-
*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
/*
@@ -2166,7 +2274,6 @@ out:
static int build_sit_info(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
- struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
struct sit_info *sit_i;
unsigned int sit_segs, start;
char *src_bitmap, *dst_bitmap;
@@ -2233,7 +2340,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
- sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
+ sit_i->written_valid_blocks = 0;
sit_i->sit_bitmap = dst_bitmap;
sit_i->bitmap_size = bitmap_size;
sit_i->dirty_sentries = 0;
@@ -2315,10 +2422,10 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
int sit_blk_cnt = SIT_BLK_CNT(sbi);
unsigned int i, start, end;
unsigned int readed, start_blk = 0;
- int nrpages = MAX_BIO_BLOCKS(sbi) * 8;
do {
- readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT, true);
+ readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
+ META_SIT, true);
start = start_blk * sit_i->sents_per_block;
end = (start_blk + readed) * sit_i->sents_per_block;
@@ -2387,6 +2494,9 @@ static void init_free_segmap(struct f2fs_sb_info *sbi)
struct seg_entry *sentry = get_seg_entry(sbi, start);
if (!sentry->valid_blocks)
__set_free(sbi, start);
+ else
+ SIT_I(sbi)->written_valid_blocks +=
+ sentry->valid_blocks;
}
/* set to use the current segments */
@@ -2645,7 +2755,7 @@ void destroy_segment_manager(struct f2fs_sb_info *sbi)
if (!sm_info)
return;
- destroy_flush_cmd_control(sbi);
+ destroy_flush_cmd_control(sbi, true);
destroy_dirty_segmap(sbi);
destroy_curseg(sbi);
destroy_free_segmap(sbi);
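Worth noting on the segment.c changes above: f2fs_issue_discard() now walks the block range and cuts it at device boundaries, issuing one piece per device through __issue_discard_async() (which in turn picks a regular discard or a zone reset). The standalone sketch below shows only the range-splitting loop, with made-up device ranges standing in for the sbi device table; names and values are illustrative:

#include <stdio.h>

struct dev_range {
	const char *path;
	unsigned long long start_blk;
	unsigned long long end_blk;		/* inclusive */
};

static const struct dev_range devs[] = {
	{ "/dev/sda", 0,     9999  },
	{ "/dev/sdb", 10000, 19999 },
	{ "/dev/sdc", 20000, 29999 },
};

static int target_device(unsigned long long blk)
{
	for (int i = 0; i < (int)(sizeof(devs) / sizeof(devs[0])); i++)
		if (blk >= devs[i].start_blk && blk <= devs[i].end_blk)
			return i;
	return -1;
}

/* stand-in for __issue_discard_async(): just report the per-device piece */
static int issue_piece(int devi, unsigned long long start, unsigned long long len)
{
	printf("discard %llu..%llu on %s\n",
	       start, start + len - 1, devs[devi].path);
	return 0;
}

static int issue_discard(unsigned long long blkstart, unsigned long long blklen)
{
	unsigned long long start = blkstart, len = 0;
	int devi = target_device(blkstart);

	if (devi < 0)
		return -1;			/* range outside all devices */

	for (unsigned long long i = blkstart; i < blkstart + blklen; i++, len++) {
		int d = target_device(i);

		if (d < 0)
			return -1;
		if (d != devi) {		/* crossed a device boundary */
			int err = issue_piece(devi, start, len);

			if (err)
				return err;
			devi = d;
			start = i;
			len = 0;
		}
	}
	return len ? issue_piece(devi, start, len) : 0;
}

int main(void)
{
	/* 9990..10009 spans two devices, so two discards are issued */
	return issue_discard(9990, 20);
}
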
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index fecb856ad874..9d44ce83acb2 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -18,6 +18,8 @@
#define DEF_RECLAIM_PREFREE_SEGMENTS 5 /* 5% over total segments */
#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS 4096 /* 8GB in maximum */
+#define F2FS_MIN_SEGMENTS 9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
+
/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno) (segno - free_i->start_segno)
#define GET_R2L_SEGNO(free_i, segno) (segno + free_i->start_segno)
@@ -102,8 +104,6 @@
(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors) \
(sectors >> F2FS_LOG_SECTORS_PER_BLOCK)
-#define MAX_BIO_BLOCKS(sbi) \
- ((int)min((int)max_hw_blocks(sbi), BIO_MAX_PAGES))
/*
* indicate a block allocation direction: RIGHT and LEFT.
@@ -471,11 +471,12 @@ static inline bool need_SSR(struct f2fs_sb_info *sbi)
{
int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
+ int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
if (test_opt(sbi, LFS))
return false;
- return free_sections(sbi) <= (node_secs + 2 * dent_secs +
+ return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
reserved_sections(sbi) + 1);
}
@@ -484,14 +485,14 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
{
int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
-
- node_secs += get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
+ int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
return false;
return (free_sections(sbi) + freed) <=
- (node_secs + 2 * dent_secs + reserved_sections(sbi) + needed);
+ (node_secs + 2 * dent_secs + imeta_secs +
+ reserved_sections(sbi) + needed);
}
static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
@@ -695,13 +696,6 @@ static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
return false;
}
-static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
-{
- struct block_device *bdev = sbi->sb->s_bdev;
- struct request_queue *q = bdev_get_queue(bdev);
- return SECTOR_TO_BLOCK(queue_max_sectors(q));
-}
-
/*
* It is very important to gather dirty pages and write at once, so that we can
* submit a big bio without interfering with other data writes.
@@ -719,7 +713,7 @@ static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
else if (type == NODE)
return 8 * sbi->blocks_per_seg;
else if (type == META)
- return 8 * MAX_BIO_BLOCKS(sbi);
+ return 8 * BIO_MAX_PAGES;
else
return 0;
}
@@ -736,11 +730,9 @@ static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
return 0;
nr_to_write = wbc->nr_to_write;
-
+ desired = BIO_MAX_PAGES;
if (type == NODE)
- desired = 2 * max_hw_blocks(sbi);
- else
- desired = MAX_BIO_BLOCKS(sbi);
+ desired <<= 1;
wbc->nr_to_write = desired;
return desired - nr_to_write;
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
index 46c915425923..5c60fc28ec75 100644
--- a/fs/f2fs/shrinker.c
+++ b/fs/f2fs/shrinker.c
@@ -21,14 +21,16 @@ static unsigned int shrinker_run_no;
static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
- return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
+ long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
+
+ return count > 0 ? count : 0;
}
static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{
- if (NM_I(sbi)->fcnt > MAX_FREE_NIDS)
- return NM_I(sbi)->fcnt - MAX_FREE_NIDS;
- return 0;
+ long count = NM_I(sbi)->nid_cnt[FREE_NID_LIST] - MAX_FREE_NIDS;
+
+ return count > 0 ? count : 0;
}
static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 2cac6bb86080..702638e21c76 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -412,14 +412,20 @@ static int parse_options(struct super_block *sb, char *options)
q = bdev_get_queue(sb->s_bdev);
if (blk_queue_discard(q)) {
set_opt(sbi, DISCARD);
- } else {
+ } else if (!f2fs_sb_mounted_blkzoned(sb)) {
f2fs_msg(sb, KERN_WARNING,
"mounting with \"discard\" option, but "
"the device does not support discard");
}
break;
case Opt_nodiscard:
+ if (f2fs_sb_mounted_blkzoned(sb)) {
+ f2fs_msg(sb, KERN_WARNING,
+ "discard is required for zoned block devices");
+ return -EINVAL;
+ }
clear_opt(sbi, DISCARD);
+ break;
case Opt_noheap:
set_opt(sbi, NOHEAP);
break;
@@ -512,6 +518,13 @@ static int parse_options(struct super_block *sb, char *options)
return -ENOMEM;
if (strlen(name) == 8 &&
!strncmp(name, "adaptive", 8)) {
+ if (f2fs_sb_mounted_blkzoned(sb)) {
+ f2fs_msg(sb, KERN_WARNING,
+ "adaptive mode is not allowed with "
+ "zoned block device feature");
+ kfree(name);
+ return -EINVAL;
+ }
set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
} else if (strlen(name) == 3 &&
!strncmp(name, "lfs", 3)) {
@@ -558,13 +571,9 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
init_once((void *) fi);
- if (percpu_counter_init(&fi->dirty_pages, 0, GFP_NOFS)) {
- kmem_cache_free(f2fs_inode_cachep, fi);
- return NULL;
- }
-
/* Initialize f2fs-specific inode info */
fi->vfs_inode.i_version = 1;
+ atomic_set(&fi->dirty_pages, 0);
fi->i_current_depth = 1;
fi->i_advise = 0;
init_rwsem(&fi->i_sem);
@@ -620,24 +629,25 @@ static int f2fs_drop_inode(struct inode *inode)
return generic_drop_inode(inode);
}
-int f2fs_inode_dirtied(struct inode *inode)
+int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ int ret = 0;
spin_lock(&sbi->inode_lock[DIRTY_META]);
if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
- spin_unlock(&sbi->inode_lock[DIRTY_META]);
- return 1;
+ ret = 1;
+ } else {
+ set_inode_flag(inode, FI_DIRTY_INODE);
+ stat_inc_dirty_inode(sbi, DIRTY_META);
}
-
- set_inode_flag(inode, FI_DIRTY_INODE);
- list_add_tail(&F2FS_I(inode)->gdirty_list,
+ if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
+ list_add_tail(&F2FS_I(inode)->gdirty_list,
&sbi->inode_list[DIRTY_META]);
- inc_page_count(sbi, F2FS_DIRTY_IMETA);
- stat_inc_dirty_inode(sbi, DIRTY_META);
+ inc_page_count(sbi, F2FS_DIRTY_IMETA);
+ }
spin_unlock(&sbi->inode_lock[DIRTY_META]);
-
- return 0;
+ return ret;
}
void f2fs_inode_synced(struct inode *inode)
@@ -649,10 +659,12 @@ void f2fs_inode_synced(struct inode *inode)
spin_unlock(&sbi->inode_lock[DIRTY_META]);
return;
}
- list_del_init(&F2FS_I(inode)->gdirty_list);
+ if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
+ list_del_init(&F2FS_I(inode)->gdirty_list);
+ dec_page_count(sbi, F2FS_DIRTY_IMETA);
+ }
clear_inode_flag(inode, FI_DIRTY_INODE);
clear_inode_flag(inode, FI_AUTO_RECOVER);
- dec_page_count(sbi, F2FS_DIRTY_IMETA);
stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
@@ -676,7 +688,7 @@ static void f2fs_dirty_inode(struct inode *inode, int flags)
if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
clear_inode_flag(inode, FI_AUTO_RECOVER);
- f2fs_inode_dirtied(inode);
+ f2fs_inode_dirtied(inode, false);
}
static void f2fs_i_callback(struct rcu_head *head)
@@ -687,20 +699,28 @@ static void f2fs_i_callback(struct rcu_head *head)
static void f2fs_destroy_inode(struct inode *inode)
{
- percpu_counter_destroy(&F2FS_I(inode)->dirty_pages);
call_rcu(&inode->i_rcu, f2fs_i_callback);
}
static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
- int i;
-
- for (i = 0; i < NR_COUNT_TYPE; i++)
- percpu_counter_destroy(&sbi->nr_pages[i]);
percpu_counter_destroy(&sbi->alloc_valid_block_count);
percpu_counter_destroy(&sbi->total_valid_inode_count);
}
+static void destroy_device_list(struct f2fs_sb_info *sbi)
+{
+ int i;
+
+ for (i = 0; i < sbi->s_ndevs; i++) {
+ blkdev_put(FDEV(i).bdev, FMODE_EXCL);
+#ifdef CONFIG_BLK_DEV_ZONED
+ kfree(FDEV(i).blkz_type);
+#endif
+ }
+ kfree(sbi->devs);
+}
+
static void f2fs_put_super(struct super_block *sb)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -738,7 +758,6 @@ static void f2fs_put_super(struct super_block *sb)
* In addition, EIO will skip do checkpoint, we need this as well.
*/
release_ino_entry(sbi, true);
- release_discard_addrs(sbi);
f2fs_leave_shrinker(sbi);
mutex_unlock(&sbi->umount_mutex);
@@ -762,6 +781,8 @@ static void f2fs_put_super(struct super_block *sb)
crypto_free_shash(sbi->s_chksum_driver);
kfree(sbi->raw_super);
+ destroy_device_list(sbi);
+
destroy_percpu_info(sbi);
kfree(sbi);
}
@@ -789,13 +810,17 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
static int f2fs_freeze(struct super_block *sb)
{
- int err;
-
if (f2fs_readonly(sb))
return 0;
- err = f2fs_sync_fs(sb, 1);
- return err;
+ /* IO error happened before */
+ if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
+ return -EIO;
+
+ /* must be clean, since sync_filesystem() was already called */
+ if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
+ return -EINVAL;
+ return 0;
}
static int f2fs_unfreeze(struct super_block *sb)
@@ -822,7 +847,8 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_bavail = user_block_count - valid_user_blocks(sbi);
buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
- buf->f_ffree = buf->f_files - valid_inode_count(sbi);
+ buf->f_ffree = min(buf->f_files - valid_node_count(sbi),
+ buf->f_bavail);
buf->f_namelen = F2FS_NAME_LEN;
buf->f_fsid.val[0] = (u32)id;
@@ -974,7 +1000,7 @@ static void default_options(struct f2fs_sb_info *sbi)
set_opt(sbi, EXTENT_CACHE);
sbi->sb->s_flags |= MS_LAZYTIME;
set_opt(sbi, FLUSH_MERGE);
- if (f2fs_sb_mounted_hmsmr(sbi->sb)) {
+ if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
set_opt_mode(sbi, F2FS_MOUNT_LFS);
set_opt(sbi, DISCARD);
} else {
@@ -1076,8 +1102,9 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
* or if flush_merge is not passed in mount option.
*/
if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
- destroy_flush_cmd_control(sbi);
- } else if (!SM_I(sbi)->cmd_control_info) {
+ clear_opt(sbi, FLUSH_MERGE);
+ destroy_flush_cmd_control(sbi, false);
+ } else {
err = create_flush_cmd_control(sbi);
if (err)
goto restore_gc;
@@ -1426,6 +1453,7 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
unsigned int total, fsmeta;
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ unsigned int ovp_segments, reserved_segments;
total = le32_to_cpu(raw_super->segment_count);
fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
@@ -1437,6 +1465,16 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
if (unlikely(fsmeta >= total))
return 1;
+ ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
+ reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
+
+ if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
+ ovp_segments == 0 || reserved_segments == 0)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong layout: check mkfs.f2fs version");
+ return 1;
+ }
+
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
return 1;
@@ -1447,6 +1485,7 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
static void init_sb_info(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = sbi->raw_super;
+ int i;
sbi->log_sectors_per_block =
le32_to_cpu(raw_super->log_sectors_per_block);
@@ -1471,6 +1510,9 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
clear_sbi_flag(sbi, SBI_NEED_FSCK);
+ for (i = 0; i < NR_COUNT_TYPE; i++)
+ atomic_set(&sbi->nr_pages[i], 0);
+
INIT_LIST_HEAD(&sbi->s_list);
mutex_init(&sbi->umount_mutex);
mutex_init(&sbi->wio_mutex[NODE]);
@@ -1486,13 +1528,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
static int init_percpu_info(struct f2fs_sb_info *sbi)
{
- int i, err;
-
- for (i = 0; i < NR_COUNT_TYPE; i++) {
- err = percpu_counter_init(&sbi->nr_pages[i], 0, GFP_KERNEL);
- if (err)
- return err;
- }
+ int err;
err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
if (err)
@@ -1502,6 +1538,71 @@ static int init_percpu_info(struct f2fs_sb_info *sbi)
GFP_KERNEL);
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
+{
+ struct block_device *bdev = FDEV(devi).bdev;
+ sector_t nr_sectors = bdev->bd_part->nr_sects;
+ sector_t sector = 0;
+ struct blk_zone *zones;
+ unsigned int i, nr_zones;
+ unsigned int n = 0;
+ int err = -EIO;
+
+ if (!f2fs_sb_mounted_blkzoned(sbi->sb))
+ return 0;
+
+ if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
+ SECTOR_TO_BLOCK(bdev_zone_size(bdev)))
+ return -EINVAL;
+ sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_size(bdev));
+ if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
+ __ilog2_u32(sbi->blocks_per_blkz))
+ return -EINVAL;
+ sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
+ FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
+ sbi->log_blocks_per_blkz;
+ if (nr_sectors & (bdev_zone_size(bdev) - 1))
+ FDEV(devi).nr_blkz++;
+
+ FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
+ if (!FDEV(devi).blkz_type)
+ return -ENOMEM;
+
+#define F2FS_REPORT_NR_ZONES 4096
+
+ zones = kcalloc(F2FS_REPORT_NR_ZONES, sizeof(struct blk_zone),
+ GFP_KERNEL);
+ if (!zones)
+ return -ENOMEM;
+
+ /* Get block zones type */
+ while (zones && sector < nr_sectors) {
+
+ nr_zones = F2FS_REPORT_NR_ZONES;
+ err = blkdev_report_zones(bdev, sector,
+ zones, &nr_zones,
+ GFP_KERNEL);
+ if (err)
+ break;
+ if (!nr_zones) {
+ err = -EIO;
+ break;
+ }
+
+ for (i = 0; i < nr_zones; i++) {
+ FDEV(devi).blkz_type[n] = zones[i].type;
+ sector += zones[i].len;
+ n++;
+ }
+ }
+
+ kfree(zones);
+
+ return err;
+}
+#endif
+
/*
* Read f2fs raw super block.
* Because we have two copies of the super block, read both of them
@@ -1594,6 +1695,77 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
return err;
}
+static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+ int i;
+
+ for (i = 0; i < MAX_DEVICES; i++) {
+ if (!RDEV(i).path[0])
+ return 0;
+
+ if (i == 0) {
+ sbi->devs = kzalloc(sizeof(struct f2fs_dev_info) *
+ MAX_DEVICES, GFP_KERNEL);
+ if (!sbi->devs)
+ return -ENOMEM;
+ }
+
+ memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
+ FDEV(i).total_segments = le32_to_cpu(RDEV(i).total_segments);
+ if (i == 0) {
+ FDEV(i).start_blk = 0;
+ FDEV(i).end_blk = FDEV(i).start_blk +
+ (FDEV(i).total_segments <<
+ sbi->log_blocks_per_seg) - 1 +
+ le32_to_cpu(raw_super->segment0_blkaddr);
+ } else {
+ FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
+ FDEV(i).end_blk = FDEV(i).start_blk +
+ (FDEV(i).total_segments <<
+ sbi->log_blocks_per_seg) - 1;
+ }
+
+ FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
+ sbi->sb->s_mode, sbi->sb->s_type);
+ if (IS_ERR(FDEV(i).bdev))
+ return PTR_ERR(FDEV(i).bdev);
+
+ /* to release errored devices */
+ sbi->s_ndevs = i + 1;
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
+ !f2fs_sb_mounted_blkzoned(sbi->sb)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Zoned block device feature not enabled\n");
+ return -EINVAL;
+ }
+ if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
+ if (init_blkz_info(sbi, i)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Failed to initialize F2FS blkzone information");
+ return -EINVAL;
+ }
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
+ i, FDEV(i).path,
+ FDEV(i).total_segments,
+ FDEV(i).start_blk, FDEV(i).end_blk,
+ bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
+ "Host-aware" : "Host-managed");
+ continue;
+ }
+#endif
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Mount Device [%2d]: %20s, %8u, %8x - %8x",
+ i, FDEV(i).path,
+ FDEV(i).total_segments,
+ FDEV(i).start_blk, FDEV(i).end_blk);
+ }
+ return 0;
+}
+
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
struct f2fs_sb_info *sbi;
@@ -1641,6 +1813,18 @@ try_onemore:
sb->s_fs_info = sbi;
sbi->raw_super = raw_super;
+ /*
+ * The BLKZONED feature indicates that the drive was formatted with
+ * zone alignment optimization. This is optional for host-aware
+ * devices, but mandatory for host-managed zoned block devices.
+ */
+#ifndef CONFIG_BLK_DEV_ZONED
+ if (f2fs_sb_mounted_blkzoned(sb)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Zoned block device support is not enabled\n");
+ goto free_sb_buf;
+ }
+#endif
default_options(sbi);
/* parse mount options */
options = kstrdup((const char *)data, GFP_KERNEL);
@@ -1710,6 +1894,13 @@ try_onemore:
goto free_meta_inode;
}
+ /* Initialize device list */
+ err = f2fs_scan_devices(sbi);
+ if (err) {
+ f2fs_msg(sb, KERN_ERR, "Failed to find devices");
+ goto free_devices;
+ }
+
sbi->total_valid_node_count =
le32_to_cpu(sbi->ckpt->valid_node_count);
percpu_counter_set(&sbi->total_valid_inode_count,
@@ -1893,12 +2084,21 @@ free_node_inode:
mutex_lock(&sbi->umount_mutex);
release_ino_entry(sbi, true);
f2fs_leave_shrinker(sbi);
+ /*
+ * Some dirty meta pages can be produced when recover_orphan_inodes()
+ * fails with EIO. Then, iput(node_inode) can trigger balance_fs_bg()
+ * followed by write_checkpoint() through f2fs_write_node_pages(), which
+ * falls into an infinite loop in sync_meta_pages().
+ */
+ truncate_inode_pages_final(META_MAPPING(sbi));
iput(sbi->node_inode);
mutex_unlock(&sbi->umount_mutex);
free_nm:
destroy_node_manager(sbi);
free_sm:
destroy_segment_manager(sbi);
+free_devices:
+ destroy_device_list(sbi);
kfree(sbi->ckpt);
free_meta_inode:
make_bad_inode(sbi->meta_inode);
@@ -2044,3 +2244,4 @@ module_exit(exit_f2fs_fs)
MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");
+
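On the multi-device mount path added in super.c above, f2fs_scan_devices() derives each device's block range from its segment count: device 0 starts at block 0 and also absorbs the blocks before segment 0 (segment0_blkaddr), and every later device starts right after the previous device's end_blk. A userspace sketch of that arithmetic with made-up geometry values; the field names mirror the diff but this is not the kernel structure:

#include <stdio.h>

#define MAX_DEVICES 8

struct dev_info {
	const char *path;
	unsigned int total_segments;
	unsigned long long start_blk;
	unsigned long long end_blk;
};

int main(void)
{
	unsigned int log_blocks_per_seg = 9;		/* 512 blocks per segment */
	unsigned long long segment0_blkaddr = 512;	/* blocks before segment 0 */
	struct dev_info devs[MAX_DEVICES] = {
		{ "/dev/sda", 1024 },
		{ "/dev/sdb", 2048 },
		{ "", 0 },				/* empty path ends the list */
	};

	for (int i = 0; i < MAX_DEVICES && devs[i].path[0]; i++) {
		unsigned long long blocks =
			(unsigned long long)devs[i].total_segments << log_blocks_per_seg;

		if (i == 0) {
			devs[i].start_blk = 0;
			devs[i].end_blk = blocks - 1 + segment0_blkaddr;
		} else {
			devs[i].start_blk = devs[i - 1].end_blk + 1;
			devs[i].end_blk = devs[i].start_blk + blocks - 1;
		}
		printf("%s: %llx - %llx\n", devs[i].path,
		       devs[i].start_blk, devs[i].end_blk);
	}
	return 0;
}
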
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 3e1c0280f866..c47ce2f330a1 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -106,7 +106,7 @@ static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
return -EINVAL;
F2FS_I(inode)->i_advise |= *(char *)value;
- f2fs_mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
return 0;
}
@@ -554,7 +554,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
f2fs_set_encrypted_inode(inode);
- f2fs_mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
if (!error && S_ISDIR(inode->i_mode))
set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
exit:
diff --git a/fs/iomap.c b/fs/iomap.c
index a8ee8c33ca78..13dd413b2b9c 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -467,8 +467,9 @@ int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
offset = page_offset(page);
while (length > 0) {
- ret = iomap_apply(inode, offset, length, IOMAP_WRITE,
- ops, page, iomap_page_mkwrite_actor);
+ ret = iomap_apply(inode, offset, length,
+ IOMAP_WRITE | IOMAP_FAULT, ops, page,
+ iomap_page_mkwrite_actor);
if (unlikely(ret <= 0))
goto out_unlock;
offset += ret;
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index 8653cac7e12e..b6fd1ff29ddf 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -121,7 +121,7 @@ long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
jfs_set_inode_flags(inode);
inode_unlock(inode);
- inode->i_ctime = CURRENT_TIME_SEC;
+ inode->i_ctime = current_time(inode);
mark_inode_dirty(inode);
setflags_out:
mnt_drop_write_file(filp);
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index a1982118f92f..ac9e108ce1ea 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -335,7 +335,7 @@ static int kernfs_xattr_set(const struct xattr_handler *handler,
return simple_xattr_set(&attrs->xattrs, name, value, size, flags);
}
-const struct xattr_handler kernfs_trusted_xattr_handler = {
+static const struct xattr_handler kernfs_trusted_xattr_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.get = kernfs_xattr_get,
.set = kernfs_xattr_set,
@@ -372,7 +372,7 @@ static int kernfs_security_xattr_set(const struct xattr_handler *handler,
return error;
}
-const struct xattr_handler kernfs_security_xattr_handler = {
+static const struct xattr_handler kernfs_security_xattr_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.get = kernfs_xattr_get,
.set = kernfs_security_xattr_set,
diff --git a/fs/mbcache.c b/fs/mbcache.c
index c5bd19ffa326..b19be429d655 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -29,7 +29,7 @@ struct mb_cache {
/* log2 of hash table size */
int c_bucket_bits;
/* Maximum entries in cache to avoid degrading hash too much */
- int c_max_entries;
+ unsigned long c_max_entries;
/* Protects c_list, c_entry_count */
spinlock_t c_list_lock;
struct list_head c_list;
@@ -43,7 +43,7 @@ struct mb_cache {
static struct kmem_cache *mb_entry_cache;
static unsigned long mb_cache_shrink(struct mb_cache *cache,
- unsigned int nr_to_scan);
+ unsigned long nr_to_scan);
static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
u32 key)
@@ -155,12 +155,12 @@ out:
}
/*
- * mb_cache_entry_find_first - find the first entry in cache with given key
+ * mb_cache_entry_find_first - find the first reusable entry with the given key
* @cache: cache where we should search
* @key: key to look for
*
- * Search in @cache for entry with key @key. Grabs reference to the first
- * entry found and returns the entry.
+ * Search in @cache for a reusable entry with key @key. Grabs reference to the
+ * first reusable entry found and returns the entry.
*/
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
u32 key)
@@ -170,14 +170,14 @@ struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
EXPORT_SYMBOL(mb_cache_entry_find_first);
/*
- * mb_cache_entry_find_next - find next entry in cache with the same
+ * mb_cache_entry_find_next - find next reusable entry with the same key
* @cache: cache where we should search
* @entry: entry to start search from
*
- * Finds next entry in the hash chain which has the same key as @entry.
- * If @entry is unhashed (which can happen when deletion of entry races
- * with the search), finds the first entry in the hash chain. The function
- * drops reference to @entry and returns with a reference to the found entry.
+ * Finds next reusable entry in the hash chain which has the same key as @entry.
+ * If @entry is unhashed (which can happen when deletion of entry races with the
+ * search), finds the first reusable entry in the hash chain. The function drops
+ * reference to @entry and returns with a reference to the found entry.
*/
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
struct mb_cache_entry *entry)
@@ -274,11 +274,11 @@ static unsigned long mb_cache_count(struct shrinker *shrink,
/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
- unsigned int nr_to_scan)
+ unsigned long nr_to_scan)
{
struct mb_cache_entry *entry;
struct hlist_bl_head *head;
- unsigned int shrunk = 0;
+ unsigned long shrunk = 0;
spin_lock(&cache->c_list_lock);
while (nr_to_scan-- && !list_empty(&cache->c_list)) {
@@ -286,7 +286,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
struct mb_cache_entry, e_list);
if (entry->e_referenced) {
entry->e_referenced = 0;
- list_move_tail(&cache->c_list, &entry->e_list);
+ list_move_tail(&entry->e_list, &cache->c_list);
continue;
}
list_del_init(&entry->e_list);
@@ -316,10 +316,9 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
static unsigned long mb_cache_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
- int nr_to_scan = sc->nr_to_scan;
struct mb_cache *cache = container_of(shrink, struct mb_cache,
c_shrink);
- return mb_cache_shrink(cache, nr_to_scan);
+ return mb_cache_shrink(cache, sc->nr_to_scan);
}
/* We shrink 1/X of the cache when we have too many entries in it */
@@ -341,11 +340,8 @@ static void mb_cache_shrink_worker(struct work_struct *work)
struct mb_cache *mb_cache_create(int bucket_bits)
{
struct mb_cache *cache;
- int bucket_count = 1 << bucket_bits;
- int i;
-
- if (!try_module_get(THIS_MODULE))
- return NULL;
+ unsigned long bucket_count = 1UL << bucket_bits;
+ unsigned long i;
cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
if (!cache)
@@ -377,7 +373,6 @@ struct mb_cache *mb_cache_create(int bucket_bits)
return cache;
err_out:
- module_put(THIS_MODULE);
return NULL;
}
EXPORT_SYMBOL(mb_cache_create);
@@ -411,7 +406,6 @@ void mb_cache_destroy(struct mb_cache *cache)
}
kfree(cache->c_hash);
kfree(cache);
- module_put(THIS_MODULE);
}
EXPORT_SYMBOL(mb_cache_destroy);
@@ -420,7 +414,8 @@ static int __init mbcache_init(void)
mb_entry_cache = kmem_cache_create("mbcache",
sizeof(struct mb_cache_entry), 0,
SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
- BUG_ON(!mb_entry_cache);
+ if (!mb_entry_cache)
+ return -ENOMEM;
return 0;
}
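A minimal caller sketch for the lookup API documented above, assuming the usual reference rules: each find call returns with a reference held, find_next() drops the reference on the entry it was given, and the final matching entry is released by the caller with mb_cache_entry_put(cache, entry). The match() callback and function name are made up for illustration.

static struct mb_cache_entry *
example_lookup_match(struct mb_cache *cache, u32 key,
		     bool (*match)(struct mb_cache_entry *entry))
{
	struct mb_cache_entry *entry;

	for (entry = mb_cache_entry_find_first(cache, key);
	     entry;
	     entry = mb_cache_entry_find_next(cache, entry)) {
		if (match(entry))
			return entry;	/* caller drops the reference */
	}
	return NULL;
}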
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 5f2dc2032c79..7eb3cefcf2a3 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -479,6 +479,7 @@ struct proc_dir_entry *proc_create_mount_point(const char *name)
}
return ent;
}
+EXPORT_SYMBOL(proc_create_mount_point);
struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
struct proc_dir_entry *parent,
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index bbba5d22aada..109876a24d2c 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -195,7 +195,6 @@ static inline bool is_empty_pde(const struct proc_dir_entry *pde)
{
return S_ISDIR(pde->mode) && !pde->proc_iops;
}
-struct proc_dir_entry *proc_create_mount_point(const char *name);
/*
* inode.c
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 6be5204a06d3..38755ca96c7a 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1297,8 +1297,7 @@ __xfs_get_blocks(
sector_t iblock,
struct buffer_head *bh_result,
int create,
- bool direct,
- bool dax_fault)
+ bool direct)
{
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
@@ -1419,13 +1418,8 @@ __xfs_get_blocks(
if (ISUNWRITTEN(&imap))
set_buffer_unwritten(bh_result);
/* direct IO needs special help */
- if (create) {
- if (dax_fault)
- ASSERT(!ISUNWRITTEN(&imap));
- else
- xfs_map_direct(inode, bh_result, &imap, offset,
- is_cow);
- }
+ if (create)
+ xfs_map_direct(inode, bh_result, &imap, offset, is_cow);
}
/*
@@ -1465,7 +1459,7 @@ xfs_get_blocks(
struct buffer_head *bh_result,
int create)
{
- return __xfs_get_blocks(inode, iblock, bh_result, create, false, false);
+ return __xfs_get_blocks(inode, iblock, bh_result, create, false);
}
int
@@ -1475,17 +1469,7 @@ xfs_get_blocks_direct(
struct buffer_head *bh_result,
int create)
{
- return __xfs_get_blocks(inode, iblock, bh_result, create, true, false);
-}
-
-int
-xfs_get_blocks_dax_fault(
- struct inode *inode,
- sector_t iblock,
- struct buffer_head *bh_result,
- int create)
-{
- return __xfs_get_blocks(inode, iblock, bh_result, create, true, true);
+ return __xfs_get_blocks(inode, iblock, bh_result, create, true);
}
/*
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index b3c6634f9518..34dc00dfb91d 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -59,9 +59,6 @@ int xfs_get_blocks(struct inode *inode, sector_t offset,
struct buffer_head *map_bh, int create);
int xfs_get_blocks_direct(struct inode *inode, sector_t offset,
struct buffer_head *map_bh, int create);
-int xfs_get_blocks_dax_fault(struct inode *inode, sector_t offset,
- struct buffer_head *map_bh, int create);
-
int xfs_end_io_direct_write(struct kiocb *iocb, loff_t offset,
ssize_t size, void *private);
int xfs_setfilesize(struct xfs_inode *ip, xfs_off_t offset, size_t size);
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 6e4f7f900fea..d818c160451f 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -318,7 +318,7 @@ xfs_file_dax_read(
return 0; /* skip atime */
xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
- ret = iomap_dax_rw(iocb, to, &xfs_iomap_ops);
+ ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
file_accessed(iocb->ki_filp);
@@ -653,7 +653,7 @@ xfs_file_dax_write(
trace_xfs_file_dax_write(ip, count, pos);
- ret = iomap_dax_rw(iocb, from, &xfs_iomap_ops);
+ ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
i_size_write(inode, iocb->ki_pos);
error = xfs_setfilesize(ip, pos, ret);
@@ -1474,7 +1474,7 @@ xfs_filemap_page_mkwrite(
xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
if (IS_DAX(inode)) {
- ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
+ ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
} else {
ret = iomap_page_mkwrite(vma, vmf, &xfs_iomap_ops);
ret = block_page_mkwrite_return(ret);
@@ -1508,7 +1508,7 @@ xfs_filemap_fault(
* changes to xfs_get_blocks_direct() to map unwritten extent
* ioend for conversion on read-only mappings.
*/
- ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
+ ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
} else
ret = filemap_fault(vma, vmf);
xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
@@ -1545,7 +1545,7 @@ xfs_filemap_pmd_fault(
}
xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
- ret = dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault);
+ ret = dax_iomap_pmd_fault(vma, addr, pmd, flags, &xfs_iomap_ops);
xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
if (flags & FAULT_FLAG_WRITE)
diff --git a/include/dt-bindings/sound/cs42l42.h b/include/dt-bindings/sound/cs42l42.h
new file mode 100644
index 000000000000..399a123aed58
--- /dev/null
+++ b/include/dt-bindings/sound/cs42l42.h
@@ -0,0 +1,73 @@
+/*
+ * cs42l42.h -- CS42L42 ALSA SoC audio driver DT bindings header
+ *
+ * Copyright 2016 Cirrus Logic, Inc.
+ *
+ * Author: James Schulman <james.schulman@cirrus.com>
+ * Author: Brian Austin <brian.austin@cirrus.com>
+ * Author: Michael White <michael.white@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DT_CS42L42_H
+#define __DT_CS42L42_H
+
+/* HPOUT Load Capacity */
+#define CS42L42_HPOUT_LOAD_1NF 0
+#define CS42L42_HPOUT_LOAD_10NF 1
+
+/* HPOUT Clamp to GND Override */
+#define CS42L42_HPOUT_CLAMP_EN 0
+#define CS42L42_HPOUT_CLAMP_DIS 1
+
+/* Tip Sense Inversion */
+#define CS42L42_TS_INV_DIS 0
+#define CS42L42_TS_INV_EN 1
+
+/* Tip Sense Debounce */
+#define CS42L42_TS_DBNCE_0 0
+#define CS42L42_TS_DBNCE_125 1
+#define CS42L42_TS_DBNCE_250 2
+#define CS42L42_TS_DBNCE_500 3
+#define CS42L42_TS_DBNCE_750 4
+#define CS42L42_TS_DBNCE_1000 5
+#define CS42L42_TS_DBNCE_1250 6
+#define CS42L42_TS_DBNCE_1500 7
+
+/* Button Press Software Debounce Times */
+#define CS42L42_BTN_DET_INIT_DBNCE_MIN 0
+#define CS42L42_BTN_DET_INIT_DBNCE_DEFAULT 100
+#define CS42L42_BTN_DET_INIT_DBNCE_MAX 200
+
+#define CS42L42_BTN_DET_EVENT_DBNCE_MIN 0
+#define CS42L42_BTN_DET_EVENT_DBNCE_DEFAULT 10
+#define CS42L42_BTN_DET_EVENT_DBNCE_MAX 20
+
+/* Button Detect Level Sensitivities */
+#define CS42L42_NUM_BIASES 4
+
+#define CS42L42_HS_DET_LEVEL_15 0x0F
+#define CS42L42_HS_DET_LEVEL_8 0x08
+#define CS42L42_HS_DET_LEVEL_4 0x04
+#define CS42L42_HS_DET_LEVEL_1 0x01
+
+#define CS42L42_HS_DET_LEVEL_MIN 0
+#define CS42L42_HS_DET_LEVEL_MAX 0x3F
+
+/* HS Bias Ramp Rate */
+
+#define CS42L42_HSBIAS_RAMP_FAST_RISE_SLOW_FALL 0
+#define CS42L42_HSBIAS_RAMP_FAST 1
+#define CS42L42_HSBIAS_RAMP_SLOW 2
+#define CS42L42_HSBIAS_RAMP_SLOWEST 3
+
+#define CS42L42_HSBIAS_RAMP_TIME0 10
+#define CS42L42_HSBIAS_RAMP_TIME1 40
+#define CS42L42_HSBIAS_RAMP_TIME2 90
+#define CS42L42_HSBIAS_RAMP_TIME3 170
+
+#endif /* __DT_CS42L42_H */
diff --git a/include/linux/ahci-remap.h b/include/linux/ahci-remap.h
new file mode 100644
index 000000000000..62be3a40239d
--- /dev/null
+++ b/include/linux/ahci-remap.h
@@ -0,0 +1,28 @@
+#ifndef _LINUX_AHCI_REMAP_H
+#define _LINUX_AHCI_REMAP_H
+
+#include <linux/sizes.h>
+
+#define AHCI_VSCAP 0xa4
+#define AHCI_REMAP_CAP 0x800
+
+/* device class code */
+#define AHCI_REMAP_N_DCC 0x880
+
+/* remap-device base relative to ahci-bar */
+#define AHCI_REMAP_N_OFFSET SZ_16K
+#define AHCI_REMAP_N_SIZE SZ_16K
+
+#define AHCI_MAX_REMAP 3
+
+static inline unsigned int ahci_remap_dcc(int i)
+{
+ return AHCI_REMAP_N_DCC + i * 0x80;
+}
+
+static inline unsigned int ahci_remap_base(int i)
+{
+ return AHCI_REMAP_N_OFFSET + i * AHCI_REMAP_N_SIZE;
+}
+
+#endif /* _LINUX_AHCI_REMAP_H */
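A short sketch of how a host driver might walk the possible remapped devices behind the AHCI BAR using the helpers above; the function name is hypothetical and the actual register validation is omitted.

static void example_scan_remapped(void __iomem *ahci_bar)
{
	unsigned int i;

	for (i = 0; i < AHCI_MAX_REMAP; i++) {
		void __iomem *base = ahci_bar + ahci_remap_base(i);

		/* ahci_remap_dcc(i) is the config offset holding the
		 * device class code for slot i; checking it against an
		 * expected class is left out of this sketch. */
		(void)base;
	}
}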
diff --git a/include/linux/ata.h b/include/linux/ata.h
index fdb180367ba1..af6859b3a93d 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -348,6 +348,7 @@ enum {
ATA_LOG_DEVSLP_DETO = 0x01,
ATA_LOG_DEVSLP_VALID = 0x07,
ATA_LOG_DEVSLP_VALID_MASK = 0x80,
+ ATA_LOG_NCQ_PRIO_OFFSET = 0x09,
/* NCQ send and receive log */
ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET = 0x00,
@@ -940,6 +941,11 @@ static inline bool ata_id_has_ncq_non_data(const u16 *id)
return id[ATA_ID_SATA_CAPABILITY_2] & BIT(5);
}
+static inline bool ata_id_has_ncq_prio(const u16 *id)
+{
+ return id[ATA_ID_SATA_CAPABILITY] & BIT(12);
+}
+
static inline bool ata_id_has_trim(const u16 *id)
{
if (ata_id_major_version(id) >= 7 &&
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 87e404aae267..4a2ab5d99ff7 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -242,6 +242,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set);
+int blk_mq_map_queues(struct blk_mq_tag_set *set);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
/*
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c5393766909d..286b2a264383 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1059,6 +1059,20 @@ static inline unsigned int blk_rq_count_bios(struct request *rq)
}
/*
+ * blk_rq_set_prio - associate a request with prio from ioc
+ * @rq: request of interest
+ * @ioc: target iocontext
+ *
+ * Associate request prio with ioc prio so request based drivers
+ * can leverage priority information.
+ */
+static inline void blk_rq_set_prio(struct request *rq, struct io_context *ioc)
+{
+ if (ioc)
+ rq->ioprio = ioc->ioprio;
+}
+
+/*
* Request issue related functions.
*/
extern struct request *blk_peek_request(struct request_queue *q);
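A hedged usage sketch for the new blk_rq_set_prio() helper: a request-based driver copies the submitting task's io-context priority onto the request so lower layers can act on it. The function and call site are illustrative, not from this patch.

static void example_prep_request(struct request *rq)
{
	/* current->io_context may be NULL; the helper tolerates that
	 * and simply leaves rq->ioprio untouched in that case */
	blk_rq_set_prio(rq, current->io_context);
}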
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index a226652a5a6c..657a718c27d2 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -40,6 +40,8 @@ struct bsg_job {
struct device *dev;
struct request *req;
+ struct kref kref;
+
/* Transport/driver specific request/reply structs */
void *request;
void *reply;
@@ -67,5 +69,7 @@ void bsg_job_done(struct bsg_job *job, int result,
int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name,
bsg_job_fn *job_fn, int dd_job_size);
void bsg_request_fn(struct request_queue *q);
+void bsg_job_put(struct bsg_job *job);
+int __must_check bsg_job_get(struct bsg_job *job);
#endif
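A sketch of the reference-counting pattern the new bsg_job_get()/bsg_job_put() pair enables in a transport LLD; the function name and the asynchronous completion path are assumptions.

static int example_queue_bsg(struct bsg_job *job)
{
	if (!bsg_job_get(job))		/* job is already being torn down */
		return -ENXIO;

	/* hand the job to hardware; when the asynchronous completion
	 * fires, the driver finishes it and drops its extra reference */
	bsg_job_done(job, 0, 0);
	bsg_job_put(job);
	return 0;
}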
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 2189935075b4..a951fd10aaaa 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -71,6 +71,7 @@ struct cpu_cacheinfo {
struct cacheinfo *info_list;
unsigned int num_levels;
unsigned int num_leaves;
+ bool cpu_map_populated;
};
/*
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index d9d6a9d77489..9a30b921f740 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -228,7 +228,7 @@ static struct configfs_bin_attribute _pfx##attr_##_name = { \
struct configfs_item_operations {
void (*release)(struct config_item *);
int (*allow_link)(struct config_item *src, struct config_item *target);
- int (*drop_link)(struct config_item *src, struct config_item *target);
+ void (*drop_link)(struct config_item *src, struct config_item *target);
};
struct configfs_group_operations {
diff --git a/include/linux/console.h b/include/linux/console.h
index d530c4627e54..9c26c6685587 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -28,9 +28,17 @@ struct tty_struct;
#define VT100ID "\033[?1;2c"
#define VT102ID "\033[?6c"
+enum con_scroll {
+ SM_UP,
+ SM_DOWN,
+};
+
/**
* struct consw - callbacks for consoles
*
+ * @con_scroll: move lines from @top to @bottom in direction @dir by @lines.
+ * Return true if no generic handling should be done.
+ * Invoked by csi_M and printing to the console.
* @con_set_palette: sets the palette of the console to @table (optional)
* @con_scrolldelta: the contents of the console should be scrolled by @lines.
* Invoked by user. (optional)
@@ -44,7 +52,9 @@ struct consw {
void (*con_putc)(struct vc_data *, int, int, int);
void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int);
void (*con_cursor)(struct vc_data *, int);
- int (*con_scroll)(struct vc_data *, int, int, int, int);
+ bool (*con_scroll)(struct vc_data *, unsigned int top,
+ unsigned int bottom, enum con_scroll dir,
+ unsigned int lines);
int (*con_switch)(struct vc_data *);
int (*con_blank)(struct vc_data *, int, int);
int (*con_font_set)(struct vc_data *, struct console_font *, unsigned);
@@ -99,10 +109,6 @@ static inline int con_debug_leave(void)
}
#endif
-/* scroll */
-#define SM_UP (1)
-#define SM_DOWN (2)
-
/* cursor */
#define CM_DRAW (1)
#define CM_ERASE (2)
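A driver-side sketch matching the retyped con_scroll callback above; the console name is made up. Per the kerneldoc, returning true means the driver handled the scroll and no generic handling is needed.

static bool examplecon_scroll(struct vc_data *vc, unsigned int top,
			      unsigned int bottom, enum con_scroll dir,
			      unsigned int lines)
{
	/* a driver with hardware scrolling would move the region
	 * [top, bottom) by 'lines' rows in direction 'dir' here and
	 * return true; returning false falls back to the VT core's
	 * software scroll */
	return false;
}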
diff --git a/include/linux/dax.h b/include/linux/dax.h
index add6c4bc568f..0afade8bd3d7 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -8,21 +8,41 @@
struct iomap_ops;
-/* We use lowest available exceptional entry bit for locking */
+/*
+ * We use lowest available bit in exceptional entry for locking, one bit for
+ * the entry size (PMD) and two more to tell us if the entry is a huge zero
+ * page (HZP) or an empty entry that is just used for locking. In total four
+ * special bits.
+ *
+ * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the HZP and
+ * EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
+ * block allocation.
+ */
+#define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
+#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
+#define RADIX_DAX_HZP (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
+#define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
-ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
+static inline unsigned long dax_radix_sector(void *entry)
+{
+ return (unsigned long)entry >> RADIX_DAX_SHIFT;
+}
+
+static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
+{
+ return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
+ ((unsigned long)sector << RADIX_DAX_SHIFT) |
+ RADIX_DAX_ENTRY_LOCK);
+}
+
+ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
struct iomap_ops *ops);
-ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
- get_block_t, dio_iodone_t, int flags);
-int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
-int dax_truncate_page(struct inode *, loff_t from, get_block_t);
-int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
struct iomap_ops *ops);
-int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
- pgoff_t index, bool wake_all);
+ pgoff_t index, void *entry, bool wake_all);
#ifdef CONFIG_FS_DAX
struct page *read_dax_sector(struct block_device *bdev, sector_t n);
@@ -48,18 +68,28 @@ static inline int __dax_zero_page_range(struct block_device *bdev,
}
#endif
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
-int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
- unsigned int flags, get_block_t);
+#ifdef CONFIG_FS_DAX_PMD
+static inline unsigned int dax_radix_order(void *entry)
+{
+ if ((unsigned long)entry & RADIX_DAX_PMD)
+ return PMD_SHIFT - PAGE_SHIFT;
+ return 0;
+}
+int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmd, unsigned int flags, struct iomap_ops *ops);
#else
-static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, unsigned int flags, get_block_t gb)
+static inline unsigned int dax_radix_order(void *entry)
+{
+ return 0;
+}
+static inline int dax_iomap_pmd_fault(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmd, unsigned int flags,
+ struct iomap_ops *ops)
{
return VM_FAULT_FALLBACK;
}
#endif
int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
-#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb)
static inline bool vma_is_dax(struct vm_area_struct *vma)
{
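A worked example of the radix entry encoding described in the comment block above, using only the helpers and flags introduced by this patch; the sector value is arbitrary.

/* build a locked PMD-sized entry for sector 0x1000 */
void *entry = dax_radix_locked_entry(0x1000, RADIX_DAX_PMD);

/* dax_radix_sector(entry) == 0x1000                                */
/* (unsigned long)entry & RADIX_DAX_PMD        -> PMD-sized entry   */
/* (unsigned long)entry & RADIX_DAX_ENTRY_LOCK -> entry is locked   */
/* dax_radix_order(entry) == PMD_SHIFT - PAGE_SHIFT (FS_DAX_PMD=y)  */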
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index bf1907d96097..014cc564d1c4 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -63,6 +63,21 @@ debugfs_real_fops(const struct file *filp)
return filp->f_path.dentry->d_fsdata;
}
+#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
+static int __fops ## _open(struct inode *inode, struct file *file) \
+{ \
+ __simple_attr_check_format(__fmt, 0ull); \
+ return simple_attr_open(inode, file, __get, __set, __fmt); \
+} \
+static const struct file_operations __fops = { \
+ .owner = THIS_MODULE, \
+ .open = __fops ## _open, \
+ .release = simple_attr_release, \
+ .read = debugfs_attr_read, \
+ .write = debugfs_attr_write, \
+ .llseek = generic_file_llseek, \
+}
+
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_create_file(const char *name, umode_t mode,
@@ -100,21 +115,6 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf,
ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
-static int __fops ## _open(struct inode *inode, struct file *file) \
-{ \
- __simple_attr_check_format(__fmt, 0ull); \
- return simple_attr_open(inode, file, __get, __set, __fmt); \
-} \
-static const struct file_operations __fops = { \
- .owner = THIS_MODULE, \
- .open = __fops ## _open, \
- .release = simple_attr_release, \
- .read = debugfs_attr_read, \
- .write = debugfs_attr_write, \
- .llseek = generic_file_llseek, \
-}
-
struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, const char *new_name);
@@ -234,8 +234,18 @@ static inline void debugfs_use_file_finish(int srcu_idx)
__releases(&debugfs_srcu)
{ }
-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
- static const struct file_operations __fops = { 0 }
+static inline ssize_t debugfs_attr_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
+static inline ssize_t debugfs_attr_write(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return -ENODEV;
+}
static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, char *new_name)
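A usage sketch for the relocated DEFINE_DEBUGFS_ATTRIBUTE() macro; the backing variable, helpers and file name are hypothetical. The macro is typically paired with debugfs_create_file_unsafe(), and with the -ENODEV stubs added above it now builds even when debugfs is disabled.

static u64 example_threshold;

static int example_threshold_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int example_threshold_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_example_threshold, example_threshold_get,
			 example_threshold_set, "%llu\n");

/* in init code, for instance:
 *	debugfs_create_file_unsafe("threshold", 0644, parent_dir,
 *				   &example_threshold,
 *				   &fops_example_threshold);
 */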
diff --git a/include/linux/device.h b/include/linux/device.h
index 94926d3ad6c6..491b4c0ca633 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -362,6 +362,7 @@ int subsys_virtual_register(struct bus_type *subsys,
* @name: Name of the class.
* @owner: The module owner.
* @class_attrs: Default attributes of this class.
+ * @class_groups: Default attributes of this class.
* @dev_groups: Default attributes of the devices that belong to the class.
* @dev_kobj: The kobject that represents this class and links it into the hierarchy.
* @dev_uevent: Called when a device is added, removed from this class, or a
@@ -390,6 +391,7 @@ struct class {
struct module *owner;
struct class_attribute *class_attrs;
+ const struct attribute_group **class_groups;
const struct attribute_group **dev_groups;
struct kobject *dev_kobj;
@@ -465,6 +467,8 @@ struct class_attribute {
struct class_attribute class_attr_##_name = __ATTR_RW(_name)
#define CLASS_ATTR_RO(_name) \
struct class_attribute class_attr_##_name = __ATTR_RO(_name)
+#define CLASS_ATTR_WO(_name) \
+ struct class_attribute class_attr_##_name = __ATTR_WO(_name)
extern int __must_check class_create_file_ns(struct class *class,
const struct class_attribute *attr,
@@ -727,6 +731,87 @@ struct device_dma_parameters {
};
/**
+ * enum device_link_state - Device link states.
+ * @DL_STATE_NONE: The presence of the drivers is not being tracked.
+ * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
+ * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not.
+ * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present).
+ * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present.
+ * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding.
+ */
+enum device_link_state {
+ DL_STATE_NONE = -1,
+ DL_STATE_DORMANT = 0,
+ DL_STATE_AVAILABLE,
+ DL_STATE_CONSUMER_PROBE,
+ DL_STATE_ACTIVE,
+ DL_STATE_SUPPLIER_UNBIND,
+};
+
+/*
+ * Device link flags.
+ *
+ * STATELESS: The core won't track the presence of supplier/consumer drivers.
+ * AUTOREMOVE: Remove this link automatically on consumer driver unbind.
+ * PM_RUNTIME: If set, the runtime PM framework will use this link.
+ * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
+ */
+#define DL_FLAG_STATELESS BIT(0)
+#define DL_FLAG_AUTOREMOVE BIT(1)
+#define DL_FLAG_PM_RUNTIME BIT(2)
+#define DL_FLAG_RPM_ACTIVE BIT(3)
+
+/**
+ * struct device_link - Device link representation.
+ * @supplier: The device on the supplier end of the link.
+ * @s_node: Hook to the supplier device's list of links to consumers.
+ * @consumer: The device on the consumer end of the link.
+ * @c_node: Hook to the consumer device's list of links to suppliers.
+ * @status: The state of the link (with respect to the presence of drivers).
+ * @flags: Link flags.
+ * @rpm_active: Whether or not the consumer device is runtime-PM-active.
+ * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
+ */
+struct device_link {
+ struct device *supplier;
+ struct list_head s_node;
+ struct device *consumer;
+ struct list_head c_node;
+ enum device_link_state status;
+ u32 flags;
+ bool rpm_active;
+#ifdef CONFIG_SRCU
+ struct rcu_head rcu_head;
+#endif
+};
+
+/**
+ * enum dl_dev_state - Device driver presence tracking information.
+ * @DL_DEV_NO_DRIVER: There is no driver attached to the device.
+ * @DL_DEV_PROBING: A driver is probing.
+ * @DL_DEV_DRIVER_BOUND: The driver has been bound to the device.
+ * @DL_DEV_UNBINDING: The driver is unbinding from the device.
+ */
+enum dl_dev_state {
+ DL_DEV_NO_DRIVER = 0,
+ DL_DEV_PROBING,
+ DL_DEV_DRIVER_BOUND,
+ DL_DEV_UNBINDING,
+};
+
+/**
+ * struct dev_links_info - Device data related to device links.
+ * @suppliers: List of links to supplier devices.
+ * @consumers: List of links to consumer devices.
+ * @status: Driver status information.
+ */
+struct dev_links_info {
+ struct list_head suppliers;
+ struct list_head consumers;
+ enum dl_dev_state status;
+};
+
+/**
* struct device - The basic device structure
* @parent: The device's "parent" device, the device to which it is attached.
* In most cases, a parent device is some sort of bus or host
@@ -751,6 +836,7 @@ struct device_dma_parameters {
* on. This shrinks the "Board Support Packages" (BSPs) and
* minimizes board-specific #ifdefs in drivers.
* @driver_data: Private pointer for driver specific info.
+ * @links: Links to suppliers and consumers of this device.
* @power: For device power management.
* See Documentation/power/admin-guide/devices.rst for details.
* @pm_domain: Provide callbacks that are executed during system suspend,
@@ -818,6 +904,7 @@ struct device {
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
dev_set/get_drvdata */
+ struct dev_links_info links;
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
@@ -1135,6 +1222,10 @@ extern void device_shutdown(void);
/* debugging and troubleshooting/diagnostic helpers. */
extern const char *dev_driver_string(const struct device *dev);
+/* Device links interface. */
+struct device_link *device_link_add(struct device *consumer,
+ struct device *supplier, u32 flags);
+void device_link_del(struct device_link *link);
#ifdef CONFIG_PRINTK
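A consumer-side sketch of the device links interface declared above; supplier lookup and error handling are trimmed, and the function name is illustrative.

static int example_consumer_probe(struct device *dev,
				  struct device *supplier_dev)
{
	struct device_link *link;

	link = device_link_add(dev, supplier_dev,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE);
	if (!link)
		return -EINVAL;

	/* with DL_FLAG_AUTOREMOVE the core drops the link when this
	 * consumer unbinds, so .remove() needs no device_link_del() */
	return 0;
}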
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 422630b8e588..cea41a124a80 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -52,10 +52,17 @@
#define VERSION_LEN 256
#define MAX_VOLUME_NAME 512
+#define MAX_PATH_LEN 64
+#define MAX_DEVICES 8
/*
* For superblock
*/
+struct f2fs_device {
+ __u8 path[MAX_PATH_LEN];
+ __le32 total_segments;
+} __packed;
+
struct f2fs_super_block {
__le32 magic; /* Magic Number */
__le16 major_ver; /* Major Version */
@@ -94,7 +101,8 @@ struct f2fs_super_block {
__le32 feature; /* defined features */
__u8 encryption_level; /* versioning level for encryption */
__u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
- __u8 reserved[871]; /* valid reserved region */
+ struct f2fs_device devs[MAX_DEVICES]; /* device list */
+ __u8 reserved[327]; /* valid reserved region */
} __packed;
/*
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
new file mode 100644
index 000000000000..dba6e3c697c7
--- /dev/null
+++ b/include/linux/fpga/fpga-bridge.h
@@ -0,0 +1,60 @@
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+
+#ifndef _LINUX_FPGA_BRIDGE_H
+#define _LINUX_FPGA_BRIDGE_H
+
+struct fpga_bridge;
+
+/**
+ * struct fpga_bridge_ops - ops for low level FPGA bridge drivers
+ * @enable_show: returns the FPGA bridge's status
+ * @enable_set: enable or disable an FPGA bridge
+ * @fpga_bridge_remove: set FPGA into a specific state during driver remove
+ */
+struct fpga_bridge_ops {
+ int (*enable_show)(struct fpga_bridge *bridge);
+ int (*enable_set)(struct fpga_bridge *bridge, bool enable);
+ void (*fpga_bridge_remove)(struct fpga_bridge *bridge);
+};
+
+/**
+ * struct fpga_bridge - FPGA bridge structure
+ * @name: name of low level FPGA bridge
+ * @dev: FPGA bridge device
+ * @mutex: enforces exclusive reference to bridge
+ * @br_ops: pointer to struct of FPGA bridge ops
+ * @info: fpga image specific information
+ * @node: FPGA bridge list node
+ * @priv: low level driver private data
+ */
+struct fpga_bridge {
+ const char *name;
+ struct device dev;
+ struct mutex mutex; /* for exclusive reference to bridge */
+ const struct fpga_bridge_ops *br_ops;
+ struct fpga_image_info *info;
+ struct list_head node;
+ void *priv;
+};
+
+#define to_fpga_bridge(d) container_of(d, struct fpga_bridge, dev)
+
+struct fpga_bridge *of_fpga_bridge_get(struct device_node *node,
+ struct fpga_image_info *info);
+void fpga_bridge_put(struct fpga_bridge *bridge);
+int fpga_bridge_enable(struct fpga_bridge *bridge);
+int fpga_bridge_disable(struct fpga_bridge *bridge);
+
+int fpga_bridges_enable(struct list_head *bridge_list);
+int fpga_bridges_disable(struct list_head *bridge_list);
+void fpga_bridges_put(struct list_head *bridge_list);
+int fpga_bridge_get_to_list(struct device_node *np,
+ struct fpga_image_info *info,
+ struct list_head *bridge_list);
+
+int fpga_bridge_register(struct device *dev, const char *name,
+ const struct fpga_bridge_ops *br_ops, void *priv);
+void fpga_bridge_unregister(struct device *dev);
+
+#endif /* _LINUX_FPGA_BRIDGE_H */
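A minimal low-level bridge driver sketch against the ops above; register access is faked and all names are made up.

static int example_br_enable_show(struct fpga_bridge *bridge)
{
	/* return 1 if traffic is allowed through the bridge, 0 if gated */
	return 1;
}

static int example_br_enable_set(struct fpga_bridge *bridge, bool enable)
{
	/* poke the bridge-enable / soft-reset register here */
	return 0;
}

static const struct fpga_bridge_ops example_br_ops = {
	.enable_show = example_br_enable_show,
	.enable_set  = example_br_enable_set,
};

/* probe:  fpga_bridge_register(&pdev->dev, "example bridge",
 *				&example_br_ops, priv);
 * remove: fpga_bridge_unregister(&pdev->dev);
 */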
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 0940bf45e2f2..16551d5eac36 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -65,11 +65,26 @@ enum fpga_mgr_states {
/*
* FPGA Manager flags
* FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
+ * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
*/
#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
+#define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
+
+/**
+ * struct fpga_image_info - information specific to a FPGA image
+ * @flags: boolean flags as defined above
+ * @enable_timeout_us: maximum time to enable traffic through bridge (uSec)
+ * @disable_timeout_us: maximum time to disable traffic through bridge (uSec)
+ */
+struct fpga_image_info {
+ u32 flags;
+ u32 enable_timeout_us;
+ u32 disable_timeout_us;
+};
/**
* struct fpga_manager_ops - ops for low level fpga manager drivers
+ * @initial_header_size: Maximum number of bytes that should be passed into write_init
* @state: returns an enum value of the FPGA's state
* @write_init: prepare the FPGA to receive configuration data
* @write: write count bytes of configuration data to the FPGA
@@ -81,11 +96,14 @@ enum fpga_mgr_states {
* called, so leaving them out is fine.
*/
struct fpga_manager_ops {
+ size_t initial_header_size;
enum fpga_mgr_states (*state)(struct fpga_manager *mgr);
- int (*write_init)(struct fpga_manager *mgr, u32 flags,
+ int (*write_init)(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
const char *buf, size_t count);
int (*write)(struct fpga_manager *mgr, const char *buf, size_t count);
- int (*write_complete)(struct fpga_manager *mgr, u32 flags);
+ int (*write_complete)(struct fpga_manager *mgr,
+ struct fpga_image_info *info);
void (*fpga_remove)(struct fpga_manager *mgr);
};
@@ -109,14 +127,17 @@ struct fpga_manager {
#define to_fpga_manager(d) container_of(d, struct fpga_manager, dev)
-int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags,
+int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
const char *buf, size_t count);
-int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
+int fpga_mgr_firmware_load(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
const char *image_name);
struct fpga_manager *of_fpga_mgr_get(struct device_node *node);
+struct fpga_manager *fpga_mgr_get(struct device *dev);
+
void fpga_mgr_put(struct fpga_manager *mgr);
int fpga_mgr_register(struct device *dev, const char *name,
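A caller-side sketch of the new info-based load path; the flag choice, timeouts and firmware file name are illustrative only.

static int example_program(struct fpga_manager *mgr)
{
	struct fpga_image_info info = {
		.flags = FPGA_MGR_PARTIAL_RECONFIG,
		.enable_timeout_us = 1000,
		.disable_timeout_us = 1000,
	};

	return fpga_mgr_firmware_load(mgr, &info, "example-image.rbf");
}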
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index ff8b11b26f31..c074b670aa99 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -18,73 +18,9 @@
#include <crypto/skcipher.h>
#include <uapi/linux/fs.h>
-#define FS_KEY_DERIVATION_NONCE_SIZE 16
-#define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1
-
-#define FS_POLICY_FLAGS_PAD_4 0x00
-#define FS_POLICY_FLAGS_PAD_8 0x01
-#define FS_POLICY_FLAGS_PAD_16 0x02
-#define FS_POLICY_FLAGS_PAD_32 0x03
-#define FS_POLICY_FLAGS_PAD_MASK 0x03
-#define FS_POLICY_FLAGS_VALID 0x03
-
-/* Encryption algorithms */
-#define FS_ENCRYPTION_MODE_INVALID 0
-#define FS_ENCRYPTION_MODE_AES_256_XTS 1
-#define FS_ENCRYPTION_MODE_AES_256_GCM 2
-#define FS_ENCRYPTION_MODE_AES_256_CBC 3
-#define FS_ENCRYPTION_MODE_AES_256_CTS 4
-
-/**
- * Encryption context for inode
- *
- * Protector format:
- * 1 byte: Protector format (1 = this version)
- * 1 byte: File contents encryption mode
- * 1 byte: File names encryption mode
- * 1 byte: Flags
- * 8 bytes: Master Key descriptor
- * 16 bytes: Encryption Key derivation nonce
- */
-struct fscrypt_context {
- u8 format;
- u8 contents_encryption_mode;
- u8 filenames_encryption_mode;
- u8 flags;
- u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
- u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
-} __packed;
-
-/* Encryption parameters */
-#define FS_XTS_TWEAK_SIZE 16
-#define FS_AES_128_ECB_KEY_SIZE 16
-#define FS_AES_256_GCM_KEY_SIZE 32
-#define FS_AES_256_CBC_KEY_SIZE 32
-#define FS_AES_256_CTS_KEY_SIZE 32
-#define FS_AES_256_XTS_KEY_SIZE 64
-#define FS_MAX_KEY_SIZE 64
-
-#define FS_KEY_DESC_PREFIX "fscrypt:"
-#define FS_KEY_DESC_PREFIX_SIZE 8
-
-/* This is passed in from userspace into the kernel keyring */
-struct fscrypt_key {
- u32 mode;
- u8 raw[FS_MAX_KEY_SIZE];
- u32 size;
-} __packed;
-
-struct fscrypt_info {
- u8 ci_data_mode;
- u8 ci_filename_mode;
- u8 ci_flags;
- struct crypto_skcipher *ci_ctfm;
- struct key *ci_keyring_key;
- u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
-};
+#define FS_CRYPTO_BLOCK_SIZE 16
-#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
-#define FS_WRITE_PATH_FL 0x00000002
+struct fscrypt_info;
struct fscrypt_ctx {
union {
@@ -102,19 +38,6 @@ struct fscrypt_ctx {
u8 mode; /* Encryption mode for tfm */
};
-struct fscrypt_completion_result {
- struct completion completion;
- int res;
-};
-
-#define DECLARE_FS_COMPLETION_RESULT(ecr) \
- struct fscrypt_completion_result ecr = { \
- COMPLETION_INITIALIZER((ecr).completion), 0 }
-
-#define FS_FNAME_NUM_SCATTER_ENTRIES 4
-#define FS_CRYPTO_BLOCK_SIZE 16
-#define FS_FNAME_CRYPTO_DIGEST_SIZE 32
-
/**
* For encrypted symlinks, the ciphertext length is stored at the beginning
* of the string in little-endian format.
@@ -154,9 +77,15 @@ struct fscrypt_name {
#define fname_len(p) ((p)->disk_name.len)
/*
+ * fscrypt superblock flags
+ */
+#define FS_CFLG_OWN_PAGES (1U << 1)
+
+/*
* crypto operations for filesystems
*/
struct fscrypt_operations {
+ unsigned int flags;
int (*get_context)(struct inode *, void *, size_t);
int (*key_prefix)(struct inode *, u8 **);
int (*prepare_context)(struct inode *);
@@ -206,7 +135,7 @@ static inline struct page *fscrypt_control_page(struct page *page)
#endif
}
-static inline int fscrypt_has_encryption_key(struct inode *inode)
+static inline int fscrypt_has_encryption_key(const struct inode *inode)
{
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
return (inode->i_crypt_info != NULL);
@@ -238,25 +167,25 @@ static inline void fscrypt_set_d_op(struct dentry *dentry)
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
/* crypto.c */
extern struct kmem_cache *fscrypt_info_cachep;
-int fscrypt_initialize(void);
-
-extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t);
+extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t);
-extern int fscrypt_decrypt_page(struct page *);
+extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
+ unsigned int, unsigned int,
+ u64, gfp_t);
+extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
+ unsigned int, u64);
extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
extern void fscrypt_pullback_bio_page(struct page **, bool);
extern void fscrypt_restore_control_page(struct page *);
-extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t,
+extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
unsigned int);
/* policy.c */
-extern int fscrypt_process_policy(struct file *, const struct fscrypt_policy *);
-extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *);
+extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
+extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
extern int fscrypt_inherit_context(struct inode *, struct inode *,
void *, bool);
/* keyinfo.c */
-extern int get_crypt_info(struct inode *);
extern int fscrypt_get_encryption_info(struct inode *);
extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
@@ -264,8 +193,8 @@ extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
int lookup, struct fscrypt_name *);
extern void fscrypt_free_filename(struct fscrypt_name *);
-extern u32 fscrypt_fname_encrypted_size(struct inode *, u32);
-extern int fscrypt_fname_alloc_buffer(struct inode *, u32,
+extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32);
+extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
struct fscrypt_str *);
extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
@@ -275,7 +204,7 @@ extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
#endif
/* crypto.c */
-static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i,
+static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(const struct inode *i,
gfp_t f)
{
return ERR_PTR(-EOPNOTSUPP);
@@ -286,13 +215,18 @@ static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c)
return;
}
-static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i,
- struct page *p, gfp_t f)
+static inline struct page *fscrypt_notsupp_encrypt_page(const struct inode *i,
+ struct page *p,
+ unsigned int len,
+ unsigned int offs,
+ u64 lblk_num, gfp_t f)
{
return ERR_PTR(-EOPNOTSUPP);
}
-static inline int fscrypt_notsupp_decrypt_page(struct page *p)
+static inline int fscrypt_notsupp_decrypt_page(const struct inode *i, struct page *p,
+ unsigned int len, unsigned int offs,
+ u64 lblk_num)
{
return -EOPNOTSUPP;
}
@@ -313,21 +247,21 @@ static inline void fscrypt_notsupp_restore_control_page(struct page *p)
return;
}
-static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p,
+static inline int fscrypt_notsupp_zeroout_range(const struct inode *i, pgoff_t p,
sector_t s, unsigned int f)
{
return -EOPNOTSUPP;
}
/* policy.c */
-static inline int fscrypt_notsupp_process_policy(struct file *f,
- const struct fscrypt_policy *p)
+static inline int fscrypt_notsupp_ioctl_set_policy(struct file *f,
+ const void __user *arg)
{
return -EOPNOTSUPP;
}
-static inline int fscrypt_notsupp_get_policy(struct inode *i,
- struct fscrypt_policy *p)
+static inline int fscrypt_notsupp_ioctl_get_policy(struct file *f,
+ void __user *arg)
{
return -EOPNOTSUPP;
}
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index f2912914141a..60cef8227534 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -100,6 +100,7 @@ struct fsl_usb2_platform_data {
unsigned already_suspended:1;
unsigned has_fsl_erratum_a007792:1;
unsigned has_fsl_erratum_a005275:1;
+ unsigned has_fsl_erratum_a005697:1;
unsigned check_phy_clk_valid:1;
/* register save area for suspend/resume */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index b2ec82712baa..28f38e2b8f30 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -231,7 +231,11 @@ struct hid_item {
#define HID_DG_TAP 0x000d0035
#define HID_DG_TABLETFUNCTIONKEY 0x000d0039
#define HID_DG_PROGRAMCHANGEKEY 0x000d003a
+#define HID_DG_BATTERYSTRENGTH 0x000d003b
#define HID_DG_INVERT 0x000d003c
+#define HID_DG_TILT_X 0x000d003d
+#define HID_DG_TILT_Y 0x000d003e
+#define HID_DG_TWIST 0x000d0041
#define HID_DG_TIPSWITCH 0x000d0042
#define HID_DG_TIPSWITCH2 0x000d0043
#define HID_DG_BARRELSWITCH 0x000d0044
@@ -479,6 +483,7 @@ struct hid_input {
struct list_head list;
struct hid_report *report;
struct input_dev *input;
+ bool registered;
};
enum hid_type {
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 9d2f8bde7d12..78d59dba563e 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -298,8 +298,8 @@ enum hwmon_pwm_attributes {
* Channel number
* The function returns the file permissions.
* If the return value is 0, no attribute will be created.
- * @read: Read callback. Optional. If not provided, attributes
- * will not be readable.
+ * @read: Read callback for data attributes. Mandatory if readable
+ * data attributes are present.
* Parameters are:
* @dev: Pointer to hardware monitoring device
* @type: Sensor type
@@ -308,8 +308,19 @@ enum hwmon_pwm_attributes {
* Channel number
* @val: Pointer to returned value
* The function returns 0 on success or a negative error number.
- * @write: Write callback. Optional. If not provided, attributes
- * will not be writable.
+ * @read_string:
+ * Read callback for string attributes. Mandatory if string
+ * attributes are present.
+ * Parameters are:
+ * @dev: Pointer to hardware monitoring device
+ * @type: Sensor type
+ * @attr: Sensor attribute
+ * @channel:
+ * Channel number
+ * @str: Pointer to returned string
+ * The function returns 0 on success or a negative error number.
+ * @write: Write callback for data attributes. Mandatory if writeable
+ * data attributes are present.
* Parameters are:
* @dev: Pointer to hardware monitoring device
* @type: Sensor type
@@ -324,6 +335,8 @@ struct hwmon_ops {
u32 attr, int channel);
int (*read)(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val);
+ int (*read_string)(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, char **str);
int (*write)(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val);
};
@@ -349,7 +362,9 @@ struct hwmon_chip_info {
const struct hwmon_channel_info **info;
};
+/* hwmon_device_register() is deprecated */
struct device *hwmon_device_register(struct device *dev);
+
struct device *
hwmon_device_register_with_groups(struct device *dev, const char *name,
void *drvdata,
@@ -362,12 +377,12 @@ struct device *
hwmon_device_register_with_info(struct device *dev,
const char *name, void *drvdata,
const struct hwmon_chip_info *info,
- const struct attribute_group **groups);
+ const struct attribute_group **extra_groups);
struct device *
devm_hwmon_device_register_with_info(struct device *dev,
- const char *name, void *drvdata,
- const struct hwmon_chip_info *info,
- const struct attribute_group **groups);
+ const char *name, void *drvdata,
+ const struct hwmon_chip_info *info,
+ const struct attribute_group **extra_groups);
void hwmon_device_unregister(struct device *dev);
void devm_hwmon_device_unregister(struct device *dev);
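A chip-driver sketch of the split read()/read_string() callbacks documented above, modelling a single temperature channel with a canned value and label; everything here is illustrative.

static umode_t example_is_visible(const void *data,
				  enum hwmon_sensor_types type,
				  u32 attr, int channel)
{
	return 0444;			/* everything read-only */
}

static int example_read(struct device *dev, enum hwmon_sensor_types type,
			u32 attr, int channel, long *val)
{
	if (type == hwmon_temp && attr == hwmon_temp_input) {
		*val = 42000;		/* millidegrees Celsius */
		return 0;
	}
	return -EOPNOTSUPP;
}

static int example_read_string(struct device *dev,
			       enum hwmon_sensor_types type,
			       u32 attr, int channel, char **str)
{
	if (type == hwmon_temp && attr == hwmon_temp_label) {
		*str = "die";
		return 0;
	}
	return -EOPNOTSUPP;
}

static const struct hwmon_ops example_hwmon_ops = {
	.is_visible	= example_is_visible,
	.read		= example_read,
	.read_string	= example_read_string,
};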
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index cd184bdca58f..42fe43fb0c80 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -696,7 +696,7 @@ enum vmbus_device_type {
HV_FCOPY,
HV_BACKUP,
HV_DM,
- HV_UNKOWN,
+ HV_UNKNOWN,
};
struct vmbus_device {
@@ -1119,6 +1119,12 @@ struct hv_driver {
struct device_driver driver;
+ /* dynamic device GUID's */
+ struct {
+ spinlock_t lock;
+ struct list_head list;
+ } dynids;
+
int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
int (*remove)(struct hv_device *);
void (*shutdown)(struct hv_device *);
@@ -1447,6 +1453,7 @@ void hv_event_tasklet_enable(struct vmbus_channel *channel);
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
+void vmbus_setevent(struct vmbus_channel *channel);
/*
* Negotiated version with the Host.
*/
@@ -1479,10 +1486,11 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
* there is room for the producer to send the pending packet.
*/
-static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
+static inline void hv_signal_on_read(struct vmbus_channel *channel)
{
u32 cur_write_sz;
u32 pending_sz;
+ struct hv_ring_buffer_info *rbi = &channel->inbound;
/*
* Issue a full memory barrier before making the signaling decision.
@@ -1500,14 +1508,14 @@ static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
/* If the other end is not blocked on write don't bother. */
if (pending_sz == 0)
- return false;
+ return;
cur_write_sz = hv_get_bytes_to_write(rbi);
if (cur_write_sz >= pending_sz)
- return true;
+ vmbus_setevent(channel);
- return false;
+ return;
}
/*
@@ -1519,31 +1527,23 @@ static inline struct vmpacket_descriptor *
get_next_pkt_raw(struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *ring_info = &channel->inbound;
- u32 read_loc = ring_info->priv_read_index;
+ u32 priv_read_loc = ring_info->priv_read_index;
void *ring_buffer = hv_get_ring_buffer(ring_info);
- struct vmpacket_descriptor *cur_desc;
- u32 packetlen;
u32 dsize = ring_info->ring_datasize;
- u32 delta = read_loc - ring_info->ring_buffer->read_index;
+ /*
+ * delta is the difference between what is available to read and
+ * what was already consumed in place. We commit read index after
+ * the whole batch is processed.
+ */
+ u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ?
+ priv_read_loc - ring_info->ring_buffer->read_index :
+ (dsize - ring_info->ring_buffer->read_index) + priv_read_loc;
u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);
if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
return NULL;
- if ((read_loc + sizeof(*cur_desc)) > dsize)
- return NULL;
-
- cur_desc = ring_buffer + read_loc;
- packetlen = cur_desc->len8 << 3;
-
- /*
- * If the packet under consideration is wrapping around,
- * return failure.
- */
- if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1))
- return NULL;
-
- return cur_desc;
+ return ring_buffer + priv_read_loc;
}
/*
@@ -1555,16 +1555,14 @@ static inline void put_pkt_raw(struct vmbus_channel *channel,
struct vmpacket_descriptor *desc)
{
struct hv_ring_buffer_info *ring_info = &channel->inbound;
- u32 read_loc = ring_info->priv_read_index;
u32 packetlen = desc->len8 << 3;
u32 dsize = ring_info->ring_datasize;
- if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize)
- BUG();
/*
* Include the packet trailer.
*/
ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
+ ring_info->priv_read_index %= dsize;
}
/*
@@ -1589,8 +1587,7 @@ static inline void commit_rd_index(struct vmbus_channel *channel)
virt_rmb();
ring_info->ring_buffer->read_index = ring_info->priv_read_index;
- if (hv_need_to_signal_on_read(ring_info))
- vmbus_set_event(channel);
+ hv_signal_on_read(channel);
}
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index e7fdec4db9da..5ba430cc9a87 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -136,6 +136,7 @@ int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_OFFSET), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
.scan_index = (_si), \
.scan_type = { \
.sign = 'u', \
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 9edccfba1ffb..47eeec3218b5 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -226,6 +226,34 @@ int iio_read_channel_processed(struct iio_channel *chan, int *val);
int iio_write_channel_raw(struct iio_channel *chan, int val);
/**
+ * iio_read_max_channel_raw() - read maximum available raw value from a given
+ * channel, i.e. the maximum possible value.
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ *
+ * Note raw reads from iio channels are in adc counts and hence
+ * scale will need to be applied if standard units are required.
+ */
+int iio_read_max_channel_raw(struct iio_channel *chan, int *val);
+
+/**
+ * iio_read_avail_channel_raw() - read available raw values from a given channel
+ * @chan: The channel being queried.
+ * @vals: Available values read back.
+ * @length: Number of entries in vals.
+ *
+ * Returns an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST.
+ *
+ * For ranges, three vals are always returned; min, step and max.
+ * For lists, all the possible values are enumerated.
+ *
+ * Note raw available values from iio channels are in adc counts and
+ * hence scale will need to be applied if standard units are required.
+ */
+int iio_read_avail_channel_raw(struct iio_channel *chan,
+ const int **vals, int *length);
+
+/**
* iio_get_channel_type() - get the type of a channel
* @channel: The channel being queried.
* @type: The type of the channel.
@@ -236,6 +264,19 @@ int iio_get_channel_type(struct iio_channel *channel,
enum iio_chan_type *type);
/**
+ * iio_read_channel_offset() - read the offset value for a channel
+ * @chan: The channel being queried.
+ * @val: First part of value read back.
+ * @val2: Second part of value read back.
+ *
+ * Note returns a description of what is in val and val2, such
+ * as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val
+ * + val2/1e6
+ */
+int iio_read_channel_offset(struct iio_channel *chan, int *val,
+ int *val2);
+
+/**
* iio_read_channel_scale() - read the scale value for a channel
* @chan: The channel being queried.
* @val: First part of value read back.
diff --git a/include/linux/iio/dac/mcp4725.h b/include/linux/iio/dac/mcp4725.h
index 91530e6611e9..628b2cf54c50 100644
--- a/include/linux/iio/dac/mcp4725.h
+++ b/include/linux/iio/dac/mcp4725.h
@@ -9,8 +9,18 @@
#ifndef IIO_DAC_MCP4725_H_
#define IIO_DAC_MCP4725_H_
+/**
+ * struct mcp4725_platform_data - MCP4725/6 DAC specific data.
+ * @use_vref: Whether an external reference voltage on Vref pin should be used.
+ * Additional vref-supply must be specified when used.
+ * @vref_buffered: Controls buffering of the external reference voltage.
+ *
+ * Vref related settings are available only on MCP4756. See
+ * Documentation/devicetree/bindings/iio/dac/mcp4725.txt for more information.
+ */
struct mcp4725_platform_data {
- u16 vref_mv;
+ bool use_vref;
+ bool vref_buffered;
};
#endif /* IIO_DAC_MCP4725_H_ */
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index b4a0679e4a49..3f5ea2e9a39e 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -225,12 +225,22 @@ struct iio_event_spec {
* endianness: little or big endian
* @info_mask_separate: What information is to be exported that is specific to
* this channel.
+ * @info_mask_separate_available: What availability information is to be
+ * exported that is specific to this channel.
* @info_mask_shared_by_type: What information is to be exported that is shared
* by all channels of the same type.
+ * @info_mask_shared_by_type_available: What availability information is to be
+ * exported that is shared by all channels of the same
+ * type.
* @info_mask_shared_by_dir: What information is to be exported that is shared
* by all channels of the same direction.
+ * @info_mask_shared_by_dir_available: What availability information is to be
+ * exported that is shared by all channels of the same
+ * direction.
* @info_mask_shared_by_all: What information is to be exported that is shared
* by all channels.
+ * @info_mask_shared_by_all_available: What availability information is to be
+ * exported that is shared by all channels.
* @event_spec: Array of events which should be registered for this
* channel.
* @num_event_specs: Size of the event_spec array.
@@ -269,9 +279,13 @@ struct iio_chan_spec {
enum iio_endian endianness;
} scan_type;
long info_mask_separate;
+ long info_mask_separate_available;
long info_mask_shared_by_type;
+ long info_mask_shared_by_type_available;
long info_mask_shared_by_dir;
+ long info_mask_shared_by_dir_available;
long info_mask_shared_by_all;
+ long info_mask_shared_by_all_available;
const struct iio_event_spec *event_spec;
unsigned int num_event_specs;
const struct iio_chan_spec_ext_info *ext_info;
@@ -301,6 +315,23 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
(chan->info_mask_shared_by_all & BIT(type));
}
+/**
+ * iio_channel_has_available() - Checks if a channel has an available attribute
+ * @chan: The channel to be queried
+ * @type: Type of the available attribute to be checked
+ *
+ * Returns true if the channel supports reporting available values for the
+ * given attribute type, false otherwise.
+ */
+static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
+ enum iio_chan_info_enum type)
+{
+ return (chan->info_mask_separate_available & BIT(type)) |
+ (chan->info_mask_shared_by_type_available & BIT(type)) |
+ (chan->info_mask_shared_by_dir_available & BIT(type)) |
+ (chan->info_mask_shared_by_all_available & BIT(type));
+}
+
#define IIO_CHAN_SOFT_TIMESTAMP(_si) { \
.type = IIO_TIMESTAMP, \
.channel = -1, \
@@ -349,6 +380,14 @@ struct iio_dev;
* max_len specifies maximum number of elements
* vals pointer can contain. val_len is used to return
* length of valid elements in vals.
+ * @read_avail: function to return the available values from the device.
+ * mask specifies which value. Note 0 means the available
+ * values for the channel in question. Return value
* specifies whether an IIO_AVAIL_LIST or an IIO_AVAIL_RANGE is
+ * returned in vals. The type of the vals are returned in
+ * type and the number of vals is returned in length. For
+ * ranges, there are always three vals returned; min, step
+ * and max. For lists, all possible values are enumerated.
* @write_raw: function to write a value to the device.
* Parameters are the same as for read_raw.
* @write_raw_get_fmt: callback function to query the expected
@@ -381,7 +420,7 @@ struct iio_dev;
**/
struct iio_info {
struct module *driver_module;
- struct attribute_group *event_attrs;
+ const struct attribute_group *event_attrs;
const struct attribute_group *attrs;
int (*read_raw)(struct iio_dev *indio_dev,
@@ -397,6 +436,13 @@ struct iio_info {
int *val_len,
long mask);
+ int (*read_avail)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals,
+ int *type,
+ int *length,
+ long mask);
+
int (*write_raw)(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val,
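A driver sketch of the new read_avail() hook, advertising a fixed list of sample frequencies; the table and names are invented for illustration.

static const int example_samp_freq_avail[] = { 100, 200, 400, 800 };

static int example_read_avail(struct iio_dev *indio_dev,
			      struct iio_chan_spec const *chan,
			      const int **vals, int *type, int *length,
			      long mask)
{
	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		*vals = example_samp_freq_avail;
		*type = IIO_VAL_INT;
		*length = ARRAY_SIZE(example_samp_freq_avail);
		return IIO_AVAIL_LIST;
	default:
		return -EINVAL;
	}
}

/* the channel opts in through the matching availability mask, e.g.
 *	.info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 */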
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h
index 9cd8f747212f..ce9426c507fd 100644
--- a/include/linux/iio/sysfs.h
+++ b/include/linux/iio/sysfs.h
@@ -55,10 +55,34 @@ struct iio_const_attr {
{ .dev_attr = __ATTR(_name, _mode, _show, _store), \
.address = _addr }
+#define IIO_ATTR_RO(_name, _addr) \
+ { .dev_attr = __ATTR_RO(_name), \
+ .address = _addr }
+
+#define IIO_ATTR_WO(_name, _addr) \
+ { .dev_attr = __ATTR_WO(_name), \
+ .address = _addr }
+
+#define IIO_ATTR_RW(_name, _addr) \
+ { .dev_attr = __ATTR_RW(_name), \
+ .address = _addr }
+
#define IIO_DEVICE_ATTR(_name, _mode, _show, _store, _addr) \
struct iio_dev_attr iio_dev_attr_##_name \
= IIO_ATTR(_name, _mode, _show, _store, _addr)
+#define IIO_DEVICE_ATTR_RO(_name, _addr) \
+ struct iio_dev_attr iio_dev_attr_##_name \
+ = IIO_ATTR_RO(_name, _addr)
+
+#define IIO_DEVICE_ATTR_WO(_name, _addr) \
+ struct iio_dev_attr iio_dev_attr_##_name \
+ = IIO_ATTR_WO(_name, _addr)
+
+#define IIO_DEVICE_ATTR_RW(_name, _addr) \
+ struct iio_dev_attr iio_dev_attr_##_name \
+ = IIO_ATTR_RW(_name, _addr)
+
#define IIO_DEVICE_ATTR_NAMED(_vname, _name, _mode, _show, _store, _addr) \
struct iio_dev_attr iio_dev_attr_##_vname \
= IIO_ATTR(_name, _mode, _show, _store, _addr)
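A hypothetical read-only attribute using the new shorthand; __ATTR_RO() underneath expects a show routine named <name>_show, so the naming below is deliberate.

static ssize_t conversion_mode_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "continuous\n");
}

static IIO_DEVICE_ATTR_RO(conversion_mode, 0);

/* referenced from the driver's attribute group as
 *	&iio_dev_attr_conversion_mode.dev_attr.attr
 */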
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 4f1154f7a33c..ea08302f2d7b 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -170,6 +170,8 @@ void iio_trigger_free(struct iio_trigger *trig);
*/
bool iio_trigger_using_own(struct iio_dev *indio_dev);
+int iio_trigger_validate_own_device(struct iio_trigger *trig,
+ struct iio_dev *indio_dev);
#else
struct iio_trigger;
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 32b579525004..2aa7b6384d64 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -29,4 +29,9 @@ enum iio_event_info {
#define IIO_VAL_FRACTIONAL 10
#define IIO_VAL_FRACTIONAL_LOG2 11
+enum iio_available_type {
+ IIO_AVAIL_LIST,
+ IIO_AVAIL_RANGE,
+};
+
#endif /* _IIO_TYPES_H_ */
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 7892f55a1866..f185156de74d 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -49,6 +49,7 @@ struct iomap {
#define IOMAP_WRITE (1 << 0) /* writing, must allocate blocks */
#define IOMAP_ZERO (1 << 1) /* zeroing operation, may skip holes */
#define IOMAP_REPORT (1 << 2) /* report extent status, e.g. FIEMAP */
+#define IOMAP_FAULT (1 << 3) /* mapping for page fault */
struct iomap_ops {
/*
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 5118d3a0c9ca..e808f8ae6f14 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -295,10 +295,10 @@
#define GITS_BASER_InnerShareable \
GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
#define GITS_BASER_PAGE_SIZE_SHIFT (8)
-#define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_4K (0ULL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_16K (1ULL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_64K (2ULL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_MASK (3ULL << GITS_BASER_PAGE_SIZE_SHIFT)
#define GITS_BASER_PAGES_MAX 256
#define GITS_BASER_PAGES_SHIFT (0)
#define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 01b6b460c34d..d234cd31e75a 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -45,6 +45,7 @@
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
+/* @a is a power of 2 value */
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 81ba3ba641ba..1c5190dab2c1 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -438,6 +438,9 @@ struct kvm {
pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
+#define kvm_debug_ratelimited(fmt, ...) \
+ pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
+ ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
pr_err_ratelimited("kvm [%i]: " fmt, \
task_tgid_nr(current), ## __VA_ARGS__)
@@ -449,6 +452,9 @@ struct kvm {
#define vcpu_debug(vcpu, fmt, ...) \
kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+#define vcpu_debug_ratelimited(vcpu, fmt, ...) \
+ kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
+ ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...) \
kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
@@ -1108,6 +1114,10 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
extern bool kvm_rebooting;
+extern unsigned int halt_poll_ns;
+extern unsigned int halt_poll_ns_grow;
+extern unsigned int halt_poll_ns_shrink;
+
struct kvm_device {
struct kvm_device_ops *ops;
struct kvm *kvm;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 616eef4d81ea..c170be548b7f 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -166,6 +166,8 @@ enum {
ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */
ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */
+ ATA_DFLAG_NCQ_PRIO = (1 << 20), /* device supports NCQ priority */
+ ATA_DFLAG_NCQ_PRIO_ENABLE = (1 << 21), /* Priority cmds sent to dev */
ATA_DFLAG_INIT_MASK = (1 << 24) - 1,
ATA_DFLAG_DETACH = (1 << 24),
@@ -342,7 +344,9 @@ enum {
ATA_SHIFT_PIO = 0,
ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_NR_PIO_MODES,
ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_NR_MWDMA_MODES,
+ ATA_SHIFT_PRIO = 6,
+ ATA_PRIO_HIGH = 2,
/* size of buffer to pad xfers ending on unaligned boundaries */
ATA_DMA_PAD_SZ = 4,
@@ -542,6 +546,7 @@ typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes)
extern struct device_attribute dev_attr_link_power_management_policy;
extern struct device_attribute dev_attr_unload_heads;
+extern struct device_attribute dev_attr_ncq_prio_enable;
extern struct device_attribute dev_attr_em_message_type;
extern struct device_attribute dev_attr_em_message;
extern struct device_attribute dev_attr_sw_activity;
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index e746919530f5..a0d274fe08f1 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -8,8 +8,7 @@
struct mei_cl_device;
struct mei_device;
-typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev,
- u32 events, void *context);
+typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev);
/**
* struct mei_cl_device - MEI device handle
@@ -24,12 +23,12 @@ typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev,
* @me_cl: me client
* @cl: mei client
* @name: device name
- * @event_work: async work to execute event callback
- * @event_cb: Drivers register this callback to get asynchronous ME
- * events (e.g. Rx buffer pending) notifications.
- * @event_context: event callback run context
- * @events_mask: Events bit mask requested by driver.
- * @events: Events bitmask sent to the driver.
+ * @rx_work: async work to execute Rx event callback
+ * @rx_cb: Drivers register this callback to get asynchronous ME
+ * Rx buffer pending notifications.
+ * @notif_work: async work to execute FW notif event callback
+ * @notif_cb: Drivers register this callback to get asynchronous ME
+ * FW notification pending notifications.
*
* @do_match: whether device can be matched with a driver
* @is_added: device is already scanned
@@ -44,11 +43,10 @@ struct mei_cl_device {
struct mei_cl *cl;
char name[MEI_CL_NAME_SIZE];
- struct work_struct event_work;
- mei_cldev_event_cb_t event_cb;
- void *event_context;
- unsigned long events_mask;
- unsigned long events;
+ struct work_struct rx_work;
+ mei_cldev_cb_t rx_cb;
+ struct work_struct notif_work;
+ mei_cldev_cb_t notif_cb;
unsigned int do_match:1;
unsigned int is_added:1;
@@ -74,16 +72,27 @@ int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv);
-ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length);
-ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
+/**
+ * module_mei_cl_driver - Helper macro for registering mei cl driver
+ *
+ * @__mei_cldrv: mei_cl_driver structure
+ *
+ * Helper macro for mei cl drivers which do not do anything special in module
+ * init/exit, for eliminating boilerplate code.
+ */
+#define module_mei_cl_driver(__mei_cldrv) \
+ module_driver(__mei_cldrv, \
+ mei_cldev_driver_register,\
+ mei_cldev_driver_unregister)
-int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
- unsigned long event_mask,
- mei_cldev_event_cb_t read_cb, void *context);
+ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length);
+ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
+ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
+ size_t length);
-#define MEI_CL_EVENT_RX 0
-#define MEI_CL_EVENT_TX 1
-#define MEI_CL_EVENT_NOTIF 2
+int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb);
+int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
+ mei_cldev_cb_t notif_cb);
const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev);
u8 mei_cldev_ver(const struct mei_cl_device *cldev);
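Taken together, the reworked Rx callback registration and the new module_mei_cl_driver() helper suggest the following driver skeleton. This is a minimal sketch rather than code from the patch; every my_* identifier is hypothetical and error paths are trimmed:

    #include <linux/module.h>
    #include <linux/mei_cl_bus.h>

    static void my_rx(struct mei_cl_device *cldev)
    {
            u8 buf[128];
            ssize_t len = mei_cldev_recv(cldev, buf, sizeof(buf));

            if (len > 0)
                    ; /* consume the received payload */
    }

    static int my_probe(struct mei_cl_device *cldev,
                        const struct mei_cl_device_id *id)
    {
            int ret = mei_cldev_enable(cldev);

            if (ret)
                    return ret;
            return mei_cldev_register_rx_cb(cldev, my_rx);
    }

    static struct mei_cl_driver my_mei_driver = {
            .name = "my_mei_drv",
            .probe = my_probe,
            /* .remove and .id_table omitted for brevity */
    };
    module_mei_cl_driver(my_mei_driver);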
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 76f7ef4d3a0d..f62043a75f43 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -148,6 +148,15 @@ struct cros_ec_device {
int event_size;
};
+/**
+ * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information
+ *
+ * @sensor_num: Id of the sensor, as reported by the EC.
+ */
+struct cros_ec_sensor_platform {
+ u8 sensor_num;
+};
+
/* struct cros_ec_platform - ChromeOS EC platform information
*
* @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
@@ -175,6 +184,7 @@ struct cros_ec_dev {
struct cros_ec_device *ec_dev;
struct device *dev;
u16 cmd_offset;
+ u32 features[2];
};
/**
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 76728ff37d01..1683003603f3 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -713,6 +713,90 @@ struct ec_response_get_set_value {
/* More than one command can use these structs to get/set parameters. */
#define EC_CMD_GSV_PAUSE_IN_S5 0x0c
+/*****************************************************************************/
+/* List the features supported by the firmware */
+#define EC_CMD_GET_FEATURES 0x0d
+
+/* Supported features */
+enum ec_feature_code {
+ /*
+ * This image contains a limited set of features. Another image
+ * in RW partition may support more features.
+ */
+ EC_FEATURE_LIMITED = 0,
+ /*
+ * Commands for probing/reading/writing/erasing the flash in the
+ * EC are present.
+ */
+ EC_FEATURE_FLASH = 1,
+ /*
+ * Can control the fan speed directly.
+ */
+ EC_FEATURE_PWM_FAN = 2,
+ /*
+ * Can control the intensity of the keyboard backlight.
+ */
+ EC_FEATURE_PWM_KEYB = 3,
+ /*
+ * Support Google lightbar, introduced on Pixel.
+ */
+ EC_FEATURE_LIGHTBAR = 4,
+ /* Control of LEDs */
+ EC_FEATURE_LED = 5,
+ /* Exposes an interface to control gyro and sensors.
+ * The host goes through the EC to access these sensors.
+ * In addition, the EC may provide composite sensors, like lid angle.
+ */
+ EC_FEATURE_MOTION_SENSE = 6,
+ /* The keyboard is controlled by the EC */
+ EC_FEATURE_KEYB = 7,
+ /* The AP can use part of the EC flash as persistent storage. */
+ EC_FEATURE_PSTORE = 8,
+ /* The EC monitors BIOS port 80h, and can return POST codes. */
+ EC_FEATURE_PORT80 = 9,
+ /*
+ * Thermal management: includes TMP specific commands.
+ * Higher level than direct fan control.
+ */
+ EC_FEATURE_THERMAL = 10,
+ /* Can switch the screen backlight on/off */
+ EC_FEATURE_BKLIGHT_SWITCH = 11,
+ /* Can switch the wifi module on/off */
+ EC_FEATURE_WIFI_SWITCH = 12,
+ /* Monitor host events, through for example SMI or SCI */
+ EC_FEATURE_HOST_EVENTS = 13,
+ /* The EC exposes GPIO commands to control/monitor connected devices. */
+ EC_FEATURE_GPIO = 14,
+ /* The EC can send i2c messages to downstream devices. */
+ EC_FEATURE_I2C = 15,
+ /* Commands to control the charger are included */
+ EC_FEATURE_CHARGER = 16,
+ /* Simple battery support. */
+ EC_FEATURE_BATTERY = 17,
+ /*
+ * Support Smart battery protocol
+ * (Common Smart Battery System Interface Specification)
+ */
+ EC_FEATURE_SMART_BATTERY = 18,
+ /* EC can detect when the host hangs. */
+ EC_FEATURE_HANG_DETECT = 19,
+ /* Report power information, for pit only */
+ EC_FEATURE_PMU = 20,
+ /* Another Cros EC device is present downstream of this one */
+ EC_FEATURE_SUB_MCU = 21,
+ /* Support USB Power delivery (PD) commands */
+ EC_FEATURE_USB_PD = 22,
+ /* Control USB multiplexer, for audio through USB port for instance. */
+ EC_FEATURE_USB_MUX = 23,
+ /* Motion Sensor code has an internal software FIFO */
+ EC_FEATURE_MOTION_SENSE_FIFO = 24,
+};
+
+#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32))
+#define EC_FEATURE_MASK_1(event_code) (1UL << (event_code - 32))
+struct ec_response_get_features {
+ uint32_t flags[2];
+} __packed;
/*****************************************************************************/
/* Flash commands */
@@ -1315,6 +1399,24 @@ enum motionsense_command {
*/
MOTIONSENSE_CMD_KB_WAKE_ANGLE = 5,
+ /*
+ * Returns a single sensor data.
+ */
+ MOTIONSENSE_CMD_DATA = 6,
+
+ /*
+ * Perform low level calibration. On sensors that support it, ask to
+ * do offset calibration.
+ */
+ MOTIONSENSE_CMD_PERFORM_CALIB = 10,
+
+ /*
+ * Sensor Offset command is a setter/getter command for the offset used
+ * for calibration. The offsets can be calculated by the host, or via
+ * PERFORM_CALIB command.
+ */
+ MOTIONSENSE_CMD_SENSOR_OFFSET = 11,
+
/* Number of motionsense sub-commands. */
MOTIONSENSE_NUM_CMDS
};
@@ -1335,12 +1437,18 @@ enum motionsensor_id {
enum motionsensor_type {
MOTIONSENSE_TYPE_ACCEL = 0,
MOTIONSENSE_TYPE_GYRO = 1,
+ MOTIONSENSE_TYPE_MAG = 2,
+ MOTIONSENSE_TYPE_PROX = 3,
+ MOTIONSENSE_TYPE_LIGHT = 4,
+ MOTIONSENSE_TYPE_ACTIVITY = 5,
+ MOTIONSENSE_TYPE_MAX
};
/* List of motion sensor locations. */
enum motionsensor_location {
MOTIONSENSE_LOC_BASE = 0,
MOTIONSENSE_LOC_LID = 1,
+ MOTIONSENSE_LOC_MAX,
};
/* List of motion sensor chips. */
@@ -1361,6 +1469,31 @@ enum motionsensor_chip {
*/
#define EC_MOTION_SENSE_NO_VALUE -1
+#define EC_MOTION_SENSE_INVALID_CALIB_TEMP 0x8000
+
+/* Set Calibration information */
+#define MOTION_SENSE_SET_OFFSET 1
+
+struct ec_response_motion_sensor_data {
+ /* Flags for each sensor. */
+ uint8_t flags;
+ /* Sensor number the data comes from */
+ uint8_t sensor_num;
+ /* Each sensor is up to 3-axis. */
+ union {
+ int16_t data[3];
+ struct {
+ uint16_t rsvd;
+ uint32_t timestamp;
+ } __packed;
+ struct {
+ uint8_t activity; /* motionsensor_activity */
+ uint8_t state;
+ int16_t add_info[2];
+ };
+ };
+} __packed;
+
struct ec_params_motion_sense {
uint8_t cmd;
union {
@@ -1378,9 +1511,37 @@ struct ec_params_motion_sense {
int16_t data;
} ec_rate, kb_wake_angle;
+ /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */
+ struct {
+ uint8_t sensor_num;
+
+ /*
+ * bit 0: If set (MOTION_SENSE_SET_OFFSET), set
+ * the calibration information in the EC.
+ * If unset, just retrieve calibration information.
+ */
+ uint16_t flags;
+
+ /*
+ * Temperature at calibration, in units of 0.01 C
+ * 0x8000: invalid / unknown.
+ * 0x0: 0C
+ * 0x7fff: +327.67C
+ */
+ int16_t temp;
+
+ /*
+ * Offset for calibration.
+ * Unit:
+ * Accelerometer: 1/1024 g
+ * Gyro: 1/1024 deg/s
+ * Compass: 1/16 uT
+ */
+ int16_t offset[3];
+ } __packed sensor_offset;
+
/* Used for MOTIONSENSE_CMD_INFO. */
struct {
- /* Should be element of enum motionsensor_id. */
uint8_t sensor_num;
} info;
@@ -1410,11 +1571,14 @@ struct ec_response_motion_sense {
/* Flags representing the motion sensor module. */
uint8_t module_flags;
- /* Flags for each sensor in enum motionsensor_id. */
- uint8_t sensor_flags[EC_MOTION_SENSOR_COUNT];
+ /* Number of sensors managed directly by the EC. */
+ uint8_t sensor_count;
- /* Array of all sensor data. Each sensor is 3-axis. */
- int16_t data[3*EC_MOTION_SENSOR_COUNT];
+ /*
+ * Sensor data is truncated if response_max is too small
+ * for holding all the data.
+ */
+ struct ec_response_motion_sensor_data sensor[0];
} dump;
/* Used for MOTIONSENSE_CMD_INFO. */
@@ -1429,6 +1593,9 @@ struct ec_response_motion_sense {
uint8_t chip;
} info;
+ /* Used for MOTIONSENSE_CMD_DATA */
+ struct ec_response_motion_sensor_data data;
+
/*
* Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR,
* MOTIONSENSE_CMD_SENSOR_RANGE, and
@@ -1438,6 +1605,12 @@ struct ec_response_motion_sense {
/* Current value of the parameter queried. */
int32_t ret;
} ec_rate, sensor_odr, sensor_range, kb_wake_angle;
+
+ /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */
+ struct {
+ int16_t temp;
+ int16_t offset[3];
+ } sensor_offset, perform_calib;
};
} __packed;
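EC_CMD_GET_FEATURES returns two 32-bit flag words, and the EC_FEATURE_MASK_* macros split a feature code across them. A hedged sketch of how a consumer might test a bit once the words are cached in cros_ec_dev->features[]; the helper name and the assumption that the words are already populated are not from the patch:

    #include <linux/mfd/cros_ec.h>
    #include <linux/mfd/cros_ec_commands.h>

    static bool my_ec_has_feature(struct cros_ec_dev *ec, enum ec_feature_code f)
    {
            if (f < 32)
                    return ec->features[0] & EC_FEATURE_MASK_0(f);
            return ec->features[1] & EC_FEATURE_MASK_1(f);
    }

    /* e.g.: my_ec_has_feature(ec, EC_FEATURE_MOTION_SENSE_FIFO) */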
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 7f55b8b41032..b9a53e013bff 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -23,6 +23,8 @@
#define REG_IRQENABLE 0x02C
#define REG_IRQCLR 0x030
#define REG_IRQWAKEUP 0x034
+#define REG_DMAENABLE_SET 0x038
+#define REG_DMAENABLE_CLEAR 0x03c
#define REG_CTRL 0x040
#define REG_ADCFSM 0x044
#define REG_CLKDIV 0x04C
@@ -36,6 +38,7 @@
#define REG_FIFO0THR 0xE8
#define REG_FIFO1CNT 0xF0
#define REG_FIFO1THR 0xF4
+#define REG_DMA1REQ 0xF8
#define REG_FIFO0 0x100
#define REG_FIFO1 0x200
@@ -126,6 +129,10 @@
#define FIFOREAD_DATA_MASK (0xfff << 0)
#define FIFOREAD_CHNLID_MASK (0xf << 16)
+/* DMA ENABLE/CLEAR Register */
+#define DMA_FIFO0 BIT(0)
+#define DMA_FIFO1 BIT(1)
+
/* Sequencer Status */
#define SEQ_STATUS BIT(5)
#define CHARGE_STEP 0x11
@@ -155,6 +162,7 @@ struct ti_tscadc_dev {
struct device *dev;
struct regmap *regmap;
void __iomem *tscadc_base;
+ phys_addr_t tscadc_phys_base;
int irq;
int used_cells; /* 1-2 */
int tsc_wires;
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 722698a43d79..a426cb55dc43 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -72,6 +72,13 @@ extern int misc_register(struct miscdevice *misc);
extern void misc_deregister(struct miscdevice *misc);
/*
+ * Helper macro for drivers that don't do anything special in the initcall.
+ * This helps in eliminating boilerplate code.
+ */
+#define builtin_misc_device(__misc_device) \
+ builtin_driver(__misc_device, misc_register)
+
+/*
* Helper macro for drivers that don't do anything special in module init / exit
* call. This helps in eliminating boilerplate code.
*/
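builtin_misc_device() is the initcall-only counterpart of the module_misc_device() helper described just below it. A minimal sketch of built-in-only usage; the device name and the (empty) fops are hypothetical:

    #include <linux/fs.h>
    #include <linux/miscdevice.h>

    static const struct file_operations my_misc_fops = {
            .owner = THIS_MODULE,
    };

    static struct miscdevice my_misc = {
            .minor = MISC_DYNAMIC_MINOR,
            .name  = "my_misc",
            .fops  = &my_misc_fops,
    };
    builtin_misc_device(my_misc);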
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index 6f47562d477b..50a7dbe88cf3 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -896,7 +896,7 @@ static inline int ntb_spad_is_unsafe(struct ntb_dev *ntb)
}
/**
- * ntb_mw_count() - get the number of scratchpads
+ * ntb_spad_count() - get the number of scratchpads
* @ntb: NTB device context.
*
* Hardware and topology may support a different number of scratchpads.
diff --git a/include/linux/of.h b/include/linux/of.h
index 299aeb192727..d72f01009297 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -1266,6 +1266,18 @@ static inline bool of_device_is_system_power_controller(const struct device_node
* Overlay support
*/
+enum of_overlay_notify_action {
+ OF_OVERLAY_PRE_APPLY,
+ OF_OVERLAY_POST_APPLY,
+ OF_OVERLAY_PRE_REMOVE,
+ OF_OVERLAY_POST_REMOVE,
+};
+
+struct of_overlay_notify_data {
+ struct device_node *overlay;
+ struct device_node *target;
+};
+
#ifdef CONFIG_OF_OVERLAY
/* ID based overlays; the API for external users */
@@ -1273,6 +1285,9 @@ int of_overlay_create(struct device_node *tree);
int of_overlay_destroy(int id);
int of_overlay_destroy_all(void);
+int of_overlay_notifier_register(struct notifier_block *nb);
+int of_overlay_notifier_unregister(struct notifier_block *nb);
+
#else
static inline int of_overlay_create(struct device_node *tree)
@@ -1290,6 +1305,16 @@ static inline int of_overlay_destroy_all(void)
return -ENOTSUPP;
}
+static inline int of_overlay_notifier_register(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int of_overlay_notifier_unregister(struct notifier_block *nb)
+{
+ return 0;
+}
+
#endif
#endif /* _LINUX_OF_H */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index c58752fe16c4..a5e6c7bca610 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -23,8 +23,10 @@
#define PCI_CLASS_STORAGE_SATA 0x0106
#define PCI_CLASS_STORAGE_SATA_AHCI 0x010601
#define PCI_CLASS_STORAGE_SAS 0x0107
+#define PCI_CLASS_STORAGE_EXPRESS 0x010802
#define PCI_CLASS_STORAGE_OTHER 0x0180
+
#define PCI_BASE_CLASS_NETWORK 0x02
#define PCI_CLASS_NETWORK_ETHERNET 0x0200
#define PCI_CLASS_NETWORK_TOKEN_RING 0x0201
diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h
index 9d18e9f948e9..35c070ea6ea3 100644
--- a/include/linux/phy/phy-qcom-ufs.h
+++ b/include/linux/phy/phy-qcom-ufs.h
@@ -18,22 +18,6 @@
#include "phy.h"
/**
- * ufs_qcom_phy_enable_ref_clk() - Enable the phy
- * ref clock.
- * @phy: reference to a generic phy
- *
- * returns 0 for success, and non-zero for error.
- */
-int ufs_qcom_phy_enable_ref_clk(struct phy *phy);
-
-/**
- * ufs_qcom_phy_disable_ref_clk() - Disable the phy
- * ref clock.
- * @phy: reference to a generic phy.
- */
-void ufs_qcom_phy_disable_ref_clk(struct phy *phy);
-
-/**
* ufs_qcom_phy_enable_dev_ref_clk() - Enable the device
* ref clock.
* @phy: reference to a generic phy.
@@ -47,8 +31,6 @@ void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy);
*/
void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy);
-int ufs_qcom_phy_enable_iface_clk(struct phy *phy);
-void ufs_qcom_phy_disable_iface_clk(struct phy *phy);
int ufs_qcom_phy_start_serdes(struct phy *phy);
int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes);
int ufs_qcom_phy_calibrate_phy(struct phy *phy, bool is_rate_B);
diff --git a/include/linux/pm.h b/include/linux/pm.h
index efa67b2dfee9..f926af41e122 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -559,6 +559,7 @@ struct dev_pm_info {
pm_message_t power_state;
unsigned int can_wakeup:1;
unsigned int async_suspend:1;
+ bool in_dpm_list:1; /* Owned by the PM core */
bool is_prepared:1; /* Owned by the PM core */
bool is_suspended:1; /* Ditto */
bool is_noirq_suspended:1;
@@ -596,6 +597,7 @@ struct dev_pm_info {
unsigned int use_autosuspend:1;
unsigned int timer_autosuspends:1;
unsigned int memalloc_noio:1;
+ unsigned int links_count;
enum rpm_request request;
enum rpm_status runtime_status;
int runtime_error;
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 4957fc185ea9..ca4823e675e2 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -55,6 +55,11 @@ extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
+extern void pm_runtime_clean_up_links(struct device *dev);
+extern void pm_runtime_get_suppliers(struct device *dev);
+extern void pm_runtime_put_suppliers(struct device *dev);
+extern void pm_runtime_new_link(struct device *dev);
+extern void pm_runtime_drop_link(struct device *dev);
static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
@@ -179,6 +184,11 @@ static inline unsigned long pm_runtime_autosuspend_expiration(
struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
bool enable){}
+static inline void pm_runtime_clean_up_links(struct device *dev) {}
+static inline void pm_runtime_get_suppliers(struct device *dev) {}
+static inline void pm_runtime_put_suppliers(struct device *dev) {}
+static inline void pm_runtime_new_link(struct device *dev) {}
+static inline void pm_runtime_drop_link(struct device *dev) {}
#endif /* !CONFIG_PM */
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index e30deb046156..bed9557b69e7 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -4,7 +4,8 @@
enum bq27xxx_chip {
BQ27000 = 1, /* bq27000, bq27200 */
BQ27010, /* bq27010, bq27210 */
- BQ27500, /* bq27500, bq27510, bq27520 */
+ BQ27500, /* bq27500 */
+ BQ27510, /* bq27510, bq27520 */
BQ27530, /* bq27530, bq27531 */
BQ27541, /* bq27541, bq27542, bq27546, bq27742 */
BQ27545, /* bq27545 */
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 368c7ad06ae5..2d2bf592d9db 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -21,6 +21,7 @@ extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
struct proc_dir_entry *, void *);
extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
struct proc_dir_entry *);
+struct proc_dir_entry *proc_create_mount_point(const char *name);
extern struct proc_dir_entry *proc_create_data(const char *, umode_t,
struct proc_dir_entry *,
@@ -56,6 +57,7 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
struct proc_dir_entry *parent,const char *dest) { return NULL;}
static inline struct proc_dir_entry *proc_mkdir(const char *name,
struct proc_dir_entry *parent) {return NULL;}
+static inline struct proc_dir_entry *proc_create_mount_point(const char *name) { return NULL; }
static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
new file mode 100644
index 000000000000..0d905d8ec553
--- /dev/null
+++ b/include/linux/restart_block.h
@@ -0,0 +1,51 @@
+/*
+ * Common syscall restarting data
+ */
+#ifndef __LINUX_RESTART_BLOCK_H
+#define __LINUX_RESTART_BLOCK_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+struct timespec;
+struct compat_timespec;
+struct pollfd;
+
+/*
+ * System call restart block.
+ */
+struct restart_block {
+ long (*fn)(struct restart_block *);
+ union {
+ /* For futex_wait and futex_wait_requeue_pi */
+ struct {
+ u32 __user *uaddr;
+ u32 val;
+ u32 flags;
+ u32 bitset;
+ u64 time;
+ u32 __user *uaddr2;
+ } futex;
+ /* For nanosleep */
+ struct {
+ clockid_t clockid;
+ struct timespec __user *rmtp;
+#ifdef CONFIG_COMPAT
+ struct compat_timespec __user *compat_rmtp;
+#endif
+ u64 expires;
+ } nanosleep;
+ /* For poll */
+ struct {
+ struct pollfd __user *ufds;
+ int nfds;
+ int has_timeout;
+ unsigned long tv_sec;
+ unsigned long tv_nsec;
+ } poll;
+ };
+};
+
+extern long do_no_restart_syscall(struct restart_block *parm);
+
+#endif /* __LINUX_RESTART_BLOCK_H */
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 48ec7651989b..61fbb440449c 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -36,6 +36,8 @@ struct plat_serial8250_port {
void (*set_termios)(struct uart_port *,
struct ktermios *new,
struct ktermios *old);
+ void (*set_ldisc)(struct uart_port *,
+ struct ktermios *);
unsigned int (*get_mctrl)(struct uart_port *);
int (*handle_irq)(struct uart_port *);
void (*pm)(struct uart_port *, unsigned int state,
@@ -94,7 +96,7 @@ struct uart_8250_port {
struct uart_port port;
struct timer_list timer; /* "no irq" timer */
struct list_head list; /* ports on this IRQ */
- unsigned short capabilities; /* port capabilities */
+ u32 capabilities; /* port capabilities */
unsigned short bugs; /* port bugs */
bool fifo_bug; /* min RX trigger if enabled */
unsigned int tx_loadsz; /* transmit fifo load size */
@@ -149,6 +151,8 @@ extern int early_serial8250_setup(struct earlycon_device *device,
const char *options);
extern void serial8250_do_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old);
+extern void serial8250_do_set_ldisc(struct uart_port *port,
+ struct ktermios *termios);
extern unsigned int serial8250_do_get_mctrl(struct uart_port *port);
extern int serial8250_do_startup(struct uart_port *port);
extern void serial8250_do_shutdown(struct uart_port *port);
@@ -168,6 +172,6 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
extern void serial8250_set_isa_configurator(void (*v)
(int port, struct uart_port *up,
- unsigned short *capabilities));
+ u32 *capabilities));
#endif
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 344201437017..5d494888a612 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -123,6 +123,8 @@ struct uart_port {
void (*set_termios)(struct uart_port *,
struct ktermios *new,
struct ktermios *old);
+ void (*set_ldisc)(struct uart_port *,
+ struct ktermios *);
unsigned int (*get_mctrl)(struct uart_port *);
void (*set_mctrl)(struct uart_port *, unsigned int);
int (*startup)(struct uart_port *port);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 4b743ac35396..75c6bd0ac605 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -442,6 +442,7 @@ struct spi_master {
#define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */
#define SPI_MASTER_MUST_RX BIT(3) /* requires rx */
#define SPI_MASTER_MUST_TX BIT(4) /* requires tx */
+#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */
/*
* on some hardware transfer / message size may be constrained
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 5f81f8a187f2..183f37c8a5e1 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -44,11 +44,13 @@ enum dma_sync_target {
extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
dma_addr_t tbl_dma_addr,
phys_addr_t phys, size_t size,
- enum dma_data_direction dir);
+ enum dma_data_direction dir,
+ unsigned long attrs);
extern void swiotlb_tbl_unmap_single(struct device *hwdev,
phys_addr_t tlb_addr,
- size_t size, enum dma_data_direction dir);
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
extern void swiotlb_tbl_sync_single(struct device *hwdev,
phys_addr_t tlb_addr,
@@ -73,14 +75,6 @@ extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
unsigned long attrs);
extern int
-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir);
-
-extern void
-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir);
-
-extern int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
enum dma_data_direction dir,
unsigned long attrs);
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 2873baf5372a..58373875e8ee 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -9,50 +9,17 @@
#include <linux/types.h>
#include <linux/bug.h>
-
-struct timespec;
-struct compat_timespec;
+#include <linux/restart_block.h>
#ifdef CONFIG_THREAD_INFO_IN_TASK
-#define current_thread_info() ((struct thread_info *)current)
-#endif
-
/*
- * System call restart block.
+ * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
+ * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
+ * including <asm/current.h> can cause a circular dependency on some platforms.
*/
-struct restart_block {
- long (*fn)(struct restart_block *);
- union {
- /* For futex_wait and futex_wait_requeue_pi */
- struct {
- u32 __user *uaddr;
- u32 val;
- u32 flags;
- u32 bitset;
- u64 time;
- u32 __user *uaddr2;
- } futex;
- /* For nanosleep */
- struct {
- clockid_t clockid;
- struct timespec __user *rmtp;
-#ifdef CONFIG_COMPAT
- struct compat_timespec __user *compat_rmtp;
+#include <asm/current.h>
+#define current_thread_info() ((struct thread_info *)current)
#endif
- u64 expires;
- } nanosleep;
- /* For poll */
- struct {
- struct pollfd __user *ufds;
- int nfds;
- int has_timeout;
- unsigned long tv_sec;
- unsigned long tv_nsec;
- } poll;
- };
-};
-
-extern long do_no_restart_syscall(struct restart_block *parm);
#include <linux/bitops.h>
#include <asm/thread_info.h>
diff --git a/include/linux/usb.h b/include/linux/usb.h
index eba1f10e8cfd..7e68259360de 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1160,7 +1160,7 @@ extern struct bus_type usb_bus_type;
* @minor_base: the start of the minor range for this driver.
*
* This structure is used for the usb_register_dev() and
- * usb_unregister_dev() functions, to consolidate a number of the
+ * usb_deregister_dev() functions, to consolidate a number of the
* parameters used for them.
*/
struct usb_class_driver {
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 8e81f9eb95e4..e4516e9ded0f 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -429,7 +429,9 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
*/
static inline size_t usb_ep_align(struct usb_ep *ep, size_t len)
{
- return round_up(len, (size_t)le16_to_cpu(ep->desc->wMaxPacketSize));
+ int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc) & 0x7ff;
+
+ return round_up(len, max_packet_size);
}
/**
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 66fc13705ab7..40edf6a8533e 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -566,21 +566,22 @@ extern void usb_ep0_reinit(struct usb_device *);
((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
/* class requests from the USB 2.0 hub spec, table 11-15 */
+#define HUB_CLASS_REQ(dir, type, request) ((((dir) | (type)) << 8) | (request))
/* GetBusState and SetHubDescriptor are optional, omitted */
-#define ClearHubFeature (0x2000 | USB_REQ_CLEAR_FEATURE)
-#define ClearPortFeature (0x2300 | USB_REQ_CLEAR_FEATURE)
-#define GetHubDescriptor (0xa000 | USB_REQ_GET_DESCRIPTOR)
-#define GetHubStatus (0xa000 | USB_REQ_GET_STATUS)
-#define GetPortStatus (0xa300 | USB_REQ_GET_STATUS)
-#define SetHubFeature (0x2000 | USB_REQ_SET_FEATURE)
-#define SetPortFeature (0x2300 | USB_REQ_SET_FEATURE)
+#define ClearHubFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_CLEAR_FEATURE)
+#define ClearPortFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_CLEAR_FEATURE)
+#define GetHubDescriptor HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_DESCRIPTOR)
+#define GetHubStatus HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_STATUS)
+#define GetPortStatus HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, USB_REQ_GET_STATUS)
+#define SetHubFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_SET_FEATURE)
+#define SetPortFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_SET_FEATURE)
/*-------------------------------------------------------------------------*/
/* class requests from USB 3.1 hub spec, table 10-7 */
-#define SetHubDepth (0x2000 | HUB_SET_DEPTH)
-#define GetPortErrorCount (0xa300 | HUB_GET_PORT_ERR_COUNT)
+#define SetHubDepth HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, HUB_SET_DEPTH)
+#define GetPortErrorCount HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, HUB_GET_PORT_ERR_COUNT)
/*
* Generic bandwidth allocation constants/support
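HUB_CLASS_REQ() packs the request type byte and the request code into the same 16-bit constants as before. A quick spot check, using the standard ch9/hub values (USB_DIR_IN is 0x80, USB_RT_PORT is 0x23), shows the expansion matches the removed literals:

    /*
     * HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, USB_REQ_GET_STATUS)
     *   == ((0x80 | 0x23) << 8) | USB_REQ_GET_STATUS
     *   == 0xa300 | USB_REQ_GET_STATUS
     * which is exactly the old GetPortStatus definition.
     */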
diff --git a/include/linux/vme.h b/include/linux/vme.h
index ea6095deba20..8c589176c2f8 100644
--- a/include/linux/vme.h
+++ b/include/linux/vme.h
@@ -113,7 +113,6 @@ struct vme_driver {
int (*match)(struct vme_dev *);
int (*probe)(struct vme_dev *);
int (*remove)(struct vme_dev *);
- void (*shutdown)(void);
struct device_driver driver;
struct list_head devices;
};
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 6abd24f258bc..833fdd4794a0 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -191,5 +191,7 @@ extern void vt_set_led_state(int console, int leds);
extern void vt_kbd_con_start(int console);
extern void vt_kbd_con_stop(int console);
+void vc_scrolldelta_helper(struct vc_data *c, int lines,
+ unsigned int rolled_over, void *_base, unsigned int size);
#endif /* _VT_KERN_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index d4f16cf6281c..a26cc437293c 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -603,14 +603,6 @@ static inline bool schedule_delayed_work(struct delayed_work *dwork,
return queue_delayed_work(system_wq, dwork, delay);
}
-/**
- * keventd_up - is workqueue initialized yet?
- */
-static inline bool keventd_up(void)
-{
- return system_wq != NULL;
-}
-
#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
@@ -645,4 +637,7 @@ int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif
+int __init workqueue_init_early(void);
+int __init workqueue_init(void);
+
#endif
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 7428a53257ca..96dd0b3f70d7 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -44,6 +44,11 @@
#define FC_NO_ERR 0 /* no error */
#define FC_EX_TIMEOUT 1 /* Exchange timeout */
#define FC_EX_CLOSED 2 /* Exchange closed */
+#define FC_EX_ALLOC_ERR 3 /* Exchange allocation failed */
+#define FC_EX_XMIT_ERR 4 /* Exchange transmit failed */
+#define FC_EX_ELS_RJT 5 /* ELS rejected */
+#define FC_EX_INV_LOGIN 6 /* Login not completed */
+#define FC_EX_SEQ_ERR 6 /* Exchange sequence error */
/**
* enum fc_lport_state - Local port states
@@ -350,7 +355,8 @@ struct fc_fcp_pkt {
/* Timeout/error related information */
struct timer_list timer;
- int wait_for_comp;
+ int wait_for_comp;
+ int timer_delay;
u32 recov_retry;
struct fc_seq *recov_seq;
struct completion tm_done;
@@ -385,6 +391,7 @@ struct fc_seq {
#define FC_EX_DONE (1 << 0) /* ep is completed */
#define FC_EX_RST_CLEANUP (1 << 1) /* reset is forcing completion */
+#define FC_EX_QUARANTINE (1 << 2) /* exch is quarantined */
/**
* struct fc_exch - Fibre Channel Exchange
@@ -478,37 +485,6 @@ struct libfc_function_template {
void *arg, u32 timer_msec);
/*
- * Send the FC frame payload using a new exchange and sequence.
- *
- * The exchange response handler is set in this routine to resp()
- * function pointer. It can be called in two scenarios: if a timeout
- * occurs or if a response frame is received for the exchange. The
- * fc_frame pointer in response handler will also indicate timeout
- * as error using IS_ERR related macros.
- *
- * The exchange destructor handler is also set in this routine.
- * The destructor handler is invoked by EM layer when exchange
- * is about to free, this can be used by caller to free its
- * resources along with exchange free.
- *
- * The arg is passed back to resp and destructor handler.
- *
- * The timeout value (in msec) for an exchange is set if non zero
- * timer_msec argument is specified. The timer is canceled when
- * it fires or when the exchange is done. The exchange timeout handler
- * is registered by EM layer.
- *
- * STATUS: OPTIONAL
- */
- struct fc_seq *(*exch_seq_send)(struct fc_lport *, struct fc_frame *,
- void (*resp)(struct fc_seq *,
- struct fc_frame *,
- void *),
- void (*destructor)(struct fc_seq *,
- void *),
- void *, unsigned int timer_msec);
-
- /*
* Sets up the DDP context for a given exchange id on the given
* scatterlist if LLD supports DDP for large receive.
*
@@ -537,73 +513,6 @@ struct libfc_function_template {
* STATUS: OPTIONAL
*/
void (*get_lesb)(struct fc_lport *, struct fc_els_lesb *lesb);
- /*
- * Send a frame using an existing sequence and exchange.
- *
- * STATUS: OPTIONAL
- */
- int (*seq_send)(struct fc_lport *, struct fc_seq *,
- struct fc_frame *);
-
- /*
- * Send an ELS response using information from the received frame.
- *
- * STATUS: OPTIONAL
- */
- void (*seq_els_rsp_send)(struct fc_frame *, enum fc_els_cmd,
- struct fc_seq_els_data *);
-
- /*
- * Abort an exchange and sequence. Generally called because of a
- * exchange timeout or an abort from the upper layer.
- *
- * A timer_msec can be specified for abort timeout, if non-zero
- * timer_msec value is specified then exchange resp handler
- * will be called with timeout error if no response to abort.
- *
- * STATUS: OPTIONAL
- */
- int (*seq_exch_abort)(const struct fc_seq *,
- unsigned int timer_msec);
-
- /*
- * Indicate that an exchange/sequence tuple is complete and the memory
- * allocated for the related objects may be freed.
- *
- * STATUS: OPTIONAL
- */
- void (*exch_done)(struct fc_seq *);
-
- /*
- * Start a new sequence on the same exchange/sequence tuple.
- *
- * STATUS: OPTIONAL
- */
- struct fc_seq *(*seq_start_next)(struct fc_seq *);
-
- /*
- * Set a response handler for the exchange of the sequence.
- *
- * STATUS: OPTIONAL
- */
- void (*seq_set_resp)(struct fc_seq *sp,
- void (*resp)(struct fc_seq *, struct fc_frame *,
- void *),
- void *arg);
-
- /*
- * Assign a sequence for an incoming request frame.
- *
- * STATUS: OPTIONAL
- */
- struct fc_seq *(*seq_assign)(struct fc_lport *, struct fc_frame *);
-
- /*
- * Release the reference on the sequence returned by seq_assign().
- *
- * STATUS: OPTIONAL
- */
- void (*seq_release)(struct fc_seq *);
/*
* Reset an exchange manager, completing all sequences and exchanges.
@@ -615,27 +524,6 @@ struct libfc_function_template {
void (*exch_mgr_reset)(struct fc_lport *, u32 s_id, u32 d_id);
/*
- * Flush the rport work queue. Generally used before shutdown.
- *
- * STATUS: OPTIONAL
- */
- void (*rport_flush_queue)(void);
-
- /*
- * Receive a frame for a local port.
- *
- * STATUS: OPTIONAL
- */
- void (*lport_recv)(struct fc_lport *, struct fc_frame *);
-
- /*
- * Reset the local port.
- *
- * STATUS: OPTIONAL
- */
- int (*lport_reset)(struct fc_lport *);
-
- /*
* Set the local port FC_ID.
*
* This may be provided by the LLD to allow it to be
@@ -656,54 +544,6 @@ struct libfc_function_template {
struct fc_frame *);
/*
- * Create a remote port with a given port ID
- *
- * STATUS: OPTIONAL
- */
- struct fc_rport_priv *(*rport_create)(struct fc_lport *, u32);
-
- /*
- * Initiates the RP state machine. It is called from the LP module.
- * This function will issue the following commands to the N_Port
- * identified by the FC ID provided.
- *
- * - PLOGI
- * - PRLI
- * - RTV
- *
- * STATUS: OPTIONAL
- */
- int (*rport_login)(struct fc_rport_priv *);
-
- /*
- * Logoff, and remove the rport from the transport if
- * it had been added. This will send a LOGO to the target.
- *
- * STATUS: OPTIONAL
- */
- int (*rport_logoff)(struct fc_rport_priv *);
-
- /*
- * Receive a request from a remote port.
- *
- * STATUS: OPTIONAL
- */
- void (*rport_recv_req)(struct fc_lport *, struct fc_frame *);
-
- /*
- * lookup an rport by it's port ID.
- *
- * STATUS: OPTIONAL
- */
- struct fc_rport_priv *(*rport_lookup)(const struct fc_lport *, u32);
-
- /*
- * Destroy an rport after final kref_put().
- * The argument is a pointer to the kref inside the fc_rport_priv.
- */
- void (*rport_destroy)(struct kref *);
-
- /*
* Callback routine after the remote port is logged in
*
* STATUS: OPTIONAL
@@ -1068,18 +908,26 @@ void fc_vport_setlink(struct fc_lport *);
void fc_vports_linkchange(struct fc_lport *);
int fc_lport_config(struct fc_lport *);
int fc_lport_reset(struct fc_lport *);
+void fc_lport_recv(struct fc_lport *lport, struct fc_frame *fp);
int fc_set_mfs(struct fc_lport *, u32 mfs);
struct fc_lport *libfc_vport_create(struct fc_vport *, int privsize);
struct fc_lport *fc_vport_id_lookup(struct fc_lport *, u32 port_id);
-int fc_lport_bsg_request(struct fc_bsg_job *);
+int fc_lport_bsg_request(struct bsg_job *);
void fc_lport_set_local_id(struct fc_lport *, u32 port_id);
void fc_lport_iterate(void (*func)(struct fc_lport *, void *), void *);
/*
* REMOTE PORT LAYER
*****************************/
-int fc_rport_init(struct fc_lport *);
void fc_rport_terminate_io(struct fc_rport *);
+struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
+ u32 port_id);
+struct fc_rport_priv *fc_rport_create(struct fc_lport *, u32);
+void fc_rport_destroy(struct kref *kref);
+int fc_rport_login(struct fc_rport_priv *rdata);
+int fc_rport_logoff(struct fc_rport_priv *rdata);
+void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp);
+void fc_rport_flush_queue(void);
/*
* DISCOVERY LAYER
@@ -1131,6 +979,21 @@ void fc_fill_hdr(struct fc_frame *, const struct fc_frame *,
*****************************/
int fc_exch_init(struct fc_lport *);
void fc_exch_update_stats(struct fc_lport *lport);
+struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
+ struct fc_frame *fp,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *fp,
+ void *arg),
+ void (*destructor)(struct fc_seq *, void *),
+ void *arg, u32 timer_msec);
+void fc_seq_els_rsp_send(struct fc_frame *, enum fc_els_cmd,
+ struct fc_seq_els_data *);
+struct fc_seq *fc_seq_start_next(struct fc_seq *sp);
+void fc_seq_set_resp(struct fc_seq *sp,
+ void (*resp)(struct fc_seq *, struct fc_frame *, void *),
+ void *arg);
+struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp);
+void fc_seq_release(struct fc_seq *sp);
struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *,
struct fc_exch_mgr *,
bool (*match)(struct fc_frame *));
@@ -1142,6 +1005,9 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *, enum fc_class class,
void fc_exch_mgr_free(struct fc_lport *);
void fc_exch_recv(struct fc_lport *, struct fc_frame *);
void fc_exch_mgr_reset(struct fc_lport *, u32 s_id, u32 d_id);
+int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp);
+int fc_seq_exch_abort(const struct fc_seq *, unsigned int timer_msec);
+void fc_exch_done(struct fc_seq *sp);
/*
* Functions for fc_functions_template
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 7e4cd53139ed..36680f13270d 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -278,6 +278,14 @@ struct scsi_host_template {
int (* change_queue_depth)(struct scsi_device *, int);
/*
+ * This function lets the driver expose the queue mapping
+ * to the block layer.
+ *
+ * Status: OPTIONAL
+ */
+ int (* map_queues)(struct Scsi_Host *shost);
+
+ /*
* This function determines the BIOS parameters for a given
* harddisk. These tend to be numbers that are made up by
* the host adapter. Parameters:
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index bf66ea6bed2b..924c8e614b45 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -28,9 +28,11 @@
#define SCSI_TRANSPORT_FC_H
#include <linux/sched.h>
+#include <linux/bsg-lib.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_netlink.h>
+#include <scsi/scsi_host.h>
struct scsi_transport_template;
@@ -624,48 +626,6 @@ struct fc_host_attrs {
#define fc_host_dev_loss_tmo(x) \
(((struct fc_host_attrs *)(x)->shost_data)->dev_loss_tmo)
-
-struct fc_bsg_buffer {
- unsigned int payload_len;
- int sg_cnt;
- struct scatterlist *sg_list;
-};
-
-/* Values for fc_bsg_job->state_flags (bitflags) */
-#define FC_RQST_STATE_INPROGRESS 0
-#define FC_RQST_STATE_DONE 1
-
-struct fc_bsg_job {
- struct Scsi_Host *shost;
- struct fc_rport *rport;
- struct device *dev;
- struct request *req;
- spinlock_t job_lock;
- unsigned int state_flags;
- unsigned int ref_cnt;
- void (*job_done)(struct fc_bsg_job *);
-
- struct fc_bsg_request *request;
- struct fc_bsg_reply *reply;
- unsigned int request_len;
- unsigned int reply_len;
- /*
- * On entry : reply_len indicates the buffer size allocated for
- * the reply.
- *
- * Upon completion : the message handler must set reply_len
- * to indicates the size of the reply to be returned to the
- * caller.
- */
-
- /* DMA payloads for the request/response */
- struct fc_bsg_buffer request_payload;
- struct fc_bsg_buffer reply_payload;
-
- void *dd_data; /* Used for driver-specific storage */
-};
-
-
/* The functions by which the transport class and the driver communicate */
struct fc_function_template {
void (*get_rport_dev_loss_tmo)(struct fc_rport *);
@@ -702,8 +662,8 @@ struct fc_function_template {
int (* it_nexus_response)(struct Scsi_Host *, u64, int);
/* bsg support */
- int (*bsg_request)(struct fc_bsg_job *);
- int (*bsg_timeout)(struct fc_bsg_job *);
+ int (*bsg_request)(struct bsg_job *);
+ int (*bsg_timeout)(struct bsg_job *);
/* allocation lengths for host-specific data */
u32 dd_fcrport_size;
@@ -849,4 +809,18 @@ struct fc_vport *fc_vport_create(struct Scsi_Host *shost, int channel,
int fc_vport_terminate(struct fc_vport *vport);
int fc_block_scsi_eh(struct scsi_cmnd *cmnd);
+static inline struct Scsi_Host *fc_bsg_to_shost(struct bsg_job *job)
+{
+ if (scsi_is_host_device(job->dev))
+ return dev_to_shost(job->dev);
+ return rport_to_shost(dev_to_rport(job->dev));
+}
+
+static inline struct fc_rport *fc_bsg_to_rport(struct bsg_job *job)
+{
+ if (scsi_is_fc_rport(job->dev))
+ return dev_to_rport(job->dev);
+ return NULL;
+}
+
#endif /* SCSI_TRANSPORT_FC_H */
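With fc_bsg_job gone, an LLD's bsg handlers take the generic struct bsg_job and recover their Scsi_Host or rport through the new inline helpers. A hedged skeleton of such a handler; my_bsg_request is hypothetical and the actual dispatch is elided:

    #include <linux/bsg-lib.h>
    #include <scsi/scsi_transport_fc.h>

    static int my_bsg_request(struct bsg_job *job)
    {
            struct Scsi_Host *shost = fc_bsg_to_shost(job);
            struct fc_rport *rport = fc_bsg_to_rport(job); /* NULL for host-directed jobs */

            dev_dbg(&shost->shost_gendev, "bsg request, rport=%p\n", rport);
            /* decode job->request and dispatch here */
            return 0;
    }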
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index 3fb357193f09..cb979ad90401 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -109,14 +109,35 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_FRAMEBUFFER_SET_OVERSCAN = 0x0004800a,
RPI_FIRMWARE_FRAMEBUFFER_SET_PALETTE = 0x0004800b,
+ RPI_FIRMWARE_VCHIQ_INIT = 0x00048010,
+
RPI_FIRMWARE_GET_COMMAND_LINE = 0x00050001,
RPI_FIRMWARE_GET_DMA_CHANNELS = 0x00060001,
};
+#if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)
int rpi_firmware_property(struct rpi_firmware *fw,
u32 tag, void *data, size_t len);
int rpi_firmware_property_list(struct rpi_firmware *fw,
void *data, size_t tag_size);
struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node);
+#else
+static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag,
+ void *data, size_t len)
+{
+ return 0;
+}
+
+static inline int rpi_firmware_property_list(struct rpi_firmware *fw,
+ void *data, size_t tag_size)
+{
+ return 0;
+}
+
+static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
+{
+ return NULL;
+}
+#endif
#endif /* __SOC_RASPBERRY_FIRMWARE_H__ */
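The new stubs let consumers compile with CONFIG_RASPBERRYPI_FIRMWARE disabled, so callers need no #ifdefs of their own. A hedged sketch of a property query using a tag from this header; the wrapper name and the zero-on-failure convention are assumptions, and error handling is trimmed:

    #include <linux/of.h>
    #include <soc/bcm2835/raspberrypi-firmware.h>

    static u32 my_get_dma_channels(struct device_node *fw_node)
    {
            struct rpi_firmware *fw = rpi_firmware_get(fw_node);
            u32 channels = 0;

            if (fw)
                    rpi_firmware_property(fw, RPI_FIRMWARE_GET_DMA_CHANNELS,
                                          &channels, sizeof(channels));
            return channels;
    }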
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index cee8c00f3d3e..9924bc9cbc7c 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -155,6 +155,7 @@ struct snd_compr {
struct mutex lock;
int device;
#ifdef CONFIG_SND_VERBOSE_PROCFS
+ /* private: */
char id[64];
struct snd_info_entry *proc_root;
struct snd_info_entry *proc_info_entry;
diff --git a/include/sound/core.h b/include/sound/core.h
index 31079ea5e484..f7d8c10c4c45 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -308,8 +308,8 @@ __printf(4, 5)
void __snd_printk(unsigned int level, const char *file, int line,
const char *format, ...);
#else
-#define __snd_printk(level, file, line, format, args...) \
- printk(format, ##args)
+#define __snd_printk(level, file, line, format, ...) \
+ printk(format, ##__VA_ARGS__)
#endif
/**
@@ -319,8 +319,8 @@ void __snd_printk(unsigned int level, const char *file, int line,
* Works like printk() but prints the file and the line of the caller
* when configured with CONFIG_SND_VERBOSE_PRINTK.
*/
-#define snd_printk(fmt, args...) \
- __snd_printk(0, __FILE__, __LINE__, fmt, ##args)
+#define snd_printk(fmt, ...) \
+ __snd_printk(0, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
#ifdef CONFIG_SND_DEBUG
/**
@@ -330,10 +330,10 @@ void __snd_printk(unsigned int level, const char *file, int line,
* Works like snd_printk() for debugging purposes.
* Ignored when CONFIG_SND_DEBUG is not set.
*/
-#define snd_printd(fmt, args...) \
- __snd_printk(1, __FILE__, __LINE__, fmt, ##args)
-#define _snd_printd(level, fmt, args...) \
- __snd_printk(level, __FILE__, __LINE__, fmt, ##args)
+#define snd_printd(fmt, ...) \
+ __snd_printk(1, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
+#define _snd_printd(level, fmt, ...) \
+ __snd_printk(level, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
/**
* snd_BUG - give a BUG warning message and stack trace
@@ -383,8 +383,8 @@ static inline bool snd_printd_ratelimit(void) { return false; }
* Works like snd_printk() for debugging purposes.
* Ignored when CONFIG_SND_DEBUG_VERBOSE is not set.
*/
-#define snd_printdd(format, args...) \
- __snd_printk(2, __FILE__, __LINE__, format, ##args)
+#define snd_printdd(format, ...) \
+ __snd_printk(2, __FILE__, __LINE__, format, ##__VA_ARGS__)
#else
__printf(1, 2)
static inline void snd_printdd(const char *format, ...) {}
diff --git a/include/sound/cs35l34.h b/include/sound/cs35l34.h
new file mode 100644
index 000000000000..9c927cffbe46
--- /dev/null
+++ b/include/sound/cs35l34.h
@@ -0,0 +1,35 @@
+/*
+ * linux/sound/cs35l34.h -- Platform data for CS35l34
+ *
+ * Copyright (c) 2016 Cirrus Logic Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CS35L34_H
+#define __CS35L34_H
+
+struct cs35l34_platform_data {
+ /* Set AIF to half drive strength */
+ bool aif_half_drv;
+ /* Digital Soft Ramp Disable */
+ bool digsft_disable;
+ /* Amplifier Invert */
+ bool amp_inv;
+ /* Peak current (mA) */
+ unsigned int boost_peak;
+ /* Boost inductor value (nH) */
+ unsigned int boost_ind;
+ /* Boost Controller Voltage Setting (mV) */
+ unsigned int boost_vtge;
+ /* Gain Change Zero Cross */
+ bool gain_zc_disable;
+ /* SDIN Left/Right Selection */
+ unsigned int i2s_sdinloc;
+ /* TDM Rising Edge */
+ bool tdm_rising_edge;
+};
+
+#endif /* __CS35L34_H */
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index 67be2445941a..1c8f9e1ef2a5 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -71,7 +71,6 @@ struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
* @slave_id: Slave requester id for the DMA channel.
* @filter_data: Custom DMA channel filter data, this will usually be used when
* requesting the DMA channel.
- * @chan_name: Custom channel name to use when requesting DMA channel.
* @fifo_size: FIFO size of the DAI controller in bytes
* @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now
*/
@@ -81,7 +80,6 @@ struct snd_dmaengine_dai_dma_data {
u32 maxburst;
unsigned int slave_id;
void *filter_data;
- const char *chan_name;
unsigned int fifo_size;
unsigned int flags;
};
@@ -107,10 +105,6 @@ void snd_dmaengine_pcm_set_config_from_dai_data(
* playback.
*/
#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3)
-/*
- * The PCM streams have custom channel names specified.
- */
-#define SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME BIT(4)
/**
* struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index 5bd134651f5e..4f42affe777c 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -1688,7 +1688,8 @@ struct snd_emu1010 {
unsigned int internal_clock; /* 44100 or 48000 */
unsigned int optical_in; /* 0:SPDIF, 1:ADAT */
unsigned int optical_out; /* 0:SPDIF, 1:ADAT */
- struct task_struct *firmware_thread;
+ struct delayed_work firmware_work;
+ u32 last_reg;
};
struct snd_emu10k1 {
diff --git a/include/sound/rt5514.h b/include/sound/rt5514.h
new file mode 100644
index 000000000000..ef18494769ee
--- /dev/null
+++ b/include/sound/rt5514.h
@@ -0,0 +1,20 @@
+/*
+ * linux/sound/rt5514.h -- Platform data for RT5514
+ *
+ * Copyright 2016 Realtek Semiconductor Corp.
+ * Author: Oder Chiou <oder_chiou@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_RT5514_H
+#define __LINUX_SND_RT5514_H
+
+struct rt5514_platform_data {
+ unsigned int dmic_init_delay;
+};
+
+#endif
+
diff --git a/include/sound/rt5665.h b/include/sound/rt5665.h
new file mode 100755
index 000000000000..963229e71dc7
--- /dev/null
+++ b/include/sound/rt5665.h
@@ -0,0 +1,47 @@
+/*
+ * linux/sound/rt5665.h -- Platform data for RT5665
+ *
+ * Copyright 2016 Realtek Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_RT5665_H
+#define __LINUX_SND_RT5665_H
+
+enum rt5665_dmic1_data_pin {
+ RT5665_DMIC1_NULL,
+ RT5665_DMIC1_DATA_GPIO4,
+ RT5665_DMIC1_DATA_IN2N,
+};
+
+enum rt5665_dmic2_data_pin {
+ RT5665_DMIC2_NULL,
+ RT5665_DMIC2_DATA_GPIO5,
+ RT5665_DMIC2_DATA_IN2P,
+};
+
+enum rt5665_jd_src {
+ RT5665_JD_NULL,
+ RT5665_JD1,
+};
+
+struct rt5665_platform_data {
+ bool in1_diff;
+ bool in2_diff;
+ bool in3_diff;
+ bool in4_diff;
+
+ int ldo1_en; /* GPIO for LDO1_EN */
+
+ enum rt5665_dmic1_data_pin dmic1_data_pin;
+ enum rt5665_dmic2_data_pin dmic2_data_pin;
+ enum rt5665_jd_src jd_src;
+
+ unsigned int sar_hs_type;
+};
+
+#endif
+
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index fd6412551145..64e90ca9ad32 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -1,5 +1,5 @@
/*
- * simple_card_core.h
+ * simple_card_utils.h
*
* Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*
@@ -7,8 +7,8 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#ifndef __SIMPLE_CARD_CORE_H
-#define __SIMPLE_CARD_CORE_H
+#ifndef __SIMPLE_CARD_UTILS_H
+#define __SIMPLE_CARD_UTILS_H
#include <sound/soc.h>
@@ -68,4 +68,4 @@ void asoc_simple_card_canonicalize_cpu(struct snd_soc_dai_link *dai_link,
int asoc_simple_card_clean_reference(struct snd_soc_card *card);
-#endif /* __SIMPLE_CARD_CORE_H */
+#endif /* __SIMPLE_CARD_UTILS_H */
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 964b7de1a1cc..200e1f04c166 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -15,6 +15,7 @@
#include <linux/list.h>
+#include <sound/asoc.h>
struct snd_pcm_substream;
struct snd_soc_dapm_widget;
@@ -26,13 +27,13 @@ struct snd_compr_stream;
* Describes the physical PCM data formatting and clocking. Add new formats
* to the end.
*/
-#define SND_SOC_DAIFMT_I2S 1 /* I2S mode */
-#define SND_SOC_DAIFMT_RIGHT_J 2 /* Right Justified mode */
-#define SND_SOC_DAIFMT_LEFT_J 3 /* Left Justified mode */
-#define SND_SOC_DAIFMT_DSP_A 4 /* L data MSB after FRM LRC */
-#define SND_SOC_DAIFMT_DSP_B 5 /* L data MSB during FRM LRC */
-#define SND_SOC_DAIFMT_AC97 6 /* AC97 */
-#define SND_SOC_DAIFMT_PDM 7 /* Pulse density modulation */
+#define SND_SOC_DAIFMT_I2S SND_SOC_DAI_FORMAT_I2S
+#define SND_SOC_DAIFMT_RIGHT_J SND_SOC_DAI_FORMAT_RIGHT_J
+#define SND_SOC_DAIFMT_LEFT_J SND_SOC_DAI_FORMAT_LEFT_J
+#define SND_SOC_DAIFMT_DSP_A SND_SOC_DAI_FORMAT_DSP_A
+#define SND_SOC_DAIFMT_DSP_B SND_SOC_DAI_FORMAT_DSP_B
+#define SND_SOC_DAIFMT_AC97 SND_SOC_DAI_FORMAT_AC97
+#define SND_SOC_DAIFMT_PDM SND_SOC_DAI_FORMAT_PDM
/* left and right justified also known as MSB and LSB respectively */
#define SND_SOC_DAIFMT_MSB SND_SOC_DAIFMT_LEFT_J
@@ -207,6 +208,30 @@ struct snd_soc_dai_ops {
struct snd_soc_dai *);
};
+struct snd_soc_cdai_ops {
+ /*
+ * for compress ops
+ */
+ int (*startup)(struct snd_compr_stream *,
+ struct snd_soc_dai *);
+ int (*shutdown)(struct snd_compr_stream *,
+ struct snd_soc_dai *);
+ int (*set_params)(struct snd_compr_stream *,
+ struct snd_compr_params *, struct snd_soc_dai *);
+ int (*get_params)(struct snd_compr_stream *,
+ struct snd_codec *, struct snd_soc_dai *);
+ int (*set_metadata)(struct snd_compr_stream *,
+ struct snd_compr_metadata *, struct snd_soc_dai *);
+ int (*get_metadata)(struct snd_compr_stream *,
+ struct snd_compr_metadata *, struct snd_soc_dai *);
+ int (*trigger)(struct snd_compr_stream *, int,
+ struct snd_soc_dai *);
+ int (*pointer)(struct snd_compr_stream *,
+ struct snd_compr_tstamp *, struct snd_soc_dai *);
+ int (*ack)(struct snd_compr_stream *, size_t,
+ struct snd_soc_dai *);
+};
+
/*
* Digital Audio Interface Driver.
*
@@ -236,6 +261,7 @@ struct snd_soc_dai_driver {
/* ops */
const struct snd_soc_dai_ops *ops;
+ const struct snd_soc_cdai_ops *cops;
/* DAI capabilities */
struct snd_soc_pcm_stream capture;
@@ -268,8 +294,9 @@ struct snd_soc_dai {
unsigned int symmetric_rates:1;
unsigned int symmetric_channels:1;
unsigned int symmetric_samplebits:1;
+ unsigned int probed:1;
+
unsigned int active;
- unsigned char probed:1;
struct snd_soc_dapm_widget *playback_widget;
struct snd_soc_dapm_widget *capture_widget;
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index f60d755f7ac6..a466f4bdc835 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -272,6 +272,16 @@ struct device;
/* dapm kcontrol types */
+#define SOC_DAPM_DOUBLE(xname, reg, lshift, rshift, max, invert) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_soc_info_volsw, \
+ .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
+ .private_value = SOC_DOUBLE_VALUE(reg, lshift, rshift, max, invert, 0) }
+#define SOC_DAPM_DOUBLE_R(xname, lreg, rreg, shift, max, invert) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_soc_info_volsw, \
+ .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
+ .private_value = SOC_DOUBLE_R_VALUE(lreg, rreg, shift, max, invert) }
#define SOC_DAPM_SINGLE(xname, reg, shift, max, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_volsw, \
@@ -615,6 +625,10 @@ struct snd_soc_dapm_update {
int reg;
int mask;
int val;
+ int reg2;
+ int mask2;
+ int val2;
+ bool has_second_set;
};
struct snd_soc_dapm_wcache {
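The new SOC_DAPM_DOUBLE()/SOC_DAPM_DOUBLE_R() macros are used exactly like the existing SOC_DAPM_SINGLE(), i.e. inside a snd_kcontrol_new array bound to a mixer widget. A sketch with placeholder register names, not taken from any real codec:

#include <sound/soc.h>
#include <sound/soc-dapm.h>

/* Placeholder register addresses for illustration only. */
#define EXAMPLE_MIXER_REG	0x10
#define EXAMPLE_HPL_REG		0x20
#define EXAMPLE_HPR_REG		0x21

static const struct snd_kcontrol_new example_output_mixer[] = {
	SOC_DAPM_SINGLE("Mono Switch", EXAMPLE_MIXER_REG, 0, 1, 0),
	/* stereo switch in one register: left at bit 1, right at bit 2 */
	SOC_DAPM_DOUBLE("Stereo Switch", EXAMPLE_MIXER_REG, 1, 2, 1, 0),
	/* stereo switch split across a left and a right register */
	SOC_DAPM_DOUBLE_R("HP Switch", EXAMPLE_HPL_REG, EXAMPLE_HPR_REG, 0, 1, 0),
};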
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index b897b9d63161..f9cc7b9271ac 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -53,7 +53,7 @@ struct snd_soc_dobj_control {
/* dynamic widget object */
struct snd_soc_dobj_widget {
- unsigned int kcontrol_enum:1; /* this widget is an enum kcontrol */
+ unsigned int kcontrol_type; /* kcontrol type: mixer, enum, bytes */
};
/* generic dynamic object - all dynamic objects belong to this struct */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 4f1c784e44f6..2b502f6cc6d0 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -782,6 +782,8 @@ struct snd_soc_component_driver {
int (*probe)(struct snd_soc_component *);
void (*remove)(struct snd_soc_component *);
+ int (*suspend)(struct snd_soc_component *);
+ int (*resume)(struct snd_soc_component *);
/* DT */
int (*of_xlate_dai_name)(struct snd_soc_component *component,
@@ -807,9 +809,11 @@ struct snd_soc_component {
unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */
unsigned int registered_as_component:1;
+ unsigned int auxiliary:1; /* for auxiliary component of the card */
+ unsigned int suspended:1; /* is in suspend PM state */
struct list_head list;
- struct list_head list_aux; /* for auxiliary component of the card */
+ struct list_head card_list;
struct snd_soc_dai_driver *dai_drv;
int num_dai;
@@ -852,6 +856,8 @@ struct snd_soc_component {
int (*probe)(struct snd_soc_component *);
void (*remove)(struct snd_soc_component *);
+ int (*suspend)(struct snd_soc_component *);
+ int (*resume)(struct snd_soc_component *);
/* machine specific init */
int (*init)(struct snd_soc_component *component);
@@ -868,11 +874,9 @@ struct snd_soc_codec {
const struct snd_soc_codec_driver *driver;
struct list_head list;
- struct list_head card_list;
/* runtime */
unsigned int cache_bypass:1; /* Suppress access to the cache */
- unsigned int suspended:1; /* Codec is in suspend PM state */
unsigned int cache_init:1; /* codec cache has been initialized */
/* codec IO */
@@ -1025,13 +1029,13 @@ struct snd_soc_dai_link {
const struct snd_soc_ops *ops;
const struct snd_soc_compr_ops *compr_ops;
- /* For unidirectional dai links */
- bool playback_only;
- bool capture_only;
-
/* Mark this pcm with non atomic ops */
bool nonatomic;
+ /* For unidirectional dai links */
+ unsigned int playback_only:1;
+ unsigned int capture_only:1;
+
/* Keep DAI active over suspend */
unsigned int ignore_suspend:1;
@@ -1148,7 +1152,6 @@ struct snd_soc_card {
*/
struct snd_soc_aux_dev *aux_dev;
int num_aux_devs;
- struct list_head aux_comp_list;
const struct snd_kcontrol_new *controls;
int num_controls;
@@ -1170,7 +1173,7 @@ struct snd_soc_card {
struct work_struct deferred_resume_work;
/* lists of probed devices belonging to this card */
- struct list_head codec_dev_list;
+ struct list_head component_dev_list;
struct list_head widgets;
struct list_head paths;
@@ -1203,14 +1206,11 @@ struct snd_soc_pcm_runtime {
enum snd_soc_pcm_subclass pcm_subclass;
struct snd_pcm_ops ops;
- unsigned int dev_registered:1;
-
/* Dynamic PCM BE runtime data */
struct snd_soc_dpcm_runtime dpcm[2];
int fe_compr;
long pmdown_time;
- unsigned char pop_wait:1;
/* runtime devices */
struct snd_pcm *pcm;
@@ -1219,7 +1219,6 @@ struct snd_soc_pcm_runtime {
struct snd_soc_platform *platform;
struct snd_soc_dai *codec_dai;
struct snd_soc_dai *cpu_dai;
- struct snd_soc_component *component; /* Only valid for AUX dev rtds */
struct snd_soc_dai **codec_dais;
unsigned int num_codecs;
@@ -1232,6 +1231,10 @@ struct snd_soc_pcm_runtime {
unsigned int num; /* 0-based and monotonic increasing */
struct list_head list; /* rtd list of the soc card */
+
+ /* bit field */
+ unsigned int dev_registered:1;
+ unsigned int pop_wait:1;
};
/* mixer control */
@@ -1541,11 +1544,10 @@ static inline void *snd_soc_platform_get_drvdata(struct snd_soc_platform *platfo
static inline void snd_soc_initialize_card_lists(struct snd_soc_card *card)
{
- INIT_LIST_HEAD(&card->codec_dev_list);
INIT_LIST_HEAD(&card->widgets);
INIT_LIST_HEAD(&card->paths);
INIT_LIST_HEAD(&card->dapm_list);
- INIT_LIST_HEAD(&card->aux_comp_list);
+ INIT_LIST_HEAD(&card->component_dev_list);
}
static inline bool snd_soc_volsw_is_stereo(struct soc_mixer_control *mc)
@@ -1642,25 +1644,43 @@ static inline struct snd_soc_platform *snd_soc_kcontrol_platform(
int snd_soc_util_init(void);
void snd_soc_util_exit(void);
-int snd_soc_of_parse_card_name(struct snd_soc_card *card,
- const char *propname);
-int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
- const char *propname);
+#define snd_soc_of_parse_card_name(card, propname) \
+ snd_soc_of_parse_card_name_from_node(card, NULL, propname)
+int snd_soc_of_parse_card_name_from_node(struct snd_soc_card *card,
+ struct device_node *np,
+ const char *propname);
+#define snd_soc_of_parse_audio_simple_widgets(card, propname)\
+ snd_soc_of_parse_audio_simple_widgets_from_node(card, NULL, propname)
+int snd_soc_of_parse_audio_simple_widgets_from_node(struct snd_soc_card *card,
+ struct device_node *np,
+ const char *propname);
+
int snd_soc_of_parse_tdm_slot(struct device_node *np,
unsigned int *tx_mask,
unsigned int *rx_mask,
unsigned int *slots,
unsigned int *slot_width);
-void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
+#define snd_soc_of_parse_audio_prefix(card, codec_conf, of_node, propname) \
+ snd_soc_of_parse_audio_prefix_from_node(card, NULL, codec_conf, \
+ of_node, propname)
+void snd_soc_of_parse_audio_prefix_from_node(struct snd_soc_card *card,
+ struct device_node *np,
struct snd_soc_codec_conf *codec_conf,
struct device_node *of_node,
const char *propname);
-int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
- const char *propname);
+
+#define snd_soc_of_parse_audio_routing(card, propname) \
+ snd_soc_of_parse_audio_routing_from_node(card, NULL, propname)
+int snd_soc_of_parse_audio_routing_from_node(struct snd_soc_card *card,
+ struct device_node *np,
+ const char *propname);
+
unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
const char *prefix,
struct device_node **bitclkmaster,
struct device_node **framemaster);
+int snd_soc_get_dai_name(struct of_phandle_args *args,
+ const char **dai_name);
int snd_soc_of_get_dai_name(struct device_node *of_node,
const char **dai_name);
int snd_soc_of_get_dai_link_codecs(struct device *dev,
@@ -1671,6 +1691,9 @@ int snd_soc_add_dai_link(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_link);
void snd_soc_remove_dai_link(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_link);
+struct snd_soc_dai_link *snd_soc_find_dai_link(struct snd_soc_card *card,
+ int id, const char *name,
+ const char *stream_name);
int snd_soc_register_dai(struct snd_soc_component *component,
struct snd_soc_dai_driver *dai_drv);
@@ -1697,4 +1720,24 @@ static inline void snd_soc_dapm_mutex_unlock(struct snd_soc_dapm_context *dapm)
mutex_unlock(&dapm->card->dapm_mutex);
}
+int snd_soc_component_enable_pin(struct snd_soc_component *component,
+ const char *pin);
+int snd_soc_component_enable_pin_unlocked(struct snd_soc_component *component,
+ const char *pin);
+int snd_soc_component_disable_pin(struct snd_soc_component *component,
+ const char *pin);
+int snd_soc_component_disable_pin_unlocked(struct snd_soc_component *component,
+ const char *pin);
+int snd_soc_component_nc_pin(struct snd_soc_component *component,
+ const char *pin);
+int snd_soc_component_nc_pin_unlocked(struct snd_soc_component *component,
+ const char *pin);
+int snd_soc_component_get_pin_status(struct snd_soc_component *component,
+ const char *pin);
+int snd_soc_component_force_enable_pin(struct snd_soc_component *component,
+ const char *pin);
+int snd_soc_component_force_enable_pin_unlocked(
+ struct snd_soc_component *component,
+ const char *pin);
+
#endif
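The snd_soc_component_*_pin() helpers declared above mirror the existing snd_soc_dapm_*_pin() calls but take the component, so a component's name_prefix is applied to the pin name automatically. A sketch of typical use from a jack-detection handler; the pin names and the surrounding driver are hypothetical:

/* Hypothetical headset-insert handler; "Headphone" and "Mic Bias" are
 * placeholder widget names. */
static void example_jack_event(struct snd_soc_component *component, bool inserted)
{
	if (inserted) {
		snd_soc_component_force_enable_pin(component, "Mic Bias");
		snd_soc_component_enable_pin(component, "Headphone");
	} else {
		snd_soc_component_disable_pin(component, "Headphone");
		snd_soc_component_disable_pin(component, "Mic Bias");
	}

	snd_soc_dapm_sync(snd_soc_component_get_dapm(component));
}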
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 5da2c829a718..01b3c9869a0d 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -1111,6 +1111,27 @@ TRACE_EVENT(f2fs_issue_discard,
(unsigned long long)__entry->blklen)
);
+TRACE_EVENT(f2fs_issue_reset_zone,
+
+ TP_PROTO(struct super_block *sb, block_t blkstart),
+
+ TP_ARGS(sb, blkstart),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(block_t, blkstart)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->blkstart = blkstart;
+ ),
+
+ TP_printk("dev = (%d,%d), reset zone at block = 0x%llx",
+ show_dev(__entry),
+ (unsigned long long)__entry->blkstart)
+);
+
TRACE_EVENT(f2fs_issue_flush,
TP_PROTO(struct super_block *sb, unsigned int nobarrier,
diff --git a/include/uapi/linux/dm-log-userspace.h b/include/uapi/linux/dm-log-userspace.h
index 0fa0d9ef06a5..05e91e14c501 100644
--- a/include/uapi/linux/dm-log-userspace.h
+++ b/include/uapi/linux/dm-log-userspace.h
@@ -7,6 +7,7 @@
#ifndef __DM_LOG_USERSPACE_H__
#define __DM_LOG_USERSPACE_H__
+#include <linux/types.h>
#include <linux/dm-ioctl.h> /* For DM_UUID_LEN */
/*
@@ -147,12 +148,12 @@
/*
* DM_ULOG_GET_REGION_SIZE corresponds to (found in dm-dirty-log.h):
- * uint32_t (*get_region_size)(struct dm_dirty_log *log);
+ * __u32 (*get_region_size)(struct dm_dirty_log *log);
*
* Payload-to-userspace:
* None.
* Payload-to-kernel:
- * uint64_t - contains the region size
+ * __u64 - contains the region size
*
* The region size is something that was determined at constructor time.
* It is returned in the payload area and 'data_size' is set to
@@ -168,11 +169,11 @@
* int (*is_clean)(struct dm_dirty_log *log, region_t region);
*
* Payload-to-userspace:
- * uint64_t - the region to get clean status on
+ * __u64 - the region to get clean status on
* Payload-to-kernel:
- * int64_t - 1 if clean, 0 otherwise
+ * __s64 - 1 if clean, 0 otherwise
*
- * Payload is sizeof(uint64_t) and contains the region for which the clean
+ * Payload is sizeof(__u64) and contains the region for which the clean
* status is being made.
*
* When the request has been processed, user-space must return the
@@ -187,9 +188,9 @@
* int can_block);
*
* Payload-to-userspace:
- * uint64_t - the region to get sync status on
+ * __u64 - the region to get sync status on
* Payload-to-kernel:
- * int64_t - 1 if in-sync, 0 otherwise
+ * __s64 - 1 if in-sync, 0 otherwise
*
* Exactly the same as 'is_clean' above, except this time asking "has the
* region been recovered?" vs. "is the region not being modified?"
@@ -203,7 +204,7 @@
* Payload-to-userspace:
* If the 'integrated_flush' directive is present in the constructor
* table, the payload is as same as DM_ULOG_MARK_REGION:
- * uint64_t [] - region(s) to mark
+ * __u64 [] - region(s) to mark
* else
* None
* Payload-to-kernel:
@@ -225,13 +226,13 @@
* void (*mark_region)(struct dm_dirty_log *log, region_t region);
*
* Payload-to-userspace:
- * uint64_t [] - region(s) to mark
+ * __u64 [] - region(s) to mark
* Payload-to-kernel:
* None.
*
* Incoming payload contains the one or more regions to mark dirty.
* The number of regions contained in the payload can be determined from
- * 'data_size/sizeof(uint64_t)'.
+ * 'data_size/sizeof(__u64)'.
*
* When the request has been processed, user-space must return the
* dm_ulog_request to the kernel - setting the 'error' field and clearing
@@ -244,13 +245,13 @@
* void (*clear_region)(struct dm_dirty_log *log, region_t region);
*
* Payload-to-userspace:
- * uint64_t [] - region(s) to clear
+ * __u64 [] - region(s) to clear
* Payload-to-kernel:
* None.
*
* Incoming payload contains the one or more regions to mark clean.
* The number of regions contained in the payload can be determined from
- * 'data_size/sizeof(uint64_t)'.
+ * 'data_size/sizeof(__u64)'.
*
* When the request has been processed, user-space must return the
* dm_ulog_request to the kernel - setting the 'error' field and clearing
@@ -266,8 +267,8 @@
* None.
* Payload-to-kernel:
* {
- * int64_t i; -- 1 if recovery necessary, 0 otherwise
- * uint64_t r; -- The region to recover if i=1
+ * __s64 i; -- 1 if recovery necessary, 0 otherwise
+ * __u64 r; -- The region to recover if i=1
* }
* 'data_size' should be set appropriately.
*
@@ -283,8 +284,8 @@
*
* Payload-to-userspace:
* {
- * uint64_t - region to set sync state on
- * int64_t - 0 if not-in-sync, 1 if in-sync
+ * __u64 - region to set sync state on
+ * __s64 - 0 if not-in-sync, 1 if in-sync
* }
* Payload-to-kernel:
* None.
@@ -302,7 +303,7 @@
* Payload-to-userspace:
* None.
* Payload-to-kernel:
- * uint64_t - the number of in-sync regions
+ * __u64 - the number of in-sync regions
*
* No incoming payload. Kernel-bound payload contains the number of
* regions that are in-sync (in a size_t).
@@ -350,11 +351,11 @@
* int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region);
*
* Payload-to-userspace:
- * uint64_t - region to determine recovery status on
+ * __u64 - region to determine recovery status on
* Payload-to-kernel:
* {
- * int64_t is_recovering; -- 0 if no, 1 if yes
- * uint64_t in_sync_hint; -- lowest region still needing resync
+ * __s64 is_recovering; -- 0 if no, 1 if yes
+ * __u64 in_sync_hint; -- lowest region still needing resync
* }
*
* When the request has been processed, user-space must return the
@@ -413,16 +414,16 @@ struct dm_ulog_request {
* differentiate between logs that are being swapped and have the
* same 'uuid'. (Think "live" and "inactive" device-mapper tables.)
*/
- uint64_t luid;
+ __u64 luid;
char uuid[DM_UUID_LEN];
char padding[3]; /* Padding because DM_UUID_LEN = 129 */
- uint32_t version; /* See DM_ULOG_REQUEST_VERSION */
- int32_t error; /* Used to report back processing errors */
+ __u32 version; /* See DM_ULOG_REQUEST_VERSION */
+ __s32 error; /* Used to report back processing errors */
- uint32_t seq; /* Sequence number for request */
- uint32_t request_type; /* DM_ULOG_* defined above */
- uint32_t data_size; /* How much data (not including this struct) */
+ __u32 seq; /* Sequence number for request */
+ __u32 request_type; /* DM_ULOG_* defined above */
+ __u32 data_size; /* How much data (not including this struct) */
char data[0];
};
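Switching from uint64_t/int64_t to __u64/__s64 only changes which header provides the types; the payload layout described in the comments above is unchanged. A sketch of a userspace log server filling the kernel-bound payload for DM_ULOG_GET_REGION_SIZE (the buffer handling around it is illustrative):

#include <string.h>
#include <linux/dm-log-userspace.h>

/* Illustrative only: answer DM_ULOG_GET_REGION_SIZE with a __u64 payload,
 * as documented above. */
static void fill_region_size(struct dm_ulog_request *rq, __u64 region_size)
{
	memcpy(rq->data, &region_size, sizeof(region_size));
	rq->data_size = sizeof(region_size);
	rq->error = 0;
}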
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index c1d11df07b28..36da93fbf188 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -258,6 +258,20 @@ struct fsxattr {
/* Policy provided via an ioctl on the topmost directory */
#define FS_KEY_DESCRIPTOR_SIZE 8
+#define FS_POLICY_FLAGS_PAD_4 0x00
+#define FS_POLICY_FLAGS_PAD_8 0x01
+#define FS_POLICY_FLAGS_PAD_16 0x02
+#define FS_POLICY_FLAGS_PAD_32 0x03
+#define FS_POLICY_FLAGS_PAD_MASK 0x03
+#define FS_POLICY_FLAGS_VALID 0x03
+
+/* Encryption algorithms */
+#define FS_ENCRYPTION_MODE_INVALID 0
+#define FS_ENCRYPTION_MODE_AES_256_XTS 1
+#define FS_ENCRYPTION_MODE_AES_256_GCM 2
+#define FS_ENCRYPTION_MODE_AES_256_CBC 3
+#define FS_ENCRYPTION_MODE_AES_256_CTS 4
+
struct fscrypt_policy {
__u8 version;
__u8 contents_encryption_mode;
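These mode and padding-flag values are consumed through struct fscrypt_policy, of which only the first fields appear in this hunk. A hedged userspace sketch, assuming the FS_IOC_SET_ENCRYPTION_POLICY ioctl and the remaining v1 policy fields (filenames_encryption_mode, flags, master_key_descriptor):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

/* Illustrative only: AES-256-XTS for contents, AES-256-CTS for names,
 * filenames padded to 16 bytes; 'key_desc' is a placeholder 8-byte
 * key descriptor. */
static int example_set_policy(int dirfd, const __u8 *key_desc)
{
	struct fscrypt_policy policy;

	memset(&policy, 0, sizeof(policy));
	policy.version = 0;
	policy.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
	policy.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
	policy.flags = FS_POLICY_FLAGS_PAD_16;	/* subset of FS_POLICY_FLAGS_VALID */
	memcpy(policy.master_key_descriptor, key_desc, FS_KEY_DESCRIPTOR_SIZE);

	return ioctl(dirfd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
}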
diff --git a/include/uapi/linux/hw_breakpoint.h b/include/uapi/linux/hw_breakpoint.h
index b04000a2296a..2b65efd19a46 100644
--- a/include/uapi/linux/hw_breakpoint.h
+++ b/include/uapi/linux/hw_breakpoint.h
@@ -4,7 +4,11 @@
enum {
HW_BREAKPOINT_LEN_1 = 1,
HW_BREAKPOINT_LEN_2 = 2,
+ HW_BREAKPOINT_LEN_3 = 3,
HW_BREAKPOINT_LEN_4 = 4,
+ HW_BREAKPOINT_LEN_5 = 5,
+ HW_BREAKPOINT_LEN_6 = 6,
+ HW_BREAKPOINT_LEN_7 = 7,
HW_BREAKPOINT_LEN_8 = 8,
};
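The added LEN_3/5/6/7 values let a breakpoint cover any byte span up to 8, which arm64 hardware can express. A sketch of the attribute setup for a 3-byte write watchpoint; the address is a placeholder and the helper around it is hypothetical:

#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

/* Illustrative only: watch 3 bytes starting at 'addr' for writes. */
static void example_bp_attr(struct perf_event_attr *attr, unsigned long addr)
{
	hw_breakpoint_init(attr);
	attr->bp_addr = addr;
	attr->bp_type = HW_BREAKPOINT_W;
	attr->bp_len = HW_BREAKPOINT_LEN_3;	/* newly representable length */
}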
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index 22e5e589a274..e54d14a7f876 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -40,6 +40,8 @@ enum iio_chan_type {
IIO_PH,
IIO_UVINDEX,
IIO_ELECTRICALCONDUCTIVITY,
+ IIO_COUNT,
+ IIO_INDEX,
};
enum iio_modifier {
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 4ee67cb99143..cac48eda1075 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -651,6 +651,9 @@ struct kvm_enable_cap {
};
/* for KVM_PPC_GET_PVINFO */
+
+#define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0)
+
struct kvm_ppc_pvinfo {
/* out */
__u32 flags;
@@ -682,8 +685,6 @@ struct kvm_ppc_smmu_info {
struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
};
-#define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0)
-
#define KVMIO 0xAE
/* machine type bits, to be used as argument to KVM_CREATE_VM */
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index c3e654c6d518..9930f3e9040f 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -84,6 +84,10 @@
#define MD_DISK_CANDIDATE 5 /* disk is added as spare (local) until confirmed
 * For clustered environments only.
*/
+#define MD_DISK_FAILFAST 10 /* Send REQ_FAILFAST if there are multiple
+ * devices available - and don't try to
+ * correct read errors.
+ */
#define MD_DISK_WRITEMOSTLY 9 /* disk is "write-mostly" in RAID1 config.
* read requests will only be sent here in
@@ -265,8 +269,9 @@ struct mdp_superblock_1 {
__le32 dev_number; /* permanent identifier of this device - not role in raid */
__le32 cnt_corrected_read; /* number of read errors that were corrected by re-writing */
__u8 device_uuid[16]; /* user-space setable, ignored by kernel */
- __u8 devflags; /* per-device flags. Only one defined...*/
+ __u8 devflags; /* per-device flags. Only two defined...*/
#define WriteMostly1 1 /* mask for writemostly flag in above */
+#define FailFast1 2 /* Should avoid retries and fixups and just fail */
/* Bad block log. If there are any bad blocks the feature flag is set.
* If offset and size are non-zero, that space is reserved and available
*/
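FailFast1 lives in the per-device devflags byte of the v1 superblock next to WriteMostly1, while MD_DISK_FAILFAST is the matching bit in the ioctl-visible disk state word. A sketch of how each would be tested; the variables are placeholders for structures obtained elsewhere:

/* Illustrative only: 'sb' is a struct mdp_superblock_1, 'desc' an mdp_disk_t. */
static int example_failfast_set(const struct mdp_superblock_1 *sb,
				const mdp_disk_t *desc)
{
	int sb_failfast = !!(sb->devflags & FailFast1);
	int disk_failfast = !!(desc->state & (1 << MD_DISK_FAILFAST));

	return sb_failfast || disk_failfast;
}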
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index a8acc24765fe..2c5d7c4a69e3 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -423,6 +423,12 @@ struct usb_endpoint_descriptor {
#define USB_ENDPOINT_XFER_INT 3
#define USB_ENDPOINT_MAX_ADJUSTABLE 0x80
+#define USB_ENDPOINT_MAXP_MASK 0x07ff
+#define USB_EP_MAXP_MULT_SHIFT 11
+#define USB_EP_MAXP_MULT_MASK (3 << USB_EP_MAXP_MULT_SHIFT)
+#define USB_EP_MAXP_MULT(m) \
+ (((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT)
+
/* The USB 3.0 spec redefines bits 5:4 of bmAttributes as interrupt ep type. */
#define USB_ENDPOINT_INTRTYPE 0x30
#define USB_ENDPOINT_INTR_PERIODIC (0 << 4)
@@ -623,11 +629,25 @@ static inline int usb_endpoint_is_isoc_out(
* usb_endpoint_maxp - get endpoint's max packet size
* @epd: endpoint to be checked
*
- * Returns @epd's max packet
+ * Returns @epd's max packet bits [10:0]
*/
static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd)
{
- return __le16_to_cpu(epd->wMaxPacketSize);
+ return __le16_to_cpu(epd->wMaxPacketSize) & USB_ENDPOINT_MAXP_MASK;
+}
+
+/**
+ * usb_endpoint_maxp_mult - get endpoint's transactional opportunities
+ * @epd: endpoint to be checked
+ *
+ * Return @epd's wMaxPacketSize[12:11] + 1
+ */
+static inline int
+usb_endpoint_maxp_mult(const struct usb_endpoint_descriptor *epd)
+{
+ int maxp = __le16_to_cpu(epd->wMaxPacketSize);
+
+ return USB_EP_MAXP_MULT(maxp) + 1;
}
static inline int usb_endpoint_interrupt_type(
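With both helpers, a high-bandwidth endpoint's wMaxPacketSize decomposes cleanly: bits 10:0 give the base packet size and bits 12:11 the extra transactions per microframe. A short worked sketch; the descriptor value is illustrative:

#include <linux/usb/ch9.h>

/* Illustrative only: wMaxPacketSize = 0x1400 gives maxp = 1024 (bits 10:0)
 * and mult = 3 (bits 12:11 = 2, plus one). */
static unsigned int example_bytes_per_uframe(const struct usb_endpoint_descriptor *epd)
{
	unsigned int maxp = usb_endpoint_maxp(epd);		/* 1024 for 0x1400 */
	unsigned int mult = usb_endpoint_maxp_mult(epd);	/* 3 for 0x1400 */

	return maxp * mult;	/* 3072 bytes per microframe */
}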
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index 819d895edfdc..6702533c8bd8 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -33,6 +33,11 @@
*/
#define SND_SOC_TPLG_STREAM_CONFIG_MAX 8
+/*
+ * Maximum number of physical link's hardware configs
+ */
+#define SND_SOC_TPLG_HW_CONFIG_MAX 8
+
/* individual kcontrol info types - can be mixed with other types */
#define SND_SOC_TPLG_CTL_VOLSW 1
#define SND_SOC_TPLG_CTL_VOLSW_SX 2
@@ -77,7 +82,8 @@
#define SND_SOC_TPLG_NUM_TEXTS 16
/* ABI version */
-#define SND_SOC_TPLG_ABI_VERSION 0x5
+#define SND_SOC_TPLG_ABI_VERSION 0x5 /* current version */
+#define SND_SOC_TPLG_ABI_VERSION_MIN 0x4 /* oldest version supported */
/* Max size of TLV data */
#define SND_SOC_TPLG_TLV_SIZE 32
@@ -99,8 +105,8 @@
#define SND_SOC_TPLG_TYPE_CODEC_LINK 9
#define SND_SOC_TPLG_TYPE_BACKEND_LINK 10
#define SND_SOC_TPLG_TYPE_PDATA 11
-#define SND_SOC_TPLG_TYPE_BE_DAI 12
-#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_BE_DAI
+#define SND_SOC_TPLG_TYPE_DAI 12
+#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_DAI
/* vendor block IDs - please add new vendor types to end */
#define SND_SOC_TPLG_TYPE_VENDOR_FW 1000
@@ -119,11 +125,32 @@
#define SND_SOC_TPLG_TUPLE_TYPE_WORD 4
#define SND_SOC_TPLG_TUPLE_TYPE_SHORT 5
-/* BE DAI flags */
+/* DAI flags */
#define SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_RATES (1 << 0)
#define SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_CHANNELS (1 << 1)
#define SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_SAMPLEBITS (1 << 2)
+/* DAI physical PCM data formats.
+ * Add new formats to the end of the list.
+ */
+#define SND_SOC_DAI_FORMAT_I2S 1 /* I2S mode */
+#define SND_SOC_DAI_FORMAT_RIGHT_J 2 /* Right Justified mode */
+#define SND_SOC_DAI_FORMAT_LEFT_J 3 /* Left Justified mode */
+#define SND_SOC_DAI_FORMAT_DSP_A 4 /* L data MSB after FRM LRC */
+#define SND_SOC_DAI_FORMAT_DSP_B 5 /* L data MSB during FRM LRC */
+#define SND_SOC_DAI_FORMAT_AC97 6 /* AC97 */
+#define SND_SOC_DAI_FORMAT_PDM 7 /* Pulse density modulation */
+
+/* left and right justified also known as MSB and LSB respectively */
+#define SND_SOC_DAI_FORMAT_MSB SND_SOC_DAI_FORMAT_LEFT_J
+#define SND_SOC_DAI_FORMAT_LSB SND_SOC_DAI_FORMAT_RIGHT_J
+
+/* DAI link flags */
+#define SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_RATES (1 << 0)
+#define SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_CHANNELS (1 << 1)
+#define SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_SAMPLEBITS (1 << 2)
+#define SND_SOC_TPLG_LNK_FLGBIT_VOICE_WAKEUP (1 << 3)
+
/*
* Block Header.
* This header precedes all object and object arrays below.
@@ -267,6 +294,35 @@ struct snd_soc_tplg_stream {
__le32 channels; /* channels */
} __attribute__((packed));
+
+/*
+ * Describes a physical link's runtime supported hardware config,
+ * i.e. hardware audio formats.
+ */
+struct snd_soc_tplg_hw_config {
+ __le32 size; /* in bytes of this structure */
+ __le32 id; /* unique ID - used to match */
+ __le32 fmt; /* SND_SOC_DAI_FORMAT_ format value */
+ __u8 clock_gated; /* 1 if clock can be gated to save power */
+ __u8 invert_bclk; /* 1 for inverted BCLK, 0 for normal */
+ __u8 invert_fsync; /* 1 for inverted frame clock, 0 for normal */
+ __u8 bclk_master; /* 1 for master of BCLK, 0 for slave */
+ __u8 fsync_master; /* 1 for master of FSYNC, 0 for slave */
+ __u8 mclk_direction; /* 0 for input, 1 for output */
+ __le16 reserved; /* for 32bit alignment */
+ __le32 mclk_rate; /* MCLK or SYSCLK frequency in Hz */
+ __le32 bclk_rate; /* BCLK frequency in Hz */
+ __le32 fsync_rate; /* frame clock in Hz */
+ __le32 tdm_slots; /* number of TDM slots in use */
+ __le32 tdm_slot_width; /* width in bits for each slot */
+ __le32 tx_slots; /* bit mask for active Tx slots */
+ __le32 rx_slots; /* bit mask for active Rx slots */
+ __le32 tx_channels; /* number of Tx channels */
+ __le32 tx_chanmap[SND_SOC_TPLG_MAX_CHAN]; /* array of slot number */
+ __le32 rx_channels; /* number of Rx channels */
+ __le32 rx_chanmap[SND_SOC_TPLG_MAX_CHAN]; /* array of slot number */
+} __attribute__((packed));
+
/*
* Manifest. List totals for each payload type. Not used in parsing, but will
* be passed to the component driver before any other objects in order for any
@@ -286,7 +342,7 @@ struct snd_soc_tplg_manifest {
__le32 graph_elems; /* number of graph elements */
__le32 pcm_elems; /* number of PCM elements */
__le32 dai_link_elems; /* number of DAI link elements */
- __le32 be_dai_elems; /* number of BE DAI elements */
+ __le32 dai_elems; /* number of physical DAI elements */
__le32 reserved[20]; /* reserved for new ABI element types */
struct snd_soc_tplg_private priv;
} __attribute__((packed));
@@ -434,13 +490,16 @@ struct snd_soc_tplg_pcm {
struct snd_soc_tplg_stream stream[SND_SOC_TPLG_STREAM_CONFIG_MAX]; /* for DAI link */
__le32 num_streams; /* number of streams */
struct snd_soc_tplg_stream_caps caps[2]; /* playback and capture for DAI */
+ __le32 flag_mask; /* bitmask of flags to configure */
+ __le32 flags; /* SND_SOC_TPLG_LNK_FLGBIT_* flag value */
+ struct snd_soc_tplg_private priv;
} __attribute__((packed));
/*
- * Describes the BE or CC link runtime supported configs or params
+ * Describes the physical link runtime supported configs or params
*
- * File block representation for BE/CC link config :-
+ * File block representation for physical link config :-
* +-----------------------------------+-----+
* | struct snd_soc_tplg_hdr | 1 |
* +-----------------------------------+-----+
@@ -450,21 +509,30 @@ struct snd_soc_tplg_pcm {
struct snd_soc_tplg_link_config {
__le32 size; /* in bytes of this structure */
__le32 id; /* unique ID - used to match */
+ char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; /* name - used to match */
+ char stream_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; /* stream name - used to match */
struct snd_soc_tplg_stream stream[SND_SOC_TPLG_STREAM_CONFIG_MAX]; /* supported configs playback and capture */
__le32 num_streams; /* number of streams */
+ struct snd_soc_tplg_hw_config hw_config[SND_SOC_TPLG_HW_CONFIG_MAX]; /* hw configs */
+ __le32 num_hw_configs; /* number of hw configs */
+ __le32 default_hw_config_id; /* default hw config ID for init */
+ __le32 flag_mask; /* bitmask of flags to configure */
+ __le32 flags; /* SND_SOC_TPLG_LNK_FLGBIT_* flag value */
+ struct snd_soc_tplg_private priv;
} __attribute__((packed));
/*
- * Describes SW/FW specific features of BE DAI.
+ * Describes SW/FW specific features of physical DAI.
+ * It can be used to configure backend DAIs for DPCM.
*
- * File block representation for BE DAI :-
+ * File block representation for physical DAI :-
* +-----------------------------------+-----+
* | struct snd_soc_tplg_hdr | 1 |
* +-----------------------------------+-----+
- * | struct snd_soc_tplg_be_dai | N |
+ * | struct snd_soc_tplg_dai | N |
* +-----------------------------------+-----+
*/
-struct snd_soc_tplg_be_dai {
+struct snd_soc_tplg_dai {
__le32 size; /* in bytes of this structure */
char dai_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; /* name - used to match */
__le32 dai_id; /* unique ID - used to match */
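struct snd_soc_tplg_hw_config is normally emitted by topology tools rather than written by hand, but a static initializer shows how its fields map onto a DAI format; all __le32 members are stored little-endian. The rates and slot layout below are placeholders:

#include <sound/asoc.h>

/* Illustrative only: an I2S link mastering BCLK and FSYNC, 2 slots of
 * 32 bits at 48 kHz; every value here is an example, not a requirement. */
static const struct snd_soc_tplg_hw_config example_hw_config = {
	.size		= cpu_to_le32(sizeof(struct snd_soc_tplg_hw_config)),
	.id		= cpu_to_le32(1),
	.fmt		= cpu_to_le32(SND_SOC_DAI_FORMAT_I2S),
	.bclk_master	= 1,
	.fsync_master	= 1,
	.mclk_rate	= cpu_to_le32(12288000),
	.bclk_rate	= cpu_to_le32(3072000),	/* 48000 * 2 slots * 32 bits */
	.fsync_rate	= cpu_to_le32(48000),
	.tdm_slots	= cpu_to_le32(2),
	.tdm_slot_width	= cpu_to_le32(32),
	.tx_slots	= cpu_to_le32(0x3),
	.rx_slots	= cpu_to_le32(0x3),
};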
diff --git a/include/uapi/sound/snd_sst_tokens.h b/include/uapi/sound/snd_sst_tokens.h
index 1ee2e943d66a..93392bedcc58 100644
--- a/include/uapi/sound/snd_sst_tokens.h
+++ b/include/uapi/sound/snd_sst_tokens.h
@@ -157,6 +157,10 @@
*
* %SKL_TKN_STR_LIB_NAME: Specifies the library name
*
+ * %SKL_TKN_U32_PMODE: Specifies the power mode for pipe
+ *
+ * %SKL_TKL_U32_D0I3_CAPS: Specifies the D0i3 capability for module
+ *
 * module_id and loadable flags don't have tokens as these values will be
* read from the DSP FW manifest
*/
@@ -208,7 +212,9 @@ enum SKL_TKNS {
SKL_TKN_U32_PROC_DOMAIN,
SKL_TKN_U32_LIB_COUNT,
SKL_TKN_STR_LIB_NAME,
- SKL_TKN_MAX = SKL_TKN_STR_LIB_NAME,
+ SKL_TKN_U32_PMODE,
+ SKL_TKL_U32_D0I3_CAPS,
+ SKL_TKN_MAX = SKL_TKL_U32_D0I3_CAPS,
};
#endif
diff --git a/include/xen/arm/hypercall.h b/include/xen/arm/hypercall.h
new file mode 100644
index 000000000000..9d874db13c0e
--- /dev/null
+++ b/include/xen/arm/hypercall.h
@@ -0,0 +1,87 @@
+/******************************************************************************
+ * hypercall.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _ASM_ARM_XEN_HYPERCALL_H
+#define _ASM_ARM_XEN_HYPERCALL_H
+
+#include <linux/bug.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/sched.h>
+#include <xen/interface/platform.h>
+
+long privcmd_call(unsigned call, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5);
+int HYPERVISOR_xen_version(int cmd, void *arg);
+int HYPERVISOR_console_io(int cmd, int count, char *str);
+int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
+int HYPERVISOR_sched_op(int cmd, void *arg);
+int HYPERVISOR_event_channel_op(int cmd, void *arg);
+unsigned long HYPERVISOR_hvm_op(int op, void *arg);
+int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
+int HYPERVISOR_physdev_op(int cmd, void *arg);
+int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
+int HYPERVISOR_tmem_op(void *arg);
+int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
+int HYPERVISOR_platform_op_raw(void *arg);
+static inline int HYPERVISOR_platform_op(struct xen_platform_op *op)
+{
+ op->interface_version = XENPF_INTERFACE_VERSION;
+ return HYPERVISOR_platform_op_raw(op);
+}
+int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr);
+
+static inline int
+HYPERVISOR_suspend(unsigned long start_info_mfn)
+{
+ struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
+
+ /* start_info_mfn is unused on ARM */
+ return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
+}
+
+static inline void
+MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
+ unsigned int new_val, unsigned long flags)
+{
+ BUG();
+}
+
+static inline void
+MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
+ int count, int *success_count, domid_t domid)
+{
+ BUG();
+}
+
+#endif /* _ASM_ARM_XEN_HYPERCALL_H */
diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h
new file mode 100644
index 000000000000..95251512e2c4
--- /dev/null
+++ b/include/xen/arm/hypervisor.h
@@ -0,0 +1,39 @@
+#ifndef _ASM_ARM_XEN_HYPERVISOR_H
+#define _ASM_ARM_XEN_HYPERVISOR_H
+
+#include <linux/init.h>
+
+extern struct shared_info *HYPERVISOR_shared_info;
+extern struct start_info *xen_start_info;
+
+/* Lazy mode for batching updates / context switch */
+enum paravirt_lazy_mode {
+ PARAVIRT_LAZY_NONE,
+ PARAVIRT_LAZY_MMU,
+ PARAVIRT_LAZY_CPU,
+};
+
+static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
+{
+ return PARAVIRT_LAZY_NONE;
+}
+
+extern struct dma_map_ops *xen_dma_ops;
+
+#ifdef CONFIG_XEN
+void __init xen_early_init(void);
+#else
+static inline void xen_early_init(void) { return; }
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+static inline void xen_arch_register_cpu(int num)
+{
+}
+
+static inline void xen_arch_unregister_cpu(int num)
+{
+}
+#endif
+
+#endif /* _ASM_ARM_XEN_HYPERVISOR_H */
diff --git a/include/xen/arm/interface.h b/include/xen/arm/interface.h
new file mode 100644
index 000000000000..75d596862892
--- /dev/null
+++ b/include/xen/arm/interface.h
@@ -0,0 +1,85 @@
+/******************************************************************************
+ * Guest OS interface to ARM Xen.
+ *
+ * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
+ */
+
+#ifndef _ASM_ARM_XEN_INTERFACE_H
+#define _ASM_ARM_XEN_INTERFACE_H
+
+#include <linux/types.h>
+
+#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
+
+#define __DEFINE_GUEST_HANDLE(name, type) \
+ typedef struct { union { type *p; uint64_aligned_t q; }; } \
+ __guest_handle_ ## name
+
+#define DEFINE_GUEST_HANDLE_STRUCT(name) \
+ __DEFINE_GUEST_HANDLE(name, struct name)
+#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
+#define GUEST_HANDLE(name) __guest_handle_ ## name
+
+#define set_xen_guest_handle(hnd, val) \
+ do { \
+ if (sizeof(hnd) == 8) \
+ *(uint64_t *)&(hnd) = 0; \
+ (hnd).p = val; \
+ } while (0)
+
+#define __HYPERVISOR_platform_op_raw __HYPERVISOR_platform_op
+
+#ifndef __ASSEMBLY__
+/* Explicitly size integers that represent pfns in the interface with
+ * Xen so that we can have one ABI that works for 32 and 64 bit guests.
+ * Note that this means that the xen_pfn_t type may be capable of
+ * representing pfn's which the guest cannot represent in its own pfn
+ * type. However since pfn space is controlled by the guest this is
+ * fine since it simply wouldn't be able to create any such pfns in
+ * the first place.
+ */
+typedef uint64_t xen_pfn_t;
+#define PRI_xen_pfn "llx"
+typedef uint64_t xen_ulong_t;
+#define PRI_xen_ulong "llx"
+typedef int64_t xen_long_t;
+#define PRI_xen_long "llx"
+/* Guest handles for primitive C types. */
+__DEFINE_GUEST_HANDLE(uchar, unsigned char);
+__DEFINE_GUEST_HANDLE(uint, unsigned int);
+DEFINE_GUEST_HANDLE(char);
+DEFINE_GUEST_HANDLE(int);
+DEFINE_GUEST_HANDLE(void);
+DEFINE_GUEST_HANDLE(uint64_t);
+DEFINE_GUEST_HANDLE(uint32_t);
+DEFINE_GUEST_HANDLE(xen_pfn_t);
+DEFINE_GUEST_HANDLE(xen_ulong_t);
+
+/* Maximum number of virtual CPUs in multi-processor guests. */
+#define MAX_VIRT_CPUS 1
+
+struct arch_vcpu_info { };
+struct arch_shared_info { };
+
+/* TODO: Move pvclock definitions some place arch independent */
+struct pvclock_vcpu_time_info {
+ u32 version;
+ u32 pad0;
+ u64 tsc_timestamp;
+ u64 system_time;
+ u32 tsc_to_system_mul;
+ s8 tsc_shift;
+ u8 flags;
+ u8 pad[2];
+} __attribute__((__packed__)); /* 32 bytes */
+
+/* It is OK to have a 16-byte struct with no padding because it is packed */
+struct pvclock_wall_clock {
+ u32 version;
+ u32 sec;
+ u32 nsec;
+ u32 sec_hi;
+} __attribute__((__packed__));
+#endif
+
+#endif /* _ASM_ARM_XEN_INTERFACE_H */
diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h
new file mode 100644
index 000000000000..95ce6ac3a971
--- /dev/null
+++ b/include/xen/arm/page-coherent.h
@@ -0,0 +1,98 @@
+#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
+#define _ASM_ARM_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-mapping.h>
+
+void __xen_dma_map_page(struct device *hwdev, struct page *page,
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+void __xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+void __xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
+{
+ return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
+{
+ __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ unsigned long page_pfn = page_to_xen_pfn(page);
+ unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
+ unsigned long compound_pages =
+ (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
+ bool local = (page_pfn <= dev_pfn) &&
+ (dev_pfn - page_pfn < compound_pages);
+
+ /*
+ * Dom0 is mapped 1:1, while the Linux page can span across
+ * multiple Xen pages, it's not possible for it to contain a
+ * mix of local and foreign Xen pages. So if the first xen_pfn
+ * == mfn the page is local otherwise it's a foreign page
+ * grant-mapped in dom0. If the page is local we can safely
+ * call the native dma_ops function, otherwise we call the xen
+ * specific function.
+ */
+ if (local)
+ __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+ else
+ __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+ /*
+	 * Dom0 is mapped 1:1, and while the Linux page can span across
+	 * multiple Xen pages, it's not possible to have a mix of local and
+	 * foreign Xen pages. Dom0 is mapped 1:1, so calling pfn_valid on a
+ * foreign mfn will always return false. If the page is local we can
+ * safely call the native dma_ops function, otherwise we call the xen
+ * specific function.
+ */
+ if (pfn_valid(pfn)) {
+ if (__generic_dma_ops(hwdev)->unmap_page)
+ __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+ } else
+ __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+ if (pfn_valid(pfn)) {
+ if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
+ __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+ } else
+ __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+ if (pfn_valid(pfn)) {
+ if (__generic_dma_ops(hwdev)->sync_single_for_device)
+ __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+ } else
+ __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
+}
+
+#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/include/xen/arm/page.h b/include/xen/arm/page.h
new file mode 100644
index 000000000000..415dbc6e43fd
--- /dev/null
+++ b/include/xen/arm/page.h
@@ -0,0 +1,122 @@
+#ifndef _ASM_ARM_XEN_PAGE_H
+#define _ASM_ARM_XEN_PAGE_H
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+#include <linux/pfn.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+
+#include <xen/xen.h>
+#include <xen/interface/grant_table.h>
+
+#define phys_to_machine_mapping_valid(pfn) (1)
+
+/* Xen machine address */
+typedef struct xmaddr {
+ phys_addr_t maddr;
+} xmaddr_t;
+
+/* Xen pseudo-physical address */
+typedef struct xpaddr {
+ phys_addr_t paddr;
+} xpaddr_t;
+
+#define XMADDR(x) ((xmaddr_t) { .maddr = (x) })
+#define XPADDR(x) ((xpaddr_t) { .paddr = (x) })
+
+#define INVALID_P2M_ENTRY (~0UL)
+
+/*
+ * The pseudo-physical frame (pfn) used in all the helpers is always based
+ * on Xen page granularity (i.e 4KB).
+ *
+ * A Linux page may be split across multiple non-contiguous Xen pages, so we
+ * have to keep track of frames based on 4KB page granularity.
+ *
+ * PV drivers should never make a direct usage of those helpers (particularly
+ * pfn_to_gfn and gfn_to_pfn).
+ */
+
+unsigned long __pfn_to_mfn(unsigned long pfn);
+extern struct rb_root phys_to_mach;
+
+/* Pseudo-physical <-> Guest conversion */
+static inline unsigned long pfn_to_gfn(unsigned long pfn)
+{
+ return pfn;
+}
+
+static inline unsigned long gfn_to_pfn(unsigned long gfn)
+{
+ return gfn;
+}
+
+/* Pseudo-physical <-> BUS conversion */
+static inline unsigned long pfn_to_bfn(unsigned long pfn)
+{
+ unsigned long mfn;
+
+ if (phys_to_mach.rb_node != NULL) {
+ mfn = __pfn_to_mfn(pfn);
+ if (mfn != INVALID_P2M_ENTRY)
+ return mfn;
+ }
+
+ return pfn;
+}
+
+static inline unsigned long bfn_to_pfn(unsigned long bfn)
+{
+ return bfn;
+}
+
+#define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn)
+
+/* VIRT <-> GUEST conversion */
+#define virt_to_gfn(v) (pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT))
+#define gfn_to_virt(m) (__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT))
+
+/* Only used in PV code. But ARM guests are always HVM. */
+static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
+{
+ BUG();
+}
+
+/* TODO: this shouldn't be here but it is because the frontend drivers
+ * are using it (it's rolled in headers) even though we won't hit the code path.
+ * So for right now just punt with this.
+ */
+static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
+{
+ BUG();
+ return NULL;
+}
+
+extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count);
+
+extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+ struct gnttab_unmap_grant_ref *kunmap_ops,
+ struct page **pages, unsigned int count);
+
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
+ unsigned long nr_pages);
+
+static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+ return __set_phys_to_machine(pfn, mfn);
+}
+
+#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
+#define xen_unmap(cookie) iounmap((cookie))
+
+bool xen_arch_need_swiotlb(struct device *dev,
+ phys_addr_t phys,
+ dma_addr_t dev_addr);
+unsigned long xen_get_swiotlb_free_pages(unsigned int order);
+
+#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index 7c35e279d1e3..a0083be5d529 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -51,9 +51,6 @@ xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir);
extern int
-xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
-
-extern int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
extern int
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 32b944b7cebd..271ba62503c7 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -151,6 +151,10 @@ __scanf(4, 5)
int xenbus_scanf(struct xenbus_transaction t,
const char *dir, const char *node, const char *fmt, ...);
+/* Read an (optional) unsigned value. */
+unsigned int xenbus_read_unsigned(const char *dir, const char *node,
+ unsigned int default_val);
+
/* Single printf and write: returns -errno or 0. */
__printf(4, 5)
int xenbus_printf(struct xenbus_transaction t,
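xenbus_read_unsigned() removes the usual xenbus_scanf() boilerplate for optional numeric nodes: it returns the parsed value, or the supplied default when the node is missing or malformed. A sketch of a frontend probing an optional backend feature; the node name is illustrative:

#include <xen/xenbus.h>

/* Illustrative only: read an optional "feature-persistent" node from the
 * backend directory, defaulting to 0 when absent. */
static bool example_backend_supports_persistent(struct xenbus_device *dev)
{
	return xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);
}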
diff --git a/init/main.c b/init/main.c
index fa201166cba7..23c275cca73a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -553,6 +553,14 @@ asmlinkage __visible void __init start_kernel(void)
"Interrupts were enabled *very* early, fixing it\n"))
local_irq_disable();
idr_init_cache();
+
+ /*
+ * Allow workqueue creation and work item queueing/cancelling
+ * early. Work item execution depends on kthreads and starts after
+ * workqueue_init().
+ */
+ workqueue_init_early();
+
rcu_init();
/* trace_printk() and trace points may be used after this */
@@ -1009,6 +1017,8 @@ static noinline void __init kernel_init_freeable(void)
smp_prepare_cpus(setup_max_cpus);
+ workqueue_init();
+
do_pre_smp_initcalls();
lockup_detector_init();
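The practical effect of the two-stage bring-up is that code running between workqueue_init_early() and workqueue_init() may create workqueues and queue or cancel work items, but nothing executes until the kworkers exist. A sketch of early-boot code relying on this; the work handler and hook are hypothetical:

#include <linux/workqueue.h>
#include <linux/printk.h>

/* Illustrative only: work queued before workqueue_init() is merely recorded
 * and runs once the initial kworkers have been created. */
static void example_early_fn(struct work_struct *work)
{
	pr_info("runs only after workqueue_init()\n");
}
static DECLARE_WORK(example_early_work, example_early_fn);

static void example_early_boot_hook(void)
{
	schedule_work(&example_early_work);	/* queued, not yet executed */
}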
diff --git a/kernel/Makefile b/kernel/Makefile
index eb26e12c6c2a..eaee9de224bd 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -115,8 +115,6 @@ obj-$(CONFIG_HAS_IOMEM) += memremap.o
$(obj)/configs.o: $(obj)/config_data.h
-# config_data.h contains the same information as ikconfig.h but gzipped.
-# Info from config_data can be extracted from /proc/config*
targets += config_data.gz
$(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
$(call if_changed,gzip)
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 168ff442ebde..97b0df71303e 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -482,16 +482,7 @@ void pm_qos_update_request(struct pm_qos_request *req,
return;
}
- /*
- * This function may be called very early during boot, for example,
- * from of_clk_init(), where irq needs to stay disabled.
- * cancel_delayed_work_sync() assumes that irq is enabled on
- * invocation and re-enables it on return. Avoid calling it until
- * workqueue is initialized.
- */
- if (keventd_up())
- cancel_delayed_work_sync(&req->work);
-
+ cancel_delayed_work_sync(&req->work);
__pm_qos_update_request(req, new_value);
}
EXPORT_SYMBOL_GPL(pm_qos_update_request);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 479d840db286..1d9fb6543a66 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -290,6 +290,8 @@ module_param_named(disable_numa, wq_disable_numa, bool, 0444);
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);
+static bool wq_online; /* can kworkers be created yet? */
+
static bool wq_numa_enabled; /* unbound NUMA affinity enabled */
/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
@@ -2583,6 +2585,9 @@ void flush_workqueue(struct workqueue_struct *wq)
};
int next_color;
+ if (WARN_ON(!wq_online))
+ return;
+
lock_map_acquire(&wq->lockdep_map);
lock_map_release(&wq->lockdep_map);
@@ -2843,6 +2848,9 @@ bool flush_work(struct work_struct *work)
{
struct wq_barrier barr;
+ if (WARN_ON(!wq_online))
+ return false;
+
lock_map_acquire(&work->lockdep_map);
lock_map_release(&work->lockdep_map);
@@ -2913,7 +2921,13 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
mark_work_canceling(work);
local_irq_restore(flags);
- flush_work(work);
+ /*
+ * This allows canceling during early boot. We know that @work
+ * isn't executing.
+ */
+ if (wq_online)
+ flush_work(work);
+
clear_work_data(work);
/*
@@ -3364,7 +3378,7 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
goto fail;
/* create and start the initial worker */
- if (!create_worker(pool))
+ if (wq_online && !create_worker(pool))
goto fail;
/* install */
@@ -3429,6 +3443,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
{
struct workqueue_struct *wq = pwq->wq;
bool freezable = wq->flags & WQ_FREEZABLE;
+ unsigned long flags;
/* for @wq->saved_max_active */
lockdep_assert_held(&wq->mutex);
@@ -3437,7 +3452,8 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
if (!freezable && pwq->max_active == wq->saved_max_active)
return;
- spin_lock_irq(&pwq->pool->lock);
+ /* this function can be called during early boot w/ irq disabled */
+ spin_lock_irqsave(&pwq->pool->lock, flags);
/*
* During [un]freezing, the caller is responsible for ensuring that
@@ -3460,7 +3476,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
pwq->max_active = 0;
}
- spin_unlock_irq(&pwq->pool->lock);
+ spin_unlock_irqrestore(&pwq->pool->lock, flags);
}
/* initialize newly alloced @pwq which is associated with @wq and @pool */
@@ -4033,6 +4049,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
for (i = 0; i < WORK_NR_COLORS; i++) {
if (WARN_ON(pwq->nr_in_flight[i])) {
mutex_unlock(&wq->mutex);
+ show_workqueue_state();
return;
}
}
@@ -4041,6 +4058,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
WARN_ON(pwq->nr_active) ||
WARN_ON(!list_empty(&pwq->delayed_works))) {
mutex_unlock(&wq->mutex);
+ show_workqueue_state();
return;
}
}
@@ -5467,7 +5485,17 @@ static void __init wq_numa_init(void)
wq_numa_enabled = true;
}
-static int __init init_workqueues(void)
+/**
+ * workqueue_init_early - early init for workqueue subsystem
+ *
+ * This is the first half of two-staged workqueue subsystem initialization
+ * and invoked as soon as the bare basics - memory allocation, cpumasks and
+ * idr are up. It sets up all the data structures and system workqueues
+ * and allows early boot code to create workqueues and queue/cancel work
+ * items. Actual work item execution starts only after kthreads can be
+ * created and scheduled right before early initcalls.
+ */
+int __init workqueue_init_early(void)
{
int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
int i, cpu;
@@ -5479,8 +5507,6 @@ static int __init init_workqueues(void)
pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
- wq_numa_init();
-
/* initialize CPU pools */
for_each_possible_cpu(cpu) {
struct worker_pool *pool;
@@ -5500,16 +5526,6 @@ static int __init init_workqueues(void)
}
}
- /* create the initial worker */
- for_each_online_cpu(cpu) {
- struct worker_pool *pool;
-
- for_each_cpu_worker_pool(pool, cpu) {
- pool->flags &= ~POOL_DISASSOCIATED;
- BUG_ON(!create_worker(pool));
- }
- }
-
/* create default unbound and ordered wq attrs */
for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
struct workqueue_attrs *attrs;
@@ -5546,8 +5562,59 @@ static int __init init_workqueues(void)
!system_power_efficient_wq ||
!system_freezable_power_efficient_wq);
+ return 0;
+}
+
+/**
+ * workqueue_init - bring workqueue subsystem fully online
+ *
+ * This is the latter half of two-staged workqueue subsystem initialization
+ * and invoked as soon as kthreads can be created and scheduled.
+ * Workqueues have been created and work items queued on them, but there
+ * are no kworkers executing the work items yet. Populate the worker pools
+ * with the initial workers and enable future kworker creations.
+ */
+int __init workqueue_init(void)
+{
+ struct workqueue_struct *wq;
+ struct worker_pool *pool;
+ int cpu, bkt;
+
+ /*
+ * It'd be simpler to initialize NUMA in workqueue_init_early() but
+ * CPU to node mapping may not be available that early on some
+ * archs such as power and arm64. As per-cpu pools created
+ * previously could be missing node hint and unbound pools NUMA
+ * affinity, fix them up.
+ */
+ wq_numa_init();
+
+ mutex_lock(&wq_pool_mutex);
+
+ for_each_possible_cpu(cpu) {
+ for_each_cpu_worker_pool(pool, cpu) {
+ pool->node = cpu_to_node(cpu);
+ }
+ }
+
+ list_for_each_entry(wq, &workqueues, list)
+ wq_update_unbound_numa(wq, smp_processor_id(), true);
+
+ mutex_unlock(&wq_pool_mutex);
+
+ /* create the initial workers */
+ for_each_online_cpu(cpu) {
+ for_each_cpu_worker_pool(pool, cpu) {
+ pool->flags &= ~POOL_DISASSOCIATED;
+ BUG_ON(!create_worker(pool));
+ }
+ }
+
+ hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
+ BUG_ON(!create_worker(pool));
+
+ wq_online = true;
wq_watchdog_init();
return 0;
}
-early_initcall(init_workqueues);
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 056052dc8e91..04c1ef717fe0 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -199,7 +199,7 @@ static void free_object(struct debug_obj *obj)
* initialized:
*/
if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
- sched = keventd_up();
+ sched = 1;
hlist_add_head(&obj->node, &obj_pool);
obj_pool_free++;
obj_pool_used--;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index f6c2c1e7779c..9a2b811966eb 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -56,7 +56,7 @@ static const char *kobject_actions[] = {
* kobject_action_type - translate action string to numeric type
*
* @buf: buffer containing the action string, newline is ignored
- * @len: length of buffer
+ * @count: length of buffer
* @type: pointer to the location to store the action type
*
* Returns 0 if the action string was recognized.
@@ -154,8 +154,8 @@ static void cleanup_uevent_env(struct subprocess_info *info)
/**
* kobject_uevent_env - send an uevent with environmental data
*
- * @action: action that is happening
* @kobj: struct kobject that the action is happening to
+ * @action: action that is happening
* @envp_ext: pointer to environmental data
*
* Returns 0 if kobject_uevent_env() is completed with success or the
@@ -363,8 +363,8 @@ EXPORT_SYMBOL_GPL(kobject_uevent_env);
/**
* kobject_uevent - notify userspace by sending an uevent
*
- * @action: action that is happening
* @kobj: struct kobject that the action is happening to
+ * @action: action that is happening
*
* Returns 0 if kobject_uevent() is completed with success or the
* corresponding error when it fails.
diff --git a/lib/raid6/avx2.c b/lib/raid6/avx2.c
index 76734004358d..20bca3d44f67 100644
--- a/lib/raid6/avx2.c
+++ b/lib/raid6/avx2.c
@@ -87,9 +87,57 @@ static void raid6_avx21_gen_syndrome(int disks, size_t bytes, void **ptrs)
kernel_fpu_end();
}
+static void raid6_avx21_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
+
+ for (d = 0 ; d < bytes ; d += 32) {
+ asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
+ asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
+ asm volatile("vpxor %ymm4,%ymm2,%ymm2");
+ /* P/Q data pages */
+ for (z = z0-1 ; z >= start ; z--) {
+ asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+ asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
+ asm volatile("vpxor %ymm5,%ymm2,%ymm2");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ }
+ /* P/Q left side optimization */
+ for (z = start-1 ; z >= 0 ; z--) {
+ asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+ asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ }
+ asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
+ /* Don't use movntdq for r/w memory area < cache line */
+ asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d]));
+ asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
const struct raid6_calls raid6_avx2x1 = {
raid6_avx21_gen_syndrome,
- NULL, /* XOR not yet implemented */
+ raid6_avx21_xor_syndrome,
raid6_have_avx2,
"avx2x1",
1 /* Has cache hints */
@@ -149,9 +197,77 @@ static void raid6_avx22_gen_syndrome(int disks, size_t bytes, void **ptrs)
kernel_fpu_end();
}
+static void raid6_avx22_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
+
+ for (d = 0 ; d < bytes ; d += 64) {
+ asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
+ asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32]));
+ asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
+ asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
+ asm volatile("vpxor %ymm4,%ymm2,%ymm2");
+ asm volatile("vpxor %ymm6,%ymm3,%ymm3");
+ /* P/Q data pages */
+ for (z = z0-1 ; z >= start ; z--) {
+ asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+ asm volatile("vpxor %ymm7,%ymm7,%ymm7");
+ asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+ asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpand %ymm0,%ymm7,%ymm7");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
+ asm volatile("vmovdqa %0,%%ymm7"
+ :: "m" (dptr[z][d+32]));
+ asm volatile("vpxor %ymm5,%ymm2,%ymm2");
+ asm volatile("vpxor %ymm7,%ymm3,%ymm3");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ }
+ /* P/Q left side optimization */
+ for (z = start-1 ; z >= 0 ; z--) {
+ asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+ asm volatile("vpxor %ymm7,%ymm7,%ymm7");
+ asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+ asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpand %ymm0,%ymm7,%ymm7");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ }
+ asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
+ asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
+ /* Don't use movntdq for r/w memory area < cache line */
+ asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d]));
+ asm volatile("vmovdqa %%ymm6,%0" : "=m" (q[d+32]));
+ asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d]));
+ asm volatile("vmovdqa %%ymm3,%0" : "=m" (p[d+32]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
const struct raid6_calls raid6_avx2x2 = {
raid6_avx22_gen_syndrome,
- NULL, /* XOR not yet implemented */
+ raid6_avx22_xor_syndrome,
raid6_have_avx2,
"avx2x2",
1 /* Has cache hints */
@@ -242,9 +358,119 @@ static void raid6_avx24_gen_syndrome(int disks, size_t bytes, void **ptrs)
kernel_fpu_end();
}
+static void raid6_avx24_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa %0,%%ymm0" :: "m" (raid6_avx2_constants.x1d[0]));
+
+ for (d = 0 ; d < bytes ; d += 128) {
+ asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
+ asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32]));
+ asm volatile("vmovdqa %0,%%ymm12" :: "m" (dptr[z0][d+64]));
+ asm volatile("vmovdqa %0,%%ymm14" :: "m" (dptr[z0][d+96]));
+ asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
+ asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
+ asm volatile("vmovdqa %0,%%ymm10" : : "m" (p[d+64]));
+ asm volatile("vmovdqa %0,%%ymm11" : : "m" (p[d+96]));
+ asm volatile("vpxor %ymm4,%ymm2,%ymm2");
+ asm volatile("vpxor %ymm6,%ymm3,%ymm3");
+ asm volatile("vpxor %ymm12,%ymm10,%ymm10");
+ asm volatile("vpxor %ymm14,%ymm11,%ymm11");
+ /* P/Q data pages */
+ for (z = z0-1 ; z >= start ; z--) {
+ asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
+ asm volatile("prefetchnta %0" :: "m" (dptr[z][d+64]));
+ asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+ asm volatile("vpxor %ymm7,%ymm7,%ymm7");
+ asm volatile("vpxor %ymm13,%ymm13,%ymm13");
+ asm volatile("vpxor %ymm15,%ymm15,%ymm15");
+ asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+ asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
+ asm volatile("vpcmpgtb %ymm12,%ymm13,%ymm13");
+ asm volatile("vpcmpgtb %ymm14,%ymm15,%ymm15");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+ asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
+ asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpand %ymm0,%ymm7,%ymm7");
+ asm volatile("vpand %ymm0,%ymm13,%ymm13");
+ asm volatile("vpand %ymm0,%ymm15,%ymm15");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ asm volatile("vpxor %ymm13,%ymm12,%ymm12");
+ asm volatile("vpxor %ymm15,%ymm14,%ymm14");
+ asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
+ asm volatile("vmovdqa %0,%%ymm7"
+ :: "m" (dptr[z][d+32]));
+ asm volatile("vmovdqa %0,%%ymm13"
+ :: "m" (dptr[z][d+64]));
+ asm volatile("vmovdqa %0,%%ymm15"
+ :: "m" (dptr[z][d+96]));
+ asm volatile("vpxor %ymm5,%ymm2,%ymm2");
+ asm volatile("vpxor %ymm7,%ymm3,%ymm3");
+ asm volatile("vpxor %ymm13,%ymm10,%ymm10");
+ asm volatile("vpxor %ymm15,%ymm11,%ymm11");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ asm volatile("vpxor %ymm13,%ymm12,%ymm12");
+ asm volatile("vpxor %ymm15,%ymm14,%ymm14");
+ }
+ asm volatile("prefetchnta %0" :: "m" (q[d]));
+ asm volatile("prefetchnta %0" :: "m" (q[d+64]));
+ /* P/Q left side optimization */
+ for (z = start-1 ; z >= 0 ; z--) {
+ asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+ asm volatile("vpxor %ymm7,%ymm7,%ymm7");
+ asm volatile("vpxor %ymm13,%ymm13,%ymm13");
+ asm volatile("vpxor %ymm15,%ymm15,%ymm15");
+ asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+ asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
+ asm volatile("vpcmpgtb %ymm12,%ymm13,%ymm13");
+ asm volatile("vpcmpgtb %ymm14,%ymm15,%ymm15");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+ asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
+ asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpand %ymm0,%ymm7,%ymm7");
+ asm volatile("vpand %ymm0,%ymm13,%ymm13");
+ asm volatile("vpand %ymm0,%ymm15,%ymm15");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ asm volatile("vpxor %ymm13,%ymm12,%ymm12");
+ asm volatile("vpxor %ymm15,%ymm14,%ymm14");
+ }
+ asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
+ asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
+ asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64]));
+ asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96]));
+ asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
+ asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
+ asm volatile("vpxor %0,%%ymm12,%%ymm12" : : "m" (q[d+64]));
+ asm volatile("vpxor %0,%%ymm14,%%ymm14" : : "m" (q[d+96]));
+ asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
+ asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
+ asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64]));
+ asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96]));
+ }
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
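Unlike the x1/x2 variants above, the four-way unrolled version writes 128 bytes of P and 128 bytes of Q per iteration, so it switches to non-temporal vmovntdq stores (with prefetchnta hints and the closing sfence); the smaller variants keep vmovdqa because, per the earlier comment, streaming stores do not pay off for read/write areas below a cache line. A hedged intrinsics sketch of the two store flavours, illustrative only and not the kernel's inline asm:

#include <stdint.h>
#include <immintrin.h>

/* Store one 32-byte lane either through the cache (vmovdqa) or as a
 * non-temporal streaming store (vmovntdq); dst must be 32-byte aligned. */
static inline void store_lane(uint8_t *dst, __m256i v, int streaming)
{
	if (streaming)
		_mm256_stream_si256((__m256i *)dst, v);
	else
		_mm256_store_si256((__m256i *)dst, v);
}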
const struct raid6_calls raid6_avx2x4 = {
raid6_avx24_gen_syndrome,
- NULL, /* XOR not yet implemented */
+ raid6_avx24_xor_syndrome,
raid6_have_avx2,
"avx2x4",
1 /* Has cache hints */
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 22e13a0e19d7..cb1b54ee8527 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -425,7 +425,8 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
dma_addr_t tbl_dma_addr,
phys_addr_t orig_addr, size_t size,
- enum dma_data_direction dir)
+ enum dma_data_direction dir,
+ unsigned long attrs)
{
unsigned long flags;
phys_addr_t tlb_addr;
@@ -526,7 +527,8 @@ found:
*/
for (i = 0; i < nslots; i++)
io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
- if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
return tlb_addr;
@@ -539,18 +541,20 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
static phys_addr_t
map_single(struct device *hwdev, phys_addr_t phys, size_t size,
- enum dma_data_direction dir)
+ enum dma_data_direction dir, unsigned long attrs)
{
dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
- return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
+ return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
+ dir, attrs);
}
/*
* dma_addr is the kernel virtual address of the bounce buffer to unmap.
*/
void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
- size_t size, enum dma_data_direction dir)
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
{
unsigned long flags;
int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -561,6 +565,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
* First, sync the memory before unmapping the entry
*/
if (orig_addr != INVALID_PHYS_ADDR &&
+ !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
@@ -654,7 +659,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
* GFP_DMA memory; fall back on map_single(), which
* will grab memory from the lowest available address range.
*/
- phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
+ phys_addr_t paddr = map_single(hwdev, 0, size,
+ DMA_FROM_DEVICE, 0);
if (paddr == SWIOTLB_MAP_ERROR)
goto err_warn;
@@ -667,9 +673,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
(unsigned long long)dma_mask,
(unsigned long long)dev_addr);
- /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
+ /*
+ * DMA_TO_DEVICE to avoid memcpy in unmap_single.
+ * The DMA_ATTR_SKIP_CPU_SYNC is optional.
+ */
swiotlb_tbl_unmap_single(hwdev, paddr,
- size, DMA_TO_DEVICE);
+ size, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
goto err_warn;
}
}
@@ -698,8 +708,12 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
if (!is_swiotlb_buffer(paddr))
free_pages((unsigned long)vaddr, get_order(size));
else
- /* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
- swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE);
+ /*
+ * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
+ * DMA_ATTR_SKIP_CPU_SYNC is optional.
+ */
+ swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
}
EXPORT_SYMBOL(swiotlb_free_coherent);
@@ -714,8 +728,8 @@ swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
* When the mapping is small enough return a static buffer to limit
* the damage, or panic when the transfer is too big.
*/
- printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
- "device %s\n", size, dev ? dev_name(dev) : "?");
+ dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
+ size);
if (size <= io_tlb_overflow || !do_panic)
return;
@@ -755,7 +769,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
/* Oh well, have to allocate and map a bounce buffer. */
- map = map_single(dev, phys, size, dir);
+ map = map_single(dev, phys, size, dir, attrs);
if (map == SWIOTLB_MAP_ERROR) {
swiotlb_full(dev, size, dir, 1);
return phys_to_dma(dev, io_tlb_overflow_buffer);
@@ -764,12 +778,13 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
dev_addr = phys_to_dma(dev, map);
/* Ensure that the address returned is DMA'ble */
- if (!dma_capable(dev, dev_addr, size)) {
- swiotlb_tbl_unmap_single(dev, map, size, dir);
- return phys_to_dma(dev, io_tlb_overflow_buffer);
- }
+ if (dma_capable(dev, dev_addr, size))
+ return dev_addr;
- return dev_addr;
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+ swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+
+ return phys_to_dma(dev, io_tlb_overflow_buffer);
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);
@@ -782,14 +797,15 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
* whatever the device wrote there.
*/
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir)
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
{
phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
BUG_ON(dir == DMA_NONE);
if (is_swiotlb_buffer(paddr)) {
- swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
+ swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
return;
}
@@ -809,7 +825,7 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- unmap_single(hwdev, dev_addr, size, dir);
+ unmap_single(hwdev, dev_addr, size, dir, attrs);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
@@ -891,11 +907,12 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
if (swiotlb_force ||
!dma_capable(hwdev, dev_addr, sg->length)) {
phys_addr_t map = map_single(hwdev, sg_phys(sg),
- sg->length, dir);
+ sg->length, dir, attrs);
if (map == SWIOTLB_MAP_ERROR) {
/* Don't panic here, we expect map_sg users
to do proper error handling. */
swiotlb_full(hwdev, sg->length, dir, 0);
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
attrs);
sg_dma_len(sgl) = 0;
@@ -910,14 +927,6 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);
-int
-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir)
-{
- return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, 0);
-}
-EXPORT_SYMBOL(swiotlb_map_sg);
-
/*
* Unmap a set of streaming mode DMA translations. Again, cpu read rules
* concerning calls here are the same as for swiotlb_unmap_page() above.
@@ -933,19 +942,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i)
- unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);
-
+ unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
+ attrs);
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
-void
-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir)
-{
- return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, 0);
-}
-EXPORT_SYMBOL(swiotlb_unmap_sg);
-
/*
* Make physical memory consistent for a set of streaming mode DMA translations
* after a transfer.
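With the attrs argument now plumbed through swiotlb_tbl_map_single()/swiotlb_tbl_unmap_single(), DMA_ATTR_SKIP_CPU_SYNC suppresses the bounce-buffer copy on the map or unmap path, which the cleanup paths above use to avoid pointless copies. A hedged sketch of how a driver might combine the attribute with an explicit partial sync; dev, addr, pkt_len and buf_len are made-up names:

/* RX buffer teardown: sync only the bytes the device actually wrote,
 * then unmap without triggering another full-buffer bounce copy. */
dma_sync_single_for_cpu(dev, addr, pkt_len, DMA_FROM_DEVICE);
/* ... consume the packet ... */
dma_unmap_single_attrs(dev, addr, buf_len, DMA_FROM_DEVICE,
		       DMA_ATTR_SKIP_CPU_SYNC);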
diff --git a/mm/filemap.c b/mm/filemap.c
index 5b4dd03130da..69568388c699 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -135,10 +135,9 @@ static int page_cache_tree_insert(struct address_space *mapping,
} else {
/* DAX can replace empty locked entry with a hole */
WARN_ON_ONCE(p !=
- (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
- RADIX_DAX_ENTRY_LOCK));
+ dax_radix_locked_entry(0, RADIX_DAX_EMPTY));
/* Wakeup waiters for exceptional entry lock */
- dax_wake_mapping_entry_waiter(mapping, page->index,
+ dax_wake_mapping_entry_waiter(mapping, page->index, p,
false);
}
}
diff --git a/mm/memory.c b/mm/memory.c
index 32e9b7aec366..c264f7cd3e47 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4002,6 +4002,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr,
return ret;
}
+EXPORT_SYMBOL_GPL(access_process_vm);
/*
* Print the name of a VMA.
diff --git a/mm/nommu.c b/mm/nommu.c
index 8b8faaf2a9e9..9720e0bab029 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1878,6 +1878,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
mmput(mm);
return len;
}
+EXPORT_SYMBOL_GPL(access_process_vm);
/**
* nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
diff --git a/mm/percpu.c b/mm/percpu.c
index f696385bcc44..0686f566d347 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -886,7 +886,8 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
size = ALIGN(size, 2);
- if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
+ if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
+ !is_power_of_2(align))) {
WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
size, align);
return NULL;
diff --git a/mm/slab.c b/mm/slab.c
index 87b29e76cafd..29bc6c0dedd0 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -552,12 +552,7 @@ static void start_cpu_timer(int cpu)
{
struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
- /*
- * When this gets called from do_initcalls via cpucache_init(),
- * init_workqueues() has already run, so keventd will be setup
- * at that time.
- */
- if (keventd_up() && reap_work->work.func == NULL) {
+ if (reap_work->work.func == NULL) {
init_reap_node(cpu);
INIT_DEFERRABLE_WORK(reap_work, cache_reap);
schedule_delayed_work_on(cpu, reap_work,
diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py
index a32e4da4c117..3820f00b066a 100755
--- a/scripts/checkkconfigsymbols.py
+++ b/scripts/checkkconfigsymbols.py
@@ -88,7 +88,7 @@ def parse_options():
if args.commit and args.diff:
sys.exit("Please specify only one option at once.")
- if args.diff and not re.match(r"^[\w\-\.]+\.\.[\w\-\.]+$", args.diff):
+ if args.diff and not re.match(r"^[\w\-\.\^]+\.\.[\w\-\.\^]+$", args.diff):
sys.exit("Please specify valid input in the following format: "
"\'commit1..commit2\'")
diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c
index 8160f1c1b56e..12541126575b 100644
--- a/scripts/gcc-plugins/latent_entropy_plugin.c
+++ b/scripts/gcc-plugins/latent_entropy_plugin.c
@@ -619,7 +619,7 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
enabled = false;
continue;
}
- error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+ error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
}
register_callback(plugin_name, PLUGIN_INFO, NULL,
diff --git a/scripts/gcc-plugins/sancov_plugin.c b/scripts/gcc-plugins/sancov_plugin.c
index 7ea0b3f50739..70f5fe0d590a 100644
--- a/scripts/gcc-plugins/sancov_plugin.c
+++ b/scripts/gcc-plugins/sancov_plugin.c
@@ -126,7 +126,7 @@ __visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gc
enable = false;
continue;
}
- error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+ error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
}
register_callback(plugin_name, PLUGIN_INFO, NULL, &sancov_plugin_info);
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index ebc9fdfe64df..698a01419515 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -2501,7 +2501,7 @@ static long snd_pcm_oss_ioctl(struct file *file, unsigned int cmd, unsigned long
return put_user(SNDRV_OSS_VERSION, p);
if (cmd == OSS_ALSAEMULVER)
return put_user(1, p);
-#if defined(CONFIG_SND_MIXER_OSS) || (defined(MODULE) && defined(CONFIG_SND_MIXER_OSS_MODULE))
+#if IS_REACHABLE(CONFIG_SND_MIXER_OSS)
if (((cmd >> 8) & 0xff) == 'M') { /* mixer ioctl - for OSS compatibility */
struct snd_pcm_substream *substream;
int idx;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index b450a27588c8..2096bb0835c8 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -1610,7 +1610,7 @@ static int snd_rawmidi_dev_free(struct snd_device *device)
return snd_rawmidi_free(rmidi);
}
-#if defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && defined(CONFIG_SND_SEQUENCER_MODULE))
+#if IS_REACHABLE(CONFIG_SND_SEQUENCER)
static void snd_rawmidi_dev_seq_free(struct snd_seq_device *device)
{
struct snd_rawmidi *rmidi = device->private_data;
@@ -1691,7 +1691,7 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
}
}
rmidi->proc_entry = entry;
-#if defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && defined(CONFIG_SND_SEQUENCER_MODULE))
+#if IS_REACHABLE(CONFIG_SND_SEQUENCER)
if (!rmidi->ops || !rmidi->ops->dev_register) { /* own registration mechanism */
if (snd_seq_device_new(rmidi->card, rmidi->device, SNDRV_SEQ_DEV_ID_MIDISYNTH, 0, &rmidi->seq_dev) >= 0) {
rmidi->seq_dev->private_data = rmidi;
diff --git a/sound/drivers/opl3/opl3_lib.c b/sound/drivers/opl3/opl3_lib.c
index 369cef212ea9..cd9e9f31720f 100644
--- a/sound/drivers/opl3/opl3_lib.c
+++ b/sound/drivers/opl3/opl3_lib.c
@@ -528,7 +528,7 @@ int snd_opl3_hwdep_new(struct snd_opl3 * opl3,
opl3->hwdep = hw;
opl3->seq_dev_num = seq_device;
-#if defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && defined(CONFIG_SND_SEQUENCER_MODULE))
+#if IS_REACHABLE(CONFIG_SND_SEQUENCER)
if (snd_seq_device_new(card, seq_device, SNDRV_SEQ_DEV_ID_OPL3,
sizeof(struct snd_opl3 *), &opl3->seq_dev) >= 0) {
strcpy(opl3->seq_dev->name, hw->name);
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 3469ac14c89c..730ea91d9be8 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -172,12 +172,12 @@ get_saffire_spec(struct fw_unit *unit)
static bool
check_audiophile_booted(struct fw_unit *unit)
{
- char name[24] = {0};
+ char name[28] = {0};
if (fw_csr_string(unit->directory, CSR_MODEL, name, sizeof(name)) < 0)
return false;
- return strncmp(name, "FW Audiophile Bootloader", 15) != 0;
+ return strncmp(name, "FW Audiophile Bootloader", 24) != 0;
}
static void
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 82259ca61e64..1ef7cdf1d3e8 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -1907,7 +1907,7 @@ static int ac97_reset_wait(struct snd_ac97 *ac97, int timeout, int with_modem)
* write). The other callbacks, wait and reset, are not mandatory.
*
* The clock is set to 48000. If another clock is needed, set
- * (*rbus)->clock manually.
+ * ``(*rbus)->clock`` manually.
*
* The AC97 bus instance is registered as a low-level device, so you don't
* have to release it manually.
diff --git a/sound/pci/als4000.c b/sound/pci/als4000.c
index edabe1371660..92bc06d01288 100644
--- a/sound/pci/als4000.c
+++ b/sound/pci/als4000.c
@@ -84,7 +84,7 @@ MODULE_DESCRIPTION("Avance Logic ALS4000");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Avance Logic,ALS4000}}");
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
#define SUPPORT_JOYSTICK 1
#endif
diff --git a/sound/pci/au88x0/au88x0_game.c b/sound/pci/au88x0/au88x0_game.c
index 151815b857a0..53abcd3eccbd 100644
--- a/sound/pci/au88x0/au88x0_game.c
+++ b/sound/pci/au88x0/au88x0_game.c
@@ -36,7 +36,7 @@
#include <linux/gameport.h>
#include <linux/export.h>
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
#define VORTEX_GAME_DWAIT 20 /* 20 ms */
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index 80c4a4456197..79b2e6b7d88b 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -212,7 +212,7 @@ MODULE_DESCRIPTION("Aztech AZF3328 (PCI168)");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Aztech,AZF3328}}");
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
#define SUPPORT_GAMEPORT 1
#endif
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index 73f593526b2d..aeedc270ed9b 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -48,7 +48,7 @@ MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8738},"
"{C-Media,CMI8338A},"
"{C-Media,CMI8338B}}");
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
#define SUPPORT_JOYSTICK 1
#endif
diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c
index 615d8a99d8c8..8f0f5f24e40e 100644
--- a/sound/pci/cs4281.c
+++ b/sound/pci/cs4281.c
@@ -1194,7 +1194,7 @@ static void snd_cs4281_proc_init(struct cs4281 *chip)
* joystick support
*/
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
static void snd_cs4281_gameport_trigger(struct gameport *gameport)
{
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index 528102cc2d5d..fde3cd48258c 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -2718,7 +2718,7 @@ int snd_cs46xx_midi(struct snd_cs46xx *chip, int device)
* gameport interface
*/
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
static void snd_cs46xx_gameport_trigger(struct gameport *gameport)
{
diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
index 4a0cbd2241d8..aa61615288ff 100644
--- a/sound/pci/cs46xx/dsp_spos.c
+++ b/sound/pci/cs46xx/dsp_spos.c
@@ -107,7 +107,8 @@ static int shadow_and_reallocate_code (struct snd_cs46xx * chip, u32 * data, u32
dev_dbg(chip->card->dev,
"handle_wideop:[2] %05x:%05x addr %04x\n",
- hival, loval, address); nreallocated++;
+ hival, loval, address);
+ nreallocated++;
} /* wide_opcodes[j] == wide_op */
} /* for */
} /* mod_type == 0 ... */
diff --git a/sound/pci/echoaudio/layla24_dsp.c b/sound/pci/echoaudio/layla24_dsp.c
index df28e5117359..c02bc1dcc170 100644
--- a/sound/pci/echoaudio/layla24_dsp.c
+++ b/sound/pci/echoaudio/layla24_dsp.c
@@ -135,7 +135,7 @@ static int load_asic(struct echoaudio *chip)
err = load_asic_generic(chip, DSP_FNC_LOAD_LAYLA24_EXTERNAL_ASIC,
FW_LAYLA24_2S_ASIC);
if (err < 0)
- return false;
+ return err;
/* Now give the external ASIC a little time to set up */
mdelay(10);
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
index db7a2e5e4a14..6a0e49ac5273 100644
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -37,7 +37,7 @@ MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Creative Labs,SB Live!/PCI512/E-mu APS},"
"{Creative Labs,SB Audigy}}");
-#if defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && defined(CONFIG_SND_SEQUENCER_MODULE))
+#if IS_REACHABLE(CONFIG_SND_SEQUENCER)
#define ENABLE_SYNTH
#include <sound/emu10k1_synth.h>
#endif
@@ -194,6 +194,9 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci,
if ((err = snd_card_register(card)) < 0)
goto error;
+ if (emu->card_capabilities->emu_model)
+ schedule_delayed_work(&emu->emu1010.firmware_work, 0);
+
pci_set_drvdata(pci, card);
dev++;
return 0;
@@ -219,6 +222,8 @@ static int snd_emu10k1_suspend(struct device *dev)
emu->suspend = 1;
+ cancel_delayed_work_sync(&emu->emu1010.firmware_work);
+
snd_pcm_suspend_all(emu->pcm);
snd_pcm_suspend_all(emu->pcm_mic);
snd_pcm_suspend_all(emu->pcm_efx);
@@ -252,6 +257,10 @@ static int snd_emu10k1_resume(struct device *dev)
emu->suspend = 0;
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+
+ if (emu->card_capabilities->emu_model)
+ schedule_delayed_work(&emu->emu1010.firmware_work, 0);
+
return 0;
}
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index 891453451543..ccf4415a1c7b 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -32,7 +32,6 @@
*/
#include <linux/sched.h>
-#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -662,7 +661,7 @@ static int snd_emu10k1_cardbus_init(struct snd_emu10k1 *emu)
return 0;
}
-static int snd_emu1010_load_firmware(struct snd_emu10k1 *emu,
+static int snd_emu1010_load_firmware_entry(struct snd_emu10k1 *emu,
const struct firmware *fw_entry)
{
int n, i;
@@ -708,98 +707,104 @@ static int snd_emu1010_load_firmware(struct snd_emu10k1 *emu,
return 0;
}
-static int emu1010_firmware_thread(void *data)
+/* firmware file names, per model, init-fw and dock-fw (optional) */
+static const char * const firmware_names[5][2] = {
+ [EMU_MODEL_EMU1010] = {
+ HANA_FILENAME, DOCK_FILENAME
+ },
+ [EMU_MODEL_EMU1010B] = {
+ EMU1010B_FILENAME, MICRO_DOCK_FILENAME
+ },
+ [EMU_MODEL_EMU1616] = {
+ EMU1010_NOTEBOOK_FILENAME, MICRO_DOCK_FILENAME
+ },
+ [EMU_MODEL_EMU0404] = {
+ EMU0404_FILENAME, NULL
+ },
+};
+
+static int snd_emu1010_load_firmware(struct snd_emu10k1 *emu, int dock,
+ const struct firmware **fw)
{
- struct snd_emu10k1 *emu = data;
+ const char *filename;
+ int err;
+
+ if (!*fw) {
+ filename = firmware_names[emu->card_capabilities->emu_model][dock];
+ if (!filename)
+ return 0;
+ err = request_firmware(fw, filename, &emu->pci->dev);
+ if (err)
+ return err;
+ }
+
+ return snd_emu1010_load_firmware_entry(emu, *fw);
+}
+
+static void emu1010_firmware_work(struct work_struct *work)
+{
+ struct snd_emu10k1 *emu;
u32 tmp, tmp2, reg;
- u32 last_reg = 0;
int err;
- for (;;) {
- /* Delay to allow Audio Dock to settle */
- msleep_interruptible(1000);
- if (kthread_should_stop())
- break;
+ emu = container_of(work, struct snd_emu10k1,
+ emu1010.firmware_work.work);
+ if (emu->card->shutdown)
+ return;
#ifdef CONFIG_PM_SLEEP
- if (emu->suspend)
- continue;
+ if (emu->suspend)
+ return;
#endif
- snd_emu1010_fpga_read(emu, EMU_HANA_IRQ_STATUS, &tmp); /* IRQ Status */
- snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &reg); /* OPTIONS: Which cards are attached to the EMU */
- if (reg & EMU_HANA_OPTION_DOCK_OFFLINE) {
- /* Audio Dock attached */
- /* Return to Audio Dock programming mode */
- dev_info(emu->card->dev,
- "emu1010: Loading Audio Dock Firmware\n");
- snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, EMU_HANA_FPGA_CONFIG_AUDIODOCK);
-
- if (!emu->dock_fw) {
- const char *filename = NULL;
- switch (emu->card_capabilities->emu_model) {
- case EMU_MODEL_EMU1010:
- filename = DOCK_FILENAME;
- break;
- case EMU_MODEL_EMU1010B:
- filename = MICRO_DOCK_FILENAME;
- break;
- case EMU_MODEL_EMU1616:
- filename = MICRO_DOCK_FILENAME;
- break;
- }
- if (filename) {
- err = request_firmware(&emu->dock_fw,
- filename,
- &emu->pci->dev);
- if (err)
- continue;
- }
- }
-
- if (emu->dock_fw) {
- err = snd_emu1010_load_firmware(emu, emu->dock_fw);
- if (err)
- continue;
- }
+ snd_emu1010_fpga_read(emu, EMU_HANA_IRQ_STATUS, &tmp); /* IRQ Status */
+ snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &reg); /* OPTIONS: Which cards are attached to the EMU */
+ if (reg & EMU_HANA_OPTION_DOCK_OFFLINE) {
+ /* Audio Dock attached */
+ /* Return to Audio Dock programming mode */
+ dev_info(emu->card->dev,
+ "emu1010: Loading Audio Dock Firmware\n");
+ snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG,
+ EMU_HANA_FPGA_CONFIG_AUDIODOCK);
+ err = snd_emu1010_load_firmware(emu, 1, &emu->dock_fw);
+ if (err < 0)
+ goto next;
- snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, 0);
- snd_emu1010_fpga_read(emu, EMU_HANA_IRQ_STATUS, &reg);
- dev_info(emu->card->dev,
- "emu1010: EMU_HANA+DOCK_IRQ_STATUS = 0x%x\n",
- reg);
- /* ID, should read & 0x7f = 0x55 when FPGA programmed. */
- snd_emu1010_fpga_read(emu, EMU_HANA_ID, &reg);
- dev_info(emu->card->dev,
- "emu1010: EMU_HANA+DOCK_ID = 0x%x\n", reg);
- if ((reg & 0x1f) != 0x15) {
- /* FPGA failed to be programmed */
- dev_info(emu->card->dev,
- "emu1010: Loading Audio Dock Firmware file failed, reg = 0x%x\n",
- reg);
- continue;
- }
- dev_info(emu->card->dev,
- "emu1010: Audio Dock Firmware loaded\n");
- snd_emu1010_fpga_read(emu, EMU_DOCK_MAJOR_REV, &tmp);
- snd_emu1010_fpga_read(emu, EMU_DOCK_MINOR_REV, &tmp2);
- dev_info(emu->card->dev, "Audio Dock ver: %u.%u\n",
- tmp, tmp2);
- /* Sync clocking between 1010 and Dock */
- /* Allow DLL to settle */
- msleep(10);
- /* Unmute all. Default is muted after a firmware load */
- snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
- } else if (!reg && last_reg) {
- /* Audio Dock removed */
+ snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, 0);
+ snd_emu1010_fpga_read(emu, EMU_HANA_IRQ_STATUS, &tmp);
+ dev_info(emu->card->dev,
+ "emu1010: EMU_HANA+DOCK_IRQ_STATUS = 0x%x\n", tmp);
+ /* ID, should read & 0x7f = 0x55 when FPGA programmed. */
+ snd_emu1010_fpga_read(emu, EMU_HANA_ID, &tmp);
+ dev_info(emu->card->dev,
+ "emu1010: EMU_HANA+DOCK_ID = 0x%x\n", tmp);
+ if ((tmp & 0x1f) != 0x15) {
+ /* FPGA failed to be programmed */
dev_info(emu->card->dev,
- "emu1010: Audio Dock detached\n");
- /* Unmute all */
- snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
+ "emu1010: Loading Audio Dock Firmware file failed, reg = 0x%x\n",
+ tmp);
+ goto next;
}
-
- last_reg = reg;
+ dev_info(emu->card->dev,
+ "emu1010: Audio Dock Firmware loaded\n");
+ snd_emu1010_fpga_read(emu, EMU_DOCK_MAJOR_REV, &tmp);
+ snd_emu1010_fpga_read(emu, EMU_DOCK_MINOR_REV, &tmp2);
+ dev_info(emu->card->dev, "Audio Dock ver: %u.%u\n", tmp, tmp2);
+ /* Sync clocking between 1010 and Dock */
+ /* Allow DLL to settle */
+ msleep(10);
+ /* Unmute all. Default is muted after a firmware load */
+ snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
+ } else if (!reg && emu->emu1010.last_reg) {
+ /* Audio Dock removed */
+ dev_info(emu->card->dev, "emu1010: Audio Dock detached\n");
+ /* Unmute all */
+ snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
}
- dev_info(emu->card->dev, "emu1010: firmware thread stopping\n");
- return 0;
+
+ next:
+ emu->emu1010.last_reg = reg;
+ if (!emu->card->shutdown)
+ schedule_delayed_work(&emu->emu1010.firmware_work,
+ msecs_to_jiffies(1000));
}
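The rework above replaces the 1-second polling kthread with a self-rescheduling delayed work item: probe/resume queue it, the handler re-arms itself unless the card is shutting down, and suspend/free stop it with cancel_delayed_work_sync(). A minimal, hedged sketch of that pattern with invented names:

#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work poll_work;
	bool stopping;
};

static void my_poll_work(struct work_struct *work)
{
	struct my_dev *md = container_of(work, struct my_dev, poll_work.work);

	/* ... poll the hardware ... */

	if (!md->stopping)	/* re-arm unless we are tearing down */
		schedule_delayed_work(&md->poll_work, msecs_to_jiffies(1000));
}

/* setup:    INIT_DELAYED_WORK(&md->poll_work, my_poll_work);
 *           schedule_delayed_work(&md->poll_work, 0);
 * teardown: md->stopping = true;
 *           cancel_delayed_work_sync(&md->poll_work);
 */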
/*
@@ -881,39 +886,8 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
}
dev_info(emu->card->dev, "emu1010: EMU_HANA_ID = 0x%x\n", reg);
- if (!emu->firmware) {
- const char *filename;
- switch (emu->card_capabilities->emu_model) {
- case EMU_MODEL_EMU1010:
- filename = HANA_FILENAME;
- break;
- case EMU_MODEL_EMU1010B:
- filename = EMU1010B_FILENAME;
- break;
- case EMU_MODEL_EMU1616:
- filename = EMU1010_NOTEBOOK_FILENAME;
- break;
- case EMU_MODEL_EMU0404:
- filename = EMU0404_FILENAME;
- break;
- default:
- return -ENODEV;
- }
-
- err = request_firmware(&emu->firmware, filename, &emu->pci->dev);
- if (err != 0) {
- dev_info(emu->card->dev,
- "emu1010: firmware: %s not found. Err = %d\n",
- filename, err);
- return err;
- }
- dev_info(emu->card->dev,
- "emu1010: firmware file = %s, size = 0x%zx\n",
- filename, emu->firmware->size);
- }
-
- err = snd_emu1010_load_firmware(emu, emu->firmware);
- if (err != 0) {
+ err = snd_emu1010_load_firmware(emu, 0, &emu->firmware);
+ if (err < 0) {
dev_info(emu->card->dev, "emu1010: Loading Firmware failed\n");
return err;
}
@@ -1136,22 +1110,6 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
snd_emu1010_fpga_read(emu, EMU_HANA_SPDIF_MODE, &tmp);
snd_emu1010_fpga_write(emu, EMU_HANA_SPDIF_MODE, 0x10); /* SPDIF Format spdif (or 0x11 for aes/ebu) */
- /* Start Micro/Audio Dock firmware loader thread */
- if (!emu->emu1010.firmware_thread) {
- emu->emu1010.firmware_thread =
- kthread_create(emu1010_firmware_thread, emu,
- "emu1010_firmware");
- if (IS_ERR(emu->emu1010.firmware_thread)) {
- err = PTR_ERR(emu->emu1010.firmware_thread);
- emu->emu1010.firmware_thread = NULL;
- dev_info(emu->card->dev,
- "emu1010: Creating thread failed\n");
- return err;
- }
-
- wake_up_process(emu->emu1010.firmware_thread);
- }
-
#if 0
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_HAMOA_DAC_LEFT1, EMU_SRC_ALICE_EMU32B + 2); /* ALICE2 bus 0xa2 */
@@ -1309,8 +1267,7 @@ static int snd_emu10k1_free(struct snd_emu10k1 *emu)
/* Disable 48Volt power to Audio Dock */
snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_PWR, 0);
}
- if (emu->emu1010.firmware_thread)
- kthread_stop(emu->emu1010.firmware_thread);
+ cancel_delayed_work_sync(&emu->emu1010.firmware_work);
release_firmware(emu->firmware);
release_firmware(emu->dock_fw);
if (emu->irq >= 0)
@@ -1852,6 +1809,7 @@ int snd_emu10k1_create(struct snd_card *card,
emu->irq = -1;
emu->synth = NULL;
emu->get_synth_voice = NULL;
+ INIT_DELAYED_WORK(&emu->emu1010.firmware_work, emu1010_firmware_work);
/* read revision & serial */
emu->revision = pci->revision;
pci_read_config_dword(pci, PCI_SUBSYSTEM_VENDOR_ID, &emu->serial);
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index 7e760fed0728..51736c2b5a00 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -79,7 +79,7 @@ MODULE_SUPPORTED_DEVICE("{{Ensoniq,AudioPCI ES1371/73},"
"{Ectiva,EV1938}}");
#endif
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
#define SUPPORT_JOYSTICK
#endif
diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c
index 681355829484..e8d943071a8c 100644
--- a/sound/pci/es1938.c
+++ b/sound/pci/es1938.c
@@ -72,7 +72,7 @@ MODULE_SUPPORTED_DEVICE("{{ESS,ES1938},"
"{ESS,ES1969},"
"{TerraTec,128i PCI}}");
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
#define SUPPORT_JOYSTICK 1
#endif
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index 8146fb76a4ad..2ec2b1ce0af6 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -126,7 +126,7 @@ MODULE_SUPPORTED_DEVICE("{{ESS,Maestro 2e},"
"{ESS,Maestro 1},"
"{TerraTec,DMX}}");
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
#define SUPPORT_JOYSTICK 1
#endif
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 7f57a145a47e..a03cf68d0bcd 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -884,6 +884,8 @@ void snd_hda_apply_fixup(struct hda_codec *codec, int action)
}
EXPORT_SYMBOL_GPL(snd_hda_apply_fixup);
+#define IGNORE_SEQ_ASSOC (~(AC_DEFCFG_SEQUENCE | AC_DEFCFG_DEF_ASSOC))
+
static bool pin_config_match(struct hda_codec *codec,
const struct hda_pintbl *pins)
{
@@ -901,7 +903,7 @@ static bool pin_config_match(struct hda_codec *codec,
for (; t_pins->nid; t_pins++) {
if (t_pins->nid == nid) {
found = 1;
- if (t_pins->val == cfg)
+ if ((t_pins->val & IGNORE_SEQ_ASSOC) == (cfg & IGNORE_SEQ_ASSOC))
break;
else if ((cfg & 0xf0000000) == 0x40000000 && (t_pins->val & 0xf0000000) == 0x40000000)
break;
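AC_DEFCFG_SEQUENCE and AC_DEFCFG_DEF_ASSOC together cover the low byte of a pin default config, so the new comparison ignores only the sequence/association nibbles. A small worked example with made-up pin configs:

/* t_pins->val = 0x02211030, cfg = 0x02211040:
 *   0x02211030 & IGNORE_SEQ_ASSOC = 0x02211000
 *   0x02211040 & IGNORE_SEQ_ASSOC = 0x02211000  -> match,
 * even though the default-association nibbles differ.
 */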
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index ad06866d7c69..11b9b2f17a2e 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -780,6 +780,7 @@ static const struct hda_pintbl alienware_pincfgs[] = {
static const struct snd_pci_quirk ca0132_quirks[] = {
SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE),
SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE),
+ SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
{}
};
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index ed62748a6d55..c15c51bea26d 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -262,6 +262,7 @@ enum {
CXT_FIXUP_CAP_MIX_AMP_5047,
CXT_FIXUP_MUTE_LED_EAPD,
CXT_FIXUP_HP_SPECTRE,
+ CXT_FIXUP_HP_GATE_MIC,
};
/* for hda_fixup_thinkpad_acpi() */
@@ -633,6 +634,17 @@ static void cxt_fixup_cap_mix_amp_5047(struct hda_codec *codec,
(1 << AC_AMPCAP_MUTE_SHIFT));
}
+static void cxt_fixup_hp_gate_mic_jack(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ /* the mic pin (0x19) doesn't give an unsolicited event;
+ * probe the mic pin together with the headphone pin (0x16)
+ */
+ if (action == HDA_FIXUP_ACT_PROBE)
+ snd_hda_jack_set_gating_jack(codec, 0x19, 0x16);
+}
+
/* ThinkPad X200 & co with cxt5051 */
static const struct hda_pintbl cxt_pincfg_lenovo_x200[] = {
{ 0x16, 0x042140ff }, /* HP (seq# overridden) */
@@ -774,6 +786,10 @@ static const struct hda_fixup cxt_fixups[] = {
{ }
}
},
+ [CXT_FIXUP_HP_GATE_MIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cxt_fixup_hp_gate_mic_jack,
+ },
};
static const struct snd_pci_quirk cxt5045_fixups[] = {
@@ -824,6 +840,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+ SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ea81c08ddc7a..9448daff9d8b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -420,7 +420,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
}
/* generic shutup callback;
- * just turning off EPAD and a little pause for avoiding pop-noise
+ * just turning off EAPD and a little pause for avoiding pop-noise
*/
static void alc_eapd_shutup(struct hda_codec *codec)
{
@@ -5917,6 +5917,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x12, 0x90a60180},
{0x14, 0x90170120},
{0x21, 0x02211030}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x1b, 0x01011020},
+ {0x21, 0x02211010}),
SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x12, 0x90a60160},
{0x14, 0x90170120},
@@ -6561,6 +6564,30 @@ static void alc662_fixup_led_gpio1(struct hda_codec *codec,
}
}
+static void alc662_usi_automute_hook(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
+{
+ struct alc_spec *spec = codec->spec;
+ int vref;
+ msleep(200);
+ snd_hda_gen_hp_automute(codec, jack);
+
+ vref = spec->gen.hp_jack_present ? PIN_VREF80 : 0;
+ msleep(100);
+ snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+ vref);
+}
+
+static void alc662_fixup_usi_headset_mic(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
+ spec->gen.hp_automute_hook = alc662_usi_automute_hook;
+ }
+}
+
static struct coef_fw alc668_coefs[] = {
WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03, 0x0),
WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06, 0x0), WRITE_COEF(0x07, 0x0f80),
@@ -6626,6 +6653,8 @@ enum {
ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
ALC662_FIXUP_ACER_VERITON,
ALC892_FIXUP_ASROCK_MOBO,
+ ALC662_FIXUP_USI_FUNC,
+ ALC662_FIXUP_USI_HEADSET_MODE,
};
static const struct hda_fixup alc662_fixups[] = {
@@ -6910,6 +6939,20 @@ static const struct hda_fixup alc662_fixups[] = {
{ }
}
},
+ [ALC662_FIXUP_USI_FUNC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc662_fixup_usi_headset_mic,
+ },
+ [ALC662_FIXUP_USI_HEADSET_MODE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x02a1913c }, /* use as headset mic, without its own jack detect */
+ { 0x18, 0x01a1903d },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC662_FIXUP_USI_FUNC
+ },
};
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6945,6 +6988,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index ada5f01d479c..19c9df6b0f3d 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -110,7 +110,7 @@
#include <sound/opl3.h>
#include <sound/initval.h>
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
#define SUPPORT_JOYSTICK 1
#endif
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
index e1a13870bb80..a6aa48c5b969 100644
--- a/sound/pci/sonicvibes.c
+++ b/sound/pci/sonicvibes.c
@@ -45,7 +45,7 @@ MODULE_DESCRIPTION("S3 SonicVibes PCI");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{S3,SonicVibes PCI}}");
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
#define SUPPORT_JOYSTICK 1
#endif
diff --git a/sound/pci/trident/trident_main.c b/sound/pci/trident/trident_main.c
index 27f0ed840979..92ad2d7a6bf8 100644
--- a/sound/pci/trident/trident_main.c
+++ b/sound/pci/trident/trident_main.c
@@ -3120,7 +3120,7 @@ static int snd_trident_mixer(struct snd_trident *trident, int pcm_spdif_device)
* gameport interface
*/
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
static unsigned char snd_trident_gameport_read(struct gameport *gameport)
{
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index 38a17b4342a6..2d8c14e3f8d2 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -72,7 +72,7 @@ MODULE_DESCRIPTION("VIA VT82xx audio");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{VIA,VT82C686A/B/C,pci},{VIA,VT8233A/C,8235}}");
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
#define SUPPORT_JOYSTICK 1
#endif
diff --git a/sound/pci/ymfpci/ymfpci.h b/sound/pci/ymfpci/ymfpci.h
index 149d4cb46998..aa9bb065f385 100644
--- a/sound/pci/ymfpci/ymfpci.h
+++ b/sound/pci/ymfpci/ymfpci.h
@@ -176,7 +176,7 @@
#define YMFPCI_LEGACY2_IMOD (1 << 15) /* legacy IRQ mode */
/* SIEN:IMOD 0:0 = legacy irq, 0:1 = INTA, 1:0 = serialized IRQ */
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
#define SUPPORT_JOYSTICK
#endif
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index 22aec9a1e9a4..4a56f3dfba51 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -78,4 +78,14 @@ config SND_ATMEL_SOC_PDMIC
help
Say Y if you want to add support for Atmel ASoC driver for boards using
PDMIC.
+
+config SND_ATMEL_SOC_TSE850_PCM5142
+ tristate "ASoC driver for the Axentia TSE-850"
+ depends on ARCH_AT91 && OF
+ depends on ATMEL_SSC && I2C
+ select SND_ATMEL_SOC_SSC_DMA
+ select SND_SOC_PCM512x_I2C
+ help
+ Say Y if you want to add support for the ASoC driver for the
+ Axentia TSE-850 with a PCM5142 codec.
endif
diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile
index a2b127bd9c87..67e10cbd4ed7 100644
--- a/sound/soc/atmel/Makefile
+++ b/sound/soc/atmel/Makefile
@@ -13,9 +13,11 @@ snd-atmel-soc-wm8904-objs := atmel_wm8904.o
snd-soc-sam9x5-wm8731-objs := sam9x5_wm8731.o
snd-atmel-soc-classd-objs := atmel-classd.o
snd-atmel-soc-pdmic-objs := atmel-pdmic.o
+snd-atmel-soc-tse850-pcm5142-objs := tse850-pcm5142.o
obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o
obj-$(CONFIG_SND_ATMEL_SOC_WM8904) += snd-atmel-soc-wm8904.o
obj-$(CONFIG_SND_AT91_SOC_SAM9X5_WM8731) += snd-soc-sam9x5-wm8731.o
obj-$(CONFIG_SND_ATMEL_SOC_CLASSD) += snd-atmel-soc-classd.o
obj-$(CONFIG_SND_ATMEL_SOC_PDMIC) += snd-atmel-soc-pdmic.o
+obj-$(CONFIG_SND_ATMEL_SOC_TSE850_PCM5142) += snd-atmel-soc-tse850-pcm5142.o
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 16e459aedffe..a1e2c5682dcd 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -380,6 +380,7 @@ static void atmel_ssc_shutdown(struct snd_pcm_substream *substream,
ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
/* Clear the SSC dividers */
ssc_p->cmr_div = ssc_p->tcmr_period = ssc_p->rcmr_period = 0;
+ ssc_p->forced_divider = 0;
}
spin_unlock_irq(&ssc_p->lock);
@@ -426,14 +427,17 @@ static int atmel_ssc_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
else
if (div != ssc_p->cmr_div)
return -EBUSY;
+ ssc_p->forced_divider |= BIT(ATMEL_SSC_CMR_DIV);
break;
case ATMEL_SSC_TCMR_PERIOD:
ssc_p->tcmr_period = div;
+ ssc_p->forced_divider |= BIT(ATMEL_SSC_TCMR_PERIOD);
break;
case ATMEL_SSC_RCMR_PERIOD:
ssc_p->rcmr_period = div;
+ ssc_p->forced_divider |= BIT(ATMEL_SSC_RCMR_PERIOD);
break;
default:
@@ -443,6 +447,28 @@ static int atmel_ssc_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
return 0;
}
+/* Is the cpu-dai master of the frame clock? */
+static int atmel_ssc_cfs(struct atmel_ssc_info *ssc_p)
+{
+ switch (ssc_p->daifmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_CBS_CFS:
+ return 1;
+ }
+ return 0;
+}
+
+/* Is the cpu-dai master of the bit clock? */
+static int atmel_ssc_cbs(struct atmel_ssc_info *ssc_p)
+{
+ switch (ssc_p->daifmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFM:
+ case SND_SOC_DAIFMT_CBS_CFS:
+ return 1;
+ }
+ return 0;
+}
+
/*
* Configure the SSC.
*/
@@ -459,6 +485,9 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
u32 tfmr, rfmr, tcmr, rcmr;
int ret;
int fslen, fslen_ext;
+ u32 cmr_div;
+ u32 tcmr_period;
+ u32 rcmr_period;
/*
* Currently, there is only one set of dma params for
@@ -470,6 +499,46 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
else
dir = 1;
+ /*
+ * If the cpu dai should provide BCLK, but no one has provided the
+ * divider needed for that to work, fall back to something sensible.
+ */
+ cmr_div = ssc_p->cmr_div;
+ if (!(ssc_p->forced_divider & BIT(ATMEL_SSC_CMR_DIV)) &&
+ atmel_ssc_cbs(ssc_p)) {
+ int bclk_rate = snd_soc_params_to_bclk(params);
+
+ if (bclk_rate < 0) {
+ dev_err(dai->dev, "unable to calculate cmr_div: %d\n",
+ bclk_rate);
+ return bclk_rate;
+ }
+
+ cmr_div = DIV_ROUND_CLOSEST(ssc_p->mck_rate, 2 * bclk_rate);
+ }
+
+ /*
+ * If the cpu dai should provide LRCLK, but no one has provided the
+ * dividers needed for that to work, fall back to something sensible.
+ */
+ tcmr_period = ssc_p->tcmr_period;
+ rcmr_period = ssc_p->rcmr_period;
+ if (atmel_ssc_cfs(ssc_p)) {
+ int frame_size = snd_soc_params_to_frame_size(params);
+
+ if (frame_size < 0) {
+ dev_err(dai->dev,
+ "unable to calculate tx/rx cmr_period: %d\n",
+ frame_size);
+ return frame_size;
+ }
+
+ if (!(ssc_p->forced_divider & BIT(ATMEL_SSC_TCMR_PERIOD)))
+ tcmr_period = frame_size / 2 - 1;
+ if (!(ssc_p->forced_divider & BIT(ATMEL_SSC_RCMR_PERIOD)))
+ rcmr_period = frame_size / 2 - 1;
+ }
+
dma_params = ssc_p->dma_params[dir];
channels = params_channels(params);
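A worked pass through the divider fallback above, under assumed clock and stream parameters (illustrative values, not from the patch):

/* Assume mck_rate = 12288000 Hz and a 48 kHz, 2-channel, 16-bit stream:
 *   bclk_rate   = snd_soc_params_to_bclk()       = 48000 * 2 * 16 = 1536000 Hz
 *   cmr_div     = DIV_ROUND_CLOSEST(12288000, 2 * 1536000)        = 4
 *   frame_size  = snd_soc_params_to_frame_size() = 2 * 16         = 32 bits
 *   tcmr_period = rcmr_period = 32 / 2 - 1                        = 15
 */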
@@ -524,7 +593,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
fslen_ext = (bits - 1) / 16;
fslen = (bits - 1) % 16;
- rcmr = SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period)
+ rcmr = SSC_BF(RCMR_PERIOD, rcmr_period)
| SSC_BF(RCMR_STTDLY, START_DELAY)
| SSC_BF(RCMR_START, SSC_START_FALLING_RF)
| SSC_BF(RCMR_CKI, SSC_CKI_RISING)
@@ -540,7 +609,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
| SSC_BF(RFMR_LOOP, 0)
| SSC_BF(RFMR_DATLEN, (bits - 1));
- tcmr = SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period)
+ tcmr = SSC_BF(TCMR_PERIOD, tcmr_period)
| SSC_BF(TCMR_STTDLY, START_DELAY)
| SSC_BF(TCMR_START, SSC_START_FALLING_RF)
| SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
@@ -606,7 +675,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
fslen_ext = (bits - 1) / 16;
fslen = (bits - 1) % 16;
- rcmr = SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period)
+ rcmr = SSC_BF(RCMR_PERIOD, rcmr_period)
| SSC_BF(RCMR_STTDLY, START_DELAY)
| SSC_BF(RCMR_START, SSC_START_FALLING_RF)
| SSC_BF(RCMR_CKI, SSC_CKI_RISING)
@@ -623,7 +692,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
| SSC_BF(RFMR_LOOP, 0)
| SSC_BF(RFMR_DATLEN, (bits - 1));
- tcmr = SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period)
+ tcmr = SSC_BF(TCMR_PERIOD, tcmr_period)
| SSC_BF(TCMR_STTDLY, START_DELAY)
| SSC_BF(TCMR_START, SSC_START_FALLING_RF)
| SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
@@ -650,7 +719,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
* MCK divider, and the BCLK signal is output
* on the SSC TK line.
*/
- rcmr = SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period)
+ rcmr = SSC_BF(RCMR_PERIOD, rcmr_period)
| SSC_BF(RCMR_STTDLY, 1)
| SSC_BF(RCMR_START, SSC_START_RISING_RF)
| SSC_BF(RCMR_CKI, SSC_CKI_RISING)
@@ -665,7 +734,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
| SSC_BF(RFMR_LOOP, 0)
| SSC_BF(RFMR_DATLEN, (bits - 1));
- tcmr = SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period)
+ tcmr = SSC_BF(TCMR_PERIOD, tcmr_period)
| SSC_BF(TCMR_STTDLY, 1)
| SSC_BF(TCMR_START, SSC_START_RISING_RF)
| SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
@@ -760,7 +829,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
}
/* set SSC clock mode register */
- ssc_writel(ssc_p->ssc->regs, CMR, ssc_p->cmr_div);
+ ssc_writel(ssc_p->ssc->regs, CMR, cmr_div);
/* set receive clock mode and format */
ssc_writel(ssc_p->ssc->regs, RCMR, rcmr);
diff --git a/sound/soc/atmel/atmel_ssc_dai.h b/sound/soc/atmel/atmel_ssc_dai.h
index 80b153857a88..75194f582131 100644
--- a/sound/soc/atmel/atmel_ssc_dai.h
+++ b/sound/soc/atmel/atmel_ssc_dai.h
@@ -113,6 +113,7 @@ struct atmel_ssc_info {
unsigned short cmr_div;
unsigned short tcmr_period;
unsigned short rcmr_period;
+ unsigned int forced_divider;
struct atmel_pcm_dma_params *dma_params[2];
struct atmel_ssc_state ssc_state;
unsigned long mck_rate;
diff --git a/sound/soc/atmel/atmel_wm8904.c b/sound/soc/atmel/atmel_wm8904.c
index fdd28ed3e0b9..fbc10f61eb55 100644
--- a/sound/soc/atmel/atmel_wm8904.c
+++ b/sound/soc/atmel/atmel_wm8904.c
@@ -53,7 +53,7 @@ static int atmel_asoc_wm8904_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_soc_ops atmel_asoc_wm8904_ops = {
+static const struct snd_soc_ops atmel_asoc_wm8904_ops = {
.hw_params = atmel_asoc_wm8904_hw_params,
};
diff --git a/sound/soc/atmel/tse850-pcm5142.c b/sound/soc/atmel/tse850-pcm5142.c
new file mode 100644
index 000000000000..ac6a814c8ecf
--- /dev/null
+++ b/sound/soc/atmel/tse850-pcm5142.c
@@ -0,0 +1,472 @@
+/*
+ * TSE-850 audio - ASoC driver for the Axentia TSE-850 with a PCM5142 codec
+ *
+ * Copyright (C) 2016 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * loop1 relays
+ * IN1 +---o +------------+ o---+ OUT1
+ * \ /
+ * + +
+ * | / |
+ * +--o +--. |
+ * | add | |
+ * | V |
+ * | .---. |
+ * DAC +----------->|Sum|---+
+ * | '---' |
+ * | |
+ * + +
+ *
+ * IN2 +---o--+------------+--o---+ OUT2
+ * loop2 relays
+ *
+ * The 'loop1' gpio pin controls two relays, which are either in loop
+ * position, meaning that input and output are directly connected, or
+ * they are in mixer position, meaning that the signal is passed through
+ * the 'Sum' mixer. Similarly for 'loop2'.
+ *
+ * In the above, the 'loop1' relays are inactive, thus feeding IN1 to the
+ * mixer (if 'add' is active) and feeding the mixer output to OUT1. The
+ * 'loop2' relays are active, short-cutting the TSE-850 from channel 2.
+ * IN1, IN2, OUT1 and OUT2 are TSE-850 connectors and DAC is the PCB name
+ * of the (filtered) output from the PCM5142 codec.
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
+
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+
+#include "atmel_ssc_dai.h"
+
+struct tse850_priv {
+ int ssc_id;
+
+ struct gpio_desc *add;
+ struct gpio_desc *loop1;
+ struct gpio_desc *loop2;
+
+ struct regulator *ana;
+
+ int add_cache;
+ int loop1_cache;
+ int loop2_cache;
+};
+
+static int tse850_get_mux1(struct snd_kcontrol *kctrl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
+ struct snd_soc_card *card = dapm->card;
+ struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+
+ ucontrol->value.enumerated.item[0] = tse850->loop1_cache;
+
+ return 0;
+}
+
+static int tse850_put_mux1(struct snd_kcontrol *kctrl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
+ struct snd_soc_card *card = dapm->card;
+ struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+ struct soc_enum *e = (struct soc_enum *)kctrl->private_value;
+ unsigned int val = ucontrol->value.enumerated.item[0];
+
+ if (val >= e->items)
+ return -EINVAL;
+
+ gpiod_set_value_cansleep(tse850->loop1, val);
+ tse850->loop1_cache = val;
+
+ return snd_soc_dapm_put_enum_double(kctrl, ucontrol);
+}
+
+static int tse850_get_mux2(struct snd_kcontrol *kctrl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
+ struct snd_soc_card *card = dapm->card;
+ struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+
+ ucontrol->value.enumerated.item[0] = tse850->loop2_cache;
+
+ return 0;
+}
+
+static int tse850_put_mux2(struct snd_kcontrol *kctrl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
+ struct snd_soc_card *card = dapm->card;
+ struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+ struct soc_enum *e = (struct soc_enum *)kctrl->private_value;
+ unsigned int val = ucontrol->value.enumerated.item[0];
+
+ if (val >= e->items)
+ return -EINVAL;
+
+ gpiod_set_value_cansleep(tse850->loop2, val);
+ tse850->loop2_cache = val;
+
+ return snd_soc_dapm_put_enum_double(kctrl, ucontrol);
+}
+
+int tse850_get_mix(struct snd_kcontrol *kctrl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
+ struct snd_soc_card *card = dapm->card;
+ struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+
+ ucontrol->value.enumerated.item[0] = tse850->add_cache;
+
+ return 0;
+}
+
+int tse850_put_mix(struct snd_kcontrol *kctrl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
+ struct snd_soc_card *card = dapm->card;
+ struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+ int connect = !!ucontrol->value.integer.value[0];
+
+ if (tse850->add_cache == connect)
+ return 0;
+
+ /*
+ * Hmmm, this gpiod_set_value_cansleep call should probably happen
+ * inside snd_soc_dapm_mixer_update_power in the loop.
+ */
+ gpiod_set_value_cansleep(tse850->add, connect);
+ tse850->add_cache = connect;
+
+ snd_soc_dapm_mixer_update_power(dapm, kctrl, connect, NULL);
+ return 1;
+}
+
+int tse850_get_ana(struct snd_kcontrol *kctrl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
+ struct snd_soc_card *card = dapm->card;
+ struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+ int ret;
+
+ ret = regulator_get_voltage(tse850->ana);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Map regulator output values like so:
+ * -11.5V to "Low" (enum 0)
+ * 11.5V-12.5V to "12V" (enum 1)
+ * 12.5V-13.5V to "13V" (enum 2)
+ * ...
+ * 18.5V-19.5V to "19V" (enum 8)
+ * 19.5V and up to "20V" (enum 9)
+ */
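+ /* regulator_get_voltage() reports microvolts, hence the 1000000 scaling below */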
+ if (ret < 11000000)
+ ret = 11000000;
+ else if (ret > 20000000)
+ ret = 20000000;
+ ret -= 11000000;
+ ret = (ret + 500000) / 1000000;
+
+ ucontrol->value.enumerated.item[0] = ret;
+
+ return 0;
+}
+
+static int tse850_put_ana(struct snd_kcontrol *kctrl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
+ struct snd_soc_card *card = dapm->card;
+ struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+ struct soc_enum *e = (struct soc_enum *)kctrl->private_value;
+ unsigned int uV = ucontrol->value.enumerated.item[0];
+ int ret;
+
+ if (uV >= e->items)
+ return -EINVAL;
+
+ /*
+ * Map enum zero (Low) to 2 volts on the regulator. Do this because
+ * the ana regulator is supplied by the system 12V rail, and
+ * requesting anything below the system voltage causes the system
+ * voltage to be passed through the regulator. Also, the ana
+ * regulator induces noise when requesting voltages near the
+ * system voltage. So, by mapping Low to 2V, that noise is
+ * eliminated when all that is needed is 12V (the system voltage).
+ */
+ if (uV)
+ uV = 11000000 + (1000000 * uV);
+ else
+ uV = 2000000;
+
+ ret = regulator_set_voltage(tse850->ana, uV, uV);
+ if (ret < 0)
+ return ret;
+
+ return snd_soc_dapm_put_enum_double(kctrl, ucontrol);
+}
+
+static const char * const mux_text[] = { "Mixer", "Loop" };
+
+static const struct soc_enum mux_enum =
+ SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, 2, mux_text);
+
+static const struct snd_kcontrol_new mux1 =
+ SOC_DAPM_ENUM_EXT("MUX1", mux_enum, tse850_get_mux1, tse850_put_mux1);
+
+static const struct snd_kcontrol_new mux2 =
+ SOC_DAPM_ENUM_EXT("MUX2", mux_enum, tse850_get_mux2, tse850_put_mux2);
+
+#define TSE850_DAPM_SINGLE_EXT(xname, reg, shift, max, invert, xget, xput) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_soc_info_volsw, \
+ .get = xget, \
+ .put = xput, \
+ .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
+
+static const struct snd_kcontrol_new mix[] = {
+ TSE850_DAPM_SINGLE_EXT("IN Switch", SND_SOC_NOPM, 0, 1, 0,
+ tse850_get_mix, tse850_put_mix),
+};
+
+static const char * const ana_text[] = {
+ "Low", "12V", "13V", "14V", "15V", "16V", "17V", "18V", "19V", "20V"
+};
+
+static const struct soc_enum ana_enum =
+ SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(ana_text), ana_text);
+
+static const struct snd_kcontrol_new out =
+ SOC_DAPM_ENUM_EXT("ANA", ana_enum, tse850_get_ana, tse850_put_ana);
+
+static const struct snd_soc_dapm_widget tse850_dapm_widgets[] = {
+ SND_SOC_DAPM_LINE("OUT1", NULL),
+ SND_SOC_DAPM_LINE("OUT2", NULL),
+ SND_SOC_DAPM_LINE("IN1", NULL),
+ SND_SOC_DAPM_LINE("IN2", NULL),
+ SND_SOC_DAPM_INPUT("DAC"),
+ SND_SOC_DAPM_AIF_IN("AIFINL", "Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("AIFINR", "Playback", 1, SND_SOC_NOPM, 0, 0),
+ SOC_MIXER_ARRAY("MIX", SND_SOC_NOPM, 0, 0, mix),
+ SND_SOC_DAPM_MUX("MUX1", SND_SOC_NOPM, 0, 0, &mux1),
+ SND_SOC_DAPM_MUX("MUX2", SND_SOC_NOPM, 0, 0, &mux2),
+ SND_SOC_DAPM_OUT_DRV("OUT", SND_SOC_NOPM, 0, 0, &out, 1),
+};
+
+/*
+ * These connections are not entirely correct, since both IN1 and IN2
+ * are always fed to MIX (when the "IN Switch" is set), i.e. without
+ * regard to the loop1 and loop2 relays, which according to these routes
+ * only control MUX1 and MUX2 but in fact also control how the input
+ * signals are routed.
+ * But, 1) I don't know how to do it right, and 2) it doesn't seem to
+ * matter in practice since nothing is powered in those sections anyway.
+ */
+static const struct snd_soc_dapm_route tse850_intercon[] = {
+ { "OUT1", NULL, "MUX1" },
+ { "OUT2", NULL, "MUX2" },
+
+ { "MUX1", "Loop", "IN1" },
+ { "MUX1", "Mixer", "OUT" },
+
+ { "MUX2", "Loop", "IN2" },
+ { "MUX2", "Mixer", "OUT" },
+
+ { "OUT", NULL, "MIX" },
+
+ { "MIX", NULL, "DAC" },
+ { "MIX", "IN Switch", "IN1" },
+ { "MIX", "IN Switch", "IN2" },
+
+ /* connect board input to the codec left channel output pin */
+ { "DAC", NULL, "OUTL" },
+};
+
+static struct snd_soc_dai_link tse850_dailink = {
+ .name = "TSE-850",
+ .stream_name = "TSE-850-PCM",
+ .codec_dai_name = "pcm512x-hifi",
+ .dai_fmt = SND_SOC_DAIFMT_I2S
+ | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBM_CFS,
+};
+
+static struct snd_soc_card tse850_card = {
+ .name = "TSE-850-ASoC",
+ .owner = THIS_MODULE,
+ .dai_link = &tse850_dailink,
+ .num_links = 1,
+ .dapm_widgets = tse850_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tse850_dapm_widgets),
+ .dapm_routes = tse850_intercon,
+ .num_dapm_routes = ARRAY_SIZE(tse850_intercon),
+ .fully_routed = true,
+};
+
+static int tse850_dt_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *codec_np, *cpu_np;
+ struct snd_soc_card *card = &tse850_card;
+ struct snd_soc_dai_link *dailink = &tse850_dailink;
+ struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+
+ if (!np) {
+ dev_err(&pdev->dev, "only device tree supported\n");
+ return -EINVAL;
+ }
+
+ cpu_np = of_parse_phandle(np, "axentia,ssc-controller", 0);
+ if (!cpu_np) {
+ dev_err(&pdev->dev, "failed to get dai and pcm info\n");
+ return -EINVAL;
+ }
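+ /* the SSC controller node provides both the CPU DAI and the platform (PCM) device */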
+ dailink->cpu_of_node = cpu_np;
+ dailink->platform_of_node = cpu_np;
+ tse850->ssc_id = of_alias_get_id(cpu_np, "ssc");
+ of_node_put(cpu_np);
+
+ codec_np = of_parse_phandle(np, "axentia,audio-codec", 0);
+ if (!codec_np) {
+ dev_err(&pdev->dev, "failed to get codec info\n");
+ return -EINVAL;
+ }
+ dailink->codec_of_node = codec_np;
+ of_node_put(codec_np);
+
+ return 0;
+}
+
+static int tse850_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &tse850_card;
+ struct device *dev = card->dev = &pdev->dev;
+ struct tse850_priv *tse850;
+ int ret;
+
+ tse850 = devm_kzalloc(dev, sizeof(*tse850), GFP_KERNEL);
+ if (!tse850)
+ return -ENOMEM;
+
+ snd_soc_card_set_drvdata(card, tse850);
+
+ ret = tse850_dt_init(pdev);
+ if (ret) {
+ dev_err(dev, "failed to init dt info\n");
+ return ret;
+ }
+
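+ /* the control GPIOs are requested driven high, so seed the caches to match */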
+ tse850->add = devm_gpiod_get(dev, "axentia,add", GPIOD_OUT_HIGH);
+ if (IS_ERR(tse850->add)) {
+ if (PTR_ERR(tse850->add) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get 'add' gpio\n");
+ return PTR_ERR(tse850->add);
+ }
+ tse850->add_cache = 1;
+
+ tse850->loop1 = devm_gpiod_get(dev, "axentia,loop1", GPIOD_OUT_HIGH);
+ if (IS_ERR(tse850->loop1)) {
+ if (PTR_ERR(tse850->loop1) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get 'loop1' gpio\n");
+ return PTR_ERR(tse850->loop1);
+ }
+ tse850->loop1_cache = 1;
+
+ tse850->loop2 = devm_gpiod_get(dev, "axentia,loop2", GPIOD_OUT_HIGH);
+ if (IS_ERR(tse850->loop2)) {
+ if (PTR_ERR(tse850->loop2) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get 'loop2' gpio\n");
+ return PTR_ERR(tse850->loop2);
+ }
+ tse850->loop2_cache = 1;
+
+ tse850->ana = devm_regulator_get(dev, "axentia,ana");
+ if (IS_ERR(tse850->ana)) {
+ if (PTR_ERR(tse850->ana) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get 'ana' regulator\n");
+ return PTR_ERR(tse850->ana);
+ }
+
+ ret = regulator_enable(tse850->ana);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable the 'ana' regulator\n");
+ return ret;
+ }
+
+ ret = atmel_ssc_set_audio(tse850->ssc_id);
+ if (ret != 0) {
+ dev_err(dev,
+ "failed to set SSC %d for audio\n", tse850->ssc_id);
+ goto err_disable_ana;
+ }
+
+ ret = snd_soc_register_card(card);
+ if (ret) {
+ dev_err(dev, "snd_soc_register_card failed\n");
+ goto err_put_audio;
+ }
+
+ return 0;
+
+err_put_audio:
+ atmel_ssc_put_audio(tse850->ssc_id);
+err_disable_ana:
+ regulator_disable(tse850->ana);
+ return ret;
+}
+
+static int tse850_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+
+ snd_soc_unregister_card(card);
+ atmel_ssc_put_audio(tse850->ssc_id);
+ regulator_disable(tse850->ana);
+
+ return 0;
+}
+
+static const struct of_device_id tse850_dt_ids[] = {
+ { .compatible = "axentia,tse850-pcm5142", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tse850_dt_ids);
+
+static struct platform_driver tse850_driver = {
+ .driver = {
+ .name = "axentia-tse850-pcm5142",
+ .of_match_table = of_match_ptr(tse850_dt_ids),
+ },
+ .probe = tse850_probe,
+ .remove = tse850_remove,
+};
+
+module_platform_driver(tse850_driver);
+
+/* Module information */
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_DESCRIPTION("ALSA SoC driver for TSE-850 with PCM5142 codec");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/bcm/Kconfig b/sound/soc/bcm/Kconfig
index d528aaceaad9..edf367100ebd 100644
--- a/sound/soc/bcm/Kconfig
+++ b/sound/soc/bcm/Kconfig
@@ -11,6 +11,7 @@ config SND_BCM2835_SOC_I2S
config SND_SOC_CYGNUS
tristate "SoC platform audio for Broadcom Cygnus chips"
depends on ARCH_BCM_CYGNUS || COMPILE_TEST
+ depends on HAS_DMA
help
Say Y if you want to add support for ASoC audio on Broadcom
Cygnus chips (bcm958300, bcm958305, bcm911360)
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index c67667bb970f..9e1718a8cb1c 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -48,6 +48,8 @@ config SND_SOC_ALL_CODECS
select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC
select SND_SOC_CS35L32 if I2C
select SND_SOC_CS35L33 if I2C
+ select SND_SOC_CS35L34 if I2C
+ select SND_SOC_CS42L42 if I2C
select SND_SOC_CS42L51_I2C if I2C
select SND_SOC_CS42L52 if I2C && INPUT
select SND_SOC_CS42L56 if I2C && INPUT
@@ -83,6 +85,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_MAX98095 if I2C
select SND_SOC_MAX98357A if GPIOLIB
select SND_SOC_MAX98371 if I2C
+ select SND_SOC_MAX98504 if I2C
select SND_SOC_MAX9867 if I2C
select SND_SOC_MAX98925 if I2C
select SND_SOC_MAX98926 if I2C
@@ -114,6 +117,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_RT5651 if I2C
select SND_SOC_RT5659 if I2C
select SND_SOC_RT5660 if I2C
+ select SND_SOC_RT5665 if I2C
select SND_SOC_RT5663 if I2C
select SND_SOC_RT5670 if I2C
select SND_SOC_RT5677 if I2C && SPI_MASTER
@@ -399,6 +403,14 @@ config SND_SOC_CS35L33
tristate "Cirrus Logic CS35L33 CODEC"
depends on I2C
+config SND_SOC_CS35L34
+ tristate "Cirrus Logic CS35L34 CODEC"
+ depends on I2C
+
+config SND_SOC_CS42L42
+ tristate "Cirrus Logic CS42L42 CODEC"
+ depends on I2C
+
config SND_SOC_CS42L51
tristate
@@ -581,6 +593,13 @@ config SND_SOC_MAX9860
depends on I2C
select REGMAP_I2C
+config SND_SOC_MSM8916_WCD_ANALOG
+ tristate "Qualcomm MSM8916 WCD Analog Codec"
+ depends on SPMI || COMPILE_TEST
+
+config SND_SOC_MSM8916_WCD_DIGITAL
+ tristate "Qualcomm MSM8916 WCD DIGITAL Codec"
+
config SND_SOC_PCM1681
tristate "Texas Instruments PCM1681 CODEC"
depends on I2C
@@ -649,6 +668,7 @@ config SND_SOC_RL6231
default y if SND_SOC_RT5651=y
default y if SND_SOC_RT5659=y
default y if SND_SOC_RT5660=y
+ default y if SND_SOC_RT5665=y
default y if SND_SOC_RT5663=y
default y if SND_SOC_RT5670=y
default y if SND_SOC_RT5677=y
@@ -659,6 +679,7 @@ config SND_SOC_RL6231
default m if SND_SOC_RT5651=m
default m if SND_SOC_RT5659=m
default m if SND_SOC_RT5660=m
+ default m if SND_SOC_RT5665=m
default m if SND_SOC_RT5663=m
default m if SND_SOC_RT5670=m
default m if SND_SOC_RT5677=m
@@ -672,7 +693,6 @@ config SND_SOC_RL6347A
config SND_SOC_RT286
tristate
- select SND_SOC_RT5663
depends on I2C
config SND_SOC_RT298
@@ -708,6 +728,9 @@ config SND_SOC_RT5659
config SND_SOC_RT5660
tristate
+config SND_SOC_RT5665
+ tristate
+
config SND_SOC_RT5663
tristate
@@ -874,6 +897,7 @@ config SND_SOC_UDA134X
config SND_SOC_UDA1380
tristate
+ depends on I2C
config SND_SOC_WL1273
tristate
@@ -914,7 +938,7 @@ config SND_SOC_WM8523
depends on I2C
config SND_SOC_WM8580
- tristate "Wolfson Microelectronics WM8523 CODEC"
+ tristate "Wolfson Microelectronics WM8580 and WM8581 CODECs"
depends on I2C
config SND_SOC_WM8711
@@ -1048,15 +1072,18 @@ config SND_SOC_WM8998
config SND_SOC_WM9081
tristate
+ depends on I2C
config SND_SOC_WM9090
tristate
config SND_SOC_WM9705
tristate
+ select REGMAP_AC97
config SND_SOC_WM9712
tristate
+ select REGMAP_AC97
config SND_SOC_WM9713
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 958cd4912fbc..7e1dad79610b 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -38,6 +38,8 @@ snd-soc-bt-sco-objs := bt-sco.o
snd-soc-cq93vc-objs := cq93vc.o
snd-soc-cs35l32-objs := cs35l32.o
snd-soc-cs35l33-objs := cs35l33.o
+snd-soc-cs35l34-objs := cs35l34.o
+snd-soc-cs42l42-objs := cs42l42.o
snd-soc-cs42l51-objs := cs42l51.o
snd-soc-cs42l51-i2c-objs := cs42l51-i2c.o
snd-soc-cs42l52-objs := cs42l52.o
@@ -86,6 +88,8 @@ snd-soc-max9850-objs := max9850.o
snd-soc-max9860-objs := max9860.o
snd-soc-mc13783-objs := mc13783.o
snd-soc-ml26124-objs := ml26124.o
+snd-soc-msm8916-analog-objs := msm8916-wcd-analog.o
+snd-soc-msm8916-digital-objs := msm8916-wcd-digital.o
snd-soc-nau8810-objs := nau8810.o
snd-soc-nau8825-objs := nau8825.o
snd-soc-hdmi-codec-objs := hdmi-codec.o
@@ -114,6 +118,7 @@ snd-soc-rt5645-objs := rt5645.o
snd-soc-rt5651-objs := rt5651.o
snd-soc-rt5659-objs := rt5659.o
snd-soc-rt5660-objs := rt5660.o
+snd-soc-rt5665-objs := rt5665.o
snd-soc-rt5663-objs := rt5663.o
snd-soc-rt5670-objs := rt5670.o
snd-soc-rt5677-objs := rt5677.o
@@ -214,7 +219,6 @@ snd-soc-wm9705-objs := wm9705.o
snd-soc-wm9712-objs := wm9712.o
snd-soc-wm9713-objs := wm9713.o
snd-soc-wm-hubs-objs := wm_hubs.o
-
# Amp
snd-soc-max9877-objs := max9877.o
snd-soc-max98504-objs := max98504.o
@@ -263,6 +267,8 @@ obj-$(CONFIG_SND_SOC_BT_SCO) += snd-soc-bt-sco.o
obj-$(CONFIG_SND_SOC_CQ0093VC) += snd-soc-cq93vc.o
obj-$(CONFIG_SND_SOC_CS35L32) += snd-soc-cs35l32.o
obj-$(CONFIG_SND_SOC_CS35L33) += snd-soc-cs35l33.o
+obj-$(CONFIG_SND_SOC_CS35L34) += snd-soc-cs35l34.o
+obj-$(CONFIG_SND_SOC_CS42L42) += snd-soc-cs42l42.o
obj-$(CONFIG_SND_SOC_CS42L51) += snd-soc-cs42l51.o
obj-$(CONFIG_SND_SOC_CS42L51_I2C) += snd-soc-cs42l51-i2c.o
obj-$(CONFIG_SND_SOC_CS42L52) += snd-soc-cs42l52.o
@@ -310,6 +316,8 @@ obj-$(CONFIG_SND_SOC_MAX9850) += snd-soc-max9850.o
obj-$(CONFIG_SND_SOC_MAX9860) += snd-soc-max9860.o
obj-$(CONFIG_SND_SOC_MC13783) += snd-soc-mc13783.o
obj-$(CONFIG_SND_SOC_ML26124) += snd-soc-ml26124.o
+obj-$(CONFIG_SND_SOC_MSM8916_WCD_ANALOG) += snd-soc-msm8916-analog.o
+obj-$(CONFIG_SND_SOC_MSM8916_WCD_DIGITAL) += snd-soc-msm8916-digital.o
obj-$(CONFIG_SND_SOC_NAU8810) += snd-soc-nau8810.o
obj-$(CONFIG_SND_SOC_NAU8825) += snd-soc-nau8825.o
obj-$(CONFIG_SND_SOC_HDMI_CODEC) += snd-soc-hdmi-codec.o
@@ -338,6 +346,7 @@ obj-$(CONFIG_SND_SOC_RT5645) += snd-soc-rt5645.o
obj-$(CONFIG_SND_SOC_RT5651) += snd-soc-rt5651.o
obj-$(CONFIG_SND_SOC_RT5659) += snd-soc-rt5659.o
obj-$(CONFIG_SND_SOC_RT5660) += snd-soc-rt5660.o
+obj-$(CONFIG_SND_SOC_RT5665) += snd-soc-rt5665.o
obj-$(CONFIG_SND_SOC_RT5663) += snd-soc-rt5663.o
obj-$(CONFIG_SND_SOC_RT5670) += snd-soc-rt5670.o
obj-$(CONFIG_SND_SOC_RT5677) += snd-soc-rt5677.o
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index 935ff7cb71c5..312b2a11abb6 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -2587,8 +2587,6 @@ static struct platform_driver ab8500_codec_platform_driver = {
},
.probe = ab8500_codec_driver_probe,
.remove = ab8500_codec_driver_remove,
- .suspend = NULL,
- .resume = NULL,
};
module_platform_driver(ab8500_codec_platform_driver);
diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
index 439aa3ff1f99..b36511d965c8 100644
--- a/sound/soc/codecs/adau17x1.c
+++ b/sound/soc/codecs/adau17x1.c
@@ -160,7 +160,7 @@ static int adau17x1_dsp_mux_enum_put(struct snd_kcontrol *kcontrol,
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
struct adau *adau = snd_soc_codec_get_drvdata(codec);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- struct snd_soc_dapm_update update;
+ struct snd_soc_dapm_update update = { 0 };
unsigned int stream = e->shift_l;
unsigned int val, change;
int reg;
diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c
index c91717d08513..ebdaf56c1d61 100644
--- a/sound/soc/codecs/ak4641.c
+++ b/sound/soc/codecs/ak4641.c
@@ -27,7 +27,27 @@
#include <sound/tlv.h>
#include <sound/ak4641.h>
-#include "ak4641.h"
+/* AK4641 register space */
+#define AK4641_PM1 0x00
+#define AK4641_PM2 0x01
+#define AK4641_SIG1 0x02
+#define AK4641_SIG2 0x03
+#define AK4641_MODE1 0x04
+#define AK4641_MODE2 0x05
+#define AK4641_DAC 0x06
+#define AK4641_MIC 0x07
+#define AK4641_TIMER 0x08
+#define AK4641_ALC1 0x09
+#define AK4641_ALC2 0x0a
+#define AK4641_PGA 0x0b
+#define AK4641_LATT 0x0c
+#define AK4641_RATT 0x0d
+#define AK4641_VOL 0x0e
+#define AK4641_STATUS 0x0f
+#define AK4641_EQLO 0x10
+#define AK4641_EQMID 0x11
+#define AK4641_EQHI 0x12
+#define AK4641_BTIF 0x13
/* codec private data */
struct ak4641_priv {
diff --git a/sound/soc/codecs/ak4641.h b/sound/soc/codecs/ak4641.h
deleted file mode 100644
index 4a263248efea..000000000000
--- a/sound/soc/codecs/ak4641.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * ak4641.h -- AK4641 SoC Audio driver
- *
- * Copyright 2008 Harald Welte <laforge@gnufiish.org>
- *
- * Based on ak4535.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _AK4641_H
-#define _AK4641_H
-
-/* AK4641 register space */
-
-#define AK4641_PM1 0x00
-#define AK4641_PM2 0x01
-#define AK4641_SIG1 0x02
-#define AK4641_SIG2 0x03
-#define AK4641_MODE1 0x04
-#define AK4641_MODE2 0x05
-#define AK4641_DAC 0x06
-#define AK4641_MIC 0x07
-#define AK4641_TIMER 0x08
-#define AK4641_ALC1 0x09
-#define AK4641_ALC2 0x0a
-#define AK4641_PGA 0x0b
-#define AK4641_LATT 0x0c
-#define AK4641_RATT 0x0d
-#define AK4641_VOL 0x0e
-#define AK4641_STATUS 0x0f
-#define AK4641_EQLO 0x10
-#define AK4641_EQMID 0x11
-#define AK4641_EQHI 0x12
-#define AK4641_BTIF 0x13
-
-#define AK4641_CACHEREGNUM 0x14
-
-
-
-#define AK4641_DAI_HIFI 0
-#define AK4641_DAI_VOICE 1
-
-
-#endif
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 846ca079845f..0a734d910850 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -191,6 +191,14 @@ int arizona_init_spk(struct snd_soc_codec *codec)
break;
}
+ return 0;
+}
+EXPORT_SYMBOL_GPL(arizona_init_spk);
+
+int arizona_init_spk_irqs(struct arizona *arizona)
+{
+ int ret;
+
ret = arizona_request_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT_WARN,
"Thermal warning", arizona_thermal_warn,
arizona);
@@ -209,19 +217,16 @@ int arizona_init_spk(struct snd_soc_codec *codec)
return 0;
}
-EXPORT_SYMBOL_GPL(arizona_init_spk);
+EXPORT_SYMBOL_GPL(arizona_init_spk_irqs);
-int arizona_free_spk(struct snd_soc_codec *codec)
+int arizona_free_spk_irqs(struct arizona *arizona)
{
- struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
- struct arizona *arizona = priv->arizona;
-
arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT_WARN, arizona);
arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT, arizona);
return 0;
}
-EXPORT_SYMBOL_GPL(arizona_free_spk);
+EXPORT_SYMBOL_GPL(arizona_free_spk_irqs);
static const struct snd_soc_dapm_route arizona_mono_routes[] = {
{ "OUT1R", NULL, "OUT1L" },
@@ -252,6 +257,7 @@ EXPORT_SYMBOL_GPL(arizona_init_mono);
int arizona_init_gpio(struct snd_soc_codec *codec)
{
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
struct arizona *arizona = priv->arizona;
int i;
@@ -259,21 +265,24 @@ int arizona_init_gpio(struct snd_soc_codec *codec)
switch (arizona->type) {
case WM5110:
case WM8280:
- snd_soc_dapm_disable_pin(dapm, "DRC2 Signal Activity");
+ snd_soc_component_disable_pin(component,
+ "DRC2 Signal Activity");
break;
default:
break;
}
- snd_soc_dapm_disable_pin(dapm, "DRC1 Signal Activity");
+ snd_soc_component_disable_pin(component, "DRC1 Signal Activity");
for (i = 0; i < ARRAY_SIZE(arizona->pdata.gpio_defaults); i++) {
switch (arizona->pdata.gpio_defaults[i] & ARIZONA_GPN_FN_MASK) {
case ARIZONA_GP_FN_DRC1_SIGNAL_DETECT:
- snd_soc_dapm_enable_pin(dapm, "DRC1 Signal Activity");
+ snd_soc_component_enable_pin(component,
+ "DRC1 Signal Activity");
break;
case ARIZONA_GP_FN_DRC2_SIGNAL_DETECT:
- snd_soc_dapm_enable_pin(dapm, "DRC2 Signal Activity");
+ snd_soc_component_enable_pin(component,
+ "DRC2 Signal Activity");
break;
default:
break;
@@ -1233,6 +1242,46 @@ static int arizona_set_opclk(struct snd_soc_codec *codec, unsigned int clk,
return -EINVAL;
}
+int arizona_clk_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+ unsigned int val;
+ int clk_idx;
+ int ret;
+
+ ret = regmap_read(arizona->regmap, w->reg, &val);
+ if (ret) {
+ dev_err(codec->dev, "Failed to check clock source: %d\n", ret);
+ return ret;
+ }
+
+ val = (val & ARIZONA_SYSCLK_SRC_MASK) >> ARIZONA_SYSCLK_SRC_SHIFT;
+
+ switch (val) {
+ case ARIZONA_CLK_SRC_MCLK1:
+ clk_idx = ARIZONA_MCLK1;
+ break;
+ case ARIZONA_CLK_SRC_MCLK2:
+ clk_idx = ARIZONA_MCLK2;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ return clk_prepare_enable(arizona->mclk[clk_idx]);
+ case SND_SOC_DAPM_POST_PMD:
+ clk_disable_unprepare(arizona->mclk[clk_idx]);
+ return 0;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(arizona_clk_ev);
+
int arizona_set_sysclk(struct snd_soc_codec *codec, int clk_id,
int source, unsigned int freq, int dir)
{
@@ -2242,6 +2291,42 @@ static int arizona_is_enabled_fll(struct arizona_fll *fll, int base)
return reg & ARIZONA_FLL1_ENA;
}
+static int arizona_set_fll_clks(struct arizona_fll *fll, int base, bool ena)
+{
+ struct arizona *arizona = fll->arizona;
+ unsigned int val;
+ struct clk *clk;
+ int ret;
+
+ ret = regmap_read(arizona->regmap, base + 6, &val);
+ if (ret != 0) {
+ arizona_fll_err(fll, "Failed to read current source: %d\n",
+ ret);
+ return ret;
+ }
+
+ val &= ARIZONA_FLL1_CLK_REF_SRC_MASK;
+ val >>= ARIZONA_FLL1_CLK_REF_SRC_SHIFT;
+
+ switch (val) {
+ case ARIZONA_FLL_SRC_MCLK1:
+ clk = arizona->mclk[ARIZONA_MCLK1];
+ break;
+ case ARIZONA_FLL_SRC_MCLK2:
+ clk = arizona->mclk[ARIZONA_MCLK2];
+ break;
+ default:
+ return 0;
+ }
+
+ if (ena) {
+ return clk_prepare_enable(clk);
+ } else {
+ clk_disable_unprepare(clk);
+ return 0;
+ }
+}
+
static int arizona_enable_fll(struct arizona_fll *fll)
{
struct arizona *arizona = fll->arizona;
@@ -2264,6 +2349,10 @@ static int arizona_enable_fll(struct arizona_fll *fll)
udelay(32);
regmap_update_bits_async(fll->arizona->regmap, fll->base + 0x9,
ARIZONA_FLL1_GAIN_MASK, 0);
+
+ if (arizona_is_enabled_fll(fll, fll->base + 0x10) > 0)
+ arizona_set_fll_clks(fll, fll->base + 0x10, false);
+ arizona_set_fll_clks(fll, fll->base, false);
}
/*
@@ -2318,10 +2407,13 @@ static int arizona_enable_fll(struct arizona_fll *fll)
if (!already_enabled)
pm_runtime_get_sync(arizona->dev);
- if (use_sync)
+ if (use_sync) {
+ arizona_set_fll_clks(fll, fll->base + 0x10, true);
regmap_update_bits_async(arizona->regmap, fll->base + 0x11,
ARIZONA_FLL1_SYNC_ENA,
ARIZONA_FLL1_SYNC_ENA);
+ }
+ arizona_set_fll_clks(fll, fll->base, true);
regmap_update_bits_async(arizona->regmap, fll->base + 1,
ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA);
@@ -2354,19 +2446,24 @@ static int arizona_enable_fll(struct arizona_fll *fll)
static void arizona_disable_fll(struct arizona_fll *fll)
{
struct arizona *arizona = fll->arizona;
- bool change;
+ bool ref_change, sync_change;
regmap_update_bits_async(arizona->regmap, fll->base + 1,
ARIZONA_FLL1_FREERUN, ARIZONA_FLL1_FREERUN);
regmap_update_bits_check(arizona->regmap, fll->base + 1,
- ARIZONA_FLL1_ENA, 0, &change);
- regmap_update_bits(arizona->regmap, fll->base + 0x11,
- ARIZONA_FLL1_SYNC_ENA, 0);
+ ARIZONA_FLL1_ENA, 0, &ref_change);
+ regmap_update_bits_check(arizona->regmap, fll->base + 0x11,
+ ARIZONA_FLL1_SYNC_ENA, 0, &sync_change);
regmap_update_bits_async(arizona->regmap, fll->base + 1,
ARIZONA_FLL1_FREERUN, 0);
- if (change)
+ if (sync_change)
+ arizona_set_fll_clks(fll, fll->base + 0x10, false);
+
+ if (ref_change) {
+ arizona_set_fll_clks(fll, fll->base, false);
pm_runtime_put_autosuspend(arizona->dev);
+ }
}
int arizona_set_fll_refclk(struct arizona_fll *fll, int source,
@@ -2598,30 +2695,6 @@ int arizona_lhpf_coeff_put(struct snd_kcontrol *kcontrol,
}
EXPORT_SYMBOL_GPL(arizona_lhpf_coeff_put);
-int arizona_register_notifier(struct snd_soc_codec *codec,
- struct notifier_block *nb,
- int (*notify)(struct notifier_block *nb,
- unsigned long action, void *data))
-{
- struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
- struct arizona *arizona = priv->arizona;
-
- nb->notifier_call = notify;
-
- return blocking_notifier_chain_register(&arizona->notifier, nb);
-}
-EXPORT_SYMBOL_GPL(arizona_register_notifier);
-
-int arizona_unregister_notifier(struct snd_soc_codec *codec,
- struct notifier_block *nb)
-{
- struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
- struct arizona *arizona = priv->arizona;
-
- return blocking_notifier_chain_unregister(&arizona->notifier, nb);
-}
-EXPORT_SYMBOL_GPL(arizona_unregister_notifier);
-
MODULE_DESCRIPTION("ASoC Wolfson Arizona class device support");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
index 850aa338ba29..56707860657c 100644
--- a/sound/soc/codecs/arizona.h
+++ b/sound/soc/codecs/arizona.h
@@ -14,6 +14,8 @@
#define _ASOC_ARIZONA_H
#include <linux/completion.h>
+#include <linux/notifier.h>
+#include <linux/mfd/arizona/core.h>
#include <sound/soc.h>
@@ -66,7 +68,6 @@
/* Notifier events */
#define ARIZONA_NOTIFY_VOICE_TRIGGER 0x1
-struct arizona;
struct wm_adsp;
struct arizona_dai_priv {
@@ -255,26 +256,24 @@ extern const struct soc_enum arizona_output_anc_src[];
extern const struct snd_kcontrol_new arizona_voice_trigger_switch[];
-extern int arizona_in_ev(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol,
- int event);
-extern int arizona_out_ev(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol,
- int event);
-extern int arizona_hp_ev(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol,
- int event);
-extern int arizona_anc_ev(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol,
- int event);
-
-extern int arizona_eq_coeff_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
-extern int arizona_lhpf_coeff_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
-
-extern int arizona_set_sysclk(struct snd_soc_codec *codec, int clk_id,
- int source, unsigned int freq, int dir);
+int arizona_in_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol,
+ int event);
+int arizona_out_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol,
+ int event);
+int arizona_hp_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol,
+ int event);
+int arizona_anc_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol,
+ int event);
+
+int arizona_eq_coeff_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int arizona_lhpf_coeff_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+
+int arizona_clk_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol,
+ int event);
+int arizona_set_sysclk(struct snd_soc_codec *codec, int clk_id, int source,
+ unsigned int freq, int dir);
extern const struct snd_soc_dai_ops arizona_dai_ops;
extern const struct snd_soc_dai_ops arizona_simple_dai_ops;
@@ -297,41 +296,57 @@ struct arizona_fll {
char clock_ok_name[ARIZONA_FLL_NAME_LEN];
};
-extern int arizona_dvfs_up(struct snd_soc_codec *codec, unsigned int flags);
-extern int arizona_dvfs_down(struct snd_soc_codec *codec, unsigned int flags);
-extern int arizona_dvfs_sysclk_ev(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event);
-extern void arizona_init_dvfs(struct arizona_priv *priv);
-
-extern int arizona_init_fll(struct arizona *arizona, int id, int base,
- int lock_irq, int ok_irq, struct arizona_fll *fll);
-extern int arizona_set_fll_refclk(struct arizona_fll *fll, int source,
- unsigned int Fref, unsigned int Fout);
-extern int arizona_set_fll(struct arizona_fll *fll, int source,
+int arizona_dvfs_up(struct snd_soc_codec *codec, unsigned int flags);
+int arizona_dvfs_down(struct snd_soc_codec *codec, unsigned int flags);
+int arizona_dvfs_sysclk_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event);
+void arizona_init_dvfs(struct arizona_priv *priv);
+
+int arizona_init_fll(struct arizona *arizona, int id, int base,
+ int lock_irq, int ok_irq, struct arizona_fll *fll);
+int arizona_set_fll_refclk(struct arizona_fll *fll, int source,
unsigned int Fref, unsigned int Fout);
+int arizona_set_fll(struct arizona_fll *fll, int source,
+ unsigned int Fref, unsigned int Fout);
-extern int arizona_init_spk(struct snd_soc_codec *codec);
-extern int arizona_init_gpio(struct snd_soc_codec *codec);
-extern int arizona_init_mono(struct snd_soc_codec *codec);
-extern int arizona_init_notifiers(struct snd_soc_codec *codec);
+int arizona_init_spk(struct snd_soc_codec *codec);
+int arizona_init_gpio(struct snd_soc_codec *codec);
+int arizona_init_mono(struct snd_soc_codec *codec);
+int arizona_init_notifiers(struct snd_soc_codec *codec);
-extern int arizona_free_spk(struct snd_soc_codec *codec);
+int arizona_init_spk_irqs(struct arizona *arizona);
+int arizona_free_spk_irqs(struct arizona *arizona);
-extern int arizona_init_dai(struct arizona_priv *priv, int dai);
+int arizona_init_dai(struct arizona_priv *priv, int dai);
int arizona_set_output_mode(struct snd_soc_codec *codec, int output,
bool diff);
-extern bool arizona_input_analog(struct snd_soc_codec *codec, int shift);
+bool arizona_input_analog(struct snd_soc_codec *codec, int shift);
+
+const char *arizona_sample_rate_val_to_name(unsigned int rate_val);
+
+static inline int arizona_register_notifier(struct snd_soc_codec *codec,
+ struct notifier_block *nb,
+ int (*notify)
+ (struct notifier_block *nb,
+ unsigned long action, void *data))
+{
+ struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->arizona;
+
+ nb->notifier_call = notify;
+
+ return blocking_notifier_chain_register(&arizona->notifier, nb);
+}
-extern const char *arizona_sample_rate_val_to_name(unsigned int rate_val);
+static inline int arizona_unregister_notifier(struct snd_soc_codec *codec,
+ struct notifier_block *nb)
+{
+ struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->arizona;
-extern int arizona_register_notifier(struct snd_soc_codec *codec,
- struct notifier_block *nb,
- int (*notify)(struct notifier_block *nb,
- unsigned long action,
- void *data));
-extern int arizona_unregister_notifier(struct snd_soc_codec *codec,
- struct notifier_block *nb);
+ return blocking_notifier_chain_unregister(&arizona->notifier, nb);
+}
#endif
diff --git a/sound/soc/codecs/cs35l34.c b/sound/soc/codecs/cs35l34.c
new file mode 100644
index 000000000000..7c5d1510cf2c
--- /dev/null
+++ b/sound/soc/codecs/cs35l34.c
@@ -0,0 +1,1251 @@
+/*
+ * cs35l34.c -- CS35l34 ALSA SoC audio driver
+ *
+ * Copyright 2016 Cirrus Logic, Inc.
+ *
+ * Author: Paul Handrigan <Paul.Handrigan@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/machine.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <sound/cs35l34.h>
+
+#include "cs35l34.h"
+
+#define PDN_DONE_ATTEMPTS 10
+#define CS35L34_START_DELAY 50
+
+struct cs35l34_private {
+ struct snd_soc_codec *codec;
+ struct cs35l34_platform_data pdata;
+ struct regmap *regmap;
+ struct regulator_bulk_data core_supplies[2];
+ int num_core_supplies;
+ int mclk_int;
+ bool tdm_mode;
+ struct gpio_desc *reset_gpio; /* Active-low reset GPIO */
+};
+
+static const struct reg_default cs35l34_reg[] = {
+ {CS35L34_PWRCTL1, 0x01},
+ {CS35L34_PWRCTL2, 0x19},
+ {CS35L34_PWRCTL3, 0x01},
+ {CS35L34_ADSP_CLK_CTL, 0x08},
+ {CS35L34_MCLK_CTL, 0x11},
+ {CS35L34_AMP_INP_DRV_CTL, 0x01},
+ {CS35L34_AMP_DIG_VOL_CTL, 0x12},
+ {CS35L34_AMP_DIG_VOL, 0x00},
+ {CS35L34_AMP_ANLG_GAIN_CTL, 0x0F},
+ {CS35L34_PROTECT_CTL, 0x06},
+ {CS35L34_AMP_KEEP_ALIVE_CTL, 0x04},
+ {CS35L34_BST_CVTR_V_CTL, 0x00},
+ {CS35L34_BST_PEAK_I, 0x10},
+ {CS35L34_BST_RAMP_CTL, 0x87},
+ {CS35L34_BST_CONV_COEF_1, 0x24},
+ {CS35L34_BST_CONV_COEF_2, 0x24},
+ {CS35L34_BST_CONV_SLOPE_COMP, 0x4E},
+ {CS35L34_BST_CONV_SW_FREQ, 0x08},
+ {CS35L34_CLASS_H_CTL, 0x0D},
+ {CS35L34_CLASS_H_HEADRM_CTL, 0x0D},
+ {CS35L34_CLASS_H_RELEASE_RATE, 0x08},
+ {CS35L34_CLASS_H_FET_DRIVE_CTL, 0x41},
+ {CS35L34_CLASS_H_STATUS, 0x05},
+ {CS35L34_VPBR_CTL, 0x0A},
+ {CS35L34_VPBR_VOL_CTL, 0x90},
+ {CS35L34_VPBR_TIMING_CTL, 0x6A},
+ {CS35L34_PRED_MAX_ATTEN_SPK_LOAD, 0x95},
+ {CS35L34_PRED_BROWNOUT_THRESH, 0x1C},
+ {CS35L34_PRED_BROWNOUT_VOL_CTL, 0x00},
+ {CS35L34_PRED_BROWNOUT_RATE_CTL, 0x10},
+ {CS35L34_PRED_WAIT_CTL, 0x10},
+ {CS35L34_PRED_ZVP_INIT_IMP_CTL, 0x08},
+ {CS35L34_PRED_MAN_SAFE_VPI_CTL, 0x80},
+ {CS35L34_VPBR_ATTEN_STATUS, 0x00},
+ {CS35L34_PRED_BRWNOUT_ATT_STATUS, 0x00},
+ {CS35L34_SPKR_MON_CTL, 0xC6},
+ {CS35L34_ADSP_I2S_CTL, 0x00},
+ {CS35L34_ADSP_TDM_CTL, 0x00},
+ {CS35L34_TDM_TX_CTL_1_VMON, 0x00},
+ {CS35L34_TDM_TX_CTL_2_IMON, 0x04},
+ {CS35L34_TDM_TX_CTL_3_VPMON, 0x03},
+ {CS35L34_TDM_TX_CTL_4_VBSTMON, 0x07},
+ {CS35L34_TDM_TX_CTL_5_FLAG1, 0x08},
+ {CS35L34_TDM_TX_CTL_6_FLAG2, 0x09},
+ {CS35L34_TDM_TX_SLOT_EN_1, 0x00},
+ {CS35L34_TDM_TX_SLOT_EN_2, 0x00},
+ {CS35L34_TDM_TX_SLOT_EN_3, 0x00},
+ {CS35L34_TDM_TX_SLOT_EN_4, 0x00},
+ {CS35L34_TDM_RX_CTL_1_AUDIN, 0x40},
+ {CS35L34_TDM_RX_CTL_3_ALIVE, 0x04},
+ {CS35L34_MULT_DEV_SYNCH1, 0x00},
+ {CS35L34_MULT_DEV_SYNCH2, 0x80},
+ {CS35L34_PROT_RELEASE_CTL, 0x00},
+ {CS35L34_DIAG_MODE_REG_LOCK, 0x00},
+ {CS35L34_DIAG_MODE_CTL_1, 0x00},
+ {CS35L34_DIAG_MODE_CTL_2, 0x00},
+ {CS35L34_INT_MASK_1, 0xFF},
+ {CS35L34_INT_MASK_2, 0xFF},
+ {CS35L34_INT_MASK_3, 0xFF},
+ {CS35L34_INT_MASK_4, 0xFF},
+ {CS35L34_INT_STATUS_1, 0x30},
+ {CS35L34_INT_STATUS_2, 0x05},
+ {CS35L34_INT_STATUS_3, 0x00},
+ {CS35L34_INT_STATUS_4, 0x00},
+ {CS35L34_OTP_TRIM_STATUS, 0x00},
+};
+
+static bool cs35l34_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS35L34_DEVID_AB:
+ case CS35L34_DEVID_CD:
+ case CS35L34_DEVID_E:
+ case CS35L34_FAB_ID:
+ case CS35L34_REV_ID:
+ case CS35L34_INT_STATUS_1:
+ case CS35L34_INT_STATUS_2:
+ case CS35L34_INT_STATUS_3:
+ case CS35L34_INT_STATUS_4:
+ case CS35L34_CLASS_H_STATUS:
+ case CS35L34_VPBR_ATTEN_STATUS:
+ case CS35L34_OTP_TRIM_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool cs35l34_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS35L34_DEVID_AB:
+ case CS35L34_DEVID_CD:
+ case CS35L34_DEVID_E:
+ case CS35L34_FAB_ID:
+ case CS35L34_REV_ID:
+ case CS35L34_PWRCTL1:
+ case CS35L34_PWRCTL2:
+ case CS35L34_PWRCTL3:
+ case CS35L34_ADSP_CLK_CTL:
+ case CS35L34_MCLK_CTL:
+ case CS35L34_AMP_INP_DRV_CTL:
+ case CS35L34_AMP_DIG_VOL_CTL:
+ case CS35L34_AMP_DIG_VOL:
+ case CS35L34_AMP_ANLG_GAIN_CTL:
+ case CS35L34_PROTECT_CTL:
+ case CS35L34_AMP_KEEP_ALIVE_CTL:
+ case CS35L34_BST_CVTR_V_CTL:
+ case CS35L34_BST_PEAK_I:
+ case CS35L34_BST_RAMP_CTL:
+ case CS35L34_BST_CONV_COEF_1:
+ case CS35L34_BST_CONV_COEF_2:
+ case CS35L34_BST_CONV_SLOPE_COMP:
+ case CS35L34_BST_CONV_SW_FREQ:
+ case CS35L34_CLASS_H_CTL:
+ case CS35L34_CLASS_H_HEADRM_CTL:
+ case CS35L34_CLASS_H_RELEASE_RATE:
+ case CS35L34_CLASS_H_FET_DRIVE_CTL:
+ case CS35L34_CLASS_H_STATUS:
+ case CS35L34_VPBR_CTL:
+ case CS35L34_VPBR_VOL_CTL:
+ case CS35L34_VPBR_TIMING_CTL:
+ case CS35L34_PRED_MAX_ATTEN_SPK_LOAD:
+ case CS35L34_PRED_BROWNOUT_THRESH:
+ case CS35L34_PRED_BROWNOUT_VOL_CTL:
+ case CS35L34_PRED_BROWNOUT_RATE_CTL:
+ case CS35L34_PRED_WAIT_CTL:
+ case CS35L34_PRED_ZVP_INIT_IMP_CTL:
+ case CS35L34_PRED_MAN_SAFE_VPI_CTL:
+ case CS35L34_VPBR_ATTEN_STATUS:
+ case CS35L34_PRED_BRWNOUT_ATT_STATUS:
+ case CS35L34_SPKR_MON_CTL:
+ case CS35L34_ADSP_I2S_CTL:
+ case CS35L34_ADSP_TDM_CTL:
+ case CS35L34_TDM_TX_CTL_1_VMON:
+ case CS35L34_TDM_TX_CTL_2_IMON:
+ case CS35L34_TDM_TX_CTL_3_VPMON:
+ case CS35L34_TDM_TX_CTL_4_VBSTMON:
+ case CS35L34_TDM_TX_CTL_5_FLAG1:
+ case CS35L34_TDM_TX_CTL_6_FLAG2:
+ case CS35L34_TDM_TX_SLOT_EN_1:
+ case CS35L34_TDM_TX_SLOT_EN_2:
+ case CS35L34_TDM_TX_SLOT_EN_3:
+ case CS35L34_TDM_TX_SLOT_EN_4:
+ case CS35L34_TDM_RX_CTL_1_AUDIN:
+ case CS35L34_TDM_RX_CTL_3_ALIVE:
+ case CS35L34_MULT_DEV_SYNCH1:
+ case CS35L34_MULT_DEV_SYNCH2:
+ case CS35L34_PROT_RELEASE_CTL:
+ case CS35L34_DIAG_MODE_REG_LOCK:
+ case CS35L34_DIAG_MODE_CTL_1:
+ case CS35L34_DIAG_MODE_CTL_2:
+ case CS35L34_INT_MASK_1:
+ case CS35L34_INT_MASK_2:
+ case CS35L34_INT_MASK_3:
+ case CS35L34_INT_MASK_4:
+ case CS35L34_INT_STATUS_1:
+ case CS35L34_INT_STATUS_2:
+ case CS35L34_INT_STATUS_3:
+ case CS35L34_INT_STATUS_4:
+ case CS35L34_OTP_TRIM_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool cs35l34_precious_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS35L34_INT_STATUS_1:
+ case CS35L34_INT_STATUS_2:
+ case CS35L34_INT_STATUS_3:
+ case CS35L34_INT_STATUS_4:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int cs35l34_sdin_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct cs35l34_private *priv = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (priv->tdm_mode)
+ regmap_update_bits(priv->regmap, CS35L34_PWRCTL3,
+ CS35L34_PDN_TDM, 0x00);
+
+ ret = regmap_update_bits(priv->regmap, CS35L34_PWRCTL1,
+ CS35L34_PDN_ALL, 0);
+ if (ret < 0) {
+ dev_err(codec->dev, "Cannot set Power bits %d\n", ret);
+ return ret;
+ }
+ usleep_range(5000, 5100);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (priv->tdm_mode) {
+ regmap_update_bits(priv->regmap, CS35L34_PWRCTL3,
+ CS35L34_PDN_TDM, CS35L34_PDN_TDM);
+ }
+ ret = regmap_update_bits(priv->regmap, CS35L34_PWRCTL1,
+ CS35L34_PDN_ALL, CS35L34_PDN_ALL);
+ break;
+ default:
+ pr_err("Invalid event = 0x%x\n", event);
+ }
+ return 0;
+}
+
+static int cs35l34_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+ unsigned int rx_mask, int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct cs35l34_private *priv = snd_soc_codec_get_drvdata(codec);
+ unsigned int reg, bit_pos;
+ int slot, slot_num;
+
+ if (slot_width != 8)
+ return -EINVAL;
+
+ priv->tdm_mode = true;
+ /* scan rx_mask for aud slot */
+ slot = ffs(rx_mask) - 1;
+ if (slot >= 0)
+ snd_soc_update_bits(codec, CS35L34_TDM_RX_CTL_1_AUDIN,
+ CS35L34_X_LOC, slot);
+
+ /* scan tx_mask: vmon (2 slots); imon (2 slots); vpmon (1 slot)
+ * vbstmon (1 slot)
+ */
+ slot = ffs(tx_mask) - 1;
+ slot_num = 0;
+
+ /* disable vpmon/vbstmon: enable later if set in tx_mask */
+ snd_soc_update_bits(codec, CS35L34_TDM_TX_CTL_3_VPMON,
+ CS35L34_X_STATE | CS35L34_X_LOC,
+ CS35L34_X_STATE | CS35L34_X_LOC);
+ snd_soc_update_bits(codec, CS35L34_TDM_TX_CTL_4_VBSTMON,
+ CS35L34_X_STATE | CS35L34_X_LOC,
+ CS35L34_X_STATE | CS35L34_X_LOC);
+
+ /* disconnect {vp,vbst}_mon routes: enable later if set in tx_mask */
+ while (slot >= 0) {
+ /* configure VMON_TX_LOC */
+ if (slot_num == 0)
+ snd_soc_update_bits(codec, CS35L34_TDM_TX_CTL_1_VMON,
+ CS35L34_X_STATE | CS35L34_X_LOC, slot);
+
+ /* configure IMON_TX_LOC */
+ if (slot_num == 4) {
+ snd_soc_update_bits(codec, CS35L34_TDM_TX_CTL_2_IMON,
+ CS35L34_X_STATE | CS35L34_X_LOC, slot);
+ }
+ /* configure VPMON_TX_LOC */
+ if (slot_num == 3) {
+ snd_soc_update_bits(codec, CS35L34_TDM_TX_CTL_3_VPMON,
+ CS35L34_X_STATE | CS35L34_X_LOC, slot);
+ }
+ /* configure VBSTMON_TX_LOC */
+ if (slot_num == 7) {
+ snd_soc_update_bits(codec,
+ CS35L34_TDM_TX_CTL_4_VBSTMON,
+ CS35L34_X_STATE | CS35L34_X_LOC, slot);
+ }
+
+ /* Enable the relevant tx slot */
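+ /* TDM_TX_SLOT_EN_4 holds slots 0-7, EN_3 slots 8-15, and so on; bit_pos is slot % 8 */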
+ reg = CS35L34_TDM_TX_SLOT_EN_4 - (slot/8);
+ bit_pos = slot - ((slot / 8) * (8));
+ snd_soc_update_bits(codec, reg,
+ 1 << bit_pos, 1 << bit_pos);
+
+ tx_mask &= ~(1 << slot);
+ slot = ffs(tx_mask) - 1;
+ slot_num++;
+ }
+
+ return 0;
+}
+
+static int cs35l34_main_amp_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct cs35l34_private *priv = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ regmap_update_bits(priv->regmap, CS35L34_BST_CVTR_V_CTL,
+ CS35L34_BST_CVTL_MASK, priv->pdata.boost_vtge);
+ usleep_range(5000, 5100);
+ regmap_update_bits(priv->regmap, CS35L34_PROTECT_CTL,
+ CS35L34_MUTE, 0);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ regmap_update_bits(priv->regmap, CS35L34_BST_CVTR_V_CTL,
+ CS35L34_BST_CVTL_MASK, 0);
+ regmap_update_bits(priv->regmap, CS35L34_PROTECT_CTL,
+ CS35L34_MUTE, CS35L34_MUTE);
+ usleep_range(5000, 5100);
+ break;
+ default:
+ pr_err("Invalid event = 0x%x\n", event);
+ }
+ return 0;
+}
+
+static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10200, 50, 0);
+
+static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 300, 100, 0);
+
+
+static const struct snd_kcontrol_new cs35l34_snd_controls[] = {
+ SOC_SINGLE_SX_TLV("Digital Volume", CS35L34_AMP_DIG_VOL,
+ 0, 0x34, 0xE4, dig_vol_tlv),
+ SOC_SINGLE_TLV("Amp Gain Volume", CS35L34_AMP_ANLG_GAIN_CTL,
+ 0, 0xF, 0, amp_gain_tlv),
+};
+
+
+static int cs35l34_mclk_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct cs35l34_private *priv = snd_soc_codec_get_drvdata(codec);
+ int ret, i;
+ unsigned int reg;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMD:
+ ret = regmap_read(priv->regmap, CS35L34_AMP_DIG_VOL_CTL,
+ &reg);
+ if (ret != 0) {
+ pr_err("%s regmap read failure %d\n", __func__, ret);
+ return ret;
+ }
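+ /* wait for the digital soft-ramp (if enabled) before polling PDN_DONE */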
+ if (reg & CS35L34_AMP_DIGSFT)
+ msleep(40);
+ else
+ usleep_range(2000, 2100);
+
+ for (i = 0; i < PDN_DONE_ATTEMPTS; i++) {
+ ret = regmap_read(priv->regmap, CS35L34_INT_STATUS_2,
+ &reg);
+ if (ret != 0) {
+ pr_err("%s regmap read failure %d\n",
+ __func__, ret);
+ return ret;
+ }
+ if (reg & CS35L34_PDN_DONE)
+ break;
+
+ usleep_range(5000, 5100);
+ }
+ if (i == PDN_DONE_ATTEMPTS)
+ pr_err("%s Device did not power down properly\n",
+ __func__);
+ break;
+ default:
+ pr_err("Invalid event = 0x%x\n", event);
+ break;
+ }
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget cs35l34_dapm_widgets[] = {
+ SND_SOC_DAPM_AIF_IN_E("SDIN", NULL, 0, CS35L34_PWRCTL3,
+ 1, 1, cs35l34_sdin_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_AIF_OUT("SDOUT", NULL, 0, CS35L34_PWRCTL3, 2, 1),
+
+ SND_SOC_DAPM_SUPPLY("EXTCLK", CS35L34_PWRCTL3, 7, 1,
+ cs35l34_mclk_event, SND_SOC_DAPM_PRE_PMD),
+
+ SND_SOC_DAPM_OUTPUT("SPK"),
+
+ SND_SOC_DAPM_INPUT("VP"),
+ SND_SOC_DAPM_INPUT("VPST"),
+ SND_SOC_DAPM_INPUT("ISENSE"),
+ SND_SOC_DAPM_INPUT("VSENSE"),
+
+ SND_SOC_DAPM_ADC("VMON ADC", NULL, CS35L34_PWRCTL2, 7, 1),
+ SND_SOC_DAPM_ADC("IMON ADC", NULL, CS35L34_PWRCTL2, 6, 1),
+ SND_SOC_DAPM_ADC("VPMON ADC", NULL, CS35L34_PWRCTL3, 3, 1),
+ SND_SOC_DAPM_ADC("VBSTMON ADC", NULL, CS35L34_PWRCTL3, 4, 1),
+ SND_SOC_DAPM_ADC("CLASS H", NULL, CS35L34_PWRCTL2, 5, 1),
+ SND_SOC_DAPM_ADC("BOOST", NULL, CS35L34_PWRCTL2, 2, 1),
+
+ SND_SOC_DAPM_OUT_DRV_E("Main AMP", CS35L34_PWRCTL2, 0, 1, NULL, 0,
+ cs35l34_main_amp_event, SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+};
+
+static const struct snd_soc_dapm_route cs35l34_audio_map[] = {
+ {"SDIN", NULL, "AMP Playback"},
+ {"BOOST", NULL, "SDIN"},
+ {"CLASS H", NULL, "BOOST"},
+ {"Main AMP", NULL, "CLASS H"},
+ {"SPK", NULL, "Main AMP"},
+
+ {"VPMON ADC", NULL, "CLASS H"},
+ {"VBSTMON ADC", NULL, "CLASS H"},
+ {"SPK", NULL, "VPMON ADC"},
+ {"SPK", NULL, "VBSTMON ADC"},
+
+ {"IMON ADC", NULL, "ISENSE"},
+ {"VMON ADC", NULL, "VSENSE"},
+ {"SDOUT", NULL, "IMON ADC"},
+ {"SDOUT", NULL, "VMON ADC"},
+ {"AMP Capture", NULL, "SDOUT"},
+
+ {"SDIN", NULL, "EXTCLK"},
+ {"SDOUT", NULL, "EXTCLK"},
+};
+
+struct cs35l34_mclk_div {
+ int mclk;
+ int srate;
+ u8 adsp_rate;
+};
+
+static struct cs35l34_mclk_div cs35l34_mclk_coeffs[] = {
+
+ /* MCLK, Sample Rate, adsp_rate */
+
+ {5644800, 11025, 0x1},
+ {5644800, 22050, 0x4},
+ {5644800, 44100, 0x7},
+
+ {6000000, 8000, 0x0},
+ {6000000, 11025, 0x1},
+ {6000000, 12000, 0x2},
+ {6000000, 16000, 0x3},
+ {6000000, 22050, 0x4},
+ {6000000, 24000, 0x5},
+ {6000000, 32000, 0x6},
+ {6000000, 44100, 0x7},
+ {6000000, 48000, 0x8},
+
+ {6144000, 8000, 0x0},
+ {6144000, 11025, 0x1},
+ {6144000, 12000, 0x2},
+ {6144000, 16000, 0x3},
+ {6144000, 22050, 0x4},
+ {6144000, 24000, 0x5},
+ {6144000, 32000, 0x6},
+ {6144000, 44100, 0x7},
+ {6144000, 48000, 0x8},
+};
+
+static int cs35l34_get_mclk_coeff(int mclk, int srate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs35l34_mclk_coeffs); i++) {
+ if (cs35l34_mclk_coeffs[i].mclk == mclk &&
+ cs35l34_mclk_coeffs[i].srate == srate)
+ return i;
+ }
+ return -EINVAL;
+}
+
+static int cs35l34_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct cs35l34_private *priv = snd_soc_codec_get_drvdata(codec);
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ regmap_update_bits(priv->regmap, CS35L34_ADSP_CLK_CTL,
+ 0x80, 0x80);
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ regmap_update_bits(priv->regmap, CS35L34_ADSP_CLK_CTL,
+ 0x80, 0x00);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int cs35l34_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct cs35l34_private *priv = snd_soc_codec_get_drvdata(codec);
+ int srate = params_rate(params);
+ int ret;
+
+ int coeff = cs35l34_get_mclk_coeff(priv->mclk_int, srate);
+
+ if (coeff < 0) {
+ dev_err(codec->dev, "ERROR: Invalid mclk %d and/or srate %d\n",
+ priv->mclk_int, srate);
+ return coeff;
+ }
+
+ ret = regmap_update_bits(priv->regmap, CS35L34_ADSP_CLK_CTL,
+ CS35L34_ADSP_RATE, cs35l34_mclk_coeffs[coeff].adsp_rate);
+ if (ret != 0)
+ dev_err(codec->dev, "Failed to set clock state %d\n", ret);
+
+ return ret;
+}
+
+static unsigned int cs35l34_src_rates[] = {
+ 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
+};
+
+
+static struct snd_pcm_hw_constraint_list cs35l34_constraints = {
+ .count = ARRAY_SIZE(cs35l34_src_rates),
+ .list = cs35l34_src_rates,
+};
+
+static int cs35l34_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &cs35l34_constraints);
+ return 0;
+}
+
+
+static int cs35l34_set_tristate(struct snd_soc_dai *dai, int tristate)
+{
+
+ struct snd_soc_codec *codec = dai->codec;
+
+ if (tristate)
+ snd_soc_update_bits(codec, CS35L34_PWRCTL3,
+ CS35L34_PDN_SDOUT, CS35L34_PDN_SDOUT);
+ else
+ snd_soc_update_bits(codec, CS35L34_PWRCTL3,
+ CS35L34_PDN_SDOUT, 0);
+ return 0;
+}
+
+static int cs35l34_dai_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct cs35l34_private *cs35l34 = snd_soc_codec_get_drvdata(codec);
+ unsigned int value;
+
+ switch (freq) {
+ case CS35L34_MCLK_5644:
+ value = CS35L34_MCLK_RATE_5P6448;
+ cs35l34->mclk_int = freq;
+ break;
+ case CS35L34_MCLK_6:
+ value = CS35L34_MCLK_RATE_6P0000;
+ cs35l34->mclk_int = freq;
+ break;
+ case CS35L34_MCLK_6144:
+ value = CS35L34_MCLK_RATE_6P1440;
+ cs35l34->mclk_int = freq;
+ break;
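+ /* MCLK rates above 6.144 MHz enable the internal divide-by-two, halving the effective rate */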
+ case CS35L34_MCLK_11289:
+ value = CS35L34_MCLK_DIV | CS35L34_MCLK_RATE_5P6448;
+ cs35l34->mclk_int = freq / 2;
+ break;
+ case CS35L34_MCLK_12:
+ value = CS35L34_MCLK_DIV | CS35L34_MCLK_RATE_6P0000;
+ cs35l34->mclk_int = freq / 2;
+ break;
+ case CS35L34_MCLK_12288:
+ value = CS35L34_MCLK_DIV | CS35L34_MCLK_RATE_6P1440;
+ cs35l34->mclk_int = freq / 2;
+ break;
+ default:
+ dev_err(codec->dev, "ERROR: Invalid Frequency %d\n", freq);
+ cs35l34->mclk_int = 0;
+ return -EINVAL;
+ }
+ regmap_update_bits(cs35l34->regmap, CS35L34_MCLK_CTL,
+ CS35L34_MCLK_DIV | CS35L34_MCLK_RATE_MASK, value);
+ return 0;
+}
+
+static const struct snd_soc_dai_ops cs35l34_ops = {
+ .startup = cs35l34_pcm_startup,
+ .set_tristate = cs35l34_set_tristate,
+ .set_fmt = cs35l34_set_dai_fmt,
+ .hw_params = cs35l34_pcm_hw_params,
+ .set_sysclk = cs35l34_dai_set_sysclk,
+ .set_tdm_slot = cs35l34_set_tdm_slot,
+};
+
+static struct snd_soc_dai_driver cs35l34_dai = {
+ .name = "cs35l34",
+ .id = 0,
+ .playback = {
+ .stream_name = "AMP Playback",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = CS35L34_RATES,
+ .formats = CS35L34_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AMP Capture",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = CS35L34_RATES,
+ .formats = CS35L34_FORMATS,
+ },
+ .ops = &cs35l34_ops,
+ .symmetric_rates = 1,
+};
+
+static int cs35l34_boost_inductor(struct cs35l34_private *cs35l34,
+ unsigned int inductor)
+{
+ struct snd_soc_codec *codec = cs35l34->codec;
+
+ switch (inductor) {
+ case 1000: /* 1 uH */
+ regmap_write(cs35l34->regmap, CS35L34_BST_CONV_COEF_1, 0x24);
+ regmap_write(cs35l34->regmap, CS35L34_BST_CONV_COEF_2, 0x24);
+ regmap_write(cs35l34->regmap, CS35L34_BST_CONV_SLOPE_COMP,
+ 0x4E);
+ regmap_write(cs35l34->regmap, CS35L34_BST_CONV_SW_FREQ, 0);
+ break;
+ case 1200: /* 1.2 uH */
+ regmap_write(cs35l34->regmap, CS35L34_BST_CONV_COEF_1, 0x20);
+ regmap_write(cs35l34->regmap, CS35L34_BST_CONV_COEF_2, 0x20);
+ regmap_write(cs35l34->regmap, CS35L34_BST_CONV_SLOPE_COMP,
+ 0x47);
+ regmap_write(cs35l34->regmap, CS35L34_BST_CONV_SW_FREQ, 1);
+ break;
+ case 1500: /* 1.5uH */
+ regmap_write(cs35l34->regmap, CS35L34_BST_CONV_COEF_1, 0x20);
+ regmap_write(cs35l34->regmap, CS35L34_BST_CONV_COEF_2, 0x20);
+ regmap_write(cs35l34->regmap, CS35L34_BST_CONV_SLOPE_COMP,
+ 0x3C);
+ regmap_write(cs35l34->regmap, CS35L34_BST_CONV_SW_FREQ, 2);
+ break;
+ case 2200: /* 2.2uH */
+ regmap_write(cs35l34->regmap, CS35L34_BST_CONV_COEF_1, 0x19);
+ regmap_write(cs35l34->regmap, CS35L34_BST_CONV_COEF_2, 0x25);
+ regmap_write(cs35l34->regmap, CS35L34_BST_CONV_SLOPE_COMP,
+ 0x23);
+ regmap_write(cs35l34->regmap, CS35L34_BST_CONV_SW_FREQ, 3);
+ break;
+ default:
+ dev_err(codec->dev, "%s Invalid Inductor Value %d nH\n",
+ __func__, inductor);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int cs35l34_probe(struct snd_soc_codec *codec)
+{
+ int ret = 0;
+ struct cs35l34_private *cs35l34 = snd_soc_codec_get_drvdata(codec);
+
+ pm_runtime_get_sync(codec->dev);
+
+ /* Set over temperature warning attenuation to 6 dB */
+ regmap_update_bits(cs35l34->regmap, CS35L34_PROTECT_CTL,
+ CS35L34_OTW_ATTN_MASK, 0x8);
+
+ /* Set Power control registers 2 and 3 to have everything
+ * powered down at initialization
+ */
+ regmap_write(cs35l34->regmap, CS35L34_PWRCTL2, 0xFD);
+ regmap_write(cs35l34->regmap, CS35L34_PWRCTL3, 0x1F);
+
+ /* Set mute bit at startup */
+ regmap_update_bits(cs35l34->regmap, CS35L34_PROTECT_CTL,
+ CS35L34_MUTE, CS35L34_MUTE);
+
+ /* Set Platform Data */
+ if (cs35l34->pdata.boost_peak)
+ regmap_update_bits(cs35l34->regmap, CS35L34_BST_PEAK_I,
+ CS35L34_BST_PEAK_MASK,
+ cs35l34->pdata.boost_peak);
+
+ if (cs35l34->pdata.gain_zc_disable)
+ regmap_update_bits(cs35l34->regmap, CS35L34_PROTECT_CTL,
+ CS35L34_GAIN_ZC_MASK, 0);
+ else
+ regmap_update_bits(cs35l34->regmap, CS35L34_PROTECT_CTL,
+ CS35L34_GAIN_ZC_MASK, CS35L34_GAIN_ZC_MASK);
+
+ if (cs35l34->pdata.aif_half_drv)
+ regmap_update_bits(cs35l34->regmap, CS35L34_ADSP_CLK_CTL,
+ CS35L34_ADSP_DRIVE, 0);
+
+ if (cs35l34->pdata.digsft_disable)
+ regmap_update_bits(cs35l34->regmap, CS35L34_AMP_DIG_VOL_CTL,
+ CS35L34_AMP_DIGSFT, 0);
+
+ if (cs35l34->pdata.amp_inv)
+ regmap_update_bits(cs35l34->regmap, CS35L34_AMP_DIG_VOL_CTL,
+ CS35L34_INV, CS35L34_INV);
+
+ if (cs35l34->pdata.boost_ind)
+ ret = cs35l34_boost_inductor(cs35l34, cs35l34->pdata.boost_ind);
+
+ if (cs35l34->pdata.i2s_sdinloc)
+ regmap_update_bits(cs35l34->regmap, CS35L34_ADSP_I2S_CTL,
+ CS35L34_I2S_LOC_MASK,
+ cs35l34->pdata.i2s_sdinloc << CS35L34_I2S_LOC_SHIFT);
+
+ if (cs35l34->pdata.tdm_rising_edge)
+ regmap_update_bits(cs35l34->regmap, CS35L34_ADSP_TDM_CTL,
+ 1, 1);
+
+ pm_runtime_put_sync(codec->dev);
+
+ return ret;
+}
+
+
+static struct snd_soc_codec_driver soc_codec_dev_cs35l34 = {
+ .probe = cs35l34_probe,
+
+ .component_driver = {
+ .dapm_widgets = cs35l34_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs35l34_dapm_widgets),
+ .dapm_routes = cs35l34_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(cs35l34_audio_map),
+ .controls = cs35l34_snd_controls,
+ .num_controls = ARRAY_SIZE(cs35l34_snd_controls),
+ },
+};
+
+static struct regmap_config cs35l34_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = CS35L34_MAX_REGISTER,
+ .reg_defaults = cs35l34_reg,
+ .num_reg_defaults = ARRAY_SIZE(cs35l34_reg),
+ .volatile_reg = cs35l34_volatile_register,
+ .readable_reg = cs35l34_readable_register,
+ .precious_reg = cs35l34_precious_register,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int cs35l34_handle_of_data(struct i2c_client *i2c_client,
+ struct cs35l34_platform_data *pdata)
+{
+ struct device_node *np = i2c_client->dev.of_node;
+ unsigned int val;
+
+ if (of_property_read_u32(np, "cirrus,boost-vtge-millivolt",
+ &val) >= 0) {
+ /* Boost Voltage has a maximum of 8V */
+ if (val > 8000 || (val < 3300 && val > 0)) {
+ dev_err(&i2c_client->dev,
+ "Invalid Boost Voltage %d mV\n", val);
+ return -EINVAL;
+ }
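+ /* 0 selects VP; otherwise the register value starts at 1 for 3.3 V, in 100 mV steps */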
+ if (val == 0)
+ pdata->boost_vtge = 0; /* Use VP */
+ else
+ pdata->boost_vtge = ((val - 3300)/100) + 1;
+ } else {
+ dev_warn(&i2c_client->dev,
+ "Boost Voltage not specified. Using VP\n");
+ }
+
+ if (of_property_read_u32(np, "cirrus,boost-ind-nanohenry", &val) >= 0) {
+ pdata->boost_ind = val;
+ } else {
+ dev_err(&i2c_client->dev, "Inductor not specified.\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(np, "cirrus,boost-peak-milliamp", &val) >= 0) {
+ if (val > 3840 || val < 1200) {
+ dev_err(&i2c_client->dev,
+ "Invalid Boost Peak Current %d mA\n", val);
+ return -EINVAL;
+ }
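+ /* register value 1 corresponds to 1200 mA, in 80 mA steps */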
+ pdata->boost_peak = ((val - 1200)/80) + 1;
+ }
+
+ pdata->aif_half_drv = of_property_read_bool(np,
+ "cirrus,aif-half-drv");
+ pdata->digsft_disable = of_property_read_bool(np,
+ "cirrus,digsft-disable");
+
+ pdata->gain_zc_disable = of_property_read_bool(np,
+ "cirrus,gain-zc-disable");
+ pdata->amp_inv = of_property_read_bool(np, "cirrus,amp-inv");
+
+ if (of_property_read_u32(np, "cirrus,i2s-sdinloc", &val) >= 0)
+ pdata->i2s_sdinloc = val;
+ if (of_property_read_u32(np, "cirrus,tdm-rising-edge", &val) >= 0)
+ pdata->tdm_rising_edge = val;
+
+ return 0;
+}
+
+static irqreturn_t cs35l34_irq_thread(int irq, void *data)
+{
+ struct cs35l34_private *cs35l34 = data;
+ struct snd_soc_codec *codec = cs35l34->codec;
+ unsigned int sticky1, sticky2, sticky3, sticky4;
+ unsigned int mask1, mask2, mask3, mask4, current1;
+
+
+ /* ack the irq by reading all status registers */
+ regmap_read(cs35l34->regmap, CS35L34_INT_STATUS_4, &sticky4);
+ regmap_read(cs35l34->regmap, CS35L34_INT_STATUS_3, &sticky3);
+ regmap_read(cs35l34->regmap, CS35L34_INT_STATUS_2, &sticky2);
+ regmap_read(cs35l34->regmap, CS35L34_INT_STATUS_1, &sticky1);
+
+ regmap_read(cs35l34->regmap, CS35L34_INT_MASK_4, &mask4);
+ regmap_read(cs35l34->regmap, CS35L34_INT_MASK_3, &mask3);
+ regmap_read(cs35l34->regmap, CS35L34_INT_MASK_2, &mask2);
+ regmap_read(cs35l34->regmap, CS35L34_INT_MASK_1, &mask1);
+
+ if (!(sticky1 & ~mask1) && !(sticky2 & ~mask2) && !(sticky3 & ~mask3)
+ && !(sticky4 & ~mask4))
+ return IRQ_NONE;
+
+ regmap_read(cs35l34->regmap, CS35L34_INT_STATUS_1, &current1);
+
+ if (sticky1 & CS35L34_CAL_ERR) {
+ dev_err(codec->dev, "Cal error\n");
+
+ /* error is no longer asserted; safe to reset */
+ if (!(current1 & CS35L34_CAL_ERR)) {
+ dev_dbg(codec->dev, "Cal error release\n");
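+ /* pulse the release bit low, high, low to clear the latched error */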
+ regmap_update_bits(cs35l34->regmap,
+ CS35L34_PROT_RELEASE_CTL,
+ CS35L34_CAL_ERR_RLS, 0);
+ regmap_update_bits(cs35l34->regmap,
+ CS35L34_PROT_RELEASE_CTL,
+ CS35L34_CAL_ERR_RLS,
+ CS35L34_CAL_ERR_RLS);
+ regmap_update_bits(cs35l34->regmap,
+ CS35L34_PROT_RELEASE_CTL,
+ CS35L34_CAL_ERR_RLS, 0);
+ /* note: amp will re-calibrate on next resume */
+ }
+ }
+
+ if (sticky1 & CS35L34_ALIVE_ERR)
+ dev_err(codec->dev, "Alive error\n");
+
+ if (sticky1 & CS35L34_AMP_SHORT) {
+ dev_crit(codec->dev, "Amp short error\n");
+
+ /* error is no longer asserted; safe to reset */
+ if (!(current1 & CS35L34_AMP_SHORT)) {
+ dev_dbg(codec->dev,
+ "Amp short error release\n");
+ regmap_update_bits(cs35l34->regmap,
+ CS35L34_PROT_RELEASE_CTL,
+ CS35L34_SHORT_RLS, 0);
+ regmap_update_bits(cs35l34->regmap,
+ CS35L34_PROT_RELEASE_CTL,
+ CS35L34_SHORT_RLS,
+ CS35L34_SHORT_RLS);
+ regmap_update_bits(cs35l34->regmap,
+ CS35L34_PROT_RELEASE_CTL,
+ CS35L34_SHORT_RLS, 0);
+ }
+ }
+
+ if (sticky1 & CS35L34_OTW) {
+ dev_crit(codec->dev, "Over temperature warning\n");
+
+ /* error is no longer asserted; safe to reset */
+ if (!(current1 & CS35L34_OTW)) {
+ dev_dbg(codec->dev,
+ "Over temperature warning release\n");
+ regmap_update_bits(cs35l34->regmap,
+ CS35L34_PROT_RELEASE_CTL,
+ CS35L34_OTW_RLS, 0);
+ regmap_update_bits(cs35l34->regmap,
+ CS35L34_PROT_RELEASE_CTL,
+ CS35L34_OTW_RLS,
+ CS35L34_OTW_RLS);
+ regmap_update_bits(cs35l34->regmap,
+ CS35L34_PROT_RELEASE_CTL,
+ CS35L34_OTW_RLS, 0);
+ }
+ }
+
+ if (sticky1 & CS35L34_OTE) {
+ dev_crit(codec->dev, "Over temperature error\n");
+
+ /* error is no longer asserted; safe to reset */
+ if (!(current1 & CS35L34_OTE)) {
+ dev_dbg(codec->dev,
+ "Over temperature error release\n");
+ regmap_update_bits(cs35l34->regmap,
+ CS35L34_PROT_RELEASE_CTL,
+ CS35L34_OTE_RLS, 0);
+ regmap_update_bits(cs35l34->regmap,
+ CS35L34_PROT_RELEASE_CTL,
+ CS35L34_OTE_RLS,
+ CS35L34_OTE_RLS);
+ regmap_update_bits(cs35l34->regmap,
+ CS35L34_PROT_RELEASE_CTL,
+ CS35L34_OTE_RLS, 0);
+ }
+ }
+
+ if (sticky3 & CS35L34_BST_HIGH) {
+ dev_crit(codec->dev, "VBST too high error; powering off!\n");
+ regmap_update_bits(cs35l34->regmap, CS35L34_PWRCTL2,
+ CS35L34_PDN_AMP, CS35L34_PDN_AMP);
+ regmap_update_bits(cs35l34->regmap, CS35L34_PWRCTL1,
+ CS35L34_PDN_ALL, CS35L34_PDN_ALL);
+ }
+
+ if (sticky3 & CS35L34_LBST_SHORT) {
+ dev_crit(codec->dev, "LBST short error; powering off!\n");
+ regmap_update_bits(cs35l34->regmap, CS35L34_PWRCTL2,
+ CS35L34_PDN_AMP, CS35L34_PDN_AMP);
+ regmap_update_bits(cs35l34->regmap, CS35L34_PWRCTL1,
+ CS35L34_PDN_ALL, CS35L34_PDN_ALL);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const char * const cs35l34_core_supplies[] = {
+ "VA",
+ "VP",
+};
+
+static int cs35l34_i2c_probe(struct i2c_client *i2c_client,
+ const struct i2c_device_id *id)
+{
+ struct cs35l34_private *cs35l34;
+ struct cs35l34_platform_data *pdata =
+ dev_get_platdata(&i2c_client->dev);
+ int i;
+ int ret;
+ unsigned int devid = 0;
+ unsigned int reg;
+
+ cs35l34 = devm_kzalloc(&i2c_client->dev,
+ sizeof(struct cs35l34_private),
+ GFP_KERNEL);
+ if (!cs35l34) {
+ dev_err(&i2c_client->dev, "could not allocate codec\n");
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(i2c_client, cs35l34);
+ cs35l34->regmap = devm_regmap_init_i2c(i2c_client, &cs35l34_regmap);
+ if (IS_ERR(cs35l34->regmap)) {
+ ret = PTR_ERR(cs35l34->regmap);
+ dev_err(&i2c_client->dev, "regmap_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ cs35l34->num_core_supplies = ARRAY_SIZE(cs35l34_core_supplies);
+ for (i = 0; i < ARRAY_SIZE(cs35l34_core_supplies); i++)
+ cs35l34->core_supplies[i].supply = cs35l34_core_supplies[i];
+
+ ret = devm_regulator_bulk_get(&i2c_client->dev,
+ cs35l34->num_core_supplies,
+ cs35l34->core_supplies);
+ if (ret != 0) {
+ dev_err(&i2c_client->dev,
+ "Failed to request core supplies %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(cs35l34->num_core_supplies,
+ cs35l34->core_supplies);
+ if (ret != 0) {
+ dev_err(&i2c_client->dev,
+ "Failed to enable core supplies: %d\n", ret);
+ return ret;
+ }
+
+ if (pdata) {
+ cs35l34->pdata = *pdata;
+ } else {
+ pdata = devm_kzalloc(&i2c_client->dev,
+ sizeof(struct cs35l34_platform_data),
+ GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&i2c_client->dev,
+ "could not allocate pdata\n");
+ return -ENOMEM;
+ }
+ if (i2c_client->dev.of_node) {
+ ret = cs35l34_handle_of_data(i2c_client, pdata);
+ if (ret != 0)
+ return ret;
+
+ }
+ cs35l34->pdata = *pdata;
+ }
+
+ ret = devm_request_threaded_irq(&i2c_client->dev, i2c_client->irq, NULL,
+ cs35l34_irq_thread, IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ "cs35l34", cs35l34);
+ if (ret != 0)
+ dev_err(&i2c_client->dev, "Failed to request IRQ: %d\n", ret);
+
+ cs35l34->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
+ "reset-gpios", GPIOD_OUT_LOW);
+ if (IS_ERR(cs35l34->reset_gpio))
+ return PTR_ERR(cs35l34->reset_gpio);
+
+ gpiod_set_value_cansleep(cs35l34->reset_gpio, 1);
+
+ msleep(CS35L34_START_DELAY);
+
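+ /* Assemble the 20-bit device ID from the three ID registers */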
+ ret = regmap_read(cs35l34->regmap, CS35L34_DEVID_AB, &reg);
+
+ devid = (reg & 0xFF) << 12;
+ ret = regmap_read(cs35l34->regmap, CS35L34_DEVID_CD, &reg);
+ devid |= (reg & 0xFF) << 4;
+ ret = regmap_read(cs35l34->regmap, CS35L34_DEVID_E, &reg);
+ devid |= (reg & 0xF0) >> 4;
+
+ if (devid != CS35L34_CHIP_ID) {
+ dev_err(&i2c_client->dev,
+ "CS35l34 Device ID (%X). Expected ID %X\n",
+ devid, CS35L34_CHIP_ID);
+ ret = -ENODEV;
+ goto err_regulator;
+ }
+
+ ret = regmap_read(cs35l34->regmap, CS35L34_REV_ID, &reg);
+ if (ret < 0) {
+ dev_err(&i2c_client->dev, "Get Revision ID failed\n");
+ goto err_regulator;
+ }
+
+ dev_info(&i2c_client->dev,
+ "Cirrus Logic CS35l34 (%x), Revision: %02X\n", devid,
+ reg & 0xFF);
+
+ /* Unmask critical interrupts */
+ regmap_update_bits(cs35l34->regmap, CS35L34_INT_MASK_1,
+ CS35L34_M_CAL_ERR | CS35L34_M_ALIVE_ERR |
+ CS35L34_M_AMP_SHORT | CS35L34_M_OTW |
+ CS35L34_M_OTE, 0);
+ regmap_update_bits(cs35l34->regmap, CS35L34_INT_MASK_3,
+ CS35L34_M_BST_HIGH | CS35L34_M_LBST_SHORT, 0);
+
+ pm_runtime_set_autosuspend_delay(&i2c_client->dev, 100);
+ pm_runtime_use_autosuspend(&i2c_client->dev);
+ pm_runtime_set_active(&i2c_client->dev);
+ pm_runtime_enable(&i2c_client->dev);
+
+ ret = snd_soc_register_codec(&i2c_client->dev,
+ &soc_codec_dev_cs35l34, &cs35l34_dai, 1);
+ if (ret < 0) {
+ dev_err(&i2c_client->dev,
+ "%s: Register codec failed\n", __func__);
+ goto err_regulator;
+ }
+
+ return 0;
+
+err_regulator:
+ regulator_bulk_disable(cs35l34->num_core_supplies,
+ cs35l34->core_supplies);
+
+ return ret;
+}
+
+static int cs35l34_i2c_remove(struct i2c_client *client)
+{
+ struct cs35l34_private *cs35l34 = i2c_get_clientdata(client);
+
+ snd_soc_unregister_codec(&client->dev);
+
+ if (cs35l34->reset_gpio)
+ gpiod_set_value_cansleep(cs35l34->reset_gpio, 0);
+
+ pm_runtime_disable(&client->dev);
+ regulator_bulk_disable(cs35l34->num_core_supplies,
+ cs35l34->core_supplies);
+
+ return 0;
+}
+
+static int __maybe_unused cs35l34_runtime_resume(struct device *dev)
+{
+ struct cs35l34_private *cs35l34 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_bulk_enable(cs35l34->num_core_supplies,
+ cs35l34->core_supplies);
+
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable core supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ regcache_cache_only(cs35l34->regmap, false);
+
+ gpiod_set_value_cansleep(cs35l34->reset_gpio, 1);
+ msleep(CS35L34_START_DELAY);
+
+ ret = regcache_sync(cs35l34->regmap);
+ if (ret != 0) {
+ dev_err(dev, "Failed to restore register cache\n");
+ goto err;
+ }
+ return 0;
+err:
+ regcache_cache_only(cs35l34->regmap, true);
+ regulator_bulk_disable(cs35l34->num_core_supplies,
+ cs35l34->core_supplies);
+
+ return ret;
+}
+
+static int __maybe_unused cs35l34_runtime_suspend(struct device *dev)
+{
+ struct cs35l34_private *cs35l34 = dev_get_drvdata(dev);
+
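+ /* Go cache-only and mark the cache dirty; resume re-syncs it after power-up */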
+ regcache_cache_only(cs35l34->regmap, true);
+ regcache_mark_dirty(cs35l34->regmap);
+
+ gpiod_set_value_cansleep(cs35l34->reset_gpio, 0);
+
+ regulator_bulk_disable(cs35l34->num_core_supplies,
+ cs35l34->core_supplies);
+
+ return 0;
+}
+
+static const struct dev_pm_ops cs35l34_pm_ops = {
+ SET_RUNTIME_PM_OPS(cs35l34_runtime_suspend,
+ cs35l34_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id cs35l34_of_match[] = {
+ {.compatible = "cirrus,cs35l34"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, cs35l34_of_match);
+
+static const struct i2c_device_id cs35l34_id[] = {
+ {"cs35l34", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, cs35l34_id);
+
+static struct i2c_driver cs35l34_i2c_driver = {
+ .driver = {
+ .name = "cs35l34",
+ .pm = &cs35l34_pm_ops,
+ .of_match_table = cs35l34_of_match,
+
+ },
+ .id_table = cs35l34_id,
+ .probe = cs35l34_i2c_probe,
+ .remove = cs35l34_i2c_remove,
+
+};
+
+static int __init cs35l34_modinit(void)
+{
+ int ret;
+
+ ret = i2c_add_driver(&cs35l34_i2c_driver);
+ if (ret != 0) {
+ pr_err("Failed to register CS35l34 I2C driver: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+module_init(cs35l34_modinit);
+
+static void __exit cs35l34_exit(void)
+{
+ i2c_del_driver(&cs35l34_i2c_driver);
+}
+module_exit(cs35l34_exit);
+
+MODULE_DESCRIPTION("ASoC CS35l34 driver");
+MODULE_AUTHOR("Paul Handrigan, Cirrus Logic Inc, <Paul.Handrigan@cirrus.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs35l34.h b/sound/soc/codecs/cs35l34.h
new file mode 100644
index 000000000000..bcd54f127559
--- /dev/null
+++ b/sound/soc/codecs/cs35l34.h
@@ -0,0 +1,269 @@
+/*
+ * cs35l34.h -- CS35L34 ALSA SoC audio driver
+ *
+ * Copyright 2016 Cirrus Logic, Inc.
+ *
+ * Author: Paul Handrigan <Paul.Handrigan@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __CS35L34_H__
+#define __CS35L34_H__
+
+#define CS35L34_CHIP_ID 0x00035A34
+#define CS35L34_DEVID_AB 0x01 /* Device ID A & B [RO] */
+#define CS35L34_DEVID_CD 0x02 /* Device ID C & D [RO] */
+#define CS35L34_DEVID_E 0x03 /* Device ID E [RO] */
+#define CS35L34_FAB_ID 0x04 /* Fab ID [RO] */
+#define CS35L34_REV_ID 0x05 /* Revision ID [RO] */
+#define CS35L34_PWRCTL1 0x06 /* Power Ctl 1 */
+#define CS35L34_PWRCTL2 0x07 /* Power Ctl 2 */
+#define CS35L34_PWRCTL3 0x08 /* Power Ctl 3 */
+#define CS35L34_ADSP_CLK_CTL 0x0A /* (ADSP) Clock Ctl */
+#define CS35L34_MCLK_CTL 0x0B /* Master Clocking Ctl */
+#define CS35L34_AMP_INP_DRV_CTL 0x14 /* Amp Input Drive Ctl */
+#define CS35L34_AMP_DIG_VOL_CTL 0x15 /* Amplifier Dig Volume Ctl */
+#define CS35L34_AMP_DIG_VOL 0x16 /* Amplifier Dig Volume */
+#define CS35L34_AMP_ANLG_GAIN_CTL 0x17 /* Amplifier Analog Gain Ctl */
+#define CS35L34_PROTECT_CTL 0x18 /* Amp Gain - Prot Ctl Param */
+#define CS35L34_AMP_KEEP_ALIVE_CTL 0x1A /* Amplifier Keep Alive Ctl */
+#define CS35L34_BST_CVTR_V_CTL 0x1D /* Boost Conv Voltage Ctl */
+#define CS35L34_BST_PEAK_I 0x1E /* Boost Conv Peak Current */
+#define CS35L34_BST_RAMP_CTL 0x20 /* Boost Conv Soft Ramp Ctl */
+#define CS35L34_BST_CONV_COEF_1 0x21 /* Boost Conv Coefficients 1 */
+#define CS35L34_BST_CONV_COEF_2 0x22 /* Boost Conv Coefficients 2 */
+#define CS35L34_BST_CONV_SLOPE_COMP 0x23 /* Boost Conv Slope Comp */
+#define CS35L34_BST_CONV_SW_FREQ 0x24 /* Boost Conv L BST SW Freq */
+#define CS35L34_CLASS_H_CTL 0x30 /* CLS H Control */
+#define CS35L34_CLASS_H_HEADRM_CTL 0x31 /* CLS H Headroom Ctl */
+#define CS35L34_CLASS_H_RELEASE_RATE 0x32 /* CLS H Release Rate */
+#define CS35L34_CLASS_H_FET_DRIVE_CTL 0x33 /* CLS H Weak FET Drive Ctl */
+#define CS35L34_CLASS_H_STATUS 0x38 /* CLS H Status */
+#define CS35L34_VPBR_CTL 0x3A /* VPBR Ctl */
+#define CS35L34_VPBR_VOL_CTL 0x3B /* VPBR Volume Ctl */
+#define CS35L34_VPBR_TIMING_CTL 0x3C /* VPBR Timing Ctl */
+#define CS35L34_PRED_MAX_ATTEN_SPK_LOAD 0x40 /* PRD Max Atten / Spkr Load */
+#define CS35L34_PRED_BROWNOUT_THRESH 0x41 /* PRD Brownout Threshold */
+#define CS35L34_PRED_BROWNOUT_VOL_CTL 0x42 /* PRD Brownout Volume Ctl */
+#define CS35L34_PRED_BROWNOUT_RATE_CTL 0x43 /* PRD Brownout Rate Ctl */
+#define CS35L34_PRED_WAIT_CTL 0x44 /* PRD Wait Ctl */
+#define CS35L34_PRED_ZVP_INIT_IMP_CTL 0x46 /* PRD ZVP Initial Imp Ctl */
+#define CS35L34_PRED_MAN_SAFE_VPI_CTL 0x47 /* PRD Manual Safe VPI Ctl */
+#define CS35L34_VPBR_ATTEN_STATUS 0x4B /* VPBR Attenuation Status */
+#define CS35L34_PRED_BRWNOUT_ATT_STATUS 0x4C /* PRD Brownout Atten Status */
+#define CS35L34_SPKR_MON_CTL 0x4E /* Speaker Monitoring Ctl */
+#define CS35L34_ADSP_I2S_CTL 0x50 /* ADSP I2S Ctl */
+#define CS35L34_ADSP_TDM_CTL 0x51 /* ADSP TDM Ctl */
+#define CS35L34_TDM_TX_CTL_1_VMON 0x52 /* TDM TX Ctl 1 (VMON) */
+#define CS35L34_TDM_TX_CTL_2_IMON 0x53 /* TDM TX Ctl 2 (IMON) */
+#define CS35L34_TDM_TX_CTL_3_VPMON 0x54 /* TDM TX Ctl 3 (VPMON) */
+#define CS35L34_TDM_TX_CTL_4_VBSTMON 0x55 /* TDM TX Ctl 4 (VBSTMON) */
+#define CS35L34_TDM_TX_CTL_5_FLAG1 0x56 /* TDM TX Ctl 5 (FLAG1) */
+#define CS35L34_TDM_TX_CTL_6_FLAG2 0x57 /* TDM TX Ctl 6 (FLAG2) */
+#define CS35L34_TDM_TX_SLOT_EN_1 0x5A /* TDM TX Slot Enable */
+#define CS35L34_TDM_TX_SLOT_EN_2 0x5B /* TDM TX Slot Enable */
+#define CS35L34_TDM_TX_SLOT_EN_3 0x5C /* TDM TX Slot Enable */
+#define CS35L34_TDM_TX_SLOT_EN_4 0x5D /* TDM TX Slot Enable */
+#define CS35L34_TDM_RX_CTL_1_AUDIN 0x5E /* TDM RX Ctl 1 */
+#define CS35L34_TDM_RX_CTL_3_ALIVE 0x60 /* TDM RX Ctl 3 (ALIVE) */
+#define CS35L34_MULT_DEV_SYNCH1 0x62 /* Multidevice Synch */
+#define CS35L34_MULT_DEV_SYNCH2 0x63 /* Multidevice Synch 2 */
+#define CS35L34_PROT_RELEASE_CTL 0x64 /* Protection Release Ctl */
+#define CS35L34_DIAG_MODE_REG_LOCK 0x68 /* Diagnostic Mode Reg Lock */
+#define CS35L34_DIAG_MODE_CTL_1 0x69 /* Diagnostic Mode Ctl 1 */
+#define CS35L34_DIAG_MODE_CTL_2 0x6A /* Diagnostic Mode Ctl 2 */
+#define CS35L34_INT_MASK_1 0x70 /* Interrupt Mask 1 */
+#define CS35L34_INT_MASK_2 0x71 /* Interrupt Mask 2 */
+#define CS35L34_INT_MASK_3 0x72 /* Interrupt Mask 3 */
+#define CS35L34_INT_MASK_4 0x73 /* Interrupt Mask 4 */
+#define CS35L34_INT_STATUS_1 0x74 /* Interrupt Status 1 */
+#define CS35L34_INT_STATUS_2 0x75 /* Interrupt Status 2 */
+#define CS35L34_INT_STATUS_3 0x76 /* Interrupt Status 3 */
+#define CS35L34_INT_STATUS_4 0x77 /* Interrupt Status 4 */
+#define CS35L34_OTP_TRIM_STATUS 0x7E /* OTP Trim Status */
+
+#define CS35L34_MAX_REGISTER 0x7F
+#define CS35L34_REGISTER_COUNT 0x4E
+
+#define CS35L34_MCLK_5644 5644800
+#define CS35L34_MCLK_6144 6144000
+#define CS35L34_MCLK_6 6000000
+#define CS35L34_MCLK_11289 11289600
+#define CS35L34_MCLK_12 12000000
+#define CS35L34_MCLK_12288 12288000
+
+/* CS35L34_PWRCTL1 */
+#define CS35L34_SFT_RST (1 << 7)
+#define CS35L34_DISCHG_FLT (1 << 1)
+#define CS35L34_PDN_ALL 1
+
+/* CS35L34_PWRCTL2 */
+#define CS35L34_PDN_VMON (1 << 7)
+#define CS35L34_PDN_IMON (1 << 6)
+#define CS35L34_PDN_CLASSH (1 << 5)
+#define CS35L34_PDN_VPBR (1 << 4)
+#define CS35L34_PDN_PRED (1 << 3)
+#define CS35L34_PDN_BST (1 << 2)
+#define CS35L34_PDN_AMP 1
+
+/* CS35L34_PWRCTL3 */
+#define CS35L34_MCLK_DIS (1 << 7)
+#define CS35L34_PDN_VBSTMON_OUT (1 << 4)
+#define CS35L34_PDN_VMON_OUT (1 << 3)
+/* Tristate the ADSP SDOUT when in I2C mode */
+#define CS35L34_PDN_SDOUT (1 << 2)
+#define CS35L34_PDN_SDIN (1 << 1)
+#define CS35L34_PDN_TDM 1
+
+/* CS35L34_ADSP_CLK_CTL */
+#define CS35L34_ADSP_RATE 0xF
+#define CS35L34_ADSP_DRIVE (1 << 4)
+#define CS35L34_ADSP_M_S (1 << 7)
+
+/* CS35L34_MCLK_CTL */
+#define CS35L34_MCLK_DIV (1 << 4)
+#define CS35L34_MCLK_RATE_MASK 0x7
+#define CS35L34_MCLK_RATE_6P1440 0x2
+#define CS35L34_MCLK_RATE_6P0000 0x1
+#define CS35L34_MCLK_RATE_5P6448 0x0
+#define CS35L34_MCLKDIS (1 << 7)
+#define CS35L34_MCLKDIV2 (1 << 6)
+#define CS35L34_SDOUT_3ST_TDM (1 << 5)
+#define CS35L34_INT_FS_RATE (1 << 4)
+#define CS35L34_ADSP_FS 0xF
+
+/* CS35L34_AMP_INP_DRV_CTL */
+#define CS35L34_DRV_STR_SRC (1 << 1)
+#define CS35L34_DRV_STR 1
+
+/* CS35L34_AMP_DIG_VOL_CTL */
+#define CS35L34_AMP_DSR_RATE_MASK 0xF0
+#define CS35L34_AMP_DSR_RATE_SHIFT (1 << 4)
+#define CS35L34_NOTCH_DIS (1 << 3)
+#define CS35L34_AMP_DIGSFT (1 << 1)
+#define CS35L34_INV 1
+
+/* CS35L34_PROTECT_CTL */
+#define CS35L34_OTW_ATTN_MASK 0xC
+#define CS35L34_OTW_THRD_MASK 0x3
+#define CS35L34_MUTE (1 << 5)
+#define CS35L34_GAIN_ZC (1 << 4)
+#define CS35L34_GAIN_ZC_MASK 0x10
+#define CS35L34_GAIN_ZC_SHIFT 4
+
+/* CS35L34_AMP_KEEP_ALIVE_CTL */
+#define CS35L34_ALIVE_WD_DIS (1 << 2)
+
+/* CS35L34_BST_CVTR_V_CTL */
+#define CS35L34_BST_CVTL_MASK 0x3F
+
+/* CS35L34_BST_PEAK_I */
+#define CS35L34_BST_PEAK_MASK 0x3F
+
+/* CS35L34_ADSP_I2S_CTL */
+#define CS35L34_I2S_LOC_MASK 0xC
+#define CS35L34_I2S_LOC_SHIFT 2
+
+/* CS35L34_MULT_DEV_SYNCH2 */
+#define CS35L34_SYNC2_MASK 0xF
+
+/* CS35L34_PROT_RELEASE_CTL */
+#define CS35L34_CAL_ERR_RLS (1 << 7)
+#define CS35L34_SHORT_RLS (1 << 2)
+#define CS35L34_OTW_RLS (1 << 1)
+#define CS35L34_OTE_RLS 1
+
+/* CS35L34_INT_MASK_1 */
+#define CS35L34_M_CAL_ERR_SHIFT 7
+#define CS35L34_M_CAL_ERR (1 << CS35L34_M_CAL_ERR_SHIFT)
+#define CS35L34_M_ALIVE_ERR_SHIFT 5
+#define CS35L34_M_ALIVE_ERR (1 << CS35L34_M_ALIVE_ERR_SHIFT)
+#define CS35L34_M_ADSP_CLK_SHIFT 4
+#define CS35L34_M_ADSP_CLK_ERR (1 << CS35L34_M_ADSP_CLK_SHIFT)
+#define CS35L34_M_MCLK_SHIFT 3
+#define CS35L34_M_MCLK_ERR (1 << CS35L34_M_MCLK_SHIFT)
+#define CS35L34_M_AMP_SHORT_SHIFT 2
+#define CS35L34_M_AMP_SHORT (1 << CS35L34_M_AMP_SHORT_SHIFT)
+#define CS35L34_M_OTW_SHIFT 1
+#define CS35L34_M_OTW (1 << CS35L34_M_OTW_SHIFT)
+#define CS35L34_M_OTE_SHIFT 0
+#define CS35L34_M_OTE (1 << CS35L34_M_OTE_SHIFT)
+
+/* CS35L34_INT_MASK_2 */
+#define CS35L34_M_PDN_DONE_SHIFT 4
+#define CS35L34_M_PDN_DONE (1 << CS35L34_M_PDN_DONE_SHIFT)
+#define CS35L34_M_PRED_SHIFT 3
+#define CS35L34_M_PRED_ERR (1 << CS35L34_M_PRED_SHIFT)
+#define CS35L34_M_PRED_CLR_SHIFT 2
+#define CS35L34_M_PRED_CLR (1 << CS35L34_M_PRED_CLR_SHIFT)
+#define CS35L34_M_VPBR_SHIFT 1
+#define CS35L34_M_VPBR_ERR (1 << CS35L34_M_VPBR_SHIFT)
+#define CS35L34_M_VPBR_CLR_SHIFT 0
+#define CS35L34_M_VPBR_CLR (1 << CS35L34_M_VPBR_CLR_SHIFT)
+
+/* CS35L34_INT_MASK_3 */
+#define CS35L34_M_BST_HIGH_SHIFT 4
+#define CS35L34_M_BST_HIGH (1 << CS35L34_M_BST_HIGH_SHIFT)
+#define CS35L34_M_BST_HIGH_FLAG_SHIFT 3
+#define CS35L34_M_BST_HIGH_FLAG (1 << CS35L34_M_BST_HIGH_FLAG_SHIFT)
+#define CS35L34_M_BST_IPK_FLAG_SHIFT 2
+#define CS35L34_M_BST_IPK_FLAG (1 << CS35L34_M_BST_IPK_FLAG_SHIFT)
+#define CS35L34_M_LBST_SHORT_SHIFT 0
+#define CS35L34_M_LBST_SHORT (1 << CS35L34_M_LBST_SHORT_SHIFT)
+
+/* CS35L34_INT_MASK_4 */
+#define CS35L34_M_VMON_OVFL_SHIFT 3
+#define CS35L34_M_VMON_OVFL (1 << CS35L34_M_VMON_OVFL_SHIFT)
+#define CS35L34_M_IMON_OVFL_SHIFT 2
+#define CS35L34_M_IMON_OVFL (1 << CS35L34_M_IMON_OVFL_SHIFT)
+#define CS35L34_M_VPMON_OVFL_SHIFT 1
+#define CS35L34_M_VPMON_OVFL (1 << CS35L34_M_VPMON_OVFL_SHIFT)
+#define CS35L34_M_VBSTMON_OVFL_SHIFT 1
+#define CS35L34_M_VBSTMON_OVFL (1 << CS35L34_M_VBSTMON_OVFL_SHIFT)
+
+/* CS35L34_INT_1 */
+#define CS35L34_CAL_ERR (1 << CS35L34_M_CAL_ERR_SHIFT)
+#define CS35L34_ALIVE_ERR (1 << CS35L34_M_ALIVE_ERR_SHIFT)
+#define CS35L34_M_ADSP_CLK_ERR (1 << CS35L34_M_ADSP_CLK_SHIFT)
+#define CS35L34_MCLK_ERR (1 << CS35L34_M_MCLK_SHIFT)
+#define CS35L34_AMP_SHORT (1 << CS35L34_M_AMP_SHORT_SHIFT)
+#define CS35L34_OTW (1 << CS35L34_M_OTW_SHIFT)
+#define CS35L34_OTE (1 << CS35L34_M_OTE_SHIFT)
+
+/* CS35L34_INT_2 */
+#define CS35L34_PDN_DONE (1 << CS35L34_M_PDN_DONE_SHIFT)
+#define CS35L34_PRED_ERR (1 << CS35L34_M_PRED_SHIFT)
+#define CS35L34_PRED_CLR (1 << CS35L34_M_PRED_CLR_SHIFT)
+#define CS35L34_VPBR_ERR (1 << CS35L34_M_VPBR_SHIFT)
+#define CS35L34_VPBR_CLR (1 << CS35L34_M_VPBR_CLR_SHIFT)
+
+/* CS35L34_INT_3 */
+#define CS35L34_BST_HIGH (1 << CS35L34_M_BST_HIGH_SHIFT)
+#define CS35L34_BST_HIGH_FLAG (1 << CS35L34_M_BST_HIGH_FLAG_SHIFT)
+#define CS35L34_BST_IPK_FLAG (1 << CS35L34_M_BST_IPK_FLAG_SHIFT)
+#define CS35L34_LBST_SHORT (1 << CS35L34_M_LBST_SHORT_SHIFT)
+
+/* CS35L34_INT_4 */
+#define CS35L34_VMON_OVFL (1 << CS35L34_M_VMON_OVFL_SHIFT)
+#define CS35L34_IMON_OVFL (1 << CS35L34_M_IMON_OVFL_SHIFT)
+#define CS35L34_VPMON_OVFL (1 << CS35L34_M_VPMON_OVFL_SHIFT)
+#define CS35L34_VBSTMON_OVFL (1 << CS35L34_M_VBSTMON_OVFL_SHIFT)
+
+/* CS35L34_{RX,TX}_X */
+#define CS35L34_X_STATE_SHIFT 7
+#define CS35L34_X_STATE (1 << CS35L34_X_STATE_SHIFT)
+#define CS35L34_X_LOC_SHIFT 0
+#define CS35L34_X_LOC (0x1F << CS35L34_X_LOC_SHIFT)
+
+#define CS35L34_RATES (SNDRV_PCM_RATE_48000 | \
+ SNDRV_PCM_RATE_44100 | \
+ SNDRV_PCM_RATE_32000)
+#define CS35L34_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+#endif
diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
new file mode 100644
index 000000000000..55e4520cdcaf
--- /dev/null
+++ b/sound/soc/codecs/cs42l42.c
@@ -0,0 +1,1986 @@
+/*
+ * cs42l42.c -- CS42L42 ALSA SoC audio driver
+ *
+ * Copyright 2016 Cirrus Logic, Inc.
+ *
+ * Author: James Schulman <james.schulman@cirrus.com>
+ * Author: Brian Austin <brian.austin@cirrus.com>
+ * Author: Michael White <michael.white@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <dt-bindings/sound/cs42l42.h>
+
+#include "cs42l42.h"
+
+static const struct reg_default cs42l42_reg_defaults[] = {
+ { CS42L42_FRZ_CTL, 0x00 },
+ { CS42L42_SRC_CTL, 0x10 },
+ { CS42L42_MCLK_STATUS, 0x02 },
+ { CS42L42_MCLK_CTL, 0x02 },
+ { CS42L42_SFTRAMP_RATE, 0xA4 },
+ { CS42L42_I2C_DEBOUNCE, 0x88 },
+ { CS42L42_I2C_STRETCH, 0x03 },
+ { CS42L42_I2C_TIMEOUT, 0xB7 },
+ { CS42L42_PWR_CTL1, 0xFF },
+ { CS42L42_PWR_CTL2, 0x84 },
+ { CS42L42_PWR_CTL3, 0x20 },
+ { CS42L42_RSENSE_CTL1, 0x40 },
+ { CS42L42_RSENSE_CTL2, 0x00 },
+ { CS42L42_OSC_SWITCH, 0x00 },
+ { CS42L42_OSC_SWITCH_STATUS, 0x05 },
+ { CS42L42_RSENSE_CTL3, 0x1B },
+ { CS42L42_TSENSE_CTL, 0x1B },
+ { CS42L42_TSRS_INT_DISABLE, 0x00 },
+ { CS42L42_TRSENSE_STATUS, 0x00 },
+ { CS42L42_HSDET_CTL1, 0x77 },
+ { CS42L42_HSDET_CTL2, 0x00 },
+ { CS42L42_HS_SWITCH_CTL, 0xF3 },
+ { CS42L42_HS_DET_STATUS, 0x00 },
+ { CS42L42_HS_CLAMP_DISABLE, 0x00 },
+ { CS42L42_MCLK_SRC_SEL, 0x00 },
+ { CS42L42_SPDIF_CLK_CFG, 0x00 },
+ { CS42L42_FSYNC_PW_LOWER, 0x00 },
+ { CS42L42_FSYNC_PW_UPPER, 0x00 },
+ { CS42L42_FSYNC_P_LOWER, 0xF9 },
+ { CS42L42_FSYNC_P_UPPER, 0x00 },
+ { CS42L42_ASP_CLK_CFG, 0x00 },
+ { CS42L42_ASP_FRM_CFG, 0x10 },
+ { CS42L42_FS_RATE_EN, 0x00 },
+ { CS42L42_IN_ASRC_CLK, 0x00 },
+ { CS42L42_OUT_ASRC_CLK, 0x00 },
+ { CS42L42_PLL_DIV_CFG1, 0x00 },
+ { CS42L42_ADC_OVFL_STATUS, 0x00 },
+ { CS42L42_MIXER_STATUS, 0x00 },
+ { CS42L42_SRC_STATUS, 0x00 },
+ { CS42L42_ASP_RX_STATUS, 0x00 },
+ { CS42L42_ASP_TX_STATUS, 0x00 },
+ { CS42L42_CODEC_STATUS, 0x00 },
+ { CS42L42_DET_INT_STATUS1, 0x00 },
+ { CS42L42_DET_INT_STATUS2, 0x00 },
+ { CS42L42_SRCPL_INT_STATUS, 0x00 },
+ { CS42L42_VPMON_STATUS, 0x00 },
+ { CS42L42_PLL_LOCK_STATUS, 0x00 },
+ { CS42L42_TSRS_PLUG_STATUS, 0x00 },
+ { CS42L42_ADC_OVFL_INT_MASK, 0x01 },
+ { CS42L42_MIXER_INT_MASK, 0x0F },
+ { CS42L42_SRC_INT_MASK, 0x0F },
+ { CS42L42_ASP_RX_INT_MASK, 0x1F },
+ { CS42L42_ASP_TX_INT_MASK, 0x0F },
+ { CS42L42_CODEC_INT_MASK, 0x03 },
+ { CS42L42_SRCPL_INT_MASK, 0xFF },
+ { CS42L42_VPMON_INT_MASK, 0x01 },
+ { CS42L42_PLL_LOCK_INT_MASK, 0x01 },
+ { CS42L42_TSRS_PLUG_INT_MASK, 0x0F },
+ { CS42L42_PLL_CTL1, 0x00 },
+ { CS42L42_PLL_DIV_FRAC0, 0x00 },
+ { CS42L42_PLL_DIV_FRAC1, 0x00 },
+ { CS42L42_PLL_DIV_FRAC2, 0x00 },
+ { CS42L42_PLL_DIV_INT, 0x40 },
+ { CS42L42_PLL_CTL3, 0x10 },
+ { CS42L42_PLL_CAL_RATIO, 0x80 },
+ { CS42L42_PLL_CTL4, 0x03 },
+ { CS42L42_LOAD_DET_RCSTAT, 0x00 },
+ { CS42L42_LOAD_DET_DONE, 0x00 },
+ { CS42L42_LOAD_DET_EN, 0x00 },
+ { CS42L42_HSBIAS_SC_AUTOCTL, 0x03 },
+ { CS42L42_WAKE_CTL, 0xC0 },
+ { CS42L42_ADC_DISABLE_MUTE, 0x00 },
+ { CS42L42_TIPSENSE_CTL, 0x02 },
+ { CS42L42_MISC_DET_CTL, 0x03 },
+ { CS42L42_MIC_DET_CTL1, 0x1F },
+ { CS42L42_MIC_DET_CTL2, 0x2F },
+ { CS42L42_DET_STATUS1, 0x00 },
+ { CS42L42_DET_STATUS2, 0x00 },
+ { CS42L42_DET_INT1_MASK, 0xE0 },
+ { CS42L42_DET_INT2_MASK, 0xFF },
+ { CS42L42_HS_BIAS_CTL, 0xC2 },
+ { CS42L42_ADC_CTL, 0x00 },
+ { CS42L42_ADC_VOLUME, 0x00 },
+ { CS42L42_ADC_WNF_HPF_CTL, 0x71 },
+ { CS42L42_DAC_CTL1, 0x00 },
+ { CS42L42_DAC_CTL2, 0x02 },
+ { CS42L42_HP_CTL, 0x0D },
+ { CS42L42_CLASSH_CTL, 0x07 },
+ { CS42L42_MIXER_CHA_VOL, 0x3F },
+ { CS42L42_MIXER_ADC_VOL, 0x3F },
+ { CS42L42_MIXER_CHB_VOL, 0x3F },
+ { CS42L42_EQ_COEF_IN0, 0x22 },
+ { CS42L42_EQ_COEF_IN1, 0x00 },
+ { CS42L42_EQ_COEF_IN2, 0x00 },
+ { CS42L42_EQ_COEF_IN3, 0x00 },
+ { CS42L42_EQ_COEF_RW, 0x00 },
+ { CS42L42_EQ_COEF_OUT0, 0x00 },
+ { CS42L42_EQ_COEF_OUT1, 0x00 },
+ { CS42L42_EQ_COEF_OUT2, 0x00 },
+ { CS42L42_EQ_COEF_OUT3, 0x00 },
+ { CS42L42_EQ_INIT_STAT, 0x00 },
+ { CS42L42_EQ_START_FILT, 0x00 },
+ { CS42L42_EQ_MUTE_CTL, 0x00 },
+ { CS42L42_SP_RX_CH_SEL, 0x04 },
+ { CS42L42_SP_RX_ISOC_CTL, 0x04 },
+ { CS42L42_SP_RX_FS, 0x8C },
+ { CS42l42_SPDIF_CH_SEL, 0x0E },
+ { CS42L42_SP_TX_ISOC_CTL, 0x04 },
+ { CS42L42_SP_TX_FS, 0xCC },
+ { CS42L42_SPDIF_SW_CTL1, 0x3F },
+ { CS42L42_SRC_SDIN_FS, 0x40 },
+ { CS42L42_SRC_SDOUT_FS, 0x40 },
+ { CS42L42_SPDIF_CTL1, 0x01 },
+ { CS42L42_SPDIF_CTL2, 0x00 },
+ { CS42L42_SPDIF_CTL3, 0x00 },
+ { CS42L42_SPDIF_CTL4, 0x42 },
+ { CS42L42_ASP_TX_SZ_EN, 0x00 },
+ { CS42L42_ASP_TX_CH_EN, 0x00 },
+ { CS42L42_ASP_TX_CH_AP_RES, 0x0F },
+ { CS42L42_ASP_TX_CH1_BIT_MSB, 0x00 },
+ { CS42L42_ASP_TX_CH1_BIT_LSB, 0x00 },
+ { CS42L42_ASP_TX_HIZ_DLY_CFG, 0x00 },
+ { CS42L42_ASP_TX_CH2_BIT_MSB, 0x00 },
+ { CS42L42_ASP_TX_CH2_BIT_LSB, 0x00 },
+ { CS42L42_ASP_RX_DAI0_EN, 0x00 },
+ { CS42L42_ASP_RX_DAI0_CH1_AP_RES, 0x03 },
+ { CS42L42_ASP_RX_DAI0_CH1_BIT_MSB, 0x00 },
+ { CS42L42_ASP_RX_DAI0_CH1_BIT_LSB, 0x00 },
+ { CS42L42_ASP_RX_DAI0_CH2_AP_RES, 0x03 },
+ { CS42L42_ASP_RX_DAI0_CH2_BIT_MSB, 0x00 },
+ { CS42L42_ASP_RX_DAI0_CH2_BIT_LSB, 0x00 },
+ { CS42L42_ASP_RX_DAI0_CH3_AP_RES, 0x03 },
+ { CS42L42_ASP_RX_DAI0_CH3_BIT_MSB, 0x00 },
+ { CS42L42_ASP_RX_DAI0_CH3_BIT_LSB, 0x00 },
+ { CS42L42_ASP_RX_DAI0_CH4_AP_RES, 0x03 },
+ { CS42L42_ASP_RX_DAI0_CH4_BIT_MSB, 0x00 },
+ { CS42L42_ASP_RX_DAI0_CH4_BIT_LSB, 0x00 },
+ { CS42L42_ASP_RX_DAI1_CH1_AP_RES, 0x03 },
+ { CS42L42_ASP_RX_DAI1_CH1_BIT_MSB, 0x00 },
+ { CS42L42_ASP_RX_DAI1_CH1_BIT_LSB, 0x00 },
+ { CS42L42_ASP_RX_DAI1_CH2_AP_RES, 0x03 },
+ { CS42L42_ASP_RX_DAI1_CH2_BIT_MSB, 0x00 },
+ { CS42L42_ASP_RX_DAI1_CH2_BIT_LSB, 0x00 },
+ { CS42L42_SUB_REVID, 0x03 },
+};
+
+static bool cs42l42_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS42L42_PAGE_REGISTER:
+ case CS42L42_DEVID_AB:
+ case CS42L42_DEVID_CD:
+ case CS42L42_DEVID_E:
+ case CS42L42_FABID:
+ case CS42L42_REVID:
+ case CS42L42_FRZ_CTL:
+ case CS42L42_SRC_CTL:
+ case CS42L42_MCLK_STATUS:
+ case CS42L42_MCLK_CTL:
+ case CS42L42_SFTRAMP_RATE:
+ case CS42L42_I2C_DEBOUNCE:
+ case CS42L42_I2C_STRETCH:
+ case CS42L42_I2C_TIMEOUT:
+ case CS42L42_PWR_CTL1:
+ case CS42L42_PWR_CTL2:
+ case CS42L42_PWR_CTL3:
+ case CS42L42_RSENSE_CTL1:
+ case CS42L42_RSENSE_CTL2:
+ case CS42L42_OSC_SWITCH:
+ case CS42L42_OSC_SWITCH_STATUS:
+ case CS42L42_RSENSE_CTL3:
+ case CS42L42_TSENSE_CTL:
+ case CS42L42_TSRS_INT_DISABLE:
+ case CS42L42_TRSENSE_STATUS:
+ case CS42L42_HSDET_CTL1:
+ case CS42L42_HSDET_CTL2:
+ case CS42L42_HS_SWITCH_CTL:
+ case CS42L42_HS_DET_STATUS:
+ case CS42L42_HS_CLAMP_DISABLE:
+ case CS42L42_MCLK_SRC_SEL:
+ case CS42L42_SPDIF_CLK_CFG:
+ case CS42L42_FSYNC_PW_LOWER:
+ case CS42L42_FSYNC_PW_UPPER:
+ case CS42L42_FSYNC_P_LOWER:
+ case CS42L42_FSYNC_P_UPPER:
+ case CS42L42_ASP_CLK_CFG:
+ case CS42L42_ASP_FRM_CFG:
+ case CS42L42_FS_RATE_EN:
+ case CS42L42_IN_ASRC_CLK:
+ case CS42L42_OUT_ASRC_CLK:
+ case CS42L42_PLL_DIV_CFG1:
+ case CS42L42_ADC_OVFL_STATUS:
+ case CS42L42_MIXER_STATUS:
+ case CS42L42_SRC_STATUS:
+ case CS42L42_ASP_RX_STATUS:
+ case CS42L42_ASP_TX_STATUS:
+ case CS42L42_CODEC_STATUS:
+ case CS42L42_DET_INT_STATUS1:
+ case CS42L42_DET_INT_STATUS2:
+ case CS42L42_SRCPL_INT_STATUS:
+ case CS42L42_VPMON_STATUS:
+ case CS42L42_PLL_LOCK_STATUS:
+ case CS42L42_TSRS_PLUG_STATUS:
+ case CS42L42_ADC_OVFL_INT_MASK:
+ case CS42L42_MIXER_INT_MASK:
+ case CS42L42_SRC_INT_MASK:
+ case CS42L42_ASP_RX_INT_MASK:
+ case CS42L42_ASP_TX_INT_MASK:
+ case CS42L42_CODEC_INT_MASK:
+ case CS42L42_SRCPL_INT_MASK:
+ case CS42L42_VPMON_INT_MASK:
+ case CS42L42_PLL_LOCK_INT_MASK:
+ case CS42L42_TSRS_PLUG_INT_MASK:
+ case CS42L42_PLL_CTL1:
+ case CS42L42_PLL_DIV_FRAC0:
+ case CS42L42_PLL_DIV_FRAC1:
+ case CS42L42_PLL_DIV_FRAC2:
+ case CS42L42_PLL_DIV_INT:
+ case CS42L42_PLL_CTL3:
+ case CS42L42_PLL_CAL_RATIO:
+ case CS42L42_PLL_CTL4:
+ case CS42L42_LOAD_DET_RCSTAT:
+ case CS42L42_LOAD_DET_DONE:
+ case CS42L42_LOAD_DET_EN:
+ case CS42L42_HSBIAS_SC_AUTOCTL:
+ case CS42L42_WAKE_CTL:
+ case CS42L42_ADC_DISABLE_MUTE:
+ case CS42L42_TIPSENSE_CTL:
+ case CS42L42_MISC_DET_CTL:
+ case CS42L42_MIC_DET_CTL1:
+ case CS42L42_MIC_DET_CTL2:
+ case CS42L42_DET_STATUS1:
+ case CS42L42_DET_STATUS2:
+ case CS42L42_DET_INT1_MASK:
+ case CS42L42_DET_INT2_MASK:
+ case CS42L42_HS_BIAS_CTL:
+ case CS42L42_ADC_CTL:
+ case CS42L42_ADC_VOLUME:
+ case CS42L42_ADC_WNF_HPF_CTL:
+ case CS42L42_DAC_CTL1:
+ case CS42L42_DAC_CTL2:
+ case CS42L42_HP_CTL:
+ case CS42L42_CLASSH_CTL:
+ case CS42L42_MIXER_CHA_VOL:
+ case CS42L42_MIXER_ADC_VOL:
+ case CS42L42_MIXER_CHB_VOL:
+ case CS42L42_EQ_COEF_IN0:
+ case CS42L42_EQ_COEF_IN1:
+ case CS42L42_EQ_COEF_IN2:
+ case CS42L42_EQ_COEF_IN3:
+ case CS42L42_EQ_COEF_RW:
+ case CS42L42_EQ_COEF_OUT0:
+ case CS42L42_EQ_COEF_OUT1:
+ case CS42L42_EQ_COEF_OUT2:
+ case CS42L42_EQ_COEF_OUT3:
+ case CS42L42_EQ_INIT_STAT:
+ case CS42L42_EQ_START_FILT:
+ case CS42L42_EQ_MUTE_CTL:
+ case CS42L42_SP_RX_CH_SEL:
+ case CS42L42_SP_RX_ISOC_CTL:
+ case CS42L42_SP_RX_FS:
+ case CS42l42_SPDIF_CH_SEL:
+ case CS42L42_SP_TX_ISOC_CTL:
+ case CS42L42_SP_TX_FS:
+ case CS42L42_SPDIF_SW_CTL1:
+ case CS42L42_SRC_SDIN_FS:
+ case CS42L42_SRC_SDOUT_FS:
+ case CS42L42_SPDIF_CTL1:
+ case CS42L42_SPDIF_CTL2:
+ case CS42L42_SPDIF_CTL3:
+ case CS42L42_SPDIF_CTL4:
+ case CS42L42_ASP_TX_SZ_EN:
+ case CS42L42_ASP_TX_CH_EN:
+ case CS42L42_ASP_TX_CH_AP_RES:
+ case CS42L42_ASP_TX_CH1_BIT_MSB:
+ case CS42L42_ASP_TX_CH1_BIT_LSB:
+ case CS42L42_ASP_TX_HIZ_DLY_CFG:
+ case CS42L42_ASP_TX_CH2_BIT_MSB:
+ case CS42L42_ASP_TX_CH2_BIT_LSB:
+ case CS42L42_ASP_RX_DAI0_EN:
+ case CS42L42_ASP_RX_DAI0_CH1_AP_RES:
+ case CS42L42_ASP_RX_DAI0_CH1_BIT_MSB:
+ case CS42L42_ASP_RX_DAI0_CH1_BIT_LSB:
+ case CS42L42_ASP_RX_DAI0_CH2_AP_RES:
+ case CS42L42_ASP_RX_DAI0_CH2_BIT_MSB:
+ case CS42L42_ASP_RX_DAI0_CH2_BIT_LSB:
+ case CS42L42_ASP_RX_DAI0_CH3_AP_RES:
+ case CS42L42_ASP_RX_DAI0_CH3_BIT_MSB:
+ case CS42L42_ASP_RX_DAI0_CH3_BIT_LSB:
+ case CS42L42_ASP_RX_DAI0_CH4_AP_RES:
+ case CS42L42_ASP_RX_DAI0_CH4_BIT_MSB:
+ case CS42L42_ASP_RX_DAI0_CH4_BIT_LSB:
+ case CS42L42_ASP_RX_DAI1_CH1_AP_RES:
+ case CS42L42_ASP_RX_DAI1_CH1_BIT_MSB:
+ case CS42L42_ASP_RX_DAI1_CH1_BIT_LSB:
+ case CS42L42_ASP_RX_DAI1_CH2_AP_RES:
+ case CS42L42_ASP_RX_DAI1_CH2_BIT_MSB:
+ case CS42L42_ASP_RX_DAI1_CH2_BIT_LSB:
+ case CS42L42_SUB_REVID:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool cs42l42_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS42L42_DEVID_AB:
+ case CS42L42_DEVID_CD:
+ case CS42L42_DEVID_E:
+ case CS42L42_MCLK_STATUS:
+ case CS42L42_TRSENSE_STATUS:
+ case CS42L42_HS_DET_STATUS:
+ case CS42L42_ADC_OVFL_STATUS:
+ case CS42L42_MIXER_STATUS:
+ case CS42L42_SRC_STATUS:
+ case CS42L42_ASP_RX_STATUS:
+ case CS42L42_ASP_TX_STATUS:
+ case CS42L42_CODEC_STATUS:
+ case CS42L42_DET_INT_STATUS1:
+ case CS42L42_DET_INT_STATUS2:
+ case CS42L42_SRCPL_INT_STATUS:
+ case CS42L42_VPMON_STATUS:
+ case CS42L42_PLL_LOCK_STATUS:
+ case CS42L42_TSRS_PLUG_STATUS:
+ case CS42L42_LOAD_DET_RCSTAT:
+ case CS42L42_LOAD_DET_DONE:
+ case CS42L42_DET_STATUS1:
+ case CS42L42_DET_STATUS2:
+ return true;
+ default:
+ return false;
+ }
+}
+
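+/*
+ * CS42L42 registers are paged: writes to CS42L42_PAGE_REGISTER select the
+ * upper address byte, with a 256-register window per page.
+ */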
+static const struct regmap_range_cfg cs42l42_page_range = {
+ .name = "Pages",
+ .range_min = 0,
+ .range_max = CS42L42_MAX_REGISTER,
+ .selector_reg = CS42L42_PAGE_REGISTER,
+ .selector_mask = 0xff,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = 256,
+};
+
+static const struct regmap_config cs42l42_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .readable_reg = cs42l42_readable_register,
+ .volatile_reg = cs42l42_volatile_register,
+
+ .ranges = &cs42l42_page_range,
+ .num_ranges = 1,
+
+ .max_register = CS42L42_MAX_REGISTER,
+ .reg_defaults = cs42l42_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(cs42l42_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
+static DECLARE_TLV_DB_SCALE(mixer_tlv, -6200, 100, false);
+
+static const char * const cs42l42_hpf_freq_text[] = {
+ "1.86Hz", "120Hz", "235Hz", "466Hz"
+};
+
+static SOC_ENUM_SINGLE_DECL(cs42l42_hpf_freq_enum, CS42L42_ADC_WNF_HPF_CTL,
+ CS42L42_ADC_HPF_CF_SHIFT,
+ cs42l42_hpf_freq_text);
+
+static const char * const cs42l42_wnf3_freq_text[] = {
+ "160Hz", "180Hz", "200Hz", "220Hz",
+ "240Hz", "260Hz", "280Hz", "300Hz"
+};
+
+static SOC_ENUM_SINGLE_DECL(cs42l42_wnf3_freq_enum, CS42L42_ADC_WNF_HPF_CTL,
+ CS42L42_ADC_WNF_CF_SHIFT,
+ cs42l42_wnf3_freq_text);
+
+static const char * const cs42l42_wnf05_freq_text[] = {
+ "280Hz", "315Hz", "350Hz", "385Hz",
+ "420Hz", "455Hz", "490Hz", "525Hz"
+};
+
+static SOC_ENUM_SINGLE_DECL(cs42l42_wnf05_freq_enum, CS42L42_ADC_WNF_HPF_CTL,
+ CS42L42_ADC_WNF_CF_SHIFT,
+ cs42l42_wnf05_freq_text);
+
+static const struct snd_kcontrol_new cs42l42_snd_controls[] = {
+ /* ADC Volume and Filter Controls */
+ SOC_SINGLE("ADC Notch Switch", CS42L42_ADC_CTL,
+ CS42L42_ADC_NOTCH_DIS_SHIFT, true, false),
+ SOC_SINGLE("ADC Weak Force Switch", CS42L42_ADC_CTL,
+ CS42L42_ADC_FORCE_WEAK_VCM_SHIFT, true, false),
+ SOC_SINGLE("ADC Invert Switch", CS42L42_ADC_CTL,
+ CS42L42_ADC_INV_SHIFT, true, false),
+ SOC_SINGLE("ADC Boost Switch", CS42L42_ADC_CTL,
+ CS42L42_ADC_DIG_BOOST_SHIFT, true, false),
+ SOC_SINGLE_SX_TLV("ADC Volume", CS42L42_ADC_VOLUME,
+ CS42L42_ADC_VOL_SHIFT, 0xA0, 0x6C, adc_tlv),
+ SOC_SINGLE("ADC WNF Switch", CS42L42_ADC_WNF_HPF_CTL,
+ CS42L42_ADC_WNF_EN_SHIFT, true, false),
+ SOC_SINGLE("ADC HPF Switch", CS42L42_ADC_WNF_HPF_CTL,
+ CS42L42_ADC_HPF_EN_SHIFT, true, false),
+ SOC_ENUM("HPF Corner Freq", cs42l42_hpf_freq_enum),
+ SOC_ENUM("WNF 3dB Freq", cs42l42_wnf3_freq_enum),
+ SOC_ENUM("WNF 05dB Freq", cs42l42_wnf05_freq_enum),
+
+ /* DAC Volume and Filter Controls */
+ SOC_SINGLE("DACA Invert Switch", CS42L42_DAC_CTL1,
+ CS42L42_DACA_INV_SHIFT, true, false),
+ SOC_SINGLE("DACB Invert Switch", CS42L42_DAC_CTL1,
+ CS42L42_DACB_INV_SHIFT, true, false),
+ SOC_SINGLE("DAC HPF Switch", CS42L42_DAC_CTL2,
+ CS42L42_DAC_HPF_EN_SHIFT, true, false),
+ SOC_DOUBLE_R_TLV("Mixer Volume", CS42L42_MIXER_CHA_VOL,
+ CS42L42_MIXER_CHB_VOL, CS42L42_MIXER_CH_VOL_SHIFT,
+ 0x3e, 1, mixer_tlv)
+};
+
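+/*
+ * Headphone driver DAPM event: on PMU enable the ASP RX channels and power
+ * up the ASP DAI, mixer and HP blocks; on PMD reverse the sequence.
+ */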
+static int cs42l42_hpdrv_evt(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ if (event & SND_SOC_DAPM_POST_PMU) {
+ /* Enable the channels */
+ snd_soc_update_bits(codec, CS42L42_ASP_RX_DAI0_EN,
+ CS42L42_ASP_RX0_CH_EN_MASK,
+ (CS42L42_ASP_RX0_CH1_EN |
+ CS42L42_ASP_RX0_CH2_EN) <<
+ CS42L42_ASP_RX0_CH_EN_SHIFT);
+
+ /* Power up */
+ snd_soc_update_bits(codec, CS42L42_PWR_CTL1,
+ CS42L42_ASP_DAI_PDN_MASK | CS42L42_MIXER_PDN_MASK |
+ CS42L42_HP_PDN_MASK, 0);
+ } else if (event & SND_SOC_DAPM_PRE_PMD) {
+ /* Disable the channels */
+ snd_soc_update_bits(codec, CS42L42_ASP_RX_DAI0_EN,
+ CS42L42_ASP_RX0_CH_EN_MASK, 0);
+
+ /* Power down */
+ snd_soc_update_bits(codec, CS42L42_PWR_CTL1,
+ CS42L42_ASP_DAI_PDN_MASK | CS42L42_MIXER_PDN_MASK |
+ CS42L42_HP_PDN_MASK,
+ CS42L42_ASP_DAI_PDN_MASK | CS42L42_MIXER_PDN_MASK |
+ CS42L42_HP_PDN_MASK);
+ } else {
+ dev_err(codec->dev, "Invalid event 0x%x\n", event);
+ }
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget cs42l42_dapm_widgets[] = {
+ SND_SOC_DAPM_OUTPUT("HP"),
+ SND_SOC_DAPM_AIF_IN("SDIN", NULL, 0, CS42L42_ASP_CLK_CFG,
+ CS42L42_ASP_SCLK_EN_SHIFT, false),
+ SND_SOC_DAPM_OUT_DRV_E("HPDRV", SND_SOC_NOPM, 0,
+ 0, NULL, 0, cs42l42_hpdrv_evt,
+ SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD)
+};
+
+static const struct snd_soc_dapm_route cs42l42_audio_map[] = {
+ {"SDIN", NULL, "Playback"},
+ {"HPDRV", NULL, "SDIN"},
+ {"HP", NULL, "HPDRV"}
+};
+
+static int cs42l42_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct cs42l42_private *cs42l42 = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
+ regcache_cache_only(cs42l42->regmap, false);
+ regcache_sync(cs42l42->regmap);
+ ret = regulator_bulk_enable(
+ ARRAY_SIZE(cs42l42->supplies),
+ cs42l42->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to enable regulators: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ break;
+ case SND_SOC_BIAS_OFF:
+
+ regcache_cache_only(cs42l42->regmap, true);
+ regulator_bulk_disable(ARRAY_SIZE(cs42l42->supplies),
+ cs42l42->supplies);
+ break;
+ }
+
+ return 0;
+}
+
+static int cs42l42_codec_probe(struct snd_soc_codec *codec)
+{
+ struct cs42l42_private *cs42l42 = snd_soc_codec_get_drvdata(codec);
+
+ cs42l42->codec = codec;
+
+ return 0;
+}
+
+static const struct snd_soc_codec_driver soc_codec_dev_cs42l42 = {
+ .probe = cs42l42_codec_probe,
+ .set_bias_level = cs42l42_set_bias_level,
+ .ignore_pmdown_time = true,
+
+ .component_driver = {
+ .dapm_widgets = cs42l42_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs42l42_dapm_widgets),
+ .dapm_routes = cs42l42_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(cs42l42_audio_map),
+
+ .controls = cs42l42_snd_controls,
+ .num_controls = ARRAY_SIZE(cs42l42_snd_controls),
+ },
+};
+
+struct cs42l42_pll_params {
+ u32 sclk;
+ u8 mclk_div;
+ u8 mclk_src_sel;
+ u8 sclk_prediv;
+ u8 pll_div_int;
+ u32 pll_div_frac;
+ u8 pll_mode;
+ u8 pll_divout;
+ u32 mclk_int;
+ u8 pll_cal_ratio;
+};
+
+/*
+ * Common PLL Settings for given SCLK
+ * Table 4-5 from the Datasheet
+ */
+static const struct cs42l42_pll_params pll_ratio_table[] = {
+ { 1536000, 0, 1, 0x00, 0x7D, 0x000000, 0x03, 0x10, 12000000, 125 },
+ { 2822400, 0, 1, 0x00, 0x40, 0x000000, 0x03, 0x10, 11289600, 128 },
+ { 3000000, 0, 1, 0x00, 0x40, 0x000000, 0x03, 0x10, 12000000, 128 },
+ { 3072000, 0, 1, 0x00, 0x3E, 0x800000, 0x03, 0x10, 12000000, 125 },
+ { 4000000, 0, 1, 0x00, 0x30, 0x800000, 0x03, 0x10, 12000000, 96 },
+ { 4096000, 0, 1, 0x00, 0x2E, 0xE00000, 0x03, 0x10, 12000000, 94 },
+ { 5644800, 0, 1, 0x01, 0x40, 0x000000, 0x03, 0x10, 11289600, 128 },
+ { 6000000, 0, 1, 0x01, 0x40, 0x000000, 0x03, 0x10, 12000000, 128 },
+ { 6144000, 0, 1, 0x01, 0x3E, 0x800000, 0x03, 0x10, 12000000, 125 },
+ { 11289600, 0, 0, 0, 0, 0, 0, 0, 11289600, 0 },
+ { 12000000, 0, 0, 0, 0, 0, 0, 0, 12000000, 0 },
+ { 12288000, 0, 0, 0, 0, 0, 0, 0, 12288000, 0 },
+ { 22579200, 1, 0, 0, 0, 0, 0, 0, 22579200, 0 },
+ { 24000000, 1, 0, 0, 0, 0, 0, 0, 24000000, 0 },
+ { 24576000, 1, 0, 0, 0, 0, 0, 0, 24576000, 0 }
+};
+
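+/*
+ * Look up the pll_ratio_table entry for the current SCLK and program the
+ * MCLK source/divider, LRCLK period and pulse width, ASRC clocks and ASP RX
+ * channel slots. Entries with mclk_src_sel == 0 bypass the PLL entirely.
+ */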
+static int cs42l42_pll_config(struct snd_soc_codec *codec)
+{
+ struct cs42l42_private *cs42l42 = snd_soc_codec_get_drvdata(codec);
+ int i;
+ u32 fsync;
+
+ for (i = 0; i < ARRAY_SIZE(pll_ratio_table); i++) {
+ if (pll_ratio_table[i].sclk == cs42l42->sclk) {
+ /* Configure the internal sample rate */
+ snd_soc_update_bits(codec, CS42L42_MCLK_CTL,
+ CS42L42_INTERNAL_FS_MASK,
+ ((pll_ratio_table[i].mclk_int !=
+ 12000000) &&
+ (pll_ratio_table[i].mclk_int !=
+ 24000000)) <<
+ CS42L42_INTERNAL_FS_SHIFT);
+ /* Set the MCLK src (PLL or SCLK) and the divide
+ * ratio
+ */
+ snd_soc_update_bits(codec, CS42L42_MCLK_SRC_SEL,
+ CS42L42_MCLK_SRC_SEL_MASK |
+ CS42L42_MCLKDIV_MASK,
+ (pll_ratio_table[i].mclk_src_sel
+ << CS42L42_MCLK_SRC_SEL_SHIFT) |
+ (pll_ratio_table[i].mclk_div <<
+ CS42L42_MCLKDIV_SHIFT));
+ /* Set up the LRCLK */
+ fsync = cs42l42->sclk / cs42l42->srate;
+ if (((fsync * cs42l42->srate) != cs42l42->sclk)
+ || ((fsync % 2) != 0)) {
+ dev_err(codec->dev,
+ "Unsupported sclk %d/sample rate %d\n",
+ cs42l42->sclk,
+ cs42l42->srate);
+ return -EINVAL;
+ }
+ /* Set the LRCLK period */
+ snd_soc_update_bits(codec,
+ CS42L42_FSYNC_P_LOWER,
+ CS42L42_FSYNC_PERIOD_MASK,
+ CS42L42_FRAC0_VAL(fsync - 1) <<
+ CS42L42_FSYNC_PERIOD_SHIFT);
+ snd_soc_update_bits(codec,
+ CS42L42_FSYNC_P_UPPER,
+ CS42L42_FSYNC_PERIOD_MASK,
+ CS42L42_FRAC1_VAL(fsync - 1) <<
+ CS42L42_FSYNC_PERIOD_SHIFT);
+ /* Set the LRCLK to 50% duty cycle */
+ fsync = fsync / 2;
+ snd_soc_update_bits(codec,
+ CS42L42_FSYNC_PW_LOWER,
+ CS42L42_FSYNC_PULSE_WIDTH_MASK,
+ CS42L42_FRAC0_VAL(fsync - 1) <<
+ CS42L42_FSYNC_PULSE_WIDTH_SHIFT);
+ snd_soc_update_bits(codec,
+ CS42L42_FSYNC_PW_UPPER,
+ CS42L42_FSYNC_PULSE_WIDTH_MASK,
+ CS42L42_FRAC1_VAL(fsync - 1) <<
+ CS42L42_FSYNC_PULSE_WIDTH_SHIFT);
+ snd_soc_update_bits(codec,
+ CS42L42_ASP_FRM_CFG,
+ CS42L42_ASP_5050_MASK,
+ CS42L42_ASP_5050_MASK);
+ /* Set the frame delay to 1.0 SCLK clocks */
+ snd_soc_update_bits(codec, CS42L42_ASP_FRM_CFG,
+ CS42L42_ASP_FSD_MASK,
+ CS42L42_ASP_FSD_1_0 <<
+ CS42L42_ASP_FSD_SHIFT);
+ /* Set the sample rates (96k or lower) */
+ snd_soc_update_bits(codec, CS42L42_FS_RATE_EN,
+ CS42L42_FS_EN_MASK,
+ (CS42L42_FS_EN_IASRC_96K |
+ CS42L42_FS_EN_OASRC_96K) <<
+ CS42L42_FS_EN_SHIFT);
+ /* Set the input/output internal MCLK clock ~12 MHz */
+ snd_soc_update_bits(codec, CS42L42_IN_ASRC_CLK,
+ CS42L42_CLK_IASRC_SEL_MASK,
+ CS42L42_CLK_IASRC_SEL_12 <<
+ CS42L42_CLK_IASRC_SEL_SHIFT);
+ snd_soc_update_bits(codec,
+ CS42L42_OUT_ASRC_CLK,
+ CS42L42_CLK_OASRC_SEL_MASK,
+ CS42L42_CLK_OASRC_SEL_12 <<
+ CS42L42_CLK_OASRC_SEL_SHIFT);
+ /* channel 1 on low LRCLK, 32 bit */
+ snd_soc_update_bits(codec,
+ CS42L42_ASP_RX_DAI0_CH1_AP_RES,
+ CS42L42_ASP_RX_CH_AP_MASK |
+ CS42L42_ASP_RX_CH_RES_MASK,
+ (CS42L42_ASP_RX_CH_AP_LOW <<
+ CS42L42_ASP_RX_CH_AP_SHIFT) |
+ (CS42L42_ASP_RX_CH_RES_32 <<
+ CS42L42_ASP_RX_CH_RES_SHIFT));
+ /* Channel 2 on high LRCLK, 32 bit */
+ snd_soc_update_bits(codec,
+ CS42L42_ASP_RX_DAI0_CH2_AP_RES,
+ CS42L42_ASP_RX_CH_AP_MASK |
+ CS42L42_ASP_RX_CH_RES_MASK,
+ (CS42L42_ASP_RX_CH_AP_HI <<
+ CS42L42_ASP_RX_CH_AP_SHIFT) |
+ (CS42L42_ASP_RX_CH_RES_32 <<
+ CS42L42_ASP_RX_CH_RES_SHIFT));
+ if (pll_ratio_table[i].mclk_src_sel == 0) {
+ /* Pass the clock straight through */
+ snd_soc_update_bits(codec,
+ CS42L42_PLL_CTL1,
+ CS42L42_PLL_START_MASK, 0);
+ } else {
+ /* Configure PLL per table 4-5 */
+ snd_soc_update_bits(codec,
+ CS42L42_PLL_DIV_CFG1,
+ CS42L42_SCLK_PREDIV_MASK,
+ pll_ratio_table[i].sclk_prediv
+ << CS42L42_SCLK_PREDIV_SHIFT);
+ snd_soc_update_bits(codec,
+ CS42L42_PLL_DIV_INT,
+ CS42L42_PLL_DIV_INT_MASK,
+ pll_ratio_table[i].pll_div_int
+ << CS42L42_PLL_DIV_INT_SHIFT);
+ snd_soc_update_bits(codec,
+ CS42L42_PLL_DIV_FRAC0,
+ CS42L42_PLL_DIV_FRAC_MASK,
+ CS42L42_FRAC0_VAL(
+ pll_ratio_table[i].pll_div_frac)
+ << CS42L42_PLL_DIV_FRAC_SHIFT);
+ snd_soc_update_bits(codec,
+ CS42L42_PLL_DIV_FRAC1,
+ CS42L42_PLL_DIV_FRAC_MASK,
+ CS42L42_FRAC1_VAL(
+ pll_ratio_table[i].pll_div_frac)
+ << CS42L42_PLL_DIV_FRAC_SHIFT);
+ snd_soc_update_bits(codec,
+ CS42L42_PLL_DIV_FRAC2,
+ CS42L42_PLL_DIV_FRAC_MASK,
+ CS42L42_FRAC2_VAL(
+ pll_ratio_table[i].pll_div_frac)
+ << CS42L42_PLL_DIV_FRAC_SHIFT);
+ snd_soc_update_bits(codec,
+ CS42L42_PLL_CTL4,
+ CS42L42_PLL_MODE_MASK,
+ pll_ratio_table[i].pll_mode
+ << CS42L42_PLL_MODE_SHIFT);
+ snd_soc_update_bits(codec,
+ CS42L42_PLL_CTL3,
+ CS42L42_PLL_DIVOUT_MASK,
+ pll_ratio_table[i].pll_divout
+ << CS42L42_PLL_DIVOUT_SHIFT);
+ snd_soc_update_bits(codec,
+ CS42L42_PLL_CAL_RATIO,
+ CS42L42_PLL_CAL_RATIO_MASK,
+ pll_ratio_table[i].pll_cal_ratio
+ << CS42L42_PLL_CAL_RATIO_SHIFT);
+ }
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ u32 asp_cfg_val = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFM:
+ asp_cfg_val |= CS42L42_ASP_MASTER_MODE <<
+ CS42L42_ASP_MODE_SHIFT;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ asp_cfg_val |= CS42L42_ASP_SLAVE_MODE <<
+ CS42L42_ASP_MODE_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* interface format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ case SND_SOC_DAIFMT_LEFT_J:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Bitclock/frame inversion */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ asp_cfg_val |= CS42L42_ASP_POL_INV <<
+ CS42L42_ASP_LCPOL_IN_SHIFT;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ asp_cfg_val |= CS42L42_ASP_POL_INV <<
+ CS42L42_ASP_SCPOL_IN_DAC_SHIFT;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ asp_cfg_val |= CS42L42_ASP_POL_INV <<
+ CS42L42_ASP_LCPOL_IN_SHIFT;
+ asp_cfg_val |= CS42L42_ASP_POL_INV <<
+ CS42L42_ASP_SCPOL_IN_DAC_SHIFT;
+ break;
+ }
+
+ snd_soc_update_bits(codec, CS42L42_ASP_CLK_CFG,
+ CS42L42_ASP_MODE_MASK |
+ CS42L42_ASP_SCPOL_IN_DAC_MASK |
+ CS42L42_ASP_LCPOL_IN_MASK, asp_cfg_val);
+
+ return 0;
+}
+
+static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct cs42l42_private *cs42l42 = snd_soc_codec_get_drvdata(codec);
+ int retval;
+
+ cs42l42->srate = params_rate(params);
+ cs42l42->swidth = params_width(params);
+
+ retval = cs42l42_pll_config(codec);
+
+ return retval;
+}
+
+static int cs42l42_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct cs42l42_private *cs42l42 = snd_soc_codec_get_drvdata(codec);
+
+ cs42l42->sclk = freq;
+
+ return 0;
+}
+
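+/*
+ * On mute, stop the PLL and fall back to the internal oscillator; on unmute,
+ * restart the PLL, pick the full-scale volume flag from the detected
+ * headphone load and hand the clock back to SCLK.
+ */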
+static int cs42l42_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ unsigned int regval;
+ u8 fullScaleVol;
+
+ if (mute) {
+ /* Mark SCLK as not present to turn on the internal
+ * oscillator.
+ */
+ snd_soc_update_bits(codec, CS42L42_OSC_SWITCH,
+ CS42L42_SCLK_PRESENT_MASK, 0);
+
+ snd_soc_update_bits(codec, CS42L42_PLL_CTL1,
+ CS42L42_PLL_START_MASK,
+ 0 << CS42L42_PLL_START_SHIFT);
+
+ /* Mute the headphone */
+ snd_soc_update_bits(codec, CS42L42_HP_CTL,
+ CS42L42_HP_ANA_AMUTE_MASK |
+ CS42L42_HP_ANA_BMUTE_MASK,
+ CS42L42_HP_ANA_AMUTE_MASK |
+ CS42L42_HP_ANA_BMUTE_MASK);
+ } else {
+ snd_soc_update_bits(codec, CS42L42_PLL_CTL1,
+ CS42L42_PLL_START_MASK,
+ 1 << CS42L42_PLL_START_SHIFT);
+ /* Read the headphone load */
+ regval = snd_soc_read(codec, CS42L42_LOAD_DET_RCSTAT);
+ if (((regval & CS42L42_RLA_STAT_MASK) >>
+ CS42L42_RLA_STAT_SHIFT) == CS42L42_RLA_STAT_15_OHM) {
+ fullScaleVol = CS42L42_HP_FULL_SCALE_VOL_MASK;
+ } else {
+ fullScaleVol = 0;
+ }
+
+ /* Un-mute the headphone, set the full scale volume flag */
+ snd_soc_update_bits(codec, CS42L42_HP_CTL,
+ CS42L42_HP_ANA_AMUTE_MASK |
+ CS42L42_HP_ANA_BMUTE_MASK |
+ CS42L42_HP_FULL_SCALE_VOL_MASK, fullScaleVol);
+
+ /* Mark SCLK as present, turn off internal oscillator */
+ snd_soc_update_bits(codec, CS42L42_OSC_SWITCH,
+ CS42L42_SCLK_PRESENT_MASK,
+ CS42L42_SCLK_PRESENT_MASK);
+ }
+
+ return 0;
+}
+
+#define CS42L42_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static const struct snd_soc_dai_ops cs42l42_ops = {
+ .hw_params = cs42l42_pcm_hw_params,
+ .set_fmt = cs42l42_set_dai_fmt,
+ .set_sysclk = cs42l42_set_sysclk,
+ .digital_mute = cs42l42_digital_mute
+};
+
+static struct snd_soc_dai_driver cs42l42_dai = {
+ .name = "cs42l42",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = CS42L42_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = CS42L42_FORMATS,
+ },
+ .ops = &cs42l42_ops,
+};
+
+static void cs42l42_process_hs_type_detect(struct cs42l42_private *cs42l42)
+{
+ unsigned int hs_det_status;
+ unsigned int int_status;
+
+ /* Mask the auto detect interrupt */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_CODEC_INT_MASK,
+ CS42L42_PDN_DONE_MASK |
+ CS42L42_HSDET_AUTO_DONE_MASK,
+ (1 << CS42L42_PDN_DONE_SHIFT) |
+ (1 << CS42L42_HSDET_AUTO_DONE_SHIFT));
+
+ /* Set hs detect to automatic, disabled mode */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_HSDET_CTL2,
+ CS42L42_HSDET_CTRL_MASK |
+ CS42L42_HSDET_SET_MASK |
+ CS42L42_HSBIAS_REF_MASK |
+ CS42L42_HSDET_AUTO_TIME_MASK,
+ (2 << CS42L42_HSDET_CTRL_SHIFT) |
+ (2 << CS42L42_HSDET_SET_SHIFT) |
+ (0 << CS42L42_HSBIAS_REF_SHIFT) |
+ (3 << CS42L42_HSDET_AUTO_TIME_SHIFT));
+
+ /* Read and save the hs detection result */
+ regmap_read(cs42l42->regmap, CS42L42_HS_DET_STATUS, &hs_det_status);
+
+ cs42l42->hs_type = (hs_det_status & CS42L42_HSDET_TYPE_MASK) >>
+ CS42L42_HSDET_TYPE_SHIFT;
+
+ /* Set up button detection */
+ if ((cs42l42->hs_type == CS42L42_PLUG_CTIA) ||
+ (cs42l42->hs_type == CS42L42_PLUG_OMTP)) {
+ /* Set auto HS bias settings to default */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_HSBIAS_SC_AUTOCTL,
+ CS42L42_HSBIAS_SENSE_EN_MASK |
+ CS42L42_AUTO_HSBIAS_HIZ_MASK |
+ CS42L42_TIP_SENSE_EN_MASK |
+ CS42L42_HSBIAS_SENSE_TRIP_MASK,
+ (0 << CS42L42_HSBIAS_SENSE_EN_SHIFT) |
+ (0 << CS42L42_AUTO_HSBIAS_HIZ_SHIFT) |
+ (0 << CS42L42_TIP_SENSE_EN_SHIFT) |
+ (3 << CS42L42_HSBIAS_SENSE_TRIP_SHIFT));
+
+ /* Set up hs detect level sensitivity */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_MIC_DET_CTL1,
+ CS42L42_LATCH_TO_VP_MASK |
+ CS42L42_EVENT_STAT_SEL_MASK |
+ CS42L42_HS_DET_LEVEL_MASK,
+ (1 << CS42L42_LATCH_TO_VP_SHIFT) |
+ (0 << CS42L42_EVENT_STAT_SEL_SHIFT) |
+ (cs42l42->bias_thresholds[0] <<
+ CS42L42_HS_DET_LEVEL_SHIFT));
+
+ /* Set auto HS bias settings to default */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_HSBIAS_SC_AUTOCTL,
+ CS42L42_HSBIAS_SENSE_EN_MASK |
+ CS42L42_AUTO_HSBIAS_HIZ_MASK |
+ CS42L42_TIP_SENSE_EN_MASK |
+ CS42L42_HSBIAS_SENSE_TRIP_MASK,
+ (1 << CS42L42_HSBIAS_SENSE_EN_SHIFT) |
+ (1 << CS42L42_AUTO_HSBIAS_HIZ_SHIFT) |
+ (0 << CS42L42_TIP_SENSE_EN_SHIFT) |
+ (3 << CS42L42_HSBIAS_SENSE_TRIP_SHIFT));
+
+ /* Turn on level detect circuitry */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_MISC_DET_CTL,
+ CS42L42_DETECT_MODE_MASK |
+ CS42L42_HSBIAS_CTL_MASK |
+ CS42L42_PDN_MIC_LVL_DET_MASK,
+ (0 << CS42L42_DETECT_MODE_SHIFT) |
+ (3 << CS42L42_HSBIAS_CTL_SHIFT) |
+ (0 << CS42L42_PDN_MIC_LVL_DET_SHIFT));
+
+ msleep(cs42l42->btn_det_init_dbnce);
+
+ /* Clear any button interrupts before unmasking them */
+ regmap_read(cs42l42->regmap, CS42L42_DET_INT_STATUS2,
+ &int_status);
+
+ /* Unmask button detect interrupts */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_DET_INT2_MASK,
+ CS42L42_M_DETECT_TF_MASK |
+ CS42L42_M_DETECT_FT_MASK |
+ CS42L42_M_HSBIAS_HIZ_MASK |
+ CS42L42_M_SHORT_RLS_MASK |
+ CS42L42_M_SHORT_DET_MASK,
+ (0 << CS42L42_M_DETECT_TF_SHIFT) |
+ (0 << CS42L42_M_DETECT_FT_SHIFT) |
+ (0 << CS42L42_M_HSBIAS_HIZ_SHIFT) |
+ (1 << CS42L42_M_SHORT_RLS_SHIFT) |
+ (1 << CS42L42_M_SHORT_DET_SHIFT));
+ } else {
+ /* Make sure button detect and HS bias circuits are off */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_MISC_DET_CTL,
+ CS42L42_DETECT_MODE_MASK |
+ CS42L42_HSBIAS_CTL_MASK |
+ CS42L42_PDN_MIC_LVL_DET_MASK,
+ (0 << CS42L42_DETECT_MODE_SHIFT) |
+ (1 << CS42L42_HSBIAS_CTL_SHIFT) |
+ (1 << CS42L42_PDN_MIC_LVL_DET_SHIFT));
+ }
+
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_DAC_CTL2,
+ CS42L42_HPOUT_PULLDOWN_MASK |
+ CS42L42_HPOUT_LOAD_MASK |
+ CS42L42_HPOUT_CLAMP_MASK |
+ CS42L42_DAC_HPF_EN_MASK |
+ CS42L42_DAC_MON_EN_MASK,
+ (0 << CS42L42_HPOUT_PULLDOWN_SHIFT) |
+ (0 << CS42L42_HPOUT_LOAD_SHIFT) |
+ (0 << CS42L42_HPOUT_CLAMP_SHIFT) |
+ (1 << CS42L42_DAC_HPF_EN_SHIFT) |
+ (0 << CS42L42_DAC_MON_EN_SHIFT));
+
+ /* Unmask tip sense interrupts */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_TSRS_PLUG_INT_MASK,
+ CS42L42_RS_PLUG_MASK |
+ CS42L42_RS_UNPLUG_MASK |
+ CS42L42_TS_PLUG_MASK |
+ CS42L42_TS_UNPLUG_MASK,
+ (1 << CS42L42_RS_PLUG_SHIFT) |
+ (1 << CS42L42_RS_UNPLUG_SHIFT) |
+ (0 << CS42L42_TS_PLUG_SHIFT) |
+ (0 << CS42L42_TS_UNPLUG_SHIFT));
+}
+
+static void cs42l42_init_hs_type_detect(struct cs42l42_private *cs42l42)
+{
+ /* Mask tip sense interrupts */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_TSRS_PLUG_INT_MASK,
+ CS42L42_RS_PLUG_MASK |
+ CS42L42_RS_UNPLUG_MASK |
+ CS42L42_TS_PLUG_MASK |
+ CS42L42_TS_UNPLUG_MASK,
+ (1 << CS42L42_RS_PLUG_SHIFT) |
+ (1 << CS42L42_RS_UNPLUG_SHIFT) |
+ (1 << CS42L42_TS_PLUG_SHIFT) |
+ (1 << CS42L42_TS_UNPLUG_SHIFT));
+
+ /* Make sure button detect and HS bias circuits are off */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_MISC_DET_CTL,
+ CS42L42_DETECT_MODE_MASK |
+ CS42L42_HSBIAS_CTL_MASK |
+ CS42L42_PDN_MIC_LVL_DET_MASK,
+ (0 << CS42L42_DETECT_MODE_SHIFT) |
+ (1 << CS42L42_HSBIAS_CTL_SHIFT) |
+ (1 << CS42L42_PDN_MIC_LVL_DET_SHIFT));
+
+ /* Set auto HS bias settings to default */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_HSBIAS_SC_AUTOCTL,
+ CS42L42_HSBIAS_SENSE_EN_MASK |
+ CS42L42_AUTO_HSBIAS_HIZ_MASK |
+ CS42L42_TIP_SENSE_EN_MASK |
+ CS42L42_HSBIAS_SENSE_TRIP_MASK,
+ (0 << CS42L42_HSBIAS_SENSE_EN_SHIFT) |
+ (0 << CS42L42_AUTO_HSBIAS_HIZ_SHIFT) |
+ (0 << CS42L42_TIP_SENSE_EN_SHIFT) |
+ (3 << CS42L42_HSBIAS_SENSE_TRIP_SHIFT));
+
+ /* Set hs detect to manual, disabled mode */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_HSDET_CTL2,
+ CS42L42_HSDET_CTRL_MASK |
+ CS42L42_HSDET_SET_MASK |
+ CS42L42_HSBIAS_REF_MASK |
+ CS42L42_HSDET_AUTO_TIME_MASK,
+ (0 << CS42L42_HSDET_CTRL_SHIFT) |
+ (2 << CS42L42_HSDET_SET_SHIFT) |
+ (0 << CS42L42_HSBIAS_REF_SHIFT) |
+ (3 << CS42L42_HSDET_AUTO_TIME_SHIFT));
+
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_DAC_CTL2,
+ CS42L42_HPOUT_PULLDOWN_MASK |
+ CS42L42_HPOUT_LOAD_MASK |
+ CS42L42_HPOUT_CLAMP_MASK |
+ CS42L42_DAC_HPF_EN_MASK |
+ CS42L42_DAC_MON_EN_MASK,
+ (8 << CS42L42_HPOUT_PULLDOWN_SHIFT) |
+ (0 << CS42L42_HPOUT_LOAD_SHIFT) |
+ (1 << CS42L42_HPOUT_CLAMP_SHIFT) |
+ (1 << CS42L42_DAC_HPF_EN_SHIFT) |
+ (1 << CS42L42_DAC_MON_EN_SHIFT));
+
+ /* Power up HS bias to 2.7V */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_MISC_DET_CTL,
+ CS42L42_DETECT_MODE_MASK |
+ CS42L42_HSBIAS_CTL_MASK |
+ CS42L42_PDN_MIC_LVL_DET_MASK,
+ (0 << CS42L42_DETECT_MODE_SHIFT) |
+ (3 << CS42L42_HSBIAS_CTL_SHIFT) |
+ (1 << CS42L42_PDN_MIC_LVL_DET_SHIFT));
+
+ /* Wait for HS bias to ramp up */
+ msleep(cs42l42->hs_bias_ramp_time);
+
+ /* Unmask auto detect interrupt */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_CODEC_INT_MASK,
+ CS42L42_PDN_DONE_MASK |
+ CS42L42_HSDET_AUTO_DONE_MASK,
+ (1 << CS42L42_PDN_DONE_SHIFT) |
+ (0 << CS42L42_HSDET_AUTO_DONE_SHIFT));
+
+ /* Set hs detect to automatic, enabled mode */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_HSDET_CTL2,
+ CS42L42_HSDET_CTRL_MASK |
+ CS42L42_HSDET_SET_MASK |
+ CS42L42_HSBIAS_REF_MASK |
+ CS42L42_HSDET_AUTO_TIME_MASK,
+ (3 << CS42L42_HSDET_CTRL_SHIFT) |
+ (2 << CS42L42_HSDET_SET_SHIFT) |
+ (0 << CS42L42_HSBIAS_REF_SHIFT) |
+ (3 << CS42L42_HSDET_AUTO_TIME_SHIFT));
+}
+
+static void cs42l42_cancel_hs_type_detect(struct cs42l42_private *cs42l42)
+{
+ /* Mask button detect interrupts */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_DET_INT2_MASK,
+ CS42L42_M_DETECT_TF_MASK |
+ CS42L42_M_DETECT_FT_MASK |
+ CS42L42_M_HSBIAS_HIZ_MASK |
+ CS42L42_M_SHORT_RLS_MASK |
+ CS42L42_M_SHORT_DET_MASK,
+ (1 << CS42L42_M_DETECT_TF_SHIFT) |
+ (1 << CS42L42_M_DETECT_FT_SHIFT) |
+ (1 << CS42L42_M_HSBIAS_HIZ_SHIFT) |
+ (1 << CS42L42_M_SHORT_RLS_SHIFT) |
+ (1 << CS42L42_M_SHORT_DET_SHIFT));
+
+ /* Ground HS bias */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_MISC_DET_CTL,
+ CS42L42_DETECT_MODE_MASK |
+ CS42L42_HSBIAS_CTL_MASK |
+ CS42L42_PDN_MIC_LVL_DET_MASK,
+ (0 << CS42L42_DETECT_MODE_SHIFT) |
+ (1 << CS42L42_HSBIAS_CTL_SHIFT) |
+ (1 << CS42L42_PDN_MIC_LVL_DET_SHIFT));
+
+ /* Set auto HS bias settings to default */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_HSBIAS_SC_AUTOCTL,
+ CS42L42_HSBIAS_SENSE_EN_MASK |
+ CS42L42_AUTO_HSBIAS_HIZ_MASK |
+ CS42L42_TIP_SENSE_EN_MASK |
+ CS42L42_HSBIAS_SENSE_TRIP_MASK,
+ (0 << CS42L42_HSBIAS_SENSE_EN_SHIFT) |
+ (0 << CS42L42_AUTO_HSBIAS_HIZ_SHIFT) |
+ (0 << CS42L42_TIP_SENSE_EN_SHIFT) |
+ (3 << CS42L42_HSBIAS_SENSE_TRIP_SHIFT));
+
+ /* Set hs detect to manual, disabled mode */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_HSDET_CTL2,
+ CS42L42_HSDET_CTRL_MASK |
+ CS42L42_HSDET_SET_MASK |
+ CS42L42_HSBIAS_REF_MASK |
+ CS42L42_HSDET_AUTO_TIME_MASK,
+ (0 << CS42L42_HSDET_CTRL_SHIFT) |
+ (2 << CS42L42_HSDET_SET_SHIFT) |
+ (0 << CS42L42_HSBIAS_REF_SHIFT) |
+ (3 << CS42L42_HSDET_AUTO_TIME_SHIFT));
+}
+
+static void cs42l42_handle_button_press(struct cs42l42_private *cs42l42)
+{
+ int bias_level;
+ unsigned int detect_status;
+
+ /* Mask button detect interrupts */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_DET_INT2_MASK,
+ CS42L42_M_DETECT_TF_MASK |
+ CS42L42_M_DETECT_FT_MASK |
+ CS42L42_M_HSBIAS_HIZ_MASK |
+ CS42L42_M_SHORT_RLS_MASK |
+ CS42L42_M_SHORT_DET_MASK,
+ (1 << CS42L42_M_DETECT_TF_SHIFT) |
+ (1 << CS42L42_M_DETECT_FT_SHIFT) |
+ (1 << CS42L42_M_HSBIAS_HIZ_SHIFT) |
+ (1 << CS42L42_M_SHORT_RLS_SHIFT) |
+ (1 << CS42L42_M_SHORT_DET_SHIFT));
+
+ usleep_range(cs42l42->btn_det_event_dbnce * 1000,
+ cs42l42->btn_det_event_dbnce * 2000);
+
+ /* Test all 4 level detect biases */
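+ /*
+  * Walk the remaining thresholds, starting at index 1; the level at
+  * which the headset-true (HS_TRUE) status clears determines which
+  * button function is reported in the switch statement below.
+  */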
+ bias_level = 1;
+ do {
+ /* Adjust button detect level sensitivity */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_MIC_DET_CTL1,
+ CS42L42_LATCH_TO_VP_MASK |
+ CS42L42_EVENT_STAT_SEL_MASK |
+ CS42L42_HS_DET_LEVEL_MASK,
+ (1 << CS42L42_LATCH_TO_VP_SHIFT) |
+ (0 << CS42L42_EVENT_STAT_SEL_SHIFT) |
+ (cs42l42->bias_thresholds[bias_level] <<
+ CS42L42_HS_DET_LEVEL_SHIFT));
+
+ regmap_read(cs42l42->regmap, CS42L42_DET_STATUS2,
+ &detect_status);
+ } while ((detect_status & CS42L42_HS_TRUE_MASK) &&
+ (++bias_level < CS42L42_NUM_BIASES));
+
+ switch (bias_level) {
+ case 1: /* Function C button press */
+ dev_dbg(cs42l42->codec->dev, "Function C button press\n");
+ break;
+ case 2: /* Function B button press */
+ dev_dbg(cs42l42->codec->dev, "Function B button press\n");
+ break;
+ case 3: /* Function D button press */
+ dev_dbg(cs42l42->codec->dev, "Function D button press\n");
+ break;
+ case 4: /* Function A button press */
+ dev_dbg(cs42l42->codec->dev, "Function A button press\n");
+ break;
+ }
+
+ /* Set button detect level sensitivity back to default */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_MIC_DET_CTL1,
+ CS42L42_LATCH_TO_VP_MASK |
+ CS42L42_EVENT_STAT_SEL_MASK |
+ CS42L42_HS_DET_LEVEL_MASK,
+ (1 << CS42L42_LATCH_TO_VP_SHIFT) |
+ (0 << CS42L42_EVENT_STAT_SEL_SHIFT) |
+ (cs42l42->bias_thresholds[0] << CS42L42_HS_DET_LEVEL_SHIFT));
+
+ /* Clear any button interrupts before unmasking them */
+ regmap_read(cs42l42->regmap, CS42L42_DET_INT_STATUS2,
+ &detect_status);
+
+ /* Unmask button detect interrupts */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_DET_INT2_MASK,
+ CS42L42_M_DETECT_TF_MASK |
+ CS42L42_M_DETECT_FT_MASK |
+ CS42L42_M_HSBIAS_HIZ_MASK |
+ CS42L42_M_SHORT_RLS_MASK |
+ CS42L42_M_SHORT_DET_MASK,
+ (0 << CS42L42_M_DETECT_TF_SHIFT) |
+ (0 << CS42L42_M_DETECT_FT_SHIFT) |
+ (0 << CS42L42_M_HSBIAS_HIZ_SHIFT) |
+ (1 << CS42L42_M_SHORT_RLS_SHIFT) |
+ (1 << CS42L42_M_SHORT_DET_SHIFT));
+}
+
+struct cs42l42_irq_params {
+ u16 status_addr;
+ u16 mask_addr;
+ u8 mask;
+};
+
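+ /*
+  * Each entry pairs a sticky interrupt status register with its mask
+  * register and the bits that are treated as valid interrupt sources.
+  */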
+static const struct cs42l42_irq_params irq_params_table[] = {
+ {CS42L42_ADC_OVFL_STATUS, CS42L42_ADC_OVFL_INT_MASK,
+ CS42L42_ADC_OVFL_VAL_MASK},
+ {CS42L42_MIXER_STATUS, CS42L42_MIXER_INT_MASK,
+ CS42L42_MIXER_VAL_MASK},
+ {CS42L42_SRC_STATUS, CS42L42_SRC_INT_MASK,
+ CS42L42_SRC_VAL_MASK},
+ {CS42L42_ASP_RX_STATUS, CS42L42_ASP_RX_INT_MASK,
+ CS42L42_ASP_RX_VAL_MASK},
+ {CS42L42_ASP_TX_STATUS, CS42L42_ASP_TX_INT_MASK,
+ CS42L42_ASP_TX_VAL_MASK},
+ {CS42L42_CODEC_STATUS, CS42L42_CODEC_INT_MASK,
+ CS42L42_CODEC_VAL_MASK},
+ {CS42L42_DET_INT_STATUS1, CS42L42_DET_INT1_MASK,
+ CS42L42_DET_INT_VAL1_MASK},
+ {CS42L42_DET_INT_STATUS2, CS42L42_DET_INT2_MASK,
+ CS42L42_DET_INT_VAL2_MASK},
+ {CS42L42_SRCPL_INT_STATUS, CS42L42_SRCPL_INT_MASK,
+ CS42L42_SRCPL_VAL_MASK},
+ {CS42L42_VPMON_STATUS, CS42L42_VPMON_INT_MASK,
+ CS42L42_VPMON_VAL_MASK},
+ {CS42L42_PLL_LOCK_STATUS, CS42L42_PLL_LOCK_INT_MASK,
+ CS42L42_PLL_LOCK_VAL_MASK},
+ {CS42L42_TSRS_PLUG_STATUS, CS42L42_TSRS_PLUG_INT_MASK,
+ CS42L42_TSRS_PLUG_VAL_MASK}
+};
+
+static irqreturn_t cs42l42_irq_thread(int irq, void *data)
+{
+ struct cs42l42_private *cs42l42 = (struct cs42l42_private *)data;
+ struct snd_soc_codec *codec = cs42l42->codec;
+ unsigned int stickies[12];
+ unsigned int masks[12];
+ unsigned int current_plug_status;
+ unsigned int current_button_status;
+ unsigned int i;
+
+ /* Read sticky registers to clear the interrupts */
+ for (i = 0; i < ARRAY_SIZE(stickies); i++) {
+ regmap_read(cs42l42->regmap, irq_params_table[i].status_addr,
+ &(stickies[i]));
+ regmap_read(cs42l42->regmap, irq_params_table[i].mask_addr,
+ &(masks[i]));
+ stickies[i] = stickies[i] & (~masks[i]) &
+ irq_params_table[i].mask;
+ }
+
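+ /*
+  * stickies[] follows irq_params_table order: index 5 is the codec
+  * status, 7 the button detect status and 11 the tip/ring sense status.
+  */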
+ /* Read tip sense status before handling type detect */
+ current_plug_status = (stickies[11] &
+ (CS42L42_TS_PLUG_MASK | CS42L42_TS_UNPLUG_MASK)) >>
+ CS42L42_TS_PLUG_SHIFT;
+
+ /* Read button sense status */
+ current_button_status = stickies[7] &
+ (CS42L42_M_DETECT_TF_MASK |
+ CS42L42_M_DETECT_FT_MASK |
+ CS42L42_M_HSBIAS_HIZ_MASK);
+
+ /* Check auto-detect status */
+ if ((~masks[5]) & irq_params_table[5].mask) {
+ if (stickies[5] & CS42L42_HSDET_AUTO_DONE_MASK) {
+ cs42l42_process_hs_type_detect(cs42l42);
+ dev_dbg(codec->dev,
+ "Auto detect done (%d)\n",
+ cs42l42->hs_type);
+ }
+ }
+
+ /* Check tip sense status */
+ if ((~masks[11]) & irq_params_table[11].mask) {
+ switch (current_plug_status) {
+ case CS42L42_TS_PLUG:
+ if (cs42l42->plug_state != CS42L42_TS_PLUG) {
+ cs42l42->plug_state = CS42L42_TS_PLUG;
+ cs42l42_init_hs_type_detect(cs42l42);
+ }
+ break;
+
+ case CS42L42_TS_UNPLUG:
+ if (cs42l42->plug_state != CS42L42_TS_UNPLUG) {
+ cs42l42->plug_state = CS42L42_TS_UNPLUG;
+ cs42l42_cancel_hs_type_detect(cs42l42);
+ dev_dbg(codec->dev,
+ "Unplug event\n");
+ }
+ break;
+
+ default:
+ if (cs42l42->plug_state != CS42L42_TS_TRANS)
+ cs42l42->plug_state = CS42L42_TS_TRANS;
+ }
+ }
+
+ /* Check button detect status */
+ if ((~masks[7]) & irq_params_table[7].mask) {
+ if (!(current_button_status &
+ CS42L42_M_HSBIAS_HIZ_MASK)) {
+
+ if (current_button_status &
+ CS42L42_M_DETECT_TF_MASK) {
+ dev_dbg(codec->dev,
+ "Button released\n");
+ } else if (current_button_status &
+ CS42L42_M_DETECT_FT_MASK) {
+ cs42l42_handle_button_press(cs42l42);
+ }
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void cs42l42_set_interrupt_masks(struct cs42l42_private *cs42l42)
+{
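+ /*
+  * Writing 1 masks (disables) an interrupt source and 0 unmasks it;
+  * only the tip sense plug/unplug sources are left unmasked here.
+  */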
+ regmap_update_bits(cs42l42->regmap, CS42L42_ADC_OVFL_INT_MASK,
+ CS42L42_ADC_OVFL_MASK,
+ (1 << CS42L42_ADC_OVFL_SHIFT));
+
+ regmap_update_bits(cs42l42->regmap, CS42L42_MIXER_INT_MASK,
+ CS42L42_MIX_CHB_OVFL_MASK |
+ CS42L42_MIX_CHA_OVFL_MASK |
+ CS42L42_EQ_OVFL_MASK |
+ CS42L42_EQ_BIQUAD_OVFL_MASK,
+ (1 << CS42L42_MIX_CHB_OVFL_SHIFT) |
+ (1 << CS42L42_MIX_CHA_OVFL_SHIFT) |
+ (1 << CS42L42_EQ_OVFL_SHIFT) |
+ (1 << CS42L42_EQ_BIQUAD_OVFL_SHIFT));
+
+ regmap_update_bits(cs42l42->regmap, CS42L42_SRC_INT_MASK,
+ CS42L42_SRC_ILK_MASK |
+ CS42L42_SRC_OLK_MASK |
+ CS42L42_SRC_IUNLK_MASK |
+ CS42L42_SRC_OUNLK_MASK,
+ (1 << CS42L42_SRC_ILK_SHIFT) |
+ (1 << CS42L42_SRC_OLK_SHIFT) |
+ (1 << CS42L42_SRC_IUNLK_SHIFT) |
+ (1 << CS42L42_SRC_OUNLK_SHIFT));
+
+ regmap_update_bits(cs42l42->regmap, CS42L42_ASP_RX_INT_MASK,
+ CS42L42_ASPRX_NOLRCK_MASK |
+ CS42L42_ASPRX_EARLY_MASK |
+ CS42L42_ASPRX_LATE_MASK |
+ CS42L42_ASPRX_ERROR_MASK |
+ CS42L42_ASPRX_OVLD_MASK,
+ (1 << CS42L42_ASPRX_NOLRCK_SHIFT) |
+ (1 << CS42L42_ASPRX_EARLY_SHIFT) |
+ (1 << CS42L42_ASPRX_LATE_SHIFT) |
+ (1 << CS42L42_ASPRX_ERROR_SHIFT) |
+ (1 << CS42L42_ASPRX_OVLD_SHIFT));
+
+ regmap_update_bits(cs42l42->regmap, CS42L42_ASP_TX_INT_MASK,
+ CS42L42_ASPTX_NOLRCK_MASK |
+ CS42L42_ASPTX_EARLY_MASK |
+ CS42L42_ASPTX_LATE_MASK |
+ CS42L42_ASPTX_SMERROR_MASK,
+ (1 << CS42L42_ASPTX_NOLRCK_SHIFT) |
+ (1 << CS42L42_ASPTX_EARLY_SHIFT) |
+ (1 << CS42L42_ASPTX_LATE_SHIFT) |
+ (1 << CS42L42_ASPTX_SMERROR_SHIFT));
+
+ regmap_update_bits(cs42l42->regmap, CS42L42_CODEC_INT_MASK,
+ CS42L42_PDN_DONE_MASK |
+ CS42L42_HSDET_AUTO_DONE_MASK,
+ (1 << CS42L42_PDN_DONE_SHIFT) |
+ (1 << CS42L42_HSDET_AUTO_DONE_SHIFT));
+
+ regmap_update_bits(cs42l42->regmap, CS42L42_SRCPL_INT_MASK,
+ CS42L42_SRCPL_ADC_LK_MASK |
+ CS42L42_SRCPL_DAC_LK_MASK |
+ CS42L42_SRCPL_ADC_UNLK_MASK |
+ CS42L42_SRCPL_DAC_UNLK_MASK,
+ (1 << CS42L42_SRCPL_ADC_LK_SHIFT) |
+ (1 << CS42L42_SRCPL_DAC_LK_SHIFT) |
+ (1 << CS42L42_SRCPL_ADC_UNLK_SHIFT) |
+ (1 << CS42L42_SRCPL_DAC_UNLK_SHIFT));
+
+ regmap_update_bits(cs42l42->regmap, CS42L42_DET_INT1_MASK,
+ CS42L42_TIP_SENSE_UNPLUG_MASK |
+ CS42L42_TIP_SENSE_PLUG_MASK |
+ CS42L42_HSBIAS_SENSE_MASK,
+ (1 << CS42L42_TIP_SENSE_UNPLUG_SHIFT) |
+ (1 << CS42L42_TIP_SENSE_PLUG_SHIFT) |
+ (1 << CS42L42_HSBIAS_SENSE_SHIFT));
+
+ regmap_update_bits(cs42l42->regmap, CS42L42_DET_INT2_MASK,
+ CS42L42_M_DETECT_TF_MASK |
+ CS42L42_M_DETECT_FT_MASK |
+ CS42L42_M_HSBIAS_HIZ_MASK |
+ CS42L42_M_SHORT_RLS_MASK |
+ CS42L42_M_SHORT_DET_MASK,
+ (1 << CS42L42_M_DETECT_TF_SHIFT) |
+ (1 << CS42L42_M_DETECT_FT_SHIFT) |
+ (1 << CS42L42_M_HSBIAS_HIZ_SHIFT) |
+ (1 << CS42L42_M_SHORT_RLS_SHIFT) |
+ (1 << CS42L42_M_SHORT_DET_SHIFT));
+
+ regmap_update_bits(cs42l42->regmap, CS42L42_VPMON_INT_MASK,
+ CS42L42_VPMON_MASK,
+ (1 << CS42L42_VPMON_SHIFT));
+
+ regmap_update_bits(cs42l42->regmap, CS42L42_PLL_LOCK_INT_MASK,
+ CS42L42_PLL_LOCK_MASK,
+ (1 << CS42L42_PLL_LOCK_SHIFT));
+
+ regmap_update_bits(cs42l42->regmap, CS42L42_TSRS_PLUG_INT_MASK,
+ CS42L42_RS_PLUG_MASK |
+ CS42L42_RS_UNPLUG_MASK |
+ CS42L42_TS_PLUG_MASK |
+ CS42L42_TS_UNPLUG_MASK,
+ (1 << CS42L42_RS_PLUG_SHIFT) |
+ (1 << CS42L42_RS_UNPLUG_SHIFT) |
+ (0 << CS42L42_TS_PLUG_SHIFT) |
+ (0 << CS42L42_TS_UNPLUG_SHIFT));
+}
+
+static void cs42l42_setup_hs_type_detect(struct cs42l42_private *cs42l42)
+{
+ unsigned int reg;
+
+ cs42l42->hs_type = CS42L42_PLUG_INVALID;
+
+ /* Latch analog controls to VP power domain */
+ regmap_update_bits(cs42l42->regmap, CS42L42_MIC_DET_CTL1,
+ CS42L42_LATCH_TO_VP_MASK |
+ CS42L42_EVENT_STAT_SEL_MASK |
+ CS42L42_HS_DET_LEVEL_MASK,
+ (1 << CS42L42_LATCH_TO_VP_SHIFT) |
+ (0 << CS42L42_EVENT_STAT_SEL_SHIFT) |
+ (cs42l42->bias_thresholds[0] <<
+ CS42L42_HS_DET_LEVEL_SHIFT));
+
+ /* Remove ground noise-suppression clamps */
+ regmap_update_bits(cs42l42->regmap,
+ CS42L42_HS_CLAMP_DISABLE,
+ CS42L42_HS_CLAMP_DISABLE_MASK,
+ (1 << CS42L42_HS_CLAMP_DISABLE_SHIFT));
+
+ /* Enable the tip sense circuit */
+ regmap_update_bits(cs42l42->regmap, CS42L42_TIPSENSE_CTL,
+ CS42L42_TIP_SENSE_CTRL_MASK |
+ CS42L42_TIP_SENSE_INV_MASK |
+ CS42L42_TIP_SENSE_DEBOUNCE_MASK,
+ (3 << CS42L42_TIP_SENSE_CTRL_SHIFT) |
+ (0 << CS42L42_TIP_SENSE_INV_SHIFT) |
+ (2 << CS42L42_TIP_SENSE_DEBOUNCE_SHIFT));
+
+ /* Save the initial status of the tip sense */
+ regmap_read(cs42l42->regmap,
+ CS42L42_TSRS_PLUG_STATUS,
+ &reg);
+ cs42l42->plug_state = (((char) reg) &
+ (CS42L42_TS_PLUG_MASK | CS42L42_TS_UNPLUG_MASK)) >>
+ CS42L42_TS_PLUG_SHIFT;
+}
+
+static const unsigned int threshold_defaults[] = {
+ CS42L42_HS_DET_LEVEL_15,
+ CS42L42_HS_DET_LEVEL_8,
+ CS42L42_HS_DET_LEVEL_4,
+ CS42L42_HS_DET_LEVEL_1
+};
+
+static int cs42l42_handle_device_data(struct i2c_client *i2c_client,
+ struct cs42l42_private *cs42l42)
+{
+ struct device_node *np = i2c_client->dev.of_node;
+ unsigned int val;
+ unsigned int thresholds[CS42L42_NUM_BIASES];
+ int ret;
+ int i;
+
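+ /* Each property is optional; absent or out-of-range values fall back to defaults */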
+ ret = of_property_read_u32(np, "cirrus,ts-inv", &val);
+
+ if (!ret) {
+ switch (val) {
+ case CS42L42_TS_INV_EN:
+ case CS42L42_TS_INV_DIS:
+ cs42l42->ts_inv = val;
+ break;
+ default:
+ dev_err(&i2c_client->dev,
+ "Wrong cirrus,ts-inv DT value %d\n",
+ val);
+ cs42l42->ts_inv = CS42L42_TS_INV_DIS;
+ }
+ } else {
+ cs42l42->ts_inv = CS42L42_TS_INV_DIS;
+ }
+
+ regmap_update_bits(cs42l42->regmap, CS42L42_TSENSE_CTL,
+ CS42L42_TS_INV_MASK,
+ (cs42l42->ts_inv << CS42L42_TS_INV_SHIFT));
+
+ ret = of_property_read_u32(np, "cirrus,ts-dbnc-rise", &val);
+
+ if (!ret) {
+ switch (val) {
+ case CS42L42_TS_DBNCE_0:
+ case CS42L42_TS_DBNCE_125:
+ case CS42L42_TS_DBNCE_250:
+ case CS42L42_TS_DBNCE_500:
+ case CS42L42_TS_DBNCE_750:
+ case CS42L42_TS_DBNCE_1000:
+ case CS42L42_TS_DBNCE_1250:
+ case CS42L42_TS_DBNCE_1500:
+ cs42l42->ts_dbnc_rise = val;
+ break;
+ default:
+ dev_err(&i2c_client->dev,
+ "Wrong cirrus,ts-dbnc-rise DT value %d\n",
+ val);
+ cs42l42->ts_dbnc_rise = CS42L42_TS_DBNCE_1000;
+ }
+ } else {
+ cs42l42->ts_dbnc_rise = CS42L42_TS_DBNCE_1000;
+ }
+
+ regmap_update_bits(cs42l42->regmap, CS42L42_TSENSE_CTL,
+ CS42L42_TS_RISE_DBNCE_TIME_MASK,
+ (cs42l42->ts_dbnc_rise <<
+ CS42L42_TS_RISE_DBNCE_TIME_SHIFT));
+
+ ret = of_property_read_u32(np, "cirrus,ts-dbnc-fall", &val);
+
+ if (!ret) {
+ switch (val) {
+ case CS42L42_TS_DBNCE_0:
+ case CS42L42_TS_DBNCE_125:
+ case CS42L42_TS_DBNCE_250:
+ case CS42L42_TS_DBNCE_500:
+ case CS42L42_TS_DBNCE_750:
+ case CS42L42_TS_DBNCE_1000:
+ case CS42L42_TS_DBNCE_1250:
+ case CS42L42_TS_DBNCE_1500:
+ cs42l42->ts_dbnc_fall = val;
+ break;
+ default:
+ dev_err(&i2c_client->dev,
+ "Wrong cirrus,ts-dbnc-fall DT value %d\n",
+ val);
+ cs42l42->ts_dbnc_fall = CS42L42_TS_DBNCE_0;
+ }
+ } else {
+ cs42l42->ts_dbnc_fall = CS42L42_TS_DBNCE_0;
+ }
+
+ regmap_update_bits(cs42l42->regmap, CS42L42_TSENSE_CTL,
+ CS42L42_TS_FALL_DBNCE_TIME_MASK,
+ (cs42l42->ts_dbnc_fall <<
+ CS42L42_TS_FALL_DBNCE_TIME_SHIFT));
+
+ ret = of_property_read_u32(np, "cirrus,btn-det-init-dbnce", &val);
+
+ if (!ret) {
+ if ((val >= CS42L42_BTN_DET_INIT_DBNCE_MIN) &&
+ (val <= CS42L42_BTN_DET_INIT_DBNCE_MAX))
+ cs42l42->btn_det_init_dbnce = val;
+ else {
+ dev_err(&i2c_client->dev,
+ "Wrong cirrus,btn-det-init-dbnce DT value %d\n",
+ val);
+ cs42l42->btn_det_init_dbnce =
+ CS42L42_BTN_DET_INIT_DBNCE_DEFAULT;
+ }
+ } else {
+ cs42l42->btn_det_init_dbnce =
+ CS42L42_BTN_DET_INIT_DBNCE_DEFAULT;
+ }
+
+ ret = of_property_read_u32(np, "cirrus,btn-det-event-dbnce", &val);
+
+ if (!ret) {
+ if ((val >= CS42L42_BTN_DET_EVENT_DBNCE_MIN) &&
+ (val <= CS42L42_BTN_DET_EVENT_DBNCE_MAX))
+ cs42l42->btn_det_event_dbnce = val;
+ else {
+ dev_err(&i2c_client->dev,
+ "Wrong cirrus,btn-det-event-dbnce DT value %d\n", val);
+ cs42l42->btn_det_event_dbnce =
+ CS42L42_BTN_DET_EVENT_DBNCE_DEFAULT;
+ }
+ } else {
+ cs42l42->btn_det_event_dbnce =
+ CS42L42_BTN_DET_EVENT_DBNCE_DEFAULT;
+ }
+
+ ret = of_property_read_u32_array(np, "cirrus,bias-lvls",
+ (u32 *)thresholds, CS42L42_NUM_BIASES);
+
+ if (!ret) {
+ for (i = 0; i < CS42L42_NUM_BIASES; i++) {
+ if ((thresholds[i] >= CS42L42_HS_DET_LEVEL_MIN) &&
+ (thresholds[i] <= CS42L42_HS_DET_LEVEL_MAX))
+ cs42l42->bias_thresholds[i] = thresholds[i];
+ else {
+ dev_err(&i2c_client->dev,
+ "Wrong cirrus,bias-lvls[%d] DT value %d\n", i,
+ thresholds[i]);
+ cs42l42->bias_thresholds[i] =
+ threshold_defaults[i];
+ }
+ }
+ } else {
+ for (i = 0; i < CS42L42_NUM_BIASES; i++)
+ cs42l42->bias_thresholds[i] = threshold_defaults[i];
+ }
+
+ ret = of_property_read_u32(np, "cirrus,hs-bias-ramp-rate", &val);
+
+ if (!ret) {
+ switch (val) {
+ case CS42L42_HSBIAS_RAMP_FAST_RISE_SLOW_FALL:
+ cs42l42->hs_bias_ramp_rate = val;
+ cs42l42->hs_bias_ramp_time = CS42L42_HSBIAS_RAMP_TIME0;
+ break;
+ case CS42L42_HSBIAS_RAMP_FAST:
+ cs42l42->hs_bias_ramp_rate = val;
+ cs42l42->hs_bias_ramp_time = CS42L42_HSBIAS_RAMP_TIME1;
+ break;
+ case CS42L42_HSBIAS_RAMP_SLOW:
+ cs42l42->hs_bias_ramp_rate = val;
+ cs42l42->hs_bias_ramp_time = CS42L42_HSBIAS_RAMP_TIME2;
+ break;
+ case CS42L42_HSBIAS_RAMP_SLOWEST:
+ cs42l42->hs_bias_ramp_rate = val;
+ cs42l42->hs_bias_ramp_time = CS42L42_HSBIAS_RAMP_TIME3;
+ break;
+ default:
+ dev_err(&i2c_client->dev,
+ "Wrong cirrus,hs-bias-ramp-rate DT value %d\n",
+ val);
+ cs42l42->hs_bias_ramp_rate = CS42L42_HSBIAS_RAMP_SLOW;
+ cs42l42->hs_bias_ramp_time = CS42L42_HSBIAS_RAMP_TIME2;
+ }
+ } else {
+ cs42l42->hs_bias_ramp_rate = CS42L42_HSBIAS_RAMP_SLOW;
+ cs42l42->hs_bias_ramp_time = CS42L42_HSBIAS_RAMP_TIME2;
+ }
+
+ regmap_update_bits(cs42l42->regmap, CS42L42_HS_BIAS_CTL,
+ CS42L42_HSBIAS_RAMP_MASK,
+ (cs42l42->hs_bias_ramp_rate <<
+ CS42L42_HSBIAS_RAMP_SHIFT));
+
+ return 0;
+}
+
+static int cs42l42_i2c_probe(struct i2c_client *i2c_client,
+ const struct i2c_device_id *id)
+{
+ struct cs42l42_private *cs42l42;
+ int ret, i;
+ unsigned int devid = 0;
+ unsigned int reg;
+
+ cs42l42 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs42l42_private),
+ GFP_KERNEL);
+ if (!cs42l42)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c_client, cs42l42);
+
+ cs42l42->regmap = devm_regmap_init_i2c(i2c_client, &cs42l42_regmap);
+ if (IS_ERR(cs42l42->regmap)) {
+ ret = PTR_ERR(cs42l42->regmap);
+ dev_err(&i2c_client->dev, "regmap_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cs42l42->supplies); i++)
+ cs42l42->supplies[i].supply = cs42l42_supply_names[i];
+
+ ret = devm_regulator_bulk_get(&i2c_client->dev,
+ ARRAY_SIZE(cs42l42->supplies),
+ cs42l42->supplies);
+ if (ret != 0) {
+ dev_err(&i2c_client->dev,
+ "Failed to request supplies: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(cs42l42->supplies),
+ cs42l42->supplies);
+ if (ret != 0) {
+ dev_err(&i2c_client->dev,
+ "Failed to enable supplies: %d\n", ret);
+ return ret;
+ }
+
+ /* Reset the Device */
+ cs42l42->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
+ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(cs42l42->reset_gpio))
+ return PTR_ERR(cs42l42->reset_gpio);
+
+ if (cs42l42->reset_gpio) {
+ dev_dbg(&i2c_client->dev, "Found reset GPIO\n");
+ gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
+ }
+ mdelay(3);
+
+ /* Request IRQ */
+ ret = devm_request_threaded_irq(&i2c_client->dev,
+ i2c_client->irq,
+ NULL, cs42l42_irq_thread,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ "cs42l42", cs42l42);
+
+ if (ret != 0)
+ dev_err(&i2c_client->dev,
+ "Failed to request IRQ: %d\n", ret);
+
+ /* initialize codec */
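+ /*
+  * The device ID is spread across three registers: DEVID_AB and
+  * DEVID_CD each contribute a byte and DEVID_E its upper nibble,
+  * forming the 20-bit value compared against CS42L42_CHIP_ID.
+  */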
+ ret = regmap_read(cs42l42->regmap, CS42L42_DEVID_AB, &reg);
+ devid = (reg & 0xFF) << 12;
+
+ ret = regmap_read(cs42l42->regmap, CS42L42_DEVID_CD, &reg);
+ devid |= (reg & 0xFF) << 4;
+
+ ret = regmap_read(cs42l42->regmap, CS42L42_DEVID_E, &reg);
+ devid |= (reg & 0xF0) >> 4;
+
+ if (devid != CS42L42_CHIP_ID) {
+ ret = -ENODEV;
+ dev_err(&i2c_client->dev,
+ "CS42L42 Device ID (%X). Expected %X\n",
+ devid, CS42L42_CHIP_ID);
+ return ret;
+ }
+
+ ret = regmap_read(cs42l42->regmap, CS42L42_REVID, &reg);
+ if (ret < 0) {
+ dev_err(&i2c_client->dev, "Get Revision ID failed\n");
+ return ret;
+ }
+
+ dev_info(&i2c_client->dev,
+ "Cirrus Logic CS42L42, Revision: %02X\n", reg & 0xFF);
+
+ /* Power up the codec */
+ regmap_update_bits(cs42l42->regmap, CS42L42_PWR_CTL1,
+ CS42L42_ASP_DAO_PDN_MASK |
+ CS42L42_ASP_DAI_PDN_MASK |
+ CS42L42_MIXER_PDN_MASK |
+ CS42L42_EQ_PDN_MASK |
+ CS42L42_HP_PDN_MASK |
+ CS42L42_ADC_PDN_MASK |
+ CS42L42_PDN_ALL_MASK,
+ (1 << CS42L42_ASP_DAO_PDN_SHIFT) |
+ (1 << CS42L42_ASP_DAI_PDN_SHIFT) |
+ (1 << CS42L42_MIXER_PDN_SHIFT) |
+ (1 << CS42L42_EQ_PDN_SHIFT) |
+ (1 << CS42L42_HP_PDN_SHIFT) |
+ (1 << CS42L42_ADC_PDN_SHIFT) |
+ (0 << CS42L42_PDN_ALL_SHIFT));
+
+ if (i2c_client->dev.of_node) {
+ ret = cs42l42_handle_device_data(i2c_client, cs42l42);
+ if (ret != 0)
+ return ret;
+ }
+
+ /* Setup headset detection */
+ cs42l42_setup_hs_type_detect(cs42l42);
+
+ /* Mask/Unmask Interrupts */
+ cs42l42_set_interrupt_masks(cs42l42);
+
+ /* Register codec for machine driver */
+ ret = snd_soc_register_codec(&i2c_client->dev,
+ &soc_codec_dev_cs42l42, &cs42l42_dai, 1);
+ if (ret < 0)
+ goto err_disable;
+ return 0;
+
+err_disable:
+ regulator_bulk_disable(ARRAY_SIZE(cs42l42->supplies),
+ cs42l42->supplies);
+ return ret;
+}
+
+static int cs42l42_i2c_remove(struct i2c_client *i2c_client)
+{
+ struct cs42l42_private *cs42l42 = i2c_get_clientdata(i2c_client);
+
+ snd_soc_unregister_codec(&i2c_client->dev);
+
+ /* Hold down reset */
+ if (cs42l42->reset_gpio)
+ gpiod_set_value_cansleep(cs42l42->reset_gpio, 0);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int cs42l42_runtime_suspend(struct device *dev)
+{
+ struct cs42l42_private *cs42l42 = dev_get_drvdata(dev);
+
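+ /*
+  * Route register writes to the cache and mark it dirty so that
+  * regcache_sync() can restore the register state on resume.
+  */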
+ regcache_cache_only(cs42l42->regmap, true);
+ regcache_mark_dirty(cs42l42->regmap);
+
+ /* Hold down reset */
+ if (cs42l42->reset_gpio)
+ gpiod_set_value_cansleep(cs42l42->reset_gpio, 0);
+
+ /* remove power */
+ regulator_bulk_disable(ARRAY_SIZE(cs42l42->supplies),
+ cs42l42->supplies);
+
+ return 0;
+}
+
+static int cs42l42_runtime_resume(struct device *dev)
+{
+ struct cs42l42_private *cs42l42 = dev_get_drvdata(dev);
+ int ret;
+
+ /* Enable power */
+ ret = regulator_bulk_enable(ARRAY_SIZE(cs42l42->supplies),
+ cs42l42->supplies);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (cs42l42->reset_gpio)
+ gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
+
+ regcache_cache_only(cs42l42->regmap, false);
+ regcache_sync(cs42l42->regmap);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops cs42l42_runtime_pm = {
+ SET_RUNTIME_PM_OPS(cs42l42_runtime_suspend, cs42l42_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id cs42l42_of_match[] = {
+ { .compatible = "cirrus,cs42l42", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cs42l42_of_match);
+
+static const struct i2c_device_id cs42l42_id[] = {
+ {"cs42l42", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, cs42l42_id);
+
+static struct i2c_driver cs42l42_i2c_driver = {
+ .driver = {
+ .name = "cs42l42",
+ .pm = &cs42l42_runtime_pm,
+ .of_match_table = cs42l42_of_match,
+ },
+ .id_table = cs42l42_id,
+ .probe = cs42l42_i2c_probe,
+ .remove = cs42l42_i2c_remove,
+};
+
+module_i2c_driver(cs42l42_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC CS42L42 driver");
+MODULE_AUTHOR("James Schulman, Cirrus Logic Inc, <james.schulman@cirrus.com>");
+MODULE_AUTHOR("Brian Austin, Cirrus Logic Inc, <brian.austin@cirrus.com>");
+MODULE_AUTHOR("Michael White, Cirrus Logic Inc, <michael.white@cirrus.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs42l42.h b/sound/soc/codecs/cs42l42.h
new file mode 100644
index 000000000000..d87a0a5322d5
--- /dev/null
+++ b/sound/soc/codecs/cs42l42.h
@@ -0,0 +1,776 @@
+/*
+ * cs42l42.h -- CS42L42 ALSA SoC audio driver header
+ *
+ * Copyright 2016 Cirrus Logic, Inc.
+ *
+ * Author: James Schulman <james.schulman@cirrus.com>
+ * Author: Brian Austin <brian.austin@cirrus.com>
+ * Author: Michael White <michael.white@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __CS42L42_H__
+#define __CS42L42_H__
+
+#define CS42L42_PAGE_REGISTER 0x00 /* Page Select Register */
+#define CS42L42_WIN_START 0x00
+#define CS42L42_WIN_LEN 0x100
+#define CS42L42_RANGE_MIN 0x00
+#define CS42L42_RANGE_MAX 0x7F
+
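+/* Register addresses below encode the page number in the upper byte */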
+#define CS42L42_PAGE_10 0x1000
+#define CS42L42_PAGE_11 0x1100
+#define CS42L42_PAGE_12 0x1200
+#define CS42L42_PAGE_13 0x1300
+#define CS42L42_PAGE_15 0x1500
+#define CS42L42_PAGE_19 0x1900
+#define CS42L42_PAGE_1B 0x1B00
+#define CS42L42_PAGE_1C 0x1C00
+#define CS42L42_PAGE_1D 0x1D00
+#define CS42L42_PAGE_1F 0x1F00
+#define CS42L42_PAGE_20 0x2000
+#define CS42L42_PAGE_21 0x2100
+#define CS42L42_PAGE_23 0x2300
+#define CS42L42_PAGE_24 0x2400
+#define CS42L42_PAGE_25 0x2500
+#define CS42L42_PAGE_26 0x2600
+#define CS42L42_PAGE_28 0x2800
+#define CS42L42_PAGE_29 0x2900
+#define CS42L42_PAGE_2A 0x2A00
+#define CS42L42_PAGE_30 0x3000
+
+#define CS42L42_CHIP_ID 0x42A42
+
+/* Page 0x10 Global Registers */
+#define CS42L42_DEVID_AB (CS42L42_PAGE_10 + 0x01)
+#define CS42L42_DEVID_CD (CS42L42_PAGE_10 + 0x02)
+#define CS42L42_DEVID_E (CS42L42_PAGE_10 + 0x03)
+#define CS42L42_FABID (CS42L42_PAGE_10 + 0x04)
+#define CS42L42_REVID (CS42L42_PAGE_10 + 0x05)
+#define CS42L42_FRZ_CTL (CS42L42_PAGE_10 + 0x06)
+
+#define CS42L42_SRC_CTL (CS42L42_PAGE_10 + 0x07)
+#define CS42L42_SRC_BYPASS_DAC_SHIFT 1
+#define CS42L42_SRC_BYPASS_DAC_MASK (1 << CS42L42_SRC_BYPASS_DAC_SHIFT)
+
+#define CS42L42_MCLK_STATUS (CS42L42_PAGE_10 + 0x08)
+
+#define CS42L42_MCLK_CTL (CS42L42_PAGE_10 + 0x09)
+#define CS42L42_INTERNAL_FS_SHIFT 1
+#define CS42L42_INTERNAL_FS_MASK (1 << CS42L42_INTERNAL_FS_SHIFT)
+
+#define CS42L42_SFTRAMP_RATE (CS42L42_PAGE_10 + 0x0A)
+#define CS42L42_I2C_DEBOUNCE (CS42L42_PAGE_10 + 0x0E)
+#define CS42L42_I2C_STRETCH (CS42L42_PAGE_10 + 0x0F)
+#define CS42L42_I2C_TIMEOUT (CS42L42_PAGE_10 + 0x10)
+
+/* Page 0x11 Power and Headset Detect Registers */
+#define CS42L42_PWR_CTL1 (CS42L42_PAGE_11 + 0x01)
+#define CS42L42_ASP_DAO_PDN_SHIFT 7
+#define CS42L42_ASP_DAO_PDN_MASK (1 << CS42L42_ASP_DAO_PDN_SHIFT)
+#define CS42L42_ASP_DAI_PDN_SHIFT 6
+#define CS42L42_ASP_DAI_PDN_MASK (1 << CS42L42_ASP_DAI_PDN_SHIFT)
+#define CS42L42_MIXER_PDN_SHIFT 5
+#define CS42L42_MIXER_PDN_MASK (1 << CS42L42_MIXER_PDN_SHIFT)
+#define CS42L42_EQ_PDN_SHIFT 4
+#define CS42L42_EQ_PDN_MASK (1 << CS42L42_EQ_PDN_SHIFT)
+#define CS42L42_HP_PDN_SHIFT 3
+#define CS42L42_HP_PDN_MASK (1 << CS42L42_HP_PDN_SHIFT)
+#define CS42L42_ADC_PDN_SHIFT 2
+#define CS42L42_ADC_PDN_MASK (1 << CS42L42_ADC_PDN_SHIFT)
+#define CS42L42_PDN_ALL_SHIFT 0
+#define CS42L42_PDN_ALL_MASK (1 << CS42L42_PDN_ALL_SHIFT)
+
+#define CS42L42_PWR_CTL2 (CS42L42_PAGE_11 + 0x02)
+#define CS42L42_ADC_SRC_PDNB_SHIFT 0
+#define CS42L42_ADC_SRC_PDNB_MASK (1 << CS42L42_ADC_SRC_PDNB_SHIFT)
+#define CS42L42_DAC_SRC_PDNB_SHIFT 1
+#define CS42L42_DAC_SRC_PDNB_MASK (1 << CS42L42_DAC_SRC_PDNB_SHIFT)
+#define CS42L42_ASP_DAI1_PDN_SHIFT 2
+#define CS42L42_ASP_DAI1_PDN_MASK (1 << CS42L42_ASP_DAI1_PDN_SHIFT)
+#define CS42L42_SRC_PDN_OVERRIDE_SHIFT 3
+#define CS42L42_SRC_PDN_OVERRIDE_MASK (1 << CS42L42_SRC_PDN_OVERRIDE_SHIFT)
+#define CS42L42_DISCHARGE_FILT_SHIFT 4
+#define CS42L42_DISCHARGE_FILT_MASK (1 << CS42L42_DISCHARGE_FILT_SHIFT)
+
+#define CS42L42_PWR_CTL3 (CS42L42_PAGE_11 + 0x03)
+#define CS42L42_RING_SENSE_PDNB_SHIFT 1
+#define CS42L42_RING_SENSE_PDNB_MASK (1 << \
+ CS42L42_RING_SENSE_PDNB_SHIFT)
+#define CS42L42_VPMON_PDNB_SHIFT 2
+#define CS42L42_VPMON_PDNB_MASK (1 << \
+ CS42L42_VPMON_PDNB_SHIFT)
+#define CS42L42_SW_CLK_STP_STAT_SEL_SHIFT 5
+#define CS42L42_SW_CLK_STP_STAT_SEL_MASK (3 << \
+ CS42L42_SW_CLK_STP_STAT_SEL_SHIFT)
+
+#define CS42L42_RSENSE_CTL1 (CS42L42_PAGE_11 + 0x04)
+#define CS42L42_RS_TRIM_R_SHIFT 0
+#define CS42L42_RS_TRIM_R_MASK (1 << \
+ CS42L42_RS_TRIM_R_SHIFT)
+#define CS42L42_RS_TRIM_T_SHIFT 1
+#define CS42L42_RS_TRIM_T_MASK (1 << \
+ CS42L42_RS_TRIM_T_SHIFT)
+#define CS42L42_HPREF_RS_SHIFT 2
+#define CS42L42_HPREF_RS_MASK (1 << \
+ CS42L42_HPREF_RS_SHIFT)
+#define CS42L42_HSBIAS_FILT_REF_RS_SHIFT 3
+#define CS42L42_HSBIAS_FILT_REF_RS_MASK (1 << \
+ CS42L42_HSBIAS_FILT_REF_RS_SHIFT)
+#define CS42L42_RING_SENSE_PU_HIZ_SHIFT 6
+#define CS42L42_RING_SENSE_PU_HIZ_MASK (1 << \
+ CS42L42_RING_SENSE_PU_HIZ_SHIFT)
+
+#define CS42L42_RSENSE_CTL2 (CS42L42_PAGE_11 + 0x05)
+#define CS42L42_TS_RS_GATE_SHIFT 7
+#define CS42L42_TS_RS_GATE_MASK (1 << CS42L42_TS_RS_GATE_SHIFT)
+
+#define CS42L42_OSC_SWITCH (CS42L42_PAGE_11 + 0x07)
+#define CS42L42_SCLK_PRESENT_SHIFT 0
+#define CS42L42_SCLK_PRESENT_MASK (1 << CS42L42_SCLK_PRESENT_SHIFT)
+
+#define CS42L42_OSC_SWITCH_STATUS (CS42L42_PAGE_11 + 0x09)
+#define CS42L42_OSC_SW_SEL_STAT_SHIFT 0
+#define CS42L42_OSC_SW_SEL_STAT_MASK (3 << CS42L42_OSC_SW_SEL_STAT_SHIFT)
+#define CS42L42_OSC_PDNB_STAT_SHIFT 2
+#define CS42L42_OSC_PDNB_STAT_MASK (1 << CS42L42_OSC_PDNB_STAT_SHIFT)
+
+#define CS42L42_RSENSE_CTL3 (CS42L42_PAGE_11 + 0x12)
+#define CS42L42_RS_RISE_DBNCE_TIME_SHIFT 0
+#define CS42L42_RS_RISE_DBNCE_TIME_MASK (7 << \
+ CS42L42_RS_RISE_DBNCE_TIME_SHIFT)
+#define CS42L42_RS_FALL_DBNCE_TIME_SHIFT 3
+#define CS42L42_RS_FALL_DBNCE_TIME_MASK (7 << \
+ CS42L42_RS_FALL_DBNCE_TIME_SHIFT)
+#define CS42L42_RS_PU_EN_SHIFT 6
+#define CS42L42_RS_PU_EN_MASK (1 << \
+ CS42L42_RS_PU_EN_SHIFT)
+#define CS42L42_RS_INV_SHIFT 7
+#define CS42L42_RS_INV_MASK (1 << \
+ CS42L42_RS_INV_SHIFT)
+
+#define CS42L42_TSENSE_CTL (CS42L42_PAGE_11 + 0x13)
+#define CS42L42_TS_RISE_DBNCE_TIME_SHIFT 0
+#define CS42L42_TS_RISE_DBNCE_TIME_MASK (7 << \
+ CS42L42_TS_RISE_DBNCE_TIME_SHIFT)
+#define CS42L42_TS_FALL_DBNCE_TIME_SHIFT 3
+#define CS42L42_TS_FALL_DBNCE_TIME_MASK (7 << \
+ CS42L42_TS_FALL_DBNCE_TIME_SHIFT)
+#define CS42L42_TS_INV_SHIFT 7
+#define CS42L42_TS_INV_MASK (1 << \
+ CS42L42_TS_INV_SHIFT)
+
+#define CS42L42_TSRS_INT_DISABLE (CS42L42_PAGE_11 + 0x14)
+#define CS42L42_D_RS_PLUG_DBNC_SHIFT 0
+#define CS42L42_D_RS_PLUG_DBNC_MASK (1 << CS42L42_D_RS_PLUG_DBNC_SHIFT)
+#define CS42L42_D_RS_UNPLUG_DBNC_SHIFT 1
+#define CS42L42_D_RS_UNPLUG_DBNC_MASK (1 << CS42L42_D_RS_UNPLUG_DBNC_SHIFT)
+#define CS42L42_D_TS_PLUG_DBNC_SHIFT 2
+#define CS42L42_D_TS_PLUG_DBNC_MASK (1 << CS42L42_D_TS_PLUG_DBNC_SHIFT)
+#define CS42L42_D_TS_UNPLUG_DBNC_SHIFT 3
+#define CS42L42_D_TS_UNPLUG_DBNC_MASK (1 << CS42L42_D_TS_UNPLUG_DBNC_SHIFT)
+
+#define CS42L42_TRSENSE_STATUS (CS42L42_PAGE_11 + 0x15)
+#define CS42L42_RS_PLUG_DBNC_SHIFT 0
+#define CS42L42_RS_PLUG_DBNC_MASK (1 << CS42L42_RS_PLUG_DBNC_SHIFT)
+#define CS42L42_RS_UNPLUG_DBNC_SHIFT 1
+#define CS42L42_RS_UNPLUG_DBNC_MASK (1 << CS42L42_RS_UNPLUG_DBNC_SHIFT)
+#define CS42L42_TS_PLUG_DBNC_SHIFT 2
+#define CS42L42_TS_PLUG_DBNC_MASK (1 << CS42L42_TS_PLUG_DBNC_SHIFT)
+#define CS42L42_TS_UNPLUG_DBNC_SHIFT 3
+#define CS42L42_TS_UNPLUG_DBNC_MASK (1 << CS42L42_TS_UNPLUG_DBNC_SHIFT)
+
+#define CS42L42_HSDET_CTL1 (CS42L42_PAGE_11 + 0x1F)
+#define CS42L42_HSDET_COMP1_LVL_SHIFT 0
+#define CS42L42_HSDET_COMP1_LVL_MASK (15 << CS42L42_HSDET_COMP1_LVL_SHIFT)
+#define CS42L42_HSDET_COMP2_LVL_SHIFT 4
+#define CS42L42_HSDET_COMP2_LVL_MASK (15 << CS42L42_HSDET_COMP2_LVL_SHIFT)
+
+#define CS42L42_HSDET_CTL2 (CS42L42_PAGE_11 + 0x20)
+#define CS42L42_HSDET_AUTO_TIME_SHIFT 0
+#define CS42L42_HSDET_AUTO_TIME_MASK (3 << CS42L42_HSDET_AUTO_TIME_SHIFT)
+#define CS42L42_HSBIAS_REF_SHIFT 3
+#define CS42L42_HSBIAS_REF_MASK (1 << CS42L42_HSBIAS_REF_SHIFT)
+#define CS42L42_HSDET_SET_SHIFT 4
+#define CS42L42_HSDET_SET_MASK (3 << CS42L42_HSDET_SET_SHIFT)
+#define CS42L42_HSDET_CTRL_SHIFT 6
+#define CS42L42_HSDET_CTRL_MASK (3 << CS42L42_HSDET_CTRL_SHIFT)
+
+#define CS42L42_HS_SWITCH_CTL (CS42L42_PAGE_11 + 0x21)
+#define CS42L42_SW_GNDHS_HS4_SHIFT 0
+#define CS42L42_SW_GNDHS_HS4_MASK (1 << CS42L42_SW_GNDHS_HS4_SHIFT)
+#define CS42L42_SW_GNDHS_HS3_SHIFT 1
+#define CS42L42_SW_GNDHS_HS3_MASK (1 << CS42L42_SW_GNDHS_HS3_SHIFT)
+#define CS42L42_SW_HSB_HS4_SHIFT 2
+#define CS42L42_SW_HSB_HS4_MASK (1 << CS42L42_SW_HSB_HS4_SHIFT)
+#define CS42L42_SW_HSB_HS3_SHIFT 3
+#define CS42L42_SW_HSB_HS3_MASK (1 << CS42L42_SW_HSB_HS3_SHIFT)
+#define CS42L42_SW_HSB_FILT_HS4_SHIFT 4
+#define CS42L42_SW_HSB_FILT_HS4_MASK (1 << CS42L42_SW_HSB_FILT_HS4_SHIFT)
+#define CS42L42_SW_HSB_FILT_HS3_SHIFT 5
+#define CS42L42_SW_HSB_FILT_HS3_MASK (1 << CS42L42_SW_HSB_FILT_HS3_SHIFT)
+#define CS42L42_SW_REF_HS4_SHIFT 6
+#define CS42L42_SW_REF_HS4_MASK (1 << CS42L42_SW_REF_HS4_SHIFT)
+#define CS42L42_SW_REF_HS3_SHIFT 7
+#define CS42L42_SW_REF_HS3_MASK (1 << CS42L42_SW_REF_HS3_SHIFT)
+
+#define CS42L42_HS_DET_STATUS (CS42L42_PAGE_11 + 0x24)
+#define CS42L42_HSDET_TYPE_SHIFT 0
+#define CS42L42_HSDET_TYPE_MASK (3 << CS42L42_HSDET_TYPE_SHIFT)
+#define CS42L42_HSDET_COMP1_OUT_SHIFT 6
+#define CS42L42_HSDET_COMP1_OUT_MASK (1 << CS42L42_HSDET_COMP1_OUT_SHIFT)
+#define CS42L42_HSDET_COMP2_OUT_SHIFT 7
+#define CS42L42_HSDET_COMP2_OUT_MASK (1 << CS42L42_HSDET_COMP2_OUT_SHIFT)
+#define CS42L42_PLUG_CTIA 0
+#define CS42L42_PLUG_OMTP 1
+#define CS42L42_PLUG_HEADPHONE 2
+#define CS42L42_PLUG_INVALID 3
+
+#define CS42L42_HS_CLAMP_DISABLE (CS42L42_PAGE_11 + 0x29)
+#define CS42L42_HS_CLAMP_DISABLE_SHIFT 0
+#define CS42L42_HS_CLAMP_DISABLE_MASK (1 << CS42L42_HS_CLAMP_DISABLE_SHIFT)
+
+/* Page 0x12 Clocking Registers */
+#define CS42L42_MCLK_SRC_SEL (CS42L42_PAGE_12 + 0x01)
+#define CS42L42_MCLKDIV_SHIFT 1
+#define CS42L42_MCLKDIV_MASK (1 << CS42L42_MCLKDIV_SHIFT)
+#define CS42L42_MCLK_SRC_SEL_SHIFT 0
+#define CS42L42_MCLK_SRC_SEL_MASK (1 << CS42L42_MCLK_SRC_SEL_SHIFT)
+
+#define CS42L42_SPDIF_CLK_CFG (CS42L42_PAGE_12 + 0x02)
+#define CS42L42_FSYNC_PW_LOWER (CS42L42_PAGE_12 + 0x03)
+
+#define CS42L42_FSYNC_PW_UPPER (CS42L42_PAGE_12 + 0x04)
+#define CS42L42_FSYNC_PULSE_WIDTH_SHIFT 0
+#define CS42L42_FSYNC_PULSE_WIDTH_MASK (0xff << \
+ CS42L42_FSYNC_PULSE_WIDTH_SHIFT)
+
+#define CS42L42_FSYNC_P_LOWER (CS42L42_PAGE_12 + 0x05)
+
+#define CS42L42_FSYNC_P_UPPER (CS42L42_PAGE_12 + 0x06)
+#define CS42L42_FSYNC_PERIOD_SHIFT 0
+#define CS42L42_FSYNC_PERIOD_MASK (0xff << CS42L42_FSYNC_PERIOD_SHIFT)
+
+#define CS42L42_ASP_CLK_CFG (CS42L42_PAGE_12 + 0x07)
+#define CS42L42_ASP_SCLK_EN_SHIFT 5
+#define CS42L42_ASP_SCLK_EN_MASK (1 << CS42L42_ASP_SCLK_EN_SHIFT)
+#define CS42L42_ASP_MASTER_MODE 0x01
+#define CS42L42_ASP_SLAVE_MODE 0x00
+#define CS42L42_ASP_MODE_SHIFT 4
+#define CS42L42_ASP_MODE_MASK (1 << CS42L42_ASP_MODE_SHIFT)
+#define CS42L42_ASP_SCPOL_IN_DAC_SHIFT 2
+#define CS42L42_ASP_SCPOL_IN_DAC_MASK (1 << CS42L42_ASP_SCPOL_IN_DAC_SHIFT)
+#define CS42L42_ASP_LCPOL_IN_SHIFT 0
+#define CS42L42_ASP_LCPOL_IN_MASK (1 << CS42L42_ASP_LCPOL_IN_SHIFT)
+#define CS42L42_ASP_POL_INV 1
+
+#define CS42L42_ASP_FRM_CFG (CS42L42_PAGE_12 + 0x08)
+#define CS42L42_ASP_STP_SHIFT 4
+#define CS42L42_ASP_STP_MASK (1 << CS42L42_ASP_STP_SHIFT)
+#define CS42L42_ASP_5050_SHIFT 3
+#define CS42L42_ASP_5050_MASK (1 << CS42L42_ASP_5050_SHIFT)
+#define CS42L42_ASP_FSD_SHIFT 0
+#define CS42L42_ASP_FSD_MASK (7 << CS42L42_ASP_FSD_SHIFT)
+#define CS42L42_ASP_FSD_0_5 1
+#define CS42L42_ASP_FSD_1_0 2
+#define CS42L42_ASP_FSD_1_5 3
+#define CS42L42_ASP_FSD_2_0 4
+
+#define CS42L42_FS_RATE_EN (CS42L42_PAGE_12 + 0x09)
+#define CS42L42_FS_EN_SHIFT 0
+#define CS42L42_FS_EN_MASK (0xf << CS42L42_FS_EN_SHIFT)
+#define CS42L42_FS_EN_IASRC_96K 0x1
+#define CS42L42_FS_EN_OASRC_96K 0x2
+
+#define CS42L42_IN_ASRC_CLK (CS42L42_PAGE_12 + 0x0A)
+#define CS42L42_CLK_IASRC_SEL_SHIFT 0
+#define CS42L42_CLK_IASRC_SEL_MASK (1 << CS42L42_CLK_IASRC_SEL_SHIFT)
+#define CS42L42_CLK_IASRC_SEL_12 1
+
+#define CS42L42_OUT_ASRC_CLK (CS42L42_PAGE_12 + 0x0B)
+#define CS42L42_CLK_OASRC_SEL_SHIFT 0
+#define CS42L42_CLK_OASRC_SEL_MASK (1 << CS42L42_CLK_OASRC_SEL_SHIFT)
+#define CS42L42_CLK_OASRC_SEL_12 1
+
+#define CS42L42_PLL_DIV_CFG1 (CS42L42_PAGE_12 + 0x0C)
+#define CS42L42_SCLK_PREDIV_SHIFT 0
+#define CS42L42_SCLK_PREDIV_MASK (3 << CS42L42_SCLK_PREDIV_SHIFT)
+
+/* Page 0x13 Interrupt Registers */
+/* Interrupts */
+#define CS42L42_ADC_OVFL_STATUS (CS42L42_PAGE_13 + 0x01)
+#define CS42L42_MIXER_STATUS (CS42L42_PAGE_13 + 0x02)
+#define CS42L42_SRC_STATUS (CS42L42_PAGE_13 + 0x03)
+#define CS42L42_ASP_RX_STATUS (CS42L42_PAGE_13 + 0x04)
+#define CS42L42_ASP_TX_STATUS (CS42L42_PAGE_13 + 0x05)
+#define CS42L42_CODEC_STATUS (CS42L42_PAGE_13 + 0x08)
+#define CS42L42_DET_INT_STATUS1 (CS42L42_PAGE_13 + 0x09)
+#define CS42L42_DET_INT_STATUS2 (CS42L42_PAGE_13 + 0x0A)
+#define CS42L42_SRCPL_INT_STATUS (CS42L42_PAGE_13 + 0x0B)
+#define CS42L42_VPMON_STATUS (CS42L42_PAGE_13 + 0x0D)
+#define CS42L42_PLL_LOCK_STATUS (CS42L42_PAGE_13 + 0x0E)
+#define CS42L42_TSRS_PLUG_STATUS (CS42L42_PAGE_13 + 0x0F)
+/* Masks */
+#define CS42L42_ADC_OVFL_INT_MASK (CS42L42_PAGE_13 + 0x16)
+#define CS42L42_ADC_OVFL_SHIFT 0
+#define CS42L42_ADC_OVFL_MASK (1 << CS42L42_ADC_OVFL_SHIFT)
+#define CS42L42_ADC_OVFL_VAL_MASK CS42L42_ADC_OVFL_MASK
+
+#define CS42L42_MIXER_INT_MASK (CS42L42_PAGE_13 + 0x17)
+#define CS42L42_MIX_CHB_OVFL_SHIFT 0
+#define CS42L42_MIX_CHB_OVFL_MASK (1 << CS42L42_MIX_CHB_OVFL_SHIFT)
+#define CS42L42_MIX_CHA_OVFL_SHIFT 1
+#define CS42L42_MIX_CHA_OVFL_MASK (1 << CS42L42_MIX_CHA_OVFL_SHIFT)
+#define CS42L42_EQ_OVFL_SHIFT 2
+#define CS42L42_EQ_OVFL_MASK (1 << CS42L42_EQ_OVFL_SHIFT)
+#define CS42L42_EQ_BIQUAD_OVFL_SHIFT 3
+#define CS42L42_EQ_BIQUAD_OVFL_MASK (1 << CS42L42_EQ_BIQUAD_OVFL_SHIFT)
+#define CS42L42_MIXER_VAL_MASK (CS42L42_MIX_CHB_OVFL_MASK | \
+ CS42L42_MIX_CHA_OVFL_MASK | \
+ CS42L42_EQ_OVFL_MASK | \
+ CS42L42_EQ_BIQUAD_OVFL_MASK)
+
+#define CS42L42_SRC_INT_MASK (CS42L42_PAGE_13 + 0x18)
+#define CS42L42_SRC_ILK_SHIFT 0
+#define CS42L42_SRC_ILK_MASK (1 << CS42L42_SRC_ILK_SHIFT)
+#define CS42L42_SRC_OLK_SHIFT 1
+#define CS42L42_SRC_OLK_MASK (1 << CS42L42_SRC_OLK_SHIFT)
+#define CS42L42_SRC_IUNLK_SHIFT 2
+#define CS42L42_SRC_IUNLK_MASK (1 << CS42L42_SRC_IUNLK_SHIFT)
+#define CS42L42_SRC_OUNLK_SHIFT 3
+#define CS42L42_SRC_OUNLK_MASK (1 << CS42L42_SRC_OUNLK_SHIFT)
+#define CS42L42_SRC_VAL_MASK (CS42L42_SRC_ILK_MASK | \
+ CS42L42_SRC_OLK_MASK | \
+ CS42L42_SRC_IUNLK_MASK | \
+ CS42L42_SRC_OUNLK_MASK)
+
+#define CS42L42_ASP_RX_INT_MASK (CS42L42_PAGE_13 + 0x19)
+#define CS42L42_ASPRX_NOLRCK_SHIFT 0
+#define CS42L42_ASPRX_NOLRCK_MASK (1 << CS42L42_ASPRX_NOLRCK_SHIFT)
+#define CS42L42_ASPRX_EARLY_SHIFT 1
+#define CS42L42_ASPRX_EARLY_MASK (1 << CS42L42_ASPRX_EARLY_SHIFT)
+#define CS42L42_ASPRX_LATE_SHIFT 2
+#define CS42L42_ASPRX_LATE_MASK (1 << CS42L42_ASPRX_LATE_SHIFT)
+#define CS42L42_ASPRX_ERROR_SHIFT 3
+#define CS42L42_ASPRX_ERROR_MASK (1 << CS42L42_ASPRX_ERROR_SHIFT)
+#define CS42L42_ASPRX_OVLD_SHIFT 4
+#define CS42L42_ASPRX_OVLD_MASK (1 << CS42L42_ASPRX_OVLD_SHIFT)
+#define CS42L42_ASP_RX_VAL_MASK (CS42L42_ASPRX_NOLRCK_MASK | \
+ CS42L42_ASPRX_EARLY_MASK | \
+ CS42L42_ASPRX_LATE_MASK | \
+ CS42L42_ASPRX_ERROR_MASK | \
+ CS42L42_ASPRX_OVLD_MASK)
+
+#define CS42L42_ASP_TX_INT_MASK (CS42L42_PAGE_13 + 0x1A)
+#define CS42L42_ASPTX_NOLRCK_SHIFT 0
+#define CS42L42_ASPTX_NOLRCK_MASK (1 << CS42L42_ASPTX_NOLRCK_SHIFT)
+#define CS42L42_ASPTX_EARLY_SHIFT 1
+#define CS42L42_ASPTX_EARLY_MASK (1 << CS42L42_ASPTX_EARLY_SHIFT)
+#define CS42L42_ASPTX_LATE_SHIFT 2
+#define CS42L42_ASPTX_LATE_MASK (1 << CS42L42_ASPTX_LATE_SHIFT)
+#define CS42L42_ASPTX_SMERROR_SHIFT 3
+#define CS42L42_ASPTX_SMERROR_MASK (1 << CS42L42_ASPTX_SMERROR_SHIFT)
+#define CS42L42_ASP_TX_VAL_MASK (CS42L42_ASPTX_NOLRCK_MASK | \
+ CS42L42_ASPTX_EARLY_MASK | \
+ CS42L42_ASPTX_LATE_MASK | \
+ CS42L42_ASPTX_SMERROR_MASK)
+
+#define CS42L42_CODEC_INT_MASK (CS42L42_PAGE_13 + 0x1B)
+#define CS42L42_PDN_DONE_SHIFT 0
+#define CS42L42_PDN_DONE_MASK (1 << CS42L42_PDN_DONE_SHIFT)
+#define CS42L42_HSDET_AUTO_DONE_SHIFT 1
+#define CS42L42_HSDET_AUTO_DONE_MASK (1 << CS42L42_HSDET_AUTO_DONE_SHIFT)
+#define CS42L42_CODEC_VAL_MASK (CS42L42_PDN_DONE_MASK | \
+ CS42L42_HSDET_AUTO_DONE_MASK)
+
+#define CS42L42_SRCPL_INT_MASK (CS42L42_PAGE_13 + 0x1C)
+#define CS42L42_SRCPL_ADC_LK_SHIFT 0
+#define CS42L42_SRCPL_ADC_LK_MASK (1 << CS42L42_SRCPL_ADC_LK_SHIFT)
+#define CS42L42_SRCPL_DAC_LK_SHIFT 2
+#define CS42L42_SRCPL_DAC_LK_MASK (1 << CS42L42_SRCPL_DAC_LK_SHIFT)
+#define CS42L42_SRCPL_ADC_UNLK_SHIFT 5
+#define CS42L42_SRCPL_ADC_UNLK_MASK (1 << CS42L42_SRCPL_ADC_UNLK_SHIFT)
+#define CS42L42_SRCPL_DAC_UNLK_SHIFT 6
+#define CS42L42_SRCPL_DAC_UNLK_MASK (1 << CS42L42_SRCPL_DAC_UNLK_SHIFT)
+#define CS42L42_SRCPL_VAL_MASK (CS42L42_SRCPL_ADC_LK_MASK | \
+ CS42L42_SRCPL_DAC_LK_MASK | \
+ CS42L42_SRCPL_ADC_UNLK_MASK | \
+ CS42L42_SRCPL_DAC_UNLK_MASK)
+
+#define CS42L42_VPMON_INT_MASK (CS42L42_PAGE_13 + 0x1E)
+#define CS42L42_VPMON_SHIFT 0
+#define CS42L42_VPMON_MASK (1 << CS42L42_VPMON_SHIFT)
+#define CS42L42_VPMON_VAL_MASK CS42L42_VPMON_MASK
+
+#define CS42L42_PLL_LOCK_INT_MASK (CS42L42_PAGE_13 + 0x1F)
+#define CS42L42_PLL_LOCK_SHIFT 0
+#define CS42L42_PLL_LOCK_MASK (1 << CS42L42_PLL_LOCK_SHIFT)
+#define CS42L42_PLL_LOCK_VAL_MASK CS42L42_PLL_LOCK_MASK
+
+#define CS42L42_TSRS_PLUG_INT_MASK (CS42L42_PAGE_13 + 0x20)
+#define CS42L42_RS_PLUG_SHIFT 0
+#define CS42L42_RS_PLUG_MASK (1 << CS42L42_RS_PLUG_SHIFT)
+#define CS42L42_RS_UNPLUG_SHIFT 1
+#define CS42L42_RS_UNPLUG_MASK (1 << CS42L42_RS_UNPLUG_SHIFT)
+#define CS42L42_TS_PLUG_SHIFT 2
+#define CS42L42_TS_PLUG_MASK (1 << CS42L42_TS_PLUG_SHIFT)
+#define CS42L42_TS_UNPLUG_SHIFT 3
+#define CS42L42_TS_UNPLUG_MASK (1 << CS42L42_TS_UNPLUG_SHIFT)
+#define CS42L42_TSRS_PLUG_VAL_MASK (CS42L42_RS_PLUG_MASK | \
+ CS42L42_RS_UNPLUG_MASK | \
+ CS42L42_TS_PLUG_MASK | \
+ CS42L42_TS_UNPLUG_MASK)
+#define CS42L42_TS_PLUG 3
+#define CS42L42_TS_UNPLUG 0
+#define CS42L42_TS_TRANS 1
+
+/* Page 0x15 Fractional-N PLL Registers */
+#define CS42L42_PLL_CTL1 (CS42L42_PAGE_15 + 0x01)
+#define CS42L42_PLL_START_SHIFT 0
+#define CS42L42_PLL_START_MASK (1 << CS42L42_PLL_START_SHIFT)
+
+#define CS42L42_PLL_DIV_FRAC0 (CS42L42_PAGE_15 + 0x02)
+#define CS42L42_PLL_DIV_FRAC_SHIFT 0
+#define CS42L42_PLL_DIV_FRAC_MASK (0xff << CS42L42_PLL_DIV_FRAC_SHIFT)
+
+#define CS42L42_PLL_DIV_FRAC1 (CS42L42_PAGE_15 + 0x03)
+#define CS42L42_PLL_DIV_FRAC2 (CS42L42_PAGE_15 + 0x04)
+
+#define CS42L42_PLL_DIV_INT (CS42L42_PAGE_15 + 0x05)
+#define CS42L42_PLL_DIV_INT_SHIFT 0
+#define CS42L42_PLL_DIV_INT_MASK (0xff << CS42L42_PLL_DIV_INT_SHIFT)
+
+#define CS42L42_PLL_CTL3 (CS42L42_PAGE_15 + 0x08)
+#define CS42L42_PLL_DIVOUT_SHIFT 0
+#define CS42L42_PLL_DIVOUT_MASK (0xff << CS42L42_PLL_DIVOUT_SHIFT)
+
+#define CS42L42_PLL_CAL_RATIO (CS42L42_PAGE_15 + 0x0A)
+#define CS42L42_PLL_CAL_RATIO_SHIFT 0
+#define CS42L42_PLL_CAL_RATIO_MASK (0xff << CS42L42_PLL_CAL_RATIO_SHIFT)
+
+#define CS42L42_PLL_CTL4 (CS42L42_PAGE_15 + 0x1B)
+#define CS42L42_PLL_MODE_SHIFT 0
+#define CS42L42_PLL_MODE_MASK (3 << CS42L42_PLL_MODE_SHIFT)
+
+/* Page 0x19 HP Load Detect Registers */
+#define CS42L42_LOAD_DET_RCSTAT (CS42L42_PAGE_19 + 0x25)
+#define CS42L42_RLA_STAT_SHIFT 0
+#define CS42L42_RLA_STAT_MASK (3 << CS42L42_RLA_STAT_SHIFT)
+#define CS42L42_RLA_STAT_15_OHM 0
+
+#define CS42L42_LOAD_DET_DONE (CS42L42_PAGE_19 + 0x26)
+#define CS42L42_HPLOAD_DET_DONE_SHIFT 0
+#define CS42L42_HPLOAD_DET_DONE_MASK (1 << CS42L42_HPLOAD_DET_DONE_SHIFT)
+
+#define CS42L42_LOAD_DET_EN (CS42L42_PAGE_19 + 0x27)
+#define CS42L42_HP_LD_EN_SHIFT 0
+#define CS42L42_HP_LD_EN_MASK (1 << CS42L42_HP_LD_EN_SHIFT)
+
+/* Page 0x1B Headset Interface Registers */
+#define CS42L42_HSBIAS_SC_AUTOCTL (CS42L42_PAGE_1B + 0x70)
+#define CS42L42_HSBIAS_SENSE_TRIP_SHIFT 0
+#define CS42L42_HSBIAS_SENSE_TRIP_MASK (7 << \
+ CS42L42_HSBIAS_SENSE_TRIP_SHIFT)
+#define CS42L42_TIP_SENSE_EN_SHIFT 5
+#define CS42L42_TIP_SENSE_EN_MASK (1 << \
+ CS42L42_TIP_SENSE_EN_SHIFT)
+#define CS42L42_AUTO_HSBIAS_HIZ_SHIFT 6
+#define CS42L42_AUTO_HSBIAS_HIZ_MASK (1 << \
+ CS42L42_AUTO_HSBIAS_HIZ_SHIFT)
+#define CS42L42_HSBIAS_SENSE_EN_SHIFT 7
+#define CS42L42_HSBIAS_SENSE_EN_MASK (1 << \
+ CS42L42_HSBIAS_SENSE_EN_SHIFT)
+
+#define CS42L42_WAKE_CTL (CS42L42_PAGE_1B + 0x71)
+#define CS42L42_WAKEB_CLEAR_SHIFT 0
+#define CS42L42_WAKEB_CLEAR_MASK (1 << CS42L42_WAKEB_CLEAR_SHIFT)
+#define CS42L42_WAKEB_MODE_SHIFT 5
+#define CS42L42_WAKEB_MODE_MASK (1 << CS42L42_WAKEB_MODE_SHIFT)
+#define CS42L42_M_HP_WAKE_SHIFT 6
+#define CS42L42_M_HP_WAKE_MASK (1 << CS42L42_M_HP_WAKE_SHIFT)
+#define CS42L42_M_MIC_WAKE_SHIFT 7
+#define CS42L42_M_MIC_WAKE_MASK (1 << CS42L42_M_MIC_WAKE_SHIFT)
+
+#define CS42L42_ADC_DISABLE_MUTE (CS42L42_PAGE_1B + 0x72)
+#define CS42L42_ADC_DISABLE_S0_MUTE_SHIFT 7
+#define CS42L42_ADC_DISABLE_S0_MUTE_MASK (1 << \
+ CS42L42_ADC_DISABLE_S0_MUTE_SHIFT)
+
+#define CS42L42_TIPSENSE_CTL (CS42L42_PAGE_1B + 0x73)
+#define CS42L42_TIP_SENSE_DEBOUNCE_SHIFT 0
+#define CS42L42_TIP_SENSE_DEBOUNCE_MASK (3 << \
+ CS42L42_TIP_SENSE_DEBOUNCE_SHIFT)
+#define CS42L42_TIP_SENSE_INV_SHIFT 5
+#define CS42L42_TIP_SENSE_INV_MASK (1 << \
+ CS42L42_TIP_SENSE_INV_SHIFT)
+#define CS42L42_TIP_SENSE_CTRL_SHIFT 6
+#define CS42L42_TIP_SENSE_CTRL_MASK (3 << \
+ CS42L42_TIP_SENSE_CTRL_SHIFT)
+
+#define CS42L42_MISC_DET_CTL (CS42L42_PAGE_1B + 0x74)
+#define CS42L42_PDN_MIC_LVL_DET_SHIFT 0
+#define CS42L42_PDN_MIC_LVL_DET_MASK (1 << CS42L42_PDN_MIC_LVL_DET_SHIFT)
+#define CS42L42_HSBIAS_CTL_SHIFT 1
+#define CS42L42_HSBIAS_CTL_MASK (3 << CS42L42_HSBIAS_CTL_SHIFT)
+#define CS42L42_DETECT_MODE_SHIFT 3
+#define CS42L42_DETECT_MODE_MASK (3 << CS42L42_DETECT_MODE_SHIFT)
+
+#define CS42L42_MIC_DET_CTL1 (CS42L42_PAGE_1B + 0x75)
+#define CS42L42_HS_DET_LEVEL_SHIFT 0
+#define CS42L42_HS_DET_LEVEL_MASK (0x3F << CS42L42_HS_DET_LEVEL_SHIFT)
+#define CS42L42_EVENT_STAT_SEL_SHIFT 6
+#define CS42L42_EVENT_STAT_SEL_MASK (1 << CS42L42_EVENT_STAT_SEL_SHIFT)
+#define CS42L42_LATCH_TO_VP_SHIFT 7
+#define CS42L42_LATCH_TO_VP_MASK (1 << CS42L42_LATCH_TO_VP_SHIFT)
+
+#define CS42L42_MIC_DET_CTL2 (CS42L42_PAGE_1B + 0x76)
+#define CS42L42_DEBOUNCE_TIME_SHIFT 5
+#define CS42L42_DEBOUNCE_TIME_MASK (0x07 << CS42L42_DEBOUNCE_TIME_SHIFT)
+
+#define CS42L42_DET_STATUS1 (CS42L42_PAGE_1B + 0x77)
+#define CS42L42_HSBIAS_HIZ_MODE_SHIFT 6
+#define CS42L42_HSBIAS_HIZ_MODE_MASK (1 << CS42L42_HSBIAS_HIZ_MODE_SHIFT)
+#define CS42L42_TIP_SENSE_SHIFT 7
+#define CS42L42_TIP_SENSE_MASK (1 << CS42L42_TIP_SENSE_SHIFT)
+
+#define CS42L42_DET_STATUS2 (CS42L42_PAGE_1B + 0x78)
+#define CS42L42_SHORT_TRUE_SHIFT 0
+#define CS42L42_SHORT_TRUE_MASK (1 << CS42L42_SHORT_TRUE_SHIFT)
+#define CS42L42_HS_TRUE_SHIFT 1
+#define CS42L42_HS_TRUE_MASK (1 << CS42L42_HS_TRUE_SHIFT)
+
+#define CS42L42_DET_INT1_MASK (CS42L42_PAGE_1B + 0x79)
+#define CS42L42_TIP_SENSE_UNPLUG_SHIFT 5
+#define CS42L42_TIP_SENSE_UNPLUG_MASK (1 << CS42L42_TIP_SENSE_UNPLUG_SHIFT)
+#define CS42L42_TIP_SENSE_PLUG_SHIFT 6
+#define CS42L42_TIP_SENSE_PLUG_MASK (1 << CS42L42_TIP_SENSE_PLUG_SHIFT)
+#define CS42L42_HSBIAS_SENSE_SHIFT 7
+#define CS42L42_HSBIAS_SENSE_MASK (1 << CS42L42_HSBIAS_SENSE_SHIFT)
+#define CS42L42_DET_INT_VAL1_MASK (CS42L42_TIP_SENSE_UNPLUG_MASK | \
+ CS42L42_TIP_SENSE_PLUG_MASK | \
+ CS42L42_HSBIAS_SENSE_MASK)
+
+#define CS42L42_DET_INT2_MASK (CS42L42_PAGE_1B + 0x7A)
+#define CS42L42_M_SHORT_DET_SHIFT 0
+#define CS42L42_M_SHORT_DET_MASK (1 << \
+ CS42L42_M_SHORT_DET_SHIFT)
+#define CS42L42_M_SHORT_RLS_SHIFT 1
+#define CS42L42_M_SHORT_RLS_MASK (1 << \
+ CS42L42_M_SHORT_RLS_SHIFT)
+#define CS42L42_M_HSBIAS_HIZ_SHIFT 2
+#define CS42L42_M_HSBIAS_HIZ_MASK (1 << \
+ CS42L42_M_HSBIAS_HIZ_SHIFT)
+#define CS42L42_M_DETECT_FT_SHIFT 6
+#define CS42L42_M_DETECT_FT_MASK (1 << \
+ CS42L42_M_DETECT_FT_SHIFT)
+#define CS42L42_M_DETECT_TF_SHIFT 7
+#define CS42L42_M_DETECT_TF_MASK (1 << \
+ CS42L42_M_DETECT_TF_SHIFT)
+#define CS42L42_DET_INT_VAL2_MASK (CS42L42_M_SHORT_DET_MASK | \
+ CS42L42_M_SHORT_RLS_MASK | \
+ CS42L42_M_HSBIAS_HIZ_MASK | \
+ CS42L42_M_DETECT_FT_MASK | \
+ CS42L42_M_DETECT_TF_MASK)
+
+/* Page 0x1C Headset Bias Registers */
+#define CS42L42_HS_BIAS_CTL (CS42L42_PAGE_1C + 0x03)
+#define CS42L42_HSBIAS_RAMP_SHIFT 0
+#define CS42L42_HSBIAS_RAMP_MASK (3 << CS42L42_HSBIAS_RAMP_SHIFT)
+#define CS42L42_HSBIAS_PD_SHIFT 4
+#define CS42L42_HSBIAS_PD_MASK (1 << CS42L42_HSBIAS_PD_SHIFT)
+#define CS42L42_HSBIAS_CAPLESS_SHIFT 7
+#define CS42L42_HSBIAS_CAPLESS_MASK (1 << CS42L42_HSBIAS_CAPLESS_SHIFT)
+
+/* Page 0x1D ADC Registers */
+#define CS42L42_ADC_CTL (CS42L42_PAGE_1D + 0x01)
+#define CS42L42_ADC_NOTCH_DIS_SHIFT 5
+#define CS42L42_ADC_FORCE_WEAK_VCM_SHIFT 4
+#define CS42L42_ADC_INV_SHIFT 2
+#define CS42L42_ADC_DIG_BOOST_SHIFT 0
+
+#define CS42L42_ADC_VOLUME (CS42L42_PAGE_1D + 0x03)
+#define CS42L42_ADC_VOL_SHIFT 0
+
+#define CS42L42_ADC_WNF_HPF_CTL (CS42L42_PAGE_1D + 0x04)
+#define CS42L42_ADC_WNF_CF_SHIFT 4
+#define CS42L42_ADC_WNF_EN_SHIFT 3
+#define CS42L42_ADC_HPF_CF_SHIFT 1
+#define CS42L42_ADC_HPF_EN_SHIFT 0
+
+/* Page 0x1F DAC Registers */
+#define CS42L42_DAC_CTL1 (CS42L42_PAGE_1F + 0x01)
+#define CS42L42_DACB_INV_SHIFT 1
+#define CS42L42_DACA_INV_SHIFT 0
+
+#define CS42L42_DAC_CTL2 (CS42L42_PAGE_1F + 0x06)
+#define CS42L42_HPOUT_PULLDOWN_SHIFT 4
+#define CS42L42_HPOUT_PULLDOWN_MASK (15 << CS42L42_HPOUT_PULLDOWN_SHIFT)
+#define CS42L42_HPOUT_LOAD_SHIFT 3
+#define CS42L42_HPOUT_LOAD_MASK (1 << CS42L42_HPOUT_LOAD_SHIFT)
+#define CS42L42_HPOUT_CLAMP_SHIFT 2
+#define CS42L42_HPOUT_CLAMP_MASK (1 << CS42L42_HPOUT_CLAMP_SHIFT)
+#define CS42L42_DAC_HPF_EN_SHIFT 1
+#define CS42L42_DAC_HPF_EN_MASK (1 << CS42L42_DAC_HPF_EN_SHIFT)
+#define CS42L42_DAC_MON_EN_SHIFT 0
+#define CS42L42_DAC_MON_EN_MASK (1 << CS42L42_DAC_MON_EN_SHIFT)
+
+/* Page 0x20 HP CTL Registers */
+#define CS42L42_HP_CTL (CS42L42_PAGE_20 + 0x01)
+#define CS42L42_HP_ANA_BMUTE_SHIFT 3
+#define CS42L42_HP_ANA_BMUTE_MASK (1 << CS42L42_HP_ANA_BMUTE_SHIFT)
+#define CS42L42_HP_ANA_AMUTE_SHIFT 2
+#define CS42L42_HP_ANA_AMUTE_MASK (1 << CS42L42_HP_ANA_AMUTE_SHIFT)
+#define CS42L42_HP_FULL_SCALE_VOL_SHIFT 1
+#define CS42L42_HP_FULL_SCALE_VOL_MASK (1 << CS42L42_HP_FULL_SCALE_VOL_SHIFT)
+
+/* Page 0x21 Class H Registers */
+#define CS42L42_CLASSH_CTL (CS42L42_PAGE_21 + 0x01)
+
+/* Page 0x23 Mixer Volume Registers */
+#define CS42L42_MIXER_CHA_VOL (CS42L42_PAGE_23 + 0x01)
+#define CS42L42_MIXER_ADC_VOL (CS42L42_PAGE_23 + 0x02)
+
+#define CS42L42_MIXER_CHB_VOL (CS42L42_PAGE_23 + 0x03)
+#define CS42L42_MIXER_CH_VOL_SHIFT 0
+#define CS42L42_MIXER_CH_VOL_MASK (0x3f << CS42L42_MIXER_CH_VOL_SHIFT)
+
+/* Page 0x24 EQ Registers */
+#define CS42L42_EQ_COEF_IN0 (CS42L42_PAGE_24 + 0x01)
+#define CS42L42_EQ_COEF_IN1 (CS42L42_PAGE_24 + 0x02)
+#define CS42L42_EQ_COEF_IN2 (CS42L42_PAGE_24 + 0x03)
+#define CS42L42_EQ_COEF_IN3 (CS42L42_PAGE_24 + 0x04)
+#define CS42L42_EQ_COEF_RW (CS42L42_PAGE_24 + 0x06)
+#define CS42L42_EQ_COEF_OUT0 (CS42L42_PAGE_24 + 0x07)
+#define CS42L42_EQ_COEF_OUT1 (CS42L42_PAGE_24 + 0x08)
+#define CS42L42_EQ_COEF_OUT2 (CS42L42_PAGE_24 + 0x09)
+#define CS42L42_EQ_COEF_OUT3 (CS42L42_PAGE_24 + 0x0A)
+#define CS42L42_EQ_INIT_STAT (CS42L42_PAGE_24 + 0x0B)
+#define CS42L42_EQ_START_FILT (CS42L42_PAGE_24 + 0x0C)
+#define CS42L42_EQ_MUTE_CTL (CS42L42_PAGE_24 + 0x0E)
+
+/* Page 0x25 Audio Port Registers */
+#define CS42L42_SP_RX_CH_SEL (CS42L42_PAGE_25 + 0x01)
+
+#define CS42L42_SP_RX_ISOC_CTL (CS42L42_PAGE_25 + 0x02)
+#define CS42L42_SP_RX_RSYNC_SHIFT 6
+#define CS42L42_SP_RX_RSYNC_MASK (1 << CS42L42_SP_RX_RSYNC_SHIFT)
+#define CS42L42_SP_RX_NSB_POS_SHIFT 3
+#define CS42L42_SP_RX_NSB_POS_MASK (7 << CS42L42_SP_RX_NSB_POS_SHIFT)
+#define CS42L42_SP_RX_NFS_NSBB_SHIFT 2
+#define CS42L42_SP_RX_NFS_NSBB_MASK (1 << CS42L42_SP_RX_NFS_NSBB_SHIFT)
+#define CS42L42_SP_RX_ISOC_MODE_SHIFT 0
+#define CS42L42_SP_RX_ISOC_MODE_MASK (3 << CS42L42_SP_RX_ISOC_MODE_SHIFT)
+
+#define CS42L42_SP_RX_FS (CS42L42_PAGE_25 + 0x03)
+#define CS42l42_SPDIF_CH_SEL (CS42L42_PAGE_25 + 0x04)
+#define CS42L42_SP_TX_ISOC_CTL (CS42L42_PAGE_25 + 0x05)
+#define CS42L42_SP_TX_FS (CS42L42_PAGE_25 + 0x06)
+#define CS42L42_SPDIF_SW_CTL1 (CS42L42_PAGE_25 + 0x07)
+
+/* Page 0x26 SRC Registers */
+#define CS42L42_SRC_SDIN_FS (CS42L42_PAGE_26 + 0x01)
+#define CS42L42_SRC_SDIN_FS_SHIFT 0
+#define CS42L42_SRC_SDIN_FS_MASK (0x1f << CS42L42_SRC_SDIN_FS_SHIFT)
+
+#define CS42L42_SRC_SDOUT_FS (CS42L42_PAGE_26 + 0x09)
+
+/* Page 0x28 S/PDIF Registers */
+#define CS42L42_SPDIF_CTL1 (CS42L42_PAGE_28 + 0x01)
+#define CS42L42_SPDIF_CTL2 (CS42L42_PAGE_28 + 0x02)
+#define CS42L42_SPDIF_CTL3 (CS42L42_PAGE_28 + 0x03)
+#define CS42L42_SPDIF_CTL4 (CS42L42_PAGE_28 + 0x04)
+
+/* Page 0x29 Serial Port TX Registers */
+#define CS42L42_ASP_TX_SZ_EN (CS42L42_PAGE_29 + 0x01)
+#define CS42L42_ASP_TX_CH_EN (CS42L42_PAGE_29 + 0x02)
+#define CS42L42_ASP_TX_CH_AP_RES (CS42L42_PAGE_29 + 0x03)
+#define CS42L42_ASP_TX_CH1_BIT_MSB (CS42L42_PAGE_29 + 0x04)
+#define CS42L42_ASP_TX_CH1_BIT_LSB (CS42L42_PAGE_29 + 0x05)
+#define CS42L42_ASP_TX_HIZ_DLY_CFG (CS42L42_PAGE_29 + 0x06)
+#define CS42L42_ASP_TX_CH2_BIT_MSB (CS42L42_PAGE_29 + 0x0A)
+#define CS42L42_ASP_TX_CH2_BIT_LSB (CS42L42_PAGE_29 + 0x0B)
+
+/* Page 0x2A Serial Port RX Registers */
+#define CS42L42_ASP_RX_DAI0_EN (CS42L42_PAGE_2A + 0x01)
+#define CS42L42_ASP_RX0_CH_EN_SHIFT 2
+#define CS42L42_ASP_RX0_CH_EN_MASK (0xf << CS42L42_ASP_RX0_CH_EN_SHIFT)
+#define CS42L42_ASP_RX0_CH1_EN 1
+#define CS42L42_ASP_RX0_CH2_EN 2
+#define CS42L42_ASP_RX0_CH3_EN 4
+#define CS42L42_ASP_RX0_CH4_EN 8
+
+#define CS42L42_ASP_RX_DAI0_CH1_AP_RES (CS42L42_PAGE_2A + 0x02)
+#define CS42L42_ASP_RX_DAI0_CH1_BIT_MSB (CS42L42_PAGE_2A + 0x03)
+#define CS42L42_ASP_RX_DAI0_CH1_BIT_LSB (CS42L42_PAGE_2A + 0x04)
+#define CS42L42_ASP_RX_DAI0_CH2_AP_RES (CS42L42_PAGE_2A + 0x05)
+#define CS42L42_ASP_RX_DAI0_CH2_BIT_MSB (CS42L42_PAGE_2A + 0x06)
+#define CS42L42_ASP_RX_DAI0_CH2_BIT_LSB (CS42L42_PAGE_2A + 0x07)
+#define CS42L42_ASP_RX_DAI0_CH3_AP_RES (CS42L42_PAGE_2A + 0x08)
+#define CS42L42_ASP_RX_DAI0_CH3_BIT_MSB (CS42L42_PAGE_2A + 0x09)
+#define CS42L42_ASP_RX_DAI0_CH3_BIT_LSB (CS42L42_PAGE_2A + 0x0A)
+#define CS42L42_ASP_RX_DAI0_CH4_AP_RES (CS42L42_PAGE_2A + 0x0B)
+#define CS42L42_ASP_RX_DAI0_CH4_BIT_MSB (CS42L42_PAGE_2A + 0x0C)
+#define CS42L42_ASP_RX_DAI0_CH4_BIT_LSB (CS42L42_PAGE_2A + 0x0D)
+#define CS42L42_ASP_RX_DAI1_CH1_AP_RES (CS42L42_PAGE_2A + 0x0E)
+#define CS42L42_ASP_RX_DAI1_CH1_BIT_MSB (CS42L42_PAGE_2A + 0x0F)
+#define CS42L42_ASP_RX_DAI1_CH1_BIT_LSB (CS42L42_PAGE_2A + 0x10)
+#define CS42L42_ASP_RX_DAI1_CH2_AP_RES (CS42L42_PAGE_2A + 0x11)
+#define CS42L42_ASP_RX_DAI1_CH2_BIT_MSB (CS42L42_PAGE_2A + 0x12)
+#define CS42L42_ASP_RX_DAI1_CH2_BIT_LSB (CS42L42_PAGE_2A + 0x13)
+
+#define CS42L42_ASP_RX_CH_AP_SHIFT 6
+#define CS42L42_ASP_RX_CH_AP_MASK (1 << CS42L42_ASP_RX_CH_AP_SHIFT)
+#define CS42L42_ASP_RX_CH_AP_LOW 0
+#define CS42L42_ASP_RX_CH_AP_HI 1
+#define CS42L42_ASP_RX_CH_RES_SHIFT 0
+#define CS42L42_ASP_RX_CH_RES_MASK (3 << CS42L42_ASP_RX_CH_RES_SHIFT)
+#define CS42L42_ASP_RX_CH_RES_32 3
+#define CS42L42_ASP_RX_CH_RES_16 1
+#define CS42L42_ASP_RX_CH_BIT_ST_SHIFT 0
+#define CS42L42_ASP_RX_CH_BIT_ST_MASK (0xff << CS42L42_ASP_RX_CH_BIT_ST_SHIFT)
+
+/* Page 0x30 ID Registers */
+#define CS42L42_SUB_REVID (CS42L42_PAGE_30 + 0x14)
+#define CS42L42_MAX_REGISTER (CS42L42_PAGE_30 + 0x14)
+
+/* Defines for fracturing values spread across multiple registers */
+#define CS42L42_FRAC0_VAL(val) ((val) & 0x0000ff)
+#define CS42L42_FRAC1_VAL(val) (((val) & 0x00ff00) >> 8)
+#define CS42L42_FRAC2_VAL(val) (((val) & 0xff0000) >> 16)
+
+#define CS42L42_NUM_SUPPLIES 5
+
+static const char *const cs42l42_supply_names[CS42L42_NUM_SUPPLIES] = {
+ "VA",
+ "VP",
+ "VCP",
+ "VD_FILT",
+ "VL",
+};
+
+struct cs42l42_private {
+ struct regmap *regmap;
+ struct snd_soc_codec *codec;
+ struct regulator_bulk_data supplies[CS42L42_NUM_SUPPLIES];
+ struct gpio_desc *reset_gpio;
+ struct completion pdn_done;
+ u32 sclk;
+ u32 srate;
+ u32 swidth;
+ u8 plug_state;
+ u8 hs_type;
+ u8 ts_inv;
+ u8 ts_dbnc_rise;
+ u8 ts_dbnc_fall;
+ u8 btn_det_init_dbnce;
+ u8 btn_det_event_dbnce;
+ u8 bias_thresholds[CS42L42_NUM_BIASES];
+ u8 hs_bias_ramp_rate;
+ u8 hs_bias_ramp_time;
+};
+
+#endif /* __CS42L42_H__ */
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index 54c1768bc818..cb6ca85f1536 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -64,8 +64,6 @@ struct cs42l56_private {
};
static const struct reg_default cs42l56_reg_defaults[] = {
- { 1, 0x56 }, /* r01 - ID 1 */
- { 2, 0x04 }, /* r02 - ID 2 */
{ 3, 0x7f }, /* r03 - Power Ctl 1 */
{ 4, 0xff }, /* r04 - Power Ctl 2 */
{ 5, 0x00 }, /* ro5 - Clocking Ctl 1 */
@@ -1262,8 +1260,6 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
return ret;
}
- regcache_cache_bypass(cs42l56->regmap, true);
-
ret = regmap_read(cs42l56->regmap, CS42L56_CHIP_ID_1, &reg);
devid = reg & CS42L56_CHIP_ID_MASK;
if (devid != CS42L56_DEVID) {
@@ -1279,23 +1275,25 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
dev_info(&i2c_client->dev, "Alpha Rev %X Metal Rev %X\n",
alpha_rev, metal_rev);
- regcache_cache_bypass(cs42l56->regmap, false);
-
if (cs42l56->pdata.ain1a_ref_cfg)
regmap_update_bits(cs42l56->regmap, CS42L56_AIN_REFCFG_ADC_MUX,
- CS42L56_AIN1A_REF_MASK, 1);
+ CS42L56_AIN1A_REF_MASK,
+ CS42L56_AIN1A_REF_MASK);
if (cs42l56->pdata.ain1b_ref_cfg)
regmap_update_bits(cs42l56->regmap, CS42L56_AIN_REFCFG_ADC_MUX,
- CS42L56_AIN1B_REF_MASK, 1);
+ CS42L56_AIN1B_REF_MASK,
+ CS42L56_AIN1B_REF_MASK);
if (cs42l56->pdata.ain2a_ref_cfg)
regmap_update_bits(cs42l56->regmap, CS42L56_AIN_REFCFG_ADC_MUX,
- CS42L56_AIN2A_REF_MASK, 1);
+ CS42L56_AIN2A_REF_MASK,
+ CS42L56_AIN2A_REF_MASK);
if (cs42l56->pdata.ain2b_ref_cfg)
regmap_update_bits(cs42l56->regmap, CS42L56_AIN_REFCFG_ADC_MUX,
- CS42L56_AIN2B_REF_MASK, 1);
+ CS42L56_AIN2B_REF_MASK,
+ CS42L56_AIN2B_REF_MASK);
if (cs42l56->pdata.micbias_lvl)
regmap_update_bits(cs42l56->regmap, CS42L56_GAIN_BIAS_CTL,
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index 71ba5605495f..3df2c473ab88 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -1337,8 +1337,6 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
gpio_set_value_cansleep(cs42l73->pdata.reset_gpio, 1);
}
- regcache_cache_bypass(cs42l73->regmap, true);
-
/* initialize codec */
ret = regmap_read(cs42l73->regmap, CS42L73_DEVID_AB, &reg);
devid = (reg & 0xFF) << 12;
@@ -1366,8 +1364,6 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
dev_info(&i2c_client->dev,
"Cirrus Logic CS42L73, Revision: %02X\n", reg & 0xFF);
- regcache_cache_bypass(cs42l73->regmap, false);
-
ret = snd_soc_register_codec(&i2c_client->dev,
&soc_codec_dev_cs42l73, cs42l73_dai,
ARRAY_SIZE(cs42l73_dai));
diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c
index b4d87379d2bc..c1785bd4ff19 100644
--- a/sound/soc/codecs/cs42xx8.c
+++ b/sound/soc/codecs/cs42xx8.c
@@ -321,7 +321,6 @@ static struct snd_soc_dai_driver cs42xx8_dai = {
};
static const struct reg_default cs42xx8_reg[] = {
- { 0x01, 0x01 }, /* Chip I.D. and Revision Register */
{ 0x02, 0x00 }, /* Power Control */
{ 0x03, 0xF0 }, /* Functional Mode */
{ 0x04, 0x46 }, /* Interface Formats */
@@ -498,13 +497,6 @@ int cs42xx8_probe(struct device *dev, struct regmap *regmap)
/* Make sure hardware reset done */
msleep(5);
- /*
- * We haven't marked the chip revision as volatile due to
- * sharing a register with the right input volume; explicitly
- * bypass the cache to read it.
- */
- regcache_cache_bypass(cs42xx8->regmap, true);
-
/* Validate the chip ID */
ret = regmap_read(cs42xx8->regmap, CS42XX8_CHIPID, &val);
if (ret < 0) {
@@ -523,8 +515,6 @@ int cs42xx8_probe(struct device *dev, struct regmap *regmap)
dev_info(dev, "found device, revision %X\n",
val & CS42XX8_CHIPID_REV_ID_MASK);
- regcache_cache_bypass(cs42xx8->regmap, false);
-
cs42xx8_dai.name = cs42xx8->drvdata->name;
/* Each adc supports stereo input */
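
The cs42l56, cs42l73 and cs42xx8 hunks follow one pattern: the chip-ID register is dropped from the reg_default table and the regcache_cache_bypass() calls that used to bracket the ID read are removed. Once the register has no cached default, the first regmap_read() must go to the hardware anyway, so the explicit bypass becomes redundant. The toy model below illustrates only that cache-versus-hardware distinction; it is not the regmap implementation, and the 0x56 chip ID is just a placeholder:

  #include <stdio.h>
  #include <stdbool.h>
  #include <stdint.h>

  struct cached_reg {
          bool    cached;  /* does the cache hold a default for this register? */
          uint8_t def;
  };

  static uint8_t hw_read(void)  /* stand-in for the real I2C/SPI read */
  {
          return 0x56;          /* pretend chip ID read back from silicon */
  }

  static uint8_t reg_read(const struct cached_reg *r)
  {
          return r->cached ? r->def : hw_read();
  }

  int main(void)
  {
          struct cached_reg with_default    = { .cached = true, .def = 0x00 };
          struct cached_reg without_default = { .cached = false };

          printf("cached default: 0x%02x\n", reg_read(&with_default));    /* stale 0x00 */
          printf("no default:     0x%02x\n", reg_read(&without_default)); /* real 0x56 */
          return 0;
  }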
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index 5b22564f037c..73559ae864b6 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -335,9 +335,11 @@ static const struct snd_kcontrol_new cs47l24_aec_loopback_mux =
static const struct snd_soc_dapm_widget cs47l24_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1,
- ARIZONA_SYSCLK_ENA_SHIFT, 0, NULL, 0),
+ ARIZONA_SYSCLK_ENA_SHIFT, 0, arizona_clk_ev,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
- ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0),
+ ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, arizona_clk_ev,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK,
ARIZONA_OPCLK_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("ASYNCOPCLK", ARIZONA_OUTPUT_ASYNC_CLOCK,
@@ -1064,7 +1066,7 @@ static struct snd_soc_dai_driver cs47l24_dai[] = {
static int cs47l24_open(struct snd_compr_stream *stream)
{
struct snd_soc_pcm_runtime *rtd = stream->private_data;
- struct cs47l24_priv *priv = snd_soc_codec_get_drvdata(rtd->codec);
+ struct cs47l24_priv *priv = snd_soc_platform_get_drvdata(rtd->platform);
struct arizona *arizona = priv->core.arizona;
int n_adsp;
@@ -1113,8 +1115,8 @@ static irqreturn_t cs47l24_adsp2_irq(int irq, void *data)
static int cs47l24_codec_probe(struct snd_soc_codec *codec)
{
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
struct cs47l24_priv *priv = snd_soc_codec_get_drvdata(codec);
- struct arizona *arizona = priv->core.arizona;
int ret;
priv->core.arizona->dapm = dapm;
@@ -1124,14 +1126,6 @@ static int cs47l24_codec_probe(struct snd_soc_codec *codec)
arizona_init_mono(codec);
arizona_init_notifiers(codec);
- ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
- "ADSP2 Compressed IRQ", cs47l24_adsp2_irq,
- priv);
- if (ret != 0) {
- dev_err(codec->dev, "Failed to request DSP IRQ: %d\n", ret);
- return ret;
- }
-
ret = wm_adsp2_codec_probe(&priv->core.adsp[1], codec);
if (ret)
goto err_adsp2_codec_probe;
@@ -1145,7 +1139,7 @@ static int cs47l24_codec_probe(struct snd_soc_codec *codec)
if (ret)
goto err_adsp2_codec_probe;
- snd_soc_dapm_disable_pin(dapm, "HAPTICS");
+ snd_soc_component_disable_pin(component, "HAPTICS");
return 0;
@@ -1159,17 +1153,12 @@ err_adsp2_codec_probe:
static int cs47l24_codec_remove(struct snd_soc_codec *codec)
{
struct cs47l24_priv *priv = snd_soc_codec_get_drvdata(codec);
- struct arizona *arizona = priv->core.arizona;
wm_adsp2_codec_remove(&priv->core.adsp[1], codec);
wm_adsp2_codec_remove(&priv->core.adsp[2], codec);
priv->core.arizona->dapm = NULL;
- arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
-
- arizona_free_spk(codec);
-
return 0;
}
@@ -1285,25 +1274,47 @@ static int cs47l24_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_idle(&pdev->dev);
+ ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
+ "ADSP2 Compressed IRQ", cs47l24_adsp2_irq,
+ cs47l24);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to request DSP IRQ: %d\n", ret);
+ return ret;
+ }
+
+ ret = arizona_init_spk_irqs(arizona);
+ if (ret < 0)
+ goto err_dsp_irq;
+
ret = snd_soc_register_platform(&pdev->dev, &cs47l24_compr_platform);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register platform: %d\n", ret);
- return ret;
+ goto err_spk_irqs;
}
ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_cs47l24,
cs47l24_dai, ARRAY_SIZE(cs47l24_dai));
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register codec: %d\n", ret);
- snd_soc_unregister_platform(&pdev->dev);
+ goto err_platform;
}
return ret;
+
+err_platform:
+ snd_soc_unregister_platform(&pdev->dev);
+err_spk_irqs:
+ arizona_free_spk_irqs(arizona);
+err_dsp_irq:
+ arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, cs47l24);
+
+ return ret;
}
static int cs47l24_remove(struct platform_device *pdev)
{
struct cs47l24_priv *cs47l24 = platform_get_drvdata(pdev);
+ struct arizona *arizona = cs47l24->core.arizona;
snd_soc_unregister_platform(&pdev->dev);
snd_soc_unregister_codec(&pdev->dev);
@@ -1312,6 +1323,10 @@ static int cs47l24_remove(struct platform_device *pdev)
wm_adsp2_remove(&cs47l24->core.adsp[1]);
wm_adsp2_remove(&cs47l24->core.adsp[2]);
+ arizona_free_spk_irqs(arizona);
+
+ arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, cs47l24);
+
return 0;
}
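
The cs47l24_probe() hunk above moves the DSP IRQ and speaker-IRQ setup into the platform probe and converts the early returns into a goto-unwind ladder, so everything acquired before a failure is released in reverse order. A self-contained sketch of that error-unwinding shape; acquire()/release() are made-up stand-ins for the arizona/ASoC calls:

  #include <stdio.h>

  static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
  static void release(const char *what) { printf("release %s\n", what); }

  static int probe(void)
  {
          int ret;

          ret = acquire("dsp irq");
          if (ret)
                  return ret;

          ret = acquire("spk irqs");
          if (ret)
                  goto err_dsp_irq;

          ret = acquire("platform");
          if (ret)
                  goto err_spk_irqs;

          ret = acquire("codec");
          if (ret)
                  goto err_platform;

          return 0;

  err_platform:
          release("platform");
  err_spk_irqs:
          release("spk irqs");
  err_dsp_irq:
          release("dsp irq");
          return ret;
  }

  int main(void) { return probe(); }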
diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
index 2b8914dd5990..6274d79c1353 100644
--- a/sound/soc/codecs/da7219-aad.c
+++ b/sound/soc/codecs/da7219-aad.c
@@ -204,10 +204,19 @@ static void da7219_aad_hptest_work(struct work_struct *work)
snd_soc_update_bits(codec, DA7219_MIXOUT_R_CTRL,
DA7219_MIXOUT_R_AMP_EN_MASK,
DA7219_MIXOUT_R_AMP_EN_MASK);
- snd_soc_write(codec, DA7219_HP_L_CTRL,
- DA7219_HP_L_AMP_OE_MASK | DA7219_HP_L_AMP_EN_MASK);
- snd_soc_write(codec, DA7219_HP_R_CTRL,
- DA7219_HP_R_AMP_OE_MASK | DA7219_HP_R_AMP_EN_MASK);
+ snd_soc_update_bits(codec, DA7219_HP_L_CTRL,
+ DA7219_HP_L_AMP_OE_MASK | DA7219_HP_L_AMP_EN_MASK,
+ DA7219_HP_L_AMP_OE_MASK | DA7219_HP_L_AMP_EN_MASK);
+ snd_soc_update_bits(codec, DA7219_HP_R_CTRL,
+ DA7219_HP_R_AMP_OE_MASK | DA7219_HP_R_AMP_EN_MASK,
+ DA7219_HP_R_AMP_OE_MASK | DA7219_HP_R_AMP_EN_MASK);
+ msleep(DA7219_SETTLING_DELAY);
+ snd_soc_update_bits(codec, DA7219_HP_L_CTRL,
+ DA7219_HP_L_AMP_MUTE_EN_MASK |
+ DA7219_HP_L_AMP_MIN_GAIN_EN_MASK, 0);
+ snd_soc_update_bits(codec, DA7219_HP_R_CTRL,
+ DA7219_HP_R_AMP_MUTE_EN_MASK |
+ DA7219_HP_R_AMP_MIN_GAIN_EN_MASK, 0);
/*
* If we're running from the internal oscillator then give audio paths
@@ -244,6 +253,7 @@ static void da7219_aad_hptest_work(struct work_struct *work)
regcache_mark_dirty(da7219->regmap);
regcache_sync_region(da7219->regmap, DA7219_HP_L_CTRL,
DA7219_HP_R_CTRL);
+ msleep(DA7219_SETTLING_DELAY);
regcache_sync_region(da7219->regmap, DA7219_MIXOUT_L_CTRL,
DA7219_MIXOUT_R_CTRL);
regcache_sync_region(da7219->regmap, DA7219_DROUTING_ST_OUTFILT_1L,
diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
index cf37936bfe3a..99601627f83c 100644
--- a/sound/soc/codecs/da7219.c
+++ b/sound/soc/codecs/da7219.c
@@ -823,6 +823,85 @@ static int da7219_dai_event(struct snd_soc_dapm_widget *w,
}
}
+static int da7219_settling_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ case SND_SOC_DAPM_POST_PMD:
+ msleep(DA7219_SETTLING_DELAY);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int da7219_mixout_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ u8 hp_ctrl, min_gain_mask;
+
+ switch (w->reg) {
+ case DA7219_MIXOUT_L_CTRL:
+ hp_ctrl = DA7219_HP_L_CTRL;
+ min_gain_mask = DA7219_HP_L_AMP_MIN_GAIN_EN_MASK;
+ break;
+ case DA7219_MIXOUT_R_CTRL:
+ hp_ctrl = DA7219_HP_R_CTRL;
+ min_gain_mask = DA7219_HP_R_AMP_MIN_GAIN_EN_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMD:
+ /* Enable minimum gain on HP to avoid pops */
+ snd_soc_update_bits(codec, hp_ctrl, min_gain_mask,
+ min_gain_mask);
+
+ msleep(DA7219_MIN_GAIN_DELAY);
+
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ /* Remove minimum gain on HP */
+ snd_soc_update_bits(codec, hp_ctrl, min_gain_mask, 0);
+
+ break;
+ }
+
+ return 0;
+}
+
+static int da7219_gain_ramp_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ case SND_SOC_DAPM_PRE_PMD:
+ /* Ensure nominal gain ramping for DAPM sequence */
+ da7219->gain_ramp_ctrl =
+ snd_soc_read(codec, DA7219_GAIN_RAMP_CTRL);
+ snd_soc_write(codec, DA7219_GAIN_RAMP_CTRL,
+ DA7219_GAIN_RAMP_RATE_NOMINAL);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ case SND_SOC_DAPM_POST_PMD:
+ /* Restore previous gain ramp settings */
+ snd_soc_write(codec, DA7219_GAIN_RAMP_CTRL,
+ da7219->gain_ramp_ctrl);
+ break;
+ }
+
+ return 0;
+}
+
/*
* DAPM Widgets
@@ -907,30 +986,46 @@ static const struct snd_soc_dapm_widget da7219_dapm_widgets[] = {
ARRAY_SIZE(da7219_st_out_filtr_mix_controls)),
/* DACs */
- SND_SOC_DAPM_DAC("DACL", NULL, DA7219_DAC_L_CTRL, DA7219_DAC_L_EN_SHIFT,
- DA7219_NO_INVERT),
- SND_SOC_DAPM_DAC("DACR", NULL, DA7219_DAC_R_CTRL, DA7219_DAC_R_EN_SHIFT,
- DA7219_NO_INVERT),
+ SND_SOC_DAPM_DAC_E("DACL", NULL, DA7219_DAC_L_CTRL,
+ DA7219_DAC_L_EN_SHIFT, DA7219_NO_INVERT,
+ da7219_settling_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_DAC_E("DACR", NULL, DA7219_DAC_R_CTRL,
+ DA7219_DAC_R_EN_SHIFT, DA7219_NO_INVERT,
+ da7219_settling_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
/* Output PGAs */
- SND_SOC_DAPM_PGA("Mixout Left PGA", DA7219_MIXOUT_L_CTRL,
- DA7219_MIXOUT_L_AMP_EN_SHIFT, DA7219_NO_INVERT,
- NULL, 0),
- SND_SOC_DAPM_PGA("Mixout Right PGA", DA7219_MIXOUT_R_CTRL,
- DA7219_MIXOUT_R_AMP_EN_SHIFT, DA7219_NO_INVERT,
- NULL, 0),
- SND_SOC_DAPM_PGA("Headphone Left PGA", DA7219_HP_L_CTRL,
- DA7219_HP_L_AMP_EN_SHIFT, DA7219_NO_INVERT, NULL, 0),
- SND_SOC_DAPM_PGA("Headphone Right PGA", DA7219_HP_R_CTRL,
- DA7219_HP_R_AMP_EN_SHIFT, DA7219_NO_INVERT, NULL, 0),
+ SND_SOC_DAPM_PGA_E("Mixout Left PGA", DA7219_MIXOUT_L_CTRL,
+ DA7219_MIXOUT_L_AMP_EN_SHIFT, DA7219_NO_INVERT,
+ NULL, 0, da7219_mixout_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_PGA_E("Mixout Right PGA", DA7219_MIXOUT_R_CTRL,
+ DA7219_MIXOUT_R_AMP_EN_SHIFT, DA7219_NO_INVERT,
+ NULL, 0, da7219_mixout_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_SUPPLY_S("Headphone Left PGA", 1, DA7219_HP_L_CTRL,
+ DA7219_HP_L_AMP_EN_SHIFT, DA7219_NO_INVERT,
+ da7219_settling_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY_S("Headphone Right PGA", 1, DA7219_HP_R_CTRL,
+ DA7219_HP_R_AMP_EN_SHIFT, DA7219_NO_INVERT,
+ da7219_settling_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
/* Output Supplies */
- SND_SOC_DAPM_SUPPLY("Charge Pump", DA7219_CP_CTRL, DA7219_CP_EN_SHIFT,
- DA7219_NO_INVERT, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("Charge Pump", 0, DA7219_CP_CTRL,
+ DA7219_CP_EN_SHIFT, DA7219_NO_INVERT,
+ da7219_settling_event,
+ SND_SOC_DAPM_POST_PMU),
/* Outputs */
SND_SOC_DAPM_OUTPUT("HPL"),
SND_SOC_DAPM_OUTPUT("HPR"),
+
+ /* Pre/Post Power */
+ SND_SOC_DAPM_PRE("Pre Power Gain Ramp", da7219_gain_ramp_event),
+ SND_SOC_DAPM_POST("Post Power Gain Ramp", da7219_gain_ramp_event),
};
@@ -1003,8 +1098,8 @@ static const struct snd_soc_dapm_route da7219_audio_map[] = {
{"Mixout Left PGA", NULL, "DACL"},
{"Mixout Right PGA", NULL, "DACR"},
- {"Headphone Left PGA", NULL, "Mixout Left PGA"},
- {"Headphone Right PGA", NULL, "Mixout Right PGA"},
+ {"HPL", NULL, "Mixout Left PGA"},
+ {"HPR", NULL, "Mixout Right PGA"},
{"HPL", NULL, "Headphone Left PGA"},
{"HPR", NULL, "Headphone Right PGA"},
@@ -1712,6 +1807,14 @@ static int da7219_probe(struct snd_soc_codec *codec)
DA7219_HP_R_AMP_RAMP_EN_MASK,
DA7219_HP_R_AMP_RAMP_EN_MASK);
+ /* Default minimum gain on HP to avoid pops during DAPM sequencing */
+ snd_soc_update_bits(codec, DA7219_HP_L_CTRL,
+ DA7219_HP_L_AMP_MIN_GAIN_EN_MASK,
+ DA7219_HP_L_AMP_MIN_GAIN_EN_MASK);
+ snd_soc_update_bits(codec, DA7219_HP_R_CTRL,
+ DA7219_HP_R_AMP_MIN_GAIN_EN_MASK,
+ DA7219_HP_R_AMP_MIN_GAIN_EN_MASK);
+
/* Default infinite tone gen, start/stop by Kcontrol */
snd_soc_write(codec, DA7219_TONE_GEN_CYCLES, DA7219_BEEP_CYCLES_MASK);
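
da7219_gain_ramp_event() above brackets every DAPM power transition: on the PRE_* events it saves the current gain-ramp register and forces the nominal ramp rate, and on the matching POST_* events it writes the saved value back. A minimal sketch of that save/force/restore shape, with a plain variable standing in for snd_soc_read()/snd_soc_write() on DA7219_GAIN_RAMP_CTRL and an assumed nominal-rate value of 0:

  #include <stdio.h>
  #include <stdint.h>

  enum pm_event { PRE_PM, POST_PM };

  #define RAMP_RATE_NOMINAL 0x00       /* placeholder for the nominal-rate value */

  static uint8_t gain_ramp_reg = 0x02; /* whatever rate the user had selected */
  static uint8_t saved_ramp;

  static void gain_ramp_event(enum pm_event ev)
  {
          switch (ev) {
          case PRE_PM:                 /* before widgets power up or down */
                  saved_ramp = gain_ramp_reg;
                  gain_ramp_reg = RAMP_RATE_NOMINAL;
                  break;
          case POST_PM:                /* after the power sequence finished */
                  gain_ramp_reg = saved_ramp;
                  break;
          }
  }

  int main(void)
  {
          gain_ramp_event(PRE_PM);
          printf("during sequence: 0x%02x\n", gain_ramp_reg); /* nominal */
          gain_ramp_event(POST_PM);
          printf("after sequence:  0x%02x\n", gain_ramp_reg); /* restored */
          return 0;
  }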
diff --git a/sound/soc/codecs/da7219.h b/sound/soc/codecs/da7219.h
index 66d3bad86739..6baba7455fa1 100644
--- a/sound/soc/codecs/da7219.h
+++ b/sound/soc/codecs/da7219.h
@@ -777,6 +777,10 @@
#define DA7219_SYS_STAT_CHECK_RETRIES 6
#define DA7219_SYS_STAT_CHECK_DELAY 50
+/* Power up/down Delays */
+#define DA7219_SETTLING_DELAY 40
+#define DA7219_MIN_GAIN_DELAY 30
+
enum da7219_clk_src {
DA7219_CLKSRC_MCLK = 0,
DA7219_CLKSRC_MCLK_SQR,
@@ -814,6 +818,7 @@ struct da7219_priv {
bool master;
bool alc_en;
+ u8 gain_ramp_ctrl;
};
#endif /* __DA7219_H */
diff --git a/sound/soc/codecs/es8328.h b/sound/soc/codecs/es8328.h
index 1a736e72a929..8930322d712b 100644
--- a/sound/soc/codecs/es8328.h
+++ b/sound/soc/codecs/es8328.h
@@ -278,43 +278,6 @@ int es8328_probe(struct device *dev, struct regmap *regmap);
#define ES8328_REG_MAX 0x35
-#define ES8328_PLL1 0
-#define ES8328_PLL2 1
-
-/* clock inputs */
-#define ES8328_MCLK 0
-#define ES8328_PCMCLK 1
-
-/* clock divider id's */
-#define ES8328_PCMDIV 0
-#define ES8328_BCLKDIV 1
-#define ES8328_VXCLKDIV 2
-
-/* PCM clock dividers */
-#define ES8328_PCM_DIV_1 (0 << 6)
-#define ES8328_PCM_DIV_3 (2 << 6)
-#define ES8328_PCM_DIV_5_5 (3 << 6)
-#define ES8328_PCM_DIV_2 (4 << 6)
-#define ES8328_PCM_DIV_4 (5 << 6)
-#define ES8328_PCM_DIV_6 (6 << 6)
-#define ES8328_PCM_DIV_8 (7 << 6)
-
-/* BCLK clock dividers */
-#define ES8328_BCLK_DIV_1 (0 << 7)
-#define ES8328_BCLK_DIV_2 (1 << 7)
-#define ES8328_BCLK_DIV_4 (2 << 7)
-#define ES8328_BCLK_DIV_8 (3 << 7)
-
-/* VXCLK clock dividers */
-#define ES8328_VXCLK_DIV_1 (0 << 6)
-#define ES8328_VXCLK_DIV_2 (1 << 6)
-#define ES8328_VXCLK_DIV_4 (2 << 6)
-#define ES8328_VXCLK_DIV_8 (3 << 6)
-#define ES8328_VXCLK_DIV_16 (4 << 6)
-
-#define ES8328_DAI_HIFI 0
-#define ES8328_DAI_VOICE 1
-
#define ES8328_1536FS 1536
#define ES8328_1024FS 1024
#define ES8328_768FS 768
diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
new file mode 100644
index 000000000000..d8e8590746af
--- /dev/null
+++ b/sound/soc/codecs/msm8916-wcd-analog.c
@@ -0,0 +1,890 @@
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/tlv.h>
+
+#define CDC_D_REVISION1 (0xf000)
+#define CDC_D_PERPH_SUBTYPE (0xf005)
+#define CDC_D_CDC_RST_CTL (0xf046)
+#define RST_CTL_DIG_SW_RST_N_MASK BIT(7)
+#define RST_CTL_DIG_SW_RST_N_RESET 0
+#define RST_CTL_DIG_SW_RST_N_REMOVE_RESET BIT(7)
+
+#define CDC_D_CDC_TOP_CLK_CTL (0xf048)
+#define TOP_CLK_CTL_A_MCLK_MCLK2_EN_MASK (BIT(2) | BIT(3))
+#define TOP_CLK_CTL_A_MCLK_EN_ENABLE BIT(2)
+#define TOP_CLK_CTL_A_MCLK2_EN_ENABLE BIT(3)
+
+#define CDC_D_CDC_ANA_CLK_CTL (0xf049)
+#define ANA_CLK_CTL_EAR_HPHR_CLK_EN_MASK BIT(0)
+#define ANA_CLK_CTL_EAR_HPHR_CLK_EN BIT(0)
+#define ANA_CLK_CTL_EAR_HPHL_CLK_EN BIT(1)
+#define ANA_CLK_CTL_SPKR_CLK_EN_MASK BIT(4)
+#define ANA_CLK_CTL_SPKR_CLK_EN BIT(4)
+#define ANA_CLK_CTL_TXA_CLK25_EN BIT(5)
+
+#define CDC_D_CDC_DIG_CLK_CTL (0xf04A)
+#define DIG_CLK_CTL_RXD1_CLK_EN BIT(0)
+#define DIG_CLK_CTL_RXD2_CLK_EN BIT(1)
+#define DIG_CLK_CTL_RXD3_CLK_EN BIT(3)
+#define DIG_CLK_CTL_TXD_CLK_EN BIT(4)
+#define DIG_CLK_CTL_NCP_CLK_EN_MASK BIT(6)
+#define DIG_CLK_CTL_NCP_CLK_EN BIT(6)
+#define DIG_CLK_CTL_RXD_PDM_CLK_EN_MASK BIT(7)
+#define DIG_CLK_CTL_RXD_PDM_CLK_EN BIT(7)
+
+#define CDC_D_CDC_CONN_TX1_CTL (0xf050)
+#define CONN_TX1_SERIAL_TX1_MUX GENMASK(1, 0)
+#define CONN_TX1_SERIAL_TX1_ADC_1 0x0
+#define CONN_TX1_SERIAL_TX1_RX_PDM_LB 0x1
+#define CONN_TX1_SERIAL_TX1_ZERO 0x2
+
+#define CDC_D_CDC_CONN_TX2_CTL (0xf051)
+#define CONN_TX2_SERIAL_TX2_MUX GENMASK(1, 0)
+#define CONN_TX2_SERIAL_TX2_ADC_2 0x0
+#define CONN_TX2_SERIAL_TX2_RX_PDM_LB 0x1
+#define CONN_TX2_SERIAL_TX2_ZERO 0x2
+#define CDC_D_CDC_CONN_HPHR_DAC_CTL (0xf052)
+#define CDC_D_CDC_CONN_RX1_CTL (0xf053)
+#define CDC_D_CDC_CONN_RX2_CTL (0xf054)
+#define CDC_D_CDC_CONN_RX3_CTL (0xf055)
+#define CDC_D_CDC_CONN_RX_LB_CTL (0xf056)
+#define CDC_D_SEC_ACCESS (0xf0D0)
+#define CDC_D_PERPH_RESET_CTL3 (0xf0DA)
+#define CDC_D_PERPH_RESET_CTL4 (0xf0DB)
+#define CDC_A_REVISION1 (0xf100)
+#define CDC_A_REVISION2 (0xf101)
+#define CDC_A_REVISION3 (0xf102)
+#define CDC_A_REVISION4 (0xf103)
+#define CDC_A_PERPH_TYPE (0xf104)
+#define CDC_A_PERPH_SUBTYPE (0xf105)
+#define CDC_A_INT_RT_STS (0xf110)
+#define CDC_A_INT_SET_TYPE (0xf111)
+#define CDC_A_INT_POLARITY_HIGH (0xf112)
+#define CDC_A_INT_POLARITY_LOW (0xf113)
+#define CDC_A_INT_LATCHED_CLR (0xf114)
+#define CDC_A_INT_EN_SET (0xf115)
+#define CDC_A_INT_EN_CLR (0xf116)
+#define CDC_A_INT_LATCHED_STS (0xf118)
+#define CDC_A_INT_PENDING_STS (0xf119)
+#define CDC_A_INT_MID_SEL (0xf11A)
+#define CDC_A_INT_PRIORITY (0xf11B)
+#define CDC_A_MICB_1_EN (0xf140)
+#define MICB_1_EN_MICB_ENABLE BIT(7)
+#define MICB_1_EN_BYP_CAP_MASK BIT(6)
+#define MICB_1_EN_NO_EXT_BYP_CAP BIT(6)
+#define MICB_1_EN_EXT_BYP_CAP 0
+#define MICB_1_EN_PULL_DOWN_EN_MASK BIT(5)
+#define MICB_1_EN_PULL_DOWN_EN_ENABLE BIT(5)
+#define MICB_1_EN_OPA_STG2_TAIL_CURR_MASK GENMASK(3, 1)
+#define MICB_1_EN_OPA_STG2_TAIL_CURR_1_60UA (0x4)
+#define MICB_1_EN_PULL_UP_EN_MASK BIT(4)
+#define MICB_1_EN_TX3_GND_SEL_MASK BIT(0)
+#define MICB_1_EN_TX3_GND_SEL_TX_GND 0
+
+#define CDC_A_MICB_1_VAL (0xf141)
+#define MICB_1_VAL_MICB_OUT_VAL_MASK GENMASK(7, 3)
+#define MICB_1_VAL_MICB_OUT_VAL_V2P70V ((0x16) << 3)
+#define CDC_A_MICB_1_CTL (0xf142)
+
+#define MICB_1_CTL_CFILT_REF_SEL_MASK BIT(1)
+#define MICB_1_CTL_CFILT_REF_SEL_HPF_REF BIT(1)
+#define MICB_1_CTL_EXT_PRECHARG_EN_MASK BIT(5)
+#define MICB_1_CTL_EXT_PRECHARG_EN_ENABLE BIT(5)
+#define MICB_1_CTL_INT_PRECHARG_BYP_MASK BIT(6)
+#define MICB_1_CTL_INT_PRECHARG_BYP_EXT_PRECHRG_SEL BIT(6)
+
+#define CDC_A_MICB_1_INT_RBIAS (0xf143)
+#define MICB_1_INT_TX1_INT_RBIAS_EN_MASK BIT(7)
+#define MICB_1_INT_TX1_INT_RBIAS_EN_ENABLE BIT(7)
+#define MICB_1_INT_TX1_INT_RBIAS_EN_DISABLE 0
+
+#define MICB_1_INT_TX1_INT_PULLUP_EN_MASK BIT(6)
+#define MICB_1_INT_TX1_INT_PULLUP_EN_TX1N_TO_MICBIAS BIT(6)
+#define MICB_1_INT_TX1_INT_PULLUP_EN_TX1N_TO_GND 0
+
+#define MICB_1_INT_TX2_INT_RBIAS_EN_MASK BIT(4)
+#define MICB_1_INT_TX2_INT_RBIAS_EN_ENABLE BIT(4)
+#define MICB_1_INT_TX2_INT_RBIAS_EN_DISABLE 0
+#define MICB_1_INT_TX2_INT_PULLUP_EN_MASK BIT(3)
+#define MICB_1_INT_TX2_INT_PULLUP_EN_TX1N_TO_MICBIAS BIT(3)
+#define MICB_1_INT_TX2_INT_PULLUP_EN_TX1N_TO_GND 0
+
+#define MICB_1_INT_TX3_INT_RBIAS_EN_MASK BIT(1)
+#define MICB_1_INT_TX3_INT_RBIAS_EN_ENABLE BIT(1)
+#define MICB_1_INT_TX3_INT_RBIAS_EN_DISABLE 0
+#define MICB_1_INT_TX3_INT_PULLUP_EN_MASK BIT(0)
+#define MICB_1_INT_TX3_INT_PULLUP_EN_TX1N_TO_MICBIAS BIT(0)
+#define MICB_1_INT_TX3_INT_PULLUP_EN_TX1N_TO_GND 0
+
+#define CDC_A_MICB_2_EN (0xf144)
+#define CDC_A_TX_1_2_ATEST_CTL_2 (0xf145)
+#define CDC_A_MASTER_BIAS_CTL (0xf146)
+#define CDC_A_TX_1_EN (0xf160)
+#define CDC_A_TX_2_EN (0xf161)
+#define CDC_A_TX_1_2_TEST_CTL_1 (0xf162)
+#define CDC_A_TX_1_2_TEST_CTL_2 (0xf163)
+#define CDC_A_TX_1_2_ATEST_CTL (0xf164)
+#define CDC_A_TX_1_2_OPAMP_BIAS (0xf165)
+#define CDC_A_TX_3_EN (0xf167)
+#define CDC_A_NCP_EN (0xf180)
+#define CDC_A_NCP_CLK (0xf181)
+#define CDC_A_NCP_FBCTRL (0xf183)
+#define CDC_A_NCP_FBCTRL_FB_CLK_INV_MASK BIT(5)
+#define CDC_A_NCP_FBCTRL_FB_CLK_INV BIT(5)
+#define CDC_A_NCP_BIAS (0xf184)
+#define CDC_A_NCP_VCTRL (0xf185)
+#define CDC_A_NCP_TEST (0xf186)
+#define CDC_A_NCP_CLIM_ADDR (0xf187)
+#define CDC_A_RX_CLOCK_DIVIDER (0xf190)
+#define CDC_A_RX_COM_OCP_CTL (0xf191)
+#define CDC_A_RX_COM_OCP_COUNT (0xf192)
+#define CDC_A_RX_COM_BIAS_DAC (0xf193)
+#define RX_COM_BIAS_DAC_RX_BIAS_EN_MASK BIT(7)
+#define RX_COM_BIAS_DAC_RX_BIAS_EN_ENABLE BIT(7)
+#define RX_COM_BIAS_DAC_DAC_REF_EN_MASK BIT(0)
+#define RX_COM_BIAS_DAC_DAC_REF_EN_ENABLE BIT(0)
+
+#define CDC_A_RX_HPH_BIAS_PA (0xf194)
+#define CDC_A_RX_HPH_BIAS_LDO_OCP (0xf195)
+#define CDC_A_RX_HPH_BIAS_CNP (0xf196)
+#define CDC_A_RX_HPH_CNP_EN (0xf197)
+#define CDC_A_RX_HPH_L_PA_DAC_CTL (0xf19B)
+#define RX_HPA_L_PA_DAC_CTL_DATA_RESET_MASK BIT(1)
+#define RX_HPA_L_PA_DAC_CTL_DATA_RESET_RESET BIT(1)
+#define CDC_A_RX_HPH_R_PA_DAC_CTL (0xf19D)
+#define RX_HPH_R_PA_DAC_CTL_DATA_RESET BIT(1)
+#define RX_HPH_R_PA_DAC_CTL_DATA_RESET_MASK BIT(1)
+
+#define CDC_A_RX_EAR_CTL (0xf19E)
+#define RX_EAR_CTL_SPK_VBAT_LDO_EN_MASK BIT(0)
+#define RX_EAR_CTL_SPK_VBAT_LDO_EN_ENABLE BIT(0)
+
+#define CDC_A_SPKR_DAC_CTL (0xf1B0)
+#define SPKR_DAC_CTL_DAC_RESET_MASK BIT(4)
+#define SPKR_DAC_CTL_DAC_RESET_NORMAL 0
+
+#define CDC_A_SPKR_DRV_CTL (0xf1B2)
+#define SPKR_DRV_CTL_DEF_MASK 0xEF
+#define SPKR_DRV_CLASSD_PA_EN_MASK BIT(7)
+#define SPKR_DRV_CLASSD_PA_EN_ENABLE BIT(7)
+#define SPKR_DRV_CAL_EN BIT(6)
+#define SPKR_DRV_SETTLE_EN BIT(5)
+#define SPKR_DRV_FW_EN BIT(3)
+#define SPKR_DRV_BOOST_SET BIT(2)
+#define SPKR_DRV_CMFB_SET BIT(1)
+#define SPKR_DRV_GAIN_SET BIT(0)
+#define SPKR_DRV_CTL_DEF_VAL (SPKR_DRV_CLASSD_PA_EN_ENABLE | \
+ SPKR_DRV_CAL_EN | SPKR_DRV_SETTLE_EN | \
+ SPKR_DRV_FW_EN | SPKR_DRV_BOOST_SET | \
+ SPKR_DRV_CMFB_SET | SPKR_DRV_GAIN_SET)
+#define CDC_A_SPKR_OCP_CTL (0xf1B4)
+#define CDC_A_SPKR_PWRSTG_CTL (0xf1B5)
+#define SPKR_PWRSTG_CTL_DAC_EN_MASK BIT(0)
+#define SPKR_PWRSTG_CTL_DAC_EN BIT(0)
+#define SPKR_PWRSTG_CTL_MASK 0xE0
+#define SPKR_PWRSTG_CTL_BBM_MASK BIT(7)
+#define SPKR_PWRSTG_CTL_BBM_EN BIT(7)
+#define SPKR_PWRSTG_CTL_HBRDGE_EN_MASK BIT(6)
+#define SPKR_PWRSTG_CTL_HBRDGE_EN BIT(6)
+#define SPKR_PWRSTG_CTL_CLAMP_EN_MASK BIT(5)
+#define SPKR_PWRSTG_CTL_CLAMP_EN BIT(5)
+
+#define CDC_A_SPKR_DRV_DBG (0xf1B7)
+#define CDC_A_CURRENT_LIMIT (0xf1C0)
+#define CDC_A_BOOST_EN_CTL (0xf1C3)
+#define CDC_A_SLOPE_COMP_IP_ZERO (0xf1C4)
+#define CDC_A_SEC_ACCESS (0xf1D0)
+#define CDC_A_PERPH_RESET_CTL3 (0xf1DA)
+#define CDC_A_PERPH_RESET_CTL4 (0xf1DB)
+
+#define MSM8916_WCD_ANALOG_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000)
+#define MSM8916_WCD_ANALOG_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+static const char * const supply_names[] = {
+ "vdd-cdc-io",
+ "vdd-cdc-tx-rx-cx",
+};
+
+struct pm8916_wcd_analog_priv {
+ u16 pmic_rev;
+ u16 codec_version;
+ struct clk *mclk;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(supply_names)];
+ bool micbias1_cap_mode;
+ bool micbias2_cap_mode;
+};
+
+static const char *const adc2_mux_text[] = { "ZERO", "INP2", "INP3" };
+static const char *const rdac2_mux_text[] = { "ZERO", "RX2", "RX1" };
+static const char *const hph_text[] = { "ZERO", "Switch", };
+
+static const struct soc_enum hph_enum = SOC_ENUM_SINGLE_VIRT(
+ ARRAY_SIZE(hph_text), hph_text);
+
+static const struct snd_kcontrol_new hphl_mux = SOC_DAPM_ENUM("HPHL", hph_enum);
+static const struct snd_kcontrol_new hphr_mux = SOC_DAPM_ENUM("HPHR", hph_enum);
+
+/* ADC2 MUX */
+static const struct soc_enum adc2_enum = SOC_ENUM_SINGLE_VIRT(
+ ARRAY_SIZE(adc2_mux_text), adc2_mux_text);
+
+/* RDAC2 MUX */
+static const struct soc_enum rdac2_mux_enum = SOC_ENUM_SINGLE(
+ CDC_D_CDC_CONN_HPHR_DAC_CTL, 0, 3, rdac2_mux_text);
+
+static const struct snd_kcontrol_new spkr_switch[] = {
+ SOC_DAPM_SINGLE("Switch", CDC_A_SPKR_DAC_CTL, 7, 1, 0)
+};
+
+static const struct snd_kcontrol_new rdac2_mux = SOC_DAPM_ENUM(
+ "RDAC2 MUX Mux", rdac2_mux_enum);
+static const struct snd_kcontrol_new tx_adc2_mux = SOC_DAPM_ENUM(
+ "ADC2 MUX Mux", adc2_enum);
+
+/* Analog Gain control 0 dB to +24 dB in 6 dB steps */
+static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 600, 0);
+
+static const struct snd_kcontrol_new pm8916_wcd_analog_snd_controls[] = {
+ SOC_SINGLE_TLV("ADC1 Volume", CDC_A_TX_1_EN, 3, 8, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC2 Volume", CDC_A_TX_2_EN, 3, 8, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC3 Volume", CDC_A_TX_3_EN, 3, 8, 0, analog_gain),
+};
+
+static void pm8916_wcd_analog_micbias_enable(struct snd_soc_codec *codec)
+{
+ snd_soc_update_bits(codec, CDC_A_MICB_1_CTL,
+ MICB_1_CTL_EXT_PRECHARG_EN_MASK |
+ MICB_1_CTL_INT_PRECHARG_BYP_MASK,
+ MICB_1_CTL_INT_PRECHARG_BYP_EXT_PRECHRG_SEL
+ | MICB_1_CTL_EXT_PRECHARG_EN_ENABLE);
+
+ snd_soc_write(codec, CDC_A_MICB_1_VAL, MICB_1_VAL_MICB_OUT_VAL_V2P70V);
+ /*
+ * The special headset needs MICBIAS at 2.7 V, so wait
+ * 50 msec for MICBIAS to reach 2.7 V.
+ */
+ msleep(50);
+ snd_soc_update_bits(codec, CDC_A_MICB_1_CTL,
+ MICB_1_CTL_EXT_PRECHARG_EN_MASK |
+ MICB_1_CTL_INT_PRECHARG_BYP_MASK, 0);
+
+}
+
+static int pm8916_wcd_analog_enable_micbias_ext(struct snd_soc_codec
+ *codec, int event,
+ int reg, u32 cap_mode)
+{
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ pm8916_wcd_analog_micbias_enable(codec);
+ snd_soc_update_bits(codec, CDC_A_MICB_1_EN,
+ MICB_1_EN_BYP_CAP_MASK, cap_mode);
+ break;
+ }
+
+ return 0;
+}
+
+static int pm8916_wcd_analog_enable_micbias_int(struct snd_soc_codec
+ *codec, int event,
+ int reg, u32 cap_mode)
+{
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_update_bits(codec, CDC_A_MICB_1_INT_RBIAS,
+ MICB_1_INT_TX2_INT_RBIAS_EN_MASK,
+ MICB_1_INT_TX2_INT_RBIAS_EN_ENABLE);
+ snd_soc_update_bits(codec, reg, MICB_1_EN_PULL_DOWN_EN_MASK, 0);
+ snd_soc_update_bits(codec, CDC_A_MICB_1_EN,
+ MICB_1_EN_OPA_STG2_TAIL_CURR_MASK,
+ MICB_1_EN_OPA_STG2_TAIL_CURR_1_60UA);
+
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ pm8916_wcd_analog_micbias_enable(codec);
+ snd_soc_update_bits(codec, CDC_A_MICB_1_EN,
+ MICB_1_EN_BYP_CAP_MASK, cap_mode);
+ break;
+ }
+
+ return 0;
+}
+
+static int pm8916_wcd_analog_enable_micbias_ext1(struct
+ snd_soc_dapm_widget
+ *w, struct snd_kcontrol
+ *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct pm8916_wcd_analog_priv *wcd = snd_soc_codec_get_drvdata(codec);
+
+ return pm8916_wcd_analog_enable_micbias_ext(codec, event, w->reg,
+ wcd->micbias1_cap_mode);
+}
+
+static int pm8916_wcd_analog_enable_micbias_ext2(struct
+ snd_soc_dapm_widget
+ *w, struct snd_kcontrol
+ *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct pm8916_wcd_analog_priv *wcd = snd_soc_codec_get_drvdata(codec);
+
+ return pm8916_wcd_analog_enable_micbias_ext(codec, event, w->reg,
+ wcd->micbias2_cap_mode);
+
+}
+
+static int pm8916_wcd_analog_enable_micbias_int1(struct
+ snd_soc_dapm_widget
+ *w, struct snd_kcontrol
+ *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct pm8916_wcd_analog_priv *wcd = snd_soc_codec_get_drvdata(codec);
+
+ return pm8916_wcd_analog_enable_micbias_int(codec, event, w->reg,
+ wcd->micbias1_cap_mode);
+}
+
+static int pm8916_wcd_analog_enable_micbias_int2(struct
+ snd_soc_dapm_widget
+ *w, struct snd_kcontrol
+ *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct pm8916_wcd_analog_priv *wcd = snd_soc_codec_get_drvdata(codec);
+
+ return pm8916_wcd_analog_enable_micbias_int(codec, event, w->reg,
+ wcd->micbias2_cap_mode);
+}
+
+static int pm8916_wcd_analog_enable_adc(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ u16 adc_reg = CDC_A_TX_1_2_TEST_CTL_2;
+ u8 init_bit_shift;
+
+ if (w->reg == CDC_A_TX_1_EN)
+ init_bit_shift = 5;
+ else
+ init_bit_shift = 4;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (w->reg == CDC_A_TX_2_EN)
+ snd_soc_update_bits(codec, CDC_A_MICB_1_CTL,
+ MICB_1_CTL_CFILT_REF_SEL_MASK,
+ MICB_1_CTL_CFILT_REF_SEL_HPF_REF);
+ /*
+ * Add delay of 10 ms to give sufficient time for the voltage
+ * to shoot up and settle so that the txfe init does not
+ * happen when the input voltage is changing too much.
+ */
+ usleep_range(10000, 10010);
+ snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift,
+ 1 << init_bit_shift);
+ switch (w->reg) {
+ case CDC_A_TX_1_EN:
+ snd_soc_update_bits(codec, CDC_D_CDC_CONN_TX1_CTL,
+ CONN_TX1_SERIAL_TX1_MUX,
+ CONN_TX1_SERIAL_TX1_ADC_1);
+ break;
+ case CDC_A_TX_2_EN:
+ case CDC_A_TX_3_EN:
+ snd_soc_update_bits(codec, CDC_D_CDC_CONN_TX2_CTL,
+ CONN_TX2_SERIAL_TX2_MUX,
+ CONN_TX2_SERIAL_TX2_ADC_2);
+ break;
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ /*
+ * Add delay of 12 ms before deasserting the init
+ * to reduce the tx pop
+ */
+ usleep_range(12000, 12010);
+ snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift, 0x00);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ switch (w->reg) {
+ case CDC_A_TX_1_EN:
+ snd_soc_update_bits(codec, CDC_D_CDC_CONN_TX1_CTL,
+ CONN_TX1_SERIAL_TX1_MUX,
+ CONN_TX1_SERIAL_TX1_ZERO);
+ break;
+ case CDC_A_TX_2_EN:
+ snd_soc_update_bits(codec, CDC_A_MICB_1_CTL,
+ MICB_1_CTL_CFILT_REF_SEL_MASK, 0);
+ case CDC_A_TX_3_EN:
+ snd_soc_update_bits(codec, CDC_D_CDC_CONN_TX2_CTL,
+ CONN_TX2_SERIAL_TX2_MUX,
+ CONN_TX2_SERIAL_TX2_ZERO);
+ break;
+ }
+
+
+ break;
+ }
+ return 0;
+}
+
+static int pm8916_wcd_analog_enable_spk_pa(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_update_bits(codec, CDC_A_SPKR_PWRSTG_CTL,
+ SPKR_PWRSTG_CTL_DAC_EN_MASK |
+ SPKR_PWRSTG_CTL_BBM_MASK |
+ SPKR_PWRSTG_CTL_HBRDGE_EN_MASK |
+ SPKR_PWRSTG_CTL_CLAMP_EN_MASK,
+ SPKR_PWRSTG_CTL_DAC_EN|
+ SPKR_PWRSTG_CTL_BBM_EN |
+ SPKR_PWRSTG_CTL_HBRDGE_EN |
+ SPKR_PWRSTG_CTL_CLAMP_EN);
+
+ snd_soc_update_bits(codec, CDC_A_RX_EAR_CTL,
+ RX_EAR_CTL_SPK_VBAT_LDO_EN_MASK,
+ RX_EAR_CTL_SPK_VBAT_LDO_EN_ENABLE);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_update_bits(codec, CDC_A_SPKR_DRV_CTL,
+ SPKR_DRV_CTL_DEF_MASK,
+ SPKR_DRV_CTL_DEF_VAL);
+ snd_soc_update_bits(codec, w->reg,
+ SPKR_DRV_CLASSD_PA_EN_MASK,
+ SPKR_DRV_CLASSD_PA_EN_ENABLE);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, CDC_A_SPKR_PWRSTG_CTL,
+ SPKR_PWRSTG_CTL_DAC_EN_MASK|
+ SPKR_PWRSTG_CTL_BBM_MASK |
+ SPKR_PWRSTG_CTL_HBRDGE_EN_MASK |
+ SPKR_PWRSTG_CTL_CLAMP_EN_MASK, 0);
+
+ snd_soc_update_bits(codec, CDC_A_SPKR_DAC_CTL,
+ SPKR_DAC_CTL_DAC_RESET_MASK,
+ SPKR_DAC_CTL_DAC_RESET_NORMAL);
+ snd_soc_update_bits(codec, CDC_A_RX_EAR_CTL,
+ RX_EAR_CTL_SPK_VBAT_LDO_EN_MASK, 0);
+ break;
+ }
+ return 0;
+}
+
+static const struct reg_default wcd_reg_defaults_2_0[] = {
+ {CDC_A_RX_COM_OCP_CTL, 0xD1},
+ {CDC_A_RX_COM_OCP_COUNT, 0xFF},
+ {CDC_D_SEC_ACCESS, 0xA5},
+ {CDC_D_PERPH_RESET_CTL3, 0x0F},
+ {CDC_A_TX_1_2_OPAMP_BIAS, 0x4F},
+ {CDC_A_NCP_FBCTRL, 0x28},
+ {CDC_A_SPKR_DRV_CTL, 0x69},
+ {CDC_A_SPKR_DRV_DBG, 0x01},
+ {CDC_A_BOOST_EN_CTL, 0x5F},
+ {CDC_A_SLOPE_COMP_IP_ZERO, 0x88},
+ {CDC_A_SEC_ACCESS, 0xA5},
+ {CDC_A_PERPH_RESET_CTL3, 0x0F},
+ {CDC_A_CURRENT_LIMIT, 0x82},
+ {CDC_A_SPKR_DAC_CTL, 0x03},
+ {CDC_A_SPKR_OCP_CTL, 0xE1},
+ {CDC_A_MASTER_BIAS_CTL, 0x30},
+};
+
+static int pm8916_wcd_analog_probe(struct snd_soc_codec *codec)
+{
+ struct pm8916_wcd_analog_priv *priv = dev_get_drvdata(codec->dev);
+ int err, reg;
+
+ err = regulator_bulk_enable(ARRAY_SIZE(priv->supplies), priv->supplies);
+ if (err != 0) {
+ dev_err(codec->dev, "failed to enable regulators (%d)\n", err);
+ return err;
+ }
+
+ snd_soc_codec_set_drvdata(codec, priv);
+ priv->pmic_rev = snd_soc_read(codec, CDC_D_REVISION1);
+ priv->codec_version = snd_soc_read(codec, CDC_D_PERPH_SUBTYPE);
+
+ dev_info(codec->dev, "PMIC REV: %d\t CODEC Version: %d\n",
+ priv->pmic_rev, priv->codec_version);
+
+ snd_soc_write(codec, CDC_D_PERPH_RESET_CTL4, 0x01);
+ snd_soc_write(codec, CDC_A_PERPH_RESET_CTL4, 0x01);
+
+ for (reg = 0; reg < ARRAY_SIZE(wcd_reg_defaults_2_0); reg++)
+ snd_soc_write(codec, wcd_reg_defaults_2_0[reg].reg,
+ wcd_reg_defaults_2_0[reg].def);
+
+ return 0;
+}
+
+static int pm8916_wcd_analog_remove(struct snd_soc_codec *codec)
+{
+ struct pm8916_wcd_analog_priv *priv = dev_get_drvdata(codec->dev);
+
+ return regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+}
+
+static const struct snd_soc_dapm_route pm8916_wcd_analog_audio_map[] = {
+
+ {"PDM_RX1", NULL, "PDM Playback"},
+ {"PDM_RX2", NULL, "PDM Playback"},
+ {"PDM_RX3", NULL, "PDM Playback"},
+ {"PDM Capture", NULL, "PDM_TX"},
+
+ /* ADC Connections */
+ {"PDM_TX", NULL, "ADC2"},
+ {"PDM_TX", NULL, "ADC3"},
+ {"ADC2", NULL, "ADC2 MUX"},
+ {"ADC3", NULL, "ADC2 MUX"},
+ {"ADC2 MUX", "INP2", "ADC2_INP2"},
+ {"ADC2 MUX", "INP3", "ADC2_INP3"},
+
+ {"PDM_TX", NULL, "ADC1"},
+ {"ADC1", NULL, "AMIC1"},
+ {"ADC2_INP2", NULL, "AMIC2"},
+ {"ADC2_INP3", NULL, "AMIC3"},
+
+ /* RDAC Connections */
+ {"HPHR DAC", NULL, "RDAC2 MUX"},
+ {"RDAC2 MUX", "RX1", "PDM_RX1"},
+ {"RDAC2 MUX", "RX2", "PDM_RX2"},
+ {"HPHL DAC", NULL, "PDM_RX1"},
+ {"PDM_RX1", NULL, "RXD1_CLK"},
+ {"PDM_RX2", NULL, "RXD2_CLK"},
+ {"PDM_RX3", NULL, "RXD3_CLK"},
+
+ {"PDM_RX1", NULL, "RXD_PDM_CLK"},
+ {"PDM_RX2", NULL, "RXD_PDM_CLK"},
+ {"PDM_RX3", NULL, "RXD_PDM_CLK"},
+
+ {"ADC1", NULL, "TXD_CLK"},
+ {"ADC2", NULL, "TXD_CLK"},
+ {"ADC3", NULL, "TXD_CLK"},
+
+ {"ADC1", NULL, "TXA_CLK25"},
+ {"ADC2", NULL, "TXA_CLK25"},
+ {"ADC3", NULL, "TXA_CLK25"},
+
+ {"PDM_RX1", NULL, "A_MCLK2"},
+ {"PDM_RX2", NULL, "A_MCLK2"},
+ {"PDM_RX3", NULL, "A_MCLK2"},
+
+ {"PDM_TX", NULL, "A_MCLK2"},
+ {"A_MCLK2", NULL, "A_MCLK"},
+
+ /* Headset (RX MIX1 and RX MIX2) */
+ {"HEADPHONE", NULL, "HPHL PA"},
+ {"HEADPHONE", NULL, "HPHR PA"},
+
+ {"HPHL PA", NULL, "EAR_HPHL_CLK"},
+ {"HPHR PA", NULL, "EAR_HPHR_CLK"},
+
+ {"CP", NULL, "NCP_CLK"},
+
+ {"HPHL PA", NULL, "HPHL"},
+ {"HPHR PA", NULL, "HPHR"},
+ {"HPHL PA", NULL, "CP"},
+ {"HPHL PA", NULL, "RX_BIAS"},
+ {"HPHR PA", NULL, "CP"},
+ {"HPHR PA", NULL, "RX_BIAS"},
+ {"HPHL", "Switch", "HPHL DAC"},
+ {"HPHR", "Switch", "HPHR DAC"},
+
+ {"RX_BIAS", NULL, "DAC_REF"},
+
+ {"SPK_OUT", NULL, "SPK PA"},
+ {"SPK PA", NULL, "RX_BIAS"},
+ {"SPK PA", NULL, "SPKR_CLK"},
+ {"SPK PA", NULL, "SPK DAC"},
+ {"SPK DAC", "Switch", "PDM_RX3"},
+
+ {"MIC BIAS Internal1", NULL, "INT_LDO_H"},
+ {"MIC BIAS Internal2", NULL, "INT_LDO_H"},
+ {"MIC BIAS External1", NULL, "INT_LDO_H"},
+ {"MIC BIAS External2", NULL, "INT_LDO_H"},
+ {"MIC BIAS Internal1", NULL, "vdd-micbias"},
+ {"MIC BIAS Internal2", NULL, "vdd-micbias"},
+ {"MIC BIAS External1", NULL, "vdd-micbias"},
+ {"MIC BIAS External2", NULL, "vdd-micbias"},
+};
+
+static const struct snd_soc_dapm_widget pm8916_wcd_analog_dapm_widgets[] = {
+
+ SND_SOC_DAPM_AIF_IN("PDM_RX1", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PDM_RX2", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PDM_RX3", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("PDM_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
+
+ SND_SOC_DAPM_INPUT("AMIC1"),
+ SND_SOC_DAPM_INPUT("AMIC3"),
+ SND_SOC_DAPM_INPUT("AMIC2"),
+ SND_SOC_DAPM_OUTPUT("HEADPHONE"),
+
+ /* RX stuff */
+ SND_SOC_DAPM_SUPPLY("INT_LDO_H", SND_SOC_NOPM, 1, 0, NULL, 0),
+
+ SND_SOC_DAPM_PGA("HPHL PA", CDC_A_RX_HPH_CNP_EN, 5, 0, NULL, 0),
+ SND_SOC_DAPM_MUX("HPHL", SND_SOC_NOPM, 0, 0, &hphl_mux),
+ SND_SOC_DAPM_MIXER("HPHL DAC", CDC_A_RX_HPH_L_PA_DAC_CTL, 3, 0, NULL,
+ 0),
+ SND_SOC_DAPM_PGA("HPHR PA", CDC_A_RX_HPH_CNP_EN, 4, 0, NULL, 0),
+ SND_SOC_DAPM_MUX("HPHR", SND_SOC_NOPM, 0, 0, &hphr_mux),
+ SND_SOC_DAPM_MIXER("HPHR DAC", CDC_A_RX_HPH_R_PA_DAC_CTL, 3, 0, NULL,
+ 0),
+ SND_SOC_DAPM_MIXER("SPK DAC", SND_SOC_NOPM, 0, 0,
+ spkr_switch, ARRAY_SIZE(spkr_switch)),
+
+ /* Speaker */
+ SND_SOC_DAPM_OUTPUT("SPK_OUT"),
+ SND_SOC_DAPM_PGA_E("SPK PA", CDC_A_SPKR_DRV_CTL,
+ 6, 0, NULL, 0,
+ pm8916_wcd_analog_enable_spk_pa,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_REGULATOR_SUPPLY("vdd-micbias", 0, 0),
+ SND_SOC_DAPM_SUPPLY("CP", CDC_A_NCP_EN, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("DAC_REF", CDC_A_RX_COM_BIAS_DAC, 0, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("RX_BIAS", CDC_A_RX_COM_BIAS_DAC, 7, 0, NULL, 0),
+
+ /* TX */
+ SND_SOC_DAPM_SUPPLY("MIC BIAS Internal1", CDC_A_MICB_1_EN, 7, 0,
+ pm8916_wcd_analog_enable_micbias_int1,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("MIC BIAS Internal2", CDC_A_MICB_2_EN, 7, 0,
+ pm8916_wcd_analog_enable_micbias_int2,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_SUPPLY("MIC BIAS External1", CDC_A_MICB_1_EN, 7, 0,
+ pm8916_wcd_analog_enable_micbias_ext1,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("MIC BIAS External2", CDC_A_MICB_2_EN, 7, 0,
+ pm8916_wcd_analog_enable_micbias_ext2,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_ADC_E("ADC1", NULL, CDC_A_TX_1_EN, 7, 0,
+ pm8916_wcd_analog_enable_adc,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_ADC_E("ADC2_INP2", NULL, CDC_A_TX_2_EN, 7, 0,
+ pm8916_wcd_analog_enable_adc,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_ADC_E("ADC2_INP3", NULL, CDC_A_TX_3_EN, 7, 0,
+ pm8916_wcd_analog_enable_adc,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MIXER("ADC2", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("ADC3", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_MUX("ADC2 MUX", SND_SOC_NOPM, 0, 0, &tx_adc2_mux),
+ SND_SOC_DAPM_MUX("RDAC2 MUX", SND_SOC_NOPM, 0, 0, &rdac2_mux),
+
+ /* Analog path clocks */
+ SND_SOC_DAPM_SUPPLY("EAR_HPHR_CLK", CDC_D_CDC_ANA_CLK_CTL, 0, 0, NULL,
+ 0),
+ SND_SOC_DAPM_SUPPLY("EAR_HPHL_CLK", CDC_D_CDC_ANA_CLK_CTL, 1, 0, NULL,
+ 0),
+ SND_SOC_DAPM_SUPPLY("SPKR_CLK", CDC_D_CDC_ANA_CLK_CTL, 4, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("TXA_CLK25", CDC_D_CDC_ANA_CLK_CTL, 5, 0, NULL, 0),
+
+ /* Digital path clocks */
+
+ SND_SOC_DAPM_SUPPLY("RXD1_CLK", CDC_D_CDC_DIG_CLK_CTL, 0, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("RXD2_CLK", CDC_D_CDC_DIG_CLK_CTL, 1, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("RXD3_CLK", CDC_D_CDC_DIG_CLK_CTL, 2, 0, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("TXD_CLK", CDC_D_CDC_DIG_CLK_CTL, 4, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("NCP_CLK", CDC_D_CDC_DIG_CLK_CTL, 6, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("RXD_PDM_CLK", CDC_D_CDC_DIG_CLK_CTL, 7, 0, NULL,
+ 0),
+
+ /* System Clock source */
+ SND_SOC_DAPM_SUPPLY("A_MCLK", CDC_D_CDC_TOP_CLK_CTL, 2, 0, NULL, 0),
+ /* TX ADC and RX DAC Clock source. */
+ SND_SOC_DAPM_SUPPLY("A_MCLK2", CDC_D_CDC_TOP_CLK_CTL, 3, 0, NULL, 0),
+};
+
+static struct regmap *pm8916_get_regmap(struct device *dev)
+{
+ return dev_get_regmap(dev->parent, NULL);
+}
+
+static int pm8916_wcd_analog_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ snd_soc_update_bits(dai->codec, CDC_D_CDC_RST_CTL,
+ RST_CTL_DIG_SW_RST_N_MASK,
+ RST_CTL_DIG_SW_RST_N_REMOVE_RESET);
+
+ return 0;
+}
+
+static void pm8916_wcd_analog_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ snd_soc_update_bits(dai->codec, CDC_D_CDC_RST_CTL,
+ RST_CTL_DIG_SW_RST_N_MASK, 0);
+}
+
+static struct snd_soc_dai_ops pm8916_wcd_analog_dai_ops = {
+ .startup = pm8916_wcd_analog_startup,
+ .shutdown = pm8916_wcd_analog_shutdown,
+};
+
+static struct snd_soc_dai_driver pm8916_wcd_analog_dai[] = {
+ [0] = {
+ .name = "pm8916_wcd_analog_pdm_rx",
+ .id = 0,
+ .playback = {
+ .stream_name = "PDM Playback",
+ .rates = MSM8916_WCD_ANALOG_RATES,
+ .formats = MSM8916_WCD_ANALOG_FORMATS,
+ .channels_min = 1,
+ .channels_max = 3,
+ },
+ .ops = &pm8916_wcd_analog_dai_ops,
+ },
+ [1] = {
+ .name = "pm8916_wcd_analog_pdm_tx",
+ .id = 1,
+ .capture = {
+ .stream_name = "PDM Capture",
+ .rates = MSM8916_WCD_ANALOG_RATES,
+ .formats = MSM8916_WCD_ANALOG_FORMATS,
+ .channels_min = 1,
+ .channels_max = 4,
+ },
+ .ops = &pm8916_wcd_analog_dai_ops,
+ },
+};
+
+static struct snd_soc_codec_driver pm8916_wcd_analog = {
+ .probe = pm8916_wcd_analog_probe,
+ .remove = pm8916_wcd_analog_remove,
+ .get_regmap = pm8916_get_regmap,
+ .component_driver = {
+ .controls = pm8916_wcd_analog_snd_controls,
+ .num_controls = ARRAY_SIZE(pm8916_wcd_analog_snd_controls),
+ .dapm_widgets = pm8916_wcd_analog_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(pm8916_wcd_analog_dapm_widgets),
+ .dapm_routes = pm8916_wcd_analog_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(pm8916_wcd_analog_audio_map),
+ },
+};
+
+static int pm8916_wcd_analog_parse_dt(struct device *dev,
+ struct pm8916_wcd_analog_priv *priv)
+{
+
+ if (of_property_read_bool(dev->of_node, "qcom,micbias1-ext-cap"))
+ priv->micbias1_cap_mode = MICB_1_EN_EXT_BYP_CAP;
+ else
+ priv->micbias1_cap_mode = MICB_1_EN_NO_EXT_BYP_CAP;
+
+ if (of_property_read_bool(dev->of_node, "qcom,micbias2-ext-cap"))
+ priv->micbias2_cap_mode = MICB_1_EN_EXT_BYP_CAP;
+ else
+ priv->micbias2_cap_mode = MICB_1_EN_NO_EXT_BYP_CAP;
+
+ return 0;
+}
+
+static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
+{
+ struct pm8916_wcd_analog_priv *priv;
+ struct device *dev = &pdev->dev;
+ int ret, i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = pm8916_wcd_analog_parse_dt(dev, priv);
+ if (ret < 0)
+ return ret;
+
+ priv->mclk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(priv->mclk)) {
+ dev_err(dev, "failed to get mclk\n");
+ return PTR_ERR(priv->mclk);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(supply_names); i++)
+ priv->supplies[i].supply = supply_names[i];
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret) {
+ dev_err(dev, "Failed to get regulator supplies %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->mclk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable mclk %d\n", ret);
+ return ret;
+ }
+
+ dev_set_drvdata(dev, priv);
+
+ return snd_soc_register_codec(dev, &pm8916_wcd_analog,
+ pm8916_wcd_analog_dai,
+ ARRAY_SIZE(pm8916_wcd_analog_dai));
+}
+
+static int pm8916_wcd_analog_spmi_remove(struct platform_device *pdev)
+{
+ struct pm8916_wcd_analog_priv *priv = dev_get_drvdata(&pdev->dev);
+
+ snd_soc_unregister_codec(&pdev->dev);
+ clk_disable_unprepare(priv->mclk);
+
+ return 0;
+}
+
+static const struct of_device_id pm8916_wcd_analog_spmi_match_table[] = {
+ { .compatible = "qcom,pm8916-wcd-analog-codec", },
+ { }
+};
+
+static struct platform_driver pm8916_wcd_analog_spmi_driver = {
+ .driver = {
+ .name = "qcom,pm8916-wcd-spmi-codec",
+ .of_match_table = pm8916_wcd_analog_spmi_match_table,
+ },
+ .probe = pm8916_wcd_analog_spmi_probe,
+ .remove = pm8916_wcd_analog_spmi_remove,
+};
+
+module_platform_driver(pm8916_wcd_analog_spmi_driver);
+
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
+MODULE_DESCRIPTION("PMIC PM8916 WCD Analog Codec driver");
+MODULE_LICENSE("GPL v2");
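
The DAPM routes and widgets in the new driver express power dependencies rather than sequencing them by hand: for example "HPHL PA" lists "EAR_HPHL_CLK", "CP" and "RX_BIAS" among its inputs, so powering the headphone PA pulls those supplies up first. The toy below only illustrates that "supplies before their consumers" idea; it is not the real DAPM graph walk, which also de-duplicates shared supplies and orders widgets by sequence number:

  #include <stdio.h>

  struct widget {
          const char *name;
          const struct widget *supplies[4]; /* NULL-terminated list */
  };

  static void power_up(const struct widget *w)
  {
          for (int i = 0; w->supplies[i]; i++)
                  power_up(w->supplies[i]);  /* supplies come up first */
          printf("enable %s\n", w->name);
  }

  int main(void)
  {
          static const struct widget hphl_clk = { "EAR_HPHL_CLK", { 0 } };
          static const struct widget cp       = { "CP (charge pump)", { 0 } };
          static const struct widget rx_bias  = { "RX_BIAS", { 0 } };
          static const struct widget hphl_pa  = {
                  "HPHL PA", { &hphl_clk, &cp, &rx_bias, 0 }
          };

          power_up(&hphl_pa);  /* clock, charge pump and bias, then the PA */
          return 0;
  }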
diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c
new file mode 100644
index 000000000000..f690442af8c9
--- /dev/null
+++ b/sound/soc/codecs/msm8916-wcd-digital.c
@@ -0,0 +1,923 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <sound/soc.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/tlv.h>
+
+#define LPASS_CDC_CLK_RX_RESET_CTL (0x000)
+#define LPASS_CDC_CLK_TX_RESET_B1_CTL (0x004)
+#define CLK_RX_RESET_B1_CTL_TX1_RESET_MASK BIT(0)
+#define CLK_RX_RESET_B1_CTL_TX2_RESET_MASK BIT(1)
+#define LPASS_CDC_CLK_DMIC_B1_CTL (0x008)
+#define DMIC_B1_CTL_DMIC0_CLK_SEL_MASK GENMASK(3, 1)
+#define DMIC_B1_CTL_DMIC0_CLK_SEL_DIV2 (0x0 << 1)
+#define DMIC_B1_CTL_DMIC0_CLK_SEL_DIV3 (0x1 << 1)
+#define DMIC_B1_CTL_DMIC0_CLK_SEL_DIV4 (0x2 << 1)
+#define DMIC_B1_CTL_DMIC0_CLK_SEL_DIV6 (0x3 << 1)
+#define DMIC_B1_CTL_DMIC0_CLK_SEL_DIV16 (0x4 << 1)
+#define DMIC_B1_CTL_DMIC0_CLK_EN_MASK BIT(0)
+#define DMIC_B1_CTL_DMIC0_CLK_EN_ENABLE BIT(0)
+
+#define LPASS_CDC_CLK_RX_I2S_CTL (0x00C)
+#define RX_I2S_CTL_RX_I2S_MODE_MASK BIT(5)
+#define RX_I2S_CTL_RX_I2S_MODE_16 BIT(5)
+#define RX_I2S_CTL_RX_I2S_MODE_32 0
+#define RX_I2S_CTL_RX_I2S_FS_RATE_MASK GENMASK(2, 0)
+#define RX_I2S_CTL_RX_I2S_FS_RATE_F_8_KHZ 0x0
+#define RX_I2S_CTL_RX_I2S_FS_RATE_F_16_KHZ 0x1
+#define RX_I2S_CTL_RX_I2S_FS_RATE_F_32_KHZ 0x2
+#define RX_I2S_CTL_RX_I2S_FS_RATE_F_48_KHZ 0x3
+#define RX_I2S_CTL_RX_I2S_FS_RATE_F_96_KHZ 0x4
+#define RX_I2S_CTL_RX_I2S_FS_RATE_F_192_KHZ 0x5
+#define LPASS_CDC_CLK_TX_I2S_CTL (0x010)
+#define TX_I2S_CTL_TX_I2S_MODE_MASK BIT(5)
+#define TX_I2S_CTL_TX_I2S_MODE_16 BIT(5)
+#define TX_I2S_CTL_TX_I2S_MODE_32 0
+#define TX_I2S_CTL_TX_I2S_FS_RATE_MASK GENMASK(2, 0)
+#define TX_I2S_CTL_TX_I2S_FS_RATE_F_8_KHZ 0x0
+#define TX_I2S_CTL_TX_I2S_FS_RATE_F_16_KHZ 0x1
+#define TX_I2S_CTL_TX_I2S_FS_RATE_F_32_KHZ 0x2
+#define TX_I2S_CTL_TX_I2S_FS_RATE_F_48_KHZ 0x3
+#define TX_I2S_CTL_TX_I2S_FS_RATE_F_96_KHZ 0x4
+#define TX_I2S_CTL_TX_I2S_FS_RATE_F_192_KHZ 0x5
+
+#define LPASS_CDC_CLK_OTHR_RESET_B1_CTL (0x014)
+#define LPASS_CDC_CLK_TX_CLK_EN_B1_CTL (0x018)
+#define LPASS_CDC_CLK_OTHR_CTL (0x01C)
+#define LPASS_CDC_CLK_RX_B1_CTL (0x020)
+#define LPASS_CDC_CLK_MCLK_CTL (0x024)
+#define MCLK_CTL_MCLK_EN_MASK BIT(0)
+#define MCLK_CTL_MCLK_EN_ENABLE BIT(0)
+#define MCLK_CTL_MCLK_EN_DISABLE 0
+#define LPASS_CDC_CLK_PDM_CTL (0x028)
+#define LPASS_CDC_CLK_PDM_CTL_PDM_EN_MASK BIT(0)
+#define LPASS_CDC_CLK_PDM_CTL_PDM_EN BIT(0)
+#define LPASS_CDC_CLK_PDM_CTL_PDM_CLK_SEL_MASK BIT(1)
+#define LPASS_CDC_CLK_PDM_CTL_PDM_CLK_SEL_FB BIT(1)
+#define LPASS_CDC_CLK_PDM_CTL_PDM_CLK_PDM_CLK 0
+
+#define LPASS_CDC_CLK_SD_CTL (0x02C)
+#define LPASS_CDC_RX1_B1_CTL (0x040)
+#define LPASS_CDC_RX2_B1_CTL (0x060)
+#define LPASS_CDC_RX3_B1_CTL (0x080)
+#define LPASS_CDC_RX1_B2_CTL (0x044)
+#define LPASS_CDC_RX2_B2_CTL (0x064)
+#define LPASS_CDC_RX3_B2_CTL (0x084)
+#define LPASS_CDC_RX1_B3_CTL (0x048)
+#define LPASS_CDC_RX2_B3_CTL (0x068)
+#define LPASS_CDC_RX3_B3_CTL (0x088)
+#define LPASS_CDC_RX1_B4_CTL (0x04C)
+#define LPASS_CDC_RX2_B4_CTL (0x06C)
+#define LPASS_CDC_RX3_B4_CTL (0x08C)
+#define LPASS_CDC_RX1_B5_CTL (0x050)
+#define LPASS_CDC_RX2_B5_CTL (0x070)
+#define LPASS_CDC_RX3_B5_CTL (0x090)
+#define LPASS_CDC_RX1_B6_CTL (0x054)
+#define RXn_B6_CTL_MUTE_MASK BIT(0)
+#define RXn_B6_CTL_MUTE_ENABLE BIT(0)
+#define RXn_B6_CTL_MUTE_DISABLE 0
+#define LPASS_CDC_RX2_B6_CTL (0x074)
+#define LPASS_CDC_RX3_B6_CTL (0x094)
+#define LPASS_CDC_RX1_VOL_CTL_B1_CTL (0x058)
+#define LPASS_CDC_RX2_VOL_CTL_B1_CTL (0x078)
+#define LPASS_CDC_RX3_VOL_CTL_B1_CTL (0x098)
+#define LPASS_CDC_RX1_VOL_CTL_B2_CTL (0x05C)
+#define LPASS_CDC_RX2_VOL_CTL_B2_CTL (0x07C)
+#define LPASS_CDC_RX3_VOL_CTL_B2_CTL (0x09C)
+#define LPASS_CDC_TOP_GAIN_UPDATE (0x0A0)
+#define LPASS_CDC_TOP_CTL (0x0A4)
+#define TOP_CTL_DIG_MCLK_FREQ_MASK BIT(0)
+#define TOP_CTL_DIG_MCLK_FREQ_F_12_288MHZ 0
+#define TOP_CTL_DIG_MCLK_FREQ_F_9_6MHZ BIT(0)
+
+#define LPASS_CDC_DEBUG_DESER1_CTL (0x0E0)
+#define LPASS_CDC_DEBUG_DESER2_CTL (0x0E4)
+#define LPASS_CDC_DEBUG_B1_CTL_CFG (0x0E8)
+#define LPASS_CDC_DEBUG_B2_CTL_CFG (0x0EC)
+#define LPASS_CDC_DEBUG_B3_CTL_CFG (0x0F0)
+#define LPASS_CDC_IIR1_GAIN_B1_CTL (0x100)
+#define LPASS_CDC_IIR2_GAIN_B1_CTL (0x140)
+#define LPASS_CDC_IIR1_GAIN_B2_CTL (0x104)
+#define LPASS_CDC_IIR2_GAIN_B2_CTL (0x144)
+#define LPASS_CDC_IIR1_GAIN_B3_CTL (0x108)
+#define LPASS_CDC_IIR2_GAIN_B3_CTL (0x148)
+#define LPASS_CDC_IIR1_GAIN_B4_CTL (0x10C)
+#define LPASS_CDC_IIR2_GAIN_B4_CTL (0x14C)
+#define LPASS_CDC_IIR1_GAIN_B5_CTL (0x110)
+#define LPASS_CDC_IIR2_GAIN_B5_CTL (0x150)
+#define LPASS_CDC_IIR1_GAIN_B6_CTL (0x114)
+#define LPASS_CDC_IIR2_GAIN_B6_CTL (0x154)
+#define LPASS_CDC_IIR1_GAIN_B7_CTL (0x118)
+#define LPASS_CDC_IIR2_GAIN_B7_CTL (0x158)
+#define LPASS_CDC_IIR1_GAIN_B8_CTL (0x11C)
+#define LPASS_CDC_IIR2_GAIN_B8_CTL (0x15C)
+#define LPASS_CDC_IIR1_CTL (0x120)
+#define LPASS_CDC_IIR2_CTL (0x160)
+#define LPASS_CDC_IIR1_GAIN_TIMER_CTL (0x124)
+#define LPASS_CDC_IIR2_GAIN_TIMER_CTL (0x164)
+#define LPASS_CDC_IIR1_COEF_B1_CTL (0x128)
+#define LPASS_CDC_IIR2_COEF_B1_CTL (0x168)
+#define LPASS_CDC_IIR1_COEF_B2_CTL (0x12C)
+#define LPASS_CDC_IIR2_COEF_B2_CTL (0x16C)
+#define LPASS_CDC_CONN_RX1_B1_CTL (0x180)
+#define LPASS_CDC_CONN_RX1_B2_CTL (0x184)
+#define LPASS_CDC_CONN_RX1_B3_CTL (0x188)
+#define LPASS_CDC_CONN_RX2_B1_CTL (0x18C)
+#define LPASS_CDC_CONN_RX2_B2_CTL (0x190)
+#define LPASS_CDC_CONN_RX2_B3_CTL (0x194)
+#define LPASS_CDC_CONN_RX3_B1_CTL (0x198)
+#define LPASS_CDC_CONN_RX3_B2_CTL (0x19C)
+#define LPASS_CDC_CONN_TX_B1_CTL (0x1A0)
+#define LPASS_CDC_CONN_EQ1_B1_CTL (0x1A8)
+#define LPASS_CDC_CONN_EQ1_B2_CTL (0x1AC)
+#define LPASS_CDC_CONN_EQ1_B3_CTL (0x1B0)
+#define LPASS_CDC_CONN_EQ1_B4_CTL (0x1B4)
+#define LPASS_CDC_CONN_EQ2_B1_CTL (0x1B8)
+#define LPASS_CDC_CONN_EQ2_B2_CTL (0x1BC)
+#define LPASS_CDC_CONN_EQ2_B3_CTL (0x1C0)
+#define LPASS_CDC_CONN_EQ2_B4_CTL (0x1C4)
+#define LPASS_CDC_CONN_TX_I2S_SD1_CTL (0x1C8)
+#define LPASS_CDC_TX1_VOL_CTL_TIMER (0x280)
+#define LPASS_CDC_TX2_VOL_CTL_TIMER (0x2A0)
+#define LPASS_CDC_TX1_VOL_CTL_GAIN (0x284)
+#define LPASS_CDC_TX2_VOL_CTL_GAIN (0x2A4)
+#define LPASS_CDC_TX1_VOL_CTL_CFG (0x288)
+#define TX_VOL_CTL_CFG_MUTE_EN_MASK BIT(0)
+#define TX_VOL_CTL_CFG_MUTE_EN_ENABLE BIT(0)
+
+#define LPASS_CDC_TX2_VOL_CTL_CFG (0x2A8)
+#define LPASS_CDC_TX1_MUX_CTL (0x28C)
+#define TX_MUX_CTL_CUT_OFF_FREQ_MASK GENMASK(5, 4)
+#define TX_MUX_CTL_CUT_OFF_FREQ_SHIFT 4
+#define TX_MUX_CTL_CF_NEG_3DB_4HZ (0x0 << 4)
+#define TX_MUX_CTL_CF_NEG_3DB_75HZ (0x1 << 4)
+#define TX_MUX_CTL_CF_NEG_3DB_150HZ (0x2 << 4)
+#define TX_MUX_CTL_HPF_BP_SEL_MASK BIT(3)
+#define TX_MUX_CTL_HPF_BP_SEL_BYPASS BIT(3)
+#define TX_MUX_CTL_HPF_BP_SEL_NO_BYPASS 0
+
+#define LPASS_CDC_TX2_MUX_CTL (0x2AC)
+#define LPASS_CDC_TX1_CLK_FS_CTL (0x290)
+#define LPASS_CDC_TX2_CLK_FS_CTL (0x2B0)
+#define LPASS_CDC_TX1_DMIC_CTL (0x294)
+#define LPASS_CDC_TX2_DMIC_CTL (0x2B4)
+#define TXN_DMIC_CTL_CLK_SEL_MASK GENMASK(2, 0)
+#define TXN_DMIC_CTL_CLK_SEL_DIV2 0x0
+#define TXN_DMIC_CTL_CLK_SEL_DIV3 0x1
+#define TXN_DMIC_CTL_CLK_SEL_DIV4 0x2
+#define TXN_DMIC_CTL_CLK_SEL_DIV6 0x3
+#define TXN_DMIC_CTL_CLK_SEL_DIV16 0x4
+
+#define MSM8916_WCD_DIGITAL_RATES (SNDRV_PCM_RATE_8000 | \
+ SNDRV_PCM_RATE_16000 | \
+ SNDRV_PCM_RATE_32000 | \
+ SNDRV_PCM_RATE_48000)
+#define MSM8916_WCD_DIGITAL_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+struct msm8916_wcd_digital_priv {
+ struct clk *ahbclk, *mclk;
+};
+
+static const unsigned long rx_gain_reg[] = {
+ LPASS_CDC_RX1_VOL_CTL_B2_CTL,
+ LPASS_CDC_RX2_VOL_CTL_B2_CTL,
+ LPASS_CDC_RX3_VOL_CTL_B2_CTL,
+};
+
+static const unsigned long tx_gain_reg[] = {
+ LPASS_CDC_TX1_VOL_CTL_GAIN,
+ LPASS_CDC_TX2_VOL_CTL_GAIN,
+};
+
+static const char *const rx_mix1_text[] = {
+ "ZERO", "IIR1", "IIR2", "RX1", "RX2", "RX3"
+};
+
+static const char *const dec_mux_text[] = {
+ "ZERO", "ADC1", "ADC2", "ADC3", "DMIC1", "DMIC2"
+};
+static const char *const rx_mix2_text[] = { "ZERO", "IIR1", "IIR2" };
+static const char *const adc2_mux_text[] = { "ZERO", "INP2", "INP3" };
+
+/* RX1 MIX1 */
+static const struct soc_enum rx_mix1_inp_enum[] = {
+ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX1_B1_CTL, 0, 6, rx_mix1_text),
+ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX1_B1_CTL, 3, 6, rx_mix1_text),
+ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX1_B2_CTL, 0, 6, rx_mix1_text),
+};
+
+/* RX1 MIX2 */
+static const struct soc_enum rx_mix2_inp1_chain_enum = SOC_ENUM_SINGLE(
+ LPASS_CDC_CONN_RX1_B3_CTL, 0, 3, rx_mix2_text);
+
+/* RX2 MIX1 */
+static const struct soc_enum rx2_mix1_inp_enum[] = {
+ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 0, 6, rx_mix1_text),
+ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 3, 6, rx_mix1_text),
+ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 0, 6, rx_mix1_text),
+};
+
+/* RX2 MIX2 */
+static const struct soc_enum rx2_mix2_inp1_chain_enum = SOC_ENUM_SINGLE(
+ LPASS_CDC_CONN_RX2_B3_CTL, 0, 3, rx_mix2_text);
+
+/* RX3 MIX1 */
+static const struct soc_enum rx3_mix1_inp_enum[] = {
+ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 0, 6, rx_mix1_text),
+ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 3, 6, rx_mix1_text),
+ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 0, 6, rx_mix1_text),
+};
+
+/* DEC */
+static const struct soc_enum dec1_mux_enum = SOC_ENUM_SINGLE(
+ LPASS_CDC_CONN_TX_B1_CTL, 0, 6, dec_mux_text);
+static const struct soc_enum dec2_mux_enum = SOC_ENUM_SINGLE(
+ LPASS_CDC_CONN_TX_B1_CTL, 3, 6, dec_mux_text);
+
+/* RDAC2 MUX */
+static const struct snd_kcontrol_new dec1_mux = SOC_DAPM_ENUM(
+ "DEC1 MUX Mux", dec1_mux_enum);
+static const struct snd_kcontrol_new dec2_mux = SOC_DAPM_ENUM(
+ "DEC2 MUX Mux", dec2_mux_enum);
+static const struct snd_kcontrol_new rx_mix1_inp1_mux = SOC_DAPM_ENUM(
+ "RX1 MIX1 INP1 Mux", rx_mix1_inp_enum[0]);
+static const struct snd_kcontrol_new rx_mix1_inp2_mux = SOC_DAPM_ENUM(
+ "RX1 MIX1 INP2 Mux", rx_mix1_inp_enum[1]);
+static const struct snd_kcontrol_new rx_mix1_inp3_mux = SOC_DAPM_ENUM(
+ "RX1 MIX1 INP3 Mux", rx_mix1_inp_enum[2]);
+static const struct snd_kcontrol_new rx2_mix1_inp1_mux = SOC_DAPM_ENUM(
+ "RX2 MIX1 INP1 Mux", rx2_mix1_inp_enum[0]);
+static const struct snd_kcontrol_new rx2_mix1_inp2_mux = SOC_DAPM_ENUM(
+ "RX2 MIX1 INP2 Mux", rx2_mix1_inp_enum[1]);
+static const struct snd_kcontrol_new rx2_mix1_inp3_mux = SOC_DAPM_ENUM(
+ "RX2 MIX1 INP3 Mux", rx2_mix1_inp_enum[2]);
+static const struct snd_kcontrol_new rx3_mix1_inp1_mux = SOC_DAPM_ENUM(
+ "RX3 MIX1 INP1 Mux", rx3_mix1_inp_enum[0]);
+static const struct snd_kcontrol_new rx3_mix1_inp2_mux = SOC_DAPM_ENUM(
+ "RX3 MIX1 INP2 Mux", rx3_mix1_inp_enum[1]);
+static const struct snd_kcontrol_new rx3_mix1_inp3_mux = SOC_DAPM_ENUM(
+ "RX3 MIX1 INP3 Mux", rx3_mix1_inp_enum[2]);
+
+/* Digital Gain control -38.4 dB to +38.4 dB in 0.3 dB steps */
+static const DECLARE_TLV_DB_SCALE(digital_gain, -3840, 30, 0);
+
+/* Cutoff Freq for High Pass Filter at -3dB */
+static const char * const hpf_cutoff_text[] = {
+ "4Hz", "75Hz", "150Hz",
+};
+
+static SOC_ENUM_SINGLE_DECL(tx1_hpf_cutoff_enum, LPASS_CDC_TX1_MUX_CTL, 4,
+ hpf_cutoff_text);
+static SOC_ENUM_SINGLE_DECL(tx2_hpf_cutoff_enum, LPASS_CDC_TX2_MUX_CTL, 4,
+ hpf_cutoff_text);
+
+/* cut off for dc blocker inside rx chain */
+static const char * const dc_blocker_cutoff_text[] = {
+ "4Hz", "75Hz", "150Hz",
+};
+
+static SOC_ENUM_SINGLE_DECL(rx1_dcb_cutoff_enum, LPASS_CDC_RX1_B4_CTL, 0,
+ dc_blocker_cutoff_text);
+static SOC_ENUM_SINGLE_DECL(rx2_dcb_cutoff_enum, LPASS_CDC_RX2_B4_CTL, 0,
+ dc_blocker_cutoff_text);
+static SOC_ENUM_SINGLE_DECL(rx3_dcb_cutoff_enum, LPASS_CDC_RX3_B4_CTL, 0,
+ dc_blocker_cutoff_text);
+
+static const struct snd_kcontrol_new msm8916_wcd_digital_snd_controls[] = {
+ SOC_SINGLE_S8_TLV("RX1 Digital Volume", LPASS_CDC_RX1_VOL_CTL_B2_CTL,
+ -128, 127, digital_gain),
+ SOC_SINGLE_S8_TLV("RX2 Digital Volume", LPASS_CDC_RX2_VOL_CTL_B2_CTL,
+ -128, 127, digital_gain),
+ SOC_SINGLE_S8_TLV("RX3 Digital Volume", LPASS_CDC_RX3_VOL_CTL_B2_CTL,
+ -128, 127, digital_gain),
+ SOC_SINGLE_S8_TLV("TX1 Digital Volume", LPASS_CDC_TX1_VOL_CTL_GAIN,
+ -128, 127, digital_gain),
+ SOC_SINGLE_S8_TLV("TX2 Digital Volume", LPASS_CDC_TX2_VOL_CTL_GAIN,
+ -128, 127, digital_gain),
+ SOC_ENUM("TX1 HPF Cutoff", tx1_hpf_cutoff_enum),
+ SOC_ENUM("TX2 HPF Cutoff", tx2_hpf_cutoff_enum),
+ SOC_SINGLE("TX1 HPF Switch", LPASS_CDC_TX1_MUX_CTL, 3, 1, 0),
+ SOC_SINGLE("TX2 HPF Switch", LPASS_CDC_TX2_MUX_CTL, 3, 1, 0),
+ SOC_ENUM("RX1 DCB Cutoff", rx1_dcb_cutoff_enum),
+ SOC_ENUM("RX2 DCB Cutoff", rx2_dcb_cutoff_enum),
+ SOC_ENUM("RX3 DCB Cutoff", rx3_dcb_cutoff_enum),
+ SOC_SINGLE("RX1 DCB Switch", LPASS_CDC_RX1_B5_CTL, 2, 1, 0),
+ SOC_SINGLE("RX2 DCB Switch", LPASS_CDC_RX2_B5_CTL, 2, 1, 0),
+ SOC_SINGLE("RX3 DCB Switch", LPASS_CDC_RX3_B5_CTL, 2, 1, 0),
+ SOC_SINGLE("RX1 Mute Switch", LPASS_CDC_RX1_B6_CTL, 0, 1, 0),
+ SOC_SINGLE("RX2 Mute Switch", LPASS_CDC_RX2_B6_CTL, 0, 1, 0),
+ SOC_SINGLE("RX3 Mute Switch", LPASS_CDC_RX3_B6_CTL, 0, 1, 0),
+};
+
+static int msm8916_wcd_digital_enable_interpolator(
+ struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ /* apply the digital gain after the interpolator is enabled */
+ usleep_range(10000, 10100);
+ snd_soc_write(codec, rx_gain_reg[w->shift],
+ snd_soc_read(codec, rx_gain_reg[w->shift]));
+ break;
+ }
+ return 0;
+}
+
+static int msm8916_wcd_digital_enable_dec(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ unsigned int decimator = w->shift + 1;
+ u16 dec_reset_reg, tx_vol_ctl_reg, tx_mux_ctl_reg;
+ u8 dec_hpf_cutoff_freq;
+
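+ /*
+ * Per-decimator TX registers sit in 32-byte blocks, so e.g.
+ * decimator 2 resolves to the matching LPASS_CDC_TX2_* registers.
+ */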
+ dec_reset_reg = LPASS_CDC_CLK_TX_RESET_B1_CTL;
+ tx_vol_ctl_reg = LPASS_CDC_TX1_VOL_CTL_CFG + 32 * (decimator - 1);
+ tx_mux_ctl_reg = LPASS_CDC_TX1_MUX_CTL + 32 * (decimator - 1);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* Enable TX digital mute */
+ snd_soc_update_bits(codec, tx_vol_ctl_reg,
+ TX_VOL_CTL_CFG_MUTE_EN_MASK,
+ TX_VOL_CTL_CFG_MUTE_EN_ENABLE);
+ dec_hpf_cutoff_freq = snd_soc_read(codec, tx_mux_ctl_reg) &
+ TX_MUX_CTL_CUT_OFF_FREQ_MASK;
+ dec_hpf_cutoff_freq >>= TX_MUX_CTL_CUT_OFF_FREQ_SHIFT;
+ if (dec_hpf_cutoff_freq != TX_MUX_CTL_CF_NEG_3DB_150HZ) {
+ /* set the cutoff freq to CF_NEG_3DB_150HZ (0x1) */
+ snd_soc_update_bits(codec, tx_mux_ctl_reg,
+ TX_MUX_CTL_CUT_OFF_FREQ_MASK,
+ TX_MUX_CTL_CF_NEG_3DB_150HZ);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ /* enable HPF */
+ snd_soc_update_bits(codec, tx_mux_ctl_reg,
+ TX_MUX_CTL_HPF_BP_SEL_MASK,
+ TX_MUX_CTL_HPF_BP_SEL_NO_BYPASS);
+ /* apply the digital gain after the decimator is enabled */
+ snd_soc_write(codec, tx_gain_reg[w->shift],
+ snd_soc_read(codec, tx_gain_reg[w->shift]));
+ snd_soc_update_bits(codec, tx_vol_ctl_reg,
+ TX_VOL_CTL_CFG_MUTE_EN_MASK, 0);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_update_bits(codec, tx_vol_ctl_reg,
+ TX_VOL_CTL_CFG_MUTE_EN_MASK,
+ TX_VOL_CTL_CFG_MUTE_EN_ENABLE);
+ snd_soc_update_bits(codec, tx_mux_ctl_reg,
+ TX_MUX_CTL_HPF_BP_SEL_MASK,
+ TX_MUX_CTL_HPF_BP_SEL_BYPASS);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift,
+ 1 << w->shift);
+ snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift, 0x0);
+ snd_soc_update_bits(codec, tx_mux_ctl_reg,
+ TX_MUX_CTL_HPF_BP_SEL_MASK,
+ TX_MUX_CTL_HPF_BP_SEL_BYPASS);
+ snd_soc_update_bits(codec, tx_vol_ctl_reg,
+ TX_VOL_CTL_CFG_MUTE_EN_MASK, 0);
+ break;
+ }
+
+ return 0;
+}
+
+static int msm8916_wcd_digital_enable_dmic(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ unsigned int dmic;
+ int ret;
+ /* get dmic number out of widget name */
+ char *dmic_num = strpbrk(w->name, "12");
+
+ if (dmic_num == NULL) {
+ dev_err(codec->dev, "Invalid DMIC\n");
+ return -EINVAL;
+ }
+ ret = kstrtouint(dmic_num, 10, &dmic);
+ if (ret < 0 || dmic > 2) {
+ dev_err(codec->dev, "Invalid DMIC line on the codec\n");
+ return -EINVAL;
+ }
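+ /* e.g. widget "DMIC1" yields dmic_num "1" and dmic == 1 */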
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_update_bits(codec, LPASS_CDC_CLK_DMIC_B1_CTL,
+ DMIC_B1_CTL_DMIC0_CLK_SEL_MASK,
+ DMIC_B1_CTL_DMIC0_CLK_SEL_DIV3);
+ switch (dmic) {
+ case 1:
+ snd_soc_update_bits(codec, LPASS_CDC_TX1_DMIC_CTL,
+ TXN_DMIC_CTL_CLK_SEL_MASK,
+ TXN_DMIC_CTL_CLK_SEL_DIV3);
+ break;
+ case 2:
+ snd_soc_update_bits(codec, LPASS_CDC_TX2_DMIC_CTL,
+ TXN_DMIC_CTL_CLK_SEL_MASK,
+ TXN_DMIC_CTL_CLK_SEL_DIV3);
+ break;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget msm8916_wcd_digital_dapm_widgets[] = {
+ /* RX path */
+ SND_SOC_DAPM_AIF_IN("I2S RX1", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("I2S RX2", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("I2S RX3", NULL, 0, SND_SOC_NOPM, 0, 0),
+
+ SND_SOC_DAPM_OUTPUT("PDM_RX1"),
+ SND_SOC_DAPM_OUTPUT("PDM_RX2"),
+ SND_SOC_DAPM_OUTPUT("PDM_RX3"),
+
+ SND_SOC_DAPM_INPUT("LPASS_PDM_TX"),
+
+ SND_SOC_DAPM_MIXER("RX1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX2 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX3 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* Interpolator */
+ SND_SOC_DAPM_MIXER_E("RX1 INT", LPASS_CDC_CLK_RX_B1_CTL, 0, 0, NULL,
+ 0, msm8916_wcd_digital_enable_interpolator,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("RX2 INT", LPASS_CDC_CLK_RX_B1_CTL, 1, 0, NULL,
+ 0, msm8916_wcd_digital_enable_interpolator,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("RX3 INT", LPASS_CDC_CLK_RX_B1_CTL, 2, 0, NULL,
+ 0, msm8916_wcd_digital_enable_interpolator,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX("RX1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+ &rx_mix1_inp1_mux),
+ SND_SOC_DAPM_MUX("RX1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+ &rx_mix1_inp2_mux),
+ SND_SOC_DAPM_MUX("RX1 MIX1 INP3", SND_SOC_NOPM, 0, 0,
+ &rx_mix1_inp3_mux),
+ SND_SOC_DAPM_MUX("RX2 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+ &rx2_mix1_inp1_mux),
+ SND_SOC_DAPM_MUX("RX2 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+ &rx2_mix1_inp2_mux),
+ SND_SOC_DAPM_MUX("RX2 MIX1 INP3", SND_SOC_NOPM, 0, 0,
+ &rx2_mix1_inp3_mux),
+ SND_SOC_DAPM_MUX("RX3 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+ &rx3_mix1_inp1_mux),
+ SND_SOC_DAPM_MUX("RX3 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+ &rx3_mix1_inp2_mux),
+ SND_SOC_DAPM_MUX("RX3 MIX1 INP3", SND_SOC_NOPM, 0, 0,
+ &rx3_mix1_inp3_mux),
+
+ /* TX */
+ SND_SOC_DAPM_MIXER("ADC1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("ADC2", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("ADC3", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_MUX_E("DEC1 MUX", LPASS_CDC_CLK_TX_CLK_EN_B1_CTL, 0, 0,
+ &dec1_mux, msm8916_wcd_digital_enable_dec,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX_E("DEC2 MUX", LPASS_CDC_CLK_TX_CLK_EN_B1_CTL, 1, 0,
+ &dec2_mux, msm8916_wcd_digital_enable_dec,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_AIF_OUT("I2S TX1", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("I2S TX2", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("I2S TX3", NULL, 0, SND_SOC_NOPM, 0, 0),
+
+ /* Digital Mic Inputs */
+ SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0,
+ msm8916_wcd_digital_enable_dmic,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_ADC_E("DMIC2", NULL, SND_SOC_NOPM, 0, 0,
+ msm8916_wcd_digital_enable_dmic,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("DMIC_CLK", LPASS_CDC_CLK_DMIC_B1_CTL, 0, 0,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY("RX_I2S_CLK", LPASS_CDC_CLK_RX_I2S_CTL,
+ 4, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("TX_I2S_CLK", LPASS_CDC_CLK_TX_I2S_CTL, 4, 0,
+ NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("MCLK", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("PDM_CLK", LPASS_CDC_CLK_PDM_CTL, 0, 0, NULL, 0),
+ /* Connectivity Clock */
+ SND_SOC_DAPM_SUPPLY_S("CDC_CONN", -2, LPASS_CDC_CLK_OTHR_CTL, 2, 0,
+ NULL, 0),
+
+};
+
+static int msm8916_wcd_digital_get_clks(struct platform_device *pdev,
+ struct msm8916_wcd_digital_priv *priv)
+{
+ struct device *dev = &pdev->dev;
+
+ priv->ahbclk = devm_clk_get(dev, "ahbix-clk");
+ if (IS_ERR(priv->ahbclk)) {
+ dev_err(dev, "failed to get ahbix clk\n");
+ return PTR_ERR(priv->ahbclk);
+ }
+
+ priv->mclk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(priv->mclk)) {
+ dev_err(dev, "failed to get mclk\n");
+ return PTR_ERR(priv->mclk);
+ }
+
+ return 0;
+}
+
+static int msm8916_wcd_digital_codec_probe(struct snd_soc_codec *codec)
+{
+ struct msm8916_wcd_digital_priv *priv = dev_get_drvdata(codec->dev);
+
+ snd_soc_codec_set_drvdata(codec, priv);
+
+ return 0;
+}
+
+static int msm8916_wcd_digital_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ u8 tx_fs_rate;
+ u8 rx_fs_rate;
+
+ switch (params_rate(params)) {
+ case 8000:
+ tx_fs_rate = TX_I2S_CTL_TX_I2S_FS_RATE_F_8_KHZ;
+ rx_fs_rate = RX_I2S_CTL_RX_I2S_FS_RATE_F_8_KHZ;
+ break;
+ case 16000:
+ tx_fs_rate = TX_I2S_CTL_TX_I2S_FS_RATE_F_16_KHZ;
+ rx_fs_rate = RX_I2S_CTL_RX_I2S_FS_RATE_F_16_KHZ;
+ break;
+ case 32000:
+ tx_fs_rate = TX_I2S_CTL_TX_I2S_FS_RATE_F_32_KHZ;
+ rx_fs_rate = RX_I2S_CTL_RX_I2S_FS_RATE_F_32_KHZ;
+ break;
+ case 48000:
+ tx_fs_rate = TX_I2S_CTL_TX_I2S_FS_RATE_F_48_KHZ;
+ rx_fs_rate = RX_I2S_CTL_RX_I2S_FS_RATE_F_48_KHZ;
+ break;
+ default:
+ dev_err(dai->codec->dev, "Invalid sampling rate %d\n",
+ params_rate(params));
+ return -EINVAL;
+ }
+
+ switch (substream->stream) {
+ case SNDRV_PCM_STREAM_CAPTURE:
+ snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_TX_I2S_CTL,
+ TX_I2S_CTL_TX_I2S_FS_RATE_MASK, tx_fs_rate);
+ break;
+ case SNDRV_PCM_STREAM_PLAYBACK:
+ snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_RX_I2S_CTL,
+ RX_I2S_CTL_RX_I2S_FS_RATE_MASK, rx_fs_rate);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_TX_I2S_CTL,
+ TX_I2S_CTL_TX_I2S_MODE_MASK,
+ TX_I2S_CTL_TX_I2S_MODE_16);
+ snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_RX_I2S_CTL,
+ RX_I2S_CTL_RX_I2S_MODE_MASK,
+ RX_I2S_CTL_RX_I2S_MODE_16);
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
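+ /* S24_LE samples are carried in 32-bit I2S slots */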
+ snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_TX_I2S_CTL,
+ TX_I2S_CTL_TX_I2S_MODE_MASK,
+ TX_I2S_CTL_TX_I2S_MODE_32);
+ snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_RX_I2S_CTL,
+ RX_I2S_CTL_RX_I2S_MODE_MASK,
+ RX_I2S_CTL_RX_I2S_MODE_32);
+ break;
+ default:
+ dev_err(dai->dev, "%s: wrong format selected\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_route msm8916_wcd_digital_audio_map[] = {
+
+ {"I2S RX1", NULL, "AIF1 Playback"},
+ {"I2S RX2", NULL, "AIF1 Playback"},
+ {"I2S RX3", NULL, "AIF1 Playback"},
+
+ {"AIF1 Capture", NULL, "I2S TX1"},
+ {"AIF1 Capture", NULL, "I2S TX2"},
+ {"AIF1 Capture", NULL, "I2S TX3"},
+
+ /* Decimator Inputs */
+ {"DEC1 MUX", "DMIC1", "DMIC1"},
+ {"DEC1 MUX", "DMIC2", "DMIC2"},
+ {"DEC1 MUX", "ADC1", "ADC1"},
+ {"DEC1 MUX", "ADC2", "ADC2"},
+ {"DEC1 MUX", "ADC3", "ADC3"},
+ {"DEC1 MUX", NULL, "CDC_CONN"},
+
+ {"DEC2 MUX", "DMIC1", "DMIC1"},
+ {"DEC2 MUX", "DMIC2", "DMIC2"},
+ {"DEC2 MUX", "ADC1", "ADC1"},
+ {"DEC2 MUX", "ADC2", "ADC2"},
+ {"DEC2 MUX", "ADC3", "ADC3"},
+ {"DEC2 MUX", NULL, "CDC_CONN"},
+
+ {"DMIC1", NULL, "DMIC_CLK"},
+ {"DMIC2", NULL, "DMIC_CLK"},
+
+ {"I2S TX1", NULL, "DEC1 MUX"},
+ {"I2S TX2", NULL, "DEC2 MUX"},
+
+ {"I2S TX1", NULL, "TX_I2S_CLK"},
+ {"I2S TX2", NULL, "TX_I2S_CLK"},
+
+ {"TX_I2S_CLK", NULL, "MCLK"},
+ {"TX_I2S_CLK", NULL, "PDM_CLK"},
+
+ {"ADC1", NULL, "LPASS_PDM_TX"},
+ {"ADC2", NULL, "LPASS_PDM_TX"},
+ {"ADC3", NULL, "LPASS_PDM_TX"},
+
+ {"I2S RX1", NULL, "RX_I2S_CLK"},
+ {"I2S RX2", NULL, "RX_I2S_CLK"},
+ {"I2S RX3", NULL, "RX_I2S_CLK"},
+
+ {"RX_I2S_CLK", NULL, "PDM_CLK"},
+ {"RX_I2S_CLK", NULL, "MCLK"},
+ {"RX_I2S_CLK", NULL, "CDC_CONN"},
+
+ /* RX1 PATH.. */
+ {"PDM_RX1", NULL, "RX1 INT"},
+ {"RX1 INT", NULL, "RX1 MIX1"},
+
+ {"RX1 MIX1", NULL, "RX1 MIX1 INP1"},
+ {"RX1 MIX1", NULL, "RX1 MIX1 INP2"},
+ {"RX1 MIX1", NULL, "RX1 MIX1 INP3"},
+
+ {"RX1 MIX1 INP1", "RX1", "I2S RX1"},
+ {"RX1 MIX1 INP1", "RX2", "I2S RX2"},
+ {"RX1 MIX1 INP1", "RX3", "I2S RX3"},
+
+ {"RX1 MIX1 INP2", "RX1", "I2S RX1"},
+ {"RX1 MIX1 INP2", "RX2", "I2S RX2"},
+ {"RX1 MIX1 INP2", "RX3", "I2S RX3"},
+
+ {"RX1 MIX1 INP3", "RX1", "I2S RX1"},
+ {"RX1 MIX1 INP3", "RX2", "I2S RX2"},
+ {"RX1 MIX1 INP3", "RX3", "I2S RX3"},
+
+ /* RX2 PATH */
+ {"PDM_RX2", NULL, "RX2 INT"},
+ {"RX2 INT", NULL, "RX2 MIX1"},
+
+ {"RX2 MIX1", NULL, "RX2 MIX1 INP1"},
+ {"RX2 MIX1", NULL, "RX2 MIX1 INP2"},
+ {"RX2 MIX1", NULL, "RX2 MIX1 INP3"},
+
+ {"RX2 MIX1 INP1", "RX1", "I2S RX1"},
+ {"RX2 MIX1 INP1", "RX2", "I2S RX2"},
+ {"RX2 MIX1 INP1", "RX3", "I2S RX3"},
+
+ {"RX2 MIX1 INP2", "RX1", "I2S RX1"},
+ {"RX2 MIX1 INP2", "RX2", "I2S RX2"},
+ {"RX2 MIX1 INP2", "RX3", "I2S RX3"},
+
+ {"RX2 MIX1 INP3", "RX1", "I2S RX1"},
+ {"RX2 MIX1 INP3", "RX2", "I2S RX2"},
+ {"RX2 MIX1 INP3", "RX3", "I2S RX3"},
+
+ /* RX3 PATH */
+ {"PDM_RX3", NULL, "RX3 INT"},
+ {"RX3 INT", NULL, "RX3 MIX1"},
+
+ {"RX3 MIX1", NULL, "RX3 MIX1 INP1"},
+ {"RX3 MIX1", NULL, "RX3 MIX1 INP2"},
+ {"RX3 MIX1", NULL, "RX3 MIX1 INP3"},
+
+ {"RX3 MIX1 INP1", "RX1", "I2S RX1"},
+ {"RX3 MIX1 INP1", "RX2", "I2S RX2"},
+ {"RX3 MIX1 INP1", "RX3", "I2S RX3"},
+
+ {"RX3 MIX1 INP2", "RX1", "I2S RX1"},
+ {"RX3 MIX1 INP2", "RX2", "I2S RX2"},
+ {"RX3 MIX1 INP2", "RX3", "I2S RX3"},
+
+ {"RX3 MIX1 INP3", "RX1", "I2S RX1"},
+ {"RX3 MIX1 INP3", "RX2", "I2S RX2"},
+ {"RX3 MIX1 INP3", "RX3", "I2S RX3"},
+
+};
+
+static int msm8916_wcd_digital_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct msm8916_wcd_digital_priv *msm8916_wcd;
+ unsigned long mclk_rate;
+
+ msm8916_wcd = snd_soc_codec_get_drvdata(codec);
+ snd_soc_update_bits(codec, LPASS_CDC_CLK_MCLK_CTL,
+ MCLK_CTL_MCLK_EN_MASK,
+ MCLK_CTL_MCLK_EN_ENABLE);
+ snd_soc_update_bits(codec, LPASS_CDC_CLK_PDM_CTL,
+ LPASS_CDC_CLK_PDM_CTL_PDM_CLK_SEL_MASK,
+ LPASS_CDC_CLK_PDM_CTL_PDM_CLK_SEL_FB);
+
+ mclk_rate = clk_get_rate(msm8916_wcd->mclk);
+ switch (mclk_rate) {
+ case 12288000:
+ snd_soc_update_bits(codec, LPASS_CDC_TOP_CTL,
+ TOP_CTL_DIG_MCLK_FREQ_MASK,
+ TOP_CTL_DIG_MCLK_FREQ_F_12_288MHZ);
+ break;
+ case 9600000:
+ snd_soc_update_bits(codec, LPASS_CDC_TOP_CTL,
+ TOP_CTL_DIG_MCLK_FREQ_MASK,
+ TOP_CTL_DIG_MCLK_FREQ_F_9_6MHZ);
+ break;
+ default:
+ dev_err(codec->dev, "Invalid mclk rate %ld\n", mclk_rate);
+ break;
+ }
+ return 0;
+}
+
+static void msm8916_wcd_digital_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_PDM_CTL,
+ LPASS_CDC_CLK_PDM_CTL_PDM_CLK_SEL_MASK, 0);
+}
+
+static struct snd_soc_dai_ops msm8916_wcd_digital_dai_ops = {
+ .startup = msm8916_wcd_digital_startup,
+ .shutdown = msm8916_wcd_digital_shutdown,
+ .hw_params = msm8916_wcd_digital_hw_params,
+};
+
+static struct snd_soc_dai_driver msm8916_wcd_digital_dai[] = {
+ [0] = {
+ .name = "msm8916_wcd_digital_i2s_rx1",
+ .id = 0,
+ .playback = {
+ .stream_name = "AIF1 Playback",
+ .rates = MSM8916_WCD_DIGITAL_RATES,
+ .formats = MSM8916_WCD_DIGITAL_FORMATS,
+ .channels_min = 1,
+ .channels_max = 3,
+ },
+ .ops = &msm8916_wcd_digital_dai_ops,
+ },
+ [1] = {
+ .name = "msm8916_wcd_digital_i2s_tx1",
+ .id = 1,
+ .capture = {
+ .stream_name = "AIF1 Capture",
+ .rates = MSM8916_WCD_DIGITAL_RATES,
+ .formats = MSM8916_WCD_DIGITAL_FORMATS,
+ .channels_min = 1,
+ .channels_max = 4,
+ },
+ .ops = &msm8916_wcd_digital_dai_ops,
+ },
+};
+
+static struct snd_soc_codec_driver msm8916_wcd_digital = {
+ .probe = msm8916_wcd_digital_codec_probe,
+ .component_driver = {
+ .controls = msm8916_wcd_digital_snd_controls,
+ .num_controls = ARRAY_SIZE(msm8916_wcd_digital_snd_controls),
+ .dapm_widgets = msm8916_wcd_digital_dapm_widgets,
+ .num_dapm_widgets =
+ ARRAY_SIZE(msm8916_wcd_digital_dapm_widgets),
+ .dapm_routes = msm8916_wcd_digital_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(msm8916_wcd_digital_audio_map),
+ },
+};
+
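+/*
+ * The LPASS codec exposes 32-bit registers on a 4-byte stride, so each
+ * LPASS_CDC_* address is used directly as an MMIO byte offset.
+ */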
+static const struct regmap_config msm8916_codec_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = LPASS_CDC_TX2_DMIC_CTL,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static int msm8916_wcd_digital_probe(struct platform_device *pdev)
+{
+ struct msm8916_wcd_digital_priv *priv;
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ struct resource *mem_res;
+ struct regmap *digital_map;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ digital_map =
+ devm_regmap_init_mmio(&pdev->dev, base,
+ &msm8916_codec_regmap_config);
+ if (IS_ERR(digital_map))
+ return PTR_ERR(digital_map);
+
+ ret = msm8916_wcd_digital_get_clks(pdev, priv);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(priv->ahbclk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable ahbclk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->mclk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable mclk %d\n", ret);
+ return ret;
+ }
+
+ dev_set_drvdata(dev, priv);
+
+ return snd_soc_register_codec(dev, &msm8916_wcd_digital,
+ msm8916_wcd_digital_dai,
+ ARRAY_SIZE(msm8916_wcd_digital_dai));
+}
+
+static int msm8916_wcd_digital_remove(struct platform_device *pdev)
+{
+ struct msm8916_wcd_digital_priv *priv = dev_get_drvdata(&pdev->dev);
+
+ snd_soc_unregister_codec(&pdev->dev);
+ clk_disable_unprepare(priv->mclk);
+ clk_disable_unprepare(priv->ahbclk);
+
+ return 0;
+}
+
+static const struct of_device_id msm8916_wcd_digital_match_table[] = {
+ { .compatible = "qcom,msm8916-wcd-digital-codec" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, msm8916_wcd_digital_match_table);
+
+static struct platform_driver msm8916_wcd_digital_driver = {
+ .driver = {
+ .name = "msm8916-wcd-digital-codec",
+ .of_match_table = msm8916_wcd_digital_match_table,
+ },
+ .probe = msm8916_wcd_digital_probe,
+ .remove = msm8916_wcd_digital_remove,
+};
+
+module_platform_driver(msm8916_wcd_digital_driver);
+
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
+MODULE_DESCRIPTION("MSM8916 WCD Digital Codec driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index e643be91d762..efe3a44658d5 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -43,6 +43,8 @@
#define GAIN_AUGMENT 22500
#define SIDETONE_BASE 207000
+/* the maximum frequency of CLK_ADC and CLK_DAC */
+#define CLK_DA_AD_MAX 6144000
static int nau8825_configure_sysclk(struct nau8825 *nau8825,
int clk_id, unsigned int freq);
@@ -95,6 +97,27 @@ static const struct nau8825_fll_attr fll_pre_scalar[] = {
{ 8, 0x3 },
};
+/* over sampling rate */
+struct nau8825_osr_attr {
+ unsigned int osr;
+ unsigned int clk_src;
+};
+
+static const struct nau8825_osr_attr osr_dac_sel[] = {
+ { 64, 2 }, /* OSR 64, SRC 1/4 */
+ { 256, 0 }, /* OSR 256, SRC 1 */
+ { 128, 1 }, /* OSR 128, SRC 1/2 */
+ { 0, 0 },
+ { 32, 3 }, /* OSR 32, SRC 1/8 */
+};
+
+static const struct nau8825_osr_attr osr_adc_sel[] = {
+ { 32, 3 }, /* OSR 32, SRC 1/8 */
+ { 64, 2 }, /* OSR 64, SRC 1/4 */
+ { 128, 1 }, /* OSR 128, SRC 1/2 */
+ { 256, 0 }, /* OSR 256, SRC 1 */
+};
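+/*
+ * Both tables are indexed by the raw register field value
+ * (NAU8825_DAC_OVERSAMPLE / NAU8825_ADC_SYNC_DOWN), which is why
+ * osr_dac_sel carries an empty { 0, 0 } slot at index 3.
+ */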
+
static const struct reg_default nau8825_reg_defaults[] = {
{ NAU8825_REG_ENA_CTRL, 0x00ff },
{ NAU8825_REG_IIC_ADDR_SET, 0x0 },
@@ -1179,15 +1202,64 @@ static const struct snd_soc_dapm_route nau8825_dapm_routes[] = {
{"HPOR", NULL, "Class G"},
};
+static int nau8825_clock_check(struct nau8825 *nau8825,
+ int stream, int rate, int osr)
+{
+ int osrate;
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (osr >= ARRAY_SIZE(osr_dac_sel))
+ return -EINVAL;
+ osrate = osr_dac_sel[osr].osr;
+ } else {
+ if (osr >= ARRAY_SIZE(osr_adc_sel))
+ return -EINVAL;
+ osrate = osr_adc_sel[osr].osr;
+ }
+
+ if (!osrate || rate * osrate > CLK_DA_AD_MAX) {
+ dev_err(nau8825->dev, "exceeds the maximum frequency of CLK_ADC or CLK_DAC\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int nau8825_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
- unsigned int val_len = 0;
+ unsigned int val_len = 0, osr;
+
+ nau8825_sema_acquire(nau8825, 3 * HZ);
- nau8825_sema_acquire(nau8825, 2 * HZ);
+ /* CLK_DAC or CLK_ADC = OSR * FS
+ * DAC or ADC clock frequency is defined as Over Sampling Rate (OSR)
+ * multiplied by the audio sample rate (Fs). Note that the OSR and Fs
+ * values must be selected such that the maximum frequency is less
+ * than 6.144 MHz.
+ */
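+ /*
+ * e.g. at Fs = 48 kHz an OSR of 128 gives exactly 6.144 MHz and is
+ * accepted, while an OSR of 256 gives 12.288 MHz and is rejected by
+ * nau8825_clock_check().
+ */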
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ regmap_read(nau8825->regmap, NAU8825_REG_DAC_CTRL1, &osr);
+ osr &= NAU8825_DAC_OVERSAMPLE_MASK;
+ if (nau8825_clock_check(nau8825, substream->stream,
+ params_rate(params), osr))
+ return -EINVAL;
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
+ NAU8825_CLK_DAC_SRC_MASK,
+ osr_dac_sel[osr].clk_src << NAU8825_CLK_DAC_SRC_SFT);
+ } else {
+ regmap_read(nau8825->regmap, NAU8825_REG_ADC_RATE, &osr);
+ osr &= NAU8825_ADC_SYNC_DOWN_MASK;
+ if (nau8825_clock_check(nau8825, substream->stream,
+ params_rate(params), osr))
+ return -EINVAL;
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
+ NAU8825_CLK_ADC_SRC_MASK,
+ osr_adc_sel[osr].clk_src << NAU8825_CLK_ADC_SRC_SFT);
+ }
switch (params_width(params)) {
case 16:
@@ -1221,7 +1293,7 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
unsigned int ctrl1_val = 0, ctrl2_val = 0;
- nau8825_sema_acquire(nau8825, 2 * HZ);
+ nau8825_sema_acquire(nau8825, 3 * HZ);
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
@@ -1774,9 +1846,10 @@ static void nau8825_init_regs(struct nau8825 *nau8825)
* (audible hiss). Set it to something better.
*/
regmap_update_bits(regmap, NAU8825_REG_ADC_RATE,
- NAU8825_ADC_SYNC_DOWN_MASK, NAU8825_ADC_SYNC_DOWN_128);
+ NAU8825_ADC_SYNC_DOWN_MASK | NAU8825_ADC_SINC4_EN,
+ NAU8825_ADC_SYNC_DOWN_64);
regmap_update_bits(regmap, NAU8825_REG_DAC_CTRL1,
- NAU8825_DAC_OVERSAMPLE_MASK, NAU8825_DAC_OVERSAMPLE_128);
+ NAU8825_DAC_OVERSAMPLE_MASK, NAU8825_DAC_OVERSAMPLE_64);
/* Disable DACR/L power */
regmap_update_bits(regmap, NAU8825_REG_CHARGE_PUMP,
NAU8825_POWER_DOWN_DACR | NAU8825_POWER_DOWN_DACL,
@@ -1811,6 +1884,9 @@ static void nau8825_init_regs(struct nau8825 *nau8825)
NAU8825_DACL_CH_SEL_MASK, NAU8825_DACL_CH_SEL_L);
regmap_update_bits(nau8825->regmap, NAU8825_REG_DACR_CTRL,
NAU8825_DACL_CH_SEL_MASK, NAU8825_DACL_CH_SEL_R);
+ /* Disable short Frame Sync detection logic */
+ regmap_update_bits(regmap, NAU8825_REG_LEFT_TIME_SLOT,
+ NAU8825_DIS_FS_SHORT_DET, NAU8825_DIS_FS_SHORT_DET);
}
static const struct regmap_config nau8825_regmap_config = {
@@ -1919,8 +1995,10 @@ static void nau8825_fll_apply(struct nau8825 *nau8825,
regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
NAU8825_CLK_SRC_MASK | NAU8825_CLK_MCLK_SRC_MASK,
NAU8825_CLK_SRC_MCLK | fll_param->mclk_src);
+ /* Make DSP operate at high speed for better performance. */
regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL1,
- NAU8825_FLL_RATIO_MASK, fll_param->ratio);
+ NAU8825_FLL_RATIO_MASK | NAU8825_ICTRL_LATCH_MASK,
+ fll_param->ratio | (0x6 << NAU8825_ICTRL_LATCH_SFT));
/* FLL 16-bit fractional input */
regmap_write(nau8825->regmap, NAU8825_REG_FLL2, fll_param->fll_frac);
/* FLL 10-bit integer input */
@@ -1936,19 +2014,22 @@ static void nau8825_fll_apply(struct nau8825 *nau8825,
regmap_update_bits(nau8825->regmap,
NAU8825_REG_FLL6, NAU8825_DCO_EN, 0);
if (fll_param->fll_frac) {
+ /* enable the FLL loop filter and set its cutoff frequency to 500 kHz */
regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5,
NAU8825_FLL_PDB_DAC_EN | NAU8825_FLL_LOOP_FTR_EN |
NAU8825_FLL_FTR_SW_MASK,
NAU8825_FLL_PDB_DAC_EN | NAU8825_FLL_LOOP_FTR_EN |
NAU8825_FLL_FTR_SW_FILTER);
regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL6,
- NAU8825_SDM_EN, NAU8825_SDM_EN);
+ NAU8825_SDM_EN | NAU8825_CUTOFF500,
+ NAU8825_SDM_EN | NAU8825_CUTOFF500);
} else {
+ /* disable the FLL loop filter and clear the 500 kHz cutoff selection */
regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5,
NAU8825_FLL_PDB_DAC_EN | NAU8825_FLL_LOOP_FTR_EN |
NAU8825_FLL_FTR_SW_MASK, NAU8825_FLL_FTR_SW_ACCU);
- regmap_update_bits(nau8825->regmap,
- NAU8825_REG_FLL6, NAU8825_SDM_EN, 0);
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL6,
+ NAU8825_SDM_EN | NAU8825_CUTOFF500, 0);
}
}
@@ -2014,6 +2095,9 @@ static void nau8825_configure_mclk_as_sysclk(struct regmap *regmap)
NAU8825_CLK_SRC_MASK, NAU8825_CLK_SRC_MCLK);
regmap_update_bits(regmap, NAU8825_REG_FLL6,
NAU8825_DCO_EN, 0);
+ /* Restore the DSP to its default setting to save power. */
+ regmap_update_bits(regmap, NAU8825_REG_FLL1,
+ NAU8825_ICTRL_LATCH_MASK, 0);
}
static int nau8825_configure_sysclk(struct nau8825 *nau8825, int clk_id,
@@ -2038,7 +2122,7 @@ static int nau8825_configure_sysclk(struct nau8825 *nau8825, int clk_id,
* fered by cross talk process, the driver make the playback
* preparation halted until cross talk process finish.
*/
- nau8825_sema_acquire(nau8825, 2 * HZ);
+ nau8825_sema_acquire(nau8825, 3 * HZ);
nau8825_configure_mclk_as_sysclk(regmap);
/* MCLK not changed by clock tree */
regmap_update_bits(regmap, NAU8825_REG_CLK_DIVIDER,
@@ -2057,10 +2141,13 @@ static int nau8825_configure_sysclk(struct nau8825 *nau8825, int clk_id,
NAU8825_DCO_EN, NAU8825_DCO_EN);
regmap_update_bits(regmap, NAU8825_REG_CLK_DIVIDER,
NAU8825_CLK_SRC_MASK, NAU8825_CLK_SRC_VCO);
- /* Decrease the VCO frequency for power saving */
+ /* Decrease the VCO frequency and restore the DSP default
+ * setting to save power.
+ */
regmap_update_bits(regmap, NAU8825_REG_CLK_DIVIDER,
NAU8825_CLK_MCLK_SRC_MASK, 0xf);
regmap_update_bits(regmap, NAU8825_REG_FLL1,
+ NAU8825_ICTRL_LATCH_MASK |
NAU8825_FLL_RATIO_MASK, 0x10);
regmap_update_bits(regmap, NAU8825_REG_FLL6,
NAU8825_SDM_EN, NAU8825_SDM_EN);
@@ -2083,9 +2170,14 @@ static int nau8825_configure_sysclk(struct nau8825 *nau8825, int clk_id,
* fered by cross talk process, the driver make the playback
* preparation halted until cross talk process finish.
*/
- nau8825_sema_acquire(nau8825, 2 * HZ);
+ nau8825_sema_acquire(nau8825, 3 * HZ);
+ /* A higher FLL reference input frequency allows a lower gain
+ * error setting, e.g. 0000 when the reference comes from a
+ * 12.288 MHz MCLK.
+ */
regmap_update_bits(regmap, NAU8825_REG_FLL3,
- NAU8825_FLL_CLK_SRC_MASK, NAU8825_FLL_CLK_SRC_MCLK);
+ NAU8825_FLL_CLK_SRC_MASK | NAU8825_GAIN_ERR_MASK,
+ NAU8825_FLL_CLK_SRC_MCLK | 0);
/* Release the semaphone. */
nau8825_sema_release(nau8825);
@@ -2100,9 +2192,17 @@ static int nau8825_configure_sysclk(struct nau8825 *nau8825, int clk_id,
* fered by cross talk process, the driver make the playback
* preparation halted until cross talk process finish.
*/
- nau8825_sema_acquire(nau8825, 2 * HZ);
+ nau8825_sema_acquire(nau8825, 3 * HZ);
+ /* If the FLL reference input comes from a low-frequency source,
+ * a higher gain error setting such as 0xf can be applied; it has
+ * the most sensitive gain error correction threshold, so the FLL
+ * locks the DCO most accurately onto the target frequency.
+ */
regmap_update_bits(regmap, NAU8825_REG_FLL3,
- NAU8825_FLL_CLK_SRC_MASK, NAU8825_FLL_CLK_SRC_BLK);
+ NAU8825_FLL_CLK_SRC_MASK | NAU8825_GAIN_ERR_MASK,
+ NAU8825_FLL_CLK_SRC_BLK |
+ (0xf << NAU8825_GAIN_ERR_SFT));
/* Release the semaphone. */
nau8825_sema_release(nau8825);
@@ -2118,9 +2218,17 @@ static int nau8825_configure_sysclk(struct nau8825 *nau8825, int clk_id,
* fered by cross talk process, the driver make the playback
* preparation halted until cross talk process finish.
*/
- nau8825_sema_acquire(nau8825, 2 * HZ);
+ nau8825_sema_acquire(nau8825, 3 * HZ);
+ /* If the FLL reference input comes from a low-frequency source,
+ * a higher gain error setting such as 0xf can be applied; it has
+ * the most sensitive gain error correction threshold, so the FLL
+ * locks the DCO most accurately onto the target frequency.
+ */
regmap_update_bits(regmap, NAU8825_REG_FLL3,
- NAU8825_FLL_CLK_SRC_MASK, NAU8825_FLL_CLK_SRC_FS);
+ NAU8825_FLL_CLK_SRC_MASK | NAU8825_GAIN_ERR_MASK,
+ NAU8825_FLL_CLK_SRC_FS |
+ (0xf << NAU8825_GAIN_ERR_SFT));
/* Release the semaphone. */
nau8825_sema_release(nau8825);
diff --git a/sound/soc/codecs/nau8825.h b/sound/soc/codecs/nau8825.h
index 1c63e2abafa9..5d1704e73241 100644
--- a/sound/soc/codecs/nau8825.h
+++ b/sound/soc/codecs/nau8825.h
@@ -115,12 +115,20 @@
#define NAU8825_CLK_SRC_MASK (1 << NAU8825_CLK_SRC_SFT)
#define NAU8825_CLK_SRC_VCO (1 << NAU8825_CLK_SRC_SFT)
#define NAU8825_CLK_SRC_MCLK (0 << NAU8825_CLK_SRC_SFT)
+#define NAU8825_CLK_ADC_SRC_SFT 6
+#define NAU8825_CLK_ADC_SRC_MASK (0x3 << NAU8825_CLK_ADC_SRC_SFT)
+#define NAU8825_CLK_DAC_SRC_SFT 4
+#define NAU8825_CLK_DAC_SRC_MASK (0x3 << NAU8825_CLK_DAC_SRC_SFT)
#define NAU8825_CLK_MCLK_SRC_MASK (0xf << 0)
/* FLL1 (0x04) */
+#define NAU8825_ICTRL_LATCH_SFT 10
+#define NAU8825_ICTRL_LATCH_MASK (0x7 << NAU8825_ICTRL_LATCH_SFT)
#define NAU8825_FLL_RATIO_MASK (0x7f << 0)
/* FLL3 (0x06) */
+#define NAU8825_GAIN_ERR_SFT 12
+#define NAU8825_GAIN_ERR_MASK (0xf << NAU8825_GAIN_ERR_SFT)
#define NAU8825_FLL_INTEGER_MASK (0x3ff << 0)
#define NAU8825_FLL_CLK_SRC_SFT 10
#define NAU8825_FLL_CLK_SRC_MASK (0x3 << NAU8825_FLL_CLK_SRC_SFT)
@@ -144,6 +152,7 @@
/* FLL6 (0x9) */
#define NAU8825_DCO_EN (0x1 << 15)
#define NAU8825_SDM_EN (0x1 << 14)
+#define NAU8825_CUTOFF500 (0x1 << 13)
/* HSD_CTRL (0xc) */
#define NAU8825_HSD_AUTO_MODE (1 << 6)
@@ -246,6 +255,11 @@
#define NAU8825_I2S_MS_SLAVE (0 << NAU8825_I2S_MS_SFT)
#define NAU8825_I2S_BLK_DIV_MASK 0x7
+/* LEFT_TIME_SLOT (0x1e) */
+#define NAU8825_FS_ERR_CMP_SEL_SFT 14
+#define NAU8825_FS_ERR_CMP_SEL_MASK (0x3 << NAU8825_FS_ERR_CMP_SEL_SFT)
+#define NAU8825_DIS_FS_SHORT_DET (1 << 13)
+
/* BIQ_CTRL (0x20) */
#define NAU8825_BIQ_WRT_SFT 4
#define NAU8825_BIQ_WRT_EN (1 << NAU8825_BIQ_WRT_SFT)
@@ -255,6 +269,8 @@
#define NAU8825_BIQ_PATH_DAC (1 << NAU8825_BIQ_PATH_SFT)
/* ADC_RATE (0x2b) */
+#define NAU8825_ADC_SINC4_SFT 4
+#define NAU8825_ADC_SINC4_EN (1 << NAU8825_ADC_SINC4_SFT)
#define NAU8825_ADC_SYNC_DOWN_SFT 0
#define NAU8825_ADC_SYNC_DOWN_MASK 0x3
#define NAU8825_ADC_SYNC_DOWN_32 0
diff --git a/sound/soc/codecs/rl6231.c b/sound/soc/codecs/rl6231.c
index 1dc68ab08a17..7b447d0b173a 100644
--- a/sound/soc/codecs/rl6231.c
+++ b/sound/soc/codecs/rl6231.c
@@ -102,6 +102,7 @@ struct pll_calc_map {
};
static const struct pll_calc_map pll_preset_table[] = {
+ {19200000, 4096000, 23, 14, 1, false},
{19200000, 24576000, 3, 30, 3, false},
};
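Both 19.2 MHz presets satisfy the usual rl6231 relation Fout = Fin * (N + 2) / ((M + 2) * (K + 2)); a minimal standalone check, assuming the pll_calc_map field order is {pll_in, pll_out, k, n, m, m_bypass}:

#include <stdbool.h>
#include <stdio.h>

struct pll_calc_map {
	unsigned int pll_in;
	unsigned int pll_out;
	int k;
	int n;
	int m;
	bool bypass;
};

/* Fout = Fin * (N + 2) / ((M + 2) * (K + 2)); the M term drops out when bypassed */
static unsigned int pll_fout(const struct pll_calc_map *e)
{
	unsigned long long num = (unsigned long long)e->pll_in * (e->n + 2);
	unsigned int den = (e->bypass ? 1 : e->m + 2) * (e->k + 2);

	return num / den;
}

int main(void)
{
	const struct pll_calc_map t[] = {
		{ 19200000,  4096000, 23, 14, 1, false },
		{ 19200000, 24576000,  3, 30, 3, false },
	};

	for (int i = 0; i < 2; i++)
		printf("%u (expect %u)\n", pll_fout(&t[i]), t[i].pll_out);

	return 0;
}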
diff --git a/sound/soc/codecs/rl6347a.c b/sound/soc/codecs/rl6347a.c
index a4b910efbd45..8f571cf8edd4 100644
--- a/sound/soc/codecs/rl6347a.c
+++ b/sound/soc/codecs/rl6347a.c
@@ -51,7 +51,7 @@ int rl6347a_hw_write(void *context, unsigned int reg, unsigned int value)
if (ret == 4)
return 0;
else
- pr_err("ret=%d\n", ret);
+ dev_err(&client->dev, "I2C error %d\n", ret);
if (ret < 0)
return ret;
else
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
index 2db8179047ae..7150a407ffd9 100644
--- a/sound/soc/codecs/rt298.c
+++ b/sound/soc/codecs/rt298.c
@@ -326,11 +326,31 @@ static void rt298_jack_detect_work(struct work_struct *work)
int rt298_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack)
{
struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_dapm_context *dapm;
+ bool hp = false;
+ bool mic = false;
+ int status = 0;
+
+ /* If jack is NULL, disable headset jack detection */
+ if (!jack) {
+ regmap_update_bits(rt298->regmap, RT298_IRQ_CTRL, 0x2, 0x0);
+ dapm = snd_soc_codec_get_dapm(codec);
+ snd_soc_dapm_disable_pin(dapm, "LDO1");
+ snd_soc_dapm_sync(dapm);
+ return 0;
+ }
rt298->jack = jack;
+ regmap_update_bits(rt298->regmap, RT298_IRQ_CTRL, 0x2, 0x2);
- /* Send an initial empty report */
- snd_soc_jack_report(rt298->jack, 0,
+ rt298_jack_detect(rt298, &hp, &mic);
+ if (hp)
+ status |= SND_JACK_HEADPHONE;
+
+ if (mic)
+ status |= SND_JACK_MICROPHONE;
+
+ snd_soc_jack_report(rt298->jack, status,
SND_JACK_MICROPHONE | SND_JACK_HEADPHONE);
return 0;
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index 09103aab0cb2..0901e25d6db6 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -20,7 +20,6 @@
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/sched.h>
-#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/regulator/consumer.h>
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index f24b7cfd3a89..b281a46d769d 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -452,6 +452,9 @@ static int rt5514_set_dmic_clk(struct snd_soc_dapm_widget *w,
RT5514_CLK_DMIC_OUT_SEL_MASK,
idx << RT5514_CLK_DMIC_OUT_SEL_SFT);
+ if (rt5514->pdata.dmic_init_delay)
+ msleep(rt5514->pdata.dmic_init_delay);
+
return idx;
}
@@ -1073,9 +1076,18 @@ static const struct of_device_id rt5514_of_match[] = {
MODULE_DEVICE_TABLE(of, rt5514_of_match);
#endif
+static int rt5514_parse_dt(struct rt5514_priv *rt5514, struct device *dev)
+{
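+ /*
+ * e.g. a board DT node may specify
+ *	realtek,dmic-init-delay-ms = <20>;
+ * to give the DMICs time to settle after their clock is set up.
+ */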
+ device_property_read_u32(dev, "realtek,dmic-init-delay-ms",
+ &rt5514->pdata.dmic_init_delay);
+
+ return 0;
+}
+
static int rt5514_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
+ struct rt5514_platform_data *pdata = dev_get_platdata(&i2c->dev);
struct rt5514_priv *rt5514;
int ret;
unsigned int val;
@@ -1087,6 +1099,11 @@ static int rt5514_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, rt5514);
+ if (pdata)
+ rt5514->pdata = *pdata;
+ else if (i2c->dev.of_node)
+ rt5514_parse_dt(rt5514, &i2c->dev);
+
rt5514->i2c_regmap = devm_regmap_init_i2c(i2c, &rt5514_i2c_regmap);
if (IS_ERR(rt5514->i2c_regmap)) {
ret = PTR_ERR(rt5514->i2c_regmap);
diff --git a/sound/soc/codecs/rt5514.h b/sound/soc/codecs/rt5514.h
index 229de0e2c88c..5d343fb6d125 100644
--- a/sound/soc/codecs/rt5514.h
+++ b/sound/soc/codecs/rt5514.h
@@ -13,6 +13,7 @@
#define __RT5514_H__
#include <linux/clk.h>
+#include <sound/rt5514.h>
#define RT5514_DEVICE_ID 0x10ec5514
@@ -243,6 +244,7 @@ enum {
};
struct rt5514_priv {
+ struct rt5514_platform_data pdata;
struct snd_soc_codec *codec;
struct regmap *i2c_regmap, *regmap;
struct clk *mclk;
diff --git a/sound/soc/codecs/rt5616.c b/sound/soc/codecs/rt5616.c
index d1f273b24991..7d6e0823f98f 100644
--- a/sound/soc/codecs/rt5616.c
+++ b/sound/soc/codecs/rt5616.c
@@ -960,8 +960,7 @@ static int rt5616_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_codec *codec = dai->codec;
struct rt5616_priv *rt5616 = snd_soc_codec_get_drvdata(codec);
unsigned int val_len = 0, val_clk, mask_clk;
int pre_div, bclk_ms, frame_size;
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index 3cc1135fc2cd..e29a6defefa0 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -423,6 +423,8 @@ static const struct snd_kcontrol_new rt5640_snd_controls[] = {
SOC_DOUBLE_TLV("ADC Capture Volume", RT5640_ADC_DIG_VOL,
RT5640_L_VOL_SFT, RT5640_R_VOL_SFT,
127, 0, adc_vol_tlv),
+ SOC_DOUBLE("Mono ADC Capture Switch", RT5640_DUMMY1,
+ RT5640_M_MONO_ADC_L_SFT, RT5640_M_MONO_ADC_R_SFT, 1, 1),
SOC_DOUBLE_TLV("Mono ADC Capture Volume", RT5640_ADC_DATA,
RT5640_L_VOL_SFT, RT5640_R_VOL_SFT,
127, 0, adc_vol_tlv),
@@ -2407,6 +2409,9 @@ static int rt5640_i2c_probe(struct i2c_client *i2c,
if (ret != 0)
dev_warn(&i2c->dev, "Failed to apply regmap patch: %d\n", ret);
+ regmap_update_bits(rt5640->regmap, RT5640_DUMMY1,
+ RT5640_MCLK_DET, RT5640_MCLK_DET);
+
if (rt5640->pdata.in1_diff)
regmap_update_bits(rt5640->regmap, RT5640_IN1_IN2,
RT5640_IN_DF1, RT5640_IN_DF1);
diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
index 90c88711c72a..b8a811732a52 100644
--- a/sound/soc/codecs/rt5640.h
+++ b/sound/soc/codecs/rt5640.h
@@ -1970,6 +1970,12 @@
#define RT5640_ZCD_HP_DIS (0x0 << 15)
#define RT5640_ZCD_HP_EN (0x1 << 15)
+/* General Control 1 (0xfa) */
+#define RT5640_M_MONO_ADC_L (0x1 << 13)
+#define RT5640_M_MONO_ADC_L_SFT 13
+#define RT5640_M_MONO_ADC_R (0x1 << 12)
+#define RT5640_M_MONO_ADC_R_SFT 12
+#define RT5640_MCLK_DET (0x1 << 11)
/* Codec Private Register definition */
/* 3D Speaker Control (0x63) */
diff --git a/sound/soc/codecs/rt5660.c b/sound/soc/codecs/rt5660.c
index 9f0933ced804..76cf76a2e9b6 100644
--- a/sound/soc/codecs/rt5660.c
+++ b/sound/soc/codecs/rt5660.c
@@ -1311,6 +1311,10 @@ static int rt5660_i2c_probe(struct i2c_client *i2c,
if (ret != 0)
dev_warn(&i2c->dev, "Failed to apply regmap patch: %d\n", ret);
+ regmap_update_bits(rt5660->regmap, RT5660_GEN_CTRL1,
+ RT5660_AUTO_DIS_AMP | RT5660_MCLK_DET | RT5660_POW_CLKDET,
+ RT5660_AUTO_DIS_AMP | RT5660_MCLK_DET | RT5660_POW_CLKDET);
+
if (rt5660->pdata.dmic1_data_pin) {
regmap_update_bits(rt5660->regmap, RT5660_GPIO_CTRL1,
RT5660_GP1_PIN_MASK, RT5660_GP1_PIN_DMIC1_SCL);
diff --git a/sound/soc/codecs/rt5660.h b/sound/soc/codecs/rt5660.h
index 6cdb9269ec9e..bba18fb66b6f 100644
--- a/sound/soc/codecs/rt5660.h
+++ b/sound/soc/codecs/rt5660.h
@@ -810,6 +810,9 @@
/* General Control 1 (0xfa) */
#define RT5660_PWR_VREF_HP (0x1 << 11)
#define RT5660_PWR_VREF_HP_SFT 11
+#define RT5660_AUTO_DIS_AMP (0x1 << 6)
+#define RT5660_MCLK_DET (0x1 << 5)
+#define RT5660_POW_CLKDET (0x1 << 1)
#define RT5660_DIG_GATE_CTRL (0x1)
#define RT5660_DIG_GATE_CTRL_SFT 0
diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c
index 00ff2788879e..a32508d7dcfd 100644
--- a/sound/soc/codecs/rt5663.c
+++ b/sound/soc/codecs/rt5663.c
@@ -1,5 +1,5 @@
/*
- * rt5663.c -- RT5668/RT5663 ALSA SoC audio codec driver
+ * rt5663.c -- RT5663 ALSA SoC audio codec driver
*
* Copyright 2016 Realtek Semiconductor Corp.
* Author: Jack Yu <jack.yu@realtek.com>
@@ -30,12 +30,12 @@
#include "rt5663.h"
#include "rl6231.h"
-#define RT5668_DEVICE_ID 0x6451
-#define RT5663_DEVICE_ID 0x6406
+#define RT5663_DEVICE_ID_2 0x6451
+#define RT5663_DEVICE_ID_1 0x6406
enum {
- CODEC_TYPE_RT5668,
- CODEC_TYPE_RT5663,
+ CODEC_VER_1,
+ CODEC_VER_0,
};
struct rt5663_priv {
@@ -45,7 +45,7 @@ struct rt5663_priv {
struct snd_soc_jack *hs_jack;
struct timer_list btn_check_timer;
- int codec_type;
+ int codec_ver;
int sysclk;
int sysclk_src;
int lrck;
@@ -57,7 +57,7 @@ struct rt5663_priv {
int jack_type;
};
-static const struct reg_default rt5668_reg[] = {
+static const struct reg_default rt5663_v2_reg[] = {
{ 0x0000, 0x0000 },
{ 0x0001, 0xc8c8 },
{ 0x0002, 0x8080 },
@@ -730,7 +730,7 @@ static bool rt5663_volatile_register(struct device *dev, unsigned int reg)
case RT5663_ADC_EQ_1:
case RT5663_INT_ST_1:
case RT5663_INT_ST_2:
- case RT5663_GPIO_STA:
+ case RT5663_GPIO_STA1:
case RT5663_SIN_GEN_1:
case RT5663_IL_CMD_1:
case RT5663_IL_CMD_5:
@@ -846,7 +846,7 @@ static bool rt5663_readable_register(struct device *dev, unsigned int reg)
case RT5663_INT_ST_2:
case RT5663_GPIO_1:
case RT5663_GPIO_2:
- case RT5663_GPIO_STA:
+ case RT5663_GPIO_STA1:
case RT5663_SIN_GEN_1:
case RT5663_SIN_GEN_2:
case RT5663_SIN_GEN_3:
@@ -1036,23 +1036,23 @@ static bool rt5663_readable_register(struct device *dev, unsigned int reg)
}
}
-static bool rt5668_volatile_register(struct device *dev, unsigned int reg)
+static bool rt5663_v2_volatile_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case RT5663_RESET:
- case RT5668_CBJ_TYPE_2:
- case RT5668_PDM_OUT_CTL:
- case RT5668_PDM_I2C_DATA_CTL1:
- case RT5668_PDM_I2C_DATA_CTL4:
- case RT5668_ALC_BK_GAIN:
+ case RT5663_CBJ_TYPE_2:
+ case RT5663_PDM_OUT_CTL:
+ case RT5663_PDM_I2C_DATA_CTL1:
+ case RT5663_PDM_I2C_DATA_CTL4:
+ case RT5663_ALC_BK_GAIN:
case RT5663_PLL_2:
case RT5663_MICBIAS_1:
case RT5663_ADC_EQ_1:
case RT5663_INT_ST_1:
- case RT5668_GPIO_STA:
+ case RT5663_GPIO_STA2:
case RT5663_IL_CMD_1:
case RT5663_IL_CMD_5:
- case RT5668_A_JD_CTRL:
+ case RT5663_A_JD_CTRL:
case RT5663_JD_CTRL2:
case RT5663_VENDOR_ID:
case RT5663_VENDOR_ID_1:
@@ -1061,15 +1061,15 @@ static bool rt5668_volatile_register(struct device *dev, unsigned int reg)
case RT5663_STO_DRE_5:
case RT5663_STO_DRE_6:
case RT5663_STO_DRE_7:
- case RT5668_MONO_DYNA_6:
- case RT5668_STO1_SIL_DET:
- case RT5668_MONOL_SIL_DET:
- case RT5668_MONOR_SIL_DET:
- case RT5668_STO2_DAC_SIL:
- case RT5668_MONO_AMP_CAL_ST1:
- case RT5668_MONO_AMP_CAL_ST2:
- case RT5668_MONO_AMP_CAL_ST3:
- case RT5668_MONO_AMP_CAL_ST4:
+ case RT5663_MONO_DYNA_6:
+ case RT5663_STO1_SIL_DET:
+ case RT5663_MONOL_SIL_DET:
+ case RT5663_MONOR_SIL_DET:
+ case RT5663_STO2_DAC_SIL:
+ case RT5663_MONO_AMP_CAL_ST1:
+ case RT5663_MONO_AMP_CAL_ST2:
+ case RT5663_MONO_AMP_CAL_ST3:
+ case RT5663_MONO_AMP_CAL_ST4:
case RT5663_HP_IMP_SEN_2:
case RT5663_HP_IMP_SEN_3:
case RT5663_HP_IMP_SEN_4:
@@ -1083,218 +1083,218 @@ static bool rt5668_volatile_register(struct device *dev, unsigned int reg)
case RT5663_HP_CALIB_ST7:
case RT5663_HP_CALIB_ST8:
case RT5663_HP_CALIB_ST9:
- case RT5668_HP_CALIB_ST10:
- case RT5668_HP_CALIB_ST11:
+ case RT5663_HP_CALIB_ST10:
+ case RT5663_HP_CALIB_ST11:
return true;
default:
return false;
}
}
-static bool rt5668_readable_register(struct device *dev, unsigned int reg)
+static bool rt5663_v2_readable_register(struct device *dev, unsigned int reg)
{
switch (reg) {
- case RT5668_LOUT_CTRL:
- case RT5668_HP_AMP_2:
- case RT5668_MONO_OUT:
- case RT5668_MONO_GAIN:
- case RT5668_AEC_BST:
- case RT5668_IN1_IN2:
- case RT5668_IN3_IN4:
- case RT5668_INL1_INR1:
- case RT5668_CBJ_TYPE_2:
- case RT5668_CBJ_TYPE_3:
- case RT5668_CBJ_TYPE_4:
- case RT5668_CBJ_TYPE_5:
- case RT5668_CBJ_TYPE_8:
- case RT5668_DAC3_DIG_VOL:
- case RT5668_DAC3_CTRL:
- case RT5668_MONO_ADC_DIG_VOL:
- case RT5668_STO2_ADC_DIG_VOL:
- case RT5668_MONO_ADC_BST_GAIN:
- case RT5668_STO2_ADC_BST_GAIN:
- case RT5668_SIDETONE_CTRL:
- case RT5668_MONO1_ADC_MIXER:
- case RT5668_STO2_ADC_MIXER:
- case RT5668_MONO_DAC_MIXER:
- case RT5668_DAC2_SRC_CTRL:
- case RT5668_IF_3_4_DATA_CTL:
- case RT5668_IF_5_DATA_CTL:
- case RT5668_PDM_OUT_CTL:
- case RT5668_PDM_I2C_DATA_CTL1:
- case RT5668_PDM_I2C_DATA_CTL2:
- case RT5668_PDM_I2C_DATA_CTL3:
- case RT5668_PDM_I2C_DATA_CTL4:
- case RT5668_RECMIX1_NEW:
- case RT5668_RECMIX1L_0:
- case RT5668_RECMIX1L:
- case RT5668_RECMIX1R_0:
- case RT5668_RECMIX1R:
- case RT5668_RECMIX2_NEW:
- case RT5668_RECMIX2_L_2:
- case RT5668_RECMIX2_R:
- case RT5668_RECMIX2_R_2:
- case RT5668_CALIB_REC_LR:
- case RT5668_ALC_BK_GAIN:
- case RT5668_MONOMIX_GAIN:
- case RT5668_MONOMIX_IN_GAIN:
- case RT5668_OUT_MIXL_GAIN:
- case RT5668_OUT_LMIX_IN_GAIN:
- case RT5668_OUT_RMIX_IN_GAIN:
- case RT5668_OUT_RMIX_IN_GAIN1:
- case RT5668_LOUT_MIXER_CTRL:
- case RT5668_PWR_VOL:
- case RT5668_ADCDAC_RST:
- case RT5668_I2S34_SDP:
- case RT5668_I2S5_SDP:
- case RT5668_TDM_5:
- case RT5668_TDM_6:
- case RT5668_TDM_7:
- case RT5668_TDM_8:
- case RT5668_ASRC_3:
- case RT5668_ASRC_6:
- case RT5668_ASRC_7:
- case RT5668_PLL_TRK_13:
- case RT5668_I2S_M_CLK_CTL:
- case RT5668_FDIV_I2S34_M_CLK:
- case RT5668_FDIV_I2S34_M_CLK2:
- case RT5668_FDIV_I2S5_M_CLK:
- case RT5668_FDIV_I2S5_M_CLK2:
- case RT5668_IRQ_4:
- case RT5668_GPIO_3:
- case RT5668_GPIO_4:
- case RT5668_GPIO_STA:
- case RT5668_HP_AMP_DET1:
- case RT5668_HP_AMP_DET2:
- case RT5668_HP_AMP_DET3:
- case RT5668_MID_BD_HP_AMP:
- case RT5668_LOW_BD_HP_AMP:
- case RT5668_SOF_VOL_ZC2:
- case RT5668_ADC_STO2_ADJ1:
- case RT5668_ADC_STO2_ADJ2:
- case RT5668_A_JD_CTRL:
- case RT5668_JD1_TRES_CTRL:
- case RT5668_JD2_TRES_CTRL:
- case RT5668_JD_CTRL2:
- case RT5668_DUM_REG_2:
- case RT5668_DUM_REG_3:
+ case RT5663_LOUT_CTRL:
+ case RT5663_HP_AMP_2:
+ case RT5663_MONO_OUT:
+ case RT5663_MONO_GAIN:
+ case RT5663_AEC_BST:
+ case RT5663_IN1_IN2:
+ case RT5663_IN3_IN4:
+ case RT5663_INL1_INR1:
+ case RT5663_CBJ_TYPE_2:
+ case RT5663_CBJ_TYPE_3:
+ case RT5663_CBJ_TYPE_4:
+ case RT5663_CBJ_TYPE_5:
+ case RT5663_CBJ_TYPE_8:
+ case RT5663_DAC3_DIG_VOL:
+ case RT5663_DAC3_CTRL:
+ case RT5663_MONO_ADC_DIG_VOL:
+ case RT5663_STO2_ADC_DIG_VOL:
+ case RT5663_MONO_ADC_BST_GAIN:
+ case RT5663_STO2_ADC_BST_GAIN:
+ case RT5663_SIDETONE_CTRL:
+ case RT5663_MONO1_ADC_MIXER:
+ case RT5663_STO2_ADC_MIXER:
+ case RT5663_MONO_DAC_MIXER:
+ case RT5663_DAC2_SRC_CTRL:
+ case RT5663_IF_3_4_DATA_CTL:
+ case RT5663_IF_5_DATA_CTL:
+ case RT5663_PDM_OUT_CTL:
+ case RT5663_PDM_I2C_DATA_CTL1:
+ case RT5663_PDM_I2C_DATA_CTL2:
+ case RT5663_PDM_I2C_DATA_CTL3:
+ case RT5663_PDM_I2C_DATA_CTL4:
+ case RT5663_RECMIX1_NEW:
+ case RT5663_RECMIX1L_0:
+ case RT5663_RECMIX1L:
+ case RT5663_RECMIX1R_0:
+ case RT5663_RECMIX1R:
+ case RT5663_RECMIX2_NEW:
+ case RT5663_RECMIX2_L_2:
+ case RT5663_RECMIX2_R:
+ case RT5663_RECMIX2_R_2:
+ case RT5663_CALIB_REC_LR:
+ case RT5663_ALC_BK_GAIN:
+ case RT5663_MONOMIX_GAIN:
+ case RT5663_MONOMIX_IN_GAIN:
+ case RT5663_OUT_MIXL_GAIN:
+ case RT5663_OUT_LMIX_IN_GAIN:
+ case RT5663_OUT_RMIX_IN_GAIN:
+ case RT5663_OUT_RMIX_IN_GAIN1:
+ case RT5663_LOUT_MIXER_CTRL:
+ case RT5663_PWR_VOL:
+ case RT5663_ADCDAC_RST:
+ case RT5663_I2S34_SDP:
+ case RT5663_I2S5_SDP:
+ case RT5663_TDM_6:
+ case RT5663_TDM_7:
+ case RT5663_TDM_8:
+ case RT5663_TDM_9:
+ case RT5663_ASRC_3:
+ case RT5663_ASRC_6:
+ case RT5663_ASRC_7:
+ case RT5663_PLL_TRK_13:
+ case RT5663_I2S_M_CLK_CTL:
+ case RT5663_FDIV_I2S34_M_CLK:
+ case RT5663_FDIV_I2S34_M_CLK2:
+ case RT5663_FDIV_I2S5_M_CLK:
+ case RT5663_FDIV_I2S5_M_CLK2:
+ case RT5663_V2_IRQ_4:
+ case RT5663_GPIO_3:
+ case RT5663_GPIO_4:
+ case RT5663_GPIO_STA2:
+ case RT5663_HP_AMP_DET1:
+ case RT5663_HP_AMP_DET2:
+ case RT5663_HP_AMP_DET3:
+ case RT5663_MID_BD_HP_AMP:
+ case RT5663_LOW_BD_HP_AMP:
+ case RT5663_SOF_VOL_ZC2:
+ case RT5663_ADC_STO2_ADJ1:
+ case RT5663_ADC_STO2_ADJ2:
+ case RT5663_A_JD_CTRL:
+ case RT5663_JD1_TRES_CTRL:
+ case RT5663_JD2_TRES_CTRL:
+ case RT5663_V2_JD_CTRL2:
+ case RT5663_DUM_REG_2:
+ case RT5663_DUM_REG_3:
case RT5663_VENDOR_ID:
case RT5663_VENDOR_ID_1:
case RT5663_VENDOR_ID_2:
- case RT5668_DACADC_DIG_VOL2:
- case RT5668_DIG_IN_PIN2:
- case RT5668_PAD_DRV_CTL1:
- case RT5668_SOF_RAM_DEPOP:
- case RT5668_VOL_TEST:
- case RT5668_TEST_MODE_3:
- case RT5668_TEST_MODE_4:
+ case RT5663_DACADC_DIG_VOL2:
+ case RT5663_DIG_IN_PIN2:
+ case RT5663_PAD_DRV_CTL1:
+ case RT5663_SOF_RAM_DEPOP:
+ case RT5663_VOL_TEST:
+ case RT5663_TEST_MODE_4:
+ case RT5663_TEST_MODE_5:
case RT5663_STO_DRE_9:
- case RT5668_MONO_DYNA_1:
- case RT5668_MONO_DYNA_2:
- case RT5668_MONO_DYNA_3:
- case RT5668_MONO_DYNA_4:
- case RT5668_MONO_DYNA_5:
- case RT5668_MONO_DYNA_6:
- case RT5668_STO1_SIL_DET:
- case RT5668_MONOL_SIL_DET:
- case RT5668_MONOR_SIL_DET:
- case RT5668_STO2_DAC_SIL:
- case RT5668_PWR_SAV_CTL1:
- case RT5668_PWR_SAV_CTL2:
- case RT5668_PWR_SAV_CTL3:
- case RT5668_PWR_SAV_CTL4:
- case RT5668_PWR_SAV_CTL5:
- case RT5668_PWR_SAV_CTL6:
- case RT5668_MONO_AMP_CAL1:
- case RT5668_MONO_AMP_CAL2:
- case RT5668_MONO_AMP_CAL3:
- case RT5668_MONO_AMP_CAL4:
- case RT5668_MONO_AMP_CAL5:
- case RT5668_MONO_AMP_CAL6:
- case RT5668_MONO_AMP_CAL7:
- case RT5668_MONO_AMP_CAL_ST1:
- case RT5668_MONO_AMP_CAL_ST2:
- case RT5668_MONO_AMP_CAL_ST3:
- case RT5668_MONO_AMP_CAL_ST4:
- case RT5668_MONO_AMP_CAL_ST5:
- case RT5668_HP_IMP_SEN_13:
- case RT5668_HP_IMP_SEN_14:
- case RT5668_HP_IMP_SEN_6:
- case RT5668_HP_IMP_SEN_7:
- case RT5668_HP_IMP_SEN_8:
- case RT5668_HP_IMP_SEN_9:
- case RT5668_HP_IMP_SEN_10:
- case RT5668_HP_LOGIC_3:
- case RT5668_HP_CALIB_ST10:
- case RT5668_HP_CALIB_ST11:
- case RT5668_PRO_REG_TBL_4:
- case RT5668_PRO_REG_TBL_5:
- case RT5668_PRO_REG_TBL_6:
- case RT5668_PRO_REG_TBL_7:
- case RT5668_PRO_REG_TBL_8:
- case RT5668_PRO_REG_TBL_9:
- case RT5668_SAR_ADC_INL_1:
- case RT5668_SAR_ADC_INL_2:
- case RT5668_SAR_ADC_INL_3:
- case RT5668_SAR_ADC_INL_4:
- case RT5668_SAR_ADC_INL_5:
- case RT5668_SAR_ADC_INL_6:
- case RT5668_SAR_ADC_INL_7:
- case RT5668_SAR_ADC_INL_8:
- case RT5668_SAR_ADC_INL_9:
- case RT5668_SAR_ADC_INL_10:
- case RT5668_SAR_ADC_INL_11:
- case RT5668_SAR_ADC_INL_12:
- case RT5668_DRC_CTRL_1:
- case RT5668_DRC1_CTRL_2:
- case RT5668_DRC1_CTRL_3:
- case RT5668_DRC1_CTRL_4:
- case RT5668_DRC1_CTRL_5:
- case RT5668_DRC1_CTRL_6:
- case RT5668_DRC1_HD_CTRL_1:
- case RT5668_DRC1_HD_CTRL_2:
- case RT5668_DRC1_PRI_REG_1:
- case RT5668_DRC1_PRI_REG_2:
- case RT5668_DRC1_PRI_REG_3:
- case RT5668_DRC1_PRI_REG_4:
- case RT5668_DRC1_PRI_REG_5:
- case RT5668_DRC1_PRI_REG_6:
- case RT5668_DRC1_PRI_REG_7:
- case RT5668_DRC1_PRI_REG_8:
- case RT5668_ALC_PGA_CTL_1:
- case RT5668_ALC_PGA_CTL_2:
- case RT5668_ALC_PGA_CTL_3:
- case RT5668_ALC_PGA_CTL_4:
- case RT5668_ALC_PGA_CTL_5:
- case RT5668_ALC_PGA_CTL_6:
- case RT5668_ALC_PGA_CTL_7:
- case RT5668_ALC_PGA_CTL_8:
- case RT5668_ALC_PGA_REG_1:
- case RT5668_ALC_PGA_REG_2:
- case RT5668_ALC_PGA_REG_3:
- case RT5668_ADC_EQ_RECOV_1:
- case RT5668_ADC_EQ_RECOV_2:
- case RT5668_ADC_EQ_RECOV_3:
- case RT5668_ADC_EQ_RECOV_4:
- case RT5668_ADC_EQ_RECOV_5:
- case RT5668_ADC_EQ_RECOV_6:
- case RT5668_ADC_EQ_RECOV_7:
- case RT5668_ADC_EQ_RECOV_8:
- case RT5668_ADC_EQ_RECOV_9:
- case RT5668_ADC_EQ_RECOV_10:
- case RT5668_ADC_EQ_RECOV_11:
- case RT5668_ADC_EQ_RECOV_12:
- case RT5668_ADC_EQ_RECOV_13:
- case RT5668_VID_HIDDEN:
- case RT5668_VID_CUSTOMER:
- case RT5668_SCAN_MODE:
- case RT5668_I2C_BYPA:
+ case RT5663_MONO_DYNA_1:
+ case RT5663_MONO_DYNA_2:
+ case RT5663_MONO_DYNA_3:
+ case RT5663_MONO_DYNA_4:
+ case RT5663_MONO_DYNA_5:
+ case RT5663_MONO_DYNA_6:
+ case RT5663_STO1_SIL_DET:
+ case RT5663_MONOL_SIL_DET:
+ case RT5663_MONOR_SIL_DET:
+ case RT5663_STO2_DAC_SIL:
+ case RT5663_PWR_SAV_CTL1:
+ case RT5663_PWR_SAV_CTL2:
+ case RT5663_PWR_SAV_CTL3:
+ case RT5663_PWR_SAV_CTL4:
+ case RT5663_PWR_SAV_CTL5:
+ case RT5663_PWR_SAV_CTL6:
+ case RT5663_MONO_AMP_CAL1:
+ case RT5663_MONO_AMP_CAL2:
+ case RT5663_MONO_AMP_CAL3:
+ case RT5663_MONO_AMP_CAL4:
+ case RT5663_MONO_AMP_CAL5:
+ case RT5663_MONO_AMP_CAL6:
+ case RT5663_MONO_AMP_CAL7:
+ case RT5663_MONO_AMP_CAL_ST1:
+ case RT5663_MONO_AMP_CAL_ST2:
+ case RT5663_MONO_AMP_CAL_ST3:
+ case RT5663_MONO_AMP_CAL_ST4:
+ case RT5663_MONO_AMP_CAL_ST5:
+ case RT5663_V2_HP_IMP_SEN_13:
+ case RT5663_V2_HP_IMP_SEN_14:
+ case RT5663_V2_HP_IMP_SEN_6:
+ case RT5663_V2_HP_IMP_SEN_7:
+ case RT5663_V2_HP_IMP_SEN_8:
+ case RT5663_V2_HP_IMP_SEN_9:
+ case RT5663_V2_HP_IMP_SEN_10:
+ case RT5663_HP_LOGIC_3:
+ case RT5663_HP_CALIB_ST10:
+ case RT5663_HP_CALIB_ST11:
+ case RT5663_PRO_REG_TBL_4:
+ case RT5663_PRO_REG_TBL_5:
+ case RT5663_PRO_REG_TBL_6:
+ case RT5663_PRO_REG_TBL_7:
+ case RT5663_PRO_REG_TBL_8:
+ case RT5663_PRO_REG_TBL_9:
+ case RT5663_SAR_ADC_INL_1:
+ case RT5663_SAR_ADC_INL_2:
+ case RT5663_SAR_ADC_INL_3:
+ case RT5663_SAR_ADC_INL_4:
+ case RT5663_SAR_ADC_INL_5:
+ case RT5663_SAR_ADC_INL_6:
+ case RT5663_SAR_ADC_INL_7:
+ case RT5663_SAR_ADC_INL_8:
+ case RT5663_SAR_ADC_INL_9:
+ case RT5663_SAR_ADC_INL_10:
+ case RT5663_SAR_ADC_INL_11:
+ case RT5663_SAR_ADC_INL_12:
+ case RT5663_DRC_CTRL_1:
+ case RT5663_DRC1_CTRL_2:
+ case RT5663_DRC1_CTRL_3:
+ case RT5663_DRC1_CTRL_4:
+ case RT5663_DRC1_CTRL_5:
+ case RT5663_DRC1_CTRL_6:
+ case RT5663_DRC1_HD_CTRL_1:
+ case RT5663_DRC1_HD_CTRL_2:
+ case RT5663_DRC1_PRI_REG_1:
+ case RT5663_DRC1_PRI_REG_2:
+ case RT5663_DRC1_PRI_REG_3:
+ case RT5663_DRC1_PRI_REG_4:
+ case RT5663_DRC1_PRI_REG_5:
+ case RT5663_DRC1_PRI_REG_6:
+ case RT5663_DRC1_PRI_REG_7:
+ case RT5663_DRC1_PRI_REG_8:
+ case RT5663_ALC_PGA_CTL_1:
+ case RT5663_ALC_PGA_CTL_2:
+ case RT5663_ALC_PGA_CTL_3:
+ case RT5663_ALC_PGA_CTL_4:
+ case RT5663_ALC_PGA_CTL_5:
+ case RT5663_ALC_PGA_CTL_6:
+ case RT5663_ALC_PGA_CTL_7:
+ case RT5663_ALC_PGA_CTL_8:
+ case RT5663_ALC_PGA_REG_1:
+ case RT5663_ALC_PGA_REG_2:
+ case RT5663_ALC_PGA_REG_3:
+ case RT5663_ADC_EQ_RECOV_1:
+ case RT5663_ADC_EQ_RECOV_2:
+ case RT5663_ADC_EQ_RECOV_3:
+ case RT5663_ADC_EQ_RECOV_4:
+ case RT5663_ADC_EQ_RECOV_5:
+ case RT5663_ADC_EQ_RECOV_6:
+ case RT5663_ADC_EQ_RECOV_7:
+ case RT5663_ADC_EQ_RECOV_8:
+ case RT5663_ADC_EQ_RECOV_9:
+ case RT5663_ADC_EQ_RECOV_10:
+ case RT5663_ADC_EQ_RECOV_11:
+ case RT5663_ADC_EQ_RECOV_12:
+ case RT5663_ADC_EQ_RECOV_13:
+ case RT5663_VID_HIDDEN:
+ case RT5663_VID_CUSTOMER:
+ case RT5663_SCAN_MODE:
+ case RT5663_I2C_BYPA:
return true;
case RT5663_TDM_1:
case RT5663_DEPOP_3:
case RT5663_ASRC_11_2:
case RT5663_INT_ST_2:
- case RT5663_GPIO_STA:
+ case RT5663_GPIO_STA1:
case RT5663_SIN_GEN_1:
case RT5663_SIN_GEN_2:
case RT5663_SIN_GEN_3:
@@ -1344,7 +1344,7 @@ static bool rt5668_readable_register(struct device *dev, unsigned int reg)
}
static const DECLARE_TLV_DB_SCALE(rt5663_hp_vol_tlv, -2400, 150, 0);
-static const DECLARE_TLV_DB_SCALE(rt5668_hp_vol_tlv, -2250, 150, 0);
+static const DECLARE_TLV_DB_SCALE(rt5663_v2_hp_vol_tlv, -2250, 150, 0);
static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -6525, 75, 0);
static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -1725, 75, 0);
@@ -1374,57 +1374,57 @@ static void rt5663_enable_push_button_irq(struct snd_soc_codec *codec,
if (enable) {
snd_soc_update_bits(codec, RT5663_IL_CMD_6,
- RT5668_EN_4BTN_INL_MASK, RT5668_EN_4BTN_INL_EN);
+ RT5663_EN_4BTN_INL_MASK, RT5663_EN_4BTN_INL_EN);
/* reset in-line command */
snd_soc_update_bits(codec, RT5663_IL_CMD_6,
- RT5668_RESET_4BTN_INL_MASK,
- RT5668_RESET_4BTN_INL_RESET);
+ RT5663_RESET_4BTN_INL_MASK,
+ RT5663_RESET_4BTN_INL_RESET);
snd_soc_update_bits(codec, RT5663_IL_CMD_6,
- RT5668_RESET_4BTN_INL_MASK,
- RT5668_RESET_4BTN_INL_NOR);
- switch (rt5663->codec_type) {
- case CODEC_TYPE_RT5668:
+ RT5663_RESET_4BTN_INL_MASK,
+ RT5663_RESET_4BTN_INL_NOR);
+ switch (rt5663->codec_ver) {
+ case CODEC_VER_1:
snd_soc_update_bits(codec, RT5663_IRQ_3,
- RT5668_EN_IRQ_INLINE_MASK,
- RT5668_EN_IRQ_INLINE_NOR);
+ RT5663_V2_EN_IRQ_INLINE_MASK,
+ RT5663_V2_EN_IRQ_INLINE_NOR);
break;
- case CODEC_TYPE_RT5663:
+ case CODEC_VER_0:
snd_soc_update_bits(codec, RT5663_IRQ_2,
RT5663_EN_IRQ_INLINE_MASK,
RT5663_EN_IRQ_INLINE_NOR);
break;
default:
- dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+ dev_err(codec->dev, "Unknown CODEC Version\n");
}
} else {
- switch (rt5663->codec_type) {
- case CODEC_TYPE_RT5668:
+ switch (rt5663->codec_ver) {
+ case CODEC_VER_1:
snd_soc_update_bits(codec, RT5663_IRQ_3,
- RT5668_EN_IRQ_INLINE_MASK,
- RT5668_EN_IRQ_INLINE_BYP);
+ RT5663_V2_EN_IRQ_INLINE_MASK,
+ RT5663_V2_EN_IRQ_INLINE_BYP);
break;
- case CODEC_TYPE_RT5663:
+ case CODEC_VER_0:
snd_soc_update_bits(codec, RT5663_IRQ_2,
RT5663_EN_IRQ_INLINE_MASK,
RT5663_EN_IRQ_INLINE_BYP);
break;
default:
- dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+ dev_err(codec->dev, "Unknown CODEC Version\n");
}
snd_soc_update_bits(codec, RT5663_IL_CMD_6,
- RT5668_EN_4BTN_INL_MASK, RT5668_EN_4BTN_INL_DIS);
+ RT5663_EN_4BTN_INL_MASK, RT5663_EN_4BTN_INL_DIS);
/* reset in-line command */
snd_soc_update_bits(codec, RT5663_IL_CMD_6,
- RT5668_RESET_4BTN_INL_MASK,
- RT5668_RESET_4BTN_INL_RESET);
+ RT5663_RESET_4BTN_INL_MASK,
+ RT5663_RESET_4BTN_INL_RESET);
snd_soc_update_bits(codec, RT5663_IL_CMD_6,
- RT5668_RESET_4BTN_INL_MASK,
- RT5668_RESET_4BTN_INL_NOR);
+ RT5663_RESET_4BTN_INL_MASK,
+ RT5663_RESET_4BTN_INL_NOR);
}
}
/**
- * rt5668_jack_detect - Detect headset.
+ * rt5663_v2_jack_detect - Detect headset.
* @codec: SoC audio codec device.
* @jack_insert: Jack insert or not.
*
@@ -1433,16 +1433,16 @@ static void rt5663_enable_push_button_irq(struct snd_soc_codec *codec,
* Returns detect status.
*/
-static int rt5668_jack_detect(struct snd_soc_codec *codec, int jack_insert)
+static int rt5663_v2_jack_detect(struct snd_soc_codec *codec, int jack_insert)
{
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
- struct rt5663_priv *rt5668 = snd_soc_codec_get_drvdata(codec);
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
int val, i = 0, sleep_time[5] = {300, 150, 100, 50, 30};
dev_dbg(codec->dev, "%s jack_insert:%d\n", __func__, jack_insert);
if (jack_insert) {
- snd_soc_write(codec, RT5668_CBJ_TYPE_2, 0x8040);
- snd_soc_write(codec, RT5668_CBJ_TYPE_3, 0x1484);
+ snd_soc_write(codec, RT5663_CBJ_TYPE_2, 0x8040);
+ snd_soc_write(codec, RT5663_CBJ_TYPE_3, 0x1484);
snd_soc_dapm_force_enable_pin(dapm, "MICBIAS1");
snd_soc_dapm_force_enable_pin(dapm, "MICBIAS2");
@@ -1450,12 +1450,12 @@ static int rt5668_jack_detect(struct snd_soc_codec *codec, int jack_insert)
snd_soc_dapm_force_enable_pin(dapm, "CBJ Power");
snd_soc_dapm_sync(dapm);
snd_soc_update_bits(codec, RT5663_RC_CLK,
- RT5668_DIG_1M_CLK_MASK, RT5668_DIG_1M_CLK_EN);
+ RT5663_DIG_1M_CLK_MASK, RT5663_DIG_1M_CLK_EN);
snd_soc_update_bits(codec, RT5663_RECMIX, 0x8, 0x8);
while (i < 5) {
msleep(sleep_time[i]);
- val = snd_soc_read(codec, RT5668_CBJ_TYPE_2) & 0x0003;
+ val = snd_soc_read(codec, RT5663_CBJ_TYPE_2) & 0x0003;
if (val == 0x1 || val == 0x2 || val == 0x3)
break;
dev_dbg(codec->dev, "%s: MX-0011 val=%x sleep %d\n",
@@ -1466,7 +1466,7 @@ static int rt5668_jack_detect(struct snd_soc_codec *codec, int jack_insert)
switch (val) {
case 1:
case 2:
- rt5668->jack_type = SND_JACK_HEADSET;
+ rt5663->jack_type = SND_JACK_HEADSET;
rt5663_enable_push_button_irq(codec, true);
break;
default:
@@ -1475,13 +1475,13 @@ static int rt5668_jack_detect(struct snd_soc_codec *codec, int jack_insert)
snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
snd_soc_dapm_disable_pin(dapm, "CBJ Power");
snd_soc_dapm_sync(dapm);
- rt5668->jack_type = SND_JACK_HEADPHONE;
+ rt5663->jack_type = SND_JACK_HEADPHONE;
break;
}
} else {
snd_soc_update_bits(codec, RT5663_RECMIX, 0x8, 0x0);
- if (rt5668->jack_type == SND_JACK_HEADSET) {
+ if (rt5663->jack_type == SND_JACK_HEADSET) {
rt5663_enable_push_button_irq(codec, false);
snd_soc_dapm_disable_pin(dapm, "MICBIAS1");
snd_soc_dapm_disable_pin(dapm, "MICBIAS2");
@@ -1489,11 +1489,11 @@ static int rt5668_jack_detect(struct snd_soc_codec *codec, int jack_insert)
snd_soc_dapm_disable_pin(dapm, "CBJ Power");
snd_soc_dapm_sync(dapm);
}
- rt5668->jack_type = 0;
+ rt5663->jack_type = 0;
}
- dev_dbg(codec->dev, "jack_type = %d\n", rt5668->jack_type);
- return rt5668->jack_type;
+ dev_dbg(codec->dev, "jack_type = %d\n", rt5663->jack_type);
+ return rt5663->jack_type;
}
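
For context, here is a minimal machine-driver sketch of how a headset jack is typically wired into this detection path. The rt5663_set_jack_detect() helper and the SND_JACK_BTN_* button bits are assumptions here (taken to be declared in rt5663.h and <sound/jack.h>, neither of which is shown in these hunks), and example_enable_jack() is a hypothetical name:

    static struct snd_soc_jack hs_jack;

    /* Sketch: create a headset jack and hand it to the codec driver so the
     * jack-detect work above has somewhere to report into. */
    static int example_enable_jack(struct snd_soc_card *card,
                                   struct snd_soc_codec *codec)
    {
            int ret;

            ret = snd_soc_card_jack_new(card, "Headset Jack",
                            SND_JACK_HEADSET | SND_JACK_BTN_0 |
                            SND_JACK_BTN_1 | SND_JACK_BTN_2 | SND_JACK_BTN_3,
                            &hs_jack, NULL, 0);
            if (ret)
                    return ret;

            return rt5663_set_jack_detect(codec, &hs_jack);
    }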
/**
@@ -1514,11 +1514,11 @@ static int rt5663_jack_detect(struct snd_soc_codec *codec, int jack_insert)
if (jack_insert) {
snd_soc_update_bits(codec, RT5663_DIG_MISC,
- RT5668_DIG_GATE_CTRL_MASK, RT5668_DIG_GATE_CTRL_EN);
+ RT5663_DIG_GATE_CTRL_MASK, RT5663_DIG_GATE_CTRL_EN);
snd_soc_update_bits(codec, RT5663_HP_CHARGE_PUMP_1,
- RT5663_SI_HP_MASK | RT5668_OSW_HP_L_MASK |
- RT5668_OSW_HP_R_MASK, RT5663_SI_HP_EN |
- RT5668_OSW_HP_L_DIS | RT5668_OSW_HP_R_DIS);
+ RT5663_SI_HP_MASK | RT5663_OSW_HP_L_MASK |
+ RT5663_OSW_HP_R_MASK, RT5663_SI_HP_EN |
+ RT5663_OSW_HP_L_DIS | RT5663_OSW_HP_R_DIS);
snd_soc_update_bits(codec, RT5663_DUMMY_1,
RT5663_EMB_CLK_MASK | RT5663_HPA_CPL_BIAS_MASK |
RT5663_HPA_CPR_BIAS_MASK, RT5663_EMB_CLK_EN |
@@ -1530,17 +1530,17 @@ static int rt5663_jack_detect(struct snd_soc_codec *codec, int jack_insert)
RT5663_PWR_MIC_DET_MASK, RT5663_PWR_MIC_DET_ON);
/* BST1 power on for JD */
snd_soc_update_bits(codec, RT5663_PWR_ANLG_2,
- RT5668_PWR_BST1_MASK, RT5668_PWR_BST1_ON);
+ RT5663_PWR_BST1_MASK, RT5663_PWR_BST1_ON);
snd_soc_update_bits(codec, RT5663_EM_JACK_TYPE_1,
RT5663_CBJ_DET_MASK | RT5663_EXT_JD_MASK |
RT5663_POL_EXT_JD_MASK, RT5663_CBJ_DET_EN |
RT5663_EXT_JD_EN | RT5663_POL_EXT_JD_EN);
snd_soc_update_bits(codec, RT5663_PWR_ANLG_1,
- RT5668_PWR_MB_MASK | RT5668_LDO1_DVO_MASK |
- RT5668_AMP_HP_MASK, RT5668_PWR_MB |
- RT5668_LDO1_DVO_0_9V | RT5668_AMP_HP_3X);
+ RT5663_PWR_MB_MASK | RT5663_LDO1_DVO_MASK |
+ RT5663_AMP_HP_MASK, RT5663_PWR_MB |
+ RT5663_LDO1_DVO_0_9V | RT5663_AMP_HP_3X);
snd_soc_update_bits(codec, RT5663_AUTO_1MRC_CLK,
- RT5668_IRQ_POW_SAV_MASK, RT5668_IRQ_POW_SAV_EN);
+ RT5663_IRQ_POW_SAV_MASK, RT5663_IRQ_POW_SAV_EN);
snd_soc_update_bits(codec, RT5663_IRQ_1,
RT5663_EN_IRQ_JD1_MASK, RT5663_EN_IRQ_JD1_EN);
while (i < 5) {
@@ -1619,13 +1619,13 @@ static bool rt5663_check_jd_status(struct snd_soc_codec *codec)
dev_dbg(codec->dev, "%s val=%x\n", __func__, val);
/* JD1 */
- switch (rt5663->codec_type) {
- case CODEC_TYPE_RT5668:
+ switch (rt5663->codec_ver) {
+ case CODEC_VER_1:
return !(val & 0x2000);
- case CODEC_TYPE_RT5663:
+ case CODEC_VER_0:
return !(val & 0x1000);
default:
- dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+ dev_err(codec->dev, "Unknown CODEC Version\n");
}
return false;
@@ -1645,15 +1645,16 @@ static void rt5663_jack_detect_work(struct work_struct *work)
/* jack in */
if (rt5663->jack_type == 0) {
/* jack was out, report jack type */
- switch (rt5663->codec_type) {
- case CODEC_TYPE_RT5668:
- report = rt5668_jack_detect(rt5663->codec, 1);
+ switch (rt5663->codec_ver) {
+ case CODEC_VER_1:
+ report = rt5663_v2_jack_detect(
+ rt5663->codec, 1);
break;
- case CODEC_TYPE_RT5663:
+ case CODEC_VER_0:
report = rt5663_jack_detect(rt5663->codec, 1);
break;
default:
- dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+ dev_err(codec->dev, "Unknown CODEC Version\n");
}
} else {
/* jack is already in, report button event */
@@ -1702,15 +1703,15 @@ static void rt5663_jack_detect_work(struct work_struct *work)
}
} else {
/* jack out */
- switch (rt5663->codec_type) {
- case CODEC_TYPE_RT5668:
- report = rt5668_jack_detect(rt5663->codec, 0);
+ switch (rt5663->codec_ver) {
+ case CODEC_VER_1:
+ report = rt5663_v2_jack_detect(rt5663->codec, 0);
break;
- case CODEC_TYPE_RT5663:
+ case CODEC_VER_0:
report = rt5663_jack_detect(rt5663->codec, 0);
break;
default:
- dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+ dev_err(codec->dev, "Unknown CODEC Version\n");
}
}
dev_dbg(codec->dev, "%s jack report: 0x%04x\n", __func__, report);
@@ -1722,24 +1723,24 @@ static void rt5663_jack_detect_work(struct work_struct *work)
static const struct snd_kcontrol_new rt5663_snd_controls[] = {
/* DAC Digital Volume */
SOC_DOUBLE_TLV("DAC Playback Volume", RT5663_STO1_DAC_DIG_VOL,
- RT5668_DAC_L1_VOL_SHIFT + 1, RT5668_DAC_R1_VOL_SHIFT + 1,
+ RT5663_DAC_L1_VOL_SHIFT + 1, RT5663_DAC_R1_VOL_SHIFT + 1,
87, 0, dac_vol_tlv),
/* ADC Digital Volume Control */
SOC_DOUBLE("ADC Capture Switch", RT5663_STO1_ADC_DIG_VOL,
- RT5668_ADC_L_MUTE_SHIFT, RT5668_ADC_R_MUTE_SHIFT, 1, 1),
+ RT5663_ADC_L_MUTE_SHIFT, RT5663_ADC_R_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_TLV("ADC Capture Volume", RT5663_STO1_ADC_DIG_VOL,
- RT5668_ADC_L_VOL_SHIFT + 1, RT5668_ADC_R_VOL_SHIFT + 1,
+ RT5663_ADC_L_VOL_SHIFT + 1, RT5663_ADC_R_VOL_SHIFT + 1,
63, 0, adc_vol_tlv),
};
-static const struct snd_kcontrol_new rt5668_specific_controls[] = {
+static const struct snd_kcontrol_new rt5663_v2_specific_controls[] = {
/* Headphone Output Volume */
SOC_DOUBLE_R_TLV("Headphone Playback Volume", RT5663_HP_LCH_DRE,
- RT5663_HP_RCH_DRE, RT5668_GAIN_HP_SHIFT, 15, 1,
- rt5668_hp_vol_tlv),
+ RT5663_HP_RCH_DRE, RT5663_GAIN_HP_SHIFT, 15, 1,
+ rt5663_v2_hp_vol_tlv),
/* Mic Boost Volume */
- SOC_SINGLE_TLV("IN1 Capture Volume", RT5668_AEC_BST,
- RT5668_GAIN_CBJ_SHIFT, 8, 0, in_bst_tlv),
+ SOC_SINGLE_TLV("IN1 Capture Volume", RT5663_AEC_BST,
+ RT5663_GAIN_CBJ_SHIFT, 8, 0, in_bst_tlv),
};
static const struct snd_kcontrol_new rt5663_specific_controls[] = {
@@ -1775,15 +1776,15 @@ static int rt5663_is_using_asrc(struct snd_soc_dapm_widget *w,
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
- if (rt5663->codec_type == CODEC_TYPE_RT5668) {
+ if (rt5663->codec_ver == CODEC_VER_1) {
switch (w->shift) {
- case RT5668_ADC_STO1_ASRC_SHIFT:
- reg = RT5668_ASRC_3;
- shift = RT5668_AD_STO1_TRACK_SHIFT;
+ case RT5663_ADC_STO1_ASRC_SHIFT:
+ reg = RT5663_ASRC_3;
+ shift = RT5663_V2_AD_STO1_TRACK_SHIFT;
break;
- case RT5668_DAC_STO1_ASRC_SHIFT:
+ case RT5663_DAC_STO1_ASRC_SHIFT:
reg = RT5663_ASRC_2;
- shift = RT5668_DA_STO1_TRACK_SHIFT;
+ shift = RT5663_DA_STO1_TRACK_SHIFT;
break;
default:
return 0;
@@ -1820,17 +1821,17 @@ static int rt5663_i2s_use_asrc(struct snd_soc_dapm_widget *source,
da_asrc_en = (snd_soc_read(codec, RT5663_ASRC_2) &
RT5663_DA_STO1_TRACK_MASK) ? 1 : 0;
- switch (rt5663->codec_type) {
- case CODEC_TYPE_RT5668:
- ad_asrc_en = (snd_soc_read(codec, RT5668_ASRC_3) &
- RT5668_AD_STO1_TRACK_MASK) ? 1 : 0;
+ switch (rt5663->codec_ver) {
+ case CODEC_VER_1:
+ ad_asrc_en = (snd_soc_read(codec, RT5663_ASRC_3) &
+ RT5663_V2_AD_STO1_TRACK_MASK) ? 1 : 0;
break;
- case CODEC_TYPE_RT5663:
+ case CODEC_VER_0:
ad_asrc_en = (snd_soc_read(codec, RT5663_ASRC_2) &
RT5663_AD_STO1_TRACK_MASK) ? 1 : 0;
break;
default:
- dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+ dev_err(codec->dev, "Unknown CODEC Version\n");
return 1;
}
@@ -1849,7 +1850,7 @@ static int rt5663_i2s_use_asrc(struct snd_soc_dapm_widget *source,
* @filter_mask: mask of filters.
* @clk_src: clock source
*
- * The ASRC function is for asynchronous MCLK and LRCK. Also, since RT5668 can
+ * The ASRC function is for asynchronous MCLK and LRCK. Also, since RT5663 can
* only support standard 32fs or 64fs i2s format, ASRC should be enabled to
* support special i2s clock format such as Intel's 100fs (100 * sampling rate).
* ASRC function will track i2s clock and generate a corresponding system clock
@@ -1860,7 +1861,7 @@ static int rt5663_i2s_use_asrc(struct snd_soc_dapm_widget *source,
int rt5663_sel_asrc_clk_src(struct snd_soc_codec *codec,
unsigned int filter_mask, unsigned int clk_src)
{
- struct rt5663_priv *rt5668 = snd_soc_codec_get_drvdata(codec);
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
unsigned int asrc2_mask = 0;
unsigned int asrc2_value = 0;
unsigned int asrc3_mask = 0;
@@ -1876,22 +1877,22 @@ int rt5663_sel_asrc_clk_src(struct snd_soc_codec *codec,
}
if (filter_mask & RT5663_DA_STEREO_FILTER) {
- asrc2_mask |= RT5668_DA_STO1_TRACK_MASK;
- asrc2_value |= clk_src << RT5668_DA_STO1_TRACK_SHIFT;
+ asrc2_mask |= RT5663_DA_STO1_TRACK_MASK;
+ asrc2_value |= clk_src << RT5663_DA_STO1_TRACK_SHIFT;
}
if (filter_mask & RT5663_AD_STEREO_FILTER) {
- switch (rt5668->codec_type) {
- case CODEC_TYPE_RT5668:
- asrc3_mask |= RT5668_AD_STO1_TRACK_MASK;
- asrc3_value |= clk_src << RT5668_AD_STO1_TRACK_SHIFT;
+ switch (rt5663->codec_ver) {
+ case CODEC_VER_1:
+ asrc3_mask |= RT5663_V2_AD_STO1_TRACK_MASK;
+ asrc3_value |= clk_src << RT5663_V2_AD_STO1_TRACK_SHIFT;
break;
- case CODEC_TYPE_RT5663:
+ case CODEC_VER_0:
asrc2_mask |= RT5663_AD_STO1_TRACK_MASK;
asrc2_value |= clk_src << RT5663_AD_STO1_TRACK_SHIFT;
break;
default:
- dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+ dev_err(codec->dev, "Unknown CODEC Version\n");
}
}
@@ -1900,7 +1901,7 @@ int rt5663_sel_asrc_clk_src(struct snd_soc_codec *codec,
asrc2_value);
if (asrc3_mask)
- snd_soc_update_bits(codec, RT5668_ASRC_3, asrc3_mask,
+ snd_soc_update_bits(codec, RT5663_ASRC_3, asrc3_mask,
asrc3_value);
return 0;
@@ -1908,82 +1909,82 @@ int rt5663_sel_asrc_clk_src(struct snd_soc_codec *codec,
EXPORT_SYMBOL_GPL(rt5663_sel_asrc_clk_src);
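
Since rt5663_sel_asrc_clk_src() is exported for machine drivers, a short usage sketch follows. The filter masks are the ones handled above; RT5663_CLK_SEL_I2S1_ASRC is assumed to be one of the clock-source values defined elsewhere in rt5663.h:

    /* Sketch: let the ASRC track I2S1 for both the stereo DAC and ADC
     * filters (clock-source constant name assumed). */
    ret = rt5663_sel_asrc_clk_src(codec,
                                  RT5663_DA_STEREO_FILTER |
                                  RT5663_AD_STEREO_FILTER,
                                  RT5663_CLK_SEL_I2S1_ASRC);
    if (ret)
            dev_warn(codec->dev, "Failed to select ASRC clock source\n");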
/* Analog Mixer */
-static const struct snd_kcontrol_new rt5668_recmix1l[] = {
- SOC_DAPM_SINGLE("BST2 Switch", RT5668_RECMIX1L,
- RT5668_RECMIX1L_BST2_SHIFT, 1, 1),
- SOC_DAPM_SINGLE("BST1 CBJ Switch", RT5668_RECMIX1L,
- RT5668_RECMIX1L_BST1_CBJ_SHIFT, 1, 1),
+static const struct snd_kcontrol_new rt5663_recmix1l[] = {
+ SOC_DAPM_SINGLE("BST2 Switch", RT5663_RECMIX1L,
+ RT5663_RECMIX1L_BST2_SHIFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 CBJ Switch", RT5663_RECMIX1L,
+ RT5663_RECMIX1L_BST1_CBJ_SHIFT, 1, 1),
};
-static const struct snd_kcontrol_new rt5668_recmix1r[] = {
- SOC_DAPM_SINGLE("BST2 Switch", RT5668_RECMIX1R,
- RT5668_RECMIX1R_BST2_SHIFT, 1, 1),
+static const struct snd_kcontrol_new rt5663_recmix1r[] = {
+ SOC_DAPM_SINGLE("BST2 Switch", RT5663_RECMIX1R,
+ RT5663_RECMIX1R_BST2_SHIFT, 1, 1),
};
/* Digital Mixer */
static const struct snd_kcontrol_new rt5663_sto1_adc_l_mix[] = {
SOC_DAPM_SINGLE("ADC1 Switch", RT5663_STO1_ADC_MIXER,
- RT5668_M_STO1_ADC_L1_SHIFT, 1, 1),
+ RT5663_M_STO1_ADC_L1_SHIFT, 1, 1),
SOC_DAPM_SINGLE("ADC2 Switch", RT5663_STO1_ADC_MIXER,
- RT5668_M_STO1_ADC_L2_SHIFT, 1, 1),
+ RT5663_M_STO1_ADC_L2_SHIFT, 1, 1),
};
-static const struct snd_kcontrol_new rt5668_sto1_adc_r_mix[] = {
+static const struct snd_kcontrol_new rt5663_sto1_adc_r_mix[] = {
SOC_DAPM_SINGLE("ADC1 Switch", RT5663_STO1_ADC_MIXER,
- RT5668_M_STO1_ADC_R1_SHIFT, 1, 1),
+ RT5663_M_STO1_ADC_R1_SHIFT, 1, 1),
SOC_DAPM_SINGLE("ADC2 Switch", RT5663_STO1_ADC_MIXER,
- RT5668_M_STO1_ADC_R2_SHIFT, 1, 1),
+ RT5663_M_STO1_ADC_R2_SHIFT, 1, 1),
};
static const struct snd_kcontrol_new rt5663_adda_l_mix[] = {
SOC_DAPM_SINGLE("ADC L Switch", RT5663_AD_DA_MIXER,
- RT5668_M_ADCMIX_L_SHIFT, 1, 1),
+ RT5663_M_ADCMIX_L_SHIFT, 1, 1),
SOC_DAPM_SINGLE("DAC L Switch", RT5663_AD_DA_MIXER,
- RT5668_M_DAC1_L_SHIFT, 1, 1),
+ RT5663_M_DAC1_L_SHIFT, 1, 1),
};
static const struct snd_kcontrol_new rt5663_adda_r_mix[] = {
SOC_DAPM_SINGLE("ADC R Switch", RT5663_AD_DA_MIXER,
- RT5668_M_ADCMIX_R_SHIFT, 1, 1),
+ RT5663_M_ADCMIX_R_SHIFT, 1, 1),
SOC_DAPM_SINGLE("DAC R Switch", RT5663_AD_DA_MIXER,
- RT5668_M_DAC1_R_SHIFT, 1, 1),
+ RT5663_M_DAC1_R_SHIFT, 1, 1),
};
static const struct snd_kcontrol_new rt5663_sto1_dac_l_mix[] = {
SOC_DAPM_SINGLE("DAC L Switch", RT5663_STO_DAC_MIXER,
- RT5668_M_DAC_L1_STO_L_SHIFT, 1, 1),
+ RT5663_M_DAC_L1_STO_L_SHIFT, 1, 1),
SOC_DAPM_SINGLE("DAC R Switch", RT5663_STO_DAC_MIXER,
- RT5668_M_DAC_R1_STO_L_SHIFT, 1, 1),
+ RT5663_M_DAC_R1_STO_L_SHIFT, 1, 1),
};
static const struct snd_kcontrol_new rt5663_sto1_dac_r_mix[] = {
SOC_DAPM_SINGLE("DAC L Switch", RT5663_STO_DAC_MIXER,
- RT5668_M_DAC_L1_STO_R_SHIFT, 1, 1),
+ RT5663_M_DAC_L1_STO_R_SHIFT, 1, 1),
SOC_DAPM_SINGLE("DAC R Switch", RT5663_STO_DAC_MIXER,
- RT5668_M_DAC_R1_STO_R_SHIFT, 1, 1),
+ RT5663_M_DAC_R1_STO_R_SHIFT, 1, 1),
};
/* Out Switch */
-static const struct snd_kcontrol_new rt5668_hpo_switch =
- SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5668_HP_AMP_2,
- RT5668_EN_DAC_HPO_SHIFT, 1, 0);
+static const struct snd_kcontrol_new rt5663_hpo_switch =
+ SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5663_HP_AMP_2,
+ RT5663_EN_DAC_HPO_SHIFT, 1, 0);
/* Stereo ADC source */
-static const char * const rt5668_sto1_adc_src[] = {
+static const char * const rt5663_sto1_adc_src[] = {
"ADC L", "ADC R"
};
-static SOC_ENUM_SINGLE_DECL(rt5668_sto1_adcl_enum, RT5663_STO1_ADC_MIXER,
- RT5668_STO1_ADC_L_SRC_SHIFT, rt5668_sto1_adc_src);
+static SOC_ENUM_SINGLE_DECL(rt5663_sto1_adcl_enum, RT5663_STO1_ADC_MIXER,
+ RT5663_STO1_ADC_L_SRC_SHIFT, rt5663_sto1_adc_src);
-static const struct snd_kcontrol_new rt5668_sto1_adcl_mux =
- SOC_DAPM_ENUM("STO1 ADC L Mux", rt5668_sto1_adcl_enum);
+static const struct snd_kcontrol_new rt5663_sto1_adcl_mux =
+ SOC_DAPM_ENUM("STO1 ADC L Mux", rt5663_sto1_adcl_enum);
-static SOC_ENUM_SINGLE_DECL(rt5668_sto1_adcr_enum, RT5663_STO1_ADC_MIXER,
- RT5668_STO1_ADC_R_SRC_SHIFT, rt5668_sto1_adc_src);
+static SOC_ENUM_SINGLE_DECL(rt5663_sto1_adcr_enum, RT5663_STO1_ADC_MIXER,
+ RT5663_STO1_ADC_R_SRC_SHIFT, rt5663_sto1_adc_src);
-static const struct snd_kcontrol_new rt5668_sto1_adcr_mux =
- SOC_DAPM_ENUM("STO1 ADC R Mux", rt5668_sto1_adcr_enum);
+static const struct snd_kcontrol_new rt5663_sto1_adcr_mux =
+ SOC_DAPM_ENUM("STO1 ADC R Mux", rt5663_sto1_adcr_enum);
/* RT5663: Analog DACL1 input source */
static const char * const rt5663_alg_dacl_src[] = {
@@ -2015,12 +2016,12 @@ static int rt5663_hp_event(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_POST_PMU:
- if (rt5663->codec_type == CODEC_TYPE_RT5668) {
+ if (rt5663->codec_ver == CODEC_VER_1) {
snd_soc_update_bits(codec, RT5663_HP_CHARGE_PUMP_1,
- RT5668_SEL_PM_HP_SHIFT, RT5668_SEL_PM_HP_HIGH);
+ RT5663_SEL_PM_HP_SHIFT, RT5663_SEL_PM_HP_HIGH);
snd_soc_update_bits(codec, RT5663_HP_LOGIC_2,
- RT5668_HP_SIG_SRC1_MASK,
- RT5668_HP_SIG_SRC1_SILENCE);
+ RT5663_HP_SIG_SRC1_MASK,
+ RT5663_HP_SIG_SRC1_SILENCE);
} else {
snd_soc_write(codec, RT5663_DEPOP_2, 0x3003);
snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x000b,
@@ -2028,7 +2029,7 @@ static int rt5663_hp_event(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x0030,
0x0030);
snd_soc_update_bits(codec, RT5663_HP_CHARGE_PUMP_1,
- RT5668_OVCD_HP_MASK, RT5668_OVCD_HP_DIS);
+ RT5663_OVCD_HP_MASK, RT5663_OVCD_HP_DIS);
snd_soc_write(codec, RT5663_HP_CHARGE_PUMP_2, 0x1371);
snd_soc_write(codec, RT5663_HP_BIAS, 0xabba);
snd_soc_write(codec, RT5663_CHARGE_PUMP_1, 0x2224);
@@ -2041,14 +2042,14 @@ static int rt5663_hp_event(struct snd_soc_dapm_widget *w,
break;
case SND_SOC_DAPM_PRE_PMD:
- if (rt5663->codec_type == CODEC_TYPE_RT5668) {
+ if (rt5663->codec_ver == CODEC_VER_1) {
snd_soc_update_bits(codec, RT5663_HP_LOGIC_2,
- RT5668_HP_SIG_SRC1_MASK,
- RT5668_HP_SIG_SRC1_REG);
+ RT5663_HP_SIG_SRC1_MASK,
+ RT5663_HP_SIG_SRC1_REG);
} else {
snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x3000, 0x0);
snd_soc_update_bits(codec, RT5663_HP_CHARGE_PUMP_1,
- RT5668_OVCD_HP_MASK, RT5668_OVCD_HP_EN);
+ RT5663_OVCD_HP_MASK, RT5663_OVCD_HP_EN);
snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x0030, 0x0);
snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x000b,
0x000b);
@@ -2062,7 +2063,7 @@ static int rt5663_hp_event(struct snd_soc_dapm_widget *w,
return 0;
}
-static int rt5668_bst2_power(struct snd_soc_dapm_widget *w,
+static int rt5663_bst2_power(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
@@ -2070,13 +2071,13 @@ static int rt5668_bst2_power(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_POST_PMU:
snd_soc_update_bits(codec, RT5663_PWR_ANLG_2,
- RT5668_PWR_BST2_MASK | RT5668_PWR_BST2_OP_MASK,
- RT5668_PWR_BST2 | RT5668_PWR_BST2_OP);
+ RT5663_PWR_BST2_MASK | RT5663_PWR_BST2_OP_MASK,
+ RT5663_PWR_BST2 | RT5663_PWR_BST2_OP);
break;
case SND_SOC_DAPM_PRE_PMD:
snd_soc_update_bits(codec, RT5663_PWR_ANLG_2,
- RT5668_PWR_BST2_MASK | RT5668_PWR_BST2_OP_MASK, 0);
+ RT5663_PWR_BST2_MASK | RT5663_PWR_BST2_OP_MASK, 0);
break;
default:
@@ -2110,14 +2111,14 @@ static int rt5663_pre_div_power(struct snd_soc_dapm_widget *w,
}
static const struct snd_soc_dapm_widget rt5663_dapm_widgets[] = {
- SND_SOC_DAPM_SUPPLY("PLL", RT5663_PWR_ANLG_3, RT5668_PWR_PLL_SHIFT, 0,
+ SND_SOC_DAPM_SUPPLY("PLL", RT5663_PWR_ANLG_3, RT5663_PWR_PLL_SHIFT, 0,
NULL, 0),
/* micbias */
SND_SOC_DAPM_MICBIAS("MICBIAS1", RT5663_PWR_ANLG_2,
- RT5668_PWR_MB1_SHIFT, 0),
+ RT5663_PWR_MB1_SHIFT, 0),
SND_SOC_DAPM_MICBIAS("MICBIAS2", RT5663_PWR_ANLG_2,
- RT5668_PWR_MB2_SHIFT, 0),
+ RT5663_PWR_MB2_SHIFT, 0),
/* Input Lines */
SND_SOC_DAPM_INPUT("IN1P"),
@@ -2125,14 +2126,14 @@ static const struct snd_soc_dapm_widget rt5663_dapm_widgets[] = {
/* REC Mixer Power */
SND_SOC_DAPM_SUPPLY("RECMIX1L Power", RT5663_PWR_ANLG_2,
- RT5668_PWR_RECMIX1_SHIFT, 0, NULL, 0),
+ RT5663_PWR_RECMIX1_SHIFT, 0, NULL, 0),
/* ADCs */
SND_SOC_DAPM_ADC("ADC L", NULL, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_SUPPLY("ADC L Power", RT5663_PWR_DIG_1,
- RT5668_PWR_ADC_L1_SHIFT, 0, NULL, 0),
+ RT5663_PWR_ADC_L1_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("ADC Clock", RT5663_CHOP_ADC,
- RT5668_CKGEN_ADCC_SHIFT, 0, NULL, 0),
+ RT5663_CKGEN_ADCC_SHIFT, 0, NULL, 0),
/* ADC Mixer */
SND_SOC_DAPM_MIXER("STO1 ADC MIXL", SND_SOC_NOPM,
@@ -2141,10 +2142,10 @@ static const struct snd_soc_dapm_widget rt5663_dapm_widgets[] = {
/* ADC Filter Power */
SND_SOC_DAPM_SUPPLY("STO1 ADC Filter", RT5663_PWR_DIG_2,
- RT5668_PWR_ADC_S1F_SHIFT, 0, NULL, 0),
+ RT5663_PWR_ADC_S1F_SHIFT, 0, NULL, 0),
/* Digital Interface */
- SND_SOC_DAPM_SUPPLY("I2S", RT5663_PWR_DIG_1, RT5668_PWR_I2S1_SHIFT, 0,
+ SND_SOC_DAPM_SUPPLY("I2S", RT5663_PWR_DIG_1, RT5663_PWR_I2S1_SHIFT, 0,
NULL, 0),
SND_SOC_DAPM_PGA("IF DAC", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("IF1 DAC1 L", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -2166,7 +2167,7 @@ static const struct snd_soc_dapm_widget rt5663_dapm_widgets[] = {
/* DAC Mixer */
SND_SOC_DAPM_SUPPLY("STO1 DAC Filter", RT5663_PWR_DIG_2,
- RT5668_PWR_DAC_S1F_SHIFT, 0, NULL, 0),
+ RT5663_PWR_DAC_S1F_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_MIXER("STO1 DAC MIXL", SND_SOC_NOPM, 0, 0,
rt5663_sto1_dac_l_mix, ARRAY_SIZE(rt5663_sto1_dac_l_mix)),
SND_SOC_DAPM_MIXER("STO1 DAC MIXR", SND_SOC_NOPM, 0, 0,
@@ -2174,9 +2175,9 @@ static const struct snd_soc_dapm_widget rt5663_dapm_widgets[] = {
/* DACs */
SND_SOC_DAPM_SUPPLY("STO1 DAC L Power", RT5663_PWR_DIG_1,
- RT5668_PWR_DAC_L1_SHIFT, 0, NULL, 0),
+ RT5663_PWR_DAC_L1_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("STO1 DAC R Power", RT5663_PWR_DIG_1,
- RT5668_PWR_DAC_R1_SHIFT, 0, NULL, 0),
+ RT5663_PWR_DAC_R1_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_DAC("DAC L", NULL, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_DAC("DAC R", NULL, SND_SOC_NOPM, 0, 0),
@@ -2189,21 +2190,21 @@ static const struct snd_soc_dapm_widget rt5663_dapm_widgets[] = {
SND_SOC_DAPM_OUTPUT("HPOR"),
};
-static const struct snd_soc_dapm_widget rt5668_specific_dapm_widgets[] = {
+static const struct snd_soc_dapm_widget rt5663_v2_specific_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("LDO2", RT5663_PWR_ANLG_3,
- RT5668_PWR_LDO2_SHIFT, 0, NULL, 0),
- SND_SOC_DAPM_SUPPLY("Mic Det Power", RT5668_PWR_VOL,
- RT5668_PWR_MIC_DET_SHIFT, 0, NULL, 0),
+ RT5663_PWR_LDO2_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Mic Det Power", RT5663_PWR_VOL,
+ RT5663_V2_PWR_MIC_DET_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("LDO DAC", RT5663_PWR_DIG_1,
- RT5668_PWR_LDO_DACREF_SHIFT, 0, NULL, 0),
+ RT5663_PWR_LDO_DACREF_SHIFT, 0, NULL, 0),
/* ASRC */
SND_SOC_DAPM_SUPPLY("I2S ASRC", RT5663_ASRC_1,
- RT5668_I2S1_ASRC_SHIFT, 0, NULL, 0),
+ RT5663_I2S1_ASRC_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("DAC ASRC", RT5663_ASRC_1,
- RT5668_DAC_STO1_ASRC_SHIFT, 0, NULL, 0),
+ RT5663_DAC_STO1_ASRC_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("ADC ASRC", RT5663_ASRC_1,
- RT5668_ADC_STO1_ASRC_SHIFT, 0, NULL, 0),
+ RT5663_ADC_STO1_ASRC_SHIFT, 0, NULL, 0),
/* Input Lines */
SND_SOC_DAPM_INPUT("IN2P"),
@@ -2212,51 +2213,51 @@ static const struct snd_soc_dapm_widget rt5668_specific_dapm_widgets[] = {
/* Boost */
SND_SOC_DAPM_PGA("BST1 CBJ", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("CBJ Power", RT5663_PWR_ANLG_3,
- RT5668_PWR_CBJ_SHIFT, 0, NULL, 0),
+ RT5663_PWR_CBJ_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("BST2", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("BST2 Power", SND_SOC_NOPM, 0, 0,
- rt5668_bst2_power, SND_SOC_DAPM_PRE_PMD |
+ rt5663_bst2_power, SND_SOC_DAPM_PRE_PMD |
SND_SOC_DAPM_POST_PMU),
/* REC Mixer */
- SND_SOC_DAPM_MIXER("RECMIX1L", SND_SOC_NOPM, 0, 0, rt5668_recmix1l,
- ARRAY_SIZE(rt5668_recmix1l)),
- SND_SOC_DAPM_MIXER("RECMIX1R", SND_SOC_NOPM, 0, 0, rt5668_recmix1r,
- ARRAY_SIZE(rt5668_recmix1r)),
+ SND_SOC_DAPM_MIXER("RECMIX1L", SND_SOC_NOPM, 0, 0, rt5663_recmix1l,
+ ARRAY_SIZE(rt5663_recmix1l)),
+ SND_SOC_DAPM_MIXER("RECMIX1R", SND_SOC_NOPM, 0, 0, rt5663_recmix1r,
+ ARRAY_SIZE(rt5663_recmix1r)),
SND_SOC_DAPM_SUPPLY("RECMIX1R Power", RT5663_PWR_ANLG_2,
- RT5668_PWR_RECMIX2_SHIFT, 0, NULL, 0),
+ RT5663_PWR_RECMIX2_SHIFT, 0, NULL, 0),
/* ADC */
SND_SOC_DAPM_ADC("ADC R", NULL, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_SUPPLY("ADC R Power", RT5663_PWR_DIG_1,
- RT5668_PWR_ADC_R1_SHIFT, 0, NULL, 0),
+ RT5663_PWR_ADC_R1_SHIFT, 0, NULL, 0),
/* ADC Mux */
SND_SOC_DAPM_PGA("STO1 ADC L1", RT5663_STO1_ADC_MIXER,
- RT5668_STO1_ADC_L1_SRC_SHIFT, 0, NULL, 0),
+ RT5663_STO1_ADC_L1_SRC_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("STO1 ADC R1", RT5663_STO1_ADC_MIXER,
- RT5668_STO1_ADC_R1_SRC_SHIFT, 0, NULL, 0),
+ RT5663_STO1_ADC_R1_SRC_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("STO1 ADC L2", RT5663_STO1_ADC_MIXER,
- RT5668_STO1_ADC_L2_SRC_SHIFT, 1, NULL, 0),
+ RT5663_STO1_ADC_L2_SRC_SHIFT, 1, NULL, 0),
SND_SOC_DAPM_PGA("STO1 ADC R2", RT5663_STO1_ADC_MIXER,
- RT5668_STO1_ADC_R2_SRC_SHIFT, 1, NULL, 0),
+ RT5663_STO1_ADC_R2_SRC_SHIFT, 1, NULL, 0),
SND_SOC_DAPM_MUX("STO1 ADC L Mux", SND_SOC_NOPM, 0, 0,
- &rt5668_sto1_adcl_mux),
+ &rt5663_sto1_adcl_mux),
SND_SOC_DAPM_MUX("STO1 ADC R Mux", SND_SOC_NOPM, 0, 0,
- &rt5668_sto1_adcr_mux),
+ &rt5663_sto1_adcr_mux),
/* ADC Mix */
SND_SOC_DAPM_MIXER("STO1 ADC MIXR", SND_SOC_NOPM, 0, 0,
- rt5668_sto1_adc_r_mix, ARRAY_SIZE(rt5668_sto1_adc_r_mix)),
+ rt5663_sto1_adc_r_mix, ARRAY_SIZE(rt5663_sto1_adc_r_mix)),
/* Analog DAC Clock */
SND_SOC_DAPM_SUPPLY("DAC Clock", RT5663_CHOP_DAC_L,
- RT5668_CKGEN_DAC1_SHIFT, 0, NULL, 0),
+ RT5663_CKGEN_DAC1_SHIFT, 0, NULL, 0),
/* Headphone out */
SND_SOC_DAPM_SWITCH("HPO Playback", SND_SOC_NOPM, 0, 0,
- &rt5668_hpo_switch),
+ &rt5663_hpo_switch),
};
static const struct snd_soc_dapm_widget rt5663_specific_dapm_widgets[] = {
@@ -2267,7 +2268,7 @@ static const struct snd_soc_dapm_widget rt5663_specific_dapm_widgets[] = {
/* LDO */
SND_SOC_DAPM_SUPPLY("LDO ADC", RT5663_PWR_DIG_1,
- RT5668_PWR_LDO_DACREF_SHIFT, 0, NULL, 0),
+ RT5663_PWR_LDO_DACREF_SHIFT, 0, NULL, 0),
/* ASRC */
SND_SOC_DAPM_SUPPLY("I2S ASRC", RT5663_ASRC_1,
@@ -2341,7 +2342,7 @@ static const struct snd_soc_dapm_route rt5663_dapm_routes[] = {
{ "HP Amp", NULL, "DAC R" },
};
-static const struct snd_soc_dapm_route rt5668_specific_dapm_routes[] = {
+static const struct snd_soc_dapm_route rt5663_v2_specific_dapm_routes[] = {
{ "MICBIAS1", NULL, "LDO2" },
{ "MICBIAS2", NULL, "LDO2" },
@@ -2440,26 +2441,26 @@ static int rt5663_hw_params(struct snd_pcm_substream *substream,
switch (params_width(params)) {
case 8:
- val_len = RT5668_I2S_DL_8;
+ val_len = RT5663_I2S_DL_8;
break;
case 16:
- val_len = RT5668_I2S_DL_16;
+ val_len = RT5663_I2S_DL_16;
break;
case 20:
- val_len = RT5668_I2S_DL_20;
+ val_len = RT5663_I2S_DL_20;
break;
case 24:
- val_len = RT5668_I2S_DL_24;
+ val_len = RT5663_I2S_DL_24;
break;
default:
return -EINVAL;
}
snd_soc_update_bits(codec, RT5663_I2S1_SDP,
- RT5668_I2S_DL_MASK, val_len);
+ RT5663_I2S_DL_MASK, val_len);
snd_soc_update_bits(codec, RT5663_ADDA_CLK_1,
- RT5668_I2S_PD1_MASK, pre_div << RT5668_I2S_PD1_SHIFT);
+ RT5663_I2S_PD1_MASK, pre_div << RT5663_I2S_PD1_SHIFT);
return 0;
}
@@ -2473,7 +2474,7 @@ static int rt5663_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
case SND_SOC_DAIFMT_CBM_CFM:
break;
case SND_SOC_DAIFMT_CBS_CFS:
- reg_val |= RT5668_I2S_MS_S;
+ reg_val |= RT5663_I2S_MS_S;
break;
default:
return -EINVAL;
@@ -2483,7 +2484,7 @@ static int rt5663_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
case SND_SOC_DAIFMT_NB_NF:
break;
case SND_SOC_DAIFMT_IB_NF:
- reg_val |= RT5668_I2S_BP_INV;
+ reg_val |= RT5663_I2S_BP_INV;
break;
default:
return -EINVAL;
@@ -2493,20 +2494,20 @@ static int rt5663_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
case SND_SOC_DAIFMT_I2S:
break;
case SND_SOC_DAIFMT_LEFT_J:
- reg_val |= RT5668_I2S_DF_LEFT;
+ reg_val |= RT5663_I2S_DF_LEFT;
break;
case SND_SOC_DAIFMT_DSP_A:
- reg_val |= RT5668_I2S_DF_PCM_A;
+ reg_val |= RT5663_I2S_DF_PCM_A;
break;
case SND_SOC_DAIFMT_DSP_B:
- reg_val |= RT5668_I2S_DF_PCM_B;
+ reg_val |= RT5663_I2S_DF_PCM_B;
break;
default:
return -EINVAL;
}
- snd_soc_update_bits(codec, RT5663_I2S1_SDP, RT5668_I2S_MS_MASK |
- RT5668_I2S_BP_MASK | RT5668_I2S_DF_MASK, reg_val);
+ snd_soc_update_bits(codec, RT5663_I2S1_SDP, RT5663_I2S_MS_MASK |
+ RT5663_I2S_BP_MASK | RT5663_I2S_DF_MASK, reg_val);
return 0;
}
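
Both DAI callbacks above are reached through the generic DAI helpers; a hedged fragment from a machine driver (codec_dai and ret assumed in scope) that selects a format this handler accepts:

    /* Sketch: plain I2S, normal bit/frame clock polarity, codec as clock
     * slave; maps to RT5663_I2S_MS_S with no inversion in set_dai_fmt above. */
    ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
                              SND_SOC_DAIFMT_NB_NF |
                              SND_SOC_DAIFMT_CBS_CFS);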
@@ -2535,7 +2536,7 @@ static int rt5663_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
dev_err(codec->dev, "Invalid clock id (%d)\n", clk_id);
return -EINVAL;
}
- snd_soc_update_bits(codec, RT5663_GLB_CLK, RT5668_SCLK_SRC_MASK,
+ snd_soc_update_bits(codec, RT5663_GLB_CLK, RT5663_SCLK_SRC_MASK,
reg_val);
rt5663->sysclk = freq;
rt5663->sysclk_src = clk_id;
@@ -2569,17 +2570,17 @@ static int rt5663_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int source,
return 0;
}
- switch (rt5663->codec_type) {
- case CODEC_TYPE_RT5668:
- mask = RT5668_PLL1_SRC_MASK;
- shift = RT5668_PLL1_SRC_SHIFT;
+ switch (rt5663->codec_ver) {
+ case CODEC_VER_1:
+ mask = RT5663_V2_PLL1_SRC_MASK;
+ shift = RT5663_V2_PLL1_SRC_SHIFT;
break;
- case CODEC_TYPE_RT5663:
+ case CODEC_VER_0:
mask = RT5663_PLL1_SRC_MASK;
shift = RT5663_PLL1_SRC_SHIFT;
break;
default:
- dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+ dev_err(codec->dev, "Unknown CODEC Version\n");
return -EINVAL;
}
@@ -2607,10 +2608,10 @@ static int rt5663_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int source,
pll_code.k_code);
snd_soc_write(codec, RT5663_PLL_1,
- pll_code.n_code << RT5668_PLL_N_SHIFT | pll_code.k_code);
+ pll_code.n_code << RT5663_PLL_N_SHIFT | pll_code.k_code);
snd_soc_write(codec, RT5663_PLL_2,
- (pll_code.m_bp ? 0 : pll_code.m_code) << RT5668_PLL_M_SHIFT |
- pll_code.m_bp << RT5668_PLL_M_BP_SHIFT);
+ (pll_code.m_bp ? 0 : pll_code.m_code) << RT5663_PLL_M_SHIFT |
+ pll_code.m_bp << RT5663_PLL_M_BP_SHIFT);
rt5663->pll_in = freq_in;
rt5663->pll_out = freq_out;
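
The sysclk and PLL hooks above are likewise driven from the machine driver; a minimal sketch, assuming the RT5663_PLL1_S_MCLK and RT5663_SCLK_S_PLL1 source IDs declared elsewhere in rt5663.h and a hypothetical 19.2 MHz MCLK:

    /* Sketch: run PLL1 from MCLK and take the system clock from PLL1
     * (source-ID names assumed from rt5663.h). */
    ret = snd_soc_dai_set_pll(codec_dai, 0, RT5663_PLL1_S_MCLK,
                              19200000, 24576000);
    if (ret)
            return ret;

    ret = snd_soc_dai_set_sysclk(codec_dai, RT5663_SCLK_S_PLL1,
                                 24576000, SND_SOC_CLOCK_IN);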
@@ -2627,20 +2628,20 @@ static int rt5663_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
unsigned int val = 0, reg;
if (rx_mask || tx_mask)
- val |= RT5668_TDM_MODE_TDM;
+ val |= RT5663_TDM_MODE_TDM;
switch (slots) {
case 4:
- val |= RT5668_TDM_IN_CH_4;
- val |= RT5668_TDM_OUT_CH_4;
+ val |= RT5663_TDM_IN_CH_4;
+ val |= RT5663_TDM_OUT_CH_4;
break;
case 6:
- val |= RT5668_TDM_IN_CH_6;
- val |= RT5668_TDM_OUT_CH_6;
+ val |= RT5663_TDM_IN_CH_6;
+ val |= RT5663_TDM_OUT_CH_6;
break;
case 8:
- val |= RT5668_TDM_IN_CH_8;
- val |= RT5668_TDM_OUT_CH_8;
+ val |= RT5663_TDM_IN_CH_8;
+ val |= RT5663_TDM_OUT_CH_8;
break;
case 2:
break;
@@ -2650,16 +2651,16 @@ static int rt5663_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
switch (slot_width) {
case 20:
- val |= RT5668_TDM_IN_LEN_20;
- val |= RT5668_TDM_OUT_LEN_20;
+ val |= RT5663_TDM_IN_LEN_20;
+ val |= RT5663_TDM_OUT_LEN_20;
break;
case 24:
- val |= RT5668_TDM_IN_LEN_24;
- val |= RT5668_TDM_OUT_LEN_24;
+ val |= RT5663_TDM_IN_LEN_24;
+ val |= RT5663_TDM_OUT_LEN_24;
break;
case 32:
- val |= RT5668_TDM_IN_LEN_32;
- val |= RT5668_TDM_OUT_LEN_32;
+ val |= RT5663_TDM_IN_LEN_32;
+ val |= RT5663_TDM_OUT_LEN_32;
break;
case 16:
break;
@@ -2667,21 +2668,21 @@ static int rt5663_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
return -EINVAL;
}
- switch (rt5663->codec_type) {
- case CODEC_TYPE_RT5668:
+ switch (rt5663->codec_ver) {
+ case CODEC_VER_1:
reg = RT5663_TDM_2;
break;
- case CODEC_TYPE_RT5663:
+ case CODEC_VER_0:
reg = RT5663_TDM_1;
break;
default:
- dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+ dev_err(codec->dev, "Unknown CODEC Version\n");
return -EINVAL;
}
- snd_soc_update_bits(codec, reg, RT5668_TDM_MODE_MASK |
- RT5668_TDM_IN_CH_MASK | RT5668_TDM_OUT_CH_MASK |
- RT5668_TDM_IN_LEN_MASK | RT5668_TDM_OUT_LEN_MASK, val);
+ snd_soc_update_bits(codec, reg, RT5663_TDM_MODE_MASK |
+ RT5663_TDM_IN_CH_MASK | RT5663_TDM_OUT_CH_MASK |
+ RT5663_TDM_IN_LEN_MASK | RT5663_TDM_OUT_LEN_MASK, val);
return 0;
}
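
A usage sketch for the TDM hook above; only 2/4/6/8 slots and 16/20/24/32-bit slot widths are accepted, so the fragment below (codec_dai and ret assumed in scope) requests a supported combination:

    /* Sketch: 4 slots of 24 bits, slots 0 and 1 active in each direction;
     * this selects the *_TDM_IN/OUT_CH_4 and *_LEN_24 values above. */
    ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x3, 0x3, 4, 24);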
@@ -2694,8 +2695,8 @@ static int rt5663_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
dev_dbg(codec->dev, "%s ratio = %d\n", __func__, ratio);
- if (rt5663->codec_type == CODEC_TYPE_RT5668)
- reg = RT5668_TDM_8;
+ if (rt5663->codec_ver == CODEC_VER_1)
+ reg = RT5663_TDM_9;
else
reg = RT5663_TDM_5;
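
The bclk-ratio hook writes RT5663_TDM_9 or RT5663_TDM_5 depending on the codec version; callers again go through the generic helper, e.g.:

    /* Sketch: 64 bit clocks per LRCK frame. */
    ret = snd_soc_dai_set_bclk_ratio(codec_dai, 64);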
@@ -2736,47 +2737,47 @@ static int rt5663_set_bias_level(struct snd_soc_codec *codec,
switch (level) {
case SND_SOC_BIAS_ON:
snd_soc_update_bits(codec, RT5663_PWR_ANLG_1,
- RT5668_PWR_FV1_MASK | RT5668_PWR_FV2_MASK,
- RT5668_PWR_FV1 | RT5668_PWR_FV2);
+ RT5663_PWR_FV1_MASK | RT5663_PWR_FV2_MASK,
+ RT5663_PWR_FV1 | RT5663_PWR_FV2);
break;
case SND_SOC_BIAS_PREPARE:
- if (rt5663->codec_type == CODEC_TYPE_RT5668) {
+ if (rt5663->codec_ver == CODEC_VER_1) {
snd_soc_update_bits(codec, RT5663_DIG_MISC,
- RT5668_DIG_GATE_CTRL_MASK,
- RT5668_DIG_GATE_CTRL_EN);
+ RT5663_DIG_GATE_CTRL_MASK,
+ RT5663_DIG_GATE_CTRL_EN);
snd_soc_update_bits(codec, RT5663_SIG_CLK_DET,
- RT5668_EN_ANA_CLK_DET_MASK |
- RT5668_PWR_CLK_DET_MASK,
- RT5668_EN_ANA_CLK_DET_AUTO |
- RT5668_PWR_CLK_DET_EN);
+ RT5663_EN_ANA_CLK_DET_MASK |
+ RT5663_PWR_CLK_DET_MASK,
+ RT5663_EN_ANA_CLK_DET_AUTO |
+ RT5663_PWR_CLK_DET_EN);
}
break;
case SND_SOC_BIAS_STANDBY:
- if (rt5663->codec_type == CODEC_TYPE_RT5668)
+ if (rt5663->codec_ver == CODEC_VER_1)
snd_soc_update_bits(codec, RT5663_DIG_MISC,
- RT5668_DIG_GATE_CTRL_MASK,
- RT5668_DIG_GATE_CTRL_DIS);
+ RT5663_DIG_GATE_CTRL_MASK,
+ RT5663_DIG_GATE_CTRL_DIS);
snd_soc_update_bits(codec, RT5663_PWR_ANLG_1,
- RT5668_PWR_VREF1_MASK | RT5668_PWR_VREF2_MASK |
- RT5668_PWR_FV1_MASK | RT5668_PWR_FV2_MASK |
- RT5668_PWR_MB_MASK, RT5668_PWR_VREF1 |
- RT5668_PWR_VREF2 | RT5668_PWR_MB);
+ RT5663_PWR_VREF1_MASK | RT5663_PWR_VREF2_MASK |
+ RT5663_PWR_FV1_MASK | RT5663_PWR_FV2_MASK |
+ RT5663_PWR_MB_MASK, RT5663_PWR_VREF1 |
+ RT5663_PWR_VREF2 | RT5663_PWR_MB);
usleep_range(10000, 10005);
- if (rt5663->codec_type == CODEC_TYPE_RT5668) {
+ if (rt5663->codec_ver == CODEC_VER_1) {
snd_soc_update_bits(codec, RT5663_SIG_CLK_DET,
- RT5668_EN_ANA_CLK_DET_MASK |
- RT5668_PWR_CLK_DET_MASK,
- RT5668_EN_ANA_CLK_DET_DIS |
- RT5668_PWR_CLK_DET_DIS);
+ RT5663_EN_ANA_CLK_DET_MASK |
+ RT5663_PWR_CLK_DET_MASK,
+ RT5663_EN_ANA_CLK_DET_DIS |
+ RT5663_PWR_CLK_DET_DIS);
}
break;
case SND_SOC_BIAS_OFF:
snd_soc_update_bits(codec, RT5663_PWR_ANLG_1,
- RT5668_PWR_VREF1_MASK | RT5668_PWR_VREF2_MASK |
- RT5668_PWR_FV1 | RT5668_PWR_FV2, 0x0);
+ RT5663_PWR_VREF1_MASK | RT5663_PWR_VREF2_MASK |
+ RT5663_PWR_FV1 | RT5663_PWR_FV2, 0x0);
break;
default:
@@ -2793,18 +2794,18 @@ static int rt5663_probe(struct snd_soc_codec *codec)
rt5663->codec = codec;
- switch (rt5663->codec_type) {
- case CODEC_TYPE_RT5668:
+ switch (rt5663->codec_ver) {
+ case CODEC_VER_1:
snd_soc_dapm_new_controls(dapm,
- rt5668_specific_dapm_widgets,
- ARRAY_SIZE(rt5668_specific_dapm_widgets));
+ rt5663_v2_specific_dapm_widgets,
+ ARRAY_SIZE(rt5663_v2_specific_dapm_widgets));
snd_soc_dapm_add_routes(dapm,
- rt5668_specific_dapm_routes,
- ARRAY_SIZE(rt5668_specific_dapm_routes));
- snd_soc_add_codec_controls(codec, rt5668_specific_controls,
- ARRAY_SIZE(rt5668_specific_controls));
+ rt5663_v2_specific_dapm_routes,
+ ARRAY_SIZE(rt5663_v2_specific_dapm_routes));
+ snd_soc_add_codec_controls(codec, rt5663_v2_specific_controls,
+ ARRAY_SIZE(rt5663_v2_specific_controls));
break;
- case CODEC_TYPE_RT5663:
+ case CODEC_VER_0:
snd_soc_dapm_new_controls(dapm,
rt5663_specific_dapm_widgets,
ARRAY_SIZE(rt5663_specific_dapm_widgets));
@@ -2905,16 +2906,16 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5663 = {
}
};
-static const struct regmap_config rt5668_regmap = {
+static const struct regmap_config rt5663_v2_regmap = {
.reg_bits = 16,
.val_bits = 16,
.use_single_rw = true,
.max_register = 0x07fa,
- .volatile_reg = rt5668_volatile_register,
- .readable_reg = rt5668_readable_register,
+ .volatile_reg = rt5663_v2_volatile_register,
+ .readable_reg = rt5663_v2_readable_register,
.cache_type = REGCACHE_RBTREE,
- .reg_defaults = rt5668_reg,
- .num_reg_defaults = ARRAY_SIZE(rt5668_reg),
+ .reg_defaults = rt5663_v2_reg,
+ .num_reg_defaults = ARRAY_SIZE(rt5663_v2_reg),
};
static const struct regmap_config rt5663_regmap = {
@@ -2939,7 +2940,6 @@ static const struct regmap_config temp_regmap = {
};
static const struct i2c_device_id rt5663_i2c_id[] = {
- { "rt5668", 0 },
{ "rt5663", 0 },
{}
};
@@ -2947,7 +2947,6 @@ MODULE_DEVICE_TABLE(i2c, rt5663_i2c_id);
#if defined(CONFIG_OF)
static const struct of_device_id rt5663_of_match[] = {
- { .compatible = "realtek,rt5668", },
{ .compatible = "realtek,rt5663", },
{},
};
@@ -2956,80 +2955,79 @@ MODULE_DEVICE_TABLE(of, rt5663_of_match);
#ifdef CONFIG_ACPI
static struct acpi_device_id rt5663_acpi_match[] = {
- { "10EC5668", 0},
{ "10EC5663", 0},
{},
};
MODULE_DEVICE_TABLE(acpi, rt5663_acpi_match);
#endif
-static void rt5668_calibrate(struct rt5663_priv *rt5668)
+static void rt5663_v2_calibrate(struct rt5663_priv *rt5663)
{
- regmap_write(rt5668->regmap, RT5663_BIAS_CUR_8, 0xa402);
- regmap_write(rt5668->regmap, RT5663_PWR_DIG_1, 0x0100);
- regmap_write(rt5668->regmap, RT5663_RECMIX, 0x4040);
- regmap_write(rt5668->regmap, RT5663_DIG_MISC, 0x0001);
- regmap_write(rt5668->regmap, RT5663_RC_CLK, 0x0380);
- regmap_write(rt5668->regmap, RT5663_GLB_CLK, 0x8000);
- regmap_write(rt5668->regmap, RT5663_ADDA_CLK_1, 0x1000);
- regmap_write(rt5668->regmap, RT5663_CHOP_DAC_L, 0x3030);
- regmap_write(rt5668->regmap, RT5663_CALIB_ADC, 0x3c05);
- regmap_write(rt5668->regmap, RT5663_PWR_ANLG_1, 0xa23e);
+ regmap_write(rt5663->regmap, RT5663_BIAS_CUR_8, 0xa402);
+ regmap_write(rt5663->regmap, RT5663_PWR_DIG_1, 0x0100);
+ regmap_write(rt5663->regmap, RT5663_RECMIX, 0x4040);
+ regmap_write(rt5663->regmap, RT5663_DIG_MISC, 0x0001);
+ regmap_write(rt5663->regmap, RT5663_RC_CLK, 0x0380);
+ regmap_write(rt5663->regmap, RT5663_GLB_CLK, 0x8000);
+ regmap_write(rt5663->regmap, RT5663_ADDA_CLK_1, 0x1000);
+ regmap_write(rt5663->regmap, RT5663_CHOP_DAC_L, 0x3030);
+ regmap_write(rt5663->regmap, RT5663_CALIB_ADC, 0x3c05);
+ regmap_write(rt5663->regmap, RT5663_PWR_ANLG_1, 0xa23e);
msleep(40);
- regmap_write(rt5668->regmap, RT5663_PWR_ANLG_1, 0xf23e);
- regmap_write(rt5668->regmap, RT5663_HP_CALIB_2, 0x0321);
- regmap_write(rt5668->regmap, RT5663_HP_CALIB_1, 0xfc00);
+ regmap_write(rt5663->regmap, RT5663_PWR_ANLG_1, 0xf23e);
+ regmap_write(rt5663->regmap, RT5663_HP_CALIB_2, 0x0321);
+ regmap_write(rt5663->regmap, RT5663_HP_CALIB_1, 0xfc00);
msleep(500);
}
-static void rt5663_calibrate(struct rt5663_priv *rt5668)
+static void rt5663_calibrate(struct rt5663_priv *rt5663)
{
int value, count;
- regmap_write(rt5668->regmap, RT5663_RC_CLK, 0x0280);
- regmap_write(rt5668->regmap, RT5663_GLB_CLK, 0x8000);
- regmap_write(rt5668->regmap, RT5663_DIG_MISC, 0x8001);
- regmap_write(rt5668->regmap, RT5663_VREF_RECMIX, 0x0032);
- regmap_write(rt5668->regmap, RT5663_PWR_ANLG_1, 0xa2be);
+ regmap_write(rt5663->regmap, RT5663_RC_CLK, 0x0280);
+ regmap_write(rt5663->regmap, RT5663_GLB_CLK, 0x8000);
+ regmap_write(rt5663->regmap, RT5663_DIG_MISC, 0x8001);
+ regmap_write(rt5663->regmap, RT5663_VREF_RECMIX, 0x0032);
+ regmap_write(rt5663->regmap, RT5663_PWR_ANLG_1, 0xa2be);
msleep(20);
- regmap_write(rt5668->regmap, RT5663_PWR_ANLG_1, 0xf2be);
- regmap_write(rt5668->regmap, RT5663_PWR_DIG_2, 0x8400);
- regmap_write(rt5668->regmap, RT5663_CHOP_ADC, 0x3000);
- regmap_write(rt5668->regmap, RT5663_DEPOP_1, 0x003b);
- regmap_write(rt5668->regmap, RT5663_PWR_DIG_1, 0x8df8);
- regmap_write(rt5668->regmap, RT5663_PWR_ANLG_2, 0x0003);
- regmap_write(rt5668->regmap, RT5663_PWR_ANLG_3, 0x018c);
- regmap_write(rt5668->regmap, RT5663_ADDA_CLK_1, 0x1111);
- regmap_write(rt5668->regmap, RT5663_PRE_DIV_GATING_1, 0xffff);
- regmap_write(rt5668->regmap, RT5663_PRE_DIV_GATING_2, 0xffff);
- regmap_write(rt5668->regmap, RT5663_DEPOP_2, 0x3003);
- regmap_write(rt5668->regmap, RT5663_DEPOP_1, 0x003b);
- regmap_write(rt5668->regmap, RT5663_HP_CHARGE_PUMP_1, 0x1e32);
- regmap_write(rt5668->regmap, RT5663_HP_CHARGE_PUMP_2, 0x1371);
- regmap_write(rt5668->regmap, RT5663_DACREF_LDO, 0x3b0b);
- regmap_write(rt5668->regmap, RT5663_STO_DAC_MIXER, 0x2080);
- regmap_write(rt5668->regmap, RT5663_BYPASS_STO_DAC, 0x000c);
- regmap_write(rt5668->regmap, RT5663_HP_BIAS, 0xabba);
- regmap_write(rt5668->regmap, RT5663_CHARGE_PUMP_1, 0x2224);
- regmap_write(rt5668->regmap, RT5663_HP_OUT_EN, 0x8088);
- regmap_write(rt5668->regmap, RT5663_STO_DRE_9, 0x0017);
- regmap_write(rt5668->regmap, RT5663_STO_DRE_10, 0x0017);
- regmap_write(rt5668->regmap, RT5663_STO1_ADC_MIXER, 0x4040);
- regmap_write(rt5668->regmap, RT5663_RECMIX, 0x0005);
- regmap_write(rt5668->regmap, RT5663_ADDA_RST, 0xc000);
- regmap_write(rt5668->regmap, RT5663_STO1_HPF_ADJ1, 0x3320);
- regmap_write(rt5668->regmap, RT5663_HP_CALIB_2, 0x00c9);
- regmap_write(rt5668->regmap, RT5663_DUMMY_1, 0x004c);
- regmap_write(rt5668->regmap, RT5663_ANA_BIAS_CUR_1, 0x7766);
- regmap_write(rt5668->regmap, RT5663_BIAS_CUR_8, 0x4702);
+ regmap_write(rt5663->regmap, RT5663_PWR_ANLG_1, 0xf2be);
+ regmap_write(rt5663->regmap, RT5663_PWR_DIG_2, 0x8400);
+ regmap_write(rt5663->regmap, RT5663_CHOP_ADC, 0x3000);
+ regmap_write(rt5663->regmap, RT5663_DEPOP_1, 0x003b);
+ regmap_write(rt5663->regmap, RT5663_PWR_DIG_1, 0x8df8);
+ regmap_write(rt5663->regmap, RT5663_PWR_ANLG_2, 0x0003);
+ regmap_write(rt5663->regmap, RT5663_PWR_ANLG_3, 0x018c);
+ regmap_write(rt5663->regmap, RT5663_ADDA_CLK_1, 0x1111);
+ regmap_write(rt5663->regmap, RT5663_PRE_DIV_GATING_1, 0xffff);
+ regmap_write(rt5663->regmap, RT5663_PRE_DIV_GATING_2, 0xffff);
+ regmap_write(rt5663->regmap, RT5663_DEPOP_2, 0x3003);
+ regmap_write(rt5663->regmap, RT5663_DEPOP_1, 0x003b);
+ regmap_write(rt5663->regmap, RT5663_HP_CHARGE_PUMP_1, 0x1e32);
+ regmap_write(rt5663->regmap, RT5663_HP_CHARGE_PUMP_2, 0x1371);
+ regmap_write(rt5663->regmap, RT5663_DACREF_LDO, 0x3b0b);
+ regmap_write(rt5663->regmap, RT5663_STO_DAC_MIXER, 0x2080);
+ regmap_write(rt5663->regmap, RT5663_BYPASS_STO_DAC, 0x000c);
+ regmap_write(rt5663->regmap, RT5663_HP_BIAS, 0xabba);
+ regmap_write(rt5663->regmap, RT5663_CHARGE_PUMP_1, 0x2224);
+ regmap_write(rt5663->regmap, RT5663_HP_OUT_EN, 0x8088);
+ regmap_write(rt5663->regmap, RT5663_STO_DRE_9, 0x0017);
+ regmap_write(rt5663->regmap, RT5663_STO_DRE_10, 0x0017);
+ regmap_write(rt5663->regmap, RT5663_STO1_ADC_MIXER, 0x4040);
+ regmap_write(rt5663->regmap, RT5663_RECMIX, 0x0005);
+ regmap_write(rt5663->regmap, RT5663_ADDA_RST, 0xc000);
+ regmap_write(rt5663->regmap, RT5663_STO1_HPF_ADJ1, 0x3320);
+ regmap_write(rt5663->regmap, RT5663_HP_CALIB_2, 0x00c9);
+ regmap_write(rt5663->regmap, RT5663_DUMMY_1, 0x004c);
+ regmap_write(rt5663->regmap, RT5663_ANA_BIAS_CUR_1, 0x7766);
+ regmap_write(rt5663->regmap, RT5663_BIAS_CUR_8, 0x4702);
msleep(200);
- regmap_write(rt5668->regmap, RT5663_HP_CALIB_1, 0x0069);
- regmap_write(rt5668->regmap, RT5663_HP_CALIB_3, 0x06c2);
- regmap_write(rt5668->regmap, RT5663_HP_CALIB_1_1, 0x7b00);
- regmap_write(rt5668->regmap, RT5663_HP_CALIB_1_1, 0xfb00);
+ regmap_write(rt5663->regmap, RT5663_HP_CALIB_1, 0x0069);
+ regmap_write(rt5663->regmap, RT5663_HP_CALIB_3, 0x06c2);
+ regmap_write(rt5663->regmap, RT5663_HP_CALIB_1_1, 0x7b00);
+ regmap_write(rt5663->regmap, RT5663_HP_CALIB_1_1, 0xfb00);
count = 0;
while (true) {
- regmap_read(rt5668->regmap, RT5663_HP_CALIB_1_1, &value);
+ regmap_read(rt5663->regmap, RT5663_HP_CALIB_1_1, &value);
if (value & 0x8000)
usleep_range(10000, 10005);
else
@@ -3066,17 +3064,17 @@ static int rt5663_i2c_probe(struct i2c_client *i2c,
}
regmap_read(regmap, RT5663_VENDOR_ID_2, &val);
switch (val) {
- case RT5668_DEVICE_ID:
- rt5663->regmap = devm_regmap_init_i2c(i2c, &rt5668_regmap);
- rt5663->codec_type = CODEC_TYPE_RT5668;
+ case RT5663_DEVICE_ID_2:
+ rt5663->regmap = devm_regmap_init_i2c(i2c, &rt5663_v2_regmap);
+ rt5663->codec_ver = CODEC_VER_1;
break;
- case RT5663_DEVICE_ID:
+ case RT5663_DEVICE_ID_1:
rt5663->regmap = devm_regmap_init_i2c(i2c, &rt5663_regmap);
- rt5663->codec_type = CODEC_TYPE_RT5663;
+ rt5663->codec_ver = CODEC_VER_0;
break;
default:
dev_err(&i2c->dev,
- "Device with ID register %#x is not rt5663 or rt5668\n",
+ "Device with ID register %#x is not rt5663\n",
val);
return -ENODEV;
}
@@ -3091,11 +3089,11 @@ static int rt5663_i2c_probe(struct i2c_client *i2c,
/* reset and calibrate */
regmap_write(rt5663->regmap, RT5663_RESET, 0);
regcache_cache_bypass(rt5663->regmap, true);
- switch (rt5663->codec_type) {
- case CODEC_TYPE_RT5668:
- rt5668_calibrate(rt5663);
+ switch (rt5663->codec_ver) {
+ case CODEC_VER_1:
+ rt5663_v2_calibrate(rt5663);
break;
- case CODEC_TYPE_RT5663:
+ case CODEC_VER_0:
rt5663_calibrate(rt5663);
break;
default:
@@ -3106,46 +3104,55 @@ static int rt5663_i2c_probe(struct i2c_client *i2c,
dev_dbg(&i2c->dev, "calibrate done\n");
/* GPIO1 as IRQ */
- regmap_update_bits(rt5663->regmap, RT5663_GPIO_1, RT5668_GP1_PIN_MASK,
- RT5668_GP1_PIN_IRQ);
+ regmap_update_bits(rt5663->regmap, RT5663_GPIO_1, RT5663_GP1_PIN_MASK,
+ RT5663_GP1_PIN_IRQ);
/* 4btn inline command debounce */
regmap_update_bits(rt5663->regmap, RT5663_IL_CMD_5,
- RT5668_4BTN_CLK_DEB_MASK, RT5668_4BTN_CLK_DEB_65MS);
+ RT5663_4BTN_CLK_DEB_MASK, RT5663_4BTN_CLK_DEB_65MS);
- switch (rt5663->codec_type) {
- case CODEC_TYPE_RT5668:
+ switch (rt5663->codec_ver) {
+ case CODEC_VER_1:
regmap_write(rt5663->regmap, RT5663_BIAS_CUR_8, 0xa402);
/* JD1 */
regmap_update_bits(rt5663->regmap, RT5663_AUTO_1MRC_CLK,
- RT5668_IRQ_POW_SAV_MASK | RT5668_IRQ_POW_SAV_JD1_MASK,
- RT5668_IRQ_POW_SAV_EN | RT5668_IRQ_POW_SAV_JD1_EN);
+ RT5663_IRQ_POW_SAV_MASK | RT5663_IRQ_POW_SAV_JD1_MASK,
+ RT5663_IRQ_POW_SAV_EN | RT5663_IRQ_POW_SAV_JD1_EN);
regmap_update_bits(rt5663->regmap, RT5663_PWR_ANLG_2,
- RT5668_PWR_JD1_MASK, RT5668_PWR_JD1);
+ RT5663_PWR_JD1_MASK, RT5663_PWR_JD1);
regmap_update_bits(rt5663->regmap, RT5663_IRQ_1,
- RT5668_EN_CB_JD_MASK, RT5668_EN_CB_JD_EN);
+ RT5663_EN_CB_JD_MASK, RT5663_EN_CB_JD_EN);
regmap_update_bits(rt5663->regmap, RT5663_HP_LOGIC_2,
- RT5668_HP_SIG_SRC1_MASK, RT5668_HP_SIG_SRC1_REG);
+ RT5663_HP_SIG_SRC1_MASK, RT5663_HP_SIG_SRC1_REG);
regmap_update_bits(rt5663->regmap, RT5663_RECMIX,
- RT5668_VREF_BIAS_MASK | RT5668_CBJ_DET_MASK |
- RT5668_DET_TYPE_MASK, RT5668_VREF_BIAS_REG |
- RT5668_CBJ_DET_EN | RT5668_DET_TYPE_QFN);
+ RT5663_VREF_BIAS_MASK | RT5663_CBJ_DET_MASK |
+ RT5663_DET_TYPE_MASK, RT5663_VREF_BIAS_REG |
+ RT5663_CBJ_DET_EN | RT5663_DET_TYPE_QFN);
/* Set GPIO4 and GPIO8 as input for combo jack */
regmap_update_bits(rt5663->regmap, RT5663_GPIO_2,
- RT5668_GP4_PIN_CONF_MASK, RT5668_GP4_PIN_CONF_INPUT);
- regmap_update_bits(rt5663->regmap, RT5668_GPIO_3,
- RT5668_GP8_PIN_CONF_MASK, RT5668_GP8_PIN_CONF_INPUT);
+ RT5663_GP4_PIN_CONF_MASK, RT5663_GP4_PIN_CONF_INPUT);
+ regmap_update_bits(rt5663->regmap, RT5663_GPIO_3,
+ RT5663_GP8_PIN_CONF_MASK, RT5663_GP8_PIN_CONF_INPUT);
regmap_update_bits(rt5663->regmap, RT5663_PWR_ANLG_1,
- RT5668_LDO1_DVO_MASK | RT5668_AMP_HP_MASK,
- RT5668_LDO1_DVO_0_9V | RT5668_AMP_HP_3X);
+ RT5663_LDO1_DVO_MASK | RT5663_AMP_HP_MASK,
+ RT5663_LDO1_DVO_0_9V | RT5663_AMP_HP_3X);
break;
- case CODEC_TYPE_RT5663:
+ case CODEC_VER_0:
+ regmap_update_bits(rt5663->regmap, RT5663_DIG_MISC,
+ RT5663_DIG_GATE_CTRL_MASK, RT5663_DIG_GATE_CTRL_EN);
+ regmap_update_bits(rt5663->regmap, RT5663_AUTO_1MRC_CLK,
+ RT5663_IRQ_POW_SAV_MASK, RT5663_IRQ_POW_SAV_EN);
+ regmap_update_bits(rt5663->regmap, RT5663_IRQ_1,
+ RT5663_EN_IRQ_JD1_MASK, RT5663_EN_IRQ_JD1_EN);
+ regmap_update_bits(rt5663->regmap, RT5663_GPIO_1,
+ RT5663_GPIO1_TYPE_MASK, RT5663_GPIO1_TYPE_EN);
regmap_write(rt5663->regmap, RT5663_VREF_RECMIX, 0x0032);
regmap_write(rt5663->regmap, RT5663_PWR_ANLG_1, 0xa2be);
msleep(20);
regmap_write(rt5663->regmap, RT5663_PWR_ANLG_1, 0xf2be);
regmap_update_bits(rt5663->regmap, RT5663_GPIO_2,
- RT5663_GP1_PIN_CONF_MASK, RT5663_GP1_PIN_CONF_OUTPUT);
+ RT5663_GP1_PIN_CONF_MASK | RT5663_SEL_GPIO1_MASK,
+ RT5663_GP1_PIN_CONF_OUTPUT | RT5663_SEL_GPIO1_EN);
/* DACREF LDO control */
regmap_update_bits(rt5663->regmap, RT5663_DACREF_LDO, 0x3e0e,
0x3a0a);
diff --git a/sound/soc/codecs/rt5663.h b/sound/soc/codecs/rt5663.h
index 2cc8f28080f6..d77fae619f2f 100644
--- a/sound/soc/codecs/rt5663.h
+++ b/sound/soc/codecs/rt5663.h
@@ -18,655 +18,652 @@
#define RT5663_VENDOR_ID_1 0x00fe
#define RT5663_VENDOR_ID_2 0x00ff
-#define RT5668_LOUT_CTRL 0x0001
-#define RT5668_HP_AMP_2 0x0003
-#define RT5668_MONO_OUT 0x0004
-#define RT5668_MONO_GAIN 0x0007
-
-#define RT5668_AEC_BST 0x000b
-#define RT5668_IN1_IN2 0x000c
-#define RT5668_IN3_IN4 0x000d
-#define RT5668_INL1_INR1 0x000f
-#define RT5668_CBJ_TYPE_2 0x0011
-#define RT5668_CBJ_TYPE_3 0x0012
-#define RT5668_CBJ_TYPE_4 0x0013
-#define RT5668_CBJ_TYPE_5 0x0014
-#define RT5668_CBJ_TYPE_8 0x0017
+#define RT5663_LOUT_CTRL 0x0001
+#define RT5663_HP_AMP_2 0x0003
+#define RT5663_MONO_OUT 0x0004
+#define RT5663_MONO_GAIN 0x0007
+
+#define RT5663_AEC_BST 0x000b
+#define RT5663_IN1_IN2 0x000c
+#define RT5663_IN3_IN4 0x000d
+#define RT5663_INL1_INR1 0x000f
+#define RT5663_CBJ_TYPE_2 0x0011
+#define RT5663_CBJ_TYPE_3 0x0012
+#define RT5663_CBJ_TYPE_4 0x0013
+#define RT5663_CBJ_TYPE_5 0x0014
+#define RT5663_CBJ_TYPE_8 0x0017
/* I/O - ADC/DAC/DMIC */
-#define RT5668_DAC3_DIG_VOL 0x001a
-#define RT5668_DAC3_CTRL 0x001b
-#define RT5668_MONO_ADC_DIG_VOL 0x001d
-#define RT5668_STO2_ADC_DIG_VOL 0x001e
-#define RT5668_MONO_ADC_BST_GAIN 0x0020
-#define RT5668_STO2_ADC_BST_GAIN 0x0021
-#define RT5668_SIDETONE_CTRL 0x0024
+#define RT5663_DAC3_DIG_VOL 0x001a
+#define RT5663_DAC3_CTRL 0x001b
+#define RT5663_MONO_ADC_DIG_VOL 0x001d
+#define RT5663_STO2_ADC_DIG_VOL 0x001e
+#define RT5663_MONO_ADC_BST_GAIN 0x0020
+#define RT5663_STO2_ADC_BST_GAIN 0x0021
+#define RT5663_SIDETONE_CTRL 0x0024
/* Mixer - D-D */
-#define RT5668_MONO1_ADC_MIXER 0x0027
-#define RT5668_STO2_ADC_MIXER 0x0028
-#define RT5668_MONO_DAC_MIXER 0x002b
-#define RT5668_DAC2_SRC_CTRL 0x002e
-#define RT5668_IF_3_4_DATA_CTL 0x002f
-#define RT5668_IF_5_DATA_CTL 0x0030
-#define RT5668_PDM_OUT_CTL 0x0031
-#define RT5668_PDM_I2C_DATA_CTL1 0x0032
-#define RT5668_PDM_I2C_DATA_CTL2 0x0033
-#define RT5668_PDM_I2C_DATA_CTL3 0x0034
-#define RT5668_PDM_I2C_DATA_CTL4 0x0035
+#define RT5663_MONO1_ADC_MIXER 0x0027
+#define RT5663_STO2_ADC_MIXER 0x0028
+#define RT5663_MONO_DAC_MIXER 0x002b
+#define RT5663_DAC2_SRC_CTRL 0x002e
+#define RT5663_IF_3_4_DATA_CTL 0x002f
+#define RT5663_IF_5_DATA_CTL 0x0030
+#define RT5663_PDM_OUT_CTL 0x0031
+#define RT5663_PDM_I2C_DATA_CTL1 0x0032
+#define RT5663_PDM_I2C_DATA_CTL2 0x0033
+#define RT5663_PDM_I2C_DATA_CTL3 0x0034
+#define RT5663_PDM_I2C_DATA_CTL4 0x0035
/*Mixer - Analog*/
-#define RT5668_RECMIX1_NEW 0x003a
-#define RT5668_RECMIX1L_0 0x003b
-#define RT5668_RECMIX1L 0x003c
-#define RT5668_RECMIX1R_0 0x003d
-#define RT5668_RECMIX1R 0x003e
-#define RT5668_RECMIX2_NEW 0x003f
-#define RT5668_RECMIX2_L_2 0x0041
-#define RT5668_RECMIX2_R 0x0042
-#define RT5668_RECMIX2_R_2 0x0043
-#define RT5668_CALIB_REC_LR 0x0044
-#define RT5668_ALC_BK_GAIN 0x0049
-#define RT5668_MONOMIX_GAIN 0x004a
-#define RT5668_MONOMIX_IN_GAIN 0x004b
-#define RT5668_OUT_MIXL_GAIN 0x004d
-#define RT5668_OUT_LMIX_IN_GAIN 0x004e
-#define RT5668_OUT_RMIX_IN_GAIN 0x004f
-#define RT5668_OUT_RMIX_IN_GAIN1 0x0050
-#define RT5668_LOUT_MIXER_CTRL 0x0052
+#define RT5663_RECMIX1_NEW 0x003a
+#define RT5663_RECMIX1L_0 0x003b
+#define RT5663_RECMIX1L 0x003c
+#define RT5663_RECMIX1R_0 0x003d
+#define RT5663_RECMIX1R 0x003e
+#define RT5663_RECMIX2_NEW 0x003f
+#define RT5663_RECMIX2_L_2 0x0041
+#define RT5663_RECMIX2_R 0x0042
+#define RT5663_RECMIX2_R_2 0x0043
+#define RT5663_CALIB_REC_LR 0x0044
+#define RT5663_ALC_BK_GAIN 0x0049
+#define RT5663_MONOMIX_GAIN 0x004a
+#define RT5663_MONOMIX_IN_GAIN 0x004b
+#define RT5663_OUT_MIXL_GAIN 0x004d
+#define RT5663_OUT_LMIX_IN_GAIN 0x004e
+#define RT5663_OUT_RMIX_IN_GAIN 0x004f
+#define RT5663_OUT_RMIX_IN_GAIN1 0x0050
+#define RT5663_LOUT_MIXER_CTRL 0x0052
/* Power */
-#define RT5668_PWR_VOL 0x0067
+#define RT5663_PWR_VOL 0x0067
-#define RT5668_ADCDAC_RST 0x006d
+#define RT5663_ADCDAC_RST 0x006d
/* Format - ADC/DAC */
-#define RT5668_I2S34_SDP 0x0071
-#define RT5668_I2S5_SDP 0x0072
-/* Format - TDM Control */
-#define RT5668_TDM_5 0x007c
-#define RT5668_TDM_6 0x007d
-#define RT5668_TDM_7 0x007e
-#define RT5668_TDM_8 0x007f
+#define RT5663_I2S34_SDP 0x0071
+#define RT5663_I2S5_SDP 0x0072
/* Function - Analog */
-#define RT5668_ASRC_3 0x0085
-#define RT5668_ASRC_6 0x0088
-#define RT5668_ASRC_7 0x0089
-#define RT5668_PLL_TRK_13 0x0099
-#define RT5668_I2S_M_CLK_CTL 0x00a0
-#define RT5668_FDIV_I2S34_M_CLK 0x00a1
-#define RT5668_FDIV_I2S34_M_CLK2 0x00a2
-#define RT5668_FDIV_I2S5_M_CLK 0x00a3
-#define RT5668_FDIV_I2S5_M_CLK2 0x00a4
+#define RT5663_ASRC_3 0x0085
+#define RT5663_ASRC_6 0x0088
+#define RT5663_ASRC_7 0x0089
+#define RT5663_PLL_TRK_13 0x0099
+#define RT5663_I2S_M_CLK_CTL 0x00a0
+#define RT5663_FDIV_I2S34_M_CLK 0x00a1
+#define RT5663_FDIV_I2S34_M_CLK2 0x00a2
+#define RT5663_FDIV_I2S5_M_CLK 0x00a3
+#define RT5663_FDIV_I2S5_M_CLK2 0x00a4
/* Function - Digital */
-#define RT5668_IRQ_4 0x00b9
-#define RT5668_GPIO_3 0x00c2
-#define RT5668_GPIO_4 0x00c3
-#define RT5668_GPIO_STA 0x00c4
-#define RT5668_HP_AMP_DET1 0x00d0
-#define RT5668_HP_AMP_DET2 0x00d1
-#define RT5668_HP_AMP_DET3 0x00d2
-#define RT5668_MID_BD_HP_AMP 0x00d3
-#define RT5668_LOW_BD_HP_AMP 0x00d4
-#define RT5668_SOF_VOL_ZC2 0x00da
-#define RT5668_ADC_STO2_ADJ1 0x00ee
-#define RT5668_ADC_STO2_ADJ2 0x00ef
+#define RT5663_V2_IRQ_4 0x00b9
+#define RT5663_GPIO_3 0x00c2
+#define RT5663_GPIO_4 0x00c3
+#define RT5663_GPIO_STA2 0x00c4
+#define RT5663_HP_AMP_DET1 0x00d0
+#define RT5663_HP_AMP_DET2 0x00d1
+#define RT5663_HP_AMP_DET3 0x00d2
+#define RT5663_MID_BD_HP_AMP 0x00d3
+#define RT5663_LOW_BD_HP_AMP 0x00d4
+#define RT5663_SOF_VOL_ZC2 0x00da
+#define RT5663_ADC_STO2_ADJ1 0x00ee
+#define RT5663_ADC_STO2_ADJ2 0x00ef
/* General Control */
-#define RT5668_A_JD_CTRL 0x00f0
-#define RT5668_JD1_TRES_CTRL 0x00f1
-#define RT5668_JD2_TRES_CTRL 0x00f2
-#define RT5668_JD_CTRL2 0x00f7
-#define RT5668_DUM_REG_2 0x00fb
-#define RT5668_DUM_REG_3 0x00fc
-
-
-#define RT5668_DACADC_DIG_VOL2 0x0101
-#define RT5668_DIG_IN_PIN2 0x0133
-#define RT5668_PAD_DRV_CTL1 0x0136
-#define RT5668_SOF_RAM_DEPOP 0x0138
-#define RT5668_VOL_TEST 0x013f
-#define RT5668_TEST_MODE_3 0x0147
-#define RT5668_TEST_MODE_4 0x0148
-#define RT5668_MONO_DYNA_1 0x0170
-#define RT5668_MONO_DYNA_2 0x0171
-#define RT5668_MONO_DYNA_3 0x0172
-#define RT5668_MONO_DYNA_4 0x0173
-#define RT5668_MONO_DYNA_5 0x0174
-#define RT5668_MONO_DYNA_6 0x0175
-#define RT5668_STO1_SIL_DET 0x0190
-#define RT5668_MONOL_SIL_DET 0x0191
-#define RT5668_MONOR_SIL_DET 0x0192
-#define RT5668_STO2_DAC_SIL 0x0193
-#define RT5668_PWR_SAV_CTL1 0x0194
-#define RT5668_PWR_SAV_CTL2 0x0195
-#define RT5668_PWR_SAV_CTL3 0x0196
-#define RT5668_PWR_SAV_CTL4 0x0197
-#define RT5668_PWR_SAV_CTL5 0x0198
-#define RT5668_PWR_SAV_CTL6 0x0199
-#define RT5668_MONO_AMP_CAL1 0x01a0
-#define RT5668_MONO_AMP_CAL2 0x01a1
-#define RT5668_MONO_AMP_CAL3 0x01a2
-#define RT5668_MONO_AMP_CAL4 0x01a3
-#define RT5668_MONO_AMP_CAL5 0x01a4
-#define RT5668_MONO_AMP_CAL6 0x01a5
-#define RT5668_MONO_AMP_CAL7 0x01a6
-#define RT5668_MONO_AMP_CAL_ST1 0x01a7
-#define RT5668_MONO_AMP_CAL_ST2 0x01a8
-#define RT5668_MONO_AMP_CAL_ST3 0x01a9
-#define RT5668_MONO_AMP_CAL_ST4 0x01aa
-#define RT5668_MONO_AMP_CAL_ST5 0x01ab
-#define RT5668_HP_IMP_SEN_13 0x01b9
-#define RT5668_HP_IMP_SEN_14 0x01ba
-#define RT5668_HP_IMP_SEN_6 0x01bb
-#define RT5668_HP_IMP_SEN_7 0x01bc
-#define RT5668_HP_IMP_SEN_8 0x01bd
-#define RT5668_HP_IMP_SEN_9 0x01be
-#define RT5668_HP_IMP_SEN_10 0x01bf
-#define RT5668_HP_LOGIC_3 0x01dc
-#define RT5668_HP_CALIB_ST10 0x01f3
-#define RT5668_HP_CALIB_ST11 0x01f4
-#define RT5668_PRO_REG_TBL_4 0x0203
-#define RT5668_PRO_REG_TBL_5 0x0204
-#define RT5668_PRO_REG_TBL_6 0x0205
-#define RT5668_PRO_REG_TBL_7 0x0206
-#define RT5668_PRO_REG_TBL_8 0x0207
-#define RT5668_PRO_REG_TBL_9 0x0208
-#define RT5668_SAR_ADC_INL_1 0x0210
-#define RT5668_SAR_ADC_INL_2 0x0211
-#define RT5668_SAR_ADC_INL_3 0x0212
-#define RT5668_SAR_ADC_INL_4 0x0213
-#define RT5668_SAR_ADC_INL_5 0x0214
-#define RT5668_SAR_ADC_INL_6 0x0215
-#define RT5668_SAR_ADC_INL_7 0x0216
-#define RT5668_SAR_ADC_INL_8 0x0217
-#define RT5668_SAR_ADC_INL_9 0x0218
-#define RT5668_SAR_ADC_INL_10 0x0219
-#define RT5668_SAR_ADC_INL_11 0x021a
-#define RT5668_SAR_ADC_INL_12 0x021b
-#define RT5668_DRC_CTRL_1 0x02ff
-#define RT5668_DRC1_CTRL_2 0x0301
-#define RT5668_DRC1_CTRL_3 0x0302
-#define RT5668_DRC1_CTRL_4 0x0303
-#define RT5668_DRC1_CTRL_5 0x0304
-#define RT5668_DRC1_CTRL_6 0x0305
-#define RT5668_DRC1_HD_CTRL_1 0x0306
-#define RT5668_DRC1_HD_CTRL_2 0x0307
-#define RT5668_DRC1_PRI_REG_1 0x0310
-#define RT5668_DRC1_PRI_REG_2 0x0311
-#define RT5668_DRC1_PRI_REG_3 0x0312
-#define RT5668_DRC1_PRI_REG_4 0x0313
-#define RT5668_DRC1_PRI_REG_5 0x0314
-#define RT5668_DRC1_PRI_REG_6 0x0315
-#define RT5668_DRC1_PRI_REG_7 0x0316
-#define RT5668_DRC1_PRI_REG_8 0x0317
-#define RT5668_ALC_PGA_CTL_1 0x0330
-#define RT5668_ALC_PGA_CTL_2 0x0331
-#define RT5668_ALC_PGA_CTL_3 0x0332
-#define RT5668_ALC_PGA_CTL_4 0x0333
-#define RT5668_ALC_PGA_CTL_5 0x0334
-#define RT5668_ALC_PGA_CTL_6 0x0335
-#define RT5668_ALC_PGA_CTL_7 0x0336
-#define RT5668_ALC_PGA_CTL_8 0x0337
-#define RT5668_ALC_PGA_REG_1 0x0338
-#define RT5668_ALC_PGA_REG_2 0x0339
-#define RT5668_ALC_PGA_REG_3 0x033a
-#define RT5668_ADC_EQ_RECOV_1 0x03c0
-#define RT5668_ADC_EQ_RECOV_2 0x03c1
-#define RT5668_ADC_EQ_RECOV_3 0x03c2
-#define RT5668_ADC_EQ_RECOV_4 0x03c3
-#define RT5668_ADC_EQ_RECOV_5 0x03c4
-#define RT5668_ADC_EQ_RECOV_6 0x03c5
-#define RT5668_ADC_EQ_RECOV_7 0x03c6
-#define RT5668_ADC_EQ_RECOV_8 0x03c7
-#define RT5668_ADC_EQ_RECOV_9 0x03c8
-#define RT5668_ADC_EQ_RECOV_10 0x03c9
-#define RT5668_ADC_EQ_RECOV_11 0x03ca
-#define RT5668_ADC_EQ_RECOV_12 0x03cb
-#define RT5668_ADC_EQ_RECOV_13 0x03cc
-#define RT5668_VID_HIDDEN 0x03fe
-#define RT5668_VID_CUSTOMER 0x03ff
-#define RT5668_SCAN_MODE 0x07f0
-#define RT5668_I2C_BYPA 0x07fa
+#define RT5663_A_JD_CTRL 0x00f0
+#define RT5663_JD1_TRES_CTRL 0x00f1
+#define RT5663_JD2_TRES_CTRL 0x00f2
+#define RT5663_V2_JD_CTRL2 0x00f7
+#define RT5663_DUM_REG_2 0x00fb
+#define RT5663_DUM_REG_3 0x00fc
+
+
+#define RT5663_DACADC_DIG_VOL2 0x0101
+#define RT5663_DIG_IN_PIN2 0x0133
+#define RT5663_PAD_DRV_CTL1 0x0136
+#define RT5663_SOF_RAM_DEPOP 0x0138
+#define RT5663_VOL_TEST 0x013f
+#define RT5663_MONO_DYNA_1 0x0170
+#define RT5663_MONO_DYNA_2 0x0171
+#define RT5663_MONO_DYNA_3 0x0172
+#define RT5663_MONO_DYNA_4 0x0173
+#define RT5663_MONO_DYNA_5 0x0174
+#define RT5663_MONO_DYNA_6 0x0175
+#define RT5663_STO1_SIL_DET 0x0190
+#define RT5663_MONOL_SIL_DET 0x0191
+#define RT5663_MONOR_SIL_DET 0x0192
+#define RT5663_STO2_DAC_SIL 0x0193
+#define RT5663_PWR_SAV_CTL1 0x0194
+#define RT5663_PWR_SAV_CTL2 0x0195
+#define RT5663_PWR_SAV_CTL3 0x0196
+#define RT5663_PWR_SAV_CTL4 0x0197
+#define RT5663_PWR_SAV_CTL5 0x0198
+#define RT5663_PWR_SAV_CTL6 0x0199
+#define RT5663_MONO_AMP_CAL1 0x01a0
+#define RT5663_MONO_AMP_CAL2 0x01a1
+#define RT5663_MONO_AMP_CAL3 0x01a2
+#define RT5663_MONO_AMP_CAL4 0x01a3
+#define RT5663_MONO_AMP_CAL5 0x01a4
+#define RT5663_MONO_AMP_CAL6 0x01a5
+#define RT5663_MONO_AMP_CAL7 0x01a6
+#define RT5663_MONO_AMP_CAL_ST1 0x01a7
+#define RT5663_MONO_AMP_CAL_ST2 0x01a8
+#define RT5663_MONO_AMP_CAL_ST3 0x01a9
+#define RT5663_MONO_AMP_CAL_ST4 0x01aa
+#define RT5663_MONO_AMP_CAL_ST5 0x01ab
+#define RT5663_V2_HP_IMP_SEN_13 0x01b9
+#define RT5663_V2_HP_IMP_SEN_14 0x01ba
+#define RT5663_V2_HP_IMP_SEN_6 0x01bb
+#define RT5663_V2_HP_IMP_SEN_7 0x01bc
+#define RT5663_V2_HP_IMP_SEN_8 0x01bd
+#define RT5663_V2_HP_IMP_SEN_9 0x01be
+#define RT5663_V2_HP_IMP_SEN_10 0x01bf
+#define RT5663_HP_LOGIC_3 0x01dc
+#define RT5663_HP_CALIB_ST10 0x01f3
+#define RT5663_HP_CALIB_ST11 0x01f4
+#define RT5663_PRO_REG_TBL_4 0x0203
+#define RT5663_PRO_REG_TBL_5 0x0204
+#define RT5663_PRO_REG_TBL_6 0x0205
+#define RT5663_PRO_REG_TBL_7 0x0206
+#define RT5663_PRO_REG_TBL_8 0x0207
+#define RT5663_PRO_REG_TBL_9 0x0208
+#define RT5663_SAR_ADC_INL_1 0x0210
+#define RT5663_SAR_ADC_INL_2 0x0211
+#define RT5663_SAR_ADC_INL_3 0x0212
+#define RT5663_SAR_ADC_INL_4 0x0213
+#define RT5663_SAR_ADC_INL_5 0x0214
+#define RT5663_SAR_ADC_INL_6 0x0215
+#define RT5663_SAR_ADC_INL_7 0x0216
+#define RT5663_SAR_ADC_INL_8 0x0217
+#define RT5663_SAR_ADC_INL_9 0x0218
+#define RT5663_SAR_ADC_INL_10 0x0219
+#define RT5663_SAR_ADC_INL_11 0x021a
+#define RT5663_SAR_ADC_INL_12 0x021b
+#define RT5663_DRC_CTRL_1 0x02ff
+#define RT5663_DRC1_CTRL_2 0x0301
+#define RT5663_DRC1_CTRL_3 0x0302
+#define RT5663_DRC1_CTRL_4 0x0303
+#define RT5663_DRC1_CTRL_5 0x0304
+#define RT5663_DRC1_CTRL_6 0x0305
+#define RT5663_DRC1_HD_CTRL_1 0x0306
+#define RT5663_DRC1_HD_CTRL_2 0x0307
+#define RT5663_DRC1_PRI_REG_1 0x0310
+#define RT5663_DRC1_PRI_REG_2 0x0311
+#define RT5663_DRC1_PRI_REG_3 0x0312
+#define RT5663_DRC1_PRI_REG_4 0x0313
+#define RT5663_DRC1_PRI_REG_5 0x0314
+#define RT5663_DRC1_PRI_REG_6 0x0315
+#define RT5663_DRC1_PRI_REG_7 0x0316
+#define RT5663_DRC1_PRI_REG_8 0x0317
+#define RT5663_ALC_PGA_CTL_1 0x0330
+#define RT5663_ALC_PGA_CTL_2 0x0331
+#define RT5663_ALC_PGA_CTL_3 0x0332
+#define RT5663_ALC_PGA_CTL_4 0x0333
+#define RT5663_ALC_PGA_CTL_5 0x0334
+#define RT5663_ALC_PGA_CTL_6 0x0335
+#define RT5663_ALC_PGA_CTL_7 0x0336
+#define RT5663_ALC_PGA_CTL_8 0x0337
+#define RT5663_ALC_PGA_REG_1 0x0338
+#define RT5663_ALC_PGA_REG_2 0x0339
+#define RT5663_ALC_PGA_REG_3 0x033a
+#define RT5663_ADC_EQ_RECOV_1 0x03c0
+#define RT5663_ADC_EQ_RECOV_2 0x03c1
+#define RT5663_ADC_EQ_RECOV_3 0x03c2
+#define RT5663_ADC_EQ_RECOV_4 0x03c3
+#define RT5663_ADC_EQ_RECOV_5 0x03c4
+#define RT5663_ADC_EQ_RECOV_6 0x03c5
+#define RT5663_ADC_EQ_RECOV_7 0x03c6
+#define RT5663_ADC_EQ_RECOV_8 0x03c7
+#define RT5663_ADC_EQ_RECOV_9 0x03c8
+#define RT5663_ADC_EQ_RECOV_10 0x03c9
+#define RT5663_ADC_EQ_RECOV_11 0x03ca
+#define RT5663_ADC_EQ_RECOV_12 0x03cb
+#define RT5663_ADC_EQ_RECOV_13 0x03cc
+#define RT5663_VID_HIDDEN 0x03fe
+#define RT5663_VID_CUSTOMER 0x03ff
+#define RT5663_SCAN_MODE 0x07f0
+#define RT5663_I2C_BYPA 0x07fa
/* Headphone Amp Control 2 (0x0003) */
-#define RT5668_EN_DAC_HPO_MASK (0x1 << 14)
-#define RT5668_EN_DAC_HPO_SHIFT 14
-#define RT5668_EN_DAC_HPO_DIS (0x0 << 14)
-#define RT5668_EN_DAC_HPO_EN (0x1 << 14)
+#define RT5663_EN_DAC_HPO_MASK (0x1 << 14)
+#define RT5663_EN_DAC_HPO_SHIFT 14
+#define RT5663_EN_DAC_HPO_DIS (0x0 << 14)
+#define RT5663_EN_DAC_HPO_EN (0x1 << 14)
/*Headphone Amp L/R Analog Gain and Digital NG2 Gain Control (0x0005 0x0006)*/
-#define RT5668_GAIN_HP (0x1f << 8)
-#define RT5668_GAIN_HP_SHIFT 8
+#define RT5663_GAIN_HP (0x1f << 8)
+#define RT5663_GAIN_HP_SHIFT 8
/* AEC BST Control (0x000b) */
-#define RT5668_GAIN_CBJ_MASK (0xf << 8)
-#define RT5668_GAIN_CBJ_SHIFT 8
+#define RT5663_GAIN_CBJ_MASK (0xf << 8)
+#define RT5663_GAIN_CBJ_SHIFT 8
/* IN1 Control / MIC GND REF (0x000c) */
-#define RT5668_IN1_DF_MASK (0x1 << 15)
-#define RT5668_IN1_DF_SHIFT 15
+#define RT5663_IN1_DF_MASK (0x1 << 15)
+#define RT5663_IN1_DF_SHIFT 15
/* Combo Jack and Type Detection Control 1 (0x0010) */
-#define RT5668_CBJ_DET_MASK (0x1 << 15)
-#define RT5668_CBJ_DET_SHIFT 15
-#define RT5668_CBJ_DET_DIS (0x0 << 15)
-#define RT5668_CBJ_DET_EN (0x1 << 15)
-#define RT5668_DET_TYPE_MASK (0x1 << 12)
-#define RT5668_DET_TYPE_SHIFT 12
-#define RT5668_DET_TYPE_WLCSP (0x0 << 12)
-#define RT5668_DET_TYPE_QFN (0x1 << 12)
-#define RT5668_VREF_BIAS_MASK (0x1 << 6)
-#define RT5668_VREF_BIAS_SHIFT 6
-#define RT5668_VREF_BIAS_FSM (0x0 << 6)
-#define RT5668_VREF_BIAS_REG (0x1 << 6)
+#define RT5663_CBJ_DET_MASK (0x1 << 15)
+#define RT5663_CBJ_DET_SHIFT 15
+#define RT5663_CBJ_DET_DIS (0x0 << 15)
+#define RT5663_CBJ_DET_EN (0x1 << 15)
+#define RT5663_DET_TYPE_MASK (0x1 << 12)
+#define RT5663_DET_TYPE_SHIFT 12
+#define RT5663_DET_TYPE_WLCSP (0x0 << 12)
+#define RT5663_DET_TYPE_QFN (0x1 << 12)
+#define RT5663_VREF_BIAS_MASK (0x1 << 6)
+#define RT5663_VREF_BIAS_SHIFT 6
+#define RT5663_VREF_BIAS_FSM (0x0 << 6)
+#define RT5663_VREF_BIAS_REG (0x1 << 6)
/* REC Left Mixer Control 2 (0x003c) */
-#define RT5668_RECMIX1L_BST1_CBJ (0x1 << 7)
-#define RT5668_RECMIX1L_BST1_CBJ_SHIFT 7
-#define RT5668_RECMIX1L_BST2 (0x1 << 4)
-#define RT5668_RECMIX1L_BST2_SHIFT 4
+#define RT5663_RECMIX1L_BST1_CBJ (0x1 << 7)
+#define RT5663_RECMIX1L_BST1_CBJ_SHIFT 7
+#define RT5663_RECMIX1L_BST2 (0x1 << 4)
+#define RT5663_RECMIX1L_BST2_SHIFT 4
/* REC Right Mixer Control 2 (0x003e) */
-#define RT5668_RECMIX1R_BST2 (0x1 << 4)
-#define RT5668_RECMIX1R_BST2_SHIFT 4
+#define RT5663_RECMIX1R_BST2 (0x1 << 4)
+#define RT5663_RECMIX1R_BST2_SHIFT 4
/* DAC1 Digital Volume (0x0019) */
-#define RT5668_DAC_L1_VOL_MASK (0xff << 8)
-#define RT5668_DAC_L1_VOL_SHIFT 8
-#define RT5668_DAC_R1_VOL_MASK (0xff)
-#define RT5668_DAC_R1_VOL_SHIFT 0
+#define RT5663_DAC_L1_VOL_MASK (0xff << 8)
+#define RT5663_DAC_L1_VOL_SHIFT 8
+#define RT5663_DAC_R1_VOL_MASK (0xff)
+#define RT5663_DAC_R1_VOL_SHIFT 0
/* ADC Digital Volume Control (0x001c) */
-#define RT5668_ADC_L_MUTE_MASK (0x1 << 15)
-#define RT5668_ADC_L_MUTE_SHIFT 15
-#define RT5668_ADC_L_VOL_MASK (0x7f << 8)
-#define RT5668_ADC_L_VOL_SHIFT 8
-#define RT5668_ADC_R_MUTE_MASK (0x1 << 7)
-#define RT5668_ADC_R_MUTE_SHIFT 7
-#define RT5668_ADC_R_VOL_MASK (0x7f)
-#define RT5668_ADC_R_VOL_SHIFT 0
+#define RT5663_ADC_L_MUTE_MASK (0x1 << 15)
+#define RT5663_ADC_L_MUTE_SHIFT 15
+#define RT5663_ADC_L_VOL_MASK (0x7f << 8)
+#define RT5663_ADC_L_VOL_SHIFT 8
+#define RT5663_ADC_R_MUTE_MASK (0x1 << 7)
+#define RT5663_ADC_R_MUTE_SHIFT 7
+#define RT5663_ADC_R_VOL_MASK (0x7f)
+#define RT5663_ADC_R_VOL_SHIFT 0
/* Stereo ADC Mixer Control (0x0026) */
-#define RT5668_M_STO1_ADC_L1 (0x1 << 15)
-#define RT5668_M_STO1_ADC_L1_SHIFT 15
-#define RT5668_M_STO1_ADC_L2 (0x1 << 14)
-#define RT5668_M_STO1_ADC_L2_SHIFT 14
-#define RT5668_STO1_ADC_L1_SRC (0x1 << 13)
-#define RT5668_STO1_ADC_L1_SRC_SHIFT 13
-#define RT5668_STO1_ADC_L2_SRC (0x1 << 12)
-#define RT5668_STO1_ADC_L2_SRC_SHIFT 12
-#define RT5668_STO1_ADC_L_SRC (0x3 << 10)
-#define RT5668_STO1_ADC_L_SRC_SHIFT 10
-#define RT5668_M_STO1_ADC_R1 (0x1 << 7)
-#define RT5668_M_STO1_ADC_R1_SHIFT 7
-#define RT5668_M_STO1_ADC_R2 (0x1 << 6)
-#define RT5668_M_STO1_ADC_R2_SHIFT 6
-#define RT5668_STO1_ADC_R1_SRC (0x1 << 5)
-#define RT5668_STO1_ADC_R1_SRC_SHIFT 5
-#define RT5668_STO1_ADC_R2_SRC (0x1 << 4)
-#define RT5668_STO1_ADC_R2_SRC_SHIFT 4
-#define RT5668_STO1_ADC_R_SRC (0x3 << 2)
-#define RT5668_STO1_ADC_R_SRC_SHIFT 2
+#define RT5663_M_STO1_ADC_L1 (0x1 << 15)
+#define RT5663_M_STO1_ADC_L1_SHIFT 15
+#define RT5663_M_STO1_ADC_L2 (0x1 << 14)
+#define RT5663_M_STO1_ADC_L2_SHIFT 14
+#define RT5663_STO1_ADC_L1_SRC (0x1 << 13)
+#define RT5663_STO1_ADC_L1_SRC_SHIFT 13
+#define RT5663_STO1_ADC_L2_SRC (0x1 << 12)
+#define RT5663_STO1_ADC_L2_SRC_SHIFT 12
+#define RT5663_STO1_ADC_L_SRC (0x3 << 10)
+#define RT5663_STO1_ADC_L_SRC_SHIFT 10
+#define RT5663_M_STO1_ADC_R1 (0x1 << 7)
+#define RT5663_M_STO1_ADC_R1_SHIFT 7
+#define RT5663_M_STO1_ADC_R2 (0x1 << 6)
+#define RT5663_M_STO1_ADC_R2_SHIFT 6
+#define RT5663_STO1_ADC_R1_SRC (0x1 << 5)
+#define RT5663_STO1_ADC_R1_SRC_SHIFT 5
+#define RT5663_STO1_ADC_R2_SRC (0x1 << 4)
+#define RT5663_STO1_ADC_R2_SRC_SHIFT 4
+#define RT5663_STO1_ADC_R_SRC (0x3 << 2)
+#define RT5663_STO1_ADC_R_SRC_SHIFT 2
/* ADC Mixer to DAC Mixer Control (0x0029) */
-#define RT5668_M_ADCMIX_L (0x1 << 15)
-#define RT5668_M_ADCMIX_L_SHIFT 15
-#define RT5668_M_DAC1_L (0x1 << 14)
-#define RT5668_M_DAC1_L_SHIFT 14
-#define RT5668_M_ADCMIX_R (0x1 << 7)
-#define RT5668_M_ADCMIX_R_SHIFT 7
-#define RT5668_M_DAC1_R (0x1 << 6)
-#define RT5668_M_DAC1_R_SHIFT 6
+#define RT5663_M_ADCMIX_L (0x1 << 15)
+#define RT5663_M_ADCMIX_L_SHIFT 15
+#define RT5663_M_DAC1_L (0x1 << 14)
+#define RT5663_M_DAC1_L_SHIFT 14
+#define RT5663_M_ADCMIX_R (0x1 << 7)
+#define RT5663_M_ADCMIX_R_SHIFT 7
+#define RT5663_M_DAC1_R (0x1 << 6)
+#define RT5663_M_DAC1_R_SHIFT 6
/* Stereo DAC Mixer Control (0x002a) */
-#define RT5668_M_DAC_L1_STO_L (0x1 << 15)
-#define RT5668_M_DAC_L1_STO_L_SHIFT 15
-#define RT5668_M_DAC_R1_STO_L (0x1 << 13)
-#define RT5668_M_DAC_R1_STO_L_SHIFT 13
-#define RT5668_M_DAC_L1_STO_R (0x1 << 7)
-#define RT5668_M_DAC_L1_STO_R_SHIFT 7
-#define RT5668_M_DAC_R1_STO_R (0x1 << 5)
-#define RT5668_M_DAC_R1_STO_R_SHIFT 5
+#define RT5663_M_DAC_L1_STO_L (0x1 << 15)
+#define RT5663_M_DAC_L1_STO_L_SHIFT 15
+#define RT5663_M_DAC_R1_STO_L (0x1 << 13)
+#define RT5663_M_DAC_R1_STO_L_SHIFT 13
+#define RT5663_M_DAC_L1_STO_R (0x1 << 7)
+#define RT5663_M_DAC_L1_STO_R_SHIFT 7
+#define RT5663_M_DAC_R1_STO_R (0x1 << 5)
+#define RT5663_M_DAC_R1_STO_R_SHIFT 5
/* Power Management for Digital 1 (0x0061) */
-#define RT5668_PWR_I2S1 (0x1 << 15)
-#define RT5668_PWR_I2S1_SHIFT 15
-#define RT5668_PWR_DAC_L1 (0x1 << 11)
-#define RT5668_PWR_DAC_L1_SHIFT 11
-#define RT5668_PWR_DAC_R1 (0x1 << 10)
-#define RT5668_PWR_DAC_R1_SHIFT 10
-#define RT5668_PWR_LDO_DACREF_MASK (0x1 << 8)
-#define RT5668_PWR_LDO_DACREF_SHIFT 8
-#define RT5668_PWR_LDO_DACREF_ON (0x1 << 8)
-#define RT5668_PWR_LDO_DACREF_DOWN (0x0 << 8)
-#define RT5668_PWR_LDO_SHIFT 8
-#define RT5668_PWR_ADC_L1 (0x1 << 4)
-#define RT5668_PWR_ADC_L1_SHIFT 4
-#define RT5668_PWR_ADC_R1 (0x1 << 3)
-#define RT5668_PWR_ADC_R1_SHIFT 3
+#define RT5663_PWR_I2S1 (0x1 << 15)
+#define RT5663_PWR_I2S1_SHIFT 15
+#define RT5663_PWR_DAC_L1 (0x1 << 11)
+#define RT5663_PWR_DAC_L1_SHIFT 11
+#define RT5663_PWR_DAC_R1 (0x1 << 10)
+#define RT5663_PWR_DAC_R1_SHIFT 10
+#define RT5663_PWR_LDO_DACREF_MASK (0x1 << 8)
+#define RT5663_PWR_LDO_DACREF_SHIFT 8
+#define RT5663_PWR_LDO_DACREF_ON (0x1 << 8)
+#define RT5663_PWR_LDO_DACREF_DOWN (0x0 << 8)
+#define RT5663_PWR_LDO_SHIFT 8
+#define RT5663_PWR_ADC_L1 (0x1 << 4)
+#define RT5663_PWR_ADC_L1_SHIFT 4
+#define RT5663_PWR_ADC_R1 (0x1 << 3)
+#define RT5663_PWR_ADC_R1_SHIFT 3
/* Power Management for Digital 2 (0x0062) */
-#define RT5668_PWR_ADC_S1F (0x1 << 15)
-#define RT5668_PWR_ADC_S1F_SHIFT 15
-#define RT5668_PWR_DAC_S1F (0x1 << 10)
-#define RT5668_PWR_DAC_S1F_SHIFT 10
+#define RT5663_PWR_ADC_S1F (0x1 << 15)
+#define RT5663_PWR_ADC_S1F_SHIFT 15
+#define RT5663_PWR_DAC_S1F (0x1 << 10)
+#define RT5663_PWR_DAC_S1F_SHIFT 10
/* Power Management for Analog 1 (0x0063) */
-#define RT5668_PWR_VREF1 (0x1 << 15)
-#define RT5668_PWR_VREF1_MASK (0x1 << 15)
-#define RT5668_PWR_VREF1_SHIFT 15
-#define RT5668_PWR_FV1 (0x1 << 14)
-#define RT5668_PWR_FV1_MASK (0x1 << 14)
-#define RT5668_PWR_FV1_SHIFT 14
-#define RT5668_PWR_VREF2 (0x1 << 13)
-#define RT5668_PWR_VREF2_MASK (0x1 << 13)
-#define RT5668_PWR_VREF2_SHIFT 13
-#define RT5668_PWR_FV2 (0x1 << 12)
-#define RT5668_PWR_FV2_MASK (0x1 << 12)
-#define RT5668_PWR_FV2_SHIFT 12
-#define RT5668_PWR_MB (0x1 << 9)
-#define RT5668_PWR_MB_MASK (0x1 << 9)
-#define RT5668_PWR_MB_SHIFT 9
-#define RT5668_AMP_HP_MASK (0x3 << 2)
-#define RT5668_AMP_HP_SHIFT 2
-#define RT5668_AMP_HP_1X (0x0 << 2)
-#define RT5668_AMP_HP_3X (0x1 << 2)
-#define RT5668_AMP_HP_5X (0x3 << 2)
-#define RT5668_LDO1_DVO_MASK (0x3)
-#define RT5668_LDO1_DVO_SHIFT 0
-#define RT5668_LDO1_DVO_0_9V (0x0)
-#define RT5668_LDO1_DVO_1_0V (0x1)
-#define RT5668_LDO1_DVO_1_2V (0x2)
-#define RT5668_LDO1_DVO_1_4V (0x3)
+#define RT5663_PWR_VREF1 (0x1 << 15)
+#define RT5663_PWR_VREF1_MASK (0x1 << 15)
+#define RT5663_PWR_VREF1_SHIFT 15
+#define RT5663_PWR_FV1 (0x1 << 14)
+#define RT5663_PWR_FV1_MASK (0x1 << 14)
+#define RT5663_PWR_FV1_SHIFT 14
+#define RT5663_PWR_VREF2 (0x1 << 13)
+#define RT5663_PWR_VREF2_MASK (0x1 << 13)
+#define RT5663_PWR_VREF2_SHIFT 13
+#define RT5663_PWR_FV2 (0x1 << 12)
+#define RT5663_PWR_FV2_MASK (0x1 << 12)
+#define RT5663_PWR_FV2_SHIFT 12
+#define RT5663_PWR_MB (0x1 << 9)
+#define RT5663_PWR_MB_MASK (0x1 << 9)
+#define RT5663_PWR_MB_SHIFT 9
+#define RT5663_AMP_HP_MASK (0x3 << 2)
+#define RT5663_AMP_HP_SHIFT 2
+#define RT5663_AMP_HP_1X (0x0 << 2)
+#define RT5663_AMP_HP_3X (0x1 << 2)
+#define RT5663_AMP_HP_5X (0x3 << 2)
+#define RT5663_LDO1_DVO_MASK (0x3)
+#define RT5663_LDO1_DVO_SHIFT 0
+#define RT5663_LDO1_DVO_0_9V (0x0)
+#define RT5663_LDO1_DVO_1_0V (0x1)
+#define RT5663_LDO1_DVO_1_2V (0x2)
+#define RT5663_LDO1_DVO_1_4V (0x3)
/* Power Management for Analog 2 (0x0064) */
-#define RT5668_PWR_BST1 (0x1 << 15)
-#define RT5668_PWR_BST1_MASK (0x1 << 15)
-#define RT5668_PWR_BST1_SHIFT 15
-#define RT5668_PWR_BST1_OFF (0x0 << 15)
-#define RT5668_PWR_BST1_ON (0x1 << 15)
-#define RT5668_PWR_BST2 (0x1 << 14)
-#define RT5668_PWR_BST2_MASK (0x1 << 14)
-#define RT5668_PWR_BST2_SHIFT 14
-#define RT5668_PWR_MB1 (0x1 << 11)
-#define RT5668_PWR_MB1_SHIFT 11
-#define RT5668_PWR_MB2 (0x1 << 10)
-#define RT5668_PWR_MB2_SHIFT 10
-#define RT5668_PWR_BST2_OP (0x1 << 6)
-#define RT5668_PWR_BST2_OP_MASK (0x1 << 6)
-#define RT5668_PWR_BST2_OP_SHIFT 6
-#define RT5668_PWR_JD1 (0x1 << 3)
-#define RT5668_PWR_JD1_MASK (0x1 << 3)
-#define RT5668_PWR_JD1_SHIFT 3
-#define RT5668_PWR_JD2 (0x1 << 2)
-#define RT5668_PWR_JD2_MASK (0x1 << 2)
-#define RT5668_PWR_JD2_SHIFT 2
-#define RT5668_PWR_RECMIX1 (0x1 << 1)
-#define RT5668_PWR_RECMIX1_SHIFT 1
-#define RT5668_PWR_RECMIX2 (0x1)
-#define RT5668_PWR_RECMIX2_SHIFT 0
+#define RT5663_PWR_BST1 (0x1 << 15)
+#define RT5663_PWR_BST1_MASK (0x1 << 15)
+#define RT5663_PWR_BST1_SHIFT 15
+#define RT5663_PWR_BST1_OFF (0x0 << 15)
+#define RT5663_PWR_BST1_ON (0x1 << 15)
+#define RT5663_PWR_BST2 (0x1 << 14)
+#define RT5663_PWR_BST2_MASK (0x1 << 14)
+#define RT5663_PWR_BST2_SHIFT 14
+#define RT5663_PWR_MB1 (0x1 << 11)
+#define RT5663_PWR_MB1_SHIFT 11
+#define RT5663_PWR_MB2 (0x1 << 10)
+#define RT5663_PWR_MB2_SHIFT 10
+#define RT5663_PWR_BST2_OP (0x1 << 6)
+#define RT5663_PWR_BST2_OP_MASK (0x1 << 6)
+#define RT5663_PWR_BST2_OP_SHIFT 6
+#define RT5663_PWR_JD1 (0x1 << 3)
+#define RT5663_PWR_JD1_MASK (0x1 << 3)
+#define RT5663_PWR_JD1_SHIFT 3
+#define RT5663_PWR_JD2 (0x1 << 2)
+#define RT5663_PWR_JD2_MASK (0x1 << 2)
+#define RT5663_PWR_JD2_SHIFT 2
+#define RT5663_PWR_RECMIX1 (0x1 << 1)
+#define RT5663_PWR_RECMIX1_SHIFT 1
+#define RT5663_PWR_RECMIX2 (0x1)
+#define RT5663_PWR_RECMIX2_SHIFT 0
/* Power Management for Analog 3 (0x0065) */
-#define RT5668_PWR_CBJ_MASK (0x1 << 9)
-#define RT5668_PWR_CBJ_SHIFT 9
-#define RT5668_PWR_CBJ_OFF (0x0 << 9)
-#define RT5668_PWR_CBJ_ON (0x1 << 9)
-#define RT5668_PWR_PLL (0x1 << 6)
-#define RT5668_PWR_PLL_SHIFT 6
-#define RT5668_PWR_LDO2 (0x1 << 2)
-#define RT5668_PWR_LDO2_SHIFT 2
+#define RT5663_PWR_CBJ_MASK (0x1 << 9)
+#define RT5663_PWR_CBJ_SHIFT 9
+#define RT5663_PWR_CBJ_OFF (0x0 << 9)
+#define RT5663_PWR_CBJ_ON (0x1 << 9)
+#define RT5663_PWR_PLL (0x1 << 6)
+#define RT5663_PWR_PLL_SHIFT 6
+#define RT5663_PWR_LDO2 (0x1 << 2)
+#define RT5663_PWR_LDO2_SHIFT 2
/* Power Management for Volume (0x0067) */
-#define RT5668_PWR_MIC_DET (0x1 << 5)
-#define RT5668_PWR_MIC_DET_SHIFT 5
+#define RT5663_V2_PWR_MIC_DET (0x1 << 5)
+#define RT5663_V2_PWR_MIC_DET_SHIFT 5
/* MCLK and System Clock Detection Control (0x006b) */
-#define RT5668_EN_ANA_CLK_DET_MASK (0x1 << 15)
-#define RT5668_EN_ANA_CLK_DET_SHIFT 15
-#define RT5668_EN_ANA_CLK_DET_DIS (0x0 << 15)
-#define RT5668_EN_ANA_CLK_DET_AUTO (0x1 << 15)
-#define RT5668_PWR_CLK_DET_MASK (0x1)
-#define RT5668_PWR_CLK_DET_SHIFT 0
-#define RT5668_PWR_CLK_DET_DIS (0x0)
-#define RT5668_PWR_CLK_DET_EN (0x1)
+#define RT5663_EN_ANA_CLK_DET_MASK (0x1 << 15)
+#define RT5663_EN_ANA_CLK_DET_SHIFT 15
+#define RT5663_EN_ANA_CLK_DET_DIS (0x0 << 15)
+#define RT5663_EN_ANA_CLK_DET_AUTO (0x1 << 15)
+#define RT5663_PWR_CLK_DET_MASK (0x1)
+#define RT5663_PWR_CLK_DET_SHIFT 0
+#define RT5663_PWR_CLK_DET_DIS (0x0)
+#define RT5663_PWR_CLK_DET_EN (0x1)
/* I2S1 Audio Serial Data Port Control (0x0070) */
-#define RT5668_I2S_MS_MASK (0x1 << 15)
-#define RT5668_I2S_MS_SHIFT 15
-#define RT5668_I2S_MS_M (0x0 << 15)
-#define RT5668_I2S_MS_S (0x1 << 15)
-#define RT5668_I2S_BP_MASK (0x1 << 8)
-#define RT5668_I2S_BP_SHIFT 8
-#define RT5668_I2S_BP_NOR (0x0 << 8)
-#define RT5668_I2S_BP_INV (0x1 << 8)
-#define RT5668_I2S_DL_MASK (0x3 << 4)
-#define RT5668_I2S_DL_SHIFT 4
-#define RT5668_I2S_DL_16 (0x0 << 4)
-#define RT5668_I2S_DL_20 (0x1 << 4)
-#define RT5668_I2S_DL_24 (0x2 << 4)
-#define RT5668_I2S_DL_8 (0x3 << 4)
-#define RT5668_I2S_DF_MASK (0x7)
-#define RT5668_I2S_DF_SHIFT 0
-#define RT5668_I2S_DF_I2S (0x0)
-#define RT5668_I2S_DF_LEFT (0x1)
-#define RT5668_I2S_DF_PCM_A (0x2)
-#define RT5668_I2S_DF_PCM_B (0x3)
-#define RT5668_I2S_DF_PCM_A_N (0x6)
-#define RT5668_I2S_DF_PCM_B_N (0x7)
+#define RT5663_I2S_MS_MASK (0x1 << 15)
+#define RT5663_I2S_MS_SHIFT 15
+#define RT5663_I2S_MS_M (0x0 << 15)
+#define RT5663_I2S_MS_S (0x1 << 15)
+#define RT5663_I2S_BP_MASK (0x1 << 8)
+#define RT5663_I2S_BP_SHIFT 8
+#define RT5663_I2S_BP_NOR (0x0 << 8)
+#define RT5663_I2S_BP_INV (0x1 << 8)
+#define RT5663_I2S_DL_MASK (0x3 << 4)
+#define RT5663_I2S_DL_SHIFT 4
+#define RT5663_I2S_DL_16 (0x0 << 4)
+#define RT5663_I2S_DL_20 (0x1 << 4)
+#define RT5663_I2S_DL_24 (0x2 << 4)
+#define RT5663_I2S_DL_8 (0x3 << 4)
+#define RT5663_I2S_DF_MASK (0x7)
+#define RT5663_I2S_DF_SHIFT 0
+#define RT5663_I2S_DF_I2S (0x0)
+#define RT5663_I2S_DF_LEFT (0x1)
+#define RT5663_I2S_DF_PCM_A (0x2)
+#define RT5663_I2S_DF_PCM_B (0x3)
+#define RT5663_I2S_DF_PCM_A_N (0x6)
+#define RT5663_I2S_DF_PCM_B_N (0x7)
/* ADC/DAC Clock Control 1 (0x0073) */
-#define RT5668_I2S_PD1_MASK (0x7 << 12)
-#define RT5668_I2S_PD1_SHIFT 12
-#define RT5668_M_I2S_DIV_MASK (0x7 << 8)
-#define RT5668_M_I2S_DIV_SHIFT 8
-#define RT5668_CLK_SRC_MASK (0x3 << 4)
-#define RT5668_CLK_SRC_MCLK (0x0 << 4)
-#define RT5668_CLK_SRC_PLL_OUT (0x1 << 4)
-#define RT5668_CLK_SRC_DIV (0x2 << 4)
-#define RT5668_CLK_SRC_RC (0x3 << 4)
-#define RT5668_DAC_OSR_MASK (0x3 << 2)
-#define RT5668_DAC_OSR_SHIFT 2
-#define RT5668_DAC_OSR_128 (0x0 << 2)
-#define RT5668_DAC_OSR_64 (0x1 << 2)
-#define RT5668_DAC_OSR_32 (0x2 << 2)
-#define RT5668_ADC_OSR_MASK (0x3)
-#define RT5668_ADC_OSR_SHIFT 0
-#define RT5668_ADC_OSR_128 (0x0)
-#define RT5668_ADC_OSR_64 (0x1)
-#define RT5668_ADC_OSR_32 (0x2)
+#define RT5663_I2S_PD1_MASK (0x7 << 12)
+#define RT5663_I2S_PD1_SHIFT 12
+#define RT5663_M_I2S_DIV_MASK (0x7 << 8)
+#define RT5663_M_I2S_DIV_SHIFT 8
+#define RT5663_CLK_SRC_MASK (0x3 << 4)
+#define RT5663_CLK_SRC_MCLK (0x0 << 4)
+#define RT5663_CLK_SRC_PLL_OUT (0x1 << 4)
+#define RT5663_CLK_SRC_DIV (0x2 << 4)
+#define RT5663_CLK_SRC_RC (0x3 << 4)
+#define RT5663_DAC_OSR_MASK (0x3 << 2)
+#define RT5663_DAC_OSR_SHIFT 2
+#define RT5663_DAC_OSR_128 (0x0 << 2)
+#define RT5663_DAC_OSR_64 (0x1 << 2)
+#define RT5663_DAC_OSR_32 (0x2 << 2)
+#define RT5663_ADC_OSR_MASK (0x3)
+#define RT5663_ADC_OSR_SHIFT 0
+#define RT5663_ADC_OSR_128 (0x0)
+#define RT5663_ADC_OSR_64 (0x1)
+#define RT5663_ADC_OSR_32 (0x2)
/* TDM1 control 1 (0x0078) */
-#define RT5668_TDM_MODE_MASK (0x1 << 15)
-#define RT5668_TDM_MODE_SHIFT 15
-#define RT5668_TDM_MODE_I2S (0x0 << 15)
-#define RT5668_TDM_MODE_TDM (0x1 << 15)
-#define RT5668_TDM_IN_CH_MASK (0x3 << 10)
-#define RT5668_TDM_IN_CH_SHIFT 10
-#define RT5668_TDM_IN_CH_2 (0x0 << 10)
-#define RT5668_TDM_IN_CH_4 (0x1 << 10)
-#define RT5668_TDM_IN_CH_6 (0x2 << 10)
-#define RT5668_TDM_IN_CH_8 (0x3 << 10)
-#define RT5668_TDM_OUT_CH_MASK (0x3 << 8)
-#define RT5668_TDM_OUT_CH_SHIFT 8
-#define RT5668_TDM_OUT_CH_2 (0x0 << 8)
-#define RT5668_TDM_OUT_CH_4 (0x1 << 8)
-#define RT5668_TDM_OUT_CH_6 (0x2 << 8)
-#define RT5668_TDM_OUT_CH_8 (0x3 << 8)
-#define RT5668_TDM_IN_LEN_MASK (0x3 << 6)
-#define RT5668_TDM_IN_LEN_SHIFT 6
-#define RT5668_TDM_IN_LEN_16 (0x0 << 6)
-#define RT5668_TDM_IN_LEN_20 (0x1 << 6)
-#define RT5668_TDM_IN_LEN_24 (0x2 << 6)
-#define RT5668_TDM_IN_LEN_32 (0x3 << 6)
-#define RT5668_TDM_OUT_LEN_MASK (0x3 << 4)
-#define RT5668_TDM_OUT_LEN_SHIFT 4
-#define RT5668_TDM_OUT_LEN_16 (0x0 << 4)
-#define RT5668_TDM_OUT_LEN_20 (0x1 << 4)
-#define RT5668_TDM_OUT_LEN_24 (0x2 << 4)
-#define RT5668_TDM_OUT_LEN_32 (0x3 << 4)
+#define RT5663_TDM_MODE_MASK (0x1 << 15)
+#define RT5663_TDM_MODE_SHIFT 15
+#define RT5663_TDM_MODE_I2S (0x0 << 15)
+#define RT5663_TDM_MODE_TDM (0x1 << 15)
+#define RT5663_TDM_IN_CH_MASK (0x3 << 10)
+#define RT5663_TDM_IN_CH_SHIFT 10
+#define RT5663_TDM_IN_CH_2 (0x0 << 10)
+#define RT5663_TDM_IN_CH_4 (0x1 << 10)
+#define RT5663_TDM_IN_CH_6 (0x2 << 10)
+#define RT5663_TDM_IN_CH_8 (0x3 << 10)
+#define RT5663_TDM_OUT_CH_MASK (0x3 << 8)
+#define RT5663_TDM_OUT_CH_SHIFT 8
+#define RT5663_TDM_OUT_CH_2 (0x0 << 8)
+#define RT5663_TDM_OUT_CH_4 (0x1 << 8)
+#define RT5663_TDM_OUT_CH_6 (0x2 << 8)
+#define RT5663_TDM_OUT_CH_8 (0x3 << 8)
+#define RT5663_TDM_IN_LEN_MASK (0x3 << 6)
+#define RT5663_TDM_IN_LEN_SHIFT 6
+#define RT5663_TDM_IN_LEN_16 (0x0 << 6)
+#define RT5663_TDM_IN_LEN_20 (0x1 << 6)
+#define RT5663_TDM_IN_LEN_24 (0x2 << 6)
+#define RT5663_TDM_IN_LEN_32 (0x3 << 6)
+#define RT5663_TDM_OUT_LEN_MASK (0x3 << 4)
+#define RT5663_TDM_OUT_LEN_SHIFT 4
+#define RT5663_TDM_OUT_LEN_16 (0x0 << 4)
+#define RT5663_TDM_OUT_LEN_20 (0x1 << 4)
+#define RT5663_TDM_OUT_LEN_24 (0x2 << 4)
+#define RT5663_TDM_OUT_LEN_32 (0x3 << 4)
/* Global Clock Control (0x0080) */
-#define RT5668_SCLK_SRC_MASK (0x3 << 14)
-#define RT5668_SCLK_SRC_SHIFT 14
-#define RT5668_SCLK_SRC_MCLK (0x0 << 14)
-#define RT5668_SCLK_SRC_PLL1 (0x1 << 14)
-#define RT5668_SCLK_SRC_RCCLK (0x2 << 14)
-#define RT5668_PLL1_SRC_MASK (0x7 << 8)
-#define RT5668_PLL1_SRC_SHIFT 8
-#define RT5668_PLL1_SRC_MCLK (0x0 << 8)
-#define RT5668_PLL1_SRC_BCLK1 (0x1 << 8)
-#define RT5668_PLL1_PD_MASK (0x1 << 4)
-#define RT5668_PLL1_PD_SHIFT 4
-
-#define RT5668_PLL_INP_MAX 40000000
-#define RT5668_PLL_INP_MIN 256000
+#define RT5663_SCLK_SRC_MASK (0x3 << 14)
+#define RT5663_SCLK_SRC_SHIFT 14
+#define RT5663_SCLK_SRC_MCLK (0x0 << 14)
+#define RT5663_SCLK_SRC_PLL1 (0x1 << 14)
+#define RT5663_SCLK_SRC_RCCLK (0x2 << 14)
+#define RT5663_PLL1_SRC_MASK (0x7 << 11)
+#define RT5663_PLL1_SRC_SHIFT 11
+#define RT5663_PLL1_SRC_MCLK (0x0 << 11)
+#define RT5663_PLL1_SRC_BCLK1 (0x1 << 11)
+#define RT5663_V2_PLL1_SRC_MASK (0x7 << 8)
+#define RT5663_V2_PLL1_SRC_SHIFT 8
+#define RT5663_V2_PLL1_SRC_MCLK (0x0 << 8)
+#define RT5663_V2_PLL1_SRC_BCLK1 (0x1 << 8)
+#define RT5663_PLL1_PD_MASK (0x1 << 4)
+#define RT5663_PLL1_PD_SHIFT 4
+
+#define RT5663_PLL_INP_MAX 40000000
+#define RT5663_PLL_INP_MIN 256000
/* PLL M/N/K Code Control 1 (0x0081) */
-#define RT5668_PLL_N_MAX 0x001ff
-#define RT5668_PLL_N_MASK (RT5668_PLL_N_MAX << 7)
-#define RT5668_PLL_N_SHIFT 7
-#define RT5668_PLL_K_MAX 0x001f
-#define RT5668_PLL_K_MASK (RT5668_PLL_K_MAX)
-#define RT5668_PLL_K_SHIFT 0
+#define RT5663_PLL_N_MAX 0x001ff
+#define RT5663_PLL_N_MASK (RT5663_PLL_N_MAX << 7)
+#define RT5663_PLL_N_SHIFT 7
+#define RT5663_PLL_K_MAX 0x001f
+#define RT5663_PLL_K_MASK (RT5663_PLL_K_MAX)
+#define RT5663_PLL_K_SHIFT 0
/* PLL M/N/K Code Control 2 (0x0082) */
-#define RT5668_PLL_M_MAX 0x00f
-#define RT5668_PLL_M_MASK (RT5668_PLL_M_MAX << 12)
-#define RT5668_PLL_M_SHIFT 12
-#define RT5668_PLL_M_BP (0x1 << 11)
-#define RT5668_PLL_M_BP_SHIFT 11
+#define RT5663_PLL_M_MAX 0x00f
+#define RT5663_PLL_M_MASK (RT5663_PLL_M_MAX << 12)
+#define RT5663_PLL_M_SHIFT 12
+#define RT5663_PLL_M_BP (0x1 << 11)
+#define RT5663_PLL_M_BP_SHIFT 11
/* PLL tracking mode 1 (0x0083) */
-#define RT5668_I2S1_ASRC_MASK (0x1 << 13)
-#define RT5668_I2S1_ASRC_SHIFT 13
-#define RT5668_DAC_STO1_ASRC_MASK (0x1 << 12)
-#define RT5668_DAC_STO1_ASRC_SHIFT 12
-#define RT5668_ADC_STO1_ASRC_MASK (0x1 << 4)
-#define RT5668_ADC_STO1_ASRC_SHIFT 4
+#define RT5663_V2_I2S1_ASRC_MASK (0x1 << 13)
+#define RT5663_V2_I2S1_ASRC_SHIFT 13
+#define RT5663_V2_DAC_STO1_ASRC_MASK (0x1 << 12)
+#define RT5663_V2_DAC_STO1_ASRC_SHIFT 12
+#define RT5663_V2_ADC_STO1_ASRC_MASK (0x1 << 4)
+#define RT5663_V2_ADC_STO1_ASRC_SHIFT 4
/* PLL tracking mode 2 (0x0084)*/
-#define RT5668_DA_STO1_TRACK_MASK (0x7 << 12)
-#define RT5668_DA_STO1_TRACK_SHIFT 12
-#define RT5668_DA_STO1_TRACK_SYSCLK (0x0 << 12)
-#define RT5668_DA_STO1_TRACK_I2S1 (0x1 << 12)
+#define RT5663_DA_STO1_TRACK_MASK (0x7 << 12)
+#define RT5663_DA_STO1_TRACK_SHIFT 12
+#define RT5663_DA_STO1_TRACK_SYSCLK (0x0 << 12)
+#define RT5663_DA_STO1_TRACK_I2S1 (0x1 << 12)
/* PLL tracking mode 3 (0x0085)*/
-#define RT5668_AD_STO1_TRACK_MASK (0x7 << 12)
-#define RT5668_AD_STO1_TRACK_SHIFT 12
-#define RT5668_AD_STO1_TRACK_SYSCLK (0x0 << 12)
-#define RT5668_AD_STO1_TRACK_I2S1 (0x1 << 12)
+#define RT5663_V2_AD_STO1_TRACK_MASK (0x7 << 12)
+#define RT5663_V2_AD_STO1_TRACK_SHIFT 12
+#define RT5663_V2_AD_STO1_TRACK_SYSCLK (0x0 << 12)
+#define RT5663_V2_AD_STO1_TRACK_I2S1 (0x1 << 12)
/* HPOUT Charge pump control 1 (0x0091) */
-#define RT5668_OSW_HP_L_MASK (0x1 << 11)
-#define RT5668_OSW_HP_L_SHIFT 11
-#define RT5668_OSW_HP_L_EN (0x1 << 11)
-#define RT5668_OSW_HP_L_DIS (0x0 << 11)
-#define RT5668_OSW_HP_R_MASK (0x1 << 10)
-#define RT5668_OSW_HP_R_SHIFT 10
-#define RT5668_OSW_HP_R_EN (0x1 << 10)
-#define RT5668_OSW_HP_R_DIS (0x0 << 10)
-#define RT5668_SEL_PM_HP_MASK (0x3 << 8)
-#define RT5668_SEL_PM_HP_SHIFT 8
-#define RT5668_SEL_PM_HP_0_6 (0x0 << 8)
-#define RT5668_SEL_PM_HP_0_9 (0x1 << 8)
-#define RT5668_SEL_PM_HP_1_8 (0x2 << 8)
-#define RT5668_SEL_PM_HP_HIGH (0x3 << 8)
-#define RT5668_OVCD_HP_MASK (0x1 << 2)
-#define RT5668_OVCD_HP_SHIFT 2
-#define RT5668_OVCD_HP_EN (0x1 << 2)
-#define RT5668_OVCD_HP_DIS (0x0 << 2)
+#define RT5663_OSW_HP_L_MASK (0x1 << 11)
+#define RT5663_OSW_HP_L_SHIFT 11
+#define RT5663_OSW_HP_L_EN (0x1 << 11)
+#define RT5663_OSW_HP_L_DIS (0x0 << 11)
+#define RT5663_OSW_HP_R_MASK (0x1 << 10)
+#define RT5663_OSW_HP_R_SHIFT 10
+#define RT5663_OSW_HP_R_EN (0x1 << 10)
+#define RT5663_OSW_HP_R_DIS (0x0 << 10)
+#define RT5663_SEL_PM_HP_MASK (0x3 << 8)
+#define RT5663_SEL_PM_HP_SHIFT 8
+#define RT5663_SEL_PM_HP_0_6 (0x0 << 8)
+#define RT5663_SEL_PM_HP_0_9 (0x1 << 8)
+#define RT5663_SEL_PM_HP_1_8 (0x2 << 8)
+#define RT5663_SEL_PM_HP_HIGH (0x3 << 8)
+#define RT5663_OVCD_HP_MASK (0x1 << 2)
+#define RT5663_OVCD_HP_SHIFT 2
+#define RT5663_OVCD_HP_EN (0x1 << 2)
+#define RT5663_OVCD_HP_DIS (0x0 << 2)
/* RC Clock Control (0x0094) */
-#define RT5668_DIG_25M_CLK_MASK (0x1 << 9)
-#define RT5668_DIG_25M_CLK_SHIFT 9
-#define RT5668_DIG_25M_CLK_DIS (0x0 << 9)
-#define RT5668_DIG_25M_CLK_EN (0x1 << 9)
-#define RT5668_DIG_1M_CLK_MASK (0x1 << 8)
-#define RT5668_DIG_1M_CLK_SHIFT 8
-#define RT5668_DIG_1M_CLK_DIS (0x0 << 8)
-#define RT5668_DIG_1M_CLK_EN (0x1 << 8)
+#define RT5663_DIG_25M_CLK_MASK (0x1 << 9)
+#define RT5663_DIG_25M_CLK_SHIFT 9
+#define RT5663_DIG_25M_CLK_DIS (0x0 << 9)
+#define RT5663_DIG_25M_CLK_EN (0x1 << 9)
+#define RT5663_DIG_1M_CLK_MASK (0x1 << 8)
+#define RT5663_DIG_1M_CLK_SHIFT 8
+#define RT5663_DIG_1M_CLK_DIS (0x0 << 8)
+#define RT5663_DIG_1M_CLK_EN (0x1 << 8)
/* Auto Turn On 1M RC CLK (0x009f) */
-#define RT5668_IRQ_POW_SAV_MASK (0x1 << 15)
-#define RT5668_IRQ_POW_SAV_SHIFT 15
-#define RT5668_IRQ_POW_SAV_DIS (0x0 << 15)
-#define RT5668_IRQ_POW_SAV_EN (0x1 << 15)
-#define RT5668_IRQ_POW_SAV_JD1_MASK (0x1 << 14)
-#define RT5668_IRQ_POW_SAV_JD1_SHIFT 14
-#define RT5668_IRQ_POW_SAV_JD1_DIS (0x0 << 14)
-#define RT5668_IRQ_POW_SAV_JD1_EN (0x1 << 14)
+#define RT5663_IRQ_POW_SAV_MASK (0x1 << 15)
+#define RT5663_IRQ_POW_SAV_SHIFT 15
+#define RT5663_IRQ_POW_SAV_DIS (0x0 << 15)
+#define RT5663_IRQ_POW_SAV_EN (0x1 << 15)
+#define RT5663_IRQ_POW_SAV_JD1_MASK (0x1 << 14)
+#define RT5663_IRQ_POW_SAV_JD1_SHIFT 14
+#define RT5663_IRQ_POW_SAV_JD1_DIS (0x0 << 14)
+#define RT5663_IRQ_POW_SAV_JD1_EN (0x1 << 14)
/* IRQ Control 1 (0x00b6) */
-#define RT5668_EN_CB_JD_MASK (0x1 << 3)
-#define RT5668_EN_CB_JD_SHIFT 3
-#define RT5668_EN_CB_JD_EN (0x1 << 3)
-#define RT5668_EN_CB_JD_DIS (0x0 << 3)
+#define RT5663_EN_CB_JD_MASK (0x1 << 3)
+#define RT5663_EN_CB_JD_SHIFT 3
+#define RT5663_EN_CB_JD_EN (0x1 << 3)
+#define RT5663_EN_CB_JD_DIS (0x0 << 3)
/* IRQ Control 3 (0x00b8) */
-#define RT5668_EN_IRQ_INLINE_MASK (0x1 << 6)
-#define RT5668_EN_IRQ_INLINE_SHIFT 6
-#define RT5668_EN_IRQ_INLINE_BYP (0x0 << 6)
-#define RT5668_EN_IRQ_INLINE_NOR (0x1 << 6)
+#define RT5663_V2_EN_IRQ_INLINE_MASK (0x1 << 6)
+#define RT5663_V2_EN_IRQ_INLINE_SHIFT 6
+#define RT5663_V2_EN_IRQ_INLINE_BYP (0x0 << 6)
+#define RT5663_V2_EN_IRQ_INLINE_NOR (0x1 << 6)
/* GPIO Control 1 (0x00c0) */
-#define RT5668_GP1_PIN_MASK (0x1 << 15)
-#define RT5668_GP1_PIN_SHIFT 15
-#define RT5668_GP1_PIN_GPIO1 (0x0 << 15)
-#define RT5668_GP1_PIN_IRQ (0x1 << 15)
+#define RT5663_GP1_PIN_MASK (0x1 << 15)
+#define RT5663_GP1_PIN_SHIFT 15
+#define RT5663_GP1_PIN_GPIO1 (0x0 << 15)
+#define RT5663_GP1_PIN_IRQ (0x1 << 15)
/* GPIO Control 2 (0x00c1) */
-#define RT5668_GP4_PIN_CONF_MASK (0x1 << 5)
-#define RT5668_GP4_PIN_CONF_SHIFT 5
-#define RT5668_GP4_PIN_CONF_INPUT (0x0 << 5)
-#define RT5668_GP4_PIN_CONF_OUTPUT (0x1 << 5)
+#define RT5663_GP4_PIN_CONF_MASK (0x1 << 5)
+#define RT5663_GP4_PIN_CONF_SHIFT 5
+#define RT5663_GP4_PIN_CONF_INPUT (0x0 << 5)
+#define RT5663_GP4_PIN_CONF_OUTPUT (0x1 << 5)
/* GPIO Control 2 (0x00c2) */
-#define RT5668_GP8_PIN_CONF_MASK (0x1 << 13)
-#define RT5668_GP8_PIN_CONF_SHIFT 13
-#define RT5668_GP8_PIN_CONF_INPUT (0x0 << 13)
-#define RT5668_GP8_PIN_CONF_OUTPUT (0x1 << 13)
+#define RT5663_GP8_PIN_CONF_MASK (0x1 << 13)
+#define RT5663_GP8_PIN_CONF_SHIFT 13
+#define RT5663_GP8_PIN_CONF_INPUT (0x0 << 13)
+#define RT5663_GP8_PIN_CONF_OUTPUT (0x1 << 13)
/* 4 Buttons Inline Command Function 1 (0x00df) */
-#define RT5668_4BTN_CLK_DEB_MASK (0x3 << 2)
-#define RT5668_4BTN_CLK_DEB_SHIFT 2
-#define RT5668_4BTN_CLK_DEB_8MS (0x0 << 2)
-#define RT5668_4BTN_CLK_DEB_16MS (0x1 << 2)
-#define RT5668_4BTN_CLK_DEB_32MS (0x2 << 2)
-#define RT5668_4BTN_CLK_DEB_65MS (0x3 << 2)
+#define RT5663_4BTN_CLK_DEB_MASK (0x3 << 2)
+#define RT5663_4BTN_CLK_DEB_SHIFT 2
+#define RT5663_4BTN_CLK_DEB_8MS (0x0 << 2)
+#define RT5663_4BTN_CLK_DEB_16MS (0x1 << 2)
+#define RT5663_4BTN_CLK_DEB_32MS (0x2 << 2)
+#define RT5663_4BTN_CLK_DEB_65MS (0x3 << 2)
/* Inline Command Function 6 (0x00e0) */
-#define RT5668_EN_4BTN_INL_MASK (0x1 << 15)
-#define RT5668_EN_4BTN_INL_SHIFT 15
-#define RT5668_EN_4BTN_INL_DIS (0x0 << 15)
-#define RT5668_EN_4BTN_INL_EN (0x1 << 15)
-#define RT5668_RESET_4BTN_INL_MASK (0x1 << 14)
-#define RT5668_RESET_4BTN_INL_SHIFT 14
-#define RT5668_RESET_4BTN_INL_RESET (0x0 << 14)
-#define RT5668_RESET_4BTN_INL_NOR (0x1 << 14)
+#define RT5663_EN_4BTN_INL_MASK (0x1 << 15)
+#define RT5663_EN_4BTN_INL_SHIFT 15
+#define RT5663_EN_4BTN_INL_DIS (0x0 << 15)
+#define RT5663_EN_4BTN_INL_EN (0x1 << 15)
+#define RT5663_RESET_4BTN_INL_MASK (0x1 << 14)
+#define RT5663_RESET_4BTN_INL_SHIFT 14
+#define RT5663_RESET_4BTN_INL_RESET (0x0 << 14)
+#define RT5663_RESET_4BTN_INL_NOR (0x1 << 14)
/* Digital Misc Control (0x00fa) */
-#define RT5668_DIG_GATE_CTRL_MASK 0x1
-#define RT5668_DIG_GATE_CTRL_SHIFT (0)
-#define RT5668_DIG_GATE_CTRL_DIS 0x0
-#define RT5668_DIG_GATE_CTRL_EN 0x1
+#define RT5663_DIG_GATE_CTRL_MASK 0x1
+#define RT5663_DIG_GATE_CTRL_SHIFT (0)
+#define RT5663_DIG_GATE_CTRL_DIS 0x0
+#define RT5663_DIG_GATE_CTRL_EN 0x1
/* Chopper and Clock control for DAC L (0x013a)*/
-#define RT5668_CKXEN_DAC1_MASK (0x1 << 13)
-#define RT5668_CKXEN_DAC1_SHIFT 13
-#define RT5668_CKGEN_DAC1_MASK (0x1 << 12)
-#define RT5668_CKGEN_DAC1_SHIFT 12
+#define RT5663_CKXEN_DAC1_MASK (0x1 << 13)
+#define RT5663_CKXEN_DAC1_SHIFT 13
+#define RT5663_CKGEN_DAC1_MASK (0x1 << 12)
+#define RT5663_CKGEN_DAC1_SHIFT 12
/* Chopper and Clock control for ADC (0x013b)*/
-#define RT5668_CKXEN_ADCC_MASK (0x1 << 13)
-#define RT5668_CKXEN_ADCC_SHIFT 13
-#define RT5668_CKGEN_ADCC_MASK (0x1 << 12)
-#define RT5668_CKGEN_ADCC_SHIFT 12
+#define RT5663_CKXEN_ADCC_MASK (0x1 << 13)
+#define RT5663_CKXEN_ADCC_SHIFT 13
+#define RT5663_CKGEN_ADCC_MASK (0x1 << 12)
+#define RT5663_CKGEN_ADCC_SHIFT 12
/* HP Behavior Logic Control 2 (0x01db) */
-#define RT5668_HP_SIG_SRC1_MASK (0x3)
-#define RT5668_HP_SIG_SRC1_SHIFT 0
-#define RT5668_HP_SIG_SRC1_HP_DC (0x0)
-#define RT5668_HP_SIG_SRC1_HP_CALIB (0x1)
-#define RT5668_HP_SIG_SRC1_REG (0x2)
-#define RT5668_HP_SIG_SRC1_SILENCE (0x3)
+#define RT5663_HP_SIG_SRC1_MASK (0x3)
+#define RT5663_HP_SIG_SRC1_SHIFT 0
+#define RT5663_HP_SIG_SRC1_HP_DC (0x0)
+#define RT5663_HP_SIG_SRC1_HP_CALIB (0x1)
+#define RT5663_HP_SIG_SRC1_REG (0x2)
+#define RT5663_HP_SIG_SRC1_SILENCE (0x3)
/* RT5663 specific register */
#define RT5663_HP_OUT_EN 0x0002
@@ -707,6 +704,10 @@
#define RT5663_TDM_3 0x0079
#define RT5663_TDM_4 0x007a
#define RT5663_TDM_5 0x007b
+#define RT5663_TDM_6 0x007c
+#define RT5663_TDM_7 0x007d
+#define RT5663_TDM_8 0x007e
+#define RT5663_TDM_9 0x007f
#define RT5663_GLB_CLK 0x0080
#define RT5663_PLL_1 0x0081
#define RT5663_PLL_2 0x0082
@@ -739,7 +740,7 @@
#define RT5663_INT_ST_2 0x00bf
#define RT5663_GPIO_1 0x00c0
#define RT5663_GPIO_2 0x00c1
-#define RT5663_GPIO_STA 0x00c5
+#define RT5663_GPIO_STA1 0x00c5
#define RT5663_SIN_GEN_1 0x00cb
#define RT5663_SIN_GEN_2 0x00cc
#define RT5663_SIN_GEN_3 0x00cd
@@ -800,6 +801,8 @@
#define RT5663_TEST_MODE_1 0x0144
#define RT5663_TEST_MODE_2 0x0145
#define RT5663_TEST_MODE_3 0x0146
+#define RT5663_TEST_MODE_4 0x0147
+#define RT5663_TEST_MODE_5 0x0148
#define RT5663_STO_DRE_1 0x0160
#define RT5663_STO_DRE_2 0x0161
#define RT5663_STO_DRE_3 0x0162
@@ -921,19 +924,19 @@
#define RT5663_ADC_EQ_POST_VOL_L 0x03f2
#define RT5663_ADC_EQ_POST_VOL_R 0x03f3
-/* RT5663: RECMIX Control (0x0010) */
+/* RECMIX Control (0x0010) */
#define RT5663_RECMIX1_BST1_MASK (0x1)
#define RT5663_RECMIX1_BST1_SHIFT 0
#define RT5663_RECMIX1_BST1_ON (0x0)
#define RT5663_RECMIX1_BST1_OFF (0x1)
-/* RT5663: Bypass Stereo1 DAC Mixer Control (0x002d) */
+/* Bypass Stereo1 DAC Mixer Control (0x002d) */
#define RT5663_DACL1_SRC_MASK (0x1 << 3)
#define RT5663_DACL1_SRC_SHIFT 3
#define RT5663_DACR1_SRC_MASK (0x1 << 2)
#define RT5663_DACR1_SRC_SHIFT 2
-/* RT5663: TDM control 2 (0x0078) */
+/* TDM control 2 (0x0078) */
#define RT5663_DATA_SWAP_ADCDAT1_MASK (0x3 << 14)
#define RT5663_DATA_SWAP_ADCDAT1_SHIFT 14
#define RT5663_DATA_SWAP_ADCDAT1_LR (0x0 << 14)
@@ -941,7 +944,7 @@
#define RT5663_DATA_SWAP_ADCDAT1_LL (0x2 << 14)
#define RT5663_DATA_SWAP_ADCDAT1_RR (0x3 << 14)
-/* RT5663: TDM control 5 (0x007b) */
+/* TDM control 5 (0x007b) */
#define RT5663_TDM_LENGTN_MASK (0x3)
#define RT5663_TDM_LENGTN_SHIFT 0
#define RT5663_TDM_LENGTN_16 (0x0)
@@ -949,17 +952,6 @@
#define RT5663_TDM_LENGTN_24 (0x2)
#define RT5663_TDM_LENGTN_32 (0x3)
-/* RT5663: Global Clock Control (0x0080) */
-#define RT5663_SCLK_SRC_MASK (0x3 << 14)
-#define RT5663_SCLK_SRC_SHIFT 14
-#define RT5663_SCLK_SRC_MCLK (0x0 << 14)
-#define RT5663_SCLK_SRC_PLL1 (0x1 << 14)
-#define RT5663_SCLK_SRC_RCCLK (0x2 << 14)
-#define RT5663_PLL1_SRC_MASK (0x7 << 11)
-#define RT5663_PLL1_SRC_SHIFT 11
-#define RT5663_PLL1_SRC_MCLK (0x0 << 11)
-#define RT5663_PLL1_SRC_BCLK1 (0x1 << 11)
-
/* PLL tracking mode 1 (0x0083) */
#define RT5663_I2S1_ASRC_MASK (0x1 << 11)
#define RT5663_I2S1_ASRC_SHIFT 11
@@ -978,37 +970,47 @@
#define RT5663_AD_STO1_TRACK_SYSCLK (0x0)
#define RT5663_AD_STO1_TRACK_I2S1 (0x1)
-/* RT5663: HPOUT Charge pump control 1 (0x0091) */
+/* HPOUT Charge pump control 1 (0x0091) */
#define RT5663_SI_HP_MASK (0x1 << 12)
#define RT5663_SI_HP_SHIFT 12
#define RT5663_SI_HP_EN (0x1 << 12)
#define RT5663_SI_HP_DIS (0x0 << 12)
-/* RT5663: GPIO Control 2 (0x00b6) */
+/* GPIO Control 2 (0x00b6) */
#define RT5663_GP1_PIN_CONF_MASK (0x1 << 2)
#define RT5663_GP1_PIN_CONF_SHIFT 2
#define RT5663_GP1_PIN_CONF_OUTPUT (0x1 << 2)
#define RT5663_GP1_PIN_CONF_INPUT (0x0 << 2)
-/* RT5663: GPIO Control 2 (0x00b7) */
+/* GPIO Control 2 (0x00b7) */
#define RT5663_EN_IRQ_INLINE_MASK (0x1 << 3)
#define RT5663_EN_IRQ_INLINE_SHIFT 3
#define RT5663_EN_IRQ_INLINE_NOR (0x1 << 3)
#define RT5663_EN_IRQ_INLINE_BYP (0x0 << 3)
-/* RT5663: IRQ Control 1 (0x00c1) */
+/* GPIO Control 1 (0x00c0) */
+#define RT5663_GPIO1_TYPE_MASK (0x1 << 15)
+#define RT5663_GPIO1_TYPE_SHIFT 15
+#define RT5663_GPIO1_TYPE_EN (0x1 << 15)
+#define RT5663_GPIO1_TYPE_DIS (0x0 << 15)
+
+/* IRQ Control 1 (0x00c1) */
#define RT5663_EN_IRQ_JD1_MASK (0x1 << 6)
#define RT5663_EN_IRQ_JD1_SHIFT 6
#define RT5663_EN_IRQ_JD1_EN (0x1 << 6)
#define RT5663_EN_IRQ_JD1_DIS (0x0 << 6)
+#define RT5663_SEL_GPIO1_MASK (0x1 << 2)
+#define RT5663_SEL_GPIO1_SHIFT 2
+#define RT5663_SEL_GPIO1_EN (0x1 << 2)
+#define RT5663_SEL_GPIO1_DIS (0x0 << 2)
-/* RT5663: Inline Command Function 2 (0x00dc) */
+/* Inline Command Function 2 (0x00dc) */
#define RT5663_PWR_MIC_DET_MASK (0x1)
#define RT5663_PWR_MIC_DET_SHIFT 0
#define RT5663_PWR_MIC_DET_ON (0x1)
#define RT5663_PWR_MIC_DET_OFF (0x0)
-/* RT5663: Embeeded Jack and Type Detection Control 1 (0x00e6)*/
+/* Embedded Jack and Type Detection Control 1 (0x00e6) */
#define RT5663_CBJ_DET_MASK (0x1 << 15)
#define RT5663_CBJ_DET_SHIFT 15
#define RT5663_CBJ_DET_DIS (0x0 << 15)
@@ -1022,17 +1024,17 @@
#define RT5663_POL_EXT_JD_EN (0x1 << 10)
#define RT5663_POL_EXT_JD_DIS (0x0 << 10)
-/* RT5663: DACREF LDO Control (0x0112)*/
+/* DACREF LDO Control (0x0112)*/
#define RT5663_PWR_LDO_DACREFL_MASK (0x1 << 9)
#define RT5663_PWR_LDO_DACREFL_SHIFT 9
#define RT5663_PWR_LDO_DACREFR_MASK (0x1 << 1)
#define RT5663_PWR_LDO_DACREFR_SHIFT 1
-/* RT5663: Stereo Dynamic Range Enhancement Control 9 (0x0168, 0x0169)*/
+/* Stereo Dynamic Range Enhancement Control 9 (0x0168, 0x0169)*/
#define RT5663_DRE_GAIN_HP_MASK (0x1f)
#define RT5663_DRE_GAIN_HP_SHIFT 0
-/* RT5663: Combo Jack Control (0x0250) */
+/* Combo Jack Control (0x0250) */
#define RT5663_INBUF_CBJ_BST1_MASK (0x1 << 11)
#define RT5663_INBUF_CBJ_BST1_SHIFT 11
#define RT5663_INBUF_CBJ_BST1_ON (0x1 << 11)
@@ -1042,11 +1044,11 @@
#define RT5663_CBJ_SENSE_BST1_L (0x1 << 10)
#define RT5663_CBJ_SENSE_BST1_R (0x0 << 10)
-/* RT5663: Combo Jack Control (0x0251) */
+/* Combo Jack Control (0x0251) */
#define RT5663_GAIN_BST1_MASK (0xf)
#define RT5663_GAIN_BST1_SHIFT 0
-/* RT5663: Dummy register 1 (0x02fa) */
+/* Dummy register 1 (0x02fa) */
#define RT5663_EMB_CLK_MASK (0x1 << 9)
#define RT5663_EMB_CLK_SHIFT 9
#define RT5663_EMB_CLK_EN (0x1 << 9)
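
The *_MASK/*_SHIFT pairs defined in the header hunks above follow the usual regmap bit-field idiom: a driver updates one field of a 16-bit codec register by passing the field's mask together with a value placed at the right bit position, and recovers the field on read by masking and shifting back down. Below is a minimal sketch of that pattern, assuming the RT5663_GLB_CLK, RT5663_SCLK_SRC_* and RT5663_SCLK_SRC_SHIFT definitions from rt5663.h above; rt5663_select_sysclk() and rt5663_get_sysclk_field() are hypothetical helpers added only for illustration and are not part of this patch, while regmap_update_bits() and regmap_read() are the standard regmap API.

#include <linux/errno.h>
#include <linux/regmap.h>

/*
 * Illustrative sketch only (not part of the patch): program and read back
 * the system-clock-source field of RT5663_GLB_CLK (0x0080) using the
 * mask/shift macros from rt5663.h.
 */
static int rt5663_select_sysclk(struct regmap *regmap, unsigned int src)
{
	/* src is one of RT5663_SCLK_SRC_MCLK/PLL1/RCCLK (already shifted). */
	if (src & ~RT5663_SCLK_SRC_MASK)
		return -EINVAL;

	/* Touch only the SCLK source bits, leaving the rest of 0x0080 alone. */
	return regmap_update_bits(regmap, RT5663_GLB_CLK,
				  RT5663_SCLK_SRC_MASK, src);
}

static int rt5663_get_sysclk_field(struct regmap *regmap, unsigned int *field)
{
	unsigned int val;
	int ret;

	ret = regmap_read(regmap, RT5663_GLB_CLK, &val);
	if (ret)
		return ret;

	/* The _SHIFT macro turns the masked bits back into a small field value. */
	*field = (val & RT5663_SCLK_SRC_MASK) >> RT5663_SCLK_SRC_SHIFT;
	return 0;
}

Keeping the mask and shift as paired macros is what lets both the write and read paths stay table-driven and self-documenting; the same idiom applies to every other field defined in these headers.
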
diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c
new file mode 100644
index 000000000000..324461e985b3
--- /dev/null
+++ b/sound/soc/codecs/rt5665.c
@@ -0,0 +1,4874 @@
+/*
+ * rt5665.c -- RT5665/RT5658 ALSA SoC audio codec driver
+ *
+ * Copyright 2016 Realtek Semiconductor Corp.
+ * Author: Bard Liao <bardliao@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/acpi.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mutex.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/jack.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <sound/rt5665.h>
+
+#include "rl6231.h"
+#include "rt5665.h"
+
+#define RT5665_NUM_SUPPLIES 3
+
+static const char *rt5665_supply_names[RT5665_NUM_SUPPLIES] = {
+ "AVDD",
+ "MICVDD",
+ "VBAT",
+};
+
+struct rt5665_priv {
+ struct snd_soc_codec *codec;
+ struct rt5665_platform_data pdata;
+ struct regmap *regmap;
+ struct gpio_desc *gpiod_ldo1_en;
+ struct gpio_desc *gpiod_reset;
+ struct snd_soc_jack *hs_jack;
+ struct regulator_bulk_data supplies[RT5665_NUM_SUPPLIES];
+ struct delayed_work jack_detect_work;
+ struct delayed_work calibrate_work;
+ struct delayed_work jd_check_work;
+ struct mutex calibrate_mutex;
+
+ int sysclk;
+ int sysclk_src;
+ int lrck[RT5665_AIFS];
+ int bclk[RT5665_AIFS];
+ int master[RT5665_AIFS];
+ int id;
+
+ int pll_src;
+ int pll_in;
+ int pll_out;
+
+ int jack_type;
+ int irq_work_delay_time;
+ unsigned int sar_adc_value;
+};
+
+static const struct reg_default rt5665_reg[] = {
+ {0x0000, 0x0000},
+ {0x0001, 0xc8c8},
+ {0x0002, 0x8080},
+ {0x0003, 0x8000},
+ {0x0004, 0xc80a},
+ {0x0005, 0x0000},
+ {0x0006, 0x0000},
+ {0x0007, 0x0000},
+ {0x000a, 0x0000},
+ {0x000b, 0x0000},
+ {0x000c, 0x0000},
+ {0x000d, 0x0000},
+ {0x000f, 0x0808},
+ {0x0010, 0x4040},
+ {0x0011, 0x0000},
+ {0x0012, 0x1404},
+ {0x0013, 0x1000},
+ {0x0014, 0xa00a},
+ {0x0015, 0x0404},
+ {0x0016, 0x0404},
+ {0x0017, 0x0011},
+ {0x0018, 0xafaf},
+ {0x0019, 0xafaf},
+ {0x001a, 0xafaf},
+ {0x001b, 0x0011},
+ {0x001c, 0x2f2f},
+ {0x001d, 0x2f2f},
+ {0x001e, 0x2f2f},
+ {0x001f, 0x0000},
+ {0x0020, 0x0000},
+ {0x0021, 0x0000},
+ {0x0022, 0x5757},
+ {0x0023, 0x0039},
+ {0x0026, 0xc0c0},
+ {0x0027, 0xc0c0},
+ {0x0028, 0xc0c0},
+ {0x0029, 0x8080},
+ {0x002a, 0xaaaa},
+ {0x002b, 0xaaaa},
+ {0x002c, 0xaba8},
+ {0x002d, 0x0000},
+ {0x002e, 0x0000},
+ {0x002f, 0x0000},
+ {0x0030, 0x0000},
+ {0x0031, 0x5000},
+ {0x0032, 0x0000},
+ {0x0033, 0x0000},
+ {0x0034, 0x0000},
+ {0x0035, 0x0000},
+ {0x003a, 0x0000},
+ {0x003b, 0x0000},
+ {0x003c, 0x00ff},
+ {0x003d, 0x0000},
+ {0x003e, 0x00ff},
+ {0x003f, 0x0000},
+ {0x0040, 0x0000},
+ {0x0041, 0x00ff},
+ {0x0042, 0x0000},
+ {0x0043, 0x00ff},
+ {0x0044, 0x0c0c},
+ {0x0049, 0xc00b},
+ {0x004a, 0x0000},
+ {0x004b, 0x031f},
+ {0x004d, 0x0000},
+ {0x004e, 0x001f},
+ {0x004f, 0x0000},
+ {0x0050, 0x001f},
+ {0x0052, 0xf000},
+ {0x0061, 0x0000},
+ {0x0062, 0x0000},
+ {0x0063, 0x003e},
+ {0x0064, 0x0000},
+ {0x0065, 0x0000},
+ {0x0066, 0x003f},
+ {0x0067, 0x0000},
+ {0x006b, 0x0000},
+ {0x006d, 0xff00},
+ {0x006e, 0x2808},
+ {0x006f, 0x000a},
+ {0x0070, 0x8000},
+ {0x0071, 0x8000},
+ {0x0072, 0x8000},
+ {0x0073, 0x7000},
+ {0x0074, 0x7770},
+ {0x0075, 0x0002},
+ {0x0076, 0x0001},
+ {0x0078, 0x00f0},
+ {0x0079, 0x0000},
+ {0x007a, 0x0000},
+ {0x007b, 0x0000},
+ {0x007c, 0x0000},
+ {0x007d, 0x0123},
+ {0x007e, 0x4500},
+ {0x007f, 0x8003},
+ {0x0080, 0x0000},
+ {0x0081, 0x0000},
+ {0x0082, 0x0000},
+ {0x0083, 0x0000},
+ {0x0084, 0x0000},
+ {0x0085, 0x0000},
+ {0x0086, 0x0008},
+ {0x0087, 0x0000},
+ {0x0088, 0x0000},
+ {0x0089, 0x0000},
+ {0x008a, 0x0000},
+ {0x008b, 0x0000},
+ {0x008c, 0x0003},
+ {0x008e, 0x0060},
+ {0x008f, 0x1000},
+ {0x0091, 0x0c26},
+ {0x0092, 0x0073},
+ {0x0093, 0x0000},
+ {0x0094, 0x0080},
+ {0x0098, 0x0000},
+ {0x0099, 0x0000},
+ {0x009a, 0x0007},
+ {0x009f, 0x0000},
+ {0x00a0, 0x0000},
+ {0x00a1, 0x0002},
+ {0x00a2, 0x0001},
+ {0x00a3, 0x0002},
+ {0x00a4, 0x0001},
+ {0x00ae, 0x2040},
+ {0x00af, 0x0000},
+ {0x00b6, 0x0000},
+ {0x00b7, 0x0000},
+ {0x00b8, 0x0000},
+ {0x00b9, 0x0000},
+ {0x00ba, 0x0002},
+ {0x00bb, 0x0000},
+ {0x00be, 0x0000},
+ {0x00c0, 0x0000},
+ {0x00c1, 0x0aaa},
+ {0x00c2, 0xaa80},
+ {0x00c3, 0x0003},
+ {0x00c4, 0x0000},
+ {0x00d0, 0x0000},
+ {0x00d1, 0x2244},
+ {0x00d3, 0x3300},
+ {0x00d4, 0x2200},
+ {0x00d9, 0x0809},
+ {0x00da, 0x0000},
+ {0x00db, 0x0008},
+ {0x00dc, 0x00c0},
+ {0x00dd, 0x6724},
+ {0x00de, 0x3131},
+ {0x00df, 0x0008},
+ {0x00e0, 0x4000},
+ {0x00e1, 0x3131},
+ {0x00e2, 0x600c},
+ {0x00ea, 0xb320},
+ {0x00eb, 0x0000},
+ {0x00ec, 0xb300},
+ {0x00ed, 0x0000},
+ {0x00ee, 0xb320},
+ {0x00ef, 0x0000},
+ {0x00f0, 0x0201},
+ {0x00f1, 0x0ddd},
+ {0x00f2, 0x0ddd},
+ {0x00f6, 0x0000},
+ {0x00f7, 0x0000},
+ {0x00f8, 0x0000},
+ {0x00fa, 0x0000},
+ {0x00fb, 0x0000},
+ {0x00fc, 0x0000},
+ {0x00fd, 0x0000},
+ {0x00fe, 0x10ec},
+ {0x00ff, 0x6451},
+ {0x0100, 0xaaaa},
+ {0x0101, 0x000a},
+ {0x010a, 0xaaaa},
+ {0x010b, 0xa0a0},
+ {0x010c, 0xaeae},
+ {0x010d, 0xaaaa},
+ {0x010e, 0xaaaa},
+ {0x010f, 0xaaaa},
+ {0x0110, 0xe002},
+ {0x0111, 0xa402},
+ {0x0112, 0xaaaa},
+ {0x0113, 0x2000},
+ {0x0117, 0x0f00},
+ {0x0125, 0x0410},
+ {0x0132, 0x0000},
+ {0x0133, 0x0000},
+ {0x0137, 0x5540},
+ {0x0138, 0x3700},
+ {0x0139, 0x79a1},
+ {0x013a, 0x2020},
+ {0x013b, 0x2020},
+ {0x013c, 0x2005},
+ {0x013f, 0x0000},
+ {0x0145, 0x0002},
+ {0x0146, 0x0000},
+ {0x0147, 0x0000},
+ {0x0148, 0x0000},
+ {0x0150, 0x0000},
+ {0x0160, 0x4eff},
+ {0x0161, 0x0080},
+ {0x0162, 0x0200},
+ {0x0163, 0x0800},
+ {0x0164, 0x0000},
+ {0x0165, 0x0000},
+ {0x0166, 0x0000},
+ {0x0167, 0x000f},
+ {0x0170, 0x4e87},
+ {0x0171, 0x0080},
+ {0x0172, 0x0200},
+ {0x0173, 0x0800},
+ {0x0174, 0x00ff},
+ {0x0175, 0x0000},
+ {0x0190, 0x413d},
+ {0x0191, 0x4139},
+ {0x0192, 0x4135},
+ {0x0193, 0x413d},
+ {0x0194, 0x0000},
+ {0x0195, 0x0000},
+ {0x0196, 0x0000},
+ {0x0197, 0x0000},
+ {0x0198, 0x0000},
+ {0x0199, 0x0000},
+ {0x01a0, 0x1e64},
+ {0x01a1, 0x06a3},
+ {0x01a2, 0x0000},
+ {0x01a3, 0x0000},
+ {0x01a4, 0x0000},
+ {0x01a5, 0x0000},
+ {0x01a6, 0x0000},
+ {0x01a7, 0x8000},
+ {0x01a8, 0x0000},
+ {0x01a9, 0x0000},
+ {0x01aa, 0x0000},
+ {0x01ab, 0x0000},
+ {0x01b5, 0x0000},
+ {0x01b6, 0x01c3},
+ {0x01b7, 0x02a0},
+ {0x01b8, 0x03e9},
+ {0x01b9, 0x1389},
+ {0x01ba, 0xc351},
+ {0x01bb, 0x0009},
+ {0x01bc, 0x0018},
+ {0x01bd, 0x002a},
+ {0x01be, 0x004c},
+ {0x01bf, 0x0097},
+ {0x01c0, 0x433d},
+ {0x01c1, 0x0000},
+ {0x01c2, 0x0000},
+ {0x01c3, 0x0000},
+ {0x01c4, 0x0000},
+ {0x01c5, 0x0000},
+ {0x01c6, 0x0000},
+ {0x01c7, 0x0000},
+ {0x01c8, 0x40af},
+ {0x01c9, 0x0702},
+ {0x01ca, 0x0000},
+ {0x01cb, 0x0000},
+ {0x01cc, 0x5757},
+ {0x01cd, 0x5757},
+ {0x01ce, 0x5757},
+ {0x01cf, 0x5757},
+ {0x01d0, 0x5757},
+ {0x01d1, 0x5757},
+ {0x01d2, 0x5757},
+ {0x01d3, 0x5757},
+ {0x01d4, 0x5757},
+ {0x01d5, 0x5757},
+ {0x01d6, 0x003c},
+ {0x01da, 0x0000},
+ {0x01db, 0x0000},
+ {0x01dc, 0x0000},
+ {0x01de, 0x7c00},
+ {0x01df, 0x0320},
+ {0x01e0, 0x06a1},
+ {0x01e1, 0x0000},
+ {0x01e2, 0x0000},
+ {0x01e3, 0x0000},
+ {0x01e4, 0x0000},
+ {0x01e6, 0x0001},
+ {0x01e7, 0x0000},
+ {0x01e8, 0x0000},
+ {0x01ea, 0xbf3f},
+ {0x01eb, 0x0000},
+ {0x01ec, 0x0000},
+ {0x01ed, 0x0000},
+ {0x01ee, 0x0000},
+ {0x01ef, 0x0000},
+ {0x01f0, 0x0000},
+ {0x01f1, 0x0000},
+ {0x01f2, 0x0000},
+ {0x01f3, 0x0000},
+ {0x01f4, 0x0000},
+ {0x0200, 0x0000},
+ {0x0201, 0x0000},
+ {0x0202, 0x0000},
+ {0x0203, 0x0000},
+ {0x0204, 0x0000},
+ {0x0205, 0x0000},
+ {0x0206, 0x0000},
+ {0x0207, 0x0000},
+ {0x0208, 0x0000},
+ {0x0210, 0x60b1},
+ {0x0211, 0xa005},
+ {0x0212, 0x024c},
+ {0x0213, 0xf7ff},
+ {0x0214, 0x024c},
+ {0x0215, 0x0102},
+ {0x0216, 0x00a3},
+ {0x0217, 0x0048},
+ {0x0218, 0xa2c0},
+ {0x0219, 0x0400},
+ {0x021a, 0x00c8},
+ {0x021b, 0x00c0},
+ {0x02ff, 0x0110},
+ {0x0300, 0x001f},
+ {0x0301, 0x032c},
+ {0x0302, 0x5f21},
+ {0x0303, 0x4000},
+ {0x0304, 0x4000},
+ {0x0305, 0x06d5},
+ {0x0306, 0x8000},
+ {0x0307, 0x0700},
+ {0x0310, 0x4560},
+ {0x0311, 0xa4a8},
+ {0x0312, 0x7418},
+ {0x0313, 0x0000},
+ {0x0314, 0x0006},
+ {0x0315, 0xffff},
+ {0x0316, 0xc400},
+ {0x0317, 0x0000},
+ {0x0330, 0x00a6},
+ {0x0331, 0x04c3},
+ {0x0332, 0x27c8},
+ {0x0333, 0xbf50},
+ {0x0334, 0x0045},
+ {0x0335, 0x0007},
+ {0x0336, 0x7418},
+ {0x0337, 0x0501},
+ {0x0338, 0x0000},
+ {0x0339, 0x0010},
+ {0x033a, 0x1010},
+ {0x03c0, 0x7e00},
+ {0x03c1, 0x8000},
+ {0x03c2, 0x8000},
+ {0x03c3, 0x8000},
+ {0x03c4, 0x8000},
+ {0x03c5, 0x8000},
+ {0x03c6, 0x8000},
+ {0x03c7, 0x8000},
+ {0x03c8, 0x8000},
+ {0x03c9, 0x8000},
+ {0x03ca, 0x8000},
+ {0x03cb, 0x8000},
+ {0x03cc, 0x8000},
+ {0x03d0, 0x0000},
+ {0x03d1, 0x0000},
+ {0x03d2, 0x0000},
+ {0x03d3, 0x0000},
+ {0x03d4, 0x2000},
+ {0x03d5, 0x2000},
+ {0x03d6, 0x0000},
+ {0x03d7, 0x0000},
+ {0x03d8, 0x2000},
+ {0x03d9, 0x2000},
+ {0x03da, 0x2000},
+ {0x03db, 0x2000},
+ {0x03dc, 0x0000},
+ {0x03dd, 0x0000},
+ {0x03de, 0x0000},
+ {0x03df, 0x2000},
+ {0x03e0, 0x0000},
+ {0x03e1, 0x0000},
+ {0x03e2, 0x0000},
+ {0x03e3, 0x0000},
+ {0x03e4, 0x0000},
+ {0x03e5, 0x0000},
+ {0x03e6, 0x0000},
+ {0x03e7, 0x0000},
+ {0x03e8, 0x0000},
+ {0x03e9, 0x0000},
+ {0x03ea, 0x0000},
+ {0x03eb, 0x0000},
+ {0x03ec, 0x0000},
+ {0x03ed, 0x0000},
+ {0x03ee, 0x0000},
+ {0x03ef, 0x0000},
+ {0x03f0, 0x0800},
+ {0x03f1, 0x0800},
+ {0x03f2, 0x0800},
+ {0x03f3, 0x0800},
+};
+
+static bool rt5665_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RT5665_RESET:
+ case RT5665_EJD_CTRL_2:
+ case RT5665_GPIO_STA:
+ case RT5665_INT_ST_1:
+ case RT5665_IL_CMD_1:
+ case RT5665_4BTN_IL_CMD_1:
+ case RT5665_PSV_IL_CMD_1:
+ case RT5665_AJD1_CTRL:
+ case RT5665_JD_CTRL_3:
+ case RT5665_STO_NG2_CTRL_1:
+ case RT5665_SAR_IL_CMD_4:
+ case RT5665_DEVICE_ID:
+ case RT5665_STO1_DAC_SIL_DET ... RT5665_STO2_DAC_SIL_DET:
+ case RT5665_MONO_AMP_CALIB_STA1 ... RT5665_MONO_AMP_CALIB_STA6:
+ case RT5665_HP_IMP_SENS_CTRL_12 ... RT5665_HP_IMP_SENS_CTRL_15:
+ case RT5665_HP_CALIB_STA_1 ... RT5665_HP_CALIB_STA_11:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rt5665_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RT5665_RESET:
+ case RT5665_VENDOR_ID:
+ case RT5665_VENDOR_ID_1:
+ case RT5665_DEVICE_ID:
+ case RT5665_LOUT:
+ case RT5665_HP_CTRL_1:
+ case RT5665_HP_CTRL_2:
+ case RT5665_MONO_OUT:
+ case RT5665_HPL_GAIN:
+ case RT5665_HPR_GAIN:
+ case RT5665_MONO_GAIN:
+ case RT5665_CAL_BST_CTRL:
+ case RT5665_CBJ_BST_CTRL:
+ case RT5665_IN1_IN2:
+ case RT5665_IN3_IN4:
+ case RT5665_INL1_INR1_VOL:
+ case RT5665_EJD_CTRL_1:
+ case RT5665_EJD_CTRL_2:
+ case RT5665_EJD_CTRL_3:
+ case RT5665_EJD_CTRL_4:
+ case RT5665_EJD_CTRL_5:
+ case RT5665_EJD_CTRL_6:
+ case RT5665_EJD_CTRL_7:
+ case RT5665_DAC2_CTRL:
+ case RT5665_DAC2_DIG_VOL:
+ case RT5665_DAC1_DIG_VOL:
+ case RT5665_DAC3_DIG_VOL:
+ case RT5665_DAC3_CTRL:
+ case RT5665_STO1_ADC_DIG_VOL:
+ case RT5665_MONO_ADC_DIG_VOL:
+ case RT5665_STO2_ADC_DIG_VOL:
+ case RT5665_STO1_ADC_BOOST:
+ case RT5665_MONO_ADC_BOOST:
+ case RT5665_STO2_ADC_BOOST:
+ case RT5665_HP_IMP_GAIN_1:
+ case RT5665_HP_IMP_GAIN_2:
+ case RT5665_STO1_ADC_MIXER:
+ case RT5665_MONO_ADC_MIXER:
+ case RT5665_STO2_ADC_MIXER:
+ case RT5665_AD_DA_MIXER:
+ case RT5665_STO1_DAC_MIXER:
+ case RT5665_MONO_DAC_MIXER:
+ case RT5665_STO2_DAC_MIXER:
+ case RT5665_A_DAC1_MUX:
+ case RT5665_A_DAC2_MUX:
+ case RT5665_DIG_INF2_DATA:
+ case RT5665_DIG_INF3_DATA:
+ case RT5665_PDM_OUT_CTRL:
+ case RT5665_PDM_DATA_CTRL_1:
+ case RT5665_PDM_DATA_CTRL_2:
+ case RT5665_PDM_DATA_CTRL_3:
+ case RT5665_PDM_DATA_CTRL_4:
+ case RT5665_REC1_GAIN:
+ case RT5665_REC1_L1_MIXER:
+ case RT5665_REC1_L2_MIXER:
+ case RT5665_REC1_R1_MIXER:
+ case RT5665_REC1_R2_MIXER:
+ case RT5665_REC2_GAIN:
+ case RT5665_REC2_L1_MIXER:
+ case RT5665_REC2_L2_MIXER:
+ case RT5665_REC2_R1_MIXER:
+ case RT5665_REC2_R2_MIXER:
+ case RT5665_CAL_REC:
+ case RT5665_ALC_BACK_GAIN:
+ case RT5665_MONOMIX_GAIN:
+ case RT5665_MONOMIX_IN_GAIN:
+ case RT5665_OUT_L_GAIN:
+ case RT5665_OUT_L_MIXER:
+ case RT5665_OUT_R_GAIN:
+ case RT5665_OUT_R_MIXER:
+ case RT5665_LOUT_MIXER:
+ case RT5665_PWR_DIG_1:
+ case RT5665_PWR_DIG_2:
+ case RT5665_PWR_ANLG_1:
+ case RT5665_PWR_ANLG_2:
+ case RT5665_PWR_ANLG_3:
+ case RT5665_PWR_MIXER:
+ case RT5665_PWR_VOL:
+ case RT5665_CLK_DET:
+ case RT5665_HPF_CTRL1:
+ case RT5665_DMIC_CTRL_1:
+ case RT5665_DMIC_CTRL_2:
+ case RT5665_I2S1_SDP:
+ case RT5665_I2S2_SDP:
+ case RT5665_I2S3_SDP:
+ case RT5665_ADDA_CLK_1:
+ case RT5665_ADDA_CLK_2:
+ case RT5665_I2S1_F_DIV_CTRL_1:
+ case RT5665_I2S1_F_DIV_CTRL_2:
+ case RT5665_TDM_CTRL_1:
+ case RT5665_TDM_CTRL_2:
+ case RT5665_TDM_CTRL_3:
+ case RT5665_TDM_CTRL_4:
+ case RT5665_TDM_CTRL_5:
+ case RT5665_TDM_CTRL_6:
+ case RT5665_TDM_CTRL_7:
+ case RT5665_TDM_CTRL_8:
+ case RT5665_GLB_CLK:
+ case RT5665_PLL_CTRL_1:
+ case RT5665_PLL_CTRL_2:
+ case RT5665_ASRC_1:
+ case RT5665_ASRC_2:
+ case RT5665_ASRC_3:
+ case RT5665_ASRC_4:
+ case RT5665_ASRC_5:
+ case RT5665_ASRC_6:
+ case RT5665_ASRC_7:
+ case RT5665_ASRC_8:
+ case RT5665_ASRC_9:
+ case RT5665_ASRC_10:
+ case RT5665_DEPOP_1:
+ case RT5665_DEPOP_2:
+ case RT5665_HP_CHARGE_PUMP_1:
+ case RT5665_HP_CHARGE_PUMP_2:
+ case RT5665_MICBIAS_1:
+ case RT5665_MICBIAS_2:
+ case RT5665_ASRC_12:
+ case RT5665_ASRC_13:
+ case RT5665_ASRC_14:
+ case RT5665_RC_CLK_CTRL:
+ case RT5665_I2S_M_CLK_CTRL_1:
+ case RT5665_I2S2_F_DIV_CTRL_1:
+ case RT5665_I2S2_F_DIV_CTRL_2:
+ case RT5665_I2S3_F_DIV_CTRL_1:
+ case RT5665_I2S3_F_DIV_CTRL_2:
+ case RT5665_EQ_CTRL_1:
+ case RT5665_EQ_CTRL_2:
+ case RT5665_IRQ_CTRL_1:
+ case RT5665_IRQ_CTRL_2:
+ case RT5665_IRQ_CTRL_3:
+ case RT5665_IRQ_CTRL_4:
+ case RT5665_IRQ_CTRL_5:
+ case RT5665_IRQ_CTRL_6:
+ case RT5665_INT_ST_1:
+ case RT5665_GPIO_CTRL_1:
+ case RT5665_GPIO_CTRL_2:
+ case RT5665_GPIO_CTRL_3:
+ case RT5665_GPIO_CTRL_4:
+ case RT5665_GPIO_STA:
+ case RT5665_HP_AMP_DET_CTRL_1:
+ case RT5665_HP_AMP_DET_CTRL_2:
+ case RT5665_MID_HP_AMP_DET:
+ case RT5665_LOW_HP_AMP_DET:
+ case RT5665_SV_ZCD_1:
+ case RT5665_SV_ZCD_2:
+ case RT5665_IL_CMD_1:
+ case RT5665_IL_CMD_2:
+ case RT5665_IL_CMD_3:
+ case RT5665_IL_CMD_4:
+ case RT5665_4BTN_IL_CMD_1:
+ case RT5665_4BTN_IL_CMD_2:
+ case RT5665_4BTN_IL_CMD_3:
+ case RT5665_PSV_IL_CMD_1:
+ case RT5665_ADC_STO1_HP_CTRL_1:
+ case RT5665_ADC_STO1_HP_CTRL_2:
+ case RT5665_ADC_MONO_HP_CTRL_1:
+ case RT5665_ADC_MONO_HP_CTRL_2:
+ case RT5665_ADC_STO2_HP_CTRL_1:
+ case RT5665_ADC_STO2_HP_CTRL_2:
+ case RT5665_AJD1_CTRL:
+ case RT5665_JD1_THD:
+ case RT5665_JD2_THD:
+ case RT5665_JD_CTRL_1:
+ case RT5665_JD_CTRL_2:
+ case RT5665_JD_CTRL_3:
+ case RT5665_DIG_MISC:
+ case RT5665_DUMMY_2:
+ case RT5665_DUMMY_3:
+ case RT5665_DAC_ADC_DIG_VOL1:
+ case RT5665_DAC_ADC_DIG_VOL2:
+ case RT5665_BIAS_CUR_CTRL_1:
+ case RT5665_BIAS_CUR_CTRL_2:
+ case RT5665_BIAS_CUR_CTRL_3:
+ case RT5665_BIAS_CUR_CTRL_4:
+ case RT5665_BIAS_CUR_CTRL_5:
+ case RT5665_BIAS_CUR_CTRL_6:
+ case RT5665_BIAS_CUR_CTRL_7:
+ case RT5665_BIAS_CUR_CTRL_8:
+ case RT5665_BIAS_CUR_CTRL_9:
+ case RT5665_BIAS_CUR_CTRL_10:
+ case RT5665_VREF_REC_OP_FB_CAP_CTRL:
+ case RT5665_CHARGE_PUMP_1:
+ case RT5665_DIG_IN_CTRL_1:
+ case RT5665_DIG_IN_CTRL_2:
+ case RT5665_PAD_DRIVING_CTRL:
+ case RT5665_SOFT_RAMP_DEPOP:
+ case RT5665_PLL:
+ case RT5665_CHOP_DAC:
+ case RT5665_CHOP_ADC:
+ case RT5665_CALIB_ADC_CTRL:
+ case RT5665_VOL_TEST:
+ case RT5665_TEST_MODE_CTRL_1:
+ case RT5665_TEST_MODE_CTRL_2:
+ case RT5665_TEST_MODE_CTRL_3:
+ case RT5665_TEST_MODE_CTRL_4:
+ case RT5665_BASSBACK_CTRL:
+ case RT5665_STO_NG2_CTRL_1:
+ case RT5665_STO_NG2_CTRL_2:
+ case RT5665_STO_NG2_CTRL_3:
+ case RT5665_STO_NG2_CTRL_4:
+ case RT5665_STO_NG2_CTRL_5:
+ case RT5665_STO_NG2_CTRL_6:
+ case RT5665_STO_NG2_CTRL_7:
+ case RT5665_STO_NG2_CTRL_8:
+ case RT5665_MONO_NG2_CTRL_1:
+ case RT5665_MONO_NG2_CTRL_2:
+ case RT5665_MONO_NG2_CTRL_3:
+ case RT5665_MONO_NG2_CTRL_4:
+ case RT5665_MONO_NG2_CTRL_5:
+ case RT5665_MONO_NG2_CTRL_6:
+ case RT5665_STO1_DAC_SIL_DET:
+ case RT5665_MONOL_DAC_SIL_DET:
+ case RT5665_MONOR_DAC_SIL_DET:
+ case RT5665_STO2_DAC_SIL_DET:
+ case RT5665_SIL_PSV_CTRL1:
+ case RT5665_SIL_PSV_CTRL2:
+ case RT5665_SIL_PSV_CTRL3:
+ case RT5665_SIL_PSV_CTRL4:
+ case RT5665_SIL_PSV_CTRL5:
+ case RT5665_SIL_PSV_CTRL6:
+ case RT5665_MONO_AMP_CALIB_CTRL_1:
+ case RT5665_MONO_AMP_CALIB_CTRL_2:
+ case RT5665_MONO_AMP_CALIB_CTRL_3:
+ case RT5665_MONO_AMP_CALIB_CTRL_4:
+ case RT5665_MONO_AMP_CALIB_CTRL_5:
+ case RT5665_MONO_AMP_CALIB_CTRL_6:
+ case RT5665_MONO_AMP_CALIB_CTRL_7:
+ case RT5665_MONO_AMP_CALIB_STA1:
+ case RT5665_MONO_AMP_CALIB_STA2:
+ case RT5665_MONO_AMP_CALIB_STA3:
+ case RT5665_MONO_AMP_CALIB_STA4:
+ case RT5665_MONO_AMP_CALIB_STA6:
+ case RT5665_HP_IMP_SENS_CTRL_01:
+ case RT5665_HP_IMP_SENS_CTRL_02:
+ case RT5665_HP_IMP_SENS_CTRL_03:
+ case RT5665_HP_IMP_SENS_CTRL_04:
+ case RT5665_HP_IMP_SENS_CTRL_05:
+ case RT5665_HP_IMP_SENS_CTRL_06:
+ case RT5665_HP_IMP_SENS_CTRL_07:
+ case RT5665_HP_IMP_SENS_CTRL_08:
+ case RT5665_HP_IMP_SENS_CTRL_09:
+ case RT5665_HP_IMP_SENS_CTRL_10:
+ case RT5665_HP_IMP_SENS_CTRL_11:
+ case RT5665_HP_IMP_SENS_CTRL_12:
+ case RT5665_HP_IMP_SENS_CTRL_13:
+ case RT5665_HP_IMP_SENS_CTRL_14:
+ case RT5665_HP_IMP_SENS_CTRL_15:
+ case RT5665_HP_IMP_SENS_CTRL_16:
+ case RT5665_HP_IMP_SENS_CTRL_17:
+ case RT5665_HP_IMP_SENS_CTRL_18:
+ case RT5665_HP_IMP_SENS_CTRL_19:
+ case RT5665_HP_IMP_SENS_CTRL_20:
+ case RT5665_HP_IMP_SENS_CTRL_21:
+ case RT5665_HP_IMP_SENS_CTRL_22:
+ case RT5665_HP_IMP_SENS_CTRL_23:
+ case RT5665_HP_IMP_SENS_CTRL_24:
+ case RT5665_HP_IMP_SENS_CTRL_25:
+ case RT5665_HP_IMP_SENS_CTRL_26:
+ case RT5665_HP_IMP_SENS_CTRL_27:
+ case RT5665_HP_IMP_SENS_CTRL_28:
+ case RT5665_HP_IMP_SENS_CTRL_29:
+ case RT5665_HP_IMP_SENS_CTRL_30:
+ case RT5665_HP_IMP_SENS_CTRL_31:
+ case RT5665_HP_IMP_SENS_CTRL_32:
+ case RT5665_HP_IMP_SENS_CTRL_33:
+ case RT5665_HP_IMP_SENS_CTRL_34:
+ case RT5665_HP_LOGIC_CTRL_1:
+ case RT5665_HP_LOGIC_CTRL_2:
+ case RT5665_HP_LOGIC_CTRL_3:
+ case RT5665_HP_CALIB_CTRL_1:
+ case RT5665_HP_CALIB_CTRL_2:
+ case RT5665_HP_CALIB_CTRL_3:
+ case RT5665_HP_CALIB_CTRL_4:
+ case RT5665_HP_CALIB_CTRL_5:
+ case RT5665_HP_CALIB_CTRL_6:
+ case RT5665_HP_CALIB_CTRL_7:
+ case RT5665_HP_CALIB_CTRL_9:
+ case RT5665_HP_CALIB_CTRL_10:
+ case RT5665_HP_CALIB_CTRL_11:
+ case RT5665_HP_CALIB_STA_1:
+ case RT5665_HP_CALIB_STA_2:
+ case RT5665_HP_CALIB_STA_3:
+ case RT5665_HP_CALIB_STA_4:
+ case RT5665_HP_CALIB_STA_5:
+ case RT5665_HP_CALIB_STA_6:
+ case RT5665_HP_CALIB_STA_7:
+ case RT5665_HP_CALIB_STA_8:
+ case RT5665_HP_CALIB_STA_9:
+ case RT5665_HP_CALIB_STA_10:
+ case RT5665_HP_CALIB_STA_11:
+ case RT5665_PGM_TAB_CTRL1:
+ case RT5665_PGM_TAB_CTRL2:
+ case RT5665_PGM_TAB_CTRL3:
+ case RT5665_PGM_TAB_CTRL4:
+ case RT5665_PGM_TAB_CTRL5:
+ case RT5665_PGM_TAB_CTRL6:
+ case RT5665_PGM_TAB_CTRL7:
+ case RT5665_PGM_TAB_CTRL8:
+ case RT5665_PGM_TAB_CTRL9:
+ case RT5665_SAR_IL_CMD_1:
+ case RT5665_SAR_IL_CMD_2:
+ case RT5665_SAR_IL_CMD_3:
+ case RT5665_SAR_IL_CMD_4:
+ case RT5665_SAR_IL_CMD_5:
+ case RT5665_SAR_IL_CMD_6:
+ case RT5665_SAR_IL_CMD_7:
+ case RT5665_SAR_IL_CMD_8:
+ case RT5665_SAR_IL_CMD_9:
+ case RT5665_SAR_IL_CMD_10:
+ case RT5665_SAR_IL_CMD_11:
+ case RT5665_SAR_IL_CMD_12:
+ case RT5665_DRC1_CTRL_0:
+ case RT5665_DRC1_CTRL_1:
+ case RT5665_DRC1_CTRL_2:
+ case RT5665_DRC1_CTRL_3:
+ case RT5665_DRC1_CTRL_4:
+ case RT5665_DRC1_CTRL_5:
+ case RT5665_DRC1_CTRL_6:
+ case RT5665_DRC1_HARD_LMT_CTRL_1:
+ case RT5665_DRC1_HARD_LMT_CTRL_2:
+ case RT5665_DRC1_PRIV_1:
+ case RT5665_DRC1_PRIV_2:
+ case RT5665_DRC1_PRIV_3:
+ case RT5665_DRC1_PRIV_4:
+ case RT5665_DRC1_PRIV_5:
+ case RT5665_DRC1_PRIV_6:
+ case RT5665_DRC1_PRIV_7:
+ case RT5665_DRC1_PRIV_8:
+ case RT5665_ALC_PGA_CTRL_1:
+ case RT5665_ALC_PGA_CTRL_2:
+ case RT5665_ALC_PGA_CTRL_3:
+ case RT5665_ALC_PGA_CTRL_4:
+ case RT5665_ALC_PGA_CTRL_5:
+ case RT5665_ALC_PGA_CTRL_6:
+ case RT5665_ALC_PGA_CTRL_7:
+ case RT5665_ALC_PGA_CTRL_8:
+ case RT5665_ALC_PGA_STA_1:
+ case RT5665_ALC_PGA_STA_2:
+ case RT5665_ALC_PGA_STA_3:
+ case RT5665_EQ_AUTO_RCV_CTRL1:
+ case RT5665_EQ_AUTO_RCV_CTRL2:
+ case RT5665_EQ_AUTO_RCV_CTRL3:
+ case RT5665_EQ_AUTO_RCV_CTRL4:
+ case RT5665_EQ_AUTO_RCV_CTRL5:
+ case RT5665_EQ_AUTO_RCV_CTRL6:
+ case RT5665_EQ_AUTO_RCV_CTRL7:
+ case RT5665_EQ_AUTO_RCV_CTRL8:
+ case RT5665_EQ_AUTO_RCV_CTRL9:
+ case RT5665_EQ_AUTO_RCV_CTRL10:
+ case RT5665_EQ_AUTO_RCV_CTRL11:
+ case RT5665_EQ_AUTO_RCV_CTRL12:
+ case RT5665_EQ_AUTO_RCV_CTRL13:
+ case RT5665_ADC_L_EQ_LPF1_A1:
+ case RT5665_R_EQ_LPF1_A1:
+ case RT5665_L_EQ_LPF1_H0:
+ case RT5665_R_EQ_LPF1_H0:
+ case RT5665_L_EQ_BPF1_A1:
+ case RT5665_R_EQ_BPF1_A1:
+ case RT5665_L_EQ_BPF1_A2:
+ case RT5665_R_EQ_BPF1_A2:
+ case RT5665_L_EQ_BPF1_H0:
+ case RT5665_R_EQ_BPF1_H0:
+ case RT5665_L_EQ_BPF2_A1:
+ case RT5665_R_EQ_BPF2_A1:
+ case RT5665_L_EQ_BPF2_A2:
+ case RT5665_R_EQ_BPF2_A2:
+ case RT5665_L_EQ_BPF2_H0:
+ case RT5665_R_EQ_BPF2_H0:
+ case RT5665_L_EQ_BPF3_A1:
+ case RT5665_R_EQ_BPF3_A1:
+ case RT5665_L_EQ_BPF3_A2:
+ case RT5665_R_EQ_BPF3_A2:
+ case RT5665_L_EQ_BPF3_H0:
+ case RT5665_R_EQ_BPF3_H0:
+ case RT5665_L_EQ_BPF4_A1:
+ case RT5665_R_EQ_BPF4_A1:
+ case RT5665_L_EQ_BPF4_A2:
+ case RT5665_R_EQ_BPF4_A2:
+ case RT5665_L_EQ_BPF4_H0:
+ case RT5665_R_EQ_BPF4_H0:
+ case RT5665_L_EQ_HPF1_A1:
+ case RT5665_R_EQ_HPF1_A1:
+ case RT5665_L_EQ_HPF1_H0:
+ case RT5665_R_EQ_HPF1_H0:
+ case RT5665_L_EQ_PRE_VOL:
+ case RT5665_R_EQ_PRE_VOL:
+ case RT5665_L_EQ_POST_VOL:
+ case RT5665_R_EQ_POST_VOL:
+ case RT5665_SCAN_MODE_CTRL:
+ case RT5665_I2C_MODE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const DECLARE_TLV_DB_SCALE(hp_vol_tlv, -2250, 150, 0);
+static const DECLARE_TLV_DB_SCALE(mono_vol_tlv, -1400, 150, 0);
+static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0);
+static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
+static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
+static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
+static const DECLARE_TLV_DB_SCALE(in_bst_tlv, -1200, 75, 0);
+
+/* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
+static const DECLARE_TLV_DB_RANGE(bst_tlv,
+ 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+ 1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
+ 2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
+ 3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
+ 6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
+ 7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
+ 8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
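+
+/*
+ * Note on the scales above: TLV_DB_SCALE values are in 1/100 dB. As
+ * declared, dac_vol_tlv steps 3.75 dB from a floor of -656.25 dB, so the
+ * 175-step "DAC1 Playback Volume" control below reaches 0 dB at its
+ * maximum (-656.25 + 175 * 3.75 = 0).
+ */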
+
+/* Interface data select */
+static const char * const rt5665_data_select[] = {
+ "L/R", "R/L", "L/L", "R/R"
+};
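+
+/*
+ * "L/R" keeps the normal channel order, "R/L" swaps left and right, and
+ * "L/L"/"R/R" put one channel's data on both slots.
+ */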
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if1_1_01_adc_enum,
+ RT5665_TDM_CTRL_2, RT5665_I2S1_1_DS_ADC_SLOT01_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if1_1_23_adc_enum,
+ RT5665_TDM_CTRL_2, RT5665_I2S1_1_DS_ADC_SLOT23_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if1_1_45_adc_enum,
+ RT5665_TDM_CTRL_2, RT5665_I2S1_1_DS_ADC_SLOT45_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if1_1_67_adc_enum,
+ RT5665_TDM_CTRL_2, RT5665_I2S1_1_DS_ADC_SLOT67_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if1_2_01_adc_enum,
+ RT5665_TDM_CTRL_2, RT5665_I2S1_2_DS_ADC_SLOT01_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if1_2_23_adc_enum,
+ RT5665_TDM_CTRL_2, RT5665_I2S1_2_DS_ADC_SLOT23_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if1_2_45_adc_enum,
+ RT5665_TDM_CTRL_2, RT5665_I2S1_2_DS_ADC_SLOT45_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if1_2_67_adc_enum,
+ RT5665_TDM_CTRL_2, RT5665_I2S1_2_DS_ADC_SLOT67_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if2_1_dac_enum,
+ RT5665_DIG_INF2_DATA, RT5665_IF2_1_DAC_SEL_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if2_1_adc_enum,
+ RT5665_DIG_INF2_DATA, RT5665_IF2_1_ADC_SEL_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if2_2_dac_enum,
+ RT5665_DIG_INF2_DATA, RT5665_IF2_2_DAC_SEL_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if2_2_adc_enum,
+ RT5665_DIG_INF2_DATA, RT5665_IF2_2_ADC_SEL_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if3_dac_enum,
+ RT5665_DIG_INF3_DATA, RT5665_IF3_DAC_SEL_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if3_adc_enum,
+ RT5665_DIG_INF3_DATA, RT5665_IF3_ADC_SEL_SFT, rt5665_data_select);
+
+static const struct snd_kcontrol_new rt5665_if1_1_01_adc_swap_mux =
+ SOC_DAPM_ENUM("IF1_1 01 ADC Swap Mux", rt5665_if1_1_01_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if1_1_23_adc_swap_mux =
+ SOC_DAPM_ENUM("IF1_1 23 ADC Swap Mux", rt5665_if1_1_23_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if1_1_45_adc_swap_mux =
+ SOC_DAPM_ENUM("IF1_1 45 ADC Swap Mux", rt5665_if1_1_45_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if1_1_67_adc_swap_mux =
+ SOC_DAPM_ENUM("IF1_1 67 ADC Swap Mux", rt5665_if1_1_67_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if1_2_01_adc_swap_mux =
+ SOC_DAPM_ENUM("IF1_2 01 ADC Swap Mux", rt5665_if1_2_01_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if1_2_23_adc_swap_mux =
+ SOC_DAPM_ENUM("IF1_2 23 ADC1 Swap Mux", rt5665_if1_2_23_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if1_2_45_adc_swap_mux =
+ SOC_DAPM_ENUM("IF1_2 45 ADC1 Swap Mux", rt5665_if1_2_45_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if1_2_67_adc_swap_mux =
+ SOC_DAPM_ENUM("IF1_2 67 ADC1 Swap Mux", rt5665_if1_2_67_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if2_1_dac_swap_mux =
+ SOC_DAPM_ENUM("IF2_1 DAC Swap Source", rt5665_if2_1_dac_enum);
+
+static const struct snd_kcontrol_new rt5665_if2_1_adc_swap_mux =
+ SOC_DAPM_ENUM("IF2_1 ADC Swap Source", rt5665_if2_1_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if2_2_dac_swap_mux =
+ SOC_DAPM_ENUM("IF2_2 DAC Swap Source", rt5665_if2_2_dac_enum);
+
+static const struct snd_kcontrol_new rt5665_if2_2_adc_swap_mux =
+ SOC_DAPM_ENUM("IF2_2 ADC Swap Source", rt5665_if2_2_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if3_dac_swap_mux =
+ SOC_DAPM_ENUM("IF3 DAC Swap Source", rt5665_if3_dac_enum);
+
+static const struct snd_kcontrol_new rt5665_if3_adc_swap_mux =
+ SOC_DAPM_ENUM("IF3 ADC Swap Source", rt5665_if3_adc_enum);
+
+static int rt5665_hp_vol_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ int ret = snd_soc_put_volsw(kcontrol, ucontrol);
+
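+ /*
+ * If the stereo noise gate (NG2) is active, toggle it off and back on so
+ * it takes the new gain into account (inferred intent; the sequence
+ * below simply disables and re-enables NG2 after the volume update).
+ */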
+ if (snd_soc_read(codec, RT5665_STO_NG2_CTRL_1) & RT5665_NG2_EN) {
+ snd_soc_update_bits(codec, RT5665_STO_NG2_CTRL_1,
+ RT5665_NG2_EN_MASK, RT5665_NG2_DIS);
+ snd_soc_update_bits(codec, RT5665_STO_NG2_CTRL_1,
+ RT5665_NG2_EN_MASK, RT5665_NG2_EN);
+ }
+
+ return ret;
+}
+
+static int rt5665_mono_vol_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ int ret = snd_soc_put_volsw(kcontrol, ucontrol);
+
+ if (snd_soc_read(codec, RT5665_MONO_NG2_CTRL_1) & RT5665_NG2_EN) {
+ snd_soc_update_bits(codec, RT5665_MONO_NG2_CTRL_1,
+ RT5665_NG2_EN_MASK, RT5665_NG2_DIS);
+ snd_soc_update_bits(codec, RT5665_MONO_NG2_CTRL_1,
+ RT5665_NG2_EN_MASK, RT5665_NG2_EN);
+ }
+
+ return ret;
+}
+
+/**
+ * rt5665_sel_asrc_clk_src - select ASRC clock source for a set of filters
+ * @codec: SoC audio codec device.
+ * @filter_mask: mask of filters.
+ * @clk_src: clock source
+ *
+ * The ASRC function handles asynchronous MCLK and LRCK. In addition, since
+ * the RT5665 only supports the standard 32fs and 64fs I2S formats, ASRC must
+ * be enabled to support special I2S clock ratios such as Intel's 100fs
+ * (100 * sampling rate). The ASRC function tracks the I2S clock and generates
+ * a corresponding system clock for the codec. This function selects the clock
+ * source for the set of filters specified by @filter_mask; the codec driver
+ * will turn on ASRC for those filters whose selected clock source is an ASRC.
+ */
+int rt5665_sel_asrc_clk_src(struct snd_soc_codec *codec,
+ unsigned int filter_mask, unsigned int clk_src)
+{
+ unsigned int asrc2_mask = 0;
+ unsigned int asrc2_value = 0;
+ unsigned int asrc3_mask = 0;
+ unsigned int asrc3_value = 0;
+
+ switch (clk_src) {
+ case RT5665_CLK_SEL_SYS:
+ case RT5665_CLK_SEL_I2S1_ASRC:
+ case RT5665_CLK_SEL_I2S2_ASRC:
+ case RT5665_CLK_SEL_I2S3_ASRC:
+ case RT5665_CLK_SEL_SYS2:
+ case RT5665_CLK_SEL_SYS3:
+ case RT5665_CLK_SEL_SYS4:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (filter_mask & RT5665_DA_STEREO1_FILTER) {
+ asrc2_mask |= RT5665_DA_STO1_CLK_SEL_MASK;
+ asrc2_value = (asrc2_value & ~RT5665_DA_STO1_CLK_SEL_MASK)
+ | (clk_src << RT5665_DA_STO1_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5665_DA_STEREO2_FILTER) {
+ asrc2_mask |= RT5665_DA_STO2_CLK_SEL_MASK;
+ asrc2_value = (asrc2_value & ~RT5665_DA_STO2_CLK_SEL_MASK)
+ | (clk_src << RT5665_DA_STO2_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5665_DA_MONO_L_FILTER) {
+ asrc2_mask |= RT5665_DA_MONOL_CLK_SEL_MASK;
+ asrc2_value = (asrc2_value & ~RT5665_DA_MONOL_CLK_SEL_MASK)
+ | (clk_src << RT5665_DA_MONOL_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5665_DA_MONO_R_FILTER) {
+ asrc2_mask |= RT5665_DA_MONOR_CLK_SEL_MASK;
+ asrc2_value = (asrc2_value & ~RT5665_DA_MONOR_CLK_SEL_MASK)
+ | (clk_src << RT5665_DA_MONOR_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5665_AD_STEREO1_FILTER) {
+ asrc3_mask |= RT5665_AD_STO1_CLK_SEL_MASK;
+ asrc3_value = (asrc3_value & ~RT5665_AD_STO1_CLK_SEL_MASK)
+ | (clk_src << RT5665_AD_STO1_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5665_AD_STEREO2_FILTER) {
+ asrc3_mask |= RT5665_AD_STO2_CLK_SEL_MASK;
+ asrc3_value = (asrc3_value & ~RT5665_AD_STO2_CLK_SEL_MASK)
+ | (clk_src << RT5665_AD_STO2_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5665_AD_MONO_L_FILTER) {
+ asrc3_mask |= RT5665_AD_MONOL_CLK_SEL_MASK;
+ asrc3_value = (asrc3_value & ~RT5665_AD_MONOL_CLK_SEL_MASK)
+ | (clk_src << RT5665_AD_MONOL_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5665_AD_MONO_R_FILTER) {
+ asrc3_mask |= RT5665_AD_MONOR_CLK_SEL_MASK;
+ asrc3_value = (asrc3_value & ~RT5665_AD_MONOR_CLK_SEL_MASK)
+ | (clk_src << RT5665_AD_MONOR_CLK_SEL_SFT);
+ }
+
+ if (asrc2_mask)
+ snd_soc_update_bits(codec, RT5665_ASRC_2,
+ asrc2_mask, asrc2_value);
+
+ if (asrc3_mask)
+ snd_soc_update_bits(codec, RT5665_ASRC_3,
+ asrc3_mask, asrc3_value);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt5665_sel_asrc_clk_src);
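+
+/*
+ * Example (illustrative only, not taken from a specific machine driver):
+ * a board whose I2S1 link runs at a non-standard clock ratio could route
+ * the stereo1 DAC and ADC filters to the I2S1 ASRC tracker, e.g. from its
+ * hw_params hook:
+ *
+ *   rt5665_sel_asrc_clk_src(codec,
+ *                           RT5665_DA_STEREO1_FILTER |
+ *                           RT5665_AD_STEREO1_FILTER,
+ *                           RT5665_CLK_SEL_I2S1_ASRC);
+ */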
+
+static int rt5665_button_detect(struct snd_soc_codec *codec)
+{
+ int btn_type, val;
+
+ val = snd_soc_read(codec, RT5665_4BTN_IL_CMD_1);
+ btn_type = val & 0xfff0;
+ snd_soc_write(codec, RT5665_4BTN_IL_CMD_1, val);
+
+ return btn_type;
+}
+
+static void rt5665_enable_push_button_irq(struct snd_soc_codec *codec,
+ bool enable)
+{
+ if (enable) {
+ snd_soc_write(codec, RT5665_4BTN_IL_CMD_1, 0x000b);
+ snd_soc_write(codec, RT5665_IL_CMD_1, 0x0048);
+ snd_soc_update_bits(codec, RT5665_4BTN_IL_CMD_2,
+ RT5665_4BTN_IL_MASK | RT5665_4BTN_IL_RST_MASK,
+ RT5665_4BTN_IL_EN | RT5665_4BTN_IL_NOR);
+ snd_soc_update_bits(codec, RT5665_IRQ_CTRL_3,
+ RT5665_IL_IRQ_MASK, RT5665_IL_IRQ_EN);
+ } else {
+ snd_soc_update_bits(codec, RT5665_IRQ_CTRL_3,
+ RT5665_IL_IRQ_MASK, RT5665_IL_IRQ_DIS);
+ snd_soc_update_bits(codec, RT5665_4BTN_IL_CMD_2,
+ RT5665_4BTN_IL_MASK, RT5665_4BTN_IL_DIS);
+ snd_soc_update_bits(codec, RT5665_4BTN_IL_CMD_2,
+ RT5665_4BTN_IL_RST_MASK, RT5665_4BTN_IL_RST);
+ }
+}
+
+/**
+ * rt5665_headset_detect - Detect headset.
+ * @codec: SoC audio codec device.
+ * @jack_insert: Jack insert or not.
+ *
+ * Detect whether the inserted jack is a headset (with microphone) or a
+ * plain headphone.
+ *
+ * Returns the detected jack type.
+ */
+static int rt5665_headset_detect(struct snd_soc_codec *codec, int jack_insert)
+{
+ struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ unsigned int sar_hs_type, val;
+
+ if (jack_insert) {
+ snd_soc_dapm_force_enable_pin(dapm, "MICBIAS1");
+ snd_soc_dapm_sync(dapm);
+
+ regmap_update_bits(rt5665->regmap, RT5665_MICBIAS_2, 0x100,
+ 0x100);
+
+ regmap_read(rt5665->regmap, RT5665_GPIO_STA, &val);
+ if (val & 0x4) {
+ regmap_update_bits(rt5665->regmap, RT5665_EJD_CTRL_1,
+ 0x100, 0);
+
+ regmap_read(rt5665->regmap, RT5665_GPIO_STA, &val);
+ while (val & 0x4) {
+ usleep_range(10000, 15000);
+ regmap_read(rt5665->regmap, RT5665_GPIO_STA,
+ &val);
+ }
+ }
+
+ regmap_update_bits(rt5665->regmap, RT5665_EJD_CTRL_1,
+ 0x180, 0x180);
+ regmap_write(rt5665->regmap, RT5665_EJD_CTRL_3, 0x3424);
+ regmap_write(rt5665->regmap, RT5665_SAR_IL_CMD_1, 0xa291);
+
+ rt5665->sar_adc_value = snd_soc_read(rt5665->codec,
+ RT5665_SAR_IL_CMD_4) & 0x7ff;
+
+ sar_hs_type = rt5665->pdata.sar_hs_type ?
+ rt5665->pdata.sar_hs_type : 729;
+
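+ /*
+ * A SAR ADC reading above the threshold (platform-specific, defaulting
+ * to 729) indicates a microphone is present, i.e. a headset; otherwise
+ * the jack is reported as plain headphones.
+ */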
+ if (rt5665->sar_adc_value > sar_hs_type) {
+ rt5665->jack_type = SND_JACK_HEADSET;
+ rt5665_enable_push_button_irq(codec, true);
+ } else {
+ rt5665->jack_type = SND_JACK_HEADPHONE;
+ regmap_write(rt5665->regmap, RT5665_SAR_IL_CMD_1,
+ 0x2291);
+ regmap_update_bits(rt5665->regmap, RT5665_MICBIAS_2,
+ 0x100, 0);
+ snd_soc_dapm_disable_pin(dapm, "MICBIAS1");
+ snd_soc_dapm_sync(dapm);
+ }
+ } else {
+ regmap_write(rt5665->regmap, RT5665_SAR_IL_CMD_1, 0x2291);
+ regmap_update_bits(rt5665->regmap, RT5665_MICBIAS_2, 0x100, 0);
+ snd_soc_dapm_disable_pin(dapm, "MICBIAS1");
+ snd_soc_dapm_sync(dapm);
+ if (rt5665->jack_type == SND_JACK_HEADSET)
+ rt5665_enable_push_button_irq(codec, false);
+ rt5665->jack_type = 0;
+ }
+
+ dev_dbg(codec->dev, "jack_type = %d\n", rt5665->jack_type);
+ return rt5665->jack_type;
+}
+
+static irqreturn_t rt5665_irq(int irq, void *data)
+{
+ struct rt5665_priv *rt5665 = data;
+
+ mod_delayed_work(system_power_efficient_wq,
+ &rt5665->jack_detect_work, msecs_to_jiffies(250));
+
+ return IRQ_HANDLED;
+}
+
+static void rt5665_jd_check_handler(struct work_struct *work)
+{
+ struct rt5665_priv *rt5665 = container_of(work, struct rt5665_priv,
+ jd_check_work.work);
+
+ if (snd_soc_read(rt5665->codec, RT5665_AJD1_CTRL) & 0x0010) {
+ /* jack out */
+ rt5665->jack_type = rt5665_headset_detect(rt5665->codec, 0);
+
+ snd_soc_jack_report(rt5665->hs_jack, rt5665->jack_type,
+ SND_JACK_HEADSET |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3);
+ } else {
+ schedule_delayed_work(&rt5665->jd_check_work, 500);
+ }
+}
+
+int rt5665_set_jack_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *hs_jack)
+{
+ struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+
+ switch (rt5665->pdata.jd_src) {
+ case RT5665_JD1:
+ regmap_update_bits(rt5665->regmap, RT5665_GPIO_CTRL_1,
+ RT5665_GP1_PIN_MASK, RT5665_GP1_PIN_IRQ);
+ regmap_update_bits(rt5665->regmap, RT5665_RC_CLK_CTRL,
+ 0xc000, 0xc000);
+ regmap_update_bits(rt5665->regmap, RT5665_PWR_ANLG_2,
+ RT5665_PWR_JD1, RT5665_PWR_JD1);
+ regmap_update_bits(rt5665->regmap, RT5665_IRQ_CTRL_1, 0x8, 0x8);
+ break;
+
+ case RT5665_JD_NULL:
+ break;
+
+ default:
+ dev_warn(codec->dev, "Wrong JD source\n");
+ break;
+ }
+
+ rt5665->hs_jack = hs_jack;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt5665_set_jack_detect);
+
+static void rt5665_jack_detect_handler(struct work_struct *work)
+{
+ struct rt5665_priv *rt5665 =
+ container_of(work, struct rt5665_priv, jack_detect_work.work);
+ int val, btn_type;
+
+ while (!rt5665->codec) {
+ pr_debug("%s codec = null\n", __func__);
+ usleep_range(10000, 15000);
+ }
+
+ while (!rt5665->codec->component.card->instantiated) {
+ pr_debug("%s\n", __func__);
+ usleep_range(10000, 15000);
+ }
+
+ mutex_lock(&rt5665->calibrate_mutex);
+
+ val = snd_soc_read(rt5665->codec, RT5665_AJD1_CTRL) & 0x0010;
+ if (!val) {
+ /* jack in */
+ if (rt5665->jack_type == 0) {
+ /* jack was out, report jack type */
+ rt5665->jack_type =
+ rt5665_headset_detect(rt5665->codec, 1);
+ } else {
+ /* jack is already in, report button event */
+ rt5665->jack_type = SND_JACK_HEADSET;
+ btn_type = rt5665_button_detect(rt5665->codec);
+ /*
+ * The rt5665 can report three kinds of button behavior:
+ * single click, double click and hold. However, we
+ * currently only report button pressed/released events,
+ * so all three behaviors are treated as a button press.
+ */
+ switch (btn_type) {
+ case 0x8000:
+ case 0x4000:
+ case 0x2000:
+ rt5665->jack_type |= SND_JACK_BTN_0;
+ break;
+ case 0x1000:
+ case 0x0800:
+ case 0x0400:
+ rt5665->jack_type |= SND_JACK_BTN_1;
+ break;
+ case 0x0200:
+ case 0x0100:
+ case 0x0080:
+ rt5665->jack_type |= SND_JACK_BTN_2;
+ break;
+ case 0x0040:
+ case 0x0020:
+ case 0x0010:
+ rt5665->jack_type |= SND_JACK_BTN_3;
+ break;
+ case 0x0000: /* unpressed */
+ break;
+ default:
+ btn_type = 0;
+ dev_err(rt5665->codec->dev,
+ "Unexpected button code 0x%04x\n",
+ btn_type);
+ break;
+ }
+ }
+ } else {
+ /* jack out */
+ rt5665->jack_type = rt5665_headset_detect(rt5665->codec, 0);
+ }
+
+ snd_soc_jack_report(rt5665->hs_jack, rt5665->jack_type,
+ SND_JACK_HEADSET |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3);
+
+ if (rt5665->jack_type & (SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3))
+ schedule_delayed_work(&rt5665->jd_check_work, 0);
+ else
+ cancel_delayed_work_sync(&rt5665->jd_check_work);
+
+ mutex_unlock(&rt5665->calibrate_mutex);
+}
+
+static const struct snd_kcontrol_new rt5665_snd_controls[] = {
+ /* Headphone Output Volume */
+ SOC_DOUBLE_R_EXT_TLV("Headphone Playback Volume", RT5665_HPL_GAIN,
+ RT5665_HPR_GAIN, RT5665_G_HP_SFT, 15, 1, snd_soc_get_volsw,
+ rt5665_hp_vol_put, hp_vol_tlv),
+
+ /* Mono Output Volume */
+ SOC_SINGLE_EXT_TLV("Mono Playback Volume", RT5665_MONO_GAIN,
+ RT5665_L_VOL_SFT, 15, 1, snd_soc_get_volsw,
+ rt5665_mono_vol_put, mono_vol_tlv),
+
+ /* Output Volume */
+ SOC_DOUBLE_TLV("OUT Playback Volume", RT5665_LOUT, RT5665_L_VOL_SFT,
+ RT5665_R_VOL_SFT, 39, 1, out_vol_tlv),
+
+ /* DAC Digital Volume */
+ SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5665_DAC1_DIG_VOL,
+ RT5665_L_VOL_SFT, RT5665_R_VOL_SFT, 175, 0, dac_vol_tlv),
+ SOC_DOUBLE_TLV("DAC2 Playback Volume", RT5665_DAC2_DIG_VOL,
+ RT5665_L_VOL_SFT, RT5665_R_VOL_SFT, 175, 0, dac_vol_tlv),
+ SOC_DOUBLE("DAC2 Playback Switch", RT5665_DAC2_CTRL,
+ RT5665_M_DAC2_L_VOL_SFT, RT5665_M_DAC2_R_VOL_SFT, 1, 1),
+
+ /* IN1/IN2/IN3/IN4 Volume */
+ SOC_SINGLE_TLV("IN1 Boost Volume", RT5665_IN1_IN2,
+ RT5665_BST1_SFT, 69, 0, in_bst_tlv),
+ SOC_SINGLE_TLV("IN2 Boost Volume", RT5665_IN1_IN2,
+ RT5665_BST2_SFT, 69, 0, in_bst_tlv),
+ SOC_SINGLE_TLV("IN3 Boost Volume", RT5665_IN3_IN4,
+ RT5665_BST3_SFT, 69, 0, in_bst_tlv),
+ SOC_SINGLE_TLV("IN4 Boost Volume", RT5665_IN3_IN4,
+ RT5665_BST4_SFT, 69, 0, in_bst_tlv),
+ SOC_SINGLE_TLV("CBJ Boost Volume", RT5665_CBJ_BST_CTRL,
+ RT5665_BST_CBJ_SFT, 8, 0, bst_tlv),
+
+ /* INL/INR Volume Control */
+ SOC_DOUBLE_TLV("IN Capture Volume", RT5665_INL1_INR1_VOL,
+ RT5665_INL_VOL_SFT, RT5665_INR_VOL_SFT, 31, 1, in_vol_tlv),
+
+ /* ADC Digital Volume Control */
+ SOC_DOUBLE("STO1 ADC Capture Switch", RT5665_STO1_ADC_DIG_VOL,
+ RT5665_L_MUTE_SFT, RT5665_R_MUTE_SFT, 1, 1),
+ SOC_DOUBLE_TLV("STO1 ADC Capture Volume", RT5665_STO1_ADC_DIG_VOL,
+ RT5665_L_VOL_SFT, RT5665_R_VOL_SFT, 127, 0, adc_vol_tlv),
+ SOC_DOUBLE("Mono ADC Capture Switch", RT5665_MONO_ADC_DIG_VOL,
+ RT5665_L_MUTE_SFT, RT5665_R_MUTE_SFT, 1, 1),
+ SOC_DOUBLE_TLV("Mono ADC Capture Volume", RT5665_MONO_ADC_DIG_VOL,
+ RT5665_L_VOL_SFT, RT5665_R_VOL_SFT, 127, 0, adc_vol_tlv),
+ SOC_DOUBLE("STO2 ADC Capture Switch", RT5665_STO2_ADC_DIG_VOL,
+ RT5665_L_MUTE_SFT, RT5665_R_MUTE_SFT, 1, 1),
+ SOC_DOUBLE_TLV("STO2 ADC Capture Volume", RT5665_STO2_ADC_DIG_VOL,
+ RT5665_L_VOL_SFT, RT5665_R_VOL_SFT, 127, 0, adc_vol_tlv),
+
+ /* ADC Boost Volume Control */
+ SOC_DOUBLE_TLV("STO1 ADC Boost Gain Volume", RT5665_STO1_ADC_BOOST,
+ RT5665_STO1_ADC_L_BST_SFT, RT5665_STO1_ADC_R_BST_SFT,
+ 3, 0, adc_bst_tlv),
+
+ SOC_DOUBLE_TLV("Mono ADC Boost Gain Volume", RT5665_MONO_ADC_BOOST,
+ RT5665_MONO_ADC_L_BST_SFT, RT5665_MONO_ADC_R_BST_SFT,
+ 3, 0, adc_bst_tlv),
+
+ SOC_DOUBLE_TLV("STO2 ADC Boost Gain Volume", RT5665_STO2_ADC_BOOST,
+ RT5665_STO2_ADC_L_BST_SFT, RT5665_STO2_ADC_R_BST_SFT,
+ 3, 0, adc_bst_tlv),
+};
+
+/**
+ * set_dmic_clk - Set the DMIC clock divider.
+ *
+ * @w: DAPM widget.
+ * @kcontrol: The kcontrol of this widget.
+ * @event: Event id.
+ *
+ * Choose a DMIC clock between 1MHz and 3MHz.
+ * The clock should be as close to 3MHz as possible.
+ */
+static int set_dmic_clk(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+ int pd, idx = -EINVAL;
+
+ pd = rl6231_get_pre_div(rt5665->regmap,
+ RT5665_ADDA_CLK_1, RT5665_I2S_PD1_SFT);
+ idx = rl6231_calc_dmic_clk(rt5665->sysclk / pd);
+
+ if (idx < 0)
+ dev_err(codec->dev, "Failed to set DMIC clock\n");
+ else {
+ snd_soc_update_bits(codec, RT5665_DMIC_CTRL_1,
+ RT5665_DMIC_CLK_MASK, idx << RT5665_DMIC_CLK_SFT);
+ }
+ return idx;
+}
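+
+/*
+ * Illustrative numbers (assumed, not from the datasheet): with a 24.576 MHz
+ * system clock and an I2S pre-divider of 2, the base rate handed to
+ * rl6231_calc_dmic_clk() is 12.288 MHz; a divider of 4 would give 3.072 MHz
+ * and a divider of 6 would give 2.048 MHz, and the helper picks the index
+ * that best fits the 1MHz-3MHz window described above.
+ */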
+
+static int rt5665_charge_pump_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_update_bits(codec, RT5665_HP_CHARGE_PUMP_1,
+ RT5665_PM_HP_MASK | RT5665_OSW_L_MASK,
+ RT5665_PM_HP_HV | RT5665_OSW_L_EN);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, RT5665_HP_CHARGE_PUMP_1,
+ RT5665_PM_HP_MASK | RT5665_OSW_L_MASK,
+ RT5665_PM_HP_LV | RT5665_OSW_L_DIS);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int is_sys_clk_from_pll(struct snd_soc_dapm_widget *w,
+ struct snd_soc_dapm_widget *sink)
+{
+ unsigned int val;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ val = snd_soc_read(codec, RT5665_GLB_CLK);
+ val &= RT5665_SCLK_SRC_MASK;
+ if (val == RT5665_SCLK_SRC_PLL1)
+ return 1;
+ else
+ return 0;
+}
+
+static int is_using_asrc(struct snd_soc_dapm_widget *w,
+ struct snd_soc_dapm_widget *sink)
+{
+ unsigned int reg, shift, val;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (w->shift) {
+ case RT5665_ADC_MONO_R_ASRC_SFT:
+ reg = RT5665_ASRC_3;
+ shift = RT5665_AD_MONOR_CLK_SEL_SFT;
+ break;
+ case RT5665_ADC_MONO_L_ASRC_SFT:
+ reg = RT5665_ASRC_3;
+ shift = RT5665_AD_MONOL_CLK_SEL_SFT;
+ break;
+ case RT5665_ADC_STO1_ASRC_SFT:
+ reg = RT5665_ASRC_3;
+ shift = RT5665_AD_STO1_CLK_SEL_SFT;
+ break;
+ case RT5665_ADC_STO2_ASRC_SFT:
+ reg = RT5665_ASRC_3;
+ shift = RT5665_AD_STO2_CLK_SEL_SFT;
+ break;
+ case RT5665_DAC_MONO_R_ASRC_SFT:
+ reg = RT5665_ASRC_2;
+ shift = RT5665_DA_MONOR_CLK_SEL_SFT;
+ break;
+ case RT5665_DAC_MONO_L_ASRC_SFT:
+ reg = RT5665_ASRC_2;
+ shift = RT5665_DA_MONOL_CLK_SEL_SFT;
+ break;
+ case RT5665_DAC_STO1_ASRC_SFT:
+ reg = RT5665_ASRC_2;
+ shift = RT5665_DA_STO1_CLK_SEL_SFT;
+ break;
+ case RT5665_DAC_STO2_ASRC_SFT:
+ reg = RT5665_ASRC_2;
+ shift = RT5665_DA_STO2_CLK_SEL_SFT;
+ break;
+ default:
+ return 0;
+ }
+
+ val = (snd_soc_read(codec, reg) >> shift) & 0xf;
+ switch (val) {
+ case RT5665_CLK_SEL_I2S1_ASRC:
+ case RT5665_CLK_SEL_I2S2_ASRC:
+ case RT5665_CLK_SEL_I2S3_ASRC:
+ /* I2S_Pre_Div1 should be 1 in asrc mode */
+ snd_soc_update_bits(codec, RT5665_ADDA_CLK_1,
+ RT5665_I2S_PD1_MASK, RT5665_I2S_PD1_2);
+ return 1;
+ default:
+ return 0;
+ }
+
+}
+
+/* Digital Mixer */
+static const struct snd_kcontrol_new rt5665_sto1_adc_l_mix[] = {
+ SOC_DAPM_SINGLE("ADC1 Switch", RT5665_STO1_ADC_MIXER,
+ RT5665_M_STO1_ADC_L1_SFT, 1, 1),
+ SOC_DAPM_SINGLE("ADC2 Switch", RT5665_STO1_ADC_MIXER,
+ RT5665_M_STO1_ADC_L2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_sto1_adc_r_mix[] = {
+ SOC_DAPM_SINGLE("ADC1 Switch", RT5665_STO1_ADC_MIXER,
+ RT5665_M_STO1_ADC_R1_SFT, 1, 1),
+ SOC_DAPM_SINGLE("ADC2 Switch", RT5665_STO1_ADC_MIXER,
+ RT5665_M_STO1_ADC_R2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_sto2_adc_l_mix[] = {
+ SOC_DAPM_SINGLE("ADC1 Switch", RT5665_STO2_ADC_MIXER,
+ RT5665_M_STO2_ADC_L1_SFT, 1, 1),
+ SOC_DAPM_SINGLE("ADC2 Switch", RT5665_STO2_ADC_MIXER,
+ RT5665_M_STO2_ADC_L2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_sto2_adc_r_mix[] = {
+ SOC_DAPM_SINGLE("ADC1 Switch", RT5665_STO2_ADC_MIXER,
+ RT5665_M_STO2_ADC_R1_SFT, 1, 1),
+ SOC_DAPM_SINGLE("ADC2 Switch", RT5665_STO2_ADC_MIXER,
+ RT5665_M_STO2_ADC_R2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_mono_adc_l_mix[] = {
+ SOC_DAPM_SINGLE("ADC1 Switch", RT5665_MONO_ADC_MIXER,
+ RT5665_M_MONO_ADC_L1_SFT, 1, 1),
+ SOC_DAPM_SINGLE("ADC2 Switch", RT5665_MONO_ADC_MIXER,
+ RT5665_M_MONO_ADC_L2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_mono_adc_r_mix[] = {
+ SOC_DAPM_SINGLE("ADC1 Switch", RT5665_MONO_ADC_MIXER,
+ RT5665_M_MONO_ADC_R1_SFT, 1, 1),
+ SOC_DAPM_SINGLE("ADC2 Switch", RT5665_MONO_ADC_MIXER,
+ RT5665_M_MONO_ADC_R2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_dac_l_mix[] = {
+ SOC_DAPM_SINGLE("Stereo ADC Switch", RT5665_AD_DA_MIXER,
+ RT5665_M_ADCMIX_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC1 Switch", RT5665_AD_DA_MIXER,
+ RT5665_M_DAC1_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_dac_r_mix[] = {
+ SOC_DAPM_SINGLE("Stereo ADC Switch", RT5665_AD_DA_MIXER,
+ RT5665_M_ADCMIX_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC1 Switch", RT5665_AD_DA_MIXER,
+ RT5665_M_DAC1_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_sto1_dac_l_mix[] = {
+ SOC_DAPM_SINGLE("DAC L1 Switch", RT5665_STO1_DAC_MIXER,
+ RT5665_M_DAC_L1_STO_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R1 Switch", RT5665_STO1_DAC_MIXER,
+ RT5665_M_DAC_R1_STO_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC L2 Switch", RT5665_STO1_DAC_MIXER,
+ RT5665_M_DAC_L2_STO_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R2 Switch", RT5665_STO1_DAC_MIXER,
+ RT5665_M_DAC_R2_STO_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_sto1_dac_r_mix[] = {
+ SOC_DAPM_SINGLE("DAC L1 Switch", RT5665_STO1_DAC_MIXER,
+ RT5665_M_DAC_L1_STO_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R1 Switch", RT5665_STO1_DAC_MIXER,
+ RT5665_M_DAC_R1_STO_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC L2 Switch", RT5665_STO1_DAC_MIXER,
+ RT5665_M_DAC_L2_STO_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R2 Switch", RT5665_STO1_DAC_MIXER,
+ RT5665_M_DAC_R2_STO_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_sto2_dac_l_mix[] = {
+ SOC_DAPM_SINGLE("DAC L1 Switch", RT5665_STO2_DAC_MIXER,
+ RT5665_M_DAC_L1_STO2_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC L2 Switch", RT5665_STO2_DAC_MIXER,
+ RT5665_M_DAC_L2_STO2_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC L3 Switch", RT5665_STO2_DAC_MIXER,
+ RT5665_M_DAC_L3_STO2_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_sto2_dac_r_mix[] = {
+ SOC_DAPM_SINGLE("DAC R1 Switch", RT5665_STO2_DAC_MIXER,
+ RT5665_M_DAC_R1_STO2_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R2 Switch", RT5665_STO2_DAC_MIXER,
+ RT5665_M_DAC_R2_STO2_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R3 Switch", RT5665_STO2_DAC_MIXER,
+ RT5665_M_DAC_R3_STO2_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_mono_dac_l_mix[] = {
+ SOC_DAPM_SINGLE("DAC L1 Switch", RT5665_MONO_DAC_MIXER,
+ RT5665_M_DAC_L1_MONO_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R1 Switch", RT5665_MONO_DAC_MIXER,
+ RT5665_M_DAC_R1_MONO_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC L2 Switch", RT5665_MONO_DAC_MIXER,
+ RT5665_M_DAC_L2_MONO_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R2 Switch", RT5665_MONO_DAC_MIXER,
+ RT5665_M_DAC_R2_MONO_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_mono_dac_r_mix[] = {
+ SOC_DAPM_SINGLE("DAC L1 Switch", RT5665_MONO_DAC_MIXER,
+ RT5665_M_DAC_L1_MONO_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R1 Switch", RT5665_MONO_DAC_MIXER,
+ RT5665_M_DAC_R1_MONO_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC L2 Switch", RT5665_MONO_DAC_MIXER,
+ RT5665_M_DAC_L2_MONO_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R2 Switch", RT5665_MONO_DAC_MIXER,
+ RT5665_M_DAC_R2_MONO_R_SFT, 1, 1),
+};
+
+/* Analog Input Mixer */
+static const struct snd_kcontrol_new rt5665_rec1_l_mix[] = {
+ SOC_DAPM_SINGLE("CBJ Switch", RT5665_REC1_L2_MIXER,
+ RT5665_M_CBJ_RM1_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("INL Switch", RT5665_REC1_L2_MIXER,
+ RT5665_M_INL_RM1_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("INR Switch", RT5665_REC1_L2_MIXER,
+ RT5665_M_INR_RM1_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST4 Switch", RT5665_REC1_L2_MIXER,
+ RT5665_M_BST4_RM1_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST3 Switch", RT5665_REC1_L2_MIXER,
+ RT5665_M_BST3_RM1_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST2 Switch", RT5665_REC1_L2_MIXER,
+ RT5665_M_BST2_RM1_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 Switch", RT5665_REC1_L2_MIXER,
+ RT5665_M_BST1_RM1_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_rec1_r_mix[] = {
+ SOC_DAPM_SINGLE("MONOVOL Switch", RT5665_REC1_R2_MIXER,
+ RT5665_M_AEC_REF_RM1_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("INR Switch", RT5665_REC1_R2_MIXER,
+ RT5665_M_INR_RM1_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST4 Switch", RT5665_REC1_R2_MIXER,
+ RT5665_M_BST4_RM1_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST3 Switch", RT5665_REC1_R2_MIXER,
+ RT5665_M_BST3_RM1_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST2 Switch", RT5665_REC1_R2_MIXER,
+ RT5665_M_BST2_RM1_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 Switch", RT5665_REC1_R2_MIXER,
+ RT5665_M_BST1_RM1_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_rec2_l_mix[] = {
+ SOC_DAPM_SINGLE("INL Switch", RT5665_REC2_L2_MIXER,
+ RT5665_M_INL_RM2_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("INR Switch", RT5665_REC2_L2_MIXER,
+ RT5665_M_INR_RM2_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("CBJ Switch", RT5665_REC2_L2_MIXER,
+ RT5665_M_CBJ_RM2_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST4 Switch", RT5665_REC2_L2_MIXER,
+ RT5665_M_BST4_RM2_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST3 Switch", RT5665_REC2_L2_MIXER,
+ RT5665_M_BST3_RM2_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST2 Switch", RT5665_REC2_L2_MIXER,
+ RT5665_M_BST2_RM2_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 Switch", RT5665_REC2_L2_MIXER,
+ RT5665_M_BST1_RM2_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_rec2_r_mix[] = {
+ SOC_DAPM_SINGLE("MONOVOL Switch", RT5665_REC2_R2_MIXER,
+ RT5665_M_MONOVOL_RM2_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("INL Switch", RT5665_REC2_R2_MIXER,
+ RT5665_M_INL_RM2_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("INR Switch", RT5665_REC2_R2_MIXER,
+ RT5665_M_INR_RM2_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST4 Switch", RT5665_REC2_R2_MIXER,
+ RT5665_M_BST4_RM2_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST3 Switch", RT5665_REC2_R2_MIXER,
+ RT5665_M_BST3_RM2_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST2 Switch", RT5665_REC2_R2_MIXER,
+ RT5665_M_BST2_RM2_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 Switch", RT5665_REC2_R2_MIXER,
+ RT5665_M_BST1_RM2_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_monovol_mix[] = {
+ SOC_DAPM_SINGLE("DAC L2 Switch", RT5665_MONOMIX_IN_GAIN,
+ RT5665_M_DAC_L2_MM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("RECMIX2L Switch", RT5665_MONOMIX_IN_GAIN,
+ RT5665_M_RECMIC2L_MM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 Switch", RT5665_MONOMIX_IN_GAIN,
+ RT5665_M_BST1_MM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST2 Switch", RT5665_MONOMIX_IN_GAIN,
+ RT5665_M_BST2_MM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST3 Switch", RT5665_MONOMIX_IN_GAIN,
+ RT5665_M_BST3_MM_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_out_l_mix[] = {
+ SOC_DAPM_SINGLE("DAC L2 Switch", RT5665_OUT_L_MIXER,
+ RT5665_M_DAC_L2_OM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("INL Switch", RT5665_OUT_L_MIXER,
+ RT5665_M_IN_L_OM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 Switch", RT5665_OUT_L_MIXER,
+ RT5665_M_BST1_OM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST2 Switch", RT5665_OUT_L_MIXER,
+ RT5665_M_BST2_OM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST3 Switch", RT5665_OUT_L_MIXER,
+ RT5665_M_BST3_OM_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_out_r_mix[] = {
+ SOC_DAPM_SINGLE("DAC R2 Switch", RT5665_OUT_R_MIXER,
+ RT5665_M_DAC_R2_OM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("INR Switch", RT5665_OUT_R_MIXER,
+ RT5665_M_IN_R_OM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST2 Switch", RT5665_OUT_R_MIXER,
+ RT5665_M_BST2_OM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST3 Switch", RT5665_OUT_R_MIXER,
+ RT5665_M_BST3_OM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST4 Switch", RT5665_OUT_R_MIXER,
+ RT5665_M_BST4_OM_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_mono_mix[] = {
+ SOC_DAPM_SINGLE("DAC L2 Switch", RT5665_MONOMIX_IN_GAIN,
+ RT5665_M_DAC_L2_MA_SFT, 1, 1),
+ SOC_DAPM_SINGLE("MONOVOL Switch", RT5665_MONOMIX_IN_GAIN,
+ RT5665_M_MONOVOL_MA_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_lout_l_mix[] = {
+ SOC_DAPM_SINGLE("DAC L2 Switch", RT5665_LOUT_MIXER,
+ RT5665_M_DAC_L2_LM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("OUTVOL L Switch", RT5665_LOUT_MIXER,
+ RT5665_M_OV_L_LM_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_lout_r_mix[] = {
+ SOC_DAPM_SINGLE("DAC R2 Switch", RT5665_LOUT_MIXER,
+ RT5665_M_DAC_R2_LM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("OUTVOL R Switch", RT5665_LOUT_MIXER,
+ RT5665_M_OV_R_LM_SFT, 1, 1),
+};
+
+/*DAC L2, DAC R2*/
+/*MX-17 [6:4], MX-17 [2:0]*/
+static const char * const rt5665_dac2_src[] = {
+ "IF1 DAC2", "IF2_1 DAC", "IF2_2 DAC", "IF3 DAC", "Mono ADC MIX"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_dac_l2_enum, RT5665_DAC2_CTRL,
+ RT5665_DAC_L2_SEL_SFT, rt5665_dac2_src);
+
+static const struct snd_kcontrol_new rt5665_dac_l2_mux =
+ SOC_DAPM_ENUM("Digital DAC L2 Source", rt5665_dac_l2_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_dac_r2_enum, RT5665_DAC2_CTRL,
+ RT5665_DAC_R2_SEL_SFT, rt5665_dac2_src);
+
+static const struct snd_kcontrol_new rt5665_dac_r2_mux =
+ SOC_DAPM_ENUM("Digital DAC R2 Source", rt5665_dac_r2_enum);
+
+/*DAC L3, DAC R3*/
+/*MX-1B [6:4], MX-1B [2:0]*/
+static const char * const rt5665_dac3_src[] = {
+ "IF1 DAC2", "IF2_1 DAC", "IF2_2 DAC", "IF3 DAC", "STO2 ADC MIX"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_dac_l3_enum, RT5665_DAC3_CTRL,
+ RT5665_DAC_L3_SEL_SFT, rt5665_dac3_src);
+
+static const struct snd_kcontrol_new rt5665_dac_l3_mux =
+ SOC_DAPM_ENUM("Digital DAC L3 Source", rt5665_dac_l3_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_dac_r3_enum, RT5665_DAC3_CTRL,
+ RT5665_DAC_R3_SEL_SFT, rt5665_dac3_src);
+
+static const struct snd_kcontrol_new rt5665_dac_r3_mux =
+ SOC_DAPM_ENUM("Digital DAC R3 Source", rt5665_dac_r3_enum);
+
+/* STO1 ADC1 Source */
+/* MX-26 [13] [5] */
+static const char * const rt5665_sto1_adc1_src[] = {
+ "DD Mux", "ADC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_sto1_adc1l_enum, RT5665_STO1_ADC_MIXER,
+ RT5665_STO1_ADC1L_SRC_SFT, rt5665_sto1_adc1_src);
+
+static const struct snd_kcontrol_new rt5665_sto1_adc1l_mux =
+ SOC_DAPM_ENUM("Stereo1 ADC1L Source", rt5665_sto1_adc1l_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_sto1_adc1r_enum, RT5665_STO1_ADC_MIXER,
+ RT5665_STO1_ADC1R_SRC_SFT, rt5665_sto1_adc1_src);
+
+static const struct snd_kcontrol_new rt5665_sto1_adc1r_mux =
+ SOC_DAPM_ENUM("Stereo1 ADC1L Source", rt5665_sto1_adc1r_enum);
+
+/* STO1 ADC Source */
+/* MX-26 [11:10] [3:2] */
+static const char * const rt5665_sto1_adc_src[] = {
+ "ADC1 L", "ADC1 R", "ADC2 L", "ADC2 R"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_sto1_adcl_enum, RT5665_STO1_ADC_MIXER,
+ RT5665_STO1_ADCL_SRC_SFT, rt5665_sto1_adc_src);
+
+static const struct snd_kcontrol_new rt5665_sto1_adcl_mux =
+ SOC_DAPM_ENUM("Stereo1 ADCL Source", rt5665_sto1_adcl_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_sto1_adcr_enum, RT5665_STO1_ADC_MIXER,
+ RT5665_STO1_ADCR_SRC_SFT, rt5665_sto1_adc_src);
+
+static const struct snd_kcontrol_new rt5665_sto1_adcr_mux =
+ SOC_DAPM_ENUM("Stereo1 ADCR Source", rt5665_sto1_adcr_enum);
+
+/* STO1 ADC2 Source */
+/* MX-26 [12] [4] */
+static const char * const rt5665_sto1_adc2_src[] = {
+ "DAC MIX", "DMIC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_sto1_adc2l_enum, RT5665_STO1_ADC_MIXER,
+ RT5665_STO1_ADC2L_SRC_SFT, rt5665_sto1_adc2_src);
+
+static const struct snd_kcontrol_new rt5665_sto1_adc2l_mux =
+ SOC_DAPM_ENUM("Stereo1 ADC2L Source", rt5665_sto1_adc2l_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_sto1_adc2r_enum, RT5665_STO1_ADC_MIXER,
+ RT5665_STO1_ADC2R_SRC_SFT, rt5665_sto1_adc2_src);
+
+static const struct snd_kcontrol_new rt5665_sto1_adc2r_mux =
+ SOC_DAPM_ENUM("Stereo1 ADC2R Source", rt5665_sto1_adc2r_enum);
+
+/* STO1 DMIC Source */
+/* MX-26 [8] */
+static const char * const rt5665_sto1_dmic_src[] = {
+ "DMIC1", "DMIC2"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_sto1_dmic_enum, RT5665_STO1_ADC_MIXER,
+ RT5665_STO1_DMIC_SRC_SFT, rt5665_sto1_dmic_src);
+
+static const struct snd_kcontrol_new rt5665_sto1_dmic_mux =
+ SOC_DAPM_ENUM("Stereo1 DMIC Mux", rt5665_sto1_dmic_enum);
+
+/* MX-26 [9] */
+static const char * const rt5665_sto1_dd_l_src[] = {
+ "STO2 DAC", "MONO DAC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_sto1_dd_l_enum, RT5665_STO1_ADC_MIXER,
+ RT5665_STO1_DD_L_SRC_SFT, rt5665_sto1_dd_l_src);
+
+static const struct snd_kcontrol_new rt5665_sto1_dd_l_mux =
+ SOC_DAPM_ENUM("Stereo1 DD L Source", rt5665_sto1_dd_l_enum);
+
+/* MX-26 [1:0] */
+static const char * const rt5665_sto1_dd_r_src[] = {
+ "STO2 DAC", "MONO DAC", "AEC REF"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_sto1_dd_r_enum, RT5665_STO1_ADC_MIXER,
+ RT5665_STO1_DD_R_SRC_SFT, rt5665_sto1_dd_r_src);
+
+static const struct snd_kcontrol_new rt5665_sto1_dd_r_mux =
+ SOC_DAPM_ENUM("Stereo1 DD R Source", rt5665_sto1_dd_r_enum);
+
+/* MONO ADC L2 Source */
+/* MX-27 [12] */
+static const char * const rt5665_mono_adc_l2_src[] = {
+ "DAC MIXL", "DMIC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_mono_adc_l2_enum, RT5665_MONO_ADC_MIXER,
+ RT5665_MONO_ADC_L2_SRC_SFT, rt5665_mono_adc_l2_src);
+
+static const struct snd_kcontrol_new rt5665_mono_adc_l2_mux =
+ SOC_DAPM_ENUM("Mono ADC L2 Source", rt5665_mono_adc_l2_enum);
+
+
+/* MONO ADC L1 Source */
+/* MX-27 [13] */
+static const char * const rt5665_mono_adc_l1_src[] = {
+ "DD Mux", "ADC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_mono_adc_l1_enum, RT5665_MONO_ADC_MIXER,
+ RT5665_MONO_ADC_L1_SRC_SFT, rt5665_mono_adc_l1_src);
+
+static const struct snd_kcontrol_new rt5665_mono_adc_l1_mux =
+ SOC_DAPM_ENUM("Mono ADC L1 Source", rt5665_mono_adc_l1_enum);
+
+/* MX-27 [9][1]*/
+static const char * const rt5665_mono_dd_src[] = {
+ "STO2 DAC", "MONO DAC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_mono_dd_l_enum, RT5665_MONO_ADC_MIXER,
+ RT5665_MONO_DD_L_SRC_SFT, rt5665_mono_dd_src);
+
+static const struct snd_kcontrol_new rt5665_mono_dd_l_mux =
+ SOC_DAPM_ENUM("Mono DD L Source", rt5665_mono_dd_l_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_mono_dd_r_enum, RT5665_MONO_ADC_MIXER,
+ RT5665_MONO_DD_R_SRC_SFT, rt5665_mono_dd_src);
+
+static const struct snd_kcontrol_new rt5665_mono_dd_r_mux =
+ SOC_DAPM_ENUM("Mono DD R Source", rt5665_mono_dd_r_enum);
+
+/* MONO ADC L Source, MONO ADC R Source*/
+/* MX-27 [11:10], MX-27 [3:2] */
+static const char * const rt5665_mono_adc_src[] = {
+ "ADC1 L", "ADC1 R", "ADC2 L", "ADC2 R"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_mono_adc_l_enum, RT5665_MONO_ADC_MIXER,
+ RT5665_MONO_ADC_L_SRC_SFT, rt5665_mono_adc_src);
+
+static const struct snd_kcontrol_new rt5665_mono_adc_l_mux =
+ SOC_DAPM_ENUM("Mono ADC L Source", rt5665_mono_adc_l_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_mono_adcr_enum, RT5665_MONO_ADC_MIXER,
+ RT5665_MONO_ADC_R_SRC_SFT, rt5665_mono_adc_src);
+
+static const struct snd_kcontrol_new rt5665_mono_adc_r_mux =
+ SOC_DAPM_ENUM("Mono ADC R Source", rt5665_mono_adcr_enum);
+
+/* MONO DMIC L Source */
+/* MX-27 [8] */
+static const char * const rt5665_mono_dmic_l_src[] = {
+ "DMIC1 L", "DMIC2 L"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_mono_dmic_l_enum, RT5665_MONO_ADC_MIXER,
+ RT5665_MONO_DMIC_L_SRC_SFT, rt5665_mono_dmic_l_src);
+
+static const struct snd_kcontrol_new rt5665_mono_dmic_l_mux =
+ SOC_DAPM_ENUM("Mono DMIC L Source", rt5665_mono_dmic_l_enum);
+
+/* MONO ADC R2 Source */
+/* MX-27 [4] */
+static const char * const rt5665_mono_adc_r2_src[] = {
+ "DAC MIXR", "DMIC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_mono_adc_r2_enum, RT5665_MONO_ADC_MIXER,
+ RT5665_MONO_ADC_R2_SRC_SFT, rt5665_mono_adc_r2_src);
+
+static const struct snd_kcontrol_new rt5665_mono_adc_r2_mux =
+ SOC_DAPM_ENUM("Mono ADC R2 Source", rt5665_mono_adc_r2_enum);
+
+/* MONO ADC R1 Source */
+/* MX-27 [5] */
+static const char * const rt5665_mono_adc_r1_src[] = {
+ "DD Mux", "ADC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_mono_adc_r1_enum, RT5665_MONO_ADC_MIXER,
+ RT5665_MONO_ADC_R1_SRC_SFT, rt5665_mono_adc_r1_src);
+
+static const struct snd_kcontrol_new rt5665_mono_adc_r1_mux =
+ SOC_DAPM_ENUM("Mono ADC R1 Source", rt5665_mono_adc_r1_enum);
+
+/* MONO DMIC R Source */
+/* MX-27 [0] */
+static const char * const rt5665_mono_dmic_r_src[] = {
+ "DMIC1 R", "DMIC2 R"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_mono_dmic_r_enum, RT5665_MONO_ADC_MIXER,
+ RT5665_MONO_DMIC_R_SRC_SFT, rt5665_mono_dmic_r_src);
+
+static const struct snd_kcontrol_new rt5665_mono_dmic_r_mux =
+ SOC_DAPM_ENUM("Mono DMIC R Source", rt5665_mono_dmic_r_enum);
+
+
+/* STO2 ADC1 Source */
+/* MX-28 [13] [5] */
+static const char * const rt5665_sto2_adc1_src[] = {
+ "DD Mux", "ADC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_sto2_adc1l_enum, RT5665_STO2_ADC_MIXER,
+ RT5665_STO2_ADC1L_SRC_SFT, rt5665_sto2_adc1_src);
+
+static const struct snd_kcontrol_new rt5665_sto2_adc1l_mux =
+ SOC_DAPM_ENUM("Stereo2 ADC1L Source", rt5665_sto2_adc1l_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_sto2_adc1r_enum, RT5665_STO2_ADC_MIXER,
+ RT5665_STO2_ADC1R_SRC_SFT, rt5665_sto2_adc1_src);
+
+static const struct snd_kcontrol_new rt5665_sto2_adc1r_mux =
+ SOC_DAPM_ENUM("Stereo2 ADC1L Source", rt5665_sto2_adc1r_enum);
+
+/* STO2 ADC Source */
+/* MX-28 [11:10] [3:2] */
+static const char * const rt5665_sto2_adc_src[] = {
+ "ADC1 L", "ADC1 R", "ADC2 L"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_sto2_adcl_enum, RT5665_STO2_ADC_MIXER,
+ RT5665_STO2_ADCL_SRC_SFT, rt5665_sto2_adc_src);
+
+static const struct snd_kcontrol_new rt5665_sto2_adcl_mux =
+ SOC_DAPM_ENUM("Stereo2 ADCL Source", rt5665_sto2_adcl_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_sto2_adcr_enum, RT5665_STO2_ADC_MIXER,
+ RT5665_STO2_ADCR_SRC_SFT, rt5665_sto2_adc_src);
+
+static const struct snd_kcontrol_new rt5665_sto2_adcr_mux =
+ SOC_DAPM_ENUM("Stereo2 ADCR Source", rt5665_sto2_adcr_enum);
+
+/* STO2 ADC2 Source */
+/* MX-28 [12] [4] */
+static const char * const rt5665_sto2_adc2_src[] = {
+ "DAC MIX", "DMIC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_sto2_adc2l_enum, RT5665_STO2_ADC_MIXER,
+ RT5665_STO2_ADC2L_SRC_SFT, rt5665_sto2_adc2_src);
+
+static const struct snd_kcontrol_new rt5665_sto2_adc2l_mux =
+ SOC_DAPM_ENUM("Stereo2 ADC2L Source", rt5665_sto2_adc2l_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_sto2_adc2r_enum, RT5665_STO2_ADC_MIXER,
+ RT5665_STO2_ADC2R_SRC_SFT, rt5665_sto2_adc2_src);
+
+static const struct snd_kcontrol_new rt5665_sto2_adc2r_mux =
+ SOC_DAPM_ENUM("Stereo2 ADC2R Source", rt5665_sto2_adc2r_enum);
+
+/* STO2 DMIC Source */
+/* MX-28 [8] */
+static const char * const rt5665_sto2_dmic_src[] = {
+ "DMIC1", "DMIC2"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_sto2_dmic_enum, RT5665_STO2_ADC_MIXER,
+ RT5665_STO2_DMIC_SRC_SFT, rt5665_sto2_dmic_src);
+
+static const struct snd_kcontrol_new rt5665_sto2_dmic_mux =
+ SOC_DAPM_ENUM("Stereo2 DMIC Source", rt5665_sto2_dmic_enum);
+
+/* MX-28 [9] */
+static const char * const rt5665_sto2_dd_l_src[] = {
+ "STO2 DAC", "MONO DAC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_sto2_dd_l_enum, RT5665_STO2_ADC_MIXER,
+ RT5665_STO2_DD_L_SRC_SFT, rt5665_sto2_dd_l_src);
+
+static const struct snd_kcontrol_new rt5665_sto2_dd_l_mux =
+ SOC_DAPM_ENUM("Stereo2 DD L Source", rt5665_sto2_dd_l_enum);
+
+/* MX-28 [1] */
+static const char * const rt5665_sto2_dd_r_src[] = {
+ "STO2 DAC", "MONO DAC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_sto2_dd_r_enum, RT5665_STO2_ADC_MIXER,
+ RT5665_STO2_DD_R_SRC_SFT, rt5665_sto2_dd_r_src);
+
+static const struct snd_kcontrol_new rt5665_sto2_dd_r_mux =
+ SOC_DAPM_ENUM("Stereo2 DD R Source", rt5665_sto2_dd_r_enum);
+
+/* DAC R1 Source, DAC L1 Source*/
+/* MX-29 [11:10], MX-29 [9:8]*/
+static const char * const rt5665_dac1_src[] = {
+ "IF1 DAC1", "IF2_1 DAC", "IF2_2 DAC", "IF3 DAC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_dac_r1_enum, RT5665_AD_DA_MIXER,
+ RT5665_DAC1_R_SEL_SFT, rt5665_dac1_src);
+
+static const struct snd_kcontrol_new rt5665_dac_r1_mux =
+ SOC_DAPM_ENUM("DAC R1 Source", rt5665_dac_r1_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_dac_l1_enum, RT5665_AD_DA_MIXER,
+ RT5665_DAC1_L_SEL_SFT, rt5665_dac1_src);
+
+static const struct snd_kcontrol_new rt5665_dac_l1_mux =
+ SOC_DAPM_ENUM("DAC L1 Source", rt5665_dac_l1_enum);
+
+/* DAC Digital Mixer L Source, DAC Digital Mixer R Source*/
+/* MX-2D [13:12], MX-2D [9:8]*/
+static const char * const rt5665_dig_dac_mix_src[] = {
+ "Stereo1 DAC Mixer", "Stereo2 DAC Mixer", "Mono DAC Mixer"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_dig_dac_mixl_enum, RT5665_A_DAC1_MUX,
+ RT5665_DAC_MIX_L_SFT, rt5665_dig_dac_mix_src);
+
+static const struct snd_kcontrol_new rt5665_dig_dac_mixl_mux =
+ SOC_DAPM_ENUM("DAC Digital Mixer L Source", rt5665_dig_dac_mixl_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_dig_dac_mixr_enum, RT5665_A_DAC1_MUX,
+ RT5665_DAC_MIX_R_SFT, rt5665_dig_dac_mix_src);
+
+static const struct snd_kcontrol_new rt5665_dig_dac_mixr_mux =
+ SOC_DAPM_ENUM("DAC Digital Mixer R Source", rt5665_dig_dac_mixr_enum);
+
+/* Analog DAC L1 Source, Analog DAC R1 Source*/
+/* MX-2D [5:4], MX-2D [1:0]*/
+static const char * const rt5665_alg_dac1_src[] = {
+ "Stereo1 DAC Mixer", "DAC1", "DMIC1"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_alg_dac_l1_enum, RT5665_A_DAC1_MUX,
+ RT5665_A_DACL1_SFT, rt5665_alg_dac1_src);
+
+static const struct snd_kcontrol_new rt5665_alg_dac_l1_mux =
+ SOC_DAPM_ENUM("Analog DAC L1 Source", rt5665_alg_dac_l1_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_alg_dac_r1_enum, RT5665_A_DAC1_MUX,
+ RT5665_A_DACR1_SFT, rt5665_alg_dac1_src);
+
+static const struct snd_kcontrol_new rt5665_alg_dac_r1_mux =
+ SOC_DAPM_ENUM("Analog DAC R1 Source", rt5665_alg_dac_r1_enum);
+
+/* Analog DAC LR Source, Analog DAC R2 Source*/
+/* MX-2E [5:4], MX-2E [0]*/
+static const char * const rt5665_alg_dac2_src[] = {
+ "Mono DAC Mixer", "DAC2"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_alg_dac_l2_enum, RT5665_A_DAC2_MUX,
+ RT5665_A_DACL2_SFT, rt5665_alg_dac2_src);
+
+static const struct snd_kcontrol_new rt5665_alg_dac_l2_mux =
+ SOC_DAPM_ENUM("Analog DAC L2 Source", rt5665_alg_dac_l2_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_alg_dac_r2_enum, RT5665_A_DAC2_MUX,
+ RT5665_A_DACR2_SFT, rt5665_alg_dac2_src);
+
+static const struct snd_kcontrol_new rt5665_alg_dac_r2_mux =
+ SOC_DAPM_ENUM("Analog DAC R2 Source", rt5665_alg_dac_r2_enum);
+
+/* Interface2 ADC Data Input*/
+/* MX-2F [14:12] */
+static const char * const rt5665_if2_1_adc_in_src[] = {
+ "STO1 ADC", "STO2 ADC", "MONO ADC", "IF1 DAC1",
+ "IF1 DAC2", "IF2_2 DAC", "IF3 DAC", "DAC1 MIX"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_if2_1_adc_in_enum, RT5665_DIG_INF2_DATA,
+ RT5665_IF2_1_ADC_IN_SFT, rt5665_if2_1_adc_in_src);
+
+static const struct snd_kcontrol_new rt5665_if2_1_adc_in_mux =
+ SOC_DAPM_ENUM("IF2_1 ADC IN Source", rt5665_if2_1_adc_in_enum);
+
+/* MX-2F [6:4] */
+static const char * const rt5665_if2_2_adc_in_src[] = {
+ "STO1 ADC", "STO2 ADC", "MONO ADC", "IF1 DAC1",
+ "IF1 DAC2", "IF2_1 DAC", "IF3 DAC", "DAC1 MIX"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_if2_2_adc_in_enum, RT5665_DIG_INF2_DATA,
+ RT5665_IF2_2_ADC_IN_SFT, rt5665_if2_2_adc_in_src);
+
+static const struct snd_kcontrol_new rt5665_if2_2_adc_in_mux =
+ SOC_DAPM_ENUM("IF2_1 ADC IN Source", rt5665_if2_2_adc_in_enum);
+
+/* Interface3 ADC Data Input*/
+/* MX-30 [6:4] */
+static const char * const rt5665_if3_adc_in_src[] = {
+ "STO1 ADC", "STO2 ADC", "MONO ADC", "IF1 DAC1",
+ "IF1 DAC2", "IF2_1 DAC", "IF2_2 DAC", "DAC1 MIX"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_if3_adc_in_enum, RT5665_DIG_INF3_DATA,
+ RT5665_IF3_ADC_IN_SFT, rt5665_if3_adc_in_src);
+
+static const struct snd_kcontrol_new rt5665_if3_adc_in_mux =
+ SOC_DAPM_ENUM("IF3 ADC IN Source", rt5665_if3_adc_in_enum);
+
+/* PDM 1 L/R*/
+/* MX-31 [11:10] [9:8] */
+static const char * const rt5665_pdm_src[] = {
+ "Stereo1 DAC", "Stereo2 DAC", "Mono DAC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_pdm_l_enum, RT5665_PDM_OUT_CTRL,
+ RT5665_PDM1_L_SFT, rt5665_pdm_src);
+
+static const struct snd_kcontrol_new rt5665_pdm_l_mux =
+ SOC_DAPM_ENUM("PDM L Source", rt5665_pdm_l_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_pdm_r_enum, RT5665_PDM_OUT_CTRL,
+ RT5665_PDM1_R_SFT, rt5665_pdm_src);
+
+static const struct snd_kcontrol_new rt5665_pdm_r_mux =
+ SOC_DAPM_ENUM("PDM R Source", rt5665_pdm_r_enum);
+
+
+/* I2S1 TDM ADCDAT Source */
+/* MX-7a[10] */
+static const char * const rt5665_if1_1_adc1_data_src[] = {
+ "STO1 ADC", "IF2_1 DAC",
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_if1_1_adc1_data_enum, RT5665_TDM_CTRL_3,
+ RT5665_IF1_ADC1_SEL_SFT, rt5665_if1_1_adc1_data_src);
+
+static const struct snd_kcontrol_new rt5665_if1_1_adc1_mux =
+ SOC_DAPM_ENUM("IF1_1 ADC1 Source", rt5665_if1_1_adc1_data_enum);
+
+/* MX-7a[9] */
+static const char * const rt5665_if1_1_adc2_data_src[] = {
+ "STO2 ADC", "IF2_2 DAC",
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_if1_1_adc2_data_enum, RT5665_TDM_CTRL_3,
+ RT5665_IF1_ADC2_SEL_SFT, rt5665_if1_1_adc2_data_src);
+
+static const struct snd_kcontrol_new rt5665_if1_1_adc2_mux =
+ SOC_DAPM_ENUM("IF1_1 ADC2 Source", rt5665_if1_1_adc2_data_enum);
+
+/* MX-7a[8] */
+static const char * const rt5665_if1_1_adc3_data_src[] = {
+ "MONO ADC", "IF3 DAC",
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_if1_1_adc3_data_enum, RT5665_TDM_CTRL_3,
+ RT5665_IF1_ADC3_SEL_SFT, rt5665_if1_1_adc3_data_src);
+
+static const struct snd_kcontrol_new rt5665_if1_1_adc3_mux =
+ SOC_DAPM_ENUM("IF1_1 ADC3 Source", rt5665_if1_1_adc3_data_enum);
+
+/* MX-7b[10] */
+static const char * const rt5665_if1_2_adc1_data_src[] = {
+ "STO1 ADC", "IF1 DAC",
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_if1_2_adc1_data_enum, RT5665_TDM_CTRL_4,
+ RT5665_IF1_ADC1_SEL_SFT, rt5665_if1_2_adc1_data_src);
+
+static const struct snd_kcontrol_new rt5665_if1_2_adc1_mux =
+ SOC_DAPM_ENUM("IF1_2 ADC1 Source", rt5665_if1_2_adc1_data_enum);
+
+/* MX-7b[9] */
+static const char * const rt5665_if1_2_adc2_data_src[] = {
+ "STO2 ADC", "IF2_1 DAC",
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_if1_2_adc2_data_enum, RT5665_TDM_CTRL_4,
+ RT5665_IF1_ADC2_SEL_SFT, rt5665_if1_2_adc2_data_src);
+
+static const struct snd_kcontrol_new rt5665_if1_2_adc2_mux =
+ SOC_DAPM_ENUM("IF1_2 ADC2 Source", rt5665_if1_2_adc2_data_enum);
+
+/* MX-7b[8] */
+static const char * const rt5665_if1_2_adc3_data_src[] = {
+ "MONO ADC", "IF2_2 DAC",
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_if1_2_adc3_data_enum, RT5665_TDM_CTRL_4,
+ RT5665_IF1_ADC3_SEL_SFT, rt5665_if1_2_adc3_data_src);
+
+static const struct snd_kcontrol_new rt5665_if1_2_adc3_mux =
+ SOC_DAPM_ENUM("IF1_2 ADC3 Source", rt5665_if1_2_adc3_data_enum);
+
+/* MX-7b[7] */
+static const char * const rt5665_if1_2_adc4_data_src[] = {
+ "DAC1", "IF3 DAC",
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_if1_2_adc4_data_enum, RT5665_TDM_CTRL_4,
+ RT5665_IF1_ADC4_SEL_SFT, rt5665_if1_2_adc4_data_src);
+
+static const struct snd_kcontrol_new rt5665_if1_2_adc4_mux =
+ SOC_DAPM_ENUM("IF1_2 ADC4 Source", rt5665_if1_2_adc4_data_enum);
+
+/* MX-7a[4:0] MX-7b[4:0] */
+static const char * const rt5665_tdm_adc_data_src[] = {
+ "1234", "1243", "1324", "1342", "1432", "1423",
+ "2134", "2143", "2314", "2341", "2431", "2413",
+ "3124", "3142", "3214", "3241", "3412", "3421",
+ "4123", "4132", "4213", "4231", "4312", "4321"
+};
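+
+/*
+ * Each string above names a permutation of the four ADC data groups, i.e.
+ * the order in which they are placed into the TDM slots of interface 1.
+ */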
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_tdm1_adc_data_enum, RT5665_TDM_CTRL_3,
+ RT5665_TDM_ADC_SEL_SFT, rt5665_tdm_adc_data_src);
+
+static const struct snd_kcontrol_new rt5665_tdm1_adc_mux =
+ SOC_DAPM_ENUM("TDM1 ADC Mux", rt5665_tdm1_adc_data_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5665_tdm2_adc_data_enum, RT5665_TDM_CTRL_4,
+ RT5665_TDM_ADC_SEL_SFT, rt5665_tdm_adc_data_src);
+
+static const struct snd_kcontrol_new rt5665_tdm2_adc_mux =
+ SOC_DAPM_ENUM("TDM2 ADCDAT Source", rt5665_tdm2_adc_data_enum);
+
+/* Out Volume Switch */
+static const struct snd_kcontrol_new monovol_switch =
+ SOC_DAPM_SINGLE("Switch", RT5665_MONO_OUT, RT5665_VOL_L_SFT, 1, 1);
+
+static const struct snd_kcontrol_new outvol_l_switch =
+ SOC_DAPM_SINGLE("Switch", RT5665_LOUT, RT5665_VOL_L_SFT, 1, 1);
+
+static const struct snd_kcontrol_new outvol_r_switch =
+ SOC_DAPM_SINGLE("Switch", RT5665_LOUT, RT5665_VOL_R_SFT, 1, 1);
+
+/* Out Switch */
+static const struct snd_kcontrol_new mono_switch =
+ SOC_DAPM_SINGLE("Switch", RT5665_MONO_OUT, RT5665_L_MUTE_SFT, 1, 1);
+
+static const struct snd_kcontrol_new hpo_switch =
+ SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5665_HP_CTRL_2,
+ RT5665_VOL_L_SFT, 1, 0);
+
+static const struct snd_kcontrol_new lout_l_switch =
+ SOC_DAPM_SINGLE("Switch", RT5665_LOUT, RT5665_L_MUTE_SFT, 1, 1);
+
+static const struct snd_kcontrol_new lout_r_switch =
+ SOC_DAPM_SINGLE("Switch", RT5665_LOUT, RT5665_R_MUTE_SFT, 1, 1);
+
+static const struct snd_kcontrol_new pdm_l_switch =
+ SOC_DAPM_SINGLE("Switch", RT5665_PDM_OUT_CTRL,
+ RT5665_M_PDM1_L_SFT, 1, 1);
+
+static const struct snd_kcontrol_new pdm_r_switch =
+ SOC_DAPM_SINGLE("Switch", RT5665_PDM_OUT_CTRL,
+ RT5665_M_PDM1_R_SFT, 1, 1);
+
+static int rt5665_mono_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_update_bits(codec, RT5665_MONO_NG2_CTRL_1,
+ RT5665_NG2_EN_MASK, RT5665_NG2_EN);
+ snd_soc_update_bits(codec, RT5665_MONO_AMP_CALIB_CTRL_1, 0x40,
+ 0x0);
+ snd_soc_update_bits(codec, RT5665_MONO_OUT, 0x10, 0x10);
+ snd_soc_update_bits(codec, RT5665_MONO_OUT, 0x20, 0x20);
+ break;
+
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, RT5665_MONO_OUT, 0x20, 0);
+ snd_soc_update_bits(codec, RT5665_MONO_OUT, 0x10, 0);
+ snd_soc_update_bits(codec, RT5665_MONO_AMP_CALIB_CTRL_1, 0x40,
+ 0x40);
+ snd_soc_update_bits(codec, RT5665_MONO_NG2_CTRL_1,
+ RT5665_NG2_EN_MASK, RT5665_NG2_DIS);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int rt5665_hp_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_update_bits(codec, RT5665_STO_NG2_CTRL_1,
+ RT5665_NG2_EN_MASK, RT5665_NG2_EN);
+ snd_soc_write(codec, RT5665_HP_LOGIC_CTRL_2, 0x0003);
+ break;
+
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_write(codec, RT5665_HP_LOGIC_CTRL_2, 0x0002);
+ snd_soc_update_bits(codec, RT5665_STO_NG2_CTRL_1,
+ RT5665_NG2_EN_MASK, RT5665_NG2_DIS);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int rt5665_lout_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_update_bits(codec, RT5665_DEPOP_1,
+ RT5665_PUMP_EN, RT5665_PUMP_EN);
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_update_bits(codec, RT5665_DEPOP_1,
+ RT5665_PUMP_EN, 0);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int set_dmic_power(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ /* Add delay to avoid pop noise */
+ msleep(150);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
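+/*
+ * Vref supply event: clear the fast-Vref control bit for the Vref being
+ * powered (1/2/3) before power-up, then set it again once the supply has
+ * had time to settle.
+ */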
+static int rt5665_set_vref(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ switch (w->shift) {
+ case RT5665_PWR_VREF1_BIT:
+ snd_soc_update_bits(codec, RT5665_PWR_ANLG_1,
+ RT5665_PWR_FV1, 0);
+ break;
+
+ case RT5665_PWR_VREF2_BIT:
+ snd_soc_update_bits(codec, RT5665_PWR_ANLG_1,
+ RT5665_PWR_FV2, 0);
+ break;
+
+ case RT5665_PWR_VREF3_BIT:
+ snd_soc_update_bits(codec, RT5665_PWR_ANLG_1,
+ RT5665_PWR_FV3, 0);
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case SND_SOC_DAPM_POST_PMU:
+ usleep_range(15000, 20000);
+ switch (w->shift) {
+ case RT5665_PWR_VREF1_BIT:
+ snd_soc_update_bits(codec, RT5665_PWR_ANLG_1,
+ RT5665_PWR_FV1, RT5665_PWR_FV1);
+ break;
+
+ case RT5665_PWR_VREF2_BIT:
+ snd_soc_update_bits(codec, RT5665_PWR_ANLG_1,
+ RT5665_PWR_FV2, RT5665_PWR_FV2);
+ break;
+
+ case RT5665_PWR_VREF3_BIT:
+ snd_soc_update_bits(codec, RT5665_PWR_ANLG_1,
+ RT5665_PWR_FV3, RT5665_PWR_FV3);
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
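+/*
+ * DAPM widgets: power supplies (LDO, PLL, Vrefs, ASRCs, micbias), the
+ * input path (DMICs, analog inputs, boosts, record mixers, ADCs), the
+ * digital interfaces with their TDM/ADC muxing, and the output path
+ * (DAC mixers, amplifiers, PDM and line/headphone/mono outputs).
+ */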
+static const struct snd_soc_dapm_widget rt5665_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("LDO2", RT5665_PWR_ANLG_3, RT5665_PWR_LDO2_BIT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY("PLL", RT5665_PWR_ANLG_3, RT5665_PWR_PLL_BIT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Mic Det Power", RT5665_PWR_VOL,
+ RT5665_PWR_MIC_DET_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Vref1", RT5665_PWR_ANLG_1, RT5665_PWR_VREF1_BIT, 0,
+ rt5665_set_vref, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_SUPPLY("Vref2", RT5665_PWR_ANLG_1, RT5665_PWR_VREF2_BIT, 0,
+ rt5665_set_vref, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_SUPPLY("Vref3", RT5665_PWR_ANLG_1, RT5665_PWR_VREF3_BIT, 0,
+ rt5665_set_vref, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+
+ /* ASRC */
+ SND_SOC_DAPM_SUPPLY_S("I2S1 ASRC", 1, RT5665_ASRC_1,
+ RT5665_I2S1_ASRC_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("I2S2 ASRC", 1, RT5665_ASRC_1,
+ RT5665_I2S2_ASRC_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("I2S3 ASRC", 1, RT5665_ASRC_1,
+ RT5665_I2S3_ASRC_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DAC STO1 ASRC", 1, RT5665_ASRC_1,
+ RT5665_DAC_STO1_ASRC_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DAC STO2 ASRC", 1, RT5665_ASRC_1,
+ RT5665_DAC_STO2_ASRC_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DAC Mono L ASRC", 1, RT5665_ASRC_1,
+ RT5665_DAC_MONO_L_ASRC_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DAC Mono R ASRC", 1, RT5665_ASRC_1,
+ RT5665_DAC_MONO_R_ASRC_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("ADC STO1 ASRC", 1, RT5665_ASRC_1,
+ RT5665_ADC_STO1_ASRC_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("ADC Mono L ASRC", 1, RT5665_ASRC_1,
+ RT5665_ADC_MONO_L_ASRC_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("ADC Mono R ASRC", 1, RT5665_ASRC_1,
+ RT5665_ADC_MONO_R_ASRC_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DMIC STO1 ASRC", 1, RT5665_ASRC_1,
+ RT5665_DMIC_STO1_ASRC_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DMIC STO2 ASRC", 1, RT5665_ASRC_1,
+ RT5665_DMIC_STO2_ASRC_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DMIC MONO L ASRC", 1, RT5665_ASRC_1,
+ RT5665_DMIC_MONO_L_ASRC_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DMIC MONO R ASRC", 1, RT5665_ASRC_1,
+ RT5665_DMIC_MONO_R_ASRC_SFT, 0, NULL, 0),
+
+ /* Input Side */
+ SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5665_PWR_ANLG_2, RT5665_PWR_MB1_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("MICBIAS2", RT5665_PWR_ANLG_2, RT5665_PWR_MB2_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("MICBIAS3", RT5665_PWR_ANLG_2, RT5665_PWR_MB3_BIT,
+ 0, NULL, 0),
+
+ /* Input Lines */
+ SND_SOC_DAPM_INPUT("DMIC L1"),
+ SND_SOC_DAPM_INPUT("DMIC R1"),
+ SND_SOC_DAPM_INPUT("DMIC L2"),
+ SND_SOC_DAPM_INPUT("DMIC R2"),
+
+ SND_SOC_DAPM_INPUT("IN1P"),
+ SND_SOC_DAPM_INPUT("IN1N"),
+ SND_SOC_DAPM_INPUT("IN2P"),
+ SND_SOC_DAPM_INPUT("IN2N"),
+ SND_SOC_DAPM_INPUT("IN3P"),
+ SND_SOC_DAPM_INPUT("IN3N"),
+ SND_SOC_DAPM_INPUT("IN4P"),
+ SND_SOC_DAPM_INPUT("IN4N"),
+
+ SND_SOC_DAPM_PGA("DMIC1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("DMIC2", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("DMIC CLK", SND_SOC_NOPM, 0, 0,
+ set_dmic_clk, SND_SOC_DAPM_PRE_PMU),
+ SND_SOC_DAPM_SUPPLY("DMIC1 Power", RT5665_DMIC_CTRL_1,
+ RT5665_DMIC_1_EN_SFT, 0, set_dmic_power, SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_SUPPLY("DMIC2 Power", RT5665_DMIC_CTRL_1,
+ RT5665_DMIC_2_EN_SFT, 0, set_dmic_power, SND_SOC_DAPM_POST_PMU),
+
+ /* Boost */
+ SND_SOC_DAPM_PGA("BST1", SND_SOC_NOPM,
+ 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("BST2", SND_SOC_NOPM,
+ 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("BST3", SND_SOC_NOPM,
+ 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("BST4", SND_SOC_NOPM,
+ 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("BST1 CBJ", SND_SOC_NOPM,
+ 0, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("BST1 Power", RT5665_PWR_ANLG_2,
+ RT5665_PWR_BST1_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("BST2 Power", RT5665_PWR_ANLG_2,
+ RT5665_PWR_BST2_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("BST3 Power", RT5665_PWR_ANLG_2,
+ RT5665_PWR_BST3_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("BST4 Power", RT5665_PWR_ANLG_2,
+ RT5665_PWR_BST4_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("BST1P Power", RT5665_PWR_ANLG_2,
+ RT5665_PWR_BST1_P_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("BST2P Power", RT5665_PWR_ANLG_2,
+ RT5665_PWR_BST2_P_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("BST3P Power", RT5665_PWR_ANLG_2,
+ RT5665_PWR_BST3_P_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("BST4P Power", RT5665_PWR_ANLG_2,
+ RT5665_PWR_BST4_P_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("CBJ Power", RT5665_PWR_ANLG_3,
+ RT5665_PWR_CBJ_BIT, 0, NULL, 0),
+
+ /* Input Volume */
+ SND_SOC_DAPM_PGA("INL VOL", RT5665_PWR_VOL, RT5665_PWR_IN_L_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_PGA("INR VOL", RT5665_PWR_VOL, RT5665_PWR_IN_R_BIT,
+ 0, NULL, 0),
+
+ /* REC Mixer */
+ SND_SOC_DAPM_MIXER("RECMIX1L", SND_SOC_NOPM, 0, 0, rt5665_rec1_l_mix,
+ ARRAY_SIZE(rt5665_rec1_l_mix)),
+ SND_SOC_DAPM_MIXER("RECMIX1R", SND_SOC_NOPM, 0, 0, rt5665_rec1_r_mix,
+ ARRAY_SIZE(rt5665_rec1_r_mix)),
+ SND_SOC_DAPM_MIXER("RECMIX2L", SND_SOC_NOPM, 0, 0, rt5665_rec2_l_mix,
+ ARRAY_SIZE(rt5665_rec2_l_mix)),
+ SND_SOC_DAPM_MIXER("RECMIX2R", SND_SOC_NOPM, 0, 0, rt5665_rec2_r_mix,
+ ARRAY_SIZE(rt5665_rec2_r_mix)),
+ SND_SOC_DAPM_SUPPLY("RECMIX1L Power", RT5665_PWR_ANLG_2,
+ RT5665_PWR_RM1_L_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("RECMIX1R Power", RT5665_PWR_ANLG_2,
+ RT5665_PWR_RM1_R_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("RECMIX2L Power", RT5665_PWR_MIXER,
+ RT5665_PWR_RM2_L_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("RECMIX2R Power", RT5665_PWR_MIXER,
+ RT5665_PWR_RM2_R_BIT, 0, NULL, 0),
+
+ /* ADCs */
+ SND_SOC_DAPM_ADC("ADC1 L", NULL, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_ADC("ADC1 R", NULL, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_ADC("ADC2 L", NULL, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_ADC("ADC2 R", NULL, SND_SOC_NOPM, 0, 0),
+
+ SND_SOC_DAPM_SUPPLY("ADC1 L Power", RT5665_PWR_DIG_1,
+ RT5665_PWR_ADC_L1_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC1 R Power", RT5665_PWR_DIG_1,
+ RT5665_PWR_ADC_R1_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC2 L Power", RT5665_PWR_DIG_1,
+ RT5665_PWR_ADC_L2_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC2 R Power", RT5665_PWR_DIG_1,
+ RT5665_PWR_ADC_R2_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC1 clock", RT5665_CHOP_ADC,
+ RT5665_CKGEN_ADC1_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC2 clock", RT5665_CHOP_ADC,
+ RT5665_CKGEN_ADC2_SFT, 0, NULL, 0),
+
+ /* ADC Mux */
+ SND_SOC_DAPM_MUX("Stereo1 DMIC L Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto1_dmic_mux),
+ SND_SOC_DAPM_MUX("Stereo1 DMIC R Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto1_dmic_mux),
+ SND_SOC_DAPM_MUX("Stereo1 ADC L1 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto1_adc1l_mux),
+ SND_SOC_DAPM_MUX("Stereo1 ADC R1 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto1_adc1r_mux),
+ SND_SOC_DAPM_MUX("Stereo1 ADC L2 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto1_adc2l_mux),
+ SND_SOC_DAPM_MUX("Stereo1 ADC R2 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto1_adc2r_mux),
+ SND_SOC_DAPM_MUX("Stereo1 ADC L Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto1_adcl_mux),
+ SND_SOC_DAPM_MUX("Stereo1 ADC R Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto1_adcr_mux),
+ SND_SOC_DAPM_MUX("Stereo1 DD L Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto1_dd_l_mux),
+ SND_SOC_DAPM_MUX("Stereo1 DD R Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto1_dd_r_mux),
+ SND_SOC_DAPM_MUX("Mono ADC L2 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_mono_adc_l2_mux),
+ SND_SOC_DAPM_MUX("Mono ADC R2 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_mono_adc_r2_mux),
+ SND_SOC_DAPM_MUX("Mono ADC L1 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_mono_adc_l1_mux),
+ SND_SOC_DAPM_MUX("Mono ADC R1 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_mono_adc_r1_mux),
+ SND_SOC_DAPM_MUX("Mono DMIC L Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_mono_dmic_l_mux),
+ SND_SOC_DAPM_MUX("Mono DMIC R Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_mono_dmic_r_mux),
+ SND_SOC_DAPM_MUX("Mono ADC L Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_mono_adc_l_mux),
+ SND_SOC_DAPM_MUX("Mono ADC R Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_mono_adc_r_mux),
+ SND_SOC_DAPM_MUX("Mono DD L Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_mono_dd_l_mux),
+ SND_SOC_DAPM_MUX("Mono DD R Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_mono_dd_r_mux),
+ SND_SOC_DAPM_MUX("Stereo2 DMIC L Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto2_dmic_mux),
+ SND_SOC_DAPM_MUX("Stereo2 DMIC R Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto2_dmic_mux),
+ SND_SOC_DAPM_MUX("Stereo2 ADC L1 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto2_adc1l_mux),
+ SND_SOC_DAPM_MUX("Stereo2 ADC R1 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto2_adc1r_mux),
+ SND_SOC_DAPM_MUX("Stereo2 ADC L2 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto2_adc2l_mux),
+ SND_SOC_DAPM_MUX("Stereo2 ADC R2 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto2_adc2r_mux),
+ SND_SOC_DAPM_MUX("Stereo2 ADC L Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto2_adcl_mux),
+ SND_SOC_DAPM_MUX("Stereo2 ADC R Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto2_adcr_mux),
+ SND_SOC_DAPM_MUX("Stereo2 DD L Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto2_dd_l_mux),
+ SND_SOC_DAPM_MUX("Stereo2 DD R Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_sto2_dd_r_mux),
+ /* ADC Mixer */
+ SND_SOC_DAPM_SUPPLY("ADC Stereo1 Filter", RT5665_PWR_DIG_2,
+ RT5665_PWR_ADC_S1F_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC Stereo2 Filter", RT5665_PWR_DIG_2,
+ RT5665_PWR_ADC_S2F_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("Stereo1 ADC MIXL", RT5665_STO1_ADC_DIG_VOL,
+ RT5665_L_MUTE_SFT, 1, rt5665_sto1_adc_l_mix,
+ ARRAY_SIZE(rt5665_sto1_adc_l_mix)),
+ SND_SOC_DAPM_MIXER("Stereo1 ADC MIXR", RT5665_STO1_ADC_DIG_VOL,
+ RT5665_R_MUTE_SFT, 1, rt5665_sto1_adc_r_mix,
+ ARRAY_SIZE(rt5665_sto1_adc_r_mix)),
+ SND_SOC_DAPM_MIXER("Stereo2 ADC MIXL", RT5665_STO2_ADC_DIG_VOL,
+ RT5665_L_MUTE_SFT, 1, rt5665_sto2_adc_l_mix,
+ ARRAY_SIZE(rt5665_sto2_adc_l_mix)),
+ SND_SOC_DAPM_MIXER("Stereo2 ADC MIXR", RT5665_STO2_ADC_DIG_VOL,
+ RT5665_R_MUTE_SFT, 1, rt5665_sto2_adc_r_mix,
+ ARRAY_SIZE(rt5665_sto2_adc_r_mix)),
+ SND_SOC_DAPM_SUPPLY("ADC Mono Left Filter", RT5665_PWR_DIG_2,
+ RT5665_PWR_ADC_MF_L_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("Mono ADC MIXL", RT5665_MONO_ADC_DIG_VOL,
+ RT5665_L_MUTE_SFT, 1, rt5665_mono_adc_l_mix,
+ ARRAY_SIZE(rt5665_mono_adc_l_mix)),
+ SND_SOC_DAPM_SUPPLY("ADC Mono Right Filter", RT5665_PWR_DIG_2,
+ RT5665_PWR_ADC_MF_R_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("Mono ADC MIXR", RT5665_MONO_ADC_DIG_VOL,
+ RT5665_R_MUTE_SFT, 1, rt5665_mono_adc_r_mix,
+ ARRAY_SIZE(rt5665_mono_adc_r_mix)),
+
+ /* ADC PGA */
+ SND_SOC_DAPM_PGA("Stereo1 ADC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Stereo2 ADC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Mono ADC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* Digital Interface */
+ SND_SOC_DAPM_SUPPLY("I2S1_1", RT5665_PWR_DIG_1, RT5665_PWR_I2S1_1_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("I2S1_2", RT5665_PWR_DIG_1, RT5665_PWR_I2S1_2_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("I2S2_1", RT5665_PWR_DIG_1, RT5665_PWR_I2S2_1_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("I2S2_2", RT5665_PWR_DIG_1, RT5665_PWR_I2S2_2_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("I2S3", RT5665_PWR_DIG_1, RT5665_PWR_I2S3_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC2", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC3", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC1 L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC1 R", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC2 L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC2 R", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC3 L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC3 R", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_PGA("IF2_1 DAC", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF2_2 DAC", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF2_1 DAC L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF2_1 DAC R", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF2_2 DAC L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF2_2 DAC R", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF2_1 ADC", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF2_2 ADC", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_PGA("IF3 DAC", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF3 DAC L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF3 DAC R", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF3 ADC", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* Digital Interface Select */
+ SND_SOC_DAPM_MUX("IF1_1_ADC1 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_1_adc1_mux),
+ SND_SOC_DAPM_MUX("IF1_1_ADC2 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_1_adc2_mux),
+ SND_SOC_DAPM_MUX("IF1_1_ADC3 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_1_adc3_mux),
+ SND_SOC_DAPM_PGA("IF1_1_ADC4", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MUX("IF1_2_ADC1 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_2_adc1_mux),
+ SND_SOC_DAPM_MUX("IF1_2_ADC2 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_2_adc2_mux),
+ SND_SOC_DAPM_MUX("IF1_2_ADC3 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_2_adc3_mux),
+ SND_SOC_DAPM_MUX("IF1_2_ADC4 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_2_adc4_mux),
+ SND_SOC_DAPM_MUX("TDM1 slot 01 Data Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_tdm1_adc_mux),
+ SND_SOC_DAPM_MUX("TDM1 slot 23 Data Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_tdm1_adc_mux),
+ SND_SOC_DAPM_MUX("TDM1 slot 45 Data Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_tdm1_adc_mux),
+ SND_SOC_DAPM_MUX("TDM1 slot 67 Data Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_tdm1_adc_mux),
+ SND_SOC_DAPM_MUX("TDM2 slot 01 Data Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_tdm2_adc_mux),
+ SND_SOC_DAPM_MUX("TDM2 slot 23 Data Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_tdm2_adc_mux),
+ SND_SOC_DAPM_MUX("TDM2 slot 45 Data Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_tdm2_adc_mux),
+ SND_SOC_DAPM_MUX("TDM2 slot 67 Data Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_tdm2_adc_mux),
+ SND_SOC_DAPM_MUX("IF2_1 ADC Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if2_1_adc_in_mux),
+ SND_SOC_DAPM_MUX("IF2_2 ADC Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if2_2_adc_in_mux),
+ SND_SOC_DAPM_MUX("IF3 ADC Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if3_adc_in_mux),
+ SND_SOC_DAPM_MUX("IF1_1 0 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_1_01_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF1_1 1 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_1_01_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF1_1 2 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_1_23_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF1_1 3 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_1_23_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF1_1 4 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_1_45_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF1_1 5 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_1_45_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF1_1 6 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_1_67_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF1_1 7 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_1_67_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF1_2 0 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_2_01_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF1_2 1 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_2_01_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF1_2 2 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_2_23_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF1_2 3 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_2_23_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF1_2 4 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_2_45_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF1_2 5 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_2_45_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF1_2 6 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_2_67_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF1_2 7 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if1_2_67_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF2_1 DAC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if2_1_dac_swap_mux),
+ SND_SOC_DAPM_MUX("IF2_1 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if2_1_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF2_2 DAC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if2_2_dac_swap_mux),
+ SND_SOC_DAPM_MUX("IF2_2 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if2_2_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF3 DAC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if3_dac_swap_mux),
+ SND_SOC_DAPM_MUX("IF3 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5665_if3_adc_swap_mux),
+
+ /* Audio Interface */
+ SND_SOC_DAPM_AIF_OUT("AIF1_1TX slot 0", "AIF1_1 Capture",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1_1TX slot 1", "AIF1_1 Capture",
+ 1, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1_1TX slot 2", "AIF1_1 Capture",
+ 2, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1_1TX slot 3", "AIF1_1 Capture",
+ 3, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1_1TX slot 4", "AIF1_1 Capture",
+ 4, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1_1TX slot 5", "AIF1_1 Capture",
+ 5, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1_1TX slot 6", "AIF1_1 Capture",
+ 6, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1_1TX slot 7", "AIF1_1 Capture",
+ 7, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1_2TX slot 0", "AIF1_2 Capture",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1_2TX slot 1", "AIF1_2 Capture",
+ 1, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1_2TX slot 2", "AIF1_2 Capture",
+ 2, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1_2TX slot 3", "AIF1_2 Capture",
+ 3, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1_2TX slot 4", "AIF1_2 Capture",
+ 4, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1_2TX slot 5", "AIF1_2 Capture",
+ 5, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1_2TX slot 6", "AIF1_2 Capture",
+ 6, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1_2TX slot 7", "AIF1_2 Capture",
+ 7, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF2_1TX", "AIF2_1 Capture",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF2_2TX", "AIF2_2 Capture",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF3TX", "AIF3 Capture",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("AIF1RX", "AIF1 Playback",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("AIF2_1RX", "AIF2_1 Playback",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("AIF2_2RX", "AIF2_2 Playback",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("AIF3RX", "AIF3 Playback",
+ 0, SND_SOC_NOPM, 0, 0),
+
+ /* Output Side */
+ /* DAC mixer before sound effect */
+ SND_SOC_DAPM_MIXER("DAC1 MIXL", SND_SOC_NOPM, 0, 0,
+ rt5665_dac_l_mix, ARRAY_SIZE(rt5665_dac_l_mix)),
+ SND_SOC_DAPM_MIXER("DAC1 MIXR", SND_SOC_NOPM, 0, 0,
+ rt5665_dac_r_mix, ARRAY_SIZE(rt5665_dac_r_mix)),
+
+ /* DAC channel Mux */
+ SND_SOC_DAPM_MUX("DAC L1 Mux", SND_SOC_NOPM, 0, 0, &rt5665_dac_l1_mux),
+ SND_SOC_DAPM_MUX("DAC R1 Mux", SND_SOC_NOPM, 0, 0, &rt5665_dac_r1_mux),
+ SND_SOC_DAPM_MUX("DAC L2 Mux", SND_SOC_NOPM, 0, 0, &rt5665_dac_l2_mux),
+ SND_SOC_DAPM_MUX("DAC R2 Mux", SND_SOC_NOPM, 0, 0, &rt5665_dac_r2_mux),
+ SND_SOC_DAPM_MUX("DAC L3 Mux", SND_SOC_NOPM, 0, 0, &rt5665_dac_l3_mux),
+ SND_SOC_DAPM_MUX("DAC R3 Mux", SND_SOC_NOPM, 0, 0, &rt5665_dac_r3_mux),
+
+ SND_SOC_DAPM_MUX("DAC L1 Source", SND_SOC_NOPM, 0, 0,
+ &rt5665_alg_dac_l1_mux),
+ SND_SOC_DAPM_MUX("DAC R1 Source", SND_SOC_NOPM, 0, 0,
+ &rt5665_alg_dac_r1_mux),
+ SND_SOC_DAPM_MUX("DAC L2 Source", SND_SOC_NOPM, 0, 0,
+ &rt5665_alg_dac_l2_mux),
+ SND_SOC_DAPM_MUX("DAC R2 Source", SND_SOC_NOPM, 0, 0,
+ &rt5665_alg_dac_r2_mux),
+
+ /* DAC Mixer */
+ SND_SOC_DAPM_SUPPLY("DAC Stereo1 Filter", RT5665_PWR_DIG_2,
+ RT5665_PWR_DAC_S1F_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("DAC Stereo2 Filter", RT5665_PWR_DIG_2,
+ RT5665_PWR_DAC_S2F_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("DAC Mono Left Filter", RT5665_PWR_DIG_2,
+ RT5665_PWR_DAC_MF_L_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("DAC Mono Right Filter", RT5665_PWR_DIG_2,
+ RT5665_PWR_DAC_MF_R_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("Stereo1 DAC MIXL", SND_SOC_NOPM, 0, 0,
+ rt5665_sto1_dac_l_mix, ARRAY_SIZE(rt5665_sto1_dac_l_mix)),
+ SND_SOC_DAPM_MIXER("Stereo1 DAC MIXR", SND_SOC_NOPM, 0, 0,
+ rt5665_sto1_dac_r_mix, ARRAY_SIZE(rt5665_sto1_dac_r_mix)),
+ SND_SOC_DAPM_MIXER("Stereo2 DAC MIXL", SND_SOC_NOPM, 0, 0,
+ rt5665_sto2_dac_l_mix, ARRAY_SIZE(rt5665_sto2_dac_l_mix)),
+ SND_SOC_DAPM_MIXER("Stereo2 DAC MIXR", SND_SOC_NOPM, 0, 0,
+ rt5665_sto2_dac_r_mix, ARRAY_SIZE(rt5665_sto2_dac_r_mix)),
+ SND_SOC_DAPM_MIXER("Mono DAC MIXL", SND_SOC_NOPM, 0, 0,
+ rt5665_mono_dac_l_mix, ARRAY_SIZE(rt5665_mono_dac_l_mix)),
+ SND_SOC_DAPM_MIXER("Mono DAC MIXR", SND_SOC_NOPM, 0, 0,
+ rt5665_mono_dac_r_mix, ARRAY_SIZE(rt5665_mono_dac_r_mix)),
+ SND_SOC_DAPM_MUX("DAC MIXL", SND_SOC_NOPM, 0, 0,
+ &rt5665_dig_dac_mixl_mux),
+ SND_SOC_DAPM_MUX("DAC MIXR", SND_SOC_NOPM, 0, 0,
+ &rt5665_dig_dac_mixr_mux),
+
+ /* DACs */
+ SND_SOC_DAPM_DAC("DAC L1", NULL, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_DAC("DAC R1", NULL, SND_SOC_NOPM, 0, 0),
+
+ SND_SOC_DAPM_SUPPLY("DAC L2 Power", RT5665_PWR_DIG_1,
+ RT5665_PWR_DAC_L2_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("DAC R2 Power", RT5665_PWR_DIG_1,
+ RT5665_PWR_DAC_R2_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_DAC("DAC L2", NULL, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_DAC("DAC R2", NULL, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_PGA("DAC1 MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY_S("DAC 1 Clock", 1, RT5665_CHOP_DAC,
+ RT5665_CKGEN_DAC1_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DAC 2 Clock", 1, RT5665_CHOP_DAC,
+ RT5665_CKGEN_DAC2_SFT, 0, NULL, 0),
+
+ /* OUT Mixer */
+ SND_SOC_DAPM_MIXER("MONOVOL MIX", RT5665_PWR_MIXER, RT5665_PWR_MM_BIT,
+ 0, rt5665_monovol_mix, ARRAY_SIZE(rt5665_monovol_mix)),
+ SND_SOC_DAPM_MIXER("OUT MIXL", RT5665_PWR_MIXER, RT5665_PWR_OM_L_BIT,
+ 0, rt5665_out_l_mix, ARRAY_SIZE(rt5665_out_l_mix)),
+ SND_SOC_DAPM_MIXER("OUT MIXR", RT5665_PWR_MIXER, RT5665_PWR_OM_R_BIT,
+ 0, rt5665_out_r_mix, ARRAY_SIZE(rt5665_out_r_mix)),
+
+ /* Output Volume */
+ SND_SOC_DAPM_SWITCH("MONOVOL", RT5665_PWR_VOL, RT5665_PWR_MV_BIT, 0,
+ &monovol_switch),
+ SND_SOC_DAPM_SWITCH("OUTVOL L", RT5665_PWR_VOL, RT5665_PWR_OV_L_BIT, 0,
+ &outvol_l_switch),
+ SND_SOC_DAPM_SWITCH("OUTVOL R", RT5665_PWR_VOL, RT5665_PWR_OV_R_BIT, 0,
+ &outvol_r_switch),
+
+ /* MONO/HPO/LOUT */
+ SND_SOC_DAPM_MIXER("Mono MIX", SND_SOC_NOPM, 0, 0, rt5665_mono_mix,
+ ARRAY_SIZE(rt5665_mono_mix)),
+ SND_SOC_DAPM_MIXER("LOUT L MIX", SND_SOC_NOPM, 0, 0, rt5665_lout_l_mix,
+ ARRAY_SIZE(rt5665_lout_l_mix)),
+ SND_SOC_DAPM_MIXER("LOUT R MIX", SND_SOC_NOPM, 0, 0, rt5665_lout_r_mix,
+ ARRAY_SIZE(rt5665_lout_r_mix)),
+ SND_SOC_DAPM_PGA_S("Mono Amp", 1, RT5665_PWR_ANLG_1, RT5665_PWR_MA_BIT,
+ 0, rt5665_mono_event, SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU),
+ SND_SOC_DAPM_PGA_S("HP Amp", 1, SND_SOC_NOPM, 0, 0, rt5665_hp_event,
+ SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU),
+ SND_SOC_DAPM_PGA_S("LOUT Amp", 1, RT5665_PWR_ANLG_1,
+ RT5665_PWR_LM_BIT, 0, rt5665_lout_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
+ SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU),
+
+ SND_SOC_DAPM_SUPPLY("Charge Pump", SND_SOC_NOPM, 0, 0,
+ rt5665_charge_pump_event, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_SWITCH("Mono Playback", SND_SOC_NOPM, 0, 0,
+ &mono_switch),
+ SND_SOC_DAPM_SWITCH("HPO Playback", SND_SOC_NOPM, 0, 0,
+ &hpo_switch),
+ SND_SOC_DAPM_SWITCH("LOUT L Playback", SND_SOC_NOPM, 0, 0,
+ &lout_l_switch),
+ SND_SOC_DAPM_SWITCH("LOUT R Playback", SND_SOC_NOPM, 0, 0,
+ &lout_r_switch),
+ SND_SOC_DAPM_SWITCH("PDM L Playback", SND_SOC_NOPM, 0, 0,
+ &pdm_l_switch),
+ SND_SOC_DAPM_SWITCH("PDM R Playback", SND_SOC_NOPM, 0, 0,
+ &pdm_r_switch),
+
+ /* PDM */
+ SND_SOC_DAPM_SUPPLY("PDM Power", RT5665_PWR_DIG_2,
+ RT5665_PWR_PDM1_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_MUX("PDM L Mux", SND_SOC_NOPM,
+ 0, 1, &rt5665_pdm_l_mux),
+ SND_SOC_DAPM_MUX("PDM R Mux", SND_SOC_NOPM,
+ 0, 1, &rt5665_pdm_r_mux),
+
+ /* CLK DET */
+ SND_SOC_DAPM_SUPPLY("CLKDET SYS", RT5665_CLK_DET, RT5665_SYS_CLK_DET,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("CLKDET HP", RT5665_CLK_DET, RT5665_HP_CLK_DET,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("CLKDET MONO", RT5665_CLK_DET, RT5665_MONO_CLK_DET,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("CLKDET LOUT", RT5665_CLK_DET, RT5665_LOUT_CLK_DET,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("CLKDET", RT5665_CLK_DET, RT5665_POW_CLK_DET,
+ 0, NULL, 0),
+
+ /* Output Lines */
+ SND_SOC_DAPM_OUTPUT("HPOL"),
+ SND_SOC_DAPM_OUTPUT("HPOR"),
+ SND_SOC_DAPM_OUTPUT("LOUTL"),
+ SND_SOC_DAPM_OUTPUT("LOUTR"),
+ SND_SOC_DAPM_OUTPUT("MONOOUT"),
+ SND_SOC_DAPM_OUTPUT("PDML"),
+ SND_SOC_DAPM_OUTPUT("PDMR"),
+};
+
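+/*
+ * DAPM routes connecting the widgets above, including conditional routes
+ * that are only active when the PLL or an ASRC is the clock source.
+ */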
+static const struct snd_soc_dapm_route rt5665_dapm_routes[] = {
+ /* PLL */
+ {"ADC Stereo1 Filter", NULL, "PLL", is_sys_clk_from_pll},
+ {"ADC Stereo2 Filter", NULL, "PLL", is_sys_clk_from_pll},
+ {"ADC Mono Left Filter", NULL, "PLL", is_sys_clk_from_pll},
+ {"ADC Mono Right Filter", NULL, "PLL", is_sys_clk_from_pll},
+ {"DAC Stereo1 Filter", NULL, "PLL", is_sys_clk_from_pll},
+ {"DAC Stereo2 Filter", NULL, "PLL", is_sys_clk_from_pll},
+ {"DAC Mono Left Filter", NULL, "PLL", is_sys_clk_from_pll},
+ {"DAC Mono Right Filter", NULL, "PLL", is_sys_clk_from_pll},
+
+ /* ASRC */
+ {"ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", is_using_asrc},
+ {"ADC Mono Left Filter", NULL, "ADC Mono L ASRC", is_using_asrc},
+ {"ADC Mono Right Filter", NULL, "ADC Mono R ASRC", is_using_asrc},
+ {"DAC Mono Left Filter", NULL, "DAC Mono L ASRC", is_using_asrc},
+ {"DAC Mono Right Filter", NULL, "DAC Mono R ASRC", is_using_asrc},
+ {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc},
+ {"DAC Stereo2 Filter", NULL, "DAC STO2 ASRC", is_using_asrc},
+
+ /* Vref */
+ {"Mic Det Power", NULL, "Vref2"},
+ {"MICBIAS1", NULL, "Vref1"},
+ {"MICBIAS1", NULL, "Vref2"},
+ {"MICBIAS2", NULL, "Vref1"},
+ {"MICBIAS2", NULL, "Vref2"},
+ {"MICBIAS3", NULL, "Vref1"},
+ {"MICBIAS3", NULL, "Vref2"},
+
+ {"Stereo1 DMIC L Mux", NULL, "DMIC STO1 ASRC"},
+ {"Stereo1 DMIC R Mux", NULL, "DMIC STO1 ASRC"},
+ {"Stereo2 DMIC L Mux", NULL, "DMIC STO2 ASRC"},
+ {"Stereo2 DMIC R Mux", NULL, "DMIC STO2 ASRC"},
+ {"Mono DMIC L Mux", NULL, "DMIC MONO L ASRC"},
+ {"Mono DMIC R Mux", NULL, "DMIC MONO R ASRC"},
+
+ {"I2S1_1", NULL, "I2S1 ASRC"},
+ {"I2S1_2", NULL, "I2S1 ASRC"},
+ {"I2S2_1", NULL, "I2S2 ASRC"},
+ {"I2S2_2", NULL, "I2S2 ASRC"},
+ {"I2S3", NULL, "I2S3 ASRC"},
+
+ {"CLKDET SYS", NULL, "CLKDET"},
+ {"CLKDET HP", NULL, "CLKDET"},
+ {"CLKDET MONO", NULL, "CLKDET"},
+ {"CLKDET LOUT", NULL, "CLKDET"},
+
+ {"IN1P", NULL, "LDO2"},
+ {"IN2P", NULL, "LDO2"},
+ {"IN3P", NULL, "LDO2"},
+ {"IN4P", NULL, "LDO2"},
+
+ {"DMIC1", NULL, "DMIC L1"},
+ {"DMIC1", NULL, "DMIC R1"},
+ {"DMIC2", NULL, "DMIC L2"},
+ {"DMIC2", NULL, "DMIC R2"},
+
+ {"BST1", NULL, "IN1P"},
+ {"BST1", NULL, "IN1N"},
+ {"BST1", NULL, "BST1 Power"},
+ {"BST1", NULL, "BST1P Power"},
+ {"BST2", NULL, "IN2P"},
+ {"BST2", NULL, "IN2N"},
+ {"BST2", NULL, "BST2 Power"},
+ {"BST2", NULL, "BST2P Power"},
+ {"BST3", NULL, "IN3P"},
+ {"BST3", NULL, "IN3N"},
+ {"BST3", NULL, "BST3 Power"},
+ {"BST3", NULL, "BST3P Power"},
+ {"BST4", NULL, "IN4P"},
+ {"BST4", NULL, "IN4N"},
+ {"BST4", NULL, "BST4 Power"},
+ {"BST4", NULL, "BST4P Power"},
+ {"BST1 CBJ", NULL, "IN1P"},
+ {"BST1 CBJ", NULL, "IN1N"},
+ {"BST1 CBJ", NULL, "CBJ Power"},
+ {"CBJ Power", NULL, "Vref2"},
+
+ {"INL VOL", NULL, "IN3P"},
+ {"INR VOL", NULL, "IN3N"},
+
+ {"RECMIX1L", "CBJ Switch", "BST1 CBJ"},
+ {"RECMIX1L", "INL Switch", "INL VOL"},
+ {"RECMIX1L", "INR Switch", "INR VOL"},
+ {"RECMIX1L", "BST4 Switch", "BST4"},
+ {"RECMIX1L", "BST3 Switch", "BST3"},
+ {"RECMIX1L", "BST2 Switch", "BST2"},
+ {"RECMIX1L", "BST1 Switch", "BST1"},
+ {"RECMIX1L", NULL, "RECMIX1L Power"},
+
+ {"RECMIX1R", "MONOVOL Switch", "MONOVOL"},
+ {"RECMIX1R", "INR Switch", "INR VOL"},
+ {"RECMIX1R", "BST4 Switch", "BST4"},
+ {"RECMIX1R", "BST3 Switch", "BST3"},
+ {"RECMIX1R", "BST2 Switch", "BST2"},
+ {"RECMIX1R", "BST1 Switch", "BST1"},
+ {"RECMIX1R", NULL, "RECMIX1R Power"},
+
+ {"RECMIX2L", "CBJ Switch", "BST1 CBJ"},
+ {"RECMIX2L", "INL Switch", "INL VOL"},
+ {"RECMIX2L", "INR Switch", "INR VOL"},
+ {"RECMIX2L", "BST4 Switch", "BST4"},
+ {"RECMIX2L", "BST3 Switch", "BST3"},
+ {"RECMIX2L", "BST2 Switch", "BST2"},
+ {"RECMIX2L", "BST1 Switch", "BST1"},
+ {"RECMIX2L", NULL, "RECMIX2L Power"},
+
+ {"RECMIX2R", "MONOVOL Switch", "MONOVOL"},
+ {"RECMIX2R", "INL Switch", "INL VOL"},
+ {"RECMIX2R", "INR Switch", "INR VOL"},
+ {"RECMIX2R", "BST4 Switch", "BST4"},
+ {"RECMIX2R", "BST3 Switch", "BST3"},
+ {"RECMIX2R", "BST2 Switch", "BST2"},
+ {"RECMIX2R", "BST1 Switch", "BST1"},
+ {"RECMIX2R", NULL, "RECMIX2R Power"},
+
+ {"ADC1 L", NULL, "RECMIX1L"},
+ {"ADC1 L", NULL, "ADC1 L Power"},
+ {"ADC1 L", NULL, "ADC1 clock"},
+ {"ADC1 R", NULL, "RECMIX1R"},
+ {"ADC1 R", NULL, "ADC1 R Power"},
+ {"ADC1 R", NULL, "ADC1 clock"},
+
+ {"ADC2 L", NULL, "RECMIX2L"},
+ {"ADC2 L", NULL, "ADC2 L Power"},
+ {"ADC2 L", NULL, "ADC2 clock"},
+ {"ADC2 R", NULL, "RECMIX2R"},
+ {"ADC2 R", NULL, "ADC2 R Power"},
+ {"ADC2 R", NULL, "ADC2 clock"},
+
+ {"DMIC L1", NULL, "DMIC CLK"},
+ {"DMIC L1", NULL, "DMIC1 Power"},
+ {"DMIC R1", NULL, "DMIC CLK"},
+ {"DMIC R1", NULL, "DMIC1 Power"},
+ {"DMIC L2", NULL, "DMIC CLK"},
+ {"DMIC L2", NULL, "DMIC2 Power"},
+ {"DMIC R2", NULL, "DMIC CLK"},
+ {"DMIC R2", NULL, "DMIC2 Power"},
+
+ {"Stereo1 DMIC L Mux", "DMIC1", "DMIC L1"},
+ {"Stereo1 DMIC L Mux", "DMIC2", "DMIC L2"},
+
+ {"Stereo1 DMIC R Mux", "DMIC1", "DMIC R1"},
+ {"Stereo1 DMIC R Mux", "DMIC2", "DMIC R2"},
+
+ {"Mono DMIC L Mux", "DMIC1 L", "DMIC L1"},
+ {"Mono DMIC L Mux", "DMIC2 L", "DMIC L2"},
+
+ {"Mono DMIC R Mux", "DMIC1 R", "DMIC R1"},
+ {"Mono DMIC R Mux", "DMIC2 R", "DMIC R2"},
+
+ {"Stereo2 DMIC L Mux", "DMIC1", "DMIC L1"},
+ {"Stereo2 DMIC L Mux", "DMIC2", "DMIC L2"},
+
+ {"Stereo2 DMIC R Mux", "DMIC1", "DMIC R1"},
+ {"Stereo2 DMIC R Mux", "DMIC2", "DMIC R2"},
+
+ {"Stereo1 ADC L Mux", "ADC1 L", "ADC1 L"},
+ {"Stereo1 ADC L Mux", "ADC1 R", "ADC1 R"},
+ {"Stereo1 ADC L Mux", "ADC2 L", "ADC2 L"},
+ {"Stereo1 ADC L Mux", "ADC2 R", "ADC2 R"},
+ {"Stereo1 ADC R Mux", "ADC1 L", "ADC1 L"},
+ {"Stereo1 ADC R Mux", "ADC1 R", "ADC1 R"},
+ {"Stereo1 ADC R Mux", "ADC2 L", "ADC2 L"},
+ {"Stereo1 ADC R Mux", "ADC2 R", "ADC2 R"},
+
+ {"Stereo1 DD L Mux", "STO2 DAC", "Stereo2 DAC MIXL"},
+ {"Stereo1 DD L Mux", "MONO DAC", "Mono DAC MIXL"},
+
+ {"Stereo1 DD R Mux", "STO2 DAC", "Stereo2 DAC MIXR"},
+ {"Stereo1 DD R Mux", "MONO DAC", "Mono DAC MIXR"},
+
+ {"Stereo1 ADC L1 Mux", "ADC", "Stereo1 ADC L Mux"},
+ {"Stereo1 ADC L1 Mux", "DD Mux", "Stereo1 DD L Mux"},
+ {"Stereo1 ADC L2 Mux", "DMIC", "Stereo1 DMIC L Mux"},
+ {"Stereo1 ADC L2 Mux", "DAC MIX", "DAC MIXL"},
+
+ {"Stereo1 ADC R1 Mux", "ADC", "Stereo1 ADC R Mux"},
+ {"Stereo1 ADC R1 Mux", "DD Mux", "Stereo1 DD R Mux"},
+ {"Stereo1 ADC R2 Mux", "DMIC", "Stereo1 DMIC R Mux"},
+ {"Stereo1 ADC R2 Mux", "DAC MIX", "DAC MIXR"},
+
+ {"Mono ADC L Mux", "ADC1 L", "ADC1 L"},
+ {"Mono ADC L Mux", "ADC1 R", "ADC1 R"},
+ {"Mono ADC L Mux", "ADC2 L", "ADC2 L"},
+ {"Mono ADC L Mux", "ADC2 R", "ADC2 R"},
+
+ {"Mono ADC R Mux", "ADC1 L", "ADC1 L"},
+ {"Mono ADC R Mux", "ADC1 R", "ADC1 R"},
+ {"Mono ADC R Mux", "ADC2 L", "ADC2 L"},
+ {"Mono ADC R Mux", "ADC2 R", "ADC2 R"},
+
+ {"Mono DD L Mux", "STO2 DAC", "Stereo2 DAC MIXL"},
+ {"Mono DD L Mux", "MONO DAC", "Mono DAC MIXL"},
+
+ {"Mono DD R Mux", "STO2 DAC", "Stereo2 DAC MIXR"},
+ {"Mono DD R Mux", "MONO DAC", "Mono DAC MIXR"},
+
+ {"Mono ADC L2 Mux", "DMIC", "Mono DMIC L Mux"},
+ {"Mono ADC L2 Mux", "DAC MIXL", "DAC MIXL"},
+ {"Mono ADC L1 Mux", "DD Mux", "Mono DD L Mux"},
+ {"Mono ADC L1 Mux", "ADC", "Mono ADC L Mux"},
+
+ {"Mono ADC R1 Mux", "DD Mux", "Mono DD R Mux"},
+ {"Mono ADC R1 Mux", "ADC", "Mono ADC R Mux"},
+ {"Mono ADC R2 Mux", "DMIC", "Mono DMIC R Mux"},
+ {"Mono ADC R2 Mux", "DAC MIXR", "DAC MIXR"},
+
+ {"Stereo2 ADC L Mux", "ADC1 L", "ADC1 L"},
+ {"Stereo2 ADC L Mux", "ADC2 L", "ADC2 L"},
+ {"Stereo2 ADC L Mux", "ADC1 R", "ADC1 R"},
+ {"Stereo2 ADC R Mux", "ADC1 L", "ADC1 L"},
+ {"Stereo2 ADC R Mux", "ADC2 L", "ADC2 L"},
+ {"Stereo2 ADC R Mux", "ADC1 R", "ADC1 R"},
+
+ {"Stereo2 DD L Mux", "STO2 DAC", "Stereo2 DAC MIXL"},
+ {"Stereo2 DD L Mux", "MONO DAC", "Mono DAC MIXL"},
+
+ {"Stereo2 DD R Mux", "STO2 DAC", "Stereo2 DAC MIXR"},
+ {"Stereo2 DD R Mux", "MONO DAC", "Mono DAC MIXR"},
+
+ {"Stereo2 ADC L1 Mux", "ADC", "Stereo2 ADC L Mux"},
+ {"Stereo2 ADC L1 Mux", "DD Mux", "Stereo2 DD L Mux"},
+ {"Stereo2 ADC L2 Mux", "DMIC", "Stereo2 DMIC L Mux"},
+ {"Stereo2 ADC L2 Mux", "DAC MIX", "DAC MIXL"},
+
+ {"Stereo2 ADC R1 Mux", "ADC", "Stereo2 ADC R Mux"},
+ {"Stereo2 ADC R1 Mux", "DD Mux", "Stereo2 DD R Mux"},
+ {"Stereo2 ADC R2 Mux", "DMIC", "Stereo2 DMIC R Mux"},
+ {"Stereo2 ADC R2 Mux", "DAC MIX", "DAC MIXR"},
+
+ {"Stereo1 ADC MIXL", "ADC1 Switch", "Stereo1 ADC L1 Mux"},
+ {"Stereo1 ADC MIXL", "ADC2 Switch", "Stereo1 ADC L2 Mux"},
+ {"Stereo1 ADC MIXL", NULL, "ADC Stereo1 Filter"},
+
+ {"Stereo1 ADC MIXR", "ADC1 Switch", "Stereo1 ADC R1 Mux"},
+ {"Stereo1 ADC MIXR", "ADC2 Switch", "Stereo1 ADC R2 Mux"},
+ {"Stereo1 ADC MIXR", NULL, "ADC Stereo1 Filter"},
+
+ {"Mono ADC MIXL", "ADC1 Switch", "Mono ADC L1 Mux"},
+ {"Mono ADC MIXL", "ADC2 Switch", "Mono ADC L2 Mux"},
+ {"Mono ADC MIXL", NULL, "ADC Mono Left Filter"},
+
+ {"Mono ADC MIXR", "ADC1 Switch", "Mono ADC R1 Mux"},
+ {"Mono ADC MIXR", "ADC2 Switch", "Mono ADC R2 Mux"},
+ {"Mono ADC MIXR", NULL, "ADC Mono Right Filter"},
+
+ {"Stereo2 ADC MIXL", "ADC1 Switch", "Stereo2 ADC L1 Mux"},
+ {"Stereo2 ADC MIXL", "ADC2 Switch", "Stereo2 ADC L2 Mux"},
+ {"Stereo2 ADC MIXL", NULL, "ADC Stereo2 Filter"},
+
+ {"Stereo2 ADC MIXR", "ADC1 Switch", "Stereo2 ADC R1 Mux"},
+ {"Stereo2 ADC MIXR", "ADC2 Switch", "Stereo2 ADC R2 Mux"},
+ {"Stereo2 ADC MIXR", NULL, "ADC Stereo2 Filter"},
+
+ {"Stereo1 ADC MIX", NULL, "Stereo1 ADC MIXL"},
+ {"Stereo1 ADC MIX", NULL, "Stereo1 ADC MIXR"},
+ {"Stereo2 ADC MIX", NULL, "Stereo2 ADC MIXL"},
+ {"Stereo2 ADC MIX", NULL, "Stereo2 ADC MIXR"},
+ {"Mono ADC MIX", NULL, "Mono ADC MIXL"},
+ {"Mono ADC MIX", NULL, "Mono ADC MIXR"},
+
+ {"IF1_1_ADC1 Mux", "STO1 ADC", "Stereo1 ADC MIX"},
+ {"IF1_1_ADC1 Mux", "IF2_1 DAC", "IF2_1 DAC"},
+ {"IF1_1_ADC2 Mux", "STO2 ADC", "Stereo2 ADC MIX"},
+ {"IF1_1_ADC2 Mux", "IF2_2 DAC", "IF2_2 DAC"},
+ {"IF1_1_ADC3 Mux", "MONO ADC", "Mono ADC MIX"},
+ {"IF1_1_ADC3 Mux", "IF3 DAC", "IF3 DAC"},
+ {"IF1_1_ADC4", NULL, "DAC1 MIX"},
+
+ {"IF1_2_ADC1 Mux", "STO1 ADC", "Stereo1 ADC MIX"},
+ {"IF1_2_ADC1 Mux", "IF1 DAC", "IF1 DAC1"},
+ {"IF1_2_ADC2 Mux", "STO2 ADC", "Stereo2 ADC MIX"},
+ {"IF1_2_ADC2 Mux", "IF2_1 DAC", "IF2_1 DAC"},
+ {"IF1_2_ADC3 Mux", "MONO ADC", "Mono ADC MIX"},
+ {"IF1_2_ADC3 Mux", "IF2_2 DAC", "IF2_2 DAC"},
+ {"IF1_2_ADC4 Mux", "DAC1", "DAC1 MIX"},
+ {"IF1_2_ADC4 Mux", "IF3 DAC", "IF3 DAC"},
+
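+ /*
+  * TDM capture slot routing: the "1234".."4321" selections are slot-order
+  * permutations; each route maps a TDM slot pair to the IF1 ADC source
+  * that occupies its position in the selected order.
+  */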
+ {"TDM1 slot 01 Data Mux", "1234", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 01 Data Mux", "1243", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 01 Data Mux", "1324", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 01 Data Mux", "1342", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 01 Data Mux", "1432", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 01 Data Mux", "1423", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 01 Data Mux", "2134", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 01 Data Mux", "2143", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 01 Data Mux", "2314", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 01 Data Mux", "2341", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 01 Data Mux", "2431", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 01 Data Mux", "2413", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 01 Data Mux", "3124", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 01 Data Mux", "3142", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 01 Data Mux", "3214", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 01 Data Mux", "3241", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 01 Data Mux", "3412", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 01 Data Mux", "3421", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 01 Data Mux", "4123", "IF1_1_ADC4"},
+ {"TDM1 slot 01 Data Mux", "4132", "IF1_1_ADC4"},
+ {"TDM1 slot 01 Data Mux", "4213", "IF1_1_ADC4"},
+ {"TDM1 slot 01 Data Mux", "4231", "IF1_1_ADC4"},
+ {"TDM1 slot 01 Data Mux", "4312", "IF1_1_ADC4"},
+ {"TDM1 slot 01 Data Mux", "4321", "IF1_1_ADC4"},
+ {"TDM1 slot 01 Data Mux", NULL, "I2S1_1"},
+
+ {"TDM1 slot 23 Data Mux", "1234", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 23 Data Mux", "1243", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 23 Data Mux", "1324", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 23 Data Mux", "1342", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 23 Data Mux", "1432", "IF1_1_ADC4"},
+ {"TDM1 slot 23 Data Mux", "1423", "IF1_1_ADC4"},
+ {"TDM1 slot 23 Data Mux", "2134", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 23 Data Mux", "2143", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 23 Data Mux", "2314", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 23 Data Mux", "2341", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 23 Data Mux", "2431", "IF1_1_ADC4"},
+ {"TDM1 slot 23 Data Mux", "2413", "IF1_1_ADC4"},
+ {"TDM1 slot 23 Data Mux", "3124", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 23 Data Mux", "3142", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 23 Data Mux", "3214", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 23 Data Mux", "3241", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 23 Data Mux", "3412", "IF1_1_ADC4"},
+ {"TDM1 slot 23 Data Mux", "3421", "IF1_1_ADC4"},
+ {"TDM1 slot 23 Data Mux", "4123", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 23 Data Mux", "4132", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 23 Data Mux", "4213", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 23 Data Mux", "4231", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 23 Data Mux", "4312", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 23 Data Mux", "4321", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 23 Data Mux", NULL, "I2S1_1"},
+
+ {"TDM1 slot 45 Data Mux", "1234", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 45 Data Mux", "1243", "IF1_1_ADC4"},
+ {"TDM1 slot 45 Data Mux", "1324", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 45 Data Mux", "1342", "IF1_1_ADC4"},
+ {"TDM1 slot 45 Data Mux", "1432", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 45 Data Mux", "1423", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 45 Data Mux", "2134", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 45 Data Mux", "2143", "IF1_1_ADC4"},
+ {"TDM1 slot 45 Data Mux", "2314", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 45 Data Mux", "2341", "IF1_1_ADC4"},
+ {"TDM1 slot 45 Data Mux", "2431", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 45 Data Mux", "2413", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 45 Data Mux", "3124", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 45 Data Mux", "3142", "IF1_1_ADC4"},
+ {"TDM1 slot 45 Data Mux", "3214", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 45 Data Mux", "3241", "IF1_1_ADC4"},
+ {"TDM1 slot 45 Data Mux", "3412", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 45 Data Mux", "3421", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 45 Data Mux", "4123", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 45 Data Mux", "4132", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 45 Data Mux", "4213", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 45 Data Mux", "4231", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 45 Data Mux", "4312", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 45 Data Mux", "4321", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 45 Data Mux", NULL, "I2S1_1"},
+
+ {"TDM1 slot 67 Data Mux", "1234", "IF1_1_ADC4"},
+ {"TDM1 slot 67 Data Mux", "1243", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 67 Data Mux", "1324", "IF1_1_ADC4"},
+ {"TDM1 slot 67 Data Mux", "1342", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 67 Data Mux", "1432", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 67 Data Mux", "1423", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 67 Data Mux", "2134", "IF1_1_ADC4"},
+ {"TDM1 slot 67 Data Mux", "2143", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 67 Data Mux", "2314", "IF1_1_ADC4"},
+ {"TDM1 slot 67 Data Mux", "2341", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 67 Data Mux", "2431", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 67 Data Mux", "2413", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 67 Data Mux", "3124", "IF1_1_ADC4"},
+ {"TDM1 slot 67 Data Mux", "3142", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 67 Data Mux", "3214", "IF1_1_ADC4"},
+ {"TDM1 slot 67 Data Mux", "3241", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 67 Data Mux", "3412", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 67 Data Mux", "3421", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 67 Data Mux", "4123", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 67 Data Mux", "4132", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 67 Data Mux", "4213", "IF1_1_ADC3 Mux"},
+ {"TDM1 slot 67 Data Mux", "4231", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 67 Data Mux", "4312", "IF1_1_ADC2 Mux"},
+ {"TDM1 slot 67 Data Mux", "4321", "IF1_1_ADC1 Mux"},
+ {"TDM1 slot 67 Data Mux", NULL, "I2S1_1"},
+
+ {"TDM2 slot 01 Data Mux", "1234", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 01 Data Mux", "1243", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 01 Data Mux", "1324", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 01 Data Mux", "1342", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 01 Data Mux", "1432", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 01 Data Mux", "1423", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 01 Data Mux", "2134", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 01 Data Mux", "2143", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 01 Data Mux", "2314", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 01 Data Mux", "2341", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 01 Data Mux", "2431", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 01 Data Mux", "2413", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 01 Data Mux", "3124", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 01 Data Mux", "3142", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 01 Data Mux", "3214", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 01 Data Mux", "3241", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 01 Data Mux", "3412", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 01 Data Mux", "3421", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 01 Data Mux", "4123", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 01 Data Mux", "4132", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 01 Data Mux", "4213", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 01 Data Mux", "4231", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 01 Data Mux", "4312", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 01 Data Mux", "4321", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 01 Data Mux", NULL, "I2S1_2"},
+
+ {"TDM2 slot 23 Data Mux", "1234", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 23 Data Mux", "1243", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 23 Data Mux", "1324", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 23 Data Mux", "1342", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 23 Data Mux", "1432", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 23 Data Mux", "1423", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 23 Data Mux", "2134", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 23 Data Mux", "2143", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 23 Data Mux", "2314", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 23 Data Mux", "2341", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 23 Data Mux", "2431", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 23 Data Mux", "2413", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 23 Data Mux", "3124", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 23 Data Mux", "3142", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 23 Data Mux", "3214", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 23 Data Mux", "3241", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 23 Data Mux", "3412", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 23 Data Mux", "3421", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 23 Data Mux", "4123", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 23 Data Mux", "4132", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 23 Data Mux", "4213", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 23 Data Mux", "4231", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 23 Data Mux", "4312", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 23 Data Mux", "4321", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 23 Data Mux", NULL, "I2S1_2"},
+
+ {"TDM2 slot 45 Data Mux", "1234", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 45 Data Mux", "1243", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 45 Data Mux", "1324", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 45 Data Mux", "1342", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 45 Data Mux", "1432", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 45 Data Mux", "1423", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 45 Data Mux", "2134", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 45 Data Mux", "2143", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 45 Data Mux", "2314", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 45 Data Mux", "2341", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 45 Data Mux", "2431", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 45 Data Mux", "2413", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 45 Data Mux", "3124", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 45 Data Mux", "3142", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 45 Data Mux", "3214", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 45 Data Mux", "3241", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 45 Data Mux", "3412", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 45 Data Mux", "3421", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 45 Data Mux", "4123", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 45 Data Mux", "4132", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 45 Data Mux", "4213", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 45 Data Mux", "4231", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 45 Data Mux", "4312", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 45 Data Mux", "4321", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 45 Data Mux", NULL, "I2S1_2"},
+
+ {"TDM2 slot 67 Data Mux", "1234", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 67 Data Mux", "1243", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 67 Data Mux", "1324", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 67 Data Mux", "1342", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 67 Data Mux", "1432", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 67 Data Mux", "1423", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 67 Data Mux", "2134", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 67 Data Mux", "2143", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 67 Data Mux", "2314", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 67 Data Mux", "2341", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 67 Data Mux", "2431", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 67 Data Mux", "2413", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 67 Data Mux", "3124", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 67 Data Mux", "3142", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 67 Data Mux", "3214", "IF1_2_ADC4 Mux"},
+ {"TDM2 slot 67 Data Mux", "3241", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 67 Data Mux", "3412", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 67 Data Mux", "3421", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 67 Data Mux", "4123", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 67 Data Mux", "4132", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 67 Data Mux", "4213", "IF1_2_ADC3 Mux"},
+ {"TDM2 slot 67 Data Mux", "4231", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 67 Data Mux", "4312", "IF1_2_ADC2 Mux"},
+ {"TDM2 slot 67 Data Mux", "4321", "IF1_2_ADC1 Mux"},
+ {"TDM2 slot 67 Data Mux", NULL, "I2S1_2"},
+
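+ /*
+  * Per-slot channel swap: each swap mux passes the slot pair as-is (L/R),
+  * swaps the channels (R/L), or duplicates the left or right channel.
+  */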
+ {"IF1_1 0 ADC Swap Mux", "L/R", "TDM1 slot 01 Data Mux"},
+ {"IF1_1 0 ADC Swap Mux", "L/L", "TDM1 slot 01 Data Mux"},
+ {"IF1_1 1 ADC Swap Mux", "R/L", "TDM1 slot 01 Data Mux"},
+ {"IF1_1 1 ADC Swap Mux", "R/R", "TDM1 slot 01 Data Mux"},
+ {"IF1_1 2 ADC Swap Mux", "L/R", "TDM1 slot 23 Data Mux"},
+ {"IF1_1 2 ADC Swap Mux", "R/L", "TDM1 slot 23 Data Mux"},
+ {"IF1_1 3 ADC Swap Mux", "L/L", "TDM1 slot 23 Data Mux"},
+ {"IF1_1 3 ADC Swap Mux", "R/R", "TDM1 slot 23 Data Mux"},
+ {"IF1_1 4 ADC Swap Mux", "L/R", "TDM1 slot 45 Data Mux"},
+ {"IF1_1 4 ADC Swap Mux", "R/L", "TDM1 slot 45 Data Mux"},
+ {"IF1_1 5 ADC Swap Mux", "L/L", "TDM1 slot 45 Data Mux"},
+ {"IF1_1 5 ADC Swap Mux", "R/R", "TDM1 slot 45 Data Mux"},
+ {"IF1_1 6 ADC Swap Mux", "L/R", "TDM1 slot 67 Data Mux"},
+ {"IF1_1 6 ADC Swap Mux", "R/L", "TDM1 slot 67 Data Mux"},
+ {"IF1_1 7 ADC Swap Mux", "L/L", "TDM1 slot 67 Data Mux"},
+ {"IF1_1 7 ADC Swap Mux", "R/R", "TDM1 slot 67 Data Mux"},
+ {"IF1_2 0 ADC Swap Mux", "L/R", "TDM2 slot 01 Data Mux"},
+ {"IF1_2 0 ADC Swap Mux", "R/L", "TDM2 slot 01 Data Mux"},
+ {"IF1_2 1 ADC Swap Mux", "L/L", "TDM2 slot 01 Data Mux"},
+ {"IF1_2 1 ADC Swap Mux", "R/R", "TDM2 slot 01 Data Mux"},
+ {"IF1_2 2 ADC Swap Mux", "L/R", "TDM2 slot 23 Data Mux"},
+ {"IF1_2 2 ADC Swap Mux", "R/L", "TDM2 slot 23 Data Mux"},
+ {"IF1_2 3 ADC Swap Mux", "L/L", "TDM2 slot 23 Data Mux"},
+ {"IF1_2 3 ADC Swap Mux", "R/R", "TDM2 slot 23 Data Mux"},
+ {"IF1_2 4 ADC Swap Mux", "L/R", "TDM2 slot 45 Data Mux"},
+ {"IF1_2 4 ADC Swap Mux", "R/L", "TDM2 slot 45 Data Mux"},
+ {"IF1_2 5 ADC Swap Mux", "L/L", "TDM2 slot 45 Data Mux"},
+ {"IF1_2 5 ADC Swap Mux", "R/R", "TDM2 slot 45 Data Mux"},
+ {"IF1_2 6 ADC Swap Mux", "L/R", "TDM2 slot 67 Data Mux"},
+ {"IF1_2 6 ADC Swap Mux", "R/L", "TDM2 slot 67 Data Mux"},
+ {"IF1_2 7 ADC Swap Mux", "L/L", "TDM2 slot 67 Data Mux"},
+ {"IF1_2 7 ADC Swap Mux", "R/R", "TDM2 slot 67 Data Mux"},
+
+ {"IF2_1 ADC Mux", "STO1 ADC", "Stereo1 ADC MIX"},
+ {"IF2_1 ADC Mux", "STO2 ADC", "Stereo2 ADC MIX"},
+ {"IF2_1 ADC Mux", "MONO ADC", "Mono ADC MIX"},
+ {"IF2_1 ADC Mux", "IF1 DAC1", "IF1 DAC1"},
+ {"IF2_1 ADC Mux", "IF1 DAC2", "IF1 DAC2"},
+ {"IF2_1 ADC Mux", "IF2_2 DAC", "IF2_2 DAC"},
+ {"IF2_1 ADC Mux", "IF3 DAC", "IF3 DAC"},
+ {"IF2_1 ADC Mux", "DAC1 MIX", "DAC1 MIX"},
+ {"IF2_1 ADC", NULL, "IF2_1 ADC Mux"},
+ {"IF2_1 ADC", NULL, "I2S2_1"},
+
+ {"IF2_2 ADC Mux", "STO1 ADC", "Stereo1 ADC MIX"},
+ {"IF2_2 ADC Mux", "STO2 ADC", "Stereo2 ADC MIX"},
+ {"IF2_2 ADC Mux", "MONO ADC", "Mono ADC MIX"},
+ {"IF2_2 ADC Mux", "IF1 DAC1", "IF1 DAC1"},
+ {"IF2_2 ADC Mux", "IF1 DAC2", "IF1 DAC2"},
+ {"IF2_2 ADC Mux", "IF2_1 DAC", "IF2_1 DAC"},
+ {"IF2_2 ADC Mux", "IF3 DAC", "IF3 DAC"},
+ {"IF2_2 ADC Mux", "DAC1 MIX", "DAC1 MIX"},
+ {"IF2_2 ADC", NULL, "IF2_2 ADC Mux"},
+ {"IF2_2 ADC", NULL, "I2S2_2"},
+
+ {"IF3 ADC Mux", "STO1 ADC", "Stereo1 ADC MIX"},
+ {"IF3 ADC Mux", "STO2 ADC", "Stereo2 ADC MIX"},
+ {"IF3 ADC Mux", "MONO ADC", "Mono ADC MIX"},
+ {"IF3 ADC Mux", "IF1 DAC1", "IF1 DAC1"},
+ {"IF3 ADC Mux", "IF1 DAC2", "IF1 DAC2"},
+ {"IF3 ADC Mux", "IF2_1 DAC", "IF2_1 DAC"},
+ {"IF3 ADC Mux", "IF2_2 DAC", "IF2_2 DAC"},
+ {"IF3 ADC Mux", "DAC1 MIX", "DAC1 MIX"},
+ {"IF3 ADC", NULL, "IF3 ADC Mux"},
+ {"IF3 ADC", NULL, "I2S3"},
+
+ {"AIF1_1TX slot 0", NULL, "IF1_1 0 ADC Swap Mux"},
+ {"AIF1_1TX slot 1", NULL, "IF1_1 1 ADC Swap Mux"},
+ {"AIF1_1TX slot 2", NULL, "IF1_1 2 ADC Swap Mux"},
+ {"AIF1_1TX slot 3", NULL, "IF1_1 3 ADC Swap Mux"},
+ {"AIF1_1TX slot 4", NULL, "IF1_1 4 ADC Swap Mux"},
+ {"AIF1_1TX slot 5", NULL, "IF1_1 5 ADC Swap Mux"},
+ {"AIF1_1TX slot 6", NULL, "IF1_1 6 ADC Swap Mux"},
+ {"AIF1_1TX slot 7", NULL, "IF1_1 7 ADC Swap Mux"},
+ {"AIF1_2TX slot 0", NULL, "IF1_2 0 ADC Swap Mux"},
+ {"AIF1_2TX slot 1", NULL, "IF1_2 1 ADC Swap Mux"},
+ {"AIF1_2TX slot 2", NULL, "IF1_2 2 ADC Swap Mux"},
+ {"AIF1_2TX slot 3", NULL, "IF1_2 3 ADC Swap Mux"},
+ {"AIF1_2TX slot 4", NULL, "IF1_2 4 ADC Swap Mux"},
+ {"AIF1_2TX slot 5", NULL, "IF1_2 5 ADC Swap Mux"},
+ {"AIF1_2TX slot 6", NULL, "IF1_2 6 ADC Swap Mux"},
+ {"AIF1_2TX slot 7", NULL, "IF1_2 7 ADC Swap Mux"},
+ {"IF2_1 ADC Swap Mux", "L/R", "IF2_1 ADC"},
+ {"IF2_1 ADC Swap Mux", "R/L", "IF2_1 ADC"},
+ {"IF2_1 ADC Swap Mux", "L/L", "IF2_1 ADC"},
+ {"IF2_1 ADC Swap Mux", "R/R", "IF2_1 ADC"},
+ {"AIF2_1TX", NULL, "IF2_1 ADC Swap Mux"},
+ {"IF2_2 ADC Swap Mux", "L/R", "IF2_2 ADC"},
+ {"IF2_2 ADC Swap Mux", "R/L", "IF2_2 ADC"},
+ {"IF2_2 ADC Swap Mux", "L/L", "IF2_2 ADC"},
+ {"IF2_2 ADC Swap Mux", "R/R", "IF2_2 ADC"},
+ {"AIF2_2TX", NULL, "IF2_2 ADC Swap Mux"},
+ {"IF3 ADC Swap Mux", "L/R", "IF3 ADC"},
+ {"IF3 ADC Swap Mux", "R/L", "IF3 ADC"},
+ {"IF3 ADC Swap Mux", "L/L", "IF3 ADC"},
+ {"IF3 ADC Swap Mux", "R/R", "IF3 ADC"},
+ {"AIF3TX", NULL, "IF3 ADC Swap Mux"},
+
+ {"IF1 DAC1", NULL, "AIF1RX"},
+ {"IF1 DAC2", NULL, "AIF1RX"},
+ {"IF1 DAC3", NULL, "AIF1RX"},
+ {"IF2_1 DAC Swap Mux", "L/R", "AIF2_1RX"},
+ {"IF2_1 DAC Swap Mux", "R/L", "AIF2_1RX"},
+ {"IF2_1 DAC Swap Mux", "L/L", "AIF2_1RX"},
+ {"IF2_1 DAC Swap Mux", "R/R", "AIF2_1RX"},
+ {"IF2_2 DAC Swap Mux", "L/R", "AIF2_2RX"},
+ {"IF2_2 DAC Swap Mux", "R/L", "AIF2_2RX"},
+ {"IF2_2 DAC Swap Mux", "L/L", "AIF2_2RX"},
+ {"IF2_2 DAC Swap Mux", "R/R", "AIF2_2RX"},
+ {"IF2_1 DAC", NULL, "IF2_1 DAC Swap Mux"},
+ {"IF2_2 DAC", NULL, "IF2_2 DAC Swap Mux"},
+ {"IF3 DAC Swap Mux", "L/R", "AIF3RX"},
+ {"IF3 DAC Swap Mux", "R/L", "AIF3RX"},
+ {"IF3 DAC Swap Mux", "L/L", "AIF3RX"},
+ {"IF3 DAC Swap Mux", "R/R", "AIF3RX"},
+ {"IF3 DAC", NULL, "IF3 DAC Swap Mux"},
+
+ {"IF1 DAC1", NULL, "I2S1_1"},
+ {"IF1 DAC2", NULL, "I2S1_1"},
+ {"IF1 DAC3", NULL, "I2S1_1"},
+ {"IF2_1 DAC", NULL, "I2S2_1"},
+ {"IF2_2 DAC", NULL, "I2S2_2"},
+ {"IF3 DAC", NULL, "I2S3"},
+
+ {"IF1 DAC1 L", NULL, "IF1 DAC1"},
+ {"IF1 DAC1 R", NULL, "IF1 DAC1"},
+ {"IF1 DAC2 L", NULL, "IF1 DAC2"},
+ {"IF1 DAC2 R", NULL, "IF1 DAC2"},
+ {"IF1 DAC3 L", NULL, "IF1 DAC3"},
+ {"IF1 DAC3 R", NULL, "IF1 DAC3"},
+ {"IF2_1 DAC L", NULL, "IF2_1 DAC"},
+ {"IF2_1 DAC R", NULL, "IF2_1 DAC"},
+ {"IF2_2 DAC L", NULL, "IF2_2 DAC"},
+ {"IF2_2 DAC R", NULL, "IF2_2 DAC"},
+ {"IF3 DAC L", NULL, "IF3 DAC"},
+ {"IF3 DAC R", NULL, "IF3 DAC"},
+
+ {"DAC L1 Mux", "IF1 DAC1", "IF1 DAC1 L"},
+ {"DAC L1 Mux", "IF2_1 DAC", "IF2_1 DAC L"},
+ {"DAC L1 Mux", "IF2_2 DAC", "IF2_2 DAC L"},
+ {"DAC L1 Mux", "IF3 DAC", "IF3 DAC L"},
+ {"DAC L1 Mux", NULL, "DAC Stereo1 Filter"},
+
+ {"DAC R1 Mux", "IF1 DAC1", "IF1 DAC1 R"},
+ {"DAC R1 Mux", "IF2_1 DAC", "IF2_1 DAC R"},
+ {"DAC R1 Mux", "IF2_2 DAC", "IF2_2 DAC R"},
+ {"DAC R1 Mux", "IF3 DAC", "IF3 DAC R"},
+ {"DAC R1 Mux", NULL, "DAC Stereo1 Filter"},
+
+ {"DAC1 MIXL", "Stereo ADC Switch", "Stereo1 ADC MIXL"},
+ {"DAC1 MIXL", "DAC1 Switch", "DAC L1 Mux"},
+ {"DAC1 MIXR", "Stereo ADC Switch", "Stereo1 ADC MIXR"},
+ {"DAC1 MIXR", "DAC1 Switch", "DAC R1 Mux"},
+
+ {"DAC1 MIX", NULL, "DAC1 MIXL"},
+ {"DAC1 MIX", NULL, "DAC1 MIXR"},
+
+ {"DAC L2 Mux", "IF1 DAC2", "IF1 DAC2 L"},
+ {"DAC L2 Mux", "IF2_1 DAC", "IF2_1 DAC L"},
+ {"DAC L2 Mux", "IF2_2 DAC", "IF2_2 DAC L"},
+ {"DAC L2 Mux", "IF3 DAC", "IF3 DAC L"},
+ {"DAC L2 Mux", "Mono ADC MIX", "Mono ADC MIXL"},
+ {"DAC L2 Mux", NULL, "DAC Mono Left Filter"},
+
+ {"DAC R2 Mux", "IF1 DAC2", "IF1 DAC2 R"},
+ {"DAC R2 Mux", "IF2_1 DAC", "IF2_1 DAC R"},
+ {"DAC R2 Mux", "IF2_2 DAC", "IF2_2 DAC R"},
+ {"DAC R2 Mux", "IF3 DAC", "IF3 DAC R"},
+ {"DAC R2 Mux", "Mono ADC MIX", "Mono ADC MIXR"},
+ {"DAC R2 Mux", NULL, "DAC Mono Right Filter"},
+
+ {"DAC L3 Mux", "IF1 DAC2", "IF1 DAC2 L"},
+ {"DAC L3 Mux", "IF2_1 DAC", "IF2_1 DAC L"},
+ {"DAC L3 Mux", "IF2_2 DAC", "IF2_2 DAC L"},
+ {"DAC L3 Mux", "IF3 DAC", "IF3 DAC L"},
+ {"DAC L3 Mux", "STO2 ADC MIX", "Stereo2 ADC MIXL"},
+ {"DAC L3 Mux", NULL, "DAC Stereo2 Filter"},
+
+ {"DAC R3 Mux", "IF1 DAC2", "IF1 DAC2 R"},
+ {"DAC R3 Mux", "IF2_1 DAC", "IF2_1 DAC R"},
+ {"DAC R3 Mux", "IF2_2 DAC", "IF2_2 DAC R"},
+ {"DAC R3 Mux", "IF3 DAC", "IF3 DAC R"},
+ {"DAC R3 Mux", "STO2 ADC MIX", "Stereo2 ADC MIXR"},
+ {"DAC R3 Mux", NULL, "DAC Stereo2 Filter"},
+
+ {"Stereo1 DAC MIXL", "DAC L1 Switch", "DAC1 MIXL"},
+ {"Stereo1 DAC MIXL", "DAC R1 Switch", "DAC1 MIXR"},
+ {"Stereo1 DAC MIXL", "DAC L2 Switch", "DAC L2 Mux"},
+ {"Stereo1 DAC MIXL", "DAC R2 Switch", "DAC R2 Mux"},
+
+ {"Stereo1 DAC MIXR", "DAC R1 Switch", "DAC1 MIXR"},
+ {"Stereo1 DAC MIXR", "DAC L1 Switch", "DAC1 MIXL"},
+ {"Stereo1 DAC MIXR", "DAC L2 Switch", "DAC L2 Mux"},
+ {"Stereo1 DAC MIXR", "DAC R2 Switch", "DAC R2 Mux"},
+
+ {"Stereo2 DAC MIXL", "DAC L1 Switch", "DAC1 MIXL"},
+ {"Stereo2 DAC MIXL", "DAC L2 Switch", "DAC L2 Mux"},
+ {"Stereo2 DAC MIXL", "DAC L3 Switch", "DAC L3 Mux"},
+
+ {"Stereo2 DAC MIXR", "DAC R1 Switch", "DAC1 MIXR"},
+ {"Stereo2 DAC MIXR", "DAC R2 Switch", "DAC R2 Mux"},
+ {"Stereo2 DAC MIXR", "DAC R3 Switch", "DAC R3 Mux"},
+
+ {"Mono DAC MIXL", "DAC L1 Switch", "DAC1 MIXL"},
+ {"Mono DAC MIXL", "DAC R1 Switch", "DAC1 MIXR"},
+ {"Mono DAC MIXL", "DAC L2 Switch", "DAC L2 Mux"},
+ {"Mono DAC MIXL", "DAC R2 Switch", "DAC R2 Mux"},
+ {"Mono DAC MIXR", "DAC L1 Switch", "DAC1 MIXL"},
+ {"Mono DAC MIXR", "DAC R1 Switch", "DAC1 MIXR"},
+ {"Mono DAC MIXR", "DAC L2 Switch", "DAC L2 Mux"},
+ {"Mono DAC MIXR", "DAC R2 Switch", "DAC R2 Mux"},
+
+ {"DAC MIXL", "Stereo1 DAC Mixer", "Stereo1 DAC MIXL"},
+ {"DAC MIXL", "Stereo2 DAC Mixer", "Stereo2 DAC MIXL"},
+ {"DAC MIXL", "Mono DAC Mixer", "Mono DAC MIXL"},
+ {"DAC MIXR", "Stereo1 DAC Mixer", "Stereo1 DAC MIXR"},
+ {"DAC MIXR", "Stereo2 DAC Mixer", "Stereo2 DAC MIXR"},
+ {"DAC MIXR", "Mono DAC Mixer", "Mono DAC MIXR"},
+
+ {"DAC L1 Source", "DAC1", "DAC1 MIXL"},
+ {"DAC L1 Source", "Stereo1 DAC Mixer", "Stereo1 DAC MIXL"},
+ {"DAC L1 Source", "DMIC1", "DMIC L1"},
+ {"DAC R1 Source", "DAC1", "DAC1 MIXR"},
+ {"DAC R1 Source", "Stereo1 DAC Mixer", "Stereo1 DAC MIXR"},
+ {"DAC R1 Source", "DMIC1", "DMIC R1"},
+
+ {"DAC L2 Source", "DAC2", "DAC L2 Mux"},
+ {"DAC L2 Source", "Mono DAC Mixer", "Mono DAC MIXL"},
+ {"DAC L2 Source", NULL, "DAC L2 Power"},
+ {"DAC R2 Source", "DAC2", "DAC R2 Mux"},
+ {"DAC R2 Source", "Mono DAC Mixer", "Mono DAC MIXR"},
+ {"DAC R2 Source", NULL, "DAC R2 Power"},
+
+ {"DAC L1", NULL, "DAC L1 Source"},
+ {"DAC R1", NULL, "DAC R1 Source"},
+ {"DAC L2", NULL, "DAC L2 Source"},
+ {"DAC R2", NULL, "DAC R2 Source"},
+
+ {"DAC L1", NULL, "DAC 1 Clock"},
+ {"DAC R1", NULL, "DAC 1 Clock"},
+ {"DAC L2", NULL, "DAC 2 Clock"},
+ {"DAC R2", NULL, "DAC 2 Clock"},
+
+ {"MONOVOL MIX", "DAC L2 Switch", "DAC L2"},
+ {"MONOVOL MIX", "RECMIX2L Switch", "RECMIX2L"},
+ {"MONOVOL MIX", "BST1 Switch", "BST1"},
+ {"MONOVOL MIX", "BST2 Switch", "BST2"},
+ {"MONOVOL MIX", "BST3 Switch", "BST3"},
+
+ {"OUT MIXL", "DAC L2 Switch", "DAC L2"},
+ {"OUT MIXL", "INL Switch", "INL VOL"},
+ {"OUT MIXL", "BST1 Switch", "BST1"},
+ {"OUT MIXL", "BST2 Switch", "BST2"},
+ {"OUT MIXL", "BST3 Switch", "BST3"},
+ {"OUT MIXR", "DAC R2 Switch", "DAC R2"},
+ {"OUT MIXR", "INR Switch", "INR VOL"},
+ {"OUT MIXR", "BST2 Switch", "BST2"},
+ {"OUT MIXR", "BST3 Switch", "BST3"},
+ {"OUT MIXR", "BST4 Switch", "BST4"},
+
+ {"MONOVOL", "Switch", "MONOVOL MIX"},
+ {"Mono MIX", "DAC L2 Switch", "DAC L2"},
+ {"Mono MIX", "MONOVOL Switch", "MONOVOL"},
+ {"Mono Amp", NULL, "Mono MIX"},
+ {"Mono Amp", NULL, "Vref2"},
+ {"Mono Amp", NULL, "CLKDET SYS"},
+ {"Mono Amp", NULL, "CLKDET MONO"},
+ {"Mono Playback", "Switch", "Mono Amp"},
+ {"MONOOUT", NULL, "Mono Playback"},
+
+ {"HP Amp", NULL, "DAC L1"},
+ {"HP Amp", NULL, "DAC R1"},
+ {"HP Amp", NULL, "Charge Pump"},
+ {"HP Amp", NULL, "CLKDET SYS"},
+ {"HP Amp", NULL, "CLKDET HP"},
+ {"HP Amp", NULL, "CBJ Power"},
+ {"HP Amp", NULL, "Vref2"},
+ {"HPO Playback", "Switch", "HP Amp"},
+ {"HPOL", NULL, "HPO Playback"},
+ {"HPOR", NULL, "HPO Playback"},
+
+ {"OUTVOL L", "Switch", "OUT MIXL"},
+ {"OUTVOL R", "Switch", "OUT MIXR"},
+ {"LOUT L MIX", "DAC L2 Switch", "DAC L2"},
+ {"LOUT L MIX", "OUTVOL L Switch", "OUTVOL L"},
+ {"LOUT R MIX", "DAC R2 Switch", "DAC R2"},
+ {"LOUT R MIX", "OUTVOL R Switch", "OUTVOL R"},
+ {"LOUT Amp", NULL, "LOUT L MIX"},
+ {"LOUT Amp", NULL, "LOUT R MIX"},
+ {"LOUT Amp", NULL, "Vref1"},
+ {"LOUT Amp", NULL, "Vref2"},
+ {"LOUT Amp", NULL, "CLKDET SYS"},
+ {"LOUT Amp", NULL, "CLKDET LOUT"},
+ {"LOUT L Playback", "Switch", "LOUT Amp"},
+ {"LOUT R Playback", "Switch", "LOUT Amp"},
+ {"LOUTL", NULL, "LOUT L Playback"},
+ {"LOUTR", NULL, "LOUT R Playback"},
+
+ {"PDM L Mux", "Mono DAC", "Mono DAC MIXL"},
+ {"PDM L Mux", "Stereo1 DAC", "Stereo1 DAC MIXL"},
+ {"PDM L Mux", "Stereo2 DAC", "Stereo2 DAC MIXL"},
+ {"PDM L Mux", NULL, "PDM Power"},
+ {"PDM R Mux", "Mono DAC", "Mono DAC MIXR"},
+ {"PDM R Mux", "Stereo1 DAC", "Stereo1 DAC MIXR"},
+ {"PDM R Mux", "Stereo2 DAC", "Stereo2 DAC MIXR"},
+ {"PDM R Mux", NULL, "PDM Power"},
+ {"PDM L Playback", "Switch", "PDM L Mux"},
+ {"PDM R Playback", "Switch", "PDM R Mux"},
+ {"PDML", NULL, "PDM L Playback"},
+ {"PDMR", NULL, "PDM R Playback"},
+};
+
+static int rt5665_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+ unsigned int val_len = 0, val_clk, mask_clk, val_bits = 0x0100;
+ int pre_div, frame_size;
+
+ rt5665->lrck[dai->id] = params_rate(params);
+ pre_div = rl6231_get_clk_info(rt5665->sysclk, rt5665->lrck[dai->id]);
+ if (pre_div < 0) {
+ dev_err(codec->dev, "Unsupported clock setting %d for DAI %d\n",
+ rt5665->lrck[dai->id], dai->id);
+ return -EINVAL;
+ }
+ frame_size = snd_soc_params_to_frame_size(params);
+ if (frame_size < 0) {
+ dev_err(codec->dev, "Unsupported frame size: %d\n", frame_size);
+ return -EINVAL;
+ }
+
+ dev_dbg(dai->dev, "lrck is %dHz and pre_div is %d for iis %d\n",
+ rt5665->lrck[dai->id], pre_div, dai->id);
+
+ switch (params_width(params)) {
+ case 16:
+ val_bits = 0x0100;
+ break;
+ case 20:
+ val_len |= RT5665_I2S_DL_20;
+ val_bits = 0x1300;
+ break;
+ case 24:
+ val_len |= RT5665_I2S_DL_24;
+ val_bits = 0x2500;
+ break;
+ case 8:
+ val_len |= RT5665_I2S_DL_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dai->id) {
+ case RT5665_AIF1_1:
+ case RT5665_AIF1_2:
+ mask_clk = RT5665_I2S_PD1_MASK;
+ val_clk = pre_div << RT5665_I2S_PD1_SFT;
+ snd_soc_update_bits(codec, RT5665_I2S1_SDP,
+ RT5665_I2S_DL_MASK, val_len);
+ break;
+ case RT5665_AIF2_1:
+ case RT5665_AIF2_2:
+ mask_clk = RT5665_I2S_PD2_MASK;
+ val_clk = pre_div << RT5665_I2S_PD2_SFT;
+ snd_soc_update_bits(codec, RT5665_I2S2_SDP,
+ RT5665_I2S_DL_MASK, val_len);
+ break;
+ case RT5665_AIF3:
+ mask_clk = RT5665_I2S_PD3_MASK;
+ val_clk = pre_div << RT5665_I2S_PD3_SFT;
+ snd_soc_update_bits(codec, RT5665_I2S3_SDP,
+ RT5665_I2S_DL_MASK, val_len);
+ break;
+ default:
+ dev_err(codec->dev, "Invalid dai->id: %d\n", dai->id);
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, RT5665_ADDA_CLK_1, mask_clk, val_clk);
+ snd_soc_update_bits(codec, RT5665_STO1_DAC_SIL_DET, 0x3700, val_bits);
+
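+ /*
+  * Scale the DAC/ADC oversampling ratio with the sample rate:
+  * 32x at 192kHz, 64x at 96kHz, 128x otherwise.
+  */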
+ switch (rt5665->lrck[dai->id]) {
+ case 192000:
+ snd_soc_update_bits(codec, RT5665_ADDA_CLK_1,
+ RT5665_DAC_OSR_MASK | RT5665_ADC_OSR_MASK,
+ RT5665_DAC_OSR_32 | RT5665_ADC_OSR_32);
+ break;
+ case 96000:
+ snd_soc_update_bits(codec, RT5665_ADDA_CLK_1,
+ RT5665_DAC_OSR_MASK | RT5665_ADC_OSR_MASK,
+ RT5665_DAC_OSR_64 | RT5665_ADC_OSR_64);
+ break;
+ default:
+ snd_soc_update_bits(codec, RT5665_ADDA_CLK_1,
+ RT5665_DAC_OSR_MASK | RT5665_ADC_OSR_MASK,
+ RT5665_DAC_OSR_128 | RT5665_ADC_OSR_128);
+ break;
+ }
+
+ return 0;
+}
+
+static int rt5665_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+ unsigned int reg_val = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ rt5665->master[dai->id] = 1;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ reg_val |= RT5665_I2S_MS_S;
+ rt5665->master[dai->id] = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ reg_val |= RT5665_I2S_BP_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ reg_val |= RT5665_I2S_DF_LEFT;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ reg_val |= RT5665_I2S_DF_PCM_A;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ reg_val |= RT5665_I2S_DF_PCM_B;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dai->id) {
+ case RT5665_AIF1_1:
+ case RT5665_AIF1_2:
+ snd_soc_update_bits(codec, RT5665_I2S1_SDP,
+ RT5665_I2S_MS_MASK | RT5665_I2S_BP_MASK |
+ RT5665_I2S_DF_MASK, reg_val);
+ break;
+ case RT5665_AIF2_1:
+ case RT5665_AIF2_2:
+ snd_soc_update_bits(codec, RT5665_I2S2_SDP,
+ RT5665_I2S_MS_MASK | RT5665_I2S_BP_MASK |
+ RT5665_I2S_DF_MASK, reg_val);
+ break;
+ case RT5665_AIF3:
+ snd_soc_update_bits(codec, RT5665_I2S3_SDP,
+ RT5665_I2S_MS_MASK | RT5665_I2S_BP_MASK |
+ RT5665_I2S_DF_MASK, reg_val);
+ break;
+ default:
+ dev_err(codec->dev, "Invalid dai->id: %d\n", dai->id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int rt5665_set_dai_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+ unsigned int reg_val = 0;
+
+ if (freq == rt5665->sysclk && clk_id == rt5665->sysclk_src)
+ return 0;
+
+ switch (clk_id) {
+ case RT5665_SCLK_S_MCLK:
+ reg_val |= RT5665_SCLK_SRC_MCLK;
+ break;
+ case RT5665_SCLK_S_PLL1:
+ reg_val |= RT5665_SCLK_SRC_PLL1;
+ break;
+ case RT5665_SCLK_S_RCCLK:
+ reg_val |= RT5665_SCLK_SRC_RCCLK;
+ break;
+ default:
+ dev_err(codec->dev, "Invalid clock id (%d)\n", clk_id);
+ return -EINVAL;
+ }
+ snd_soc_update_bits(codec, RT5665_GLB_CLK,
+ RT5665_SCLK_SRC_MASK, reg_val);
+ rt5665->sysclk = freq;
+ rt5665->sysclk_src = clk_id;
+
+ dev_dbg(dai->dev, "Sysclk is %dHz and clock id is %d\n", freq, clk_id);
+
+ return 0;
+}
+
+static int rt5665_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int Source,
+ unsigned int freq_in, unsigned int freq_out)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+ struct rl6231_pll_code pll_code;
+ int ret;
+
+ if (Source == rt5665->pll_src && freq_in == rt5665->pll_in &&
+ freq_out == rt5665->pll_out)
+ return 0;
+
+ if (!freq_in || !freq_out) {
+ dev_dbg(codec->dev, "PLL disabled\n");
+
+ rt5665->pll_in = 0;
+ rt5665->pll_out = 0;
+ snd_soc_update_bits(codec, RT5665_GLB_CLK,
+ RT5665_SCLK_SRC_MASK, RT5665_SCLK_SRC_MCLK);
+ return 0;
+ }
+
+ switch (Source) {
+ case RT5665_PLL1_S_MCLK:
+ snd_soc_update_bits(codec, RT5665_GLB_CLK,
+ RT5665_PLL1_SRC_MASK, RT5665_PLL1_SRC_MCLK);
+ break;
+ case RT5665_PLL1_S_BCLK1:
+ snd_soc_update_bits(codec, RT5665_GLB_CLK,
+ RT5665_PLL1_SRC_MASK, RT5665_PLL1_SRC_BCLK1);
+ break;
+ case RT5665_PLL1_S_BCLK2:
+ snd_soc_update_bits(codec, RT5665_GLB_CLK,
+ RT5665_PLL1_SRC_MASK, RT5665_PLL1_SRC_BCLK2);
+ break;
+ case RT5665_PLL1_S_BCLK3:
+ snd_soc_update_bits(codec, RT5665_GLB_CLK,
+ RT5665_PLL1_SRC_MASK, RT5665_PLL1_SRC_BCLK3);
+ break;
+ default:
+ dev_err(codec->dev, "Unknown PLL Source %d\n", Source);
+ return -EINVAL;
+ }
+
+ ret = rl6231_pll_calc(freq_in, freq_out, &pll_code);
+ if (ret < 0) {
+ dev_err(codec->dev, "Unsupport input clock %d\n", freq_in);
+ return ret;
+ }
+
+ dev_dbg(codec->dev, "bypass=%d m=%d n=%d k=%d\n",
+ pll_code.m_bp, (pll_code.m_bp ? 0 : pll_code.m_code),
+ pll_code.n_code, pll_code.k_code);
+
+ snd_soc_write(codec, RT5665_PLL_CTRL_1,
+ pll_code.n_code << RT5665_PLL_N_SFT | pll_code.k_code);
+ snd_soc_write(codec, RT5665_PLL_CTRL_2,
+ (pll_code.m_bp ? 0 : pll_code.m_code) << RT5665_PLL_M_SFT |
+ pll_code.m_bp << RT5665_PLL_M_BP_SFT);
+
+ rt5665->pll_in = freq_in;
+ rt5665->pll_out = freq_out;
+ rt5665->pll_src = Source;
+
+ return 0;
+}
+
+static int rt5665_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+ unsigned int rx_mask, int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ unsigned int val = 0;
+
+ if (rx_mask || tx_mask)
+ val |= RT5665_I2S1_MODE_TDM;
+
+ switch (slots) {
+ case 4:
+ val |= RT5665_TDM_IN_CH_4;
+ val |= RT5665_TDM_OUT_CH_4;
+ break;
+ case 6:
+ val |= RT5665_TDM_IN_CH_6;
+ val |= RT5665_TDM_OUT_CH_6;
+ break;
+ case 8:
+ val |= RT5665_TDM_IN_CH_8;
+ val |= RT5665_TDM_OUT_CH_8;
+ break;
+ case 2:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (slot_width) {
+ case 20:
+ val |= RT5665_TDM_IN_LEN_20;
+ val |= RT5665_TDM_OUT_LEN_20;
+ break;
+ case 24:
+ val |= RT5665_TDM_IN_LEN_24;
+ val |= RT5665_TDM_OUT_LEN_24;
+ break;
+ case 32:
+ val |= RT5665_TDM_IN_LEN_32;
+ val |= RT5665_TDM_OUT_LEN_32;
+ break;
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, RT5665_TDM_CTRL_1,
+ RT5665_I2S1_MODE_MASK | RT5665_TDM_IN_CH_MASK |
+ RT5665_TDM_OUT_CH_MASK | RT5665_TDM_IN_LEN_MASK |
+ RT5665_TDM_OUT_LEN_MASK, val);
+
+ return 0;
+}
+
+static int rt5665_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+
+ dev_dbg(codec->dev, "%s ratio=%d\n", __func__, ratio);
+
+ rt5665->bclk[dai->id] = ratio;
+
+ if (ratio == 64) {
+ switch (dai->id) {
+ case RT5665_AIF2_1:
+ case RT5665_AIF2_2:
+ snd_soc_update_bits(codec, RT5665_ADDA_CLK_1,
+ RT5665_I2S_BCLK_MS2_MASK,
+ RT5665_I2S_BCLK_MS2_64);
+ break;
+ case RT5665_AIF3:
+ snd_soc_update_bits(codec, RT5665_ADDA_CLK_1,
+ RT5665_I2S_BCLK_MS3_MASK,
+ RT5665_I2S_BCLK_MS3_64);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int rt5665_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+
+ switch (level) {
+ case SND_SOC_BIAS_PREPARE:
+ regmap_update_bits(rt5665->regmap, RT5665_DIG_MISC,
+ RT5665_DIG_GATE_CTRL, RT5665_DIG_GATE_CTRL);
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ regmap_update_bits(rt5665->regmap, RT5665_PWR_DIG_1,
+ RT5665_PWR_LDO, RT5665_PWR_LDO);
+ regmap_update_bits(rt5665->regmap, RT5665_PWR_ANLG_1,
+ RT5665_PWR_MB, RT5665_PWR_MB);
+ regmap_update_bits(rt5665->regmap, RT5665_DIG_MISC,
+ RT5665_DIG_GATE_CTRL, 0);
+ break;
+ case SND_SOC_BIAS_OFF:
+ regmap_update_bits(rt5665->regmap, RT5665_PWR_DIG_1,
+ RT5665_PWR_LDO, 0);
+ regmap_update_bits(rt5665->regmap, RT5665_PWR_ANLG_1,
+ RT5665_PWR_MB, 0);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int rt5665_probe(struct snd_soc_codec *codec)
+{
+ struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+
+ rt5665->codec = codec;
+
+ schedule_delayed_work(&rt5665->calibrate_work, msecs_to_jiffies(100));
+
+ return 0;
+}
+
+static int rt5665_remove(struct snd_soc_codec *codec)
+{
+ struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+
+ regmap_write(rt5665->regmap, RT5665_RESET, 0);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int rt5665_suspend(struct snd_soc_codec *codec)
+{
+ struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+
+ regcache_cache_only(rt5665->regmap, true);
+ regcache_mark_dirty(rt5665->regmap);
+ return 0;
+}
+
+static int rt5665_resume(struct snd_soc_codec *codec)
+{
+ struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+
+ regcache_cache_only(rt5665->regmap, false);
+ regcache_sync(rt5665->regmap);
+
+ return 0;
+}
+#else
+#define rt5665_suspend NULL
+#define rt5665_resume NULL
+#endif
+
+#define RT5665_STEREO_RATES SNDRV_PCM_RATE_8000_192000
+#define RT5665_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
+
+static const struct snd_soc_dai_ops rt5665_aif_dai_ops = {
+ .hw_params = rt5665_hw_params,
+ .set_fmt = rt5665_set_dai_fmt,
+ .set_sysclk = rt5665_set_dai_sysclk,
+ .set_tdm_slot = rt5665_set_tdm_slot,
+ .set_pll = rt5665_set_dai_pll,
+ .set_bclk_ratio = rt5665_set_bclk_ratio,
+};
+
+static struct snd_soc_dai_driver rt5665_dai[] = {
+ {
+ .name = "rt5665-aif1_1",
+ .id = RT5665_AIF1_1,
+ .playback = {
+ .stream_name = "AIF1 Playback",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = RT5665_STEREO_RATES,
+ .formats = RT5665_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF1_1 Capture",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = RT5665_STEREO_RATES,
+ .formats = RT5665_FORMATS,
+ },
+ .ops = &rt5665_aif_dai_ops,
+ },
+ {
+ .name = "rt5665-aif1_2",
+ .id = RT5665_AIF1_2,
+ .capture = {
+ .stream_name = "AIF1_2 Capture",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = RT5665_STEREO_RATES,
+ .formats = RT5665_FORMATS,
+ },
+ .ops = &rt5665_aif_dai_ops,
+ },
+ {
+ .name = "rt5665-aif2_1",
+ .id = RT5665_AIF2_1,
+ .playback = {
+ .stream_name = "AIF2_1 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5665_STEREO_RATES,
+ .formats = RT5665_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF2_1 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5665_STEREO_RATES,
+ .formats = RT5665_FORMATS,
+ },
+ .ops = &rt5665_aif_dai_ops,
+ },
+ {
+ .name = "rt5665-aif2_2",
+ .id = RT5665_AIF2_2,
+ .playback = {
+ .stream_name = "AIF2_2 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5665_STEREO_RATES,
+ .formats = RT5665_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF2_2 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5665_STEREO_RATES,
+ .formats = RT5665_FORMATS,
+ },
+ .ops = &rt5665_aif_dai_ops,
+ },
+ {
+ .name = "rt5665-aif3",
+ .id = RT5665_AIF3,
+ .playback = {
+ .stream_name = "AIF3 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5665_STEREO_RATES,
+ .formats = RT5665_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF3 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5665_STEREO_RATES,
+ .formats = RT5665_FORMATS,
+ },
+ .ops = &rt5665_aif_dai_ops,
+ },
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_rt5665 = {
+ .probe = rt5665_probe,
+ .remove = rt5665_remove,
+ .suspend = rt5665_suspend,
+ .resume = rt5665_resume,
+ .set_bias_level = rt5665_set_bias_level,
+ .idle_bias_off = true,
+ .component_driver = {
+ .controls = rt5665_snd_controls,
+ .num_controls = ARRAY_SIZE(rt5665_snd_controls),
+ .dapm_widgets = rt5665_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rt5665_dapm_widgets),
+ .dapm_routes = rt5665_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rt5665_dapm_routes),
+ }
+};
+
+static const struct regmap_config rt5665_regmap = {
+ .reg_bits = 16,
+ .val_bits = 16,
+ .max_register = 0x0400,
+ .volatile_reg = rt5665_volatile_register,
+ .readable_reg = rt5665_readable_register,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = rt5665_reg,
+ .num_reg_defaults = ARRAY_SIZE(rt5665_reg),
+ .use_single_rw = true,
+};
+
+static const struct i2c_device_id rt5665_i2c_id[] = {
+ {"rt5665", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, rt5665_i2c_id);
+
+static int rt5665_parse_dt(struct rt5665_priv *rt5665, struct device *dev)
+{
+ rt5665->pdata.in1_diff = of_property_read_bool(dev->of_node,
+ "realtek,in1-differential");
+ rt5665->pdata.in2_diff = of_property_read_bool(dev->of_node,
+ "realtek,in2-differential");
+ rt5665->pdata.in3_diff = of_property_read_bool(dev->of_node,
+ "realtek,in3-differential");
+ rt5665->pdata.in4_diff = of_property_read_bool(dev->of_node,
+ "realtek,in4-differential");
+
+ of_property_read_u32(dev->of_node, "realtek,dmic1-data-pin",
+ &rt5665->pdata.dmic1_data_pin);
+ of_property_read_u32(dev->of_node, "realtek,dmic2-data-pin",
+ &rt5665->pdata.dmic2_data_pin);
+ of_property_read_u32(dev->of_node, "realtek,jd-src",
+ &rt5665->pdata.jd_src);
+
+ rt5665->pdata.ldo1_en = of_get_named_gpio(dev->of_node,
+ "realtek,ldo1-en-gpios", 0);
+
+ return 0;
+}
+
+static void rt5665_calibrate(struct rt5665_priv *rt5665)
+{
+ int value, count;
+
+ mutex_lock(&rt5665->calibrate_mutex);
+
+ regcache_cache_bypass(rt5665->regmap, true);
+
+ regmap_write(rt5665->regmap, RT5665_RESET, 0);
+ regmap_write(rt5665->regmap, RT5665_BIAS_CUR_CTRL_8, 0xa602);
+ regmap_write(rt5665->regmap, RT5665_HP_CHARGE_PUMP_1, 0x0c26);
+ regmap_write(rt5665->regmap, RT5665_MONOMIX_IN_GAIN, 0x021f);
+ regmap_write(rt5665->regmap, RT5665_MONO_OUT, 0x480a);
+ regmap_write(rt5665->regmap, RT5665_PWR_MIXER, 0x083f);
+ regmap_write(rt5665->regmap, RT5665_PWR_DIG_1, 0x0180);
+ regmap_write(rt5665->regmap, RT5665_EJD_CTRL_1, 0x4040);
+ regmap_write(rt5665->regmap, RT5665_HP_LOGIC_CTRL_2, 0x0000);
+ regmap_write(rt5665->regmap, RT5665_DIG_MISC, 0x0001);
+ regmap_write(rt5665->regmap, RT5665_MICBIAS_2, 0x0380);
+ regmap_write(rt5665->regmap, RT5665_GLB_CLK, 0x8000);
+ regmap_write(rt5665->regmap, RT5665_ADDA_CLK_1, 0x1000);
+ regmap_write(rt5665->regmap, RT5665_CHOP_DAC, 0x3030);
+ regmap_write(rt5665->regmap, RT5665_CALIB_ADC_CTRL, 0x3c05);
+ regmap_write(rt5665->regmap, RT5665_PWR_ANLG_1, 0xaa3e);
+ usleep_range(15000, 20000);
+ regmap_write(rt5665->regmap, RT5665_PWR_ANLG_1, 0xfe7e);
+ regmap_write(rt5665->regmap, RT5665_HP_CALIB_CTRL_2, 0x0321);
+
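+ /*
+  * Start the headphone offset calibration and poll the busy flag
+  * (bit 15) until it clears, giving up after about 600ms.
+  */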
+ regmap_write(rt5665->regmap, RT5665_HP_CALIB_CTRL_1, 0xfc00);
+ count = 0;
+ while (true) {
+ regmap_read(rt5665->regmap, RT5665_HP_CALIB_STA_1, &value);
+ if (value & 0x8000)
+ usleep_range(10000, 10005);
+ else
+ break;
+
+ if (count > 60) {
+ pr_err("HP Calibration Failure\n");
+ regmap_write(rt5665->regmap, RT5665_RESET, 0);
+ regcache_cache_bypass(rt5665->regmap, false);
+ goto out_unlock;
+ }
+
+ count++;
+ }
+
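+ /* Repeat the sequence for the mono amplifier calibration */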
+ regmap_write(rt5665->regmap, RT5665_MONO_AMP_CALIB_CTRL_1, 0x9e24);
+ count = 0;
+ while (true) {
+ regmap_read(rt5665->regmap, RT5665_MONO_AMP_CALIB_STA1, &value);
+ if (value & 0x8000)
+ usleep_range(10000, 10005);
+ else
+ break;
+
+ if (count > 60) {
+ pr_err("MONO Calibration Failure\n");
+ regmap_write(rt5665->regmap, RT5665_RESET, 0);
+ regcache_cache_bypass(rt5665->regmap, false);
+ goto out_unlock;
+ }
+
+ count++;
+ }
+
+ regmap_write(rt5665->regmap, RT5665_RESET, 0);
+ regcache_cache_bypass(rt5665->regmap, false);
+
+ regcache_mark_dirty(rt5665->regmap);
+ regcache_sync(rt5665->regmap);
+
+ regmap_write(rt5665->regmap, RT5665_BIAS_CUR_CTRL_8, 0xa602);
+ regmap_write(rt5665->regmap, RT5665_ASRC_8, 0x0120);
+
+out_unlock:
+ mutex_unlock(&rt5665->calibrate_mutex);
+}
+
+static void rt5665_calibrate_handler(struct work_struct *work)
+{
+ struct rt5665_priv *rt5665 = container_of(work, struct rt5665_priv,
+ calibrate_work.work);
+
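+ /* Calibration needs the card fully instantiated, so wait for it */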
+ while (!rt5665->codec->component.card->instantiated) {
+ pr_debug("%s\n", __func__);
+ usleep_range(10000, 15000);
+ }
+
+ rt5665_calibrate(rt5665);
+}
+
+static int rt5665_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct rt5665_platform_data *pdata = dev_get_platdata(&i2c->dev);
+ struct rt5665_priv *rt5665;
+ int i, ret;
+ unsigned int val;
+
+ rt5665 = devm_kzalloc(&i2c->dev, sizeof(struct rt5665_priv),
+ GFP_KERNEL);
+
+ if (rt5665 == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, rt5665);
+
+ if (pdata)
+ rt5665->pdata = *pdata;
+ else
+ rt5665_parse_dt(rt5665, &i2c->dev);
+
+ for (i = 0; i < ARRAY_SIZE(rt5665->supplies); i++)
+ rt5665->supplies[i].supply = rt5665_supply_names[i];
+
+ ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(rt5665->supplies),
+ rt5665->supplies);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(rt5665->supplies),
+ rt5665->supplies);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret);
+ return ret;
+ }
+
+ if (gpio_is_valid(rt5665->pdata.ldo1_en)) {
+ if (devm_gpio_request_one(&i2c->dev, rt5665->pdata.ldo1_en,
+ GPIOF_OUT_INIT_HIGH, "rt5665"))
+ dev_err(&i2c->dev, "Fail gpio_request gpio_ldo\n");
+ }
+
+ /* Sleep for 300 ms minimum */
+ usleep_range(300000, 350000);
+
+ rt5665->regmap = devm_regmap_init_i2c(i2c, &rt5665_regmap);
+ if (IS_ERR(rt5665->regmap)) {
+ ret = PTR_ERR(rt5665->regmap);
+ dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ regmap_read(rt5665->regmap, RT5665_DEVICE_ID, &val);
+ if (val != DEVICE_ID) {
+ dev_err(&i2c->dev,
+ "Device with ID register %x is not rt5665\n", val);
+ return -ENODEV;
+ }
+
+ regmap_read(rt5665->regmap, RT5665_RESET, &val);
+ switch (val) {
+ case 0x0:
+ rt5665->id = CODEC_5666;
+ break;
+ case 0x6:
+ rt5665->id = CODEC_5668;
+ break;
+ case 0x3:
+ default:
+ rt5665->id = CODEC_5665;
+ break;
+ }
+
+ regmap_write(rt5665->regmap, RT5665_RESET, 0);
+
+ /* line in differential mode */
+ if (rt5665->pdata.in1_diff)
+ regmap_update_bits(rt5665->regmap, RT5665_IN1_IN2,
+ RT5665_IN1_DF_MASK, RT5665_IN1_DF_MASK);
+ if (rt5665->pdata.in2_diff)
+ regmap_update_bits(rt5665->regmap, RT5665_IN1_IN2,
+ RT5665_IN2_DF_MASK, RT5665_IN2_DF_MASK);
+ if (rt5665->pdata.in3_diff)
+ regmap_update_bits(rt5665->regmap, RT5665_IN3_IN4,
+ RT5665_IN3_DF_MASK, RT5665_IN3_DF_MASK);
+ if (rt5665->pdata.in4_diff)
+ regmap_update_bits(rt5665->regmap, RT5665_IN3_IN4,
+ RT5665_IN4_DF_MASK, RT5665_IN4_DF_MASK);
+
+ /* DMIC pin */
+ if (rt5665->pdata.dmic1_data_pin != RT5665_DMIC1_NULL ||
+ rt5665->pdata.dmic2_data_pin != RT5665_DMIC2_NULL) {
+ regmap_update_bits(rt5665->regmap, RT5665_GPIO_CTRL_2,
+ RT5665_GP9_PIN_MASK, RT5665_GP9_PIN_DMIC1_SCL);
+ regmap_update_bits(rt5665->regmap, RT5665_GPIO_CTRL_1,
+ RT5665_GP8_PIN_MASK, RT5665_GP8_PIN_DMIC2_SCL);
+ switch (rt5665->pdata.dmic1_data_pin) {
+ case RT5665_DMIC1_DATA_IN2N:
+ regmap_update_bits(rt5665->regmap, RT5665_DMIC_CTRL_1,
+ RT5665_DMIC_1_DP_MASK, RT5665_DMIC_1_DP_IN2N);
+ break;
+
+ case RT5665_DMIC1_DATA_GPIO4:
+ regmap_update_bits(rt5665->regmap, RT5665_DMIC_CTRL_1,
+ RT5665_DMIC_1_DP_MASK, RT5665_DMIC_1_DP_GPIO4);
+ regmap_update_bits(rt5665->regmap, RT5665_GPIO_CTRL_1,
+ RT5665_GP4_PIN_MASK, RT5665_GP4_PIN_DMIC1_SDA);
+ break;
+
+ default:
+ dev_dbg(&i2c->dev, "no DMIC1\n");
+ break;
+ }
+
+ switch (rt5665->pdata.dmic2_data_pin) {
+ case RT5665_DMIC2_DATA_IN2P:
+ regmap_update_bits(rt5665->regmap, RT5665_DMIC_CTRL_1,
+ RT5665_DMIC_2_DP_MASK, RT5665_DMIC_2_DP_IN2P);
+ break;
+
+ case RT5665_DMIC2_DATA_GPIO5:
+ regmap_update_bits(rt5665->regmap,
+ RT5665_DMIC_CTRL_1,
+ RT5665_DMIC_2_DP_MASK,
+ RT5665_DMIC_2_DP_GPIO5);
+ regmap_update_bits(rt5665->regmap, RT5665_GPIO_CTRL_1,
+ RT5665_GP5_PIN_MASK, RT5665_GP5_PIN_DMIC2_SDA);
+ break;
+
+ default:
+ dev_dbg(&i2c->dev, "no DMIC2\n");
+ break;
+
+ }
+ }
+
+ regmap_write(rt5665->regmap, RT5665_HP_LOGIC_CTRL_2, 0x0002);
+ regmap_update_bits(rt5665->regmap, RT5665_EJD_CTRL_1,
+ 0xf000 | RT5665_VREF_POW_MASK, 0xd000 | RT5665_VREF_POW_REG);
+ /* Workaround for pow_pump */
+ regmap_update_bits(rt5665->regmap, RT5665_STO1_DAC_SIL_DET,
+ RT5665_DEB_STO_DAC_MASK, RT5665_DEB_80_MS);
+
+ regmap_update_bits(rt5665->regmap, RT5665_HP_CHARGE_PUMP_1,
+ RT5665_PM_HP_MASK, RT5665_PM_HP_HV);
+
+ /* Set GPIO4,8 as input for combo jack */
+ if (rt5665->id == CODEC_5666) {
+ regmap_update_bits(rt5665->regmap, RT5665_GPIO_CTRL_2,
+ RT5665_GP4_PF_MASK, RT5665_GP4_PF_IN);
+ regmap_update_bits(rt5665->regmap, RT5665_GPIO_CTRL_3,
+ RT5665_GP8_PF_MASK, RT5665_GP8_PF_IN);
+ }
+
+ /* Enhance performance */
+ regmap_update_bits(rt5665->regmap, RT5665_PWR_ANLG_1,
+ RT5665_HP_DRIVER_MASK | RT5665_LDO1_DVO_MASK,
+ RT5665_HP_DRIVER_5X | RT5665_LDO1_DVO_09);
+
+ INIT_DELAYED_WORK(&rt5665->jack_detect_work,
+ rt5665_jack_detect_handler);
+ INIT_DELAYED_WORK(&rt5665->calibrate_work,
+ rt5665_calibrate_handler);
+ INIT_DELAYED_WORK(&rt5665->jd_check_work,
+ rt5665_jd_check_handler);
+
+ mutex_init(&rt5665->calibrate_mutex);
+
+ if (i2c->irq) {
+ ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
+ rt5665_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
+ | IRQF_ONESHOT, "rt5665", rt5665);
+ if (ret)
+ dev_err(&i2c->dev, "Failed to reguest IRQ: %d\n", ret);
+
+ }
+
+ return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5665,
+ rt5665_dai, ARRAY_SIZE(rt5665_dai));
+}
+
+static int rt5665_i2c_remove(struct i2c_client *i2c)
+{
+ snd_soc_unregister_codec(&i2c->dev);
+
+ return 0;
+}
+
+static void rt5665_i2c_shutdown(struct i2c_client *client)
+{
+ struct rt5665_priv *rt5665 = i2c_get_clientdata(client);
+
+ regmap_write(rt5665->regmap, RT5665_RESET, 0);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id rt5665_of_match[] = {
+ {.compatible = "realtek,rt5665"},
+ {.compatible = "realtek,rt5666"},
+ {.compatible = "realtek,rt5668"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, rt5665_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id rt5665_acpi_match[] = {
+ {"10EC5665", 0,},
+ {"10EC5666", 0,},
+ {"10EC5668", 0,},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, rt5665_acpi_match);
+#endif
+
+static struct i2c_driver rt5665_i2c_driver = {
+ .driver = {
+ .name = "rt5665",
+ .of_match_table = of_match_ptr(rt5665_of_match),
+ .acpi_match_table = ACPI_PTR(rt5665_acpi_match),
+ },
+ .probe = rt5665_i2c_probe,
+ .remove = rt5665_i2c_remove,
+ .shutdown = rt5665_i2c_shutdown,
+ .id_table = rt5665_i2c_id,
+};
+module_i2c_driver(rt5665_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC RT5665 driver");
+MODULE_AUTHOR("Bard Liao <bardliao@realtek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/rt5665.h b/sound/soc/codecs/rt5665.h
new file mode 100644
index 000000000000..12f7080a0d3c
--- /dev/null
+++ b/sound/soc/codecs/rt5665.h
@@ -0,0 +1,1990 @@
+/*
+ * rt5665.h -- RT5665/RT5666/RT5668 ALSA SoC audio driver
+ *
+ * Copyright 2016 Realtek Microelectronics
+ * Author: Bard Liao <bardliao@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RT5665_H__
+#define __RT5665_H__
+
+#include <sound/rt5665.h>
+
+#define DEVICE_ID 0x6451
+
+/* Info */
+#define RT5665_RESET 0x0000
+#define RT5665_VENDOR_ID 0x00fd
+#define RT5665_VENDOR_ID_1 0x00fe
+#define RT5665_DEVICE_ID 0x00ff
+/* I/O - Output */
+#define RT5665_LOUT 0x0001
+#define RT5665_HP_CTRL_1 0x0002
+#define RT5665_HP_CTRL_2 0x0003
+#define RT5665_MONO_OUT 0x0004
+#define RT5665_HPL_GAIN 0x0005
+#define RT5665_HPR_GAIN 0x0006
+#define RT5665_MONO_GAIN 0x0007
+
+/* I/O - Input */
+#define RT5665_CAL_BST_CTRL 0x000a
+#define RT5665_CBJ_BST_CTRL 0x000b
+#define RT5665_IN1_IN2 0x000c
+#define RT5665_IN3_IN4 0x000d
+#define RT5665_INL1_INR1_VOL 0x000f
+/* I/O - Speaker */
+#define RT5665_EJD_CTRL_1 0x0010
+#define RT5665_EJD_CTRL_2 0x0011
+#define RT5665_EJD_CTRL_3 0x0012
+#define RT5665_EJD_CTRL_4 0x0013
+#define RT5665_EJD_CTRL_5 0x0014
+#define RT5665_EJD_CTRL_6 0x0015
+#define RT5665_EJD_CTRL_7 0x0016
+/* I/O - ADC/DAC/DMIC */
+#define RT5665_DAC2_CTRL 0x0017
+#define RT5665_DAC2_DIG_VOL 0x0018
+#define RT5665_DAC1_DIG_VOL 0x0019
+#define RT5665_DAC3_DIG_VOL 0x001a
+#define RT5665_DAC3_CTRL 0x001b
+#define RT5665_STO1_ADC_DIG_VOL 0x001c
+#define RT5665_MONO_ADC_DIG_VOL 0x001d
+#define RT5665_STO2_ADC_DIG_VOL 0x001e
+#define RT5665_STO1_ADC_BOOST 0x001f
+#define RT5665_MONO_ADC_BOOST 0x0020
+#define RT5665_STO2_ADC_BOOST 0x0021
+#define RT5665_HP_IMP_GAIN_1 0x0022
+#define RT5665_HP_IMP_GAIN_2 0x0023
+/* Mixer - D-D */
+#define RT5665_STO1_ADC_MIXER 0x0026
+#define RT5665_MONO_ADC_MIXER 0x0027
+#define RT5665_STO2_ADC_MIXER 0x0028
+#define RT5665_AD_DA_MIXER 0x0029
+#define RT5665_STO1_DAC_MIXER 0x002a
+#define RT5665_MONO_DAC_MIXER 0x002b
+#define RT5665_STO2_DAC_MIXER 0x002c
+#define RT5665_A_DAC1_MUX 0x002d
+#define RT5665_A_DAC2_MUX 0x002e
+#define RT5665_DIG_INF2_DATA 0x002f
+#define RT5665_DIG_INF3_DATA 0x0030
+/* Mixer - PDM */
+#define RT5665_PDM_OUT_CTRL 0x0031
+#define RT5665_PDM_DATA_CTRL_1 0x0032
+#define RT5665_PDM_DATA_CTRL_2 0x0033
+#define RT5665_PDM_DATA_CTRL_3 0x0034
+#define RT5665_PDM_DATA_CTRL_4 0x0035
+/* Mixer - ADC */
+#define RT5665_REC1_GAIN 0x003a
+#define RT5665_REC1_L1_MIXER 0x003b
+#define RT5665_REC1_L2_MIXER 0x003c
+#define RT5665_REC1_R1_MIXER 0x003d
+#define RT5665_REC1_R2_MIXER 0x003e
+#define RT5665_REC2_GAIN 0x003f
+#define RT5665_REC2_L1_MIXER 0x0040
+#define RT5665_REC2_L2_MIXER 0x0041
+#define RT5665_REC2_R1_MIXER 0x0042
+#define RT5665_REC2_R2_MIXER 0x0043
+#define RT5665_CAL_REC 0x0044
+/* Mixer - DAC */
+#define RT5665_ALC_BACK_GAIN 0x0049
+#define RT5665_MONOMIX_GAIN 0x004a
+#define RT5665_MONOMIX_IN_GAIN 0x004b
+#define RT5665_OUT_L_GAIN 0x004d
+#define RT5665_OUT_L_MIXER 0x004e
+#define RT5665_OUT_R_GAIN 0x004f
+#define RT5665_OUT_R_MIXER 0x0050
+#define RT5665_LOUT_MIXER 0x0052
+/* Power */
+#define RT5665_PWR_DIG_1 0x0061
+#define RT5665_PWR_DIG_2 0x0062
+#define RT5665_PWR_ANLG_1 0x0063
+#define RT5665_PWR_ANLG_2 0x0064
+#define RT5665_PWR_ANLG_3 0x0065
+#define RT5665_PWR_MIXER 0x0066
+#define RT5665_PWR_VOL 0x0067
+/* Clock Detect */
+#define RT5665_CLK_DET 0x006b
+/* Filter */
+#define RT5665_HPF_CTRL1 0x006d
+/* DMIC */
+#define RT5665_DMIC_CTRL_1 0x006e
+#define RT5665_DMIC_CTRL_2 0x006f
+/* Format - ADC/DAC */
+#define RT5665_I2S1_SDP 0x0070
+#define RT5665_I2S2_SDP 0x0071
+#define RT5665_I2S3_SDP 0x0072
+#define RT5665_ADDA_CLK_1 0x0073
+#define RT5665_ADDA_CLK_2 0x0074
+#define RT5665_I2S1_F_DIV_CTRL_1 0x0075
+#define RT5665_I2S1_F_DIV_CTRL_2 0x0076
+/* Format - TDM Control */
+#define RT5665_TDM_CTRL_1 0x0078
+#define RT5665_TDM_CTRL_2 0x0079
+#define RT5665_TDM_CTRL_3 0x007a
+#define RT5665_TDM_CTRL_4 0x007b
+#define RT5665_TDM_CTRL_5 0x007c
+#define RT5665_TDM_CTRL_6 0x007d
+#define RT5665_TDM_CTRL_7 0x007e
+#define RT5665_TDM_CTRL_8 0x007f
+/* Function - Analog */
+#define RT5665_GLB_CLK 0x0080
+#define RT5665_PLL_CTRL_1 0x0081
+#define RT5665_PLL_CTRL_2 0x0082
+#define RT5665_ASRC_1 0x0083
+#define RT5665_ASRC_2 0x0084
+#define RT5665_ASRC_3 0x0085
+#define RT5665_ASRC_4 0x0086
+#define RT5665_ASRC_5 0x0087
+#define RT5665_ASRC_6 0x0088
+#define RT5665_ASRC_7 0x0089
+#define RT5665_ASRC_8 0x008a
+#define RT5665_ASRC_9 0x008b
+#define RT5665_ASRC_10 0x008c
+#define RT5665_DEPOP_1 0x008e
+#define RT5665_DEPOP_2 0x008f
+#define RT5665_HP_CHARGE_PUMP_1 0x0091
+#define RT5665_HP_CHARGE_PUMP_2 0x0092
+#define RT5665_MICBIAS_1 0x0093
+#define RT5665_MICBIAS_2 0x0094
+#define RT5665_ASRC_12 0x0098
+#define RT5665_ASRC_13 0x0099
+#define RT5665_ASRC_14 0x009a
+#define RT5665_RC_CLK_CTRL 0x009f
+#define RT5665_I2S_M_CLK_CTRL_1 0x00a0
+#define RT5665_I2S2_F_DIV_CTRL_1 0x00a1
+#define RT5665_I2S2_F_DIV_CTRL_2 0x00a2
+#define RT5665_I2S3_F_DIV_CTRL_1 0x00a3
+#define RT5665_I2S3_F_DIV_CTRL_2 0x00a4
+/* Function - Digital */
+#define RT5665_EQ_CTRL_1 0x00ae
+#define RT5665_EQ_CTRL_2 0x00af
+#define RT5665_IRQ_CTRL_1 0x00b6
+#define RT5665_IRQ_CTRL_2 0x00b7
+#define RT5665_IRQ_CTRL_3 0x00b8
+#define RT5665_IRQ_CTRL_4 0x00b9
+#define RT5665_IRQ_CTRL_5 0x00ba
+#define RT5665_IRQ_CTRL_6 0x00bb
+#define RT5665_INT_ST_1 0x00be
+#define RT5665_GPIO_CTRL_1 0x00c0
+#define RT5665_GPIO_CTRL_2 0x00c1
+#define RT5665_GPIO_CTRL_3 0x00c2
+#define RT5665_GPIO_CTRL_4 0x00c3
+#define RT5665_GPIO_STA 0x00c4
+#define RT5665_HP_AMP_DET_CTRL_1 0x00d0
+#define RT5665_HP_AMP_DET_CTRL_2 0x00d1
+#define RT5665_MID_HP_AMP_DET 0x00d3
+#define RT5665_LOW_HP_AMP_DET 0x00d4
+#define RT5665_SV_ZCD_1 0x00d9
+#define RT5665_SV_ZCD_2 0x00da
+#define RT5665_IL_CMD_1 0x00db
+#define RT5665_IL_CMD_2 0x00dc
+#define RT5665_IL_CMD_3 0x00dd
+#define RT5665_IL_CMD_4 0x00de
+#define RT5665_4BTN_IL_CMD_1 0x00df
+#define RT5665_4BTN_IL_CMD_2 0x00e0
+#define RT5665_4BTN_IL_CMD_3 0x00e1
+#define RT5665_PSV_IL_CMD_1 0x00e2
+
+#define RT5665_ADC_STO1_HP_CTRL_1 0x00ea
+#define RT5665_ADC_STO1_HP_CTRL_2 0x00eb
+#define RT5665_ADC_MONO_HP_CTRL_1 0x00ec
+#define RT5665_ADC_MONO_HP_CTRL_2 0x00ed
+#define RT5665_ADC_STO2_HP_CTRL_1 0x00ee
+#define RT5665_ADC_STO2_HP_CTRL_2 0x00ef
+#define RT5665_AJD1_CTRL 0x00f0
+#define RT5665_JD1_THD 0x00f1
+#define RT5665_JD2_THD 0x00f2
+#define RT5665_JD_CTRL_1 0x00f6
+#define RT5665_JD_CTRL_2 0x00f7
+#define RT5665_JD_CTRL_3 0x00f8
+/* General Control */
+#define RT5665_DIG_MISC 0x00fa
+#define RT5665_DUMMY_2 0x00fb
+#define RT5665_DUMMY_3 0x00fc
+
+#define RT5665_DAC_ADC_DIG_VOL1 0x0100
+#define RT5665_DAC_ADC_DIG_VOL2 0x0101
+#define RT5665_BIAS_CUR_CTRL_1 0x010a
+#define RT5665_BIAS_CUR_CTRL_2 0x010b
+#define RT5665_BIAS_CUR_CTRL_3 0x010c
+#define RT5665_BIAS_CUR_CTRL_4 0x010d
+#define RT5665_BIAS_CUR_CTRL_5 0x010e
+#define RT5665_BIAS_CUR_CTRL_6 0x010f
+#define RT5665_BIAS_CUR_CTRL_7 0x0110
+#define RT5665_BIAS_CUR_CTRL_8 0x0111
+#define RT5665_BIAS_CUR_CTRL_9 0x0112
+#define RT5665_BIAS_CUR_CTRL_10 0x0113
+#define RT5665_VREF_REC_OP_FB_CAP_CTRL 0x0117
+#define RT5665_CHARGE_PUMP_1 0x0125
+#define RT5665_DIG_IN_CTRL_1 0x0132
+#define RT5665_DIG_IN_CTRL_2 0x0133
+#define RT5665_PAD_DRIVING_CTRL 0x0137
+#define RT5665_SOFT_RAMP_DEPOP 0x0138
+#define RT5665_PLL 0x0139
+#define RT5665_CHOP_DAC 0x013a
+#define RT5665_CHOP_ADC 0x013b
+#define RT5665_CALIB_ADC_CTRL 0x013c
+#define RT5665_VOL_TEST 0x013f
+#define RT5665_TEST_MODE_CTRL_1 0x0145
+#define RT5665_TEST_MODE_CTRL_2 0x0146
+#define RT5665_TEST_MODE_CTRL_3 0x0147
+#define RT5665_TEST_MODE_CTRL_4 0x0148
+#define RT5665_BASSBACK_CTRL 0x0150
+#define RT5665_STO_NG2_CTRL_1 0x0160
+#define RT5665_STO_NG2_CTRL_2 0x0161
+#define RT5665_STO_NG2_CTRL_3 0x0162
+#define RT5665_STO_NG2_CTRL_4 0x0163
+#define RT5665_STO_NG2_CTRL_5 0x0164
+#define RT5665_STO_NG2_CTRL_6 0x0165
+#define RT5665_STO_NG2_CTRL_7 0x0166
+#define RT5665_STO_NG2_CTRL_8 0x0167
+#define RT5665_MONO_NG2_CTRL_1 0x0170
+#define RT5665_MONO_NG2_CTRL_2 0x0171
+#define RT5665_MONO_NG2_CTRL_3 0x0172
+#define RT5665_MONO_NG2_CTRL_4 0x0173
+#define RT5665_MONO_NG2_CTRL_5 0x0174
+#define RT5665_MONO_NG2_CTRL_6 0x0175
+#define RT5665_STO1_DAC_SIL_DET 0x0190
+#define RT5665_MONOL_DAC_SIL_DET 0x0191
+#define RT5665_MONOR_DAC_SIL_DET 0x0192
+#define RT5665_STO2_DAC_SIL_DET 0x0193
+#define RT5665_SIL_PSV_CTRL1 0x0194
+#define RT5665_SIL_PSV_CTRL2 0x0195
+#define RT5665_SIL_PSV_CTRL3 0x0196
+#define RT5665_SIL_PSV_CTRL4 0x0197
+#define RT5665_SIL_PSV_CTRL5 0x0198
+#define RT5665_SIL_PSV_CTRL6 0x0199
+#define RT5665_MONO_AMP_CALIB_CTRL_1 0x01a0
+#define RT5665_MONO_AMP_CALIB_CTRL_2 0x01a1
+#define RT5665_MONO_AMP_CALIB_CTRL_3 0x01a2
+#define RT5665_MONO_AMP_CALIB_CTRL_4 0x01a3
+#define RT5665_MONO_AMP_CALIB_CTRL_5 0x01a4
+#define RT5665_MONO_AMP_CALIB_CTRL_6 0x01a5
+#define RT5665_MONO_AMP_CALIB_CTRL_7 0x01a6
+#define RT5665_MONO_AMP_CALIB_STA1 0x01a7
+#define RT5665_MONO_AMP_CALIB_STA2 0x01a8
+#define RT5665_MONO_AMP_CALIB_STA3 0x01a9
+#define RT5665_MONO_AMP_CALIB_STA4 0x01aa
+#define RT5665_MONO_AMP_CALIB_STA6 0x01ab
+#define RT5665_HP_IMP_SENS_CTRL_01 0x01b5
+#define RT5665_HP_IMP_SENS_CTRL_02 0x01b6
+#define RT5665_HP_IMP_SENS_CTRL_03 0x01b7
+#define RT5665_HP_IMP_SENS_CTRL_04 0x01b8
+#define RT5665_HP_IMP_SENS_CTRL_05 0x01b9
+#define RT5665_HP_IMP_SENS_CTRL_06 0x01ba
+#define RT5665_HP_IMP_SENS_CTRL_07 0x01bb
+#define RT5665_HP_IMP_SENS_CTRL_08 0x01bc
+#define RT5665_HP_IMP_SENS_CTRL_09 0x01bd
+#define RT5665_HP_IMP_SENS_CTRL_10 0x01be
+#define RT5665_HP_IMP_SENS_CTRL_11 0x01bf
+#define RT5665_HP_IMP_SENS_CTRL_12 0x01c0
+#define RT5665_HP_IMP_SENS_CTRL_13 0x01c1
+#define RT5665_HP_IMP_SENS_CTRL_14 0x01c2
+#define RT5665_HP_IMP_SENS_CTRL_15 0x01c3
+#define RT5665_HP_IMP_SENS_CTRL_16 0x01c4
+#define RT5665_HP_IMP_SENS_CTRL_17 0x01c5
+#define RT5665_HP_IMP_SENS_CTRL_18 0x01c6
+#define RT5665_HP_IMP_SENS_CTRL_19 0x01c7
+#define RT5665_HP_IMP_SENS_CTRL_20 0x01c8
+#define RT5665_HP_IMP_SENS_CTRL_21 0x01c9
+#define RT5665_HP_IMP_SENS_CTRL_22 0x01ca
+#define RT5665_HP_IMP_SENS_CTRL_23 0x01cb
+#define RT5665_HP_IMP_SENS_CTRL_24 0x01cc
+#define RT5665_HP_IMP_SENS_CTRL_25 0x01cd
+#define RT5665_HP_IMP_SENS_CTRL_26 0x01ce
+#define RT5665_HP_IMP_SENS_CTRL_27 0x01cf
+#define RT5665_HP_IMP_SENS_CTRL_28 0x01d0
+#define RT5665_HP_IMP_SENS_CTRL_29 0x01d1
+#define RT5665_HP_IMP_SENS_CTRL_30 0x01d2
+#define RT5665_HP_IMP_SENS_CTRL_31 0x01d3
+#define RT5665_HP_IMP_SENS_CTRL_32 0x01d4
+#define RT5665_HP_IMP_SENS_CTRL_33 0x01d5
+#define RT5665_HP_IMP_SENS_CTRL_34 0x01d6
+#define RT5665_HP_LOGIC_CTRL_1 0x01da
+#define RT5665_HP_LOGIC_CTRL_2 0x01db
+#define RT5665_HP_LOGIC_CTRL_3 0x01dc
+#define RT5665_HP_CALIB_CTRL_1 0x01de
+#define RT5665_HP_CALIB_CTRL_2 0x01df
+#define RT5665_HP_CALIB_CTRL_3 0x01e0
+#define RT5665_HP_CALIB_CTRL_4 0x01e1
+#define RT5665_HP_CALIB_CTRL_5 0x01e2
+#define RT5665_HP_CALIB_CTRL_6 0x01e3
+#define RT5665_HP_CALIB_CTRL_7 0x01e4
+#define RT5665_HP_CALIB_CTRL_9 0x01e6
+#define RT5665_HP_CALIB_CTRL_10 0x01e7
+#define RT5665_HP_CALIB_CTRL_11 0x01e8
+#define RT5665_HP_CALIB_STA_1 0x01ea
+#define RT5665_HP_CALIB_STA_2 0x01eb
+#define RT5665_HP_CALIB_STA_3 0x01ec
+#define RT5665_HP_CALIB_STA_4 0x01ed
+#define RT5665_HP_CALIB_STA_5 0x01ee
+#define RT5665_HP_CALIB_STA_6 0x01ef
+#define RT5665_HP_CALIB_STA_7 0x01f0
+#define RT5665_HP_CALIB_STA_8 0x01f1
+#define RT5665_HP_CALIB_STA_9 0x01f2
+#define RT5665_HP_CALIB_STA_10 0x01f3
+#define RT5665_HP_CALIB_STA_11 0x01f4
+#define RT5665_PGM_TAB_CTRL1 0x0200
+#define RT5665_PGM_TAB_CTRL2 0x0201
+#define RT5665_PGM_TAB_CTRL3 0x0202
+#define RT5665_PGM_TAB_CTRL4 0x0203
+#define RT5665_PGM_TAB_CTRL5 0x0204
+#define RT5665_PGM_TAB_CTRL6 0x0205
+#define RT5665_PGM_TAB_CTRL7 0x0206
+#define RT5665_PGM_TAB_CTRL8 0x0207
+#define RT5665_PGM_TAB_CTRL9 0x0208
+#define RT5665_SAR_IL_CMD_1 0x0210
+#define RT5665_SAR_IL_CMD_2 0x0211
+#define RT5665_SAR_IL_CMD_3 0x0212
+#define RT5665_SAR_IL_CMD_4 0x0213
+#define RT5665_SAR_IL_CMD_5 0x0214
+#define RT5665_SAR_IL_CMD_6 0x0215
+#define RT5665_SAR_IL_CMD_7 0x0216
+#define RT5665_SAR_IL_CMD_8 0x0217
+#define RT5665_SAR_IL_CMD_9 0x0218
+#define RT5665_SAR_IL_CMD_10 0x0219
+#define RT5665_SAR_IL_CMD_11 0x021a
+#define RT5665_SAR_IL_CMD_12 0x021b
+#define RT5665_DRC1_CTRL_0 0x02ff
+#define RT5665_DRC1_CTRL_1 0x0300
+#define RT5665_DRC1_CTRL_2 0x0301
+#define RT5665_DRC1_CTRL_3 0x0302
+#define RT5665_DRC1_CTRL_4 0x0303
+#define RT5665_DRC1_CTRL_5 0x0304
+#define RT5665_DRC1_CTRL_6 0x0305
+#define RT5665_DRC1_HARD_LMT_CTRL_1 0x0306
+#define RT5665_DRC1_HARD_LMT_CTRL_2 0x0307
+#define RT5665_DRC1_PRIV_1 0x0310
+#define RT5665_DRC1_PRIV_2 0x0311
+#define RT5665_DRC1_PRIV_3 0x0312
+#define RT5665_DRC1_PRIV_4 0x0313
+#define RT5665_DRC1_PRIV_5 0x0314
+#define RT5665_DRC1_PRIV_6 0x0315
+#define RT5665_DRC1_PRIV_7 0x0316
+#define RT5665_DRC1_PRIV_8 0x0317
+#define RT5665_ALC_PGA_CTRL_1 0x0330
+#define RT5665_ALC_PGA_CTRL_2 0x0331
+#define RT5665_ALC_PGA_CTRL_3 0x0332
+#define RT5665_ALC_PGA_CTRL_4 0x0333
+#define RT5665_ALC_PGA_CTRL_5 0x0334
+#define RT5665_ALC_PGA_CTRL_6 0x0335
+#define RT5665_ALC_PGA_CTRL_7 0x0336
+#define RT5665_ALC_PGA_CTRL_8 0x0337
+#define RT5665_ALC_PGA_STA_1 0x0338
+#define RT5665_ALC_PGA_STA_2 0x0339
+#define RT5665_ALC_PGA_STA_3 0x033a
+#define RT5665_EQ_AUTO_RCV_CTRL1 0x03c0
+#define RT5665_EQ_AUTO_RCV_CTRL2 0x03c1
+#define RT5665_EQ_AUTO_RCV_CTRL3 0x03c2
+#define RT5665_EQ_AUTO_RCV_CTRL4 0x03c3
+#define RT5665_EQ_AUTO_RCV_CTRL5 0x03c4
+#define RT5665_EQ_AUTO_RCV_CTRL6 0x03c5
+#define RT5665_EQ_AUTO_RCV_CTRL7 0x03c6
+#define RT5665_EQ_AUTO_RCV_CTRL8 0x03c7
+#define RT5665_EQ_AUTO_RCV_CTRL9 0x03c8
+#define RT5665_EQ_AUTO_RCV_CTRL10 0x03c9
+#define RT5665_EQ_AUTO_RCV_CTRL11 0x03ca
+#define RT5665_EQ_AUTO_RCV_CTRL12 0x03cb
+#define RT5665_EQ_AUTO_RCV_CTRL13 0x03cc
+#define RT5665_ADC_L_EQ_LPF1_A1 0x03d0
+#define RT5665_R_EQ_LPF1_A1 0x03d1
+#define RT5665_L_EQ_LPF1_H0 0x03d2
+#define RT5665_R_EQ_LPF1_H0 0x03d3
+#define RT5665_L_EQ_BPF1_A1 0x03d4
+#define RT5665_R_EQ_BPF1_A1 0x03d5
+#define RT5665_L_EQ_BPF1_A2 0x03d6
+#define RT5665_R_EQ_BPF1_A2 0x03d7
+#define RT5665_L_EQ_BPF1_H0 0x03d8
+#define RT5665_R_EQ_BPF1_H0 0x03d9
+#define RT5665_L_EQ_BPF2_A1 0x03da
+#define RT5665_R_EQ_BPF2_A1 0x03db
+#define RT5665_L_EQ_BPF2_A2 0x03dc
+#define RT5665_R_EQ_BPF2_A2 0x03dd
+#define RT5665_L_EQ_BPF2_H0 0x03de
+#define RT5665_R_EQ_BPF2_H0 0x03df
+#define RT5665_L_EQ_BPF3_A1 0x03e0
+#define RT5665_R_EQ_BPF3_A1 0x03e1
+#define RT5665_L_EQ_BPF3_A2 0x03e2
+#define RT5665_R_EQ_BPF3_A2 0x03e3
+#define RT5665_L_EQ_BPF3_H0 0x03e4
+#define RT5665_R_EQ_BPF3_H0 0x03e5
+#define RT5665_L_EQ_BPF4_A1 0x03e6
+#define RT5665_R_EQ_BPF4_A1 0x03e7
+#define RT5665_L_EQ_BPF4_A2 0x03e8
+#define RT5665_R_EQ_BPF4_A2 0x03e9
+#define RT5665_L_EQ_BPF4_H0 0x03ea
+#define RT5665_R_EQ_BPF4_H0 0x03eb
+#define RT5665_L_EQ_HPF1_A1 0x03ec
+#define RT5665_R_EQ_HPF1_A1 0x03ed
+#define RT5665_L_EQ_HPF1_H0 0x03ee
+#define RT5665_R_EQ_HPF1_H0 0x03ef
+#define RT5665_L_EQ_PRE_VOL 0x03f0
+#define RT5665_R_EQ_PRE_VOL 0x03f1
+#define RT5665_L_EQ_POST_VOL 0x03f2
+#define RT5665_R_EQ_POST_VOL 0x03f3
+#define RT5665_SCAN_MODE_CTRL 0x07f0
+#define RT5665_I2C_MODE 0x07fa
+
+
+
+/* global definition */
+#define RT5665_L_MUTE (0x1 << 15)
+#define RT5665_L_MUTE_SFT 15
+#define RT5665_VOL_L_MUTE (0x1 << 14)
+#define RT5665_VOL_L_SFT 14
+#define RT5665_R_MUTE (0x1 << 7)
+#define RT5665_R_MUTE_SFT 7
+#define RT5665_VOL_R_MUTE (0x1 << 6)
+#define RT5665_VOL_R_SFT 6
+#define RT5665_L_VOL_MASK (0x3f << 8)
+#define RT5665_L_VOL_SFT 8
+#define RT5665_R_VOL_MASK (0x3f)
+#define RT5665_R_VOL_SFT 0
+
+/* Headphone Amp L/R Analog Gain and Digital NG2 Gain Control (0x0005 0x0006) */
+#define RT5665_G_HP (0xf << 8)
+#define RT5665_G_HP_SFT 8
+#define RT5665_G_STO_DA_DMIX (0xf)
+#define RT5665_G_STO_DA_SFT 0
+
+/* CBJ Control (0x000b) */
+#define RT5665_BST_CBJ_MASK (0xf << 8)
+#define RT5665_BST_CBJ_SFT 8
+
+/* IN1/IN2 Control (0x000c) */
+#define RT5665_IN1_DF_MASK (0x1 << 15)
+#define RT5665_IN1_DF 15
+#define RT5665_BST1_MASK (0x7f << 8)
+#define RT5665_BST1_SFT 8
+#define RT5665_IN2_DF_MASK (0x1 << 7)
+#define RT5665_IN2_DF 7
+#define RT5665_BST2_MASK (0x7f)
+#define RT5665_BST2_SFT 0
+
+/* IN3/IN4 Control (0x000d) */
+#define RT5665_IN3_DF_MASK (0x1 << 15)
+#define RT5665_IN3_DF 15
+#define RT5665_BST3_MASK (0x7f << 8)
+#define RT5665_BST3_SFT 8
+#define RT5665_IN4_DF_MASK (0x1 << 7)
+#define RT5665_IN4_DF 7
+#define RT5665_BST4_MASK (0x7f)
+#define RT5665_BST4_SFT 0
+
+/* INL and INR Volume Control (0x000f) */
+#define RT5665_INL_VOL_MASK (0x1f << 8)
+#define RT5665_INL_VOL_SFT 8
+#define RT5665_INR_VOL_MASK (0x1f)
+#define RT5665_INR_VOL_SFT 0
+
+/* Embedded Jack and Type Detection Control 1 (0x0010) */
+#define RT5665_EMB_JD_EN (0x1 << 15)
+#define RT5665_EMB_JD_EN_SFT 15
+#define RT5665_JD_MODE (0x1 << 13)
+#define RT5665_JD_MODE_SFT 13
+#define RT5665_POLA_EXT_JD_MASK (0x1 << 11)
+#define RT5665_POLA_EXT_JD_LOW (0x1 << 11)
+#define RT5665_POLA_EXT_JD_HIGH (0x0 << 11)
+#define RT5665_EXT_JD_DIG (0x1 << 9)
+#define RT5665_POL_FAST_OFF_MASK (0x1 << 8)
+#define RT5665_POL_FAST_OFF_HIGH (0x1 << 8)
+#define RT5665_POL_FAST_OFF_LOW (0x0 << 8)
+#define RT5665_VREF_POW_MASK (0x1 << 6)
+#define RT5665_VREF_POW_FSM (0x0 << 6)
+#define RT5665_VREF_POW_REG (0x1 << 6)
+#define RT5665_MB1_PATH_MASK (0x1 << 5)
+#define RT5665_CTRL_MB1_REG (0x1 << 5)
+#define RT5665_CTRL_MB1_FSM (0x0 << 5)
+#define RT5665_MB2_PATH_MASK (0x1 << 4)
+#define RT5665_CTRL_MB2_REG (0x1 << 4)
+#define RT5665_CTRL_MB2_FSM (0x0 << 4)
+#define RT5665_TRIG_JD_MASK (0x1 << 3)
+#define RT5665_TRIG_JD_HIGH (0x1 << 3)
+#define RT5665_TRIG_JD_LOW (0x0 << 3)
+
+/* Embedded Jack and Type Detection Control 2 (0x0011) */
+#define RT5665_EXT_JD_SRC (0x7 << 4)
+#define RT5665_EXT_JD_SRC_SFT 4
+#define RT5665_EXT_JD_SRC_GPIO_JD1 (0x0 << 4)
+#define RT5665_EXT_JD_SRC_GPIO_JD2 (0x1 << 4)
+#define RT5665_EXT_JD_SRC_JD1_1 (0x2 << 4)
+#define RT5665_EXT_JD_SRC_JD1_2 (0x3 << 4)
+#define RT5665_EXT_JD_SRC_JD2 (0x4 << 4)
+#define RT5665_EXT_JD_SRC_JD3 (0x5 << 4)
+#define RT5665_EXT_JD_SRC_MANUAL (0x6 << 4)
+
+/* Combo Jack and Type Detection Control 4 (0x0013) */
+#define RT5665_SEL_SHT_MID_TON_MASK (0x3 << 12)
+#define RT5665_SEL_SHT_MID_TON_2 (0x0 << 12)
+#define RT5665_SEL_SHT_MID_TON_3 (0x1 << 12)
+#define RT5665_CBJ_JD_TEST_MASK (0x1 << 6)
+#define RT5665_CBJ_JD_TEST_NORM (0x0 << 6)
+#define RT5665_CBJ_JD_TEST_MODE (0x1 << 6)
+
+/* Silence Detection Control (0x0015) */
+#define RT5665_SIL_DET_MASK (0x1 << 15)
+#define RT5665_SIL_DET_DIS (0x0 << 15)
+#define RT5665_SIL_DET_EN (0x1 << 15)
+
+/* DAC2 Control (0x0017) */
+#define RT5665_M_DAC2_L_VOL (0x1 << 13)
+#define RT5665_M_DAC2_L_VOL_SFT 13
+#define RT5665_M_DAC2_R_VOL (0x1 << 12)
+#define RT5665_M_DAC2_R_VOL_SFT 12
+#define RT5665_DAC_L2_SEL_MASK (0x7 << 4)
+#define RT5665_DAC_L2_SEL_SFT 4
+#define RT5665_DAC_R2_SEL_MASK (0x7 << 0)
+#define RT5665_DAC_R2_SEL_SFT 0
+
+/* Sidetone Control (0x0018) */
+#define RT5665_ST_SEL_MASK (0x7 << 9)
+#define RT5665_ST_SEL_SFT 9
+#define RT5665_ST_EN (0x1 << 6)
+#define RT5665_ST_EN_SFT 6
+
+/* DAC1 Digital Volume (0x0019) */
+#define RT5665_DAC_L1_VOL_MASK (0xff << 8)
+#define RT5665_DAC_L1_VOL_SFT 8
+#define RT5665_DAC_R1_VOL_MASK (0xff)
+#define RT5665_DAC_R1_VOL_SFT 0
+
+/* DAC2 Digital Volume (0x001a) */
+#define RT5665_DAC_L2_VOL_MASK (0xff << 8)
+#define RT5665_DAC_L2_VOL_SFT 8
+#define RT5665_DAC_R2_VOL_MASK (0xff)
+#define RT5665_DAC_R2_VOL_SFT 0
+
+/* DAC3 Control (0x001b) */
+#define RT5665_M_DAC3_L_VOL (0x1 << 13)
+#define RT5665_M_DAC3_L_VOL_SFT 13
+#define RT5665_M_DAC3_R_VOL (0x1 << 12)
+#define RT5665_M_DAC3_R_VOL_SFT 12
+#define RT5665_DAC_L3_SEL_MASK (0x7 << 4)
+#define RT5665_DAC_L3_SEL_SFT 4
+#define RT5665_DAC_R3_SEL_MASK (0x7 << 0)
+#define RT5665_DAC_R3_SEL_SFT 0
+
+/* ADC Digital Volume Control (0x001c) */
+#define RT5665_ADC_L_VOL_MASK (0x7f << 8)
+#define RT5665_ADC_L_VOL_SFT 8
+#define RT5665_ADC_R_VOL_MASK (0x7f)
+#define RT5665_ADC_R_VOL_SFT 0
+
+/* Mono ADC Digital Volume Control (0x001d) */
+#define RT5665_MONO_ADC_L_VOL_MASK (0x7f << 8)
+#define RT5665_MONO_ADC_L_VOL_SFT 8
+#define RT5665_MONO_ADC_R_VOL_MASK (0x7f)
+#define RT5665_MONO_ADC_R_VOL_SFT 0
+
+/* Stereo1 ADC Boost Gain Control (0x001f) */
+#define RT5665_STO1_ADC_L_BST_MASK (0x3 << 14)
+#define RT5665_STO1_ADC_L_BST_SFT 14
+#define RT5665_STO1_ADC_R_BST_MASK (0x3 << 12)
+#define RT5665_STO1_ADC_R_BST_SFT 12
+
+/* Mono ADC Boost Gain Control (0x0020) */
+#define RT5665_MONO_ADC_L_BST_MASK (0x3 << 14)
+#define RT5665_MONO_ADC_L_BST_SFT 14
+#define RT5665_MONO_ADC_R_BST_MASK (0x3 << 12)
+#define RT5665_MONO_ADC_R_BST_SFT 12
+
+/* Stereo2 ADC Boost Gain Control (0x0021) */
+#define RT5665_STO2_ADC_L_BST_MASK (0x3 << 14)
+#define RT5665_STO2_ADC_L_BST_SFT 14
+#define RT5665_STO2_ADC_R_BST_MASK (0x3 << 12)
+#define RT5665_STO2_ADC_R_BST_SFT 12
+
+/* Stereo1 ADC Mixer Control (0x0026) */
+#define RT5665_M_STO1_ADC_L1 (0x1 << 15)
+#define RT5665_M_STO1_ADC_L1_SFT 15
+#define RT5665_M_STO1_ADC_L2 (0x1 << 14)
+#define RT5665_M_STO1_ADC_L2_SFT 14
+#define RT5665_STO1_ADC1L_SRC_MASK (0x1 << 13)
+#define RT5665_STO1_ADC1L_SRC_SFT 13
+#define RT5665_STO1_ADC1_SRC_ADC (0x1 << 13)
+#define RT5665_STO1_ADC1_SRC_DACMIX (0x0 << 13)
+#define RT5665_STO1_ADC2L_SRC_MASK (0x1 << 12)
+#define RT5665_STO1_ADC2L_SRC_SFT 12
+#define RT5665_STO1_ADCL_SRC_MASK (0x3 << 10)
+#define RT5665_STO1_ADCL_SRC_SFT 10
+#define RT5665_STO1_DD_L_SRC_MASK (0x1 << 9)
+#define RT5665_STO1_DD_L_SRC_SFT 9
+#define RT5665_STO1_DMIC_SRC_MASK (0x1 << 8)
+#define RT5665_STO1_DMIC_SRC_SFT 8
+#define RT5665_STO1_DMIC_SRC_DMIC2 (0x1 << 8)
+#define RT5665_STO1_DMIC_SRC_DMIC1 (0x0 << 8)
+#define RT5665_M_STO1_ADC_R1 (0x1 << 7)
+#define RT5665_M_STO1_ADC_R1_SFT 7
+#define RT5665_M_STO1_ADC_R2 (0x1 << 6)
+#define RT5665_M_STO1_ADC_R2_SFT 6
+#define RT5665_STO1_ADC1R_SRC_MASK (0x1 << 5)
+#define RT5665_STO1_ADC1R_SRC_SFT 5
+#define RT5665_STO1_ADC2R_SRC_MASK (0x1 << 4)
+#define RT5665_STO1_ADC2R_SRC_SFT 4
+#define RT5665_STO1_ADCR_SRC_MASK (0x3 << 2)
+#define RT5665_STO1_ADCR_SRC_SFT 2
+#define RT5665_STO1_DD_R_SRC_MASK (0x3)
+#define RT5665_STO1_DD_R_SRC_SFT 0
+
+
+/* Mono1 ADC Mixer control (0x0027) */
+#define RT5665_M_MONO_ADC_L1 (0x1 << 15)
+#define RT5665_M_MONO_ADC_L1_SFT 15
+#define RT5665_M_MONO_ADC_L2 (0x1 << 14)
+#define RT5665_M_MONO_ADC_L2_SFT 14
+#define RT5665_MONO_ADC_L1_SRC_MASK (0x1 << 13)
+#define RT5665_MONO_ADC_L1_SRC_SFT 13
+#define RT5665_MONO_ADC_L2_SRC_MASK (0x1 << 12)
+#define RT5665_MONO_ADC_L2_SRC_SFT 12
+#define RT5665_MONO_ADC_L_SRC_MASK (0x3 << 10)
+#define RT5665_MONO_ADC_L_SRC_SFT 10
+#define RT5665_MONO_DD_L_SRC_MASK (0x1 << 9)
+#define RT5665_MONO_DD_L_SRC_SFT 9
+#define RT5665_MONO_DMIC_L_SRC_MASK (0x1 << 8)
+#define RT5665_MONO_DMIC_L_SRC_SFT 8
+#define RT5665_M_MONO_ADC_R1 (0x1 << 7)
+#define RT5665_M_MONO_ADC_R1_SFT 7
+#define RT5665_M_MONO_ADC_R2 (0x1 << 6)
+#define RT5665_M_MONO_ADC_R2_SFT 6
+#define RT5665_MONO_ADC_R1_SRC_MASK (0x1 << 5)
+#define RT5665_MONO_ADC_R1_SRC_SFT 5
+#define RT5665_MONO_ADC_R2_SRC_MASK (0x1 << 4)
+#define RT5665_MONO_ADC_R2_SRC_SFT 4
+#define RT5665_MONO_ADC_R_SRC_MASK (0x3 << 2)
+#define RT5665_MONO_ADC_R_SRC_SFT 2
+#define RT5665_MONO_DD_R_SRC_MASK (0x1 << 1)
+#define RT5665_MONO_DD_R_SRC_SFT 1
+#define RT5665_MONO_DMIC_R_SRC_MASK 0x1
+#define RT5665_MONO_DMIC_R_SRC_SFT 0
+
+/* Stereo2 ADC Mixer Control (0x0028) */
+#define RT5665_M_STO2_ADC_L1 (0x1 << 15)
+#define RT5665_M_STO2_ADC_L1_UN (0x0 << 15)
+#define RT5665_M_STO2_ADC_L1_SFT 15
+#define RT5665_M_STO2_ADC_L2 (0x1 << 14)
+#define RT5665_M_STO2_ADC_L2_SFT 14
+#define RT5665_STO2_ADC1L_SRC_MASK (0x1 << 13)
+#define RT5665_STO2_ADC1L_SRC_SFT 13
+#define RT5665_STO2_ADC1_SRC_ADC (0x1 << 13)
+#define RT5665_STO2_ADC1_SRC_DACMIX (0x0 << 13)
+#define RT5665_STO2_ADC2L_SRC_MASK (0x1 << 12)
+#define RT5665_STO2_ADC2L_SRC_SFT 12
+#define RT5665_STO2_ADCL_SRC_MASK (0x3 << 10)
+#define RT5665_STO2_ADCL_SRC_SFT 10
+#define RT5665_STO2_DD_L_SRC_MASK (0x1 << 9)
+#define RT5665_STO2_DD_L_SRC_SFT 9
+#define RT5665_STO2_DMIC_SRC_MASK (0x1 << 8)
+#define RT5665_STO2_DMIC_SRC_SFT 8
+#define RT5665_STO2_DMIC_SRC_DMIC2 (0x1 << 8)
+#define RT5665_STO2_DMIC_SRC_DMIC1 (0x0 << 8)
+#define RT5665_M_STO2_ADC_R1 (0x1 << 7)
+#define RT5665_M_STO2_ADC_R1_UN (0x0 << 7)
+#define RT5665_M_STO2_ADC_R1_SFT 7
+#define RT5665_M_STO2_ADC_R2 (0x1 << 6)
+#define RT5665_M_STO2_ADC_R2_SFT 6
+#define RT5665_STO2_ADC1R_SRC_MASK (0x1 << 5)
+#define RT5665_STO2_ADC1R_SRC_SFT 5
+#define RT5665_STO2_ADC2R_SRC_MASK (0x1 << 4)
+#define RT5665_STO2_ADC2R_SRC_SFT 4
+#define RT5665_STO2_ADCR_SRC_MASK (0x3 << 2)
+#define RT5665_STO2_ADCR_SRC_SFT 2
+#define RT5665_STO2_DD_R_SRC_MASK (0x1 << 1)
+#define RT5665_STO2_DD_R_SRC_SFT 1
+
+/* ADC Mixer to DAC Mixer Control (0x0029) */
+#define RT5665_M_ADCMIX_L (0x1 << 15)
+#define RT5665_M_ADCMIX_L_SFT 15
+#define RT5665_M_DAC1_L (0x1 << 14)
+#define RT5665_M_DAC1_L_SFT 14
+#define RT5665_DAC1_R_SEL_MASK (0x3 << 10)
+#define RT5665_DAC1_R_SEL_SFT 10
+#define RT5665_DAC1_L_SEL_MASK (0x3 << 8)
+#define RT5665_DAC1_L_SEL_SFT 8
+#define RT5665_M_ADCMIX_R (0x1 << 7)
+#define RT5665_M_ADCMIX_R_SFT 7
+#define RT5665_M_DAC1_R (0x1 << 6)
+#define RT5665_M_DAC1_R_SFT 6
+
+/* Stereo1 DAC Mixer Control (0x002a) */
+#define RT5665_M_DAC_L1_STO_L (0x1 << 15)
+#define RT5665_M_DAC_L1_STO_L_SFT 15
+#define RT5665_G_DAC_L1_STO_L_MASK (0x1 << 14)
+#define RT5665_G_DAC_L1_STO_L_SFT 14
+#define RT5665_M_DAC_R1_STO_L (0x1 << 13)
+#define RT5665_M_DAC_R1_STO_L_SFT 13
+#define RT5665_G_DAC_R1_STO_L_MASK (0x1 << 12)
+#define RT5665_G_DAC_R1_STO_L_SFT 12
+#define RT5665_M_DAC_L2_STO_L (0x1 << 11)
+#define RT5665_M_DAC_L2_STO_L_SFT 11
+#define RT5665_G_DAC_L2_STO_L_MASK (0x1 << 10)
+#define RT5665_G_DAC_L2_STO_L_SFT 10
+#define RT5665_M_DAC_R2_STO_L (0x1 << 9)
+#define RT5665_M_DAC_R2_STO_L_SFT 9
+#define RT5665_G_DAC_R2_STO_L_MASK (0x1 << 8)
+#define RT5665_G_DAC_R2_STO_L_SFT 8
+#define RT5665_M_DAC_L1_STO_R (0x1 << 7)
+#define RT5665_M_DAC_L1_STO_R_SFT 7
+#define RT5665_G_DAC_L1_STO_R_MASK (0x1 << 6)
+#define RT5665_G_DAC_L1_STO_R_SFT 6
+#define RT5665_M_DAC_R1_STO_R (0x1 << 5)
+#define RT5665_M_DAC_R1_STO_R_SFT 5
+#define RT5665_G_DAC_R1_STO_R_MASK (0x1 << 4)
+#define RT5665_G_DAC_R1_STO_R_SFT 4
+#define RT5665_M_DAC_L2_STO_R (0x1 << 3)
+#define RT5665_M_DAC_L2_STO_R_SFT 3
+#define RT5665_G_DAC_L2_STO_R_MASK (0x1 << 2)
+#define RT5665_G_DAC_L2_STO_R_SFT 2
+#define RT5665_M_DAC_R2_STO_R (0x1 << 1)
+#define RT5665_M_DAC_R2_STO_R_SFT 1
+#define RT5665_G_DAC_R2_STO_R_MASK (0x1)
+#define RT5665_G_DAC_R2_STO_R_SFT 0
+
+/* Mono DAC Mixer Control (0x002b) */
+#define RT5665_M_DAC_L1_MONO_L (0x1 << 15)
+#define RT5665_M_DAC_L1_MONO_L_SFT 15
+#define RT5665_G_DAC_L1_MONO_L_MASK (0x1 << 14)
+#define RT5665_G_DAC_L1_MONO_L_SFT 14
+#define RT5665_M_DAC_R1_MONO_L (0x1 << 13)
+#define RT5665_M_DAC_R1_MONO_L_SFT 13
+#define RT5665_G_DAC_R1_MONO_L_MASK (0x1 << 12)
+#define RT5665_G_DAC_R1_MONO_L_SFT 12
+#define RT5665_M_DAC_L2_MONO_L (0x1 << 11)
+#define RT5665_M_DAC_L2_MONO_L_SFT 11
+#define RT5665_G_DAC_L2_MONO_L_MASK (0x1 << 10)
+#define RT5665_G_DAC_L2_MONO_L_SFT 10
+#define RT5665_M_DAC_R2_MONO_L (0x1 << 9)
+#define RT5665_M_DAC_R2_MONO_L_SFT 9
+#define RT5665_G_DAC_R2_MONO_L_MASK (0x1 << 8)
+#define RT5665_G_DAC_R2_MONO_L_SFT 8
+#define RT5665_M_DAC_L1_MONO_R (0x1 << 7)
+#define RT5665_M_DAC_L1_MONO_R_SFT 7
+#define RT5665_G_DAC_L1_MONO_R_MASK (0x1 << 6)
+#define RT5665_G_DAC_L1_MONO_R_SFT 6
+#define RT5665_M_DAC_R1_MONO_R (0x1 << 5)
+#define RT5665_M_DAC_R1_MONO_R_SFT 5
+#define RT5665_G_DAC_R1_MONO_R_MASK (0x1 << 4)
+#define RT5665_G_DAC_R1_MONO_R_SFT 4
+#define RT5665_M_DAC_L2_MONO_R (0x1 << 3)
+#define RT5665_M_DAC_L2_MONO_R_SFT 3
+#define RT5665_G_DAC_L2_MONO_R_MASK (0x1 << 2)
+#define RT5665_G_DAC_L2_MONO_R_SFT 2
+#define RT5665_M_DAC_R2_MONO_R (0x1 << 1)
+#define RT5665_M_DAC_R2_MONO_R_SFT 1
+#define RT5665_G_DAC_R2_MONO_R_MASK (0x1)
+#define RT5665_G_DAC_R2_MONO_R_SFT 0
+
+/* Stereo2 DAC Mixer Control (0x002c) */
+#define RT5665_M_DAC_L1_STO2_L (0x1 << 15)
+#define RT5665_M_DAC_L1_STO2_L_SFT 15
+#define RT5665_G_DAC_L1_STO2_L_MASK (0x1 << 14)
+#define RT5665_G_DAC_L1_STO2_L_SFT 14
+#define RT5665_M_DAC_L2_STO2_L (0x1 << 13)
+#define RT5665_M_DAC_L2_STO2_L_SFT 13
+#define RT5665_G_DAC_L2_STO2_L_MASK (0x1 << 12)
+#define RT5665_G_DAC_L2_STO2_L_SFT 12
+#define RT5665_M_DAC_L3_STO2_L (0x1 << 11)
+#define RT5665_M_DAC_L3_STO2_L_SFT 11
+#define RT5665_G_DAC_L3_STO2_L_MASK (0x1 << 10)
+#define RT5665_G_DAC_L3_STO2_L_SFT 10
+#define RT5665_M_ST_DAC_L1 (0x1 << 9)
+#define RT5665_M_ST_DAC_L1_SFT 9
+#define RT5665_M_ST_DAC_R1 (0x1 << 8)
+#define RT5665_M_ST_DAC_R1_SFT 8
+#define RT5665_M_DAC_R1_STO2_R (0x1 << 7)
+#define RT5665_M_DAC_R1_STO2_R_SFT 7
+#define RT5665_G_DAC_R1_STO2_R_MASK (0x1 << 6)
+#define RT5665_G_DAC_R1_STO2_R_SFT 6
+#define RT5665_M_DAC_R2_STO2_R (0x1 << 5)
+#define RT5665_M_DAC_R2_STO2_R_SFT 5
+#define RT5665_G_DAC_R2_STO2_R_MASK (0x1 << 4)
+#define RT5665_G_DAC_R2_STO2_R_SFT 4
+#define RT5665_M_DAC_R3_STO2_R (0x1 << 3)
+#define RT5665_M_DAC_R3_STO2_R_SFT 3
+#define RT5665_G_DAC_R3_STO2_R_MASK (0x1 << 2)
+#define RT5665_G_DAC_R3_STO2_R_SFT 2
+
+/* Analog DAC1 Input Source Control (0x002d) */
+#define RT5665_DAC_MIX_L_MASK (0x3 << 12)
+#define RT5665_DAC_MIX_L_SFT 12
+#define RT5665_DAC_MIX_R_MASK (0x3 << 8)
+#define RT5665_DAC_MIX_R_SFT 8
+#define RT5665_DAC_L1_SRC_MASK (0x3 << 4)
+#define RT5665_A_DACL1_SFT 4
+#define RT5665_DAC_R1_SRC_MASK (0x3)
+#define RT5665_A_DACR1_SFT 0
+
+/* Analog DAC Input Source Control (0x002e) */
+#define RT5665_A_DACL2_SEL (0x1 << 4)
+#define RT5665_A_DACL2_SFT 4
+#define RT5665_A_DACR2_SEL (0x1 << 0)
+#define RT5665_A_DACR2_SFT 0
+
+/* Digital Interface Data Control (0x002f) */
+#define RT5665_IF2_1_ADC_IN_MASK (0x7 << 12)
+#define RT5665_IF2_1_ADC_IN_SFT 12
+#define RT5665_IF2_1_DAC_SEL_MASK (0x3 << 10)
+#define RT5665_IF2_1_DAC_SEL_SFT 10
+#define RT5665_IF2_1_ADC_SEL_MASK (0x3 << 8)
+#define RT5665_IF2_1_ADC_SEL_SFT 8
+#define RT5665_IF2_2_ADC_IN_MASK (0x7 << 4)
+#define RT5665_IF2_2_ADC_IN_SFT 4
+#define RT5665_IF2_2_DAC_SEL_MASK (0x3 << 2)
+#define RT5665_IF2_2_DAC_SEL_SFT 2
+#define RT5665_IF2_2_ADC_SEL_MASK (0x3 << 0)
+#define RT5665_IF2_2_ADC_SEL_SFT 0
+
+/* Digital Interface Data Control (0x0030) */
+#define RT5665_IF3_ADC_IN_MASK (0x7 << 4)
+#define RT5665_IF3_ADC_IN_SFT 4
+#define RT5665_IF3_DAC_SEL_MASK (0x3 << 2)
+#define RT5665_IF3_DAC_SEL_SFT 2
+#define RT5665_IF3_ADC_SEL_MASK (0x3 << 0)
+#define RT5665_IF3_ADC_SEL_SFT 0
+
+/* PDM Output Control (0x0031) */
+#define RT5665_M_PDM1_L (0x1 << 14)
+#define RT5665_M_PDM1_L_SFT 14
+#define RT5665_M_PDM1_R (0x1 << 12)
+#define RT5665_M_PDM1_R_SFT 12
+#define RT5665_PDM1_L_MASK (0x3 << 10)
+#define RT5665_PDM1_L_SFT 10
+#define RT5665_PDM1_R_MASK (0x3 << 8)
+#define RT5665_PDM1_R_SFT 8
+#define RT5665_PDM1_BUSY (0x1 << 6)
+#define RT5665_PDM_PATTERN (0x1 << 5)
+#define RT5665_PDM_GAIN (0x1 << 4)
+#define RT5665_LRCK_PDM_PI2C (0x1 << 3)
+#define RT5665_PDM_DIV_MASK (0x3)
+
+/* S/PDIF Output Control (0x0036) */
+#define RT5665_SPDIF_SEL_MASK (0x3 << 0)
+#define RT5665_SPDIF_SEL_SFT 0
+
+/* REC Left Mixer Control 2 (0x003c) */
+#define RT5665_M_CBJ_RM1_L (0x1 << 7)
+#define RT5665_M_CBJ_RM1_L_SFT 7
+#define RT5665_M_BST1_RM1_L (0x1 << 5)
+#define RT5665_M_BST1_RM1_L_SFT 5
+#define RT5665_M_BST2_RM1_L (0x1 << 4)
+#define RT5665_M_BST2_RM1_L_SFT 4
+#define RT5665_M_BST3_RM1_L (0x1 << 3)
+#define RT5665_M_BST3_RM1_L_SFT 3
+#define RT5665_M_BST4_RM1_L (0x1 << 2)
+#define RT5665_M_BST4_RM1_L_SFT 2
+#define RT5665_M_INL_RM1_L (0x1 << 1)
+#define RT5665_M_INL_RM1_L_SFT 1
+#define RT5665_M_INR_RM1_L (0x1)
+#define RT5665_M_INR_RM1_L_SFT 0
+
+/* REC Right Mixer Control 2 (0x003e) */
+#define RT5665_M_AEC_REF_RM1_R (0x1 << 7)
+#define RT5665_M_AEC_REF_RM1_R_SFT 7
+#define RT5665_M_BST1_RM1_R (0x1 << 5)
+#define RT5665_M_BST1_RM1_R_SFT 5
+#define RT5665_M_BST2_RM1_R (0x1 << 4)
+#define RT5665_M_BST2_RM1_R_SFT 4
+#define RT5665_M_BST3_RM1_R (0x1 << 3)
+#define RT5665_M_BST3_RM1_R_SFT 3
+#define RT5665_M_BST4_RM1_R (0x1 << 2)
+#define RT5665_M_BST4_RM1_R_SFT 2
+#define RT5665_M_INR_RM1_R (0x1 << 1)
+#define RT5665_M_INR_RM1_R_SFT 1
+#define RT5665_M_MONOVOL_RM1_R (0x1)
+#define RT5665_M_MONOVOL_RM1_R_SFT 0
+
+/* REC Mixer 2 Left Control 2 (0x0041) */
+#define RT5665_M_CBJ_RM2_L (0x1 << 7)
+#define RT5665_M_CBJ_RM2_L_SFT 7
+#define RT5665_M_BST1_RM2_L (0x1 << 5)
+#define RT5665_M_BST1_RM2_L_SFT 5
+#define RT5665_M_BST2_RM2_L (0x1 << 4)
+#define RT5665_M_BST2_RM2_L_SFT 4
+#define RT5665_M_BST3_RM2_L (0x1 << 3)
+#define RT5665_M_BST3_RM2_L_SFT 3
+#define RT5665_M_BST4_RM2_L (0x1 << 2)
+#define RT5665_M_BST4_RM2_L_SFT 2
+#define RT5665_M_INL_RM2_L (0x1 << 1)
+#define RT5665_M_INL_RM2_L_SFT 1
+#define RT5665_M_INR_RM2_L (0x1)
+#define RT5665_M_INR_RM2_L_SFT 0
+
+/* REC Mixer 2 Right Control 2 (0x0043) */
+#define RT5665_M_MONOVOL_RM2_R (0x1 << 7)
+#define RT5665_M_MONOVOL_RM2_R_SFT 7
+#define RT5665_M_BST1_RM2_R (0x1 << 5)
+#define RT5665_M_BST1_RM2_R_SFT 5
+#define RT5665_M_BST2_RM2_R (0x1 << 4)
+#define RT5665_M_BST2_RM2_R_SFT 4
+#define RT5665_M_BST3_RM2_R (0x1 << 3)
+#define RT5665_M_BST3_RM2_R_SFT 3
+#define RT5665_M_BST4_RM2_R (0x1 << 2)
+#define RT5665_M_BST4_RM2_R_SFT 2
+#define RT5665_M_INL_RM2_R (0x1 << 1)
+#define RT5665_M_INL_RM2_R_SFT 1
+#define RT5665_M_INR_RM2_R (0x1)
+#define RT5665_M_INR_RM2_R_SFT 0
+
+/* SPK Left Mixer Control (0x0046) */
+#define RT5665_M_BST3_SM_L (0x1 << 4)
+#define RT5665_M_BST3_SM_L_SFT 4
+#define RT5665_M_IN_R_SM_L (0x1 << 3)
+#define RT5665_M_IN_R_SM_L_SFT 3
+#define RT5665_M_IN_L_SM_L (0x1 << 2)
+#define RT5665_M_IN_L_SM_L_SFT 2
+#define RT5665_M_BST1_SM_L (0x1 << 1)
+#define RT5665_M_BST1_SM_L_SFT 1
+#define RT5665_M_DAC_L2_SM_L (0x1)
+#define RT5665_M_DAC_L2_SM_L_SFT 0
+
+/* SPK Right Mixer Control (0x0047) */
+#define RT5665_M_BST3_SM_R (0x1 << 4)
+#define RT5665_M_BST3_SM_R_SFT 4
+#define RT5665_M_IN_R_SM_R (0x1 << 3)
+#define RT5665_M_IN_R_SM_R_SFT 3
+#define RT5665_M_IN_L_SM_R (0x1 << 2)
+#define RT5665_M_IN_L_SM_R_SFT 2
+#define RT5665_M_BST4_SM_R (0x1 << 1)
+#define RT5665_M_BST4_SM_R_SFT 1
+#define RT5665_M_DAC_R2_SM_R (0x1)
+#define RT5665_M_DAC_R2_SM_R_SFT 0
+
+/* SPO Amp Input and Gain Control (0x0048) */
+#define RT5665_M_DAC_L2_SPKOMIX (0x1 << 13)
+#define RT5665_M_DAC_L2_SPKOMIX_SFT 13
+#define RT5665_M_SPKVOLL_SPKOMIX (0x1 << 12)
+#define RT5665_M_SPKVOLL_SPKOMIX_SFT 12
+#define RT5665_M_DAC_R2_SPKOMIX (0x1 << 9)
+#define RT5665_M_DAC_R2_SPKOMIX_SFT 9
+#define RT5665_M_SPKVOLR_SPKOMIX (0x1 << 8)
+#define RT5665_M_SPKVOLR_SPKOMIX_SFT 8
+
+/* MONOMIX Input and Gain Control (0x004b) */
+#define RT5665_G_MONOVOL_MA (0x1 << 10)
+#define RT5665_G_MONOVOL_MA_SFT 10
+#define RT5665_M_MONOVOL_MA (0x1 << 9)
+#define RT5665_M_MONOVOL_MA_SFT 9
+#define RT5665_M_DAC_L2_MA (0x1 << 8)
+#define RT5665_M_DAC_L2_MA_SFT 8
+#define RT5665_M_BST3_MM (0x1 << 4)
+#define RT5665_M_BST3_MM_SFT 4
+#define RT5665_M_BST2_MM (0x1 << 3)
+#define RT5665_M_BST2_MM_SFT 3
+#define RT5665_M_BST1_MM (0x1 << 2)
+#define RT5665_M_BST1_MM_SFT 2
+#define RT5665_M_RECMIC2L_MM (0x1 << 1)
+#define RT5665_M_RECMIC2L_MM_SFT 1
+#define RT5665_M_DAC_L2_MM (0x1)
+#define RT5665_M_DAC_L2_MM_SFT 0
+
+/* Output Left Mixer Control 1 (0x004d) */
+#define RT5665_G_BST3_OM_L_MASK (0x7 << 12)
+#define RT5665_G_BST3_OM_L_SFT 12
+#define RT5665_G_BST2_OM_L_MASK (0x7 << 9)
+#define RT5665_G_BST2_OM_L_SFT 9
+#define RT5665_G_BST1_OM_L_MASK (0x7 << 6)
+#define RT5665_G_BST1_OM_L_SFT 6
+#define RT5665_G_IN_L_OM_L_MASK (0x7 << 3)
+#define RT5665_G_IN_L_OM_L_SFT 3
+#define RT5665_G_DAC_L2_OM_L_MASK (0x7 << 0)
+#define RT5665_G_DAC_L2_OM_L_SFT 0
+
+/* Output Left Mixer Input Control (0x004e) */
+#define RT5665_M_BST3_OM_L (0x1 << 4)
+#define RT5665_M_BST3_OM_L_SFT 4
+#define RT5665_M_BST2_OM_L (0x1 << 3)
+#define RT5665_M_BST2_OM_L_SFT 3
+#define RT5665_M_BST1_OM_L (0x1 << 2)
+#define RT5665_M_BST1_OM_L_SFT 2
+#define RT5665_M_IN_L_OM_L (0x1 << 1)
+#define RT5665_M_IN_L_OM_L_SFT 1
+#define RT5665_M_DAC_L2_OM_L (0x1)
+#define RT5665_M_DAC_L2_OM_L_SFT 0
+
+/* Output Right Mixer Input Control (0x0050) */
+#define RT5665_M_BST4_OM_R (0x1 << 4)
+#define RT5665_M_BST4_OM_R_SFT 4
+#define RT5665_M_BST3_OM_R (0x1 << 3)
+#define RT5665_M_BST3_OM_R_SFT 3
+#define RT5665_M_BST2_OM_R (0x1 << 2)
+#define RT5665_M_BST2_OM_R_SFT 2
+#define RT5665_M_IN_R_OM_R (0x1 << 1)
+#define RT5665_M_IN_R_OM_R_SFT 1
+#define RT5665_M_DAC_R2_OM_R (0x1)
+#define RT5665_M_DAC_R2_OM_R_SFT 0
+
+/* LOUT Mixer Control (0x0052) */
+#define RT5665_M_DAC_L2_LM (0x1 << 15)
+#define RT5665_M_DAC_L2_LM_SFT 15
+#define RT5665_M_DAC_R2_LM (0x1 << 14)
+#define RT5665_M_DAC_R2_LM_SFT 14
+#define RT5665_M_OV_L_LM (0x1 << 13)
+#define RT5665_M_OV_L_LM_SFT 13
+#define RT5665_M_OV_R_LM (0x1 << 12)
+#define RT5665_M_OV_R_LM_SFT 12
+#define RT5665_LOUT_BST_SFT 11
+#define RT5665_LOUT_DF (0x1 << 11)
+#define RT5665_LOUT_DF_SFT 11
+
+/* Power Management for Digital 1 (0x0061) */
+#define RT5665_PWR_I2S1_1 (0x1 << 15)
+#define RT5665_PWR_I2S1_1_BIT 15
+#define RT5665_PWR_I2S1_2 (0x1 << 14)
+#define RT5665_PWR_I2S1_2_BIT 14
+#define RT5665_PWR_I2S2_1 (0x1 << 13)
+#define RT5665_PWR_I2S2_1_BIT 13
+#define RT5665_PWR_I2S2_2 (0x1 << 12)
+#define RT5665_PWR_I2S2_2_BIT 12
+#define RT5665_PWR_DAC_L1 (0x1 << 11)
+#define RT5665_PWR_DAC_L1_BIT 11
+#define RT5665_PWR_DAC_R1 (0x1 << 10)
+#define RT5665_PWR_DAC_R1_BIT 10
+#define RT5665_PWR_I2S3 (0x1 << 9)
+#define RT5665_PWR_I2S3_BIT 9
+#define RT5665_PWR_LDO (0x1 << 8)
+#define RT5665_PWR_LDO_BIT 8
+#define RT5665_PWR_DAC_L2 (0x1 << 7)
+#define RT5665_PWR_DAC_L2_BIT 7
+#define RT5665_PWR_DAC_R2 (0x1 << 6)
+#define RT5665_PWR_DAC_R2_BIT 6
+#define RT5665_PWR_ADC_L1 (0x1 << 4)
+#define RT5665_PWR_ADC_L1_BIT 4
+#define RT5665_PWR_ADC_R1 (0x1 << 3)
+#define RT5665_PWR_ADC_R1_BIT 3
+#define RT5665_PWR_ADC_L2 (0x1 << 2)
+#define RT5665_PWR_ADC_L2_BIT 2
+#define RT5665_PWR_ADC_R2 (0x1 << 1)
+#define RT5665_PWR_ADC_R2_BIT 1
+
+/* Power Management for Digital 2 (0x0062) */
+#define RT5665_PWR_ADC_S1F (0x1 << 15)
+#define RT5665_PWR_ADC_S1F_BIT 15
+#define RT5665_PWR_ADC_S2F (0x1 << 14)
+#define RT5665_PWR_ADC_S2F_BIT 14
+#define RT5665_PWR_ADC_MF_L (0x1 << 13)
+#define RT5665_PWR_ADC_MF_L_BIT 13
+#define RT5665_PWR_ADC_MF_R (0x1 << 12)
+#define RT5665_PWR_ADC_MF_R_BIT 12
+#define RT5665_PWR_DAC_S2F (0x1 << 11)
+#define RT5665_PWR_DAC_S2F_BIT 11
+#define RT5665_PWR_DAC_S1F (0x1 << 10)
+#define RT5665_PWR_DAC_S1F_BIT 10
+#define RT5665_PWR_DAC_MF_L (0x1 << 9)
+#define RT5665_PWR_DAC_MF_L_BIT 9
+#define RT5665_PWR_DAC_MF_R (0x1 << 8)
+#define RT5665_PWR_DAC_MF_R_BIT 8
+#define RT5665_PWR_PDM1 (0x1 << 7)
+#define RT5665_PWR_PDM1_BIT 7
+
+/* Power Management for Analog 1 (0x0063) */
+#define RT5665_PWR_VREF1 (0x1 << 15)
+#define RT5665_PWR_VREF1_BIT 15
+#define RT5665_PWR_FV1 (0x1 << 14)
+#define RT5665_PWR_FV1_BIT 14
+#define RT5665_PWR_VREF2 (0x1 << 13)
+#define RT5665_PWR_VREF2_BIT 13
+#define RT5665_PWR_FV2 (0x1 << 12)
+#define RT5665_PWR_FV2_BIT 12
+#define RT5665_PWR_VREF3 (0x1 << 11)
+#define RT5665_PWR_VREF3_BIT 11
+#define RT5665_PWR_FV3 (0x1 << 10)
+#define RT5665_PWR_FV3_BIT 10
+#define RT5665_PWR_MB (0x1 << 9)
+#define RT5665_PWR_MB_BIT 9
+#define RT5665_PWR_LM (0x1 << 8)
+#define RT5665_PWR_LM_BIT 8
+#define RT5665_PWR_BG (0x1 << 7)
+#define RT5665_PWR_BG_BIT 7
+#define RT5665_PWR_MA (0x1 << 6)
+#define RT5665_PWR_MA_BIT 6
+#define RT5665_PWR_HA_L (0x1 << 5)
+#define RT5665_PWR_HA_L_BIT 5
+#define RT5665_PWR_HA_R (0x1 << 4)
+#define RT5665_PWR_HA_R_BIT 4
+#define RT5665_HP_DRIVER_MASK (0x3 << 2)
+#define RT5665_HP_DRIVER_1X (0x0 << 2)
+#define RT5665_HP_DRIVER_3X (0x1 << 2)
+#define RT5665_HP_DRIVER_5X (0x2 << 2)
+#define RT5665_LDO1_DVO_MASK (0x3)
+#define RT5665_LDO1_DVO_09 (0x0)
+#define RT5665_LDO1_DVO_10 (0x1)
+#define RT5665_LDO1_DVO_12 (0x2)
+#define RT5665_LDO1_DVO_14 (0x3)
+
+/* Power Management for Analog 2 (0x0064) */
+#define RT5665_PWR_BST1 (0x1 << 15)
+#define RT5665_PWR_BST1_BIT 15
+#define RT5665_PWR_BST2 (0x1 << 14)
+#define RT5665_PWR_BST2_BIT 14
+#define RT5665_PWR_BST3 (0x1 << 13)
+#define RT5665_PWR_BST3_BIT 13
+#define RT5665_PWR_BST4 (0x1 << 12)
+#define RT5665_PWR_BST4_BIT 12
+#define RT5665_PWR_MB1 (0x1 << 11)
+#define RT5665_PWR_MB1_PWR_DOWN (0x0 << 11)
+#define RT5665_PWR_MB1_BIT 11
+#define RT5665_PWR_MB2 (0x1 << 10)
+#define RT5665_PWR_MB2_PWR_DOWN (0x0 << 10)
+#define RT5665_PWR_MB2_BIT 10
+#define RT5665_PWR_MB3 (0x1 << 9)
+#define RT5665_PWR_MB3_BIT 9
+#define RT5665_PWR_BST1_P (0x1 << 7)
+#define RT5665_PWR_BST1_P_BIT 7
+#define RT5665_PWR_BST2_P (0x1 << 6)
+#define RT5665_PWR_BST2_P_BIT 6
+#define RT5665_PWR_BST3_P (0x1 << 5)
+#define RT5665_PWR_BST3_P_BIT 5
+#define RT5665_PWR_BST4_P (0x1 << 4)
+#define RT5665_PWR_BST4_P_BIT 4
+#define RT5665_PWR_JD1 (0x1 << 3)
+#define RT5665_PWR_JD1_BIT 3
+#define RT5665_PWR_JD2 (0x1 << 2)
+#define RT5665_PWR_JD2_BIT 2
+#define RT5665_PWR_RM1_L (0x1 << 1)
+#define RT5665_PWR_RM1_L_BIT 1
+#define RT5665_PWR_RM1_R (0x1)
+#define RT5665_PWR_RM1_R_BIT 0
+
+/* Power Management for Analog 3 (0x0065) */
+#define RT5665_PWR_CBJ (0x1 << 9)
+#define RT5665_PWR_CBJ_BIT 9
+#define RT5665_PWR_BST_L (0x1 << 8)
+#define RT5665_PWR_BST_L_BIT 8
+#define RT5665_PWR_BST_R (0x1 << 7)
+#define RT5665_PWR_BST_R_BIT 7
+#define RT5665_PWR_PLL (0x1 << 6)
+#define RT5665_PWR_PLL_BIT 6
+#define RT5665_PWR_LDO2 (0x1 << 2)
+#define RT5665_PWR_LDO2_BIT 2
+#define RT5665_PWR_SVD (0x1 << 1)
+#define RT5665_PWR_SVD_BIT 1
+
+/* Power Management for Mixer (0x0066) */
+#define RT5665_PWR_RM2_L (0x1 << 15)
+#define RT5665_PWR_RM2_L_BIT 15
+#define RT5665_PWR_RM2_R (0x1 << 14)
+#define RT5665_PWR_RM2_R_BIT 14
+#define RT5665_PWR_OM_L (0x1 << 13)
+#define RT5665_PWR_OM_L_BIT 13
+#define RT5665_PWR_OM_R (0x1 << 12)
+#define RT5665_PWR_OM_R_BIT 12
+#define RT5665_PWR_MM (0x1 << 11)
+#define RT5665_PWR_MM_BIT 11
+#define RT5665_PWR_AEC_REF (0x1 << 6)
+#define RT5665_PWR_AEC_REF_BIT 6
+#define RT5665_PWR_STO1_DAC_L (0x1 << 5)
+#define RT5665_PWR_STO1_DAC_L_BIT 5
+#define RT5665_PWR_STO1_DAC_R (0x1 << 4)
+#define RT5665_PWR_STO1_DAC_R_BIT 4
+#define RT5665_PWR_MONO_DAC_L (0x1 << 3)
+#define RT5665_PWR_MONO_DAC_L_BIT 3
+#define RT5665_PWR_MONO_DAC_R (0x1 << 2)
+#define RT5665_PWR_MONO_DAC_R_BIT 2
+#define RT5665_PWR_STO2_DAC_L (0x1 << 1)
+#define RT5665_PWR_STO2_DAC_L_BIT 1
+#define RT5665_PWR_STO2_DAC_R (0x1)
+#define RT5665_PWR_STO2_DAC_R_BIT 0
+
+/* Power Management for Volume (0x0067) */
+#define RT5665_PWR_OV_L (0x1 << 13)
+#define RT5665_PWR_OV_L_BIT 13
+#define RT5665_PWR_OV_R (0x1 << 12)
+#define RT5665_PWR_OV_R_BIT 12
+#define RT5665_PWR_IN_L (0x1 << 9)
+#define RT5665_PWR_IN_L_BIT 9
+#define RT5665_PWR_IN_R (0x1 << 8)
+#define RT5665_PWR_IN_R_BIT 8
+#define RT5665_PWR_MV (0x1 << 7)
+#define RT5665_PWR_MV_BIT 7
+#define RT5665_PWR_MIC_DET (0x1 << 5)
+#define RT5665_PWR_MIC_DET_BIT 5
+
+/* Clock Detect (0x006b) */
+#define RT5665_SYS_CLK_DET 15
+#define RT5665_HP_CLK_DET 14
+#define RT5665_MONO_CLK_DET 13
+#define RT5665_LOUT_CLK_DET 12
+#define RT5665_POW_CLK_DET 0
+
+/* Digital Microphone Control 1 (0x006e) */
+#define RT5665_DMIC_1_EN_MASK (0x1 << 15)
+#define RT5665_DMIC_1_EN_SFT 15
+#define RT5665_DMIC_1_DIS (0x0 << 15)
+#define RT5665_DMIC_1_EN (0x1 << 15)
+#define RT5665_DMIC_2_EN_MASK (0x1 << 14)
+#define RT5665_DMIC_2_EN_SFT 14
+#define RT5665_DMIC_2_DIS (0x0 << 14)
+#define RT5665_DMIC_2_EN (0x1 << 14)
+#define RT5665_DMIC_2_DP_MASK (0x1 << 9)
+#define RT5665_DMIC_2_DP_SFT 9
+#define RT5665_DMIC_2_DP_GPIO5 (0x0 << 9)
+#define RT5665_DMIC_2_DP_IN2P (0x1 << 9)
+#define RT5665_DMIC_CLK_MASK (0x7 << 5)
+#define RT5665_DMIC_CLK_SFT 5
+#define RT5665_DMIC_1_DP_MASK (0x1 << 1)
+#define RT5665_DMIC_1_DP_SFT 1
+#define RT5665_DMIC_1_DP_GPIO4 (0x0 << 1)
+#define RT5665_DMIC_1_DP_IN2N (0x1 << 1)
+
+
+/* Digital Microphone Control 2 (0x006f) */
+#define RT5665_DMIC_2L_LH_MASK (0x1 << 3)
+#define RT5665_DMIC_2L_LH_SFT 3
+#define RT5665_DMIC_2L_LH_RISING (0x0 << 3)
+#define RT5665_DMIC_2L_LH_FALLING (0x1 << 3)
+#define RT5665_DMIC_2R_LH_MASK (0x1 << 2)
+#define RT5665_DMIC_2R_LH_SFT 2
+#define RT5665_DMIC_2R_LH_RISING (0x0 << 2)
+#define RT5665_DMIC_2R_LH_FALLING (0x1 << 2)
+#define RT5665_DMIC_1L_LH_MASK (0x1 << 1)
+#define RT5665_DMIC_1L_LH_SFT 1
+#define RT5665_DMIC_1L_LH_RISING (0x0 << 1)
+#define RT5665_DMIC_1L_LH_FALLING (0x1 << 1)
+#define RT5665_DMIC_1R_LH_MASK (0x1 << 0)
+#define RT5665_DMIC_1R_LH_SFT 0
+#define RT5665_DMIC_1R_LH_RISING (0x0)
+#define RT5665_DMIC_1R_LH_FALLING (0x1)
+
+/* I2S1/2/3 Audio Serial Data Port Control (0x0070 0x0071 0x0072) */
+#define RT5665_I2S_MS_MASK (0x1 << 15)
+#define RT5665_I2S_MS_SFT 15
+#define RT5665_I2S_MS_M (0x0 << 15)
+#define RT5665_I2S_MS_S (0x1 << 15)
+#define RT5665_I2S_PIN_CFG_MASK (0x1 << 14)
+#define RT5665_I2S_PIN_CFG_SFT 14
+#define RT5665_I2S_CLK_SEL_MASK (0x1 << 11)
+#define RT5665_I2S_CLK_SEL_SFT 11
+#define RT5665_I2S_BP_MASK (0x1 << 8)
+#define RT5665_I2S_BP_SFT 8
+#define RT5665_I2S_BP_NOR (0x0 << 8)
+#define RT5665_I2S_BP_INV (0x1 << 8)
+#define RT5665_I2S_DL_MASK (0x3 << 4)
+#define RT5665_I2S_DL_SFT 4
+#define RT5665_I2S_DL_16 (0x0 << 4)
+#define RT5665_I2S_DL_20 (0x1 << 4)
+#define RT5665_I2S_DL_24 (0x2 << 4)
+#define RT5665_I2S_DL_8 (0x3 << 4)
+#define RT5665_I2S_DF_MASK (0x7)
+#define RT5665_I2S_DF_SFT 0
+#define RT5665_I2S_DF_I2S (0x0)
+#define RT5665_I2S_DF_LEFT (0x1)
+#define RT5665_I2S_DF_PCM_A (0x2)
+#define RT5665_I2S_DF_PCM_B (0x3)
+#define RT5665_I2S_DF_PCM_A_N (0x6)
+#define RT5665_I2S_DF_PCM_B_N (0x7)
+
+/* ADC/DAC Clock Control 1 (0x0073) */
+#define RT5665_I2S_PD1_MASK (0x7 << 12)
+#define RT5665_I2S_PD1_SFT 12
+#define RT5665_I2S_PD1_1 (0x0 << 12)
+#define RT5665_I2S_PD1_2 (0x1 << 12)
+#define RT5665_I2S_PD1_3 (0x2 << 12)
+#define RT5665_I2S_PD1_4 (0x3 << 12)
+#define RT5665_I2S_PD1_6 (0x4 << 12)
+#define RT5665_I2S_PD1_8 (0x5 << 12)
+#define RT5665_I2S_PD1_12 (0x6 << 12)
+#define RT5665_I2S_PD1_16 (0x7 << 12)
+#define RT5665_I2S_M_PD2_MASK (0x7 << 8)
+#define RT5665_I2S_M_PD2_SFT 8
+#define RT5665_I2S_M_PD2_1 (0x0 << 8)
+#define RT5665_I2S_M_PD2_2 (0x1 << 8)
+#define RT5665_I2S_M_PD2_3 (0x2 << 8)
+#define RT5665_I2S_M_PD2_4 (0x3 << 8)
+#define RT5665_I2S_M_PD2_6 (0x4 << 8)
+#define RT5665_I2S_M_PD2_8 (0x5 << 8)
+#define RT5665_I2S_M_PD2_12 (0x6 << 8)
+#define RT5665_I2S_M_PD2_16 (0x7 << 8)
+#define RT5665_I2S_CLK_SRC_MASK (0x3 << 4)
+#define RT5665_I2S_CLK_SRC_SFT 4
+#define RT5665_I2S_CLK_SRC_MCLK (0x0 << 4)
+#define RT5665_I2S_CLK_SRC_PLL1 (0x1 << 4)
+#define RT5665_I2S_CLK_SRC_RCCLK (0x2 << 4)
+#define RT5665_DAC_OSR_MASK (0x3 << 2)
+#define RT5665_DAC_OSR_SFT 2
+#define RT5665_DAC_OSR_128 (0x0 << 2)
+#define RT5665_DAC_OSR_64 (0x1 << 2)
+#define RT5665_DAC_OSR_32 (0x2 << 2)
+#define RT5665_ADC_OSR_MASK (0x3)
+#define RT5665_ADC_OSR_SFT 0
+#define RT5665_ADC_OSR_128 (0x0)
+#define RT5665_ADC_OSR_64 (0x1)
+#define RT5665_ADC_OSR_32 (0x2)
+
+/* ADC/DAC Clock Control 2 (0x0074) */
+#define RT5665_I2S_BCLK_MS2_MASK (0x1 << 15)
+#define RT5665_I2S_BCLK_MS2_SFT 15
+#define RT5665_I2S_BCLK_MS2_32 (0x0 << 15)
+#define RT5665_I2S_BCLK_MS2_64 (0x1 << 15)
+#define RT5665_I2S_PD2_MASK (0x7 << 12)
+#define RT5665_I2S_PD2_SFT 12
+#define RT5665_I2S_PD2_1 (0x0 << 12)
+#define RT5665_I2S_PD2_2 (0x1 << 12)
+#define RT5665_I2S_PD2_3 (0x2 << 12)
+#define RT5665_I2S_PD2_4 (0x3 << 12)
+#define RT5665_I2S_PD2_6 (0x4 << 12)
+#define RT5665_I2S_PD2_8 (0x5 << 12)
+#define RT5665_I2S_PD2_12 (0x6 << 12)
+#define RT5665_I2S_PD2_16 (0x7 << 12)
+#define RT5665_I2S_BCLK_MS3_MASK (0x1 << 11)
+#define RT5665_I2S_BCLK_MS3_SFT 11
+#define RT5665_I2S_BCLK_MS3_32 (0x0 << 11)
+#define RT5665_I2S_BCLK_MS3_64 (0x1 << 11)
+#define RT5665_I2S_PD3_MASK (0x7 << 8)
+#define RT5665_I2S_PD3_SFT 8
+#define RT5665_I2S_PD3_1 (0x0 << 8)
+#define RT5665_I2S_PD3_2 (0x1 << 8)
+#define RT5665_I2S_PD3_3 (0x2 << 8)
+#define RT5665_I2S_PD3_4 (0x3 << 8)
+#define RT5665_I2S_PD3_6 (0x4 << 8)
+#define RT5665_I2S_PD3_8 (0x5 << 8)
+#define RT5665_I2S_PD3_12 (0x6 << 8)
+#define RT5665_I2S_PD3_16 (0x7 << 8)
+#define RT5665_I2S_PD4_MASK (0x7 << 4)
+#define RT5665_I2S_PD4_SFT 4
+#define RT5665_I2S_PD4_1 (0x0 << 4)
+#define RT5665_I2S_PD4_2 (0x1 << 4)
+#define RT5665_I2S_PD4_3 (0x2 << 4)
+#define RT5665_I2S_PD4_4 (0x3 << 4)
+#define RT5665_I2S_PD4_6 (0x4 << 4)
+#define RT5665_I2S_PD4_8 (0x5 << 4)
+#define RT5665_I2S_PD4_12 (0x6 << 4)
+#define RT5665_I2S_PD4_16 (0x7 << 4)
+
+/* TDM control 1 (0x0078) */
+#define RT5665_I2S1_MODE_MASK (0x1 << 15)
+#define RT5665_I2S1_MODE_I2S (0x0 << 15)
+#define RT5665_I2S1_MODE_TDM (0x1 << 15)
+#define RT5665_TDM_IN_CH_MASK (0x3 << 10)
+#define RT5665_TDM_IN_CH_2 (0x0 << 10)
+#define RT5665_TDM_IN_CH_4 (0x1 << 10)
+#define RT5665_TDM_IN_CH_6 (0x2 << 10)
+#define RT5665_TDM_IN_CH_8 (0x3 << 10)
+#define RT5665_TDM_OUT_CH_MASK (0x3 << 8)
+#define RT5665_TDM_OUT_CH_2 (0x0 << 8)
+#define RT5665_TDM_OUT_CH_4 (0x1 << 8)
+#define RT5665_TDM_OUT_CH_6 (0x2 << 8)
+#define RT5665_TDM_OUT_CH_8 (0x3 << 8)
+#define RT5665_TDM_IN_LEN_MASK (0x3 << 6)
+#define RT5665_TDM_IN_LEN_16 (0x0 << 6)
+#define RT5665_TDM_IN_LEN_20 (0x1 << 6)
+#define RT5665_TDM_IN_LEN_24 (0x2 << 6)
+#define RT5665_TDM_IN_LEN_32 (0x3 << 6)
+#define RT5665_TDM_OUT_LEN_MASK (0x3 << 4)
+#define RT5665_TDM_OUT_LEN_16 (0x0 << 4)
+#define RT5665_TDM_OUT_LEN_20 (0x1 << 4)
+#define RT5665_TDM_OUT_LEN_24 (0x2 << 4)
+#define RT5665_TDM_OUT_LEN_32 (0x3 << 4)
+
+
+/* TDM control 2 (0x0079) */
+#define RT5665_I2S1_1_DS_ADC_SLOT01_SFT 14
+#define RT5665_I2S1_1_DS_ADC_SLOT23_SFT 12
+#define RT5665_I2S1_1_DS_ADC_SLOT45_SFT 10
+#define RT5665_I2S1_1_DS_ADC_SLOT67_SFT 8
+#define RT5665_I2S1_2_DS_ADC_SLOT01_SFT 6
+#define RT5665_I2S1_2_DS_ADC_SLOT23_SFT 4
+#define RT5665_I2S1_2_DS_ADC_SLOT45_SFT 2
+#define RT5665_I2S1_2_DS_ADC_SLOT67_SFT 0
+
+/* TDM control 3/4 (0x007a) (0x007b) */
+#define RT5665_IF1_ADC1_SEL_SFT 10
+#define RT5665_IF1_ADC2_SEL_SFT 9
+#define RT5665_IF1_ADC3_SEL_SFT 8
+#define RT5665_IF1_ADC4_SEL_SFT 7
+#define RT5665_TDM_ADC_SEL_SFT 0
+#define RT5665_TDM_ADC_CTRL_MASK (0x1f << 0)
+#define RT5665_TDM_ADC_DATA_06 (0x6 << 0)
+
+/* Global Clock Control (0x0080) */
+#define RT5665_SCLK_SRC_MASK (0x3 << 14)
+#define RT5665_SCLK_SRC_SFT 14
+#define RT5665_SCLK_SRC_MCLK (0x0 << 14)
+#define RT5665_SCLK_SRC_PLL1 (0x1 << 14)
+#define RT5665_SCLK_SRC_RCCLK (0x2 << 14)
+#define RT5665_PLL1_SRC_MASK (0x7 << 8)
+#define RT5665_PLL1_SRC_SFT 8
+#define RT5665_PLL1_SRC_MCLK (0x0 << 8)
+#define RT5665_PLL1_SRC_BCLK1 (0x1 << 8)
+#define RT5665_PLL1_SRC_BCLK2 (0x2 << 8)
+#define RT5665_PLL1_SRC_BCLK3 (0x3 << 8)
+#define RT5665_PLL1_PD_MASK (0x7 << 4)
+#define RT5665_PLL1_PD_SFT 4
+
+
+#define RT5665_PLL_INP_MAX 40000000
+#define RT5665_PLL_INP_MIN 256000
+/* PLL M/N/K Code Control 1 (0x0081) */
+#define RT5665_PLL_N_MAX 0x001ff
+#define RT5665_PLL_N_MASK (RT5665_PLL_N_MAX << 7)
+#define RT5665_PLL_N_SFT 7
+#define RT5665_PLL_K_MAX 0x001f
+#define RT5665_PLL_K_MASK (RT5665_PLL_K_MAX)
+#define RT5665_PLL_K_SFT 0
+
+/* PLL M/N/K Code Control 2 (0x0082) */
+#define RT5665_PLL_M_MAX 0x00f
+#define RT5665_PLL_M_MASK (RT5665_PLL_M_MAX << 12)
+#define RT5665_PLL_M_SFT 12
+#define RT5665_PLL_M_BP (0x1 << 11)
+#define RT5665_PLL_M_BP_SFT 11
+#define RT5665_PLL_K_BP (0x1 << 10)
+#define RT5665_PLL_K_BP_SFT 10
+
+/* PLL tracking mode 1 (0x0083) */
+#define RT5665_I2S3_ASRC_MASK (0x1 << 15)
+#define RT5665_I2S3_ASRC_SFT 15
+#define RT5665_I2S2_ASRC_MASK (0x1 << 14)
+#define RT5665_I2S2_ASRC_SFT 14
+#define RT5665_I2S1_ASRC_MASK (0x1 << 13)
+#define RT5665_I2S1_ASRC_SFT 13
+#define RT5665_DAC_STO1_ASRC_MASK (0x1 << 12)
+#define RT5665_DAC_STO1_ASRC_SFT 12
+#define RT5665_DAC_STO2_ASRC_MASK (0x1 << 11)
+#define RT5665_DAC_STO2_ASRC_SFT 11
+#define RT5665_DAC_MONO_L_ASRC_MASK (0x1 << 10)
+#define RT5665_DAC_MONO_L_ASRC_SFT 10
+#define RT5665_DAC_MONO_R_ASRC_MASK (0x1 << 9)
+#define RT5665_DAC_MONO_R_ASRC_SFT 9
+#define RT5665_DMIC_STO1_ASRC_MASK (0x1 << 8)
+#define RT5665_DMIC_STO1_ASRC_SFT 8
+#define RT5665_DMIC_STO2_ASRC_MASK (0x1 << 7)
+#define RT5665_DMIC_STO2_ASRC_SFT 7
+#define RT5665_DMIC_MONO_L_ASRC_MASK (0x1 << 6)
+#define RT5665_DMIC_MONO_L_ASRC_SFT 6
+#define RT5665_DMIC_MONO_R_ASRC_MASK (0x1 << 5)
+#define RT5665_DMIC_MONO_R_ASRC_SFT 5
+#define RT5665_ADC_STO1_ASRC_MASK (0x1 << 4)
+#define RT5665_ADC_STO1_ASRC_SFT 4
+#define RT5665_ADC_STO2_ASRC_MASK (0x1 << 3)
+#define RT5665_ADC_STO2_ASRC_SFT 3
+#define RT5665_ADC_MONO_L_ASRC_MASK (0x1 << 2)
+#define RT5665_ADC_MONO_L_ASRC_SFT 2
+#define RT5665_ADC_MONO_R_ASRC_MASK (0x1 << 1)
+#define RT5665_ADC_MONO_R_ASRC_SFT 1
+
+/* PLL tracking mode 2 (0x0084)*/
+#define RT5665_DA_STO1_CLK_SEL_MASK (0x7 << 12)
+#define RT5665_DA_STO1_CLK_SEL_SFT 12
+#define RT5665_DA_STO2_CLK_SEL_MASK (0x7 << 8)
+#define RT5665_DA_STO2_CLK_SEL_SFT 8
+#define RT5665_DA_MONOL_CLK_SEL_MASK (0x7 << 4)
+#define RT5665_DA_MONOL_CLK_SEL_SFT 4
+#define RT5665_DA_MONOR_CLK_SEL_MASK (0x7)
+#define RT5665_DA_MONOR_CLK_SEL_SFT 0
+
+/* PLL tracking mode 3 (0x0085)*/
+#define RT5665_AD_STO1_CLK_SEL_MASK (0x7 << 12)
+#define RT5665_AD_STO1_CLK_SEL_SFT 12
+#define RT5665_AD_STO2_CLK_SEL_MASK (0x7 << 8)
+#define RT5665_AD_STO2_CLK_SEL_SFT 8
+#define RT5665_AD_MONOL_CLK_SEL_MASK (0x7 << 4)
+#define RT5665_AD_MONOL_CLK_SEL_SFT 4
+#define RT5665_AD_MONOR_CLK_SEL_MASK (0x7)
+#define RT5665_AD_MONOR_CLK_SEL_SFT 0
+
+/* ASRC Control 4 (0x0086) */
+#define RT5665_I2S1_RATE_MASK (0xf << 12)
+#define RT5665_I2S1_RATE_SFT 12
+#define RT5665_I2S2_RATE_MASK (0xf << 8)
+#define RT5665_I2S2_RATE_SFT 8
+#define RT5665_I2S3_RATE_MASK (0xf << 4)
+#define RT5665_I2S3_RATE_SFT 4
+
+/* Depop Mode Control 1 (0x008e) */
+#define RT5665_PUMP_EN (0x1 << 3)
+
+/* Depop Mode Control 2 (0x8f) */
+#define RT5665_DEPOP_MASK (0x1 << 13)
+#define RT5665_DEPOP_SFT 13
+#define RT5665_DEPOP_AUTO (0x0 << 13)
+#define RT5665_DEPOP_MAN (0x1 << 13)
+#define RT5665_RAMP_MASK (0x1 << 12)
+#define RT5665_RAMP_SFT 12
+#define RT5665_RAMP_DIS (0x0 << 12)
+#define RT5665_RAMP_EN (0x1 << 12)
+#define RT5665_BPS_MASK (0x1 << 11)
+#define RT5665_BPS_SFT 11
+#define RT5665_BPS_DIS (0x0 << 11)
+#define RT5665_BPS_EN (0x1 << 11)
+#define RT5665_FAST_UPDN_MASK (0x1 << 10)
+#define RT5665_FAST_UPDN_SFT 10
+#define RT5665_FAST_UPDN_DIS (0x0 << 10)
+#define RT5665_FAST_UPDN_EN (0x1 << 10)
+#define RT5665_MRES_MASK (0x3 << 8)
+#define RT5665_MRES_SFT 8
+#define RT5665_MRES_15MO (0x0 << 8)
+#define RT5665_MRES_25MO (0x1 << 8)
+#define RT5665_MRES_35MO (0x2 << 8)
+#define RT5665_MRES_45MO (0x3 << 8)
+#define RT5665_VLO_MASK (0x1 << 7)
+#define RT5665_VLO_SFT 7
+#define RT5665_VLO_3V (0x0 << 7)
+#define RT5665_VLO_32V (0x1 << 7)
+#define RT5665_DIG_DP_MASK (0x1 << 6)
+#define RT5665_DIG_DP_SFT 6
+#define RT5665_DIG_DP_DIS (0x0 << 6)
+#define RT5665_DIG_DP_EN (0x1 << 6)
+#define RT5665_DP_TH_MASK (0x3 << 4)
+#define RT5665_DP_TH_SFT 4
+
+/* Depop Mode Control 3 (0x90) */
+#define RT5665_CP_SYS_MASK (0x7 << 12)
+#define RT5665_CP_SYS_SFT 12
+#define RT5665_CP_FQ1_MASK (0x7 << 8)
+#define RT5665_CP_FQ1_SFT 8
+#define RT5665_CP_FQ2_MASK (0x7 << 4)
+#define RT5665_CP_FQ2_SFT 4
+#define RT5665_CP_FQ3_MASK (0x7)
+#define RT5665_CP_FQ3_SFT 0
+#define RT5665_CP_FQ_1_5_KHZ 0
+#define RT5665_CP_FQ_3_KHZ 1
+#define RT5665_CP_FQ_6_KHZ 2
+#define RT5665_CP_FQ_12_KHZ 3
+#define RT5665_CP_FQ_24_KHZ 4
+#define RT5665_CP_FQ_48_KHZ 5
+#define RT5665_CP_FQ_96_KHZ 6
+#define RT5665_CP_FQ_192_KHZ 7
+
+/* HPOUT charge pump 1 (0x0091) */
+#define RT5665_OSW_L_MASK (0x1 << 11)
+#define RT5665_OSW_L_SFT 11
+#define RT5665_OSW_L_DIS (0x0 << 11)
+#define RT5665_OSW_L_EN (0x1 << 11)
+#define RT5665_OSW_R_MASK (0x1 << 10)
+#define RT5665_OSW_R_SFT 10
+#define RT5665_OSW_R_DIS (0x0 << 10)
+#define RT5665_OSW_R_EN (0x1 << 10)
+#define RT5665_PM_HP_MASK (0x3 << 8)
+#define RT5665_PM_HP_SFT 8
+#define RT5665_PM_HP_LV (0x0 << 8)
+#define RT5665_PM_HP_MV (0x1 << 8)
+#define RT5665_PM_HP_HV (0x2 << 8)
+#define RT5665_IB_HP_MASK (0x3 << 6)
+#define RT5665_IB_HP_SFT 6
+#define RT5665_IB_HP_125IL (0x0 << 6)
+#define RT5665_IB_HP_25IL (0x1 << 6)
+#define RT5665_IB_HP_5IL (0x2 << 6)
+#define RT5665_IB_HP_1IL (0x3 << 6)
+
+/* PV detection and SPK gain control (0x92) */
+#define RT5665_PVDD_DET_MASK (0x1 << 15)
+#define RT5665_PVDD_DET_SFT 15
+#define RT5665_PVDD_DET_DIS (0x0 << 15)
+#define RT5665_PVDD_DET_EN (0x1 << 15)
+#define RT5665_SPK_AG_MASK (0x1 << 14)
+#define RT5665_SPK_AG_SFT 14
+#define RT5665_SPK_AG_DIS (0x0 << 14)
+#define RT5665_SPK_AG_EN (0x1 << 14)
+
+/* Micbias Control1 (0x93) */
+#define RT5665_MIC1_BS_MASK (0x1 << 15)
+#define RT5665_MIC1_BS_SFT 15
+#define RT5665_MIC1_BS_9AV (0x0 << 15)
+#define RT5665_MIC1_BS_75AV (0x1 << 15)
+#define RT5665_MIC2_BS_MASK (0x1 << 14)
+#define RT5665_MIC2_BS_SFT 14
+#define RT5665_MIC2_BS_9AV (0x0 << 14)
+#define RT5665_MIC2_BS_75AV (0x1 << 14)
+#define RT5665_MIC1_CLK_MASK (0x1 << 13)
+#define RT5665_MIC1_CLK_SFT 13
+#define RT5665_MIC1_CLK_DIS (0x0 << 13)
+#define RT5665_MIC1_CLK_EN (0x1 << 13)
+#define RT5665_MIC2_CLK_MASK (0x1 << 12)
+#define RT5665_MIC2_CLK_SFT 12
+#define RT5665_MIC2_CLK_DIS (0x0 << 12)
+#define RT5665_MIC2_CLK_EN (0x1 << 12)
+#define RT5665_MIC1_OVCD_MASK (0x1 << 11)
+#define RT5665_MIC1_OVCD_SFT 11
+#define RT5665_MIC1_OVCD_DIS (0x0 << 11)
+#define RT5665_MIC1_OVCD_EN (0x1 << 11)
+#define RT5665_MIC1_OVTH_MASK (0x3 << 9)
+#define RT5665_MIC1_OVTH_SFT 9
+#define RT5665_MIC1_OVTH_600UA (0x0 << 9)
+#define RT5665_MIC1_OVTH_1500UA (0x1 << 9)
+#define RT5665_MIC1_OVTH_2000UA (0x2 << 9)
+#define RT5665_MIC2_OVCD_MASK (0x1 << 8)
+#define RT5665_MIC2_OVCD_SFT 8
+#define RT5665_MIC2_OVCD_DIS (0x0 << 8)
+#define RT5665_MIC2_OVCD_EN (0x1 << 8)
+#define RT5665_MIC2_OVTH_MASK (0x3 << 6)
+#define RT5665_MIC2_OVTH_SFT 6
+#define RT5665_MIC2_OVTH_600UA (0x0 << 6)
+#define RT5665_MIC2_OVTH_1500UA (0x1 << 6)
+#define RT5665_MIC2_OVTH_2000UA (0x2 << 6)
+#define RT5665_PWR_MB_MASK (0x1 << 5)
+#define RT5665_PWR_MB_SFT 5
+#define RT5665_PWR_MB_PD (0x0 << 5)
+#define RT5665_PWR_MB_PU (0x1 << 5)
+
+/* Micbias Control2 (0x94) */
+#define RT5665_PWR_CLK25M_MASK (0x1 << 9)
+#define RT5665_PWR_CLK25M_SFT 9
+#define RT5665_PWR_CLK25M_PD (0x0 << 9)
+#define RT5665_PWR_CLK25M_PU (0x1 << 9)
+#define RT5665_PWR_CLK1M_MASK (0x1 << 8)
+#define RT5665_PWR_CLK1M_SFT 8
+#define RT5665_PWR_CLK1M_PD (0x0 << 8)
+#define RT5665_PWR_CLK1M_PU (0x1 << 8)
+
+
+/* EQ Control 1 (0x00b0) */
+#define RT5665_EQ_SRC_DAC (0x0 << 15)
+#define RT5665_EQ_SRC_ADC (0x1 << 15)
+#define RT5665_EQ_UPD (0x1 << 14)
+#define RT5665_EQ_UPD_BIT 14
+#define RT5665_EQ_CD_MASK (0x1 << 13)
+#define RT5665_EQ_CD_SFT 13
+#define RT5665_EQ_CD_DIS (0x0 << 13)
+#define RT5665_EQ_CD_EN (0x1 << 13)
+#define RT5665_EQ_DITH_MASK (0x3 << 8)
+#define RT5665_EQ_DITH_SFT 8
+#define RT5665_EQ_DITH_NOR (0x0 << 8)
+#define RT5665_EQ_DITH_LSB (0x1 << 8)
+#define RT5665_EQ_DITH_LSB_1 (0x2 << 8)
+#define RT5665_EQ_DITH_LSB_2 (0x3 << 8)
+
+/* IRQ Control 1 (0x00b7) */
+#define RT5665_JD1_1_EN_MASK (0x1 << 15)
+#define RT5665_JD1_1_EN_SFT 15
+#define RT5665_JD1_1_DIS (0x0 << 15)
+#define RT5665_JD1_1_EN (0x1 << 15)
+#define RT5665_JD1_2_EN_MASK (0x1 << 12)
+#define RT5665_JD1_2_EN_SFT 12
+#define RT5665_JD1_2_DIS (0x0 << 12)
+#define RT5665_JD1_2_EN (0x1 << 12)
+
+/* IRQ Control 2 (0x00b8) */
+#define RT5665_IL_IRQ_MASK (0x1 << 6)
+#define RT5665_IL_IRQ_DIS (0x0 << 6)
+#define RT5665_IL_IRQ_EN (0x1 << 6)
+
+/* IRQ Control 5 (0x00ba) */
+#define RT5665_IRQ_JD_EN (0x1 << 3)
+#define RT5665_IRQ_JD_EN_SFT 3
+
+/* GPIO Control 1 (0x00c0) */
+#define RT5665_GP1_PIN_MASK (0x1 << 15)
+#define RT5665_GP1_PIN_SFT 15
+#define RT5665_GP1_PIN_GPIO1 (0x0 << 15)
+#define RT5665_GP1_PIN_IRQ (0x1 << 15)
+#define RT5665_GP2_PIN_MASK (0x3 << 13)
+#define RT5665_GP2_PIN_SFT 13
+#define RT5665_GP2_PIN_GPIO2 (0x0 << 13)
+#define RT5665_GP2_PIN_BCLK2 (0x1 << 13)
+#define RT5665_GP2_PIN_PDM_SCL (0x2 << 13)
+#define RT5665_GP3_PIN_MASK (0x3 << 11)
+#define RT5665_GP3_PIN_SFT 11
+#define RT5665_GP3_PIN_GPIO3 (0x0 << 11)
+#define RT5665_GP3_PIN_LRCK2 (0x1 << 11)
+#define RT5665_GP3_PIN_PDM_SDA (0x2 << 11)
+#define RT5665_GP4_PIN_MASK (0x3 << 9)
+#define RT5665_GP4_PIN_SFT 9
+#define RT5665_GP4_PIN_GPIO4 (0x0 << 9)
+#define RT5665_GP4_PIN_DACDAT2_1 (0x1 << 9)
+#define RT5665_GP4_PIN_DMIC1_SDA (0x2 << 9)
+#define RT5665_GP5_PIN_MASK (0x3 << 7)
+#define RT5665_GP5_PIN_SFT 7
+#define RT5665_GP5_PIN_GPIO5 (0x0 << 7)
+#define RT5665_GP5_PIN_ADCDAT2_1 (0x1 << 7)
+#define RT5665_GP5_PIN_DMIC2_SDA (0x2 << 7)
+#define RT5665_GP6_PIN_MASK (0x3 << 5)
+#define RT5665_GP6_PIN_SFT 5
+#define RT5665_GP6_PIN_GPIO6 (0x0 << 5)
+#define RT5665_GP6_PIN_BCLK3 (0x1 << 5)
+#define RT5665_GP6_PIN_PDM_SCL (0x2 << 5)
+#define RT5665_GP7_PIN_MASK (0x3 << 3)
+#define RT5665_GP7_PIN_SFT 3
+#define RT5665_GP7_PIN_GPIO7 (0x0 << 3)
+#define RT5665_GP7_PIN_LRCK3 (0x1 << 3)
+#define RT5665_GP7_PIN_PDM_SDA (0x2 << 3)
+#define RT5665_GP8_PIN_MASK (0x3 << 1)
+#define RT5665_GP8_PIN_SFT 1
+#define RT5665_GP8_PIN_GPIO8 (0x0 << 1)
+#define RT5665_GP8_PIN_DACDAT3 (0x1 << 1)
+#define RT5665_GP8_PIN_DMIC2_SCL (0x2 << 1)
+#define RT5665_GP8_PIN_DACDAT2_2 (0x3 << 1)
+
+
+/* GPIO Control 2 (0x00c1)*/
+#define RT5665_GP9_PIN_MASK (0x3 << 14)
+#define RT5665_GP9_PIN_SFT 14
+#define RT5665_GP9_PIN_GPIO9 (0x0 << 14)
+#define RT5665_GP9_PIN_ADCDAT3 (0x1 << 14)
+#define RT5665_GP9_PIN_DMIC1_SCL (0x2 << 14)
+#define RT5665_GP9_PIN_ADCDAT2_2 (0x3 << 14)
+#define RT5665_GP10_PIN_MASK (0x3 << 12)
+#define RT5665_GP10_PIN_SFT 12
+#define RT5665_GP10_PIN_GPIO10 (0x0 << 12)
+#define RT5665_GP10_PIN_ADCDAT1_2 (0x1 << 12)
+#define RT5665_GP10_PIN_LPD (0x2 << 12)
+#define RT5665_GP1_PF_MASK (0x1 << 11)
+#define RT5665_GP1_PF_IN (0x0 << 11)
+#define RT5665_GP1_PF_OUT (0x1 << 11)
+#define RT5665_GP1_OUT_MASK (0x1 << 10)
+#define RT5665_GP1_OUT_H (0x0 << 10)
+#define RT5665_GP1_OUT_L (0x1 << 10)
+#define RT5665_GP2_PF_MASK (0x1 << 9)
+#define RT5665_GP2_PF_IN (0x0 << 9)
+#define RT5665_GP2_PF_OUT (0x1 << 9)
+#define RT5665_GP2_OUT_MASK (0x1 << 8)
+#define RT5665_GP2_OUT_H (0x0 << 8)
+#define RT5665_GP2_OUT_L (0x1 << 8)
+#define RT5665_GP3_PF_MASK (0x1 << 7)
+#define RT5665_GP3_PF_IN (0x0 << 7)
+#define RT5665_GP3_PF_OUT (0x1 << 7)
+#define RT5665_GP3_OUT_MASK (0x1 << 6)
+#define RT5665_GP3_OUT_H (0x0 << 6)
+#define RT5665_GP3_OUT_L (0x1 << 6)
+#define RT5665_GP4_PF_MASK (0x1 << 5)
+#define RT5665_GP4_PF_IN (0x0 << 5)
+#define RT5665_GP4_PF_OUT (0x1 << 5)
+#define RT5665_GP4_OUT_MASK (0x1 << 4)
+#define RT5665_GP4_OUT_H (0x0 << 4)
+#define RT5665_GP4_OUT_L (0x1 << 4)
+#define RT5665_GP5_PF_MASK (0x1 << 3)
+#define RT5665_GP5_PF_IN (0x0 << 3)
+#define RT5665_GP5_PF_OUT (0x1 << 3)
+#define RT5665_GP5_OUT_MASK (0x1 << 2)
+#define RT5665_GP5_OUT_H (0x0 << 2)
+#define RT5665_GP5_OUT_L (0x1 << 2)
+#define RT5665_GP6_PF_MASK (0x1 << 1)
+#define RT5665_GP6_PF_IN (0x0 << 1)
+#define RT5665_GP6_PF_OUT (0x1 << 1)
+#define RT5665_GP6_OUT_MASK (0x1)
+#define RT5665_GP6_OUT_H (0x0)
+#define RT5665_GP6_OUT_L (0x1)
+
+
+/* GPIO Control 3 (0x00c2) */
+#define RT5665_GP7_PF_MASK (0x1 << 15)
+#define RT5665_GP7_PF_IN (0x0 << 15)
+#define RT5665_GP7_PF_OUT (0x1 << 15)
+#define RT5665_GP7_OUT_MASK (0x1 << 14)
+#define RT5665_GP7_OUT_H (0x0 << 14)
+#define RT5665_GP7_OUT_L (0x1 << 14)
+#define RT5665_GP8_PF_MASK (0x1 << 13)
+#define RT5665_GP8_PF_IN (0x0 << 13)
+#define RT5665_GP8_PF_OUT (0x1 << 13)
+#define RT5665_GP8_OUT_MASK (0x1 << 12)
+#define RT5665_GP8_OUT_H (0x0 << 12)
+#define RT5665_GP8_OUT_L (0x1 << 12)
+#define RT5665_GP9_PF_MASK (0x1 << 11)
+#define RT5665_GP9_PF_IN (0x0 << 11)
+#define RT5665_GP9_PF_OUT (0x1 << 11)
+#define RT5665_GP9_OUT_MASK (0x1 << 10)
+#define RT5665_GP9_OUT_H (0x0 << 10)
+#define RT5665_GP9_OUT_L (0x1 << 10)
+#define RT5665_GP10_PF_MASK (0x1 << 9)
+#define RT5665_GP10_PF_IN (0x0 << 9)
+#define RT5665_GP10_PF_OUT (0x1 << 9)
+#define RT5665_GP10_OUT_MASK (0x1 << 8)
+#define RT5665_GP10_OUT_H (0x0 << 8)
+#define RT5665_GP10_OUT_L (0x1 << 8)
+#define RT5665_GP11_PF_MASK (0x1 << 7)
+#define RT5665_GP11_PF_IN (0x0 << 7)
+#define RT5665_GP11_PF_OUT (0x1 << 7)
+#define RT5665_GP11_OUT_MASK (0x1 << 6)
+#define RT5665_GP11_OUT_H (0x0 << 6)
+#define RT5665_GP11_OUT_L (0x1 << 6)
+
+/* Soft volume and zero cross control 1 (0x00d9) */
+#define RT5665_SV_MASK (0x1 << 15)
+#define RT5665_SV_SFT 15
+#define RT5665_SV_DIS (0x0 << 15)
+#define RT5665_SV_EN (0x1 << 15)
+#define RT5665_OUT_SV_MASK (0x1 << 13)
+#define RT5665_OUT_SV_SFT 13
+#define RT5665_OUT_SV_DIS (0x0 << 13)
+#define RT5665_OUT_SV_EN (0x1 << 13)
+#define RT5665_HP_SV_MASK (0x1 << 12)
+#define RT5665_HP_SV_SFT 12
+#define RT5665_HP_SV_DIS (0x0 << 12)
+#define RT5665_HP_SV_EN (0x1 << 12)
+#define RT5665_ZCD_DIG_MASK (0x1 << 11)
+#define RT5665_ZCD_DIG_SFT 11
+#define RT5665_ZCD_DIG_DIS (0x0 << 11)
+#define RT5665_ZCD_DIG_EN (0x1 << 11)
+#define RT5665_ZCD_MASK (0x1 << 10)
+#define RT5665_ZCD_SFT 10
+#define RT5665_ZCD_PD (0x0 << 10)
+#define RT5665_ZCD_PU (0x1 << 10)
+#define RT5665_SV_DLY_MASK (0xf)
+#define RT5665_SV_DLY_SFT 0
+
+/* Soft volume and zero cross control 2 (0x00da) */
+#define RT5665_ZCD_HP_MASK (0x1 << 15)
+#define RT5665_ZCD_HP_SFT 15
+#define RT5665_ZCD_HP_DIS (0x0 << 15)
+#define RT5665_ZCD_HP_EN (0x1 << 15)
+
+/* 4 Button Inline Command Control 2 (0x00e0) */
+#define RT5665_4BTN_IL_MASK (0x1 << 15)
+#define RT5665_4BTN_IL_EN (0x1 << 15)
+#define RT5665_4BTN_IL_DIS (0x0 << 15)
+#define RT5665_4BTN_IL_RST_MASK (0x1 << 14)
+#define RT5665_4BTN_IL_NOR (0x1 << 14)
+#define RT5665_4BTN_IL_RST (0x0 << 14)
+
+/* Analog JD Control 1 (0x00f0) */
+#define RT5665_JD1_MODE_MASK (0x3 << 0)
+#define RT5665_JD1_MODE_0 (0x0 << 0)
+#define RT5665_JD1_MODE_1 (0x1 << 0)
+#define RT5665_JD1_MODE_2 (0x2 << 0)
+
+/* Jack Detect Control 3 (0x00f8) */
+#define RT5665_JD_TRI_HPO_SEL_MASK (0x7)
+#define RT5665_JD_TRI_HPO_SEL_SFT (0)
+#define RT5665_JD_HPO_GPIO_JD1 (0x0)
+#define RT5665_JD_HPO_JD1_1 (0x1)
+#define RT5665_JD_HPO_JD1_2 (0x2)
+#define RT5665_JD_HPO_JD2 (0x3)
+#define RT5665_JD_HPO_GPIO_JD2 (0x4)
+#define RT5665_JD_HPO_JD3 (0x5)
+#define RT5665_JD_HPO_JD_D (0x6)
+
+/* Digital Misc Control (0x00fa) */
+#define RT5665_AM_MASK (0x1 << 7)
+#define RT5665_AM_EN (0x1 << 7)
+#define RT5665_AM_DIS (0x0 << 7)
+#define RT5665_DIG_GATE_CTRL 0x1
+#define RT5665_DIG_GATE_CTRL_SFT (0)
+
+/* Chopper and Clock control for ADC (0x011c)*/
+#define RT5665_M_RF_DIG_MASK (0x1 << 12)
+#define RT5665_M_RF_DIG_SFT 12
+#define RT5665_M_RI_DIG (0x1 << 11)
+
+/* Chopper and Clock control for DAC (0x013a)*/
+#define RT5665_CKXEN_DAC1_MASK (0x1 << 13)
+#define RT5665_CKXEN_DAC1_SFT 13
+#define RT5665_CKGEN_DAC1_MASK (0x1 << 12)
+#define RT5665_CKGEN_DAC1_SFT 12
+#define RT5665_CKXEN_DAC2_MASK (0x1 << 5)
+#define RT5665_CKXEN_DAC2_SFT 5
+#define RT5665_CKGEN_DAC2_MASK (0x1 << 4)
+#define RT5665_CKGEN_DAC2_SFT 4
+
+/* Chopper and Clock control for ADC (0x013b)*/
+#define RT5665_CKXEN_ADC1_MASK (0x1 << 13)
+#define RT5665_CKXEN_ADC1_SFT 13
+#define RT5665_CKGEN_ADC1_MASK (0x1 << 12)
+#define RT5665_CKGEN_ADC1_SFT 12
+#define RT5665_CKXEN_ADC2_MASK (0x1 << 5)
+#define RT5665_CKXEN_ADC2_SFT 5
+#define RT5665_CKGEN_ADC2_MASK (0x1 << 4)
+#define RT5665_CKGEN_ADC2_SFT 4
+
+/* Volume test (0x013f)*/
+#define RT5665_SEL_CLK_VOL_MASK (0x1 << 15)
+#define RT5665_SEL_CLK_VOL_EN (0x1 << 15)
+#define RT5665_SEL_CLK_VOL_DIS (0x0 << 15)
+
+/* Test Mode Control 1 (0x0145) */
+#define RT5665_AD2DA_LB_MASK (0x1 << 9)
+#define RT5665_AD2DA_LB_SFT 9
+
+/* Stereo Noise Gate Control 1 (0x0160) */
+#define RT5665_NG2_EN_MASK (0x1 << 15)
+#define RT5665_NG2_EN (0x1 << 15)
+#define RT5665_NG2_DIS (0x0 << 15)
+
+/* Stereo1 DAC Silence Detection Control (0x0190) */
+#define RT5665_DEB_STO_DAC_MASK (0x7 << 4)
+#define RT5665_DEB_80_MS (0x0 << 4)
+
+/* SAR ADC Inline Command Control 1 (0x0210) */
+#define RT5665_SAR_BUTT_DET_MASK (0x1 << 15)
+#define RT5665_SAR_BUTT_DET_EN (0x1 << 15)
+#define RT5665_SAR_BUTT_DET_DIS (0x0 << 15)
+#define RT5665_SAR_BUTDET_MODE_MASK (0x1 << 14)
+#define RT5665_SAR_BUTDET_POW_SAV (0x1 << 14)
+#define RT5665_SAR_BUTDET_POW_NORM (0x0 << 14)
+#define RT5665_SAR_BUTDET_RST_MASK (0x1 << 13)
+#define RT5665_SAR_BUTDET_RST_NORMAL (0x1 << 13)
+#define RT5665_SAR_BUTDET_RST (0x0 << 13)
+#define RT5665_SAR_POW_MASK (0x1 << 12)
+#define RT5665_SAR_POW_EN (0x1 << 12)
+#define RT5665_SAR_POW_DIS (0x0 << 12)
+#define RT5665_SAR_RST_MASK (0x1 << 11)
+#define RT5665_SAR_RST_NORMAL (0x1 << 11)
+#define RT5665_SAR_RST (0x0 << 11)
+#define RT5665_SAR_BYPASS_MASK (0x1 << 10)
+#define RT5665_SAR_BYPASS_EN (0x1 << 10)
+#define RT5665_SAR_BYPASS_DIS (0x0 << 10)
+#define RT5665_SAR_SEL_MB1_MASK (0x1 << 9)
+#define RT5665_SAR_SEL_MB1_SEL (0x1 << 9)
+#define RT5665_SAR_SEL_MB1_NOSEL (0x0 << 9)
+#define RT5665_SAR_SEL_MB2_MASK (0x1 << 8)
+#define RT5665_SAR_SEL_MB2_SEL (0x1 << 8)
+#define RT5665_SAR_SEL_MB2_NOSEL (0x0 << 8)
+#define RT5665_SAR_SEL_MODE_MASK (0x1 << 7)
+#define RT5665_SAR_SEL_MODE_CMP (0x1 << 7)
+#define RT5665_SAR_SEL_MODE_ADC (0x0 << 7)
+#define RT5665_SAR_SEL_MB1_MB2_MASK (0x1 << 5)
+#define RT5665_SAR_SEL_MB1_MB2_AUTO (0x1 << 5)
+#define RT5665_SAR_SEL_MB1_MB2_MANU (0x0 << 5)
+#define RT5665_SAR_SEL_SIGNAL_MASK (0x1 << 4)
+#define RT5665_SAR_SEL_SIGNAL_AUTO (0x1 << 4)
+#define RT5665_SAR_SEL_SIGNAL_MANU (0x0 << 4)
+
+/* System Clock Source */
+enum {
+ RT5665_SCLK_S_MCLK,
+ RT5665_SCLK_S_PLL1,
+ RT5665_SCLK_S_RCCLK,
+};
+
+/* PLL1 Source */
+enum {
+ RT5665_PLL1_S_MCLK,
+ RT5665_PLL1_S_BCLK1,
+ RT5665_PLL1_S_BCLK2,
+ RT5665_PLL1_S_BCLK3,
+ RT5665_PLL1_S_BCLK4,
+};
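+
+/*
+ * Illustrative only: the clock and PLL source ids above are normally passed
+ * in from the machine driver through the generic ASoC helpers; the
+ * frequencies below are example values, not requirements of this header.
+ *
+ *   snd_soc_codec_set_pll(codec, 0, RT5665_PLL1_S_MCLK, 19200000, 24576000);
+ *   snd_soc_codec_set_sysclk(codec, RT5665_SCLK_S_PLL1, 0, 24576000,
+ *                            SND_SOC_CLOCK_IN);
+ */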
+
+enum {
+ RT5665_AIF1_1,
+ RT5665_AIF1_2,
+ RT5665_AIF2_1,
+ RT5665_AIF2_2,
+ RT5665_AIF3,
+ RT5665_AIFS
+};
+
+enum {
+ CODEC_5665,
+ CODEC_5666,
+ CODEC_5668,
+};
+
+/* filter mask */
+enum {
+ RT5665_DA_STEREO1_FILTER = 0x1,
+ RT5665_DA_STEREO2_FILTER = (0x1 << 1),
+ RT5665_DA_MONO_L_FILTER = (0x1 << 2),
+ RT5665_DA_MONO_R_FILTER = (0x1 << 3),
+ RT5665_AD_STEREO1_FILTER = (0x1 << 4),
+ RT5665_AD_STEREO2_FILTER = (0x1 << 5),
+ RT5665_AD_MONO_L_FILTER = (0x1 << 6),
+ RT5665_AD_MONO_R_FILTER = (0x1 << 7),
+};
+
+enum {
+ RT5665_CLK_SEL_SYS,
+ RT5665_CLK_SEL_I2S1_ASRC,
+ RT5665_CLK_SEL_I2S2_ASRC,
+ RT5665_CLK_SEL_I2S3_ASRC,
+ RT5665_CLK_SEL_SYS2,
+ RT5665_CLK_SEL_SYS3,
+ RT5665_CLK_SEL_SYS4,
+};
+
+int rt5665_sel_asrc_clk_src(struct snd_soc_codec *codec,
+ unsigned int filter_mask, unsigned int clk_src);
+int rt5665_set_jack_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *hs_jack);
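+
+/*
+ * Illustrative usage from a hypothetical machine driver (the jack variable
+ * below is an example name, not part of this header): the filter masks and
+ * clock selections above are combined and handed to the helpers like so.
+ *
+ *   rt5665_sel_asrc_clk_src(codec,
+ *                           RT5665_DA_STEREO1_FILTER | RT5665_AD_STEREO1_FILTER,
+ *                           RT5665_CLK_SEL_I2S1_ASRC);
+ *   rt5665_set_jack_detect(codec, &headset_jack);
+ */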
+
+#endif /* __RT5665_H__ */
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 49caf1393aeb..97bafac3bc15 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -2618,7 +2618,7 @@ static int rt5670_set_bias_level(struct snd_soc_codec *codec,
RT5670_OSW_L_DIS | RT5670_OSW_R_DIS);
snd_soc_update_bits(codec, RT5670_DIG_MISC, 0x1, 0x1);
snd_soc_update_bits(codec, RT5670_PWR_ANLG1,
- RT5670_LDO_SEL_MASK, 0x3);
+ RT5670_LDO_SEL_MASK, 0x5);
}
break;
case SND_SOC_BIAS_STANDBY:
@@ -2626,7 +2626,7 @@ static int rt5670_set_bias_level(struct snd_soc_codec *codec,
RT5670_PWR_VREF1 | RT5670_PWR_VREF2 |
RT5670_PWR_FV1 | RT5670_PWR_FV2, 0);
snd_soc_update_bits(codec, RT5670_PWR_ANLG1,
- RT5670_LDO_SEL_MASK, 0x1);
+ RT5670_LDO_SEL_MASK, 0x3);
break;
case SND_SOC_BIAS_OFF:
if (rt5670->pdata.jd_mode)
@@ -2813,6 +2813,7 @@ MODULE_DEVICE_TABLE(i2c, rt5670_i2c_id);
#ifdef CONFIG_ACPI
static const struct acpi_device_id rt5670_acpi_match[] = {
{ "10EC5670", 0},
+ { "10EC5672", 0},
{ },
};
MODULE_DEVICE_TABLE(acpi, rt5670_acpi_match);
@@ -2826,6 +2827,13 @@ static const struct dmi_system_id dmi_platform_intel_braswell[] = {
DMI_MATCH(DMI_BOARD_NAME, "Braswell CRB"),
},
},
+ {
+ .ident = "Dell Wyse 3040",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Wyse 3040"),
+ },
+ },
{}
};
@@ -2889,6 +2897,9 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
if (ret != 0)
dev_warn(&i2c->dev, "Failed to apply regmap patch: %d\n", ret);
+ regmap_update_bits(rt5670->regmap, RT5670_DIG_MISC,
+ RT5670_MCLK_DET, RT5670_MCLK_DET);
+
if (rt5670->pdata.in2_diff)
regmap_update_bits(rt5670->regmap, RT5670_IN2,
RT5670_IN_DF2, RT5670_IN_DF2);
@@ -2903,7 +2914,6 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
RT5670_GP1_PIN_MASK, RT5670_GP1_PIN_IRQ);
regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2,
RT5670_GP1_PF_MASK, RT5670_GP1_PF_OUT);
- regmap_update_bits(rt5670->regmap, RT5670_DIG_MISC, 0x8, 0x8);
}
if (rt5670->pdata.jd_mode) {
diff --git a/sound/soc/codecs/rt5670.h b/sound/soc/codecs/rt5670.h
index 3f1b0f1df809..5ba485cae4e6 100644
--- a/sound/soc/codecs/rt5670.h
+++ b/sound/soc/codecs/rt5670.h
@@ -1914,6 +1914,7 @@ enum {
#define RT5670_IF1_ADC1_IN2_SFT 11
#define RT5670_IF1_ADC2_IN1_SEL (0x1 << 10)
#define RT5670_IF1_ADC2_IN1_SFT 10
+#define RT5670_MCLK_DET (0x1 << 3)
/* General Control2 (0xfb) */
#define RT5670_RXDC_SRC_MASK (0x1 << 7)
diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
index 91879ea95415..ebd0f7c5ad3b 100644
--- a/sound/soc/codecs/rt5677-spi.c
+++ b/sound/soc/codecs/rt5677-spi.c
@@ -20,7 +20,6 @@
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/sched.h>
-#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/regulator/consumer.h>
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index 27f30d352867..9de7fe8af255 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/regmap.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/ac97_codec.h>
@@ -26,31 +27,56 @@
#include <sound/soc.h>
#include <sound/tlv.h>
-#include "stac9766.h"
-
#define STAC9766_VENDOR_ID 0x83847666
#define STAC9766_VENDOR_ID_MASK 0xffffffff
-/*
- * STAC9766 register cache
- */
-static const u16 stac9766_reg[] = {
- 0x6A90, 0x8000, 0x8000, 0x8000, /* 6 */
- 0x0000, 0x0000, 0x8008, 0x8008, /* e */
- 0x8808, 0x8808, 0x8808, 0x8808, /* 16 */
- 0x8808, 0x0000, 0x8000, 0x0000, /* 1e */
- 0x0000, 0x0000, 0x0000, 0x000f, /* 26 */
- 0x0a05, 0x0400, 0xbb80, 0x0000, /* 2e */
- 0x0000, 0xbb80, 0x0000, 0x0000, /* 36 */
- 0x0000, 0x2000, 0x0000, 0x0100, /* 3e */
- 0x0000, 0x0000, 0x0080, 0x0000, /* 46 */
- 0x0000, 0x0000, 0x0003, 0xffff, /* 4e */
- 0x0000, 0x0000, 0x0000, 0x0000, /* 56 */
- 0x4000, 0x0000, 0x0000, 0x0000, /* 5e */
- 0x1201, 0xFFFF, 0xFFFF, 0x0000, /* 66 */
- 0x0000, 0x0000, 0x0000, 0x0000, /* 6e */
- 0x0000, 0x0000, 0x0000, 0x0006, /* 76 */
- 0x0000, 0x0000, 0x0000, 0x0000, /* 7e */
+#define AC97_STAC_DA_CONTROL 0x6A
+#define AC97_STAC_ANALOG_SPECIAL 0x6E
+#define AC97_STAC_STEREO_MIC 0x78
+
+static const struct reg_default stac9766_reg_defaults[] = {
+ { 0x02, 0x8000 },
+ { 0x04, 0x8000 },
+ { 0x06, 0x8000 },
+ { 0x0a, 0x0000 },
+ { 0x0c, 0x8008 },
+ { 0x0e, 0x8008 },
+ { 0x10, 0x8808 },
+ { 0x12, 0x8808 },
+ { 0x14, 0x8808 },
+ { 0x16, 0x8808 },
+ { 0x18, 0x8808 },
+ { 0x1a, 0x0000 },
+ { 0x1c, 0x8000 },
+ { 0x20, 0x0000 },
+ { 0x22, 0x0000 },
+ { 0x28, 0x0a05 },
+ { 0x2c, 0xbb80 },
+ { 0x32, 0xbb80 },
+ { 0x3a, 0x2000 },
+ { 0x3e, 0x0100 },
+ { 0x4c, 0x0300 },
+ { 0x4e, 0xffff },
+ { 0x50, 0x0000 },
+ { 0x52, 0x0000 },
+ { 0x54, 0x0000 },
+ { 0x6a, 0x0000 },
+ { 0x6e, 0x1000 },
+ { 0x72, 0x0000 },
+ { 0x78, 0x0000 },
+};
+
+static const struct regmap_config stac9766_regmap_config = {
+ .reg_bits = 16,
+ .reg_stride = 2,
+ .val_bits = 16,
+ .max_register = 0x78,
+ .cache_type = REGCACHE_RBTREE,
+
+ .volatile_reg = regmap_ac97_default_volatile,
+
+ .reg_defaults = stac9766_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(stac9766_reg_defaults),
};
static const char *stac9766_record_mux[] = {"Mic", "CD", "Video", "AUX",
@@ -139,71 +165,22 @@ static const struct snd_kcontrol_new stac9766_snd_ac97_controls[] = {
SOC_ENUM("Pop Bypass Mux", stac9766_popbypass_enum),
};
-static int stac9766_ac97_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int val)
-{
- struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
- u16 *cache = codec->reg_cache;
-
- if (reg > AC97_STAC_PAGE0) {
- stac9766_ac97_write(codec, AC97_INT_PAGING, 0);
- soc_ac97_ops->write(ac97, reg, val);
- stac9766_ac97_write(codec, AC97_INT_PAGING, 1);
- return 0;
- }
- if (reg / 2 >= ARRAY_SIZE(stac9766_reg))
- return -EIO;
-
- soc_ac97_ops->write(ac97, reg, val);
- cache[reg / 2] = val;
- return 0;
-}
-
-static unsigned int stac9766_ac97_read(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
- u16 val = 0, *cache = codec->reg_cache;
-
- if (reg > AC97_STAC_PAGE0) {
- stac9766_ac97_write(codec, AC97_INT_PAGING, 0);
- val = soc_ac97_ops->read(ac97, reg - AC97_STAC_PAGE0);
- stac9766_ac97_write(codec, AC97_INT_PAGING, 1);
- return val;
- }
- if (reg / 2 >= ARRAY_SIZE(stac9766_reg))
- return -EIO;
-
- if (reg == AC97_RESET || reg == AC97_GPIO_STATUS ||
- reg == AC97_INT_PAGING || reg == AC97_VENDOR_ID1 ||
- reg == AC97_VENDOR_ID2) {
-
- val = soc_ac97_ops->read(ac97, reg);
- return val;
- }
- return cache[reg / 2];
-}
-
static int ac97_analog_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
struct snd_pcm_runtime *runtime = substream->runtime;
- unsigned short reg, vra;
-
- vra = stac9766_ac97_read(codec, AC97_EXTENDED_STATUS);
+ unsigned short reg;
- vra |= 0x1; /* enable variable rate audio */
- vra &= ~0x4; /* disable SPDIF output */
-
- stac9766_ac97_write(codec, AC97_EXTENDED_STATUS, vra);
+ /* enable variable rate audio, disable SPDIF output */
+ snd_soc_update_bits(codec, AC97_EXTENDED_STATUS, 0x5, 0x1);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
reg = AC97_PCM_FRONT_DAC_RATE;
else
reg = AC97_PCM_LR_ADC_RATE;
- return stac9766_ac97_write(codec, reg, runtime->rate);
+ return snd_soc_write(codec, reg, runtime->rate);
}
static int ac97_digital_prepare(struct snd_pcm_substream *substream,
@@ -211,18 +188,16 @@ static int ac97_digital_prepare(struct snd_pcm_substream *substream,
{
struct snd_soc_codec *codec = dai->codec;
struct snd_pcm_runtime *runtime = substream->runtime;
- unsigned short reg, vra;
-
- stac9766_ac97_write(codec, AC97_SPDIF, 0x2002);
+ unsigned short reg;
- vra = stac9766_ac97_read(codec, AC97_EXTENDED_STATUS);
- vra |= 0x5; /* Enable VRA and SPDIF out */
+ snd_soc_write(codec, AC97_SPDIF, 0x2002);
- stac9766_ac97_write(codec, AC97_EXTENDED_STATUS, vra);
+ /* Enable VRA and SPDIF out */
+ snd_soc_update_bits(codec, AC97_EXTENDED_STATUS, 0x5, 0x5);
reg = AC97_PCM_FRONT_DAC_RATE;
- return stac9766_ac97_write(codec, reg, runtime->rate);
+ return snd_soc_write(codec, reg, runtime->rate);
}
static int stac9766_set_bias_level(struct snd_soc_codec *codec,
@@ -232,11 +207,11 @@ static int stac9766_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_ON: /* full On */
case SND_SOC_BIAS_PREPARE: /* partial On */
case SND_SOC_BIAS_STANDBY: /* Off, with power */
- stac9766_ac97_write(codec, AC97_POWERDOWN, 0x0000);
+ snd_soc_write(codec, AC97_POWERDOWN, 0x0000);
break;
case SND_SOC_BIAS_OFF: /* Off, without power */
/* disable everything including AC link */
- stac9766_ac97_write(codec, AC97_POWERDOWN, 0xffff);
+ snd_soc_write(codec, AC97_POWERDOWN, 0xffff);
break;
}
return 0;
@@ -300,21 +275,34 @@ static struct snd_soc_dai_driver stac9766_dai[] = {
static int stac9766_codec_probe(struct snd_soc_codec *codec)
{
struct snd_ac97 *ac97;
+ struct regmap *regmap;
+ int ret;
ac97 = snd_soc_new_ac97_codec(codec, STAC9766_VENDOR_ID,
STAC9766_VENDOR_ID_MASK);
if (IS_ERR(ac97))
return PTR_ERR(ac97);
+ regmap = regmap_init_ac97(ac97, &stac9766_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err_free_ac97;
+ }
+
+ snd_soc_codec_init_regmap(codec, regmap);
snd_soc_codec_set_drvdata(codec, ac97);
return 0;
+err_free_ac97:
+ snd_soc_free_ac97_codec(ac97);
+ return ret;
}
static int stac9766_codec_remove(struct snd_soc_codec *codec)
{
struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
+ snd_soc_codec_exit_regmap(codec);
snd_soc_free_ac97_codec(ac97);
return 0;
}
@@ -324,17 +312,11 @@ static struct snd_soc_codec_driver soc_codec_dev_stac9766 = {
.controls = stac9766_snd_ac97_controls,
.num_controls = ARRAY_SIZE(stac9766_snd_ac97_controls),
},
- .write = stac9766_ac97_write,
- .read = stac9766_ac97_read,
.set_bias_level = stac9766_set_bias_level,
.suspend_bias_off = true,
.probe = stac9766_codec_probe,
.remove = stac9766_codec_remove,
.resume = stac9766_codec_resume,
- .reg_cache_size = ARRAY_SIZE(stac9766_reg),
- .reg_word_size = sizeof(u16),
- .reg_cache_step = 2,
- .reg_cache_default = stac9766_reg,
};
static int stac9766_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/stac9766.h b/sound/soc/codecs/stac9766.h
deleted file mode 100644
index c726f907e2c0..000000000000
--- a/sound/soc/codecs/stac9766.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * stac9766.h -- STAC9766 Soc Audio driver
- */
-
-#ifndef _STAC9766_H
-#define _STAC9766_H
-
-#define AC97_STAC_PAGE0 0x1000
-#define AC97_STAC_DA_CONTROL (AC97_STAC_PAGE0 | 0x6A)
-#define AC97_STAC_ANALOG_SPECIAL (AC97_STAC_PAGE0 | 0x6E)
-#define AC97_STAC_STEREO_MIC 0x78
-
-/* STAC9766 DAI ID's */
-#define STAC9766_DAI_AC97_ANALOG 0
-#define STAC9766_DAI_AC97_DIGITAL 1
-
-#endif
diff --git a/sound/soc/codecs/sti-sas.c b/sound/soc/codecs/sti-sas.c
index d6e00c77edcd..62c618765224 100644
--- a/sound/soc/codecs/sti-sas.c
+++ b/sound/soc/codecs/sti-sas.c
@@ -14,28 +14,8 @@
#include <sound/soc.h>
#include <sound/soc-dapm.h>
-/* chipID supported */
-#define CHIPID_STIH416 0
-#define CHIPID_STIH407 1
-
/* DAC definitions */
-/* stih416 DAC registers */
-/* sysconf 2517: Audio-DAC-Control */
-#define STIH416_AUDIO_DAC_CTRL 0x00000814
-/* sysconf 2519: Audio-Gue-Control */
-#define STIH416_AUDIO_GLUE_CTRL 0x0000081C
-
-#define STIH416_DAC_NOT_STANDBY 0x3
-#define STIH416_DAC_SOFTMUTE 0x4
-#define STIH416_DAC_ANA_NOT_PWR 0x5
-#define STIH416_DAC_NOT_PNDBG 0x6
-
-#define STIH416_DAC_NOT_STANDBY_MASK BIT(STIH416_DAC_NOT_STANDBY)
-#define STIH416_DAC_SOFTMUTE_MASK BIT(STIH416_DAC_SOFTMUTE)
-#define STIH416_DAC_ANA_NOT_PWR_MASK BIT(STIH416_DAC_ANA_NOT_PWR)
-#define STIH416_DAC_NOT_PNDBG_MASK BIT(STIH416_DAC_NOT_PNDBG)
-
/* stih407 DAC registers */
/* sysconf 5041: Audio-Gue-Control */
#define STIH407_AUDIO_GLUE_CTRL 0x000000A4
@@ -63,14 +43,9 @@ enum {
STI_SAS_DAI_ANALOG_OUT,
};
-static const struct reg_default stih416_sas_reg_defaults[] = {
- { STIH407_AUDIO_GLUE_CTRL, 0x00000040 },
- { STIH407_AUDIO_DAC_CTRL, 0x000000000 },
-};
-
static const struct reg_default stih407_sas_reg_defaults[] = {
- { STIH416_AUDIO_DAC_CTRL, 0x000000000 },
- { STIH416_AUDIO_GLUE_CTRL, 0x00000040 },
+ { STIH407_AUDIO_DAC_CTRL, 0x000000000 },
+ { STIH407_AUDIO_GLUE_CTRL, 0x00000040 },
};
struct sti_dac_audio {
@@ -89,7 +64,6 @@ struct sti_spdif_audio {
/* device data structure */
struct sti_sas_dev_data {
- const int chipid; /* IC version */
const struct regmap_config *regmap;
const struct snd_soc_dai_ops *dac_ops; /* DAC function callbacks */
const struct snd_soc_dapm_widget *dapm_widgets; /* dapms declaration */
@@ -150,51 +124,27 @@ static int sti_sas_init_sas_registers(struct snd_soc_codec *codec,
ret = snd_soc_update_bits(codec, STIH407_AUDIO_GLUE_CTRL,
SPDIF_BIPHASE_IDLE_MASK, 0);
if (ret < 0) {
- dev_err(codec->dev, "Failed to update SPDIF registers");
+ dev_err(codec->dev, "Failed to update SPDIF registers\n");
return ret;
}
/* Init DAC configuration */
- switch (data->dev_data->chipid) {
- case CHIPID_STIH407:
- /* init configuration */
- ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
- STIH407_DAC_STANDBY_MASK,
- STIH407_DAC_STANDBY_MASK);
-
- if (!ret)
- ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
- STIH407_DAC_STANDBY_ANA_MASK,
- STIH407_DAC_STANDBY_ANA_MASK);
- if (!ret)
- ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
- STIH407_DAC_SOFTMUTE_MASK,
- STIH407_DAC_SOFTMUTE_MASK);
- break;
- case CHIPID_STIH416:
- ret = snd_soc_update_bits(codec, STIH416_AUDIO_DAC_CTRL,
- STIH416_DAC_NOT_STANDBY_MASK, 0);
- if (!ret)
- ret = snd_soc_update_bits(codec,
- STIH416_AUDIO_DAC_CTRL,
- STIH416_DAC_ANA_NOT_PWR, 0);
- if (!ret)
- ret = snd_soc_update_bits(codec,
- STIH416_AUDIO_DAC_CTRL,
- STIH416_DAC_NOT_PNDBG_MASK,
- 0);
- if (!ret)
- ret = snd_soc_update_bits(codec,
- STIH416_AUDIO_DAC_CTRL,
- STIH416_DAC_SOFTMUTE_MASK,
- STIH416_DAC_SOFTMUTE_MASK);
- break;
- default:
- return -EINVAL;
- }
+ /* init configuration */
+ ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+ STIH407_DAC_STANDBY_MASK,
+ STIH407_DAC_STANDBY_MASK);
+
+ if (!ret)
+ ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+ STIH407_DAC_STANDBY_ANA_MASK,
+ STIH407_DAC_STANDBY_ANA_MASK);
+ if (!ret)
+ ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+ STIH407_DAC_SOFTMUTE_MASK,
+ STIH407_DAC_SOFTMUTE_MASK);
if (ret < 0) {
- dev_err(codec->dev, "Failed to update DAC registers");
+ dev_err(codec->dev, "Failed to update DAC registers\n");
return ret;
}
@@ -217,37 +167,6 @@ static int sti_sas_dac_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return 0;
}
-static int stih416_dac_probe(struct snd_soc_dai *dai)
-{
- struct snd_soc_codec *codec = dai->codec;
- struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
- struct sti_dac_audio *dac = &drvdata->dac;
-
- /* Get reset control */
- dac->rst = devm_reset_control_get(codec->dev, "dac_rst");
- if (IS_ERR(dac->rst)) {
- dev_err(dai->codec->dev,
- "%s: ERROR: DAC reset control not defined !\n",
- __func__);
- dac->rst = NULL;
- return -EFAULT;
- }
- /* Put the DAC into reset */
- reset_control_assert(dac->rst);
-
- return 0;
-}
-
-static const struct snd_soc_dapm_widget stih416_sas_dapm_widgets[] = {
- SND_SOC_DAPM_PGA("DAC bandgap", STIH416_AUDIO_DAC_CTRL,
- STIH416_DAC_NOT_PNDBG_MASK, 0, NULL, 0),
- SND_SOC_DAPM_OUT_DRV("DAC standby ana", STIH416_AUDIO_DAC_CTRL,
- STIH416_DAC_ANA_NOT_PWR, 0, NULL, 0),
- SND_SOC_DAPM_DAC("DAC standby", "dac_p", STIH416_AUDIO_DAC_CTRL,
- STIH416_DAC_NOT_STANDBY, 0),
- SND_SOC_DAPM_OUTPUT("DAC Output"),
-};
-
static const struct snd_soc_dapm_widget stih407_sas_dapm_widgets[] = {
SND_SOC_DAPM_OUT_DRV("DAC standby ana", STIH407_AUDIO_DAC_CTRL,
STIH407_DAC_STANDBY_ANA, 1, NULL, 0),
@@ -256,30 +175,11 @@ static const struct snd_soc_dapm_widget stih407_sas_dapm_widgets[] = {
SND_SOC_DAPM_OUTPUT("DAC Output"),
};
-static const struct snd_soc_dapm_route stih416_sas_route[] = {
- {"DAC Output", NULL, "DAC bandgap"},
- {"DAC Output", NULL, "DAC standby ana"},
- {"DAC standby ana", NULL, "DAC standby"},
-};
-
static const struct snd_soc_dapm_route stih407_sas_route[] = {
{"DAC Output", NULL, "DAC standby ana"},
{"DAC standby ana", NULL, "DAC standby"},
};
-static int stih416_sas_dac_mute(struct snd_soc_dai *dai, int mute, int stream)
-{
- struct snd_soc_codec *codec = dai->codec;
-
- if (mute) {
- return snd_soc_update_bits(codec, STIH416_AUDIO_DAC_CTRL,
- STIH416_DAC_SOFTMUTE_MASK,
- STIH416_DAC_SOFTMUTE_MASK);
- } else {
- return snd_soc_update_bits(codec, STIH416_AUDIO_DAC_CTRL,
- STIH416_DAC_SOFTMUTE_MASK, 0);
- }
-}
static int stih407_sas_dac_mute(struct snd_soc_dai *dai, int mute, int stream)
{
@@ -392,13 +292,13 @@ static int sti_sas_prepare(struct snd_pcm_substream *substream,
switch (dai->id) {
case STI_SAS_DAI_SPDIF_OUT:
if ((drvdata->spdif.mclk / runtime->rate) != 128) {
- dev_err(codec->dev, "unexpected mclk-fs ratio");
+ dev_err(codec->dev, "unexpected mclk-fs ratio\n");
return -EINVAL;
}
break;
case STI_SAS_DAI_ANALOG_OUT:
if ((drvdata->dac.mclk / runtime->rate) != 256) {
- dev_err(codec->dev, "unexpected mclk-fs ratio");
+ dev_err(codec->dev, "unexpected mclk-fs ratio\n");
return -EINVAL;
}
break;
@@ -407,13 +307,6 @@ static int sti_sas_prepare(struct snd_pcm_substream *substream,
return 0;
}
-static const struct snd_soc_dai_ops stih416_dac_ops = {
- .set_fmt = sti_sas_dac_set_fmt,
- .mute_stream = stih416_sas_dac_mute,
- .prepare = sti_sas_prepare,
- .set_sysclk = sti_sas_set_sysclk,
-};
-
static const struct snd_soc_dai_ops stih407_dac_ops = {
.set_fmt = sti_sas_dac_set_fmt,
.mute_stream = stih407_sas_dac_mute,
@@ -434,31 +327,7 @@ static const struct regmap_config stih407_sas_regmap = {
.reg_write = sti_sas_write_reg,
};
-static const struct regmap_config stih416_sas_regmap = {
- .reg_bits = 32,
- .val_bits = 32,
-
- .max_register = STIH416_AUDIO_DAC_CTRL,
- .reg_defaults = stih416_sas_reg_defaults,
- .num_reg_defaults = ARRAY_SIZE(stih416_sas_reg_defaults),
- .volatile_reg = sti_sas_volatile_register,
- .cache_type = REGCACHE_RBTREE,
- .reg_read = sti_sas_read_reg,
- .reg_write = sti_sas_write_reg,
-};
-
-static const struct sti_sas_dev_data stih416_data = {
- .chipid = CHIPID_STIH416,
- .regmap = &stih416_sas_regmap,
- .dac_ops = &stih416_dac_ops,
- .dapm_widgets = stih416_sas_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(stih416_sas_dapm_widgets),
- .dapm_routes = stih416_sas_route,
- .num_dapm_routes = ARRAY_SIZE(stih416_sas_route),
-};
-
static const struct sti_sas_dev_data stih407_data = {
- .chipid = CHIPID_STIH407,
.regmap = &stih407_sas_regmap,
.dac_ops = &stih407_dac_ops,
.dapm_widgets = stih407_sas_dapm_widgets,
@@ -533,10 +402,6 @@ static struct snd_soc_codec_driver sti_sas_driver = {
static const struct of_device_id sti_sas_dev_match[] = {
{
- .compatible = "st,stih416-sas-codec",
- .data = &stih416_data,
- },
- {
.compatible = "st,stih407-sas-codec",
.data = &stih407_data,
},
@@ -558,7 +423,7 @@ static int sti_sas_driver_probe(struct platform_device *pdev)
/* Populate data structure depending on compatibility */
of_id = of_match_node(sti_sas_dev_match, pnode);
if (!of_id->data) {
- dev_err(&pdev->dev, "data associated to device is missing");
+		dev_err(&pdev->dev, "data associated with device is missing\n");
return -EINVAL;
}
@@ -584,10 +449,6 @@ static int sti_sas_driver_probe(struct platform_device *pdev)
}
drvdata->spdif.regmap = drvdata->dac.regmap;
- /* Set DAC dai probe */
- if (drvdata->dev_data->chipid == CHIPID_STIH416)
- sti_sas_dai[STI_SAS_DAI_ANALOG_OUT].probe = stih416_dac_probe;
-
sti_sas_dai[STI_SAS_DAI_ANALOG_OUT].ops = drvdata->dev_data->dac_ops;
/* Set dapms*/
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index be1a64bfd320..f8a90ba8cd71 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -1253,6 +1253,8 @@ static const struct of_device_id tlv320aic31xx_of_match[] = {
{ .compatible = "ti,tlv320aic3110" },
{ .compatible = "ti,tlv320aic3120" },
{ .compatible = "ti,tlv320aic3111" },
+ { .compatible = "ti,tlv320dac3100" },
+ { .compatible = "ti,tlv320dac3101" },
{},
};
MODULE_DEVICE_TABLE(of, tlv320aic31xx_of_match);
@@ -1379,6 +1381,7 @@ static const struct i2c_device_id aic31xx_i2c_id[] = {
{ "tlv320aic3120", AIC3120 },
{ "tlv320aic3111", AIC3111 },
{ "tlv320dac3100", DAC3100 },
+ { "tlv320dac3101", DAC3101 },
{ }
};
MODULE_DEVICE_TABLE(i2c, aic31xx_i2c_id);
diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h
index 5acd5b69fb83..730fb2058869 100644
--- a/sound/soc/codecs/tlv320aic31xx.h
+++ b/sound/soc/codecs/tlv320aic31xx.h
@@ -32,6 +32,7 @@ enum aic31xx_type {
AIC3120 = AIC31XX_MINIDSP_BIT,
AIC3111 = (AIC31XX_STEREO_CLASS_D_BIT | AIC31XX_MINIDSP_BIT),
DAC3100 = DAC31XX_BIT,
+ DAC3101 = DAC31XX_BIT | AIC31XX_STEREO_CLASS_D_BIT,
};
struct aic31xx_pdata {
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 5a8d96ec058c..8877b74b0510 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -157,7 +157,7 @@ static int snd_soc_dapm_put_volsw_aic3x(struct snd_kcontrol *kcontrol,
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
unsigned short val;
- struct snd_soc_dapm_update update;
+ struct snd_soc_dapm_update update = { 0 };
int connect, change;
val = (ucontrol->value.integer.value[0] & mask);
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index 533e3bb444e4..2918fdb95e58 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -698,25 +698,10 @@ static int uda1380_probe(struct snd_soc_codec *codec)
codec->hw_write = (hw_write_t)i2c_master_send;
codec->control_data = uda1380->control_data;
- if (!pdata)
- return -EINVAL;
-
- if (gpio_is_valid(pdata->gpio_reset)) {
- ret = gpio_request_one(pdata->gpio_reset, GPIOF_OUT_INIT_LOW,
- "uda1380 reset");
- if (ret)
- goto err_out;
- }
-
- if (gpio_is_valid(pdata->gpio_power)) {
- ret = gpio_request_one(pdata->gpio_power, GPIOF_OUT_INIT_LOW,
- "uda1380 power");
- if (ret)
- goto err_free_gpio;
- } else {
+ if (!gpio_is_valid(pdata->gpio_power)) {
ret = uda1380_reset(codec);
if (ret)
- goto err_free_gpio;
+ return ret;
}
INIT_WORK(&uda1380->work, uda1380_flush_work);
@@ -733,28 +718,10 @@ static int uda1380_probe(struct snd_soc_codec *codec)
}
return 0;
-
-err_free_gpio:
- if (gpio_is_valid(pdata->gpio_reset))
- gpio_free(pdata->gpio_reset);
-err_out:
- return ret;
-}
-
-/* power down chip */
-static int uda1380_remove(struct snd_soc_codec *codec)
-{
- struct uda1380_platform_data *pdata =codec->dev->platform_data;
-
- gpio_free(pdata->gpio_reset);
- gpio_free(pdata->gpio_power);
-
- return 0;
}
static struct snd_soc_codec_driver soc_codec_dev_uda1380 = {
.probe = uda1380_probe,
- .remove = uda1380_remove,
.read = uda1380_read_reg_cache,
.write = uda1380_write,
.set_bias_level = uda1380_set_bias_level,
@@ -775,18 +742,35 @@ static struct snd_soc_codec_driver soc_codec_dev_uda1380 = {
},
};
-#if IS_ENABLED(CONFIG_I2C)
static int uda1380_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
+ struct uda1380_platform_data *pdata = i2c->dev.platform_data;
struct uda1380_priv *uda1380;
int ret;
+ if (!pdata)
+ return -EINVAL;
+
uda1380 = devm_kzalloc(&i2c->dev, sizeof(struct uda1380_priv),
GFP_KERNEL);
if (uda1380 == NULL)
return -ENOMEM;
+ if (gpio_is_valid(pdata->gpio_reset)) {
+ ret = devm_gpio_request_one(&i2c->dev, pdata->gpio_reset,
+ GPIOF_OUT_INIT_LOW, "uda1380 reset");
+ if (ret)
+ return ret;
+ }
+
+ if (gpio_is_valid(pdata->gpio_power)) {
+ ret = devm_gpio_request_one(&i2c->dev, pdata->gpio_power,
+ GPIOF_OUT_INIT_LOW, "uda1380 power");
+ if (ret)
+ return ret;
+ }
+
i2c_set_clientdata(i2c, uda1380);
uda1380->control_data = i2c;
@@ -815,27 +799,8 @@ static struct i2c_driver uda1380_i2c_driver = {
.remove = uda1380_i2c_remove,
.id_table = uda1380_i2c_id,
};
-#endif
-static int __init uda1380_modinit(void)
-{
- int ret = 0;
-#if IS_ENABLED(CONFIG_I2C)
- ret = i2c_add_driver(&uda1380_i2c_driver);
- if (ret != 0)
- pr_err("Failed to register UDA1380 I2C driver: %d\n", ret);
-#endif
- return ret;
-}
-module_init(uda1380_modinit);
-
-static void __exit uda1380_exit(void)
-{
-#if IS_ENABLED(CONFIG_I2C)
- i2c_del_driver(&uda1380_i2c_driver);
-#endif
-}
-module_exit(uda1380_exit);
+module_i2c_driver(uda1380_i2c_driver);
MODULE_AUTHOR("Giorgio Padrin");
MODULE_DESCRIPTION("Audio support for codec Philips UDA1380");
diff --git a/sound/soc/codecs/uda1380.h b/sound/soc/codecs/uda1380.h
index 942e3927c72b..69a326ac3c1a 100644
--- a/sound/soc/codecs/uda1380.h
+++ b/sound/soc/codecs/uda1380.h
@@ -72,8 +72,4 @@
#define R22_SKIP_DCFIL 0x0002
#define R23_AGC_EN 0x0001
-#define UDA1380_DAI_DUPLEX 0 /* playback and capture on single DAI */
-#define UDA1380_DAI_PLAYBACK 1 /* playback DAI */
-#define UDA1380_DAI_CAPTURE 2 /* capture DAI */
-
#endif /* _UDA1380_H */
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index 606bf88abfc4..d83dab57a1d1 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -999,7 +999,7 @@ static DECLARE_TLV_DB_SCALE(in_tlv, -6300, 100, 0);
static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
static DECLARE_TLV_DB_SCALE(out_tlv, -6400, 100, 0);
-static const char *wm2200_mixer_texts[] = {
+static const char * const wm2200_mixer_texts[] = {
"None",
"Tone Generator",
"AEC Loopback",
@@ -1033,7 +1033,7 @@ static const char *wm2200_mixer_texts[] = {
"DSP2.6",
};
-static int wm2200_mixer_values[] = {
+static unsigned int wm2200_mixer_values[] = {
0x00,
0x04, /* Tone */
0x08, /* AEC */
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index 93876c6d48ee..e7ab37d0dd32 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -607,6 +607,9 @@ static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w,
break;
case SND_SOC_DAPM_PRE_PMD:
break;
+ case SND_SOC_DAPM_PRE_PMU:
+ case SND_SOC_DAPM_POST_PMD:
+ return arizona_clk_ev(w, kcontrol, event);
default:
return 0;
}
@@ -1077,9 +1080,11 @@ static const struct snd_kcontrol_new wm5102_aec_loopback_mux =
static const struct snd_soc_dapm_widget wm5102_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT,
0, wm5102_sysclk_ev,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
- ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0),
+ ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, arizona_clk_ev,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK,
ARIZONA_OPCLK_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("ASYNCOPCLK", ARIZONA_OUTPUT_ASYNC_CLOCK,
@@ -1903,7 +1908,7 @@ static struct snd_soc_dai_driver wm5102_dai[] = {
static int wm5102_open(struct snd_compr_stream *stream)
{
struct snd_soc_pcm_runtime *rtd = stream->private_data;
- struct wm5102_priv *priv = snd_soc_codec_get_drvdata(rtd->codec);
+ struct wm5102_priv *priv = snd_soc_platform_get_drvdata(rtd->platform);
return wm_adsp_compr_open(&priv->core.adsp[0], stream);
}
@@ -1926,18 +1931,10 @@ static irqreturn_t wm5102_adsp2_irq(int irq, void *data)
static int wm5102_codec_probe(struct snd_soc_codec *codec)
{
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
- struct arizona *arizona = priv->core.arizona;
int ret;
- ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
- "ADSP2 Compressed IRQ", wm5102_adsp2_irq,
- priv);
- if (ret != 0) {
- dev_err(codec->dev, "Failed to request DSP IRQ: %d\n", ret);
- return ret;
- }
-
ret = wm_adsp2_codec_probe(&priv->core.adsp[0], codec);
if (ret)
return ret;
@@ -1949,8 +1946,9 @@ static int wm5102_codec_probe(struct snd_soc_codec *codec)
arizona_init_spk(codec);
arizona_init_gpio(codec);
+ arizona_init_notifiers(codec);
- snd_soc_dapm_disable_pin(dapm, "HAPTICS");
+ snd_soc_component_disable_pin(component, "HAPTICS");
priv->core.arizona->dapm = dapm;
@@ -1965,16 +1963,11 @@ err_adsp2_codec_probe:
static int wm5102_codec_remove(struct snd_soc_codec *codec)
{
struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
- struct arizona *arizona = priv->core.arizona;
wm_adsp2_codec_remove(&priv->core.adsp[0], codec);
priv->core.arizona->dapm = NULL;
- arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
-
- arizona_free_spk(codec);
-
return 0;
}
@@ -2092,25 +2085,47 @@ static int wm5102_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_idle(&pdev->dev);
+ ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
+ "ADSP2 Compressed IRQ", wm5102_adsp2_irq,
+ wm5102);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to request DSP IRQ: %d\n", ret);
+ return ret;
+ }
+
+ ret = arizona_init_spk_irqs(arizona);
+ if (ret < 0)
+ goto err_dsp_irq;
+
ret = snd_soc_register_platform(&pdev->dev, &wm5102_compr_platform);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register platform: %d\n", ret);
- return ret;
+ goto err_spk_irqs;
}
ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm5102,
wm5102_dai, ARRAY_SIZE(wm5102_dai));
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register codec: %d\n", ret);
- snd_soc_unregister_platform(&pdev->dev);
+ goto err_platform;
}
return ret;
+
+err_platform:
+ snd_soc_unregister_platform(&pdev->dev);
+err_spk_irqs:
+ arizona_free_spk_irqs(arizona);
+err_dsp_irq:
+ arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, wm5102);
+
+ return ret;
}
static int wm5102_remove(struct platform_device *pdev)
{
struct wm5102_priv *wm5102 = platform_get_drvdata(pdev);
+ struct arizona *arizona = wm5102->core.arizona;
snd_soc_unregister_platform(&pdev->dev);
snd_soc_unregister_codec(&pdev->dev);
@@ -2118,6 +2133,10 @@ static int wm5102_remove(struct platform_device *pdev)
wm_adsp2_remove(&wm5102->core.adsp[0]);
+ arizona_free_spk_irqs(arizona);
+
+ arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, wm5102);
+
return 0;
}
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 06bae3b23fce..585fc706c1b0 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -183,7 +183,9 @@ static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w,
regmap_write_async(regmap, patch[i].reg,
patch[i].def);
break;
-
+ case SND_SOC_DAPM_PRE_PMU:
+ case SND_SOC_DAPM_POST_PMD:
+ return arizona_clk_ev(w, kcontrol, event);
default:
break;
}
@@ -1073,9 +1075,11 @@ static const struct snd_kcontrol_new wm5110_output_anc_src[] = {
static const struct snd_soc_dapm_widget wm5110_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT,
- 0, wm5110_sysclk_ev, SND_SOC_DAPM_POST_PMU),
+ 0, wm5110_sysclk_ev, SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
- ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0),
+ ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, arizona_clk_ev,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK,
ARIZONA_OPCLK_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("ASYNCOPCLK", ARIZONA_OUTPUT_ASYNC_CLOCK,
@@ -2220,7 +2224,7 @@ static struct snd_soc_dai_driver wm5110_dai[] = {
static int wm5110_open(struct snd_compr_stream *stream)
{
struct snd_soc_pcm_runtime *rtd = stream->private_data;
- struct wm5110_priv *priv = snd_soc_codec_get_drvdata(rtd->codec);
+ struct wm5110_priv *priv = snd_soc_platform_get_drvdata(rtd->platform);
struct arizona *arizona = priv->core.arizona;
int n_adsp;
@@ -2269,8 +2273,8 @@ static irqreturn_t wm5110_adsp2_irq(int irq, void *data)
static int wm5110_codec_probe(struct snd_soc_codec *codec)
{
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
struct wm5110_priv *priv = snd_soc_codec_get_drvdata(codec);
- struct arizona *arizona = priv->core.arizona;
int i, ret;
priv->core.arizona->dapm = dapm;
@@ -2280,14 +2284,6 @@ static int wm5110_codec_probe(struct snd_soc_codec *codec)
arizona_init_mono(codec);
arizona_init_notifiers(codec);
- ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
- "ADSP2 Compressed IRQ", wm5110_adsp2_irq,
- priv);
- if (ret != 0) {
- dev_err(codec->dev, "Failed to request DSP IRQ: %d\n", ret);
- return ret;
- }
-
for (i = 0; i < WM5110_NUM_ADSP; ++i) {
ret = wm_adsp2_codec_probe(&priv->core.adsp[i], codec);
if (ret)
@@ -2300,7 +2296,7 @@ static int wm5110_codec_probe(struct snd_soc_codec *codec)
if (ret)
goto err_adsp2_codec_probe;
- snd_soc_dapm_disable_pin(dapm, "HAPTICS");
+ snd_soc_component_disable_pin(component, "HAPTICS");
return 0;
@@ -2308,15 +2304,12 @@ err_adsp2_codec_probe:
for (--i; i >= 0; --i)
wm_adsp2_codec_remove(&priv->core.adsp[i], codec);
- arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
-
return ret;
}
static int wm5110_codec_remove(struct snd_soc_codec *codec)
{
struct wm5110_priv *priv = snd_soc_codec_get_drvdata(codec);
- struct arizona *arizona = priv->core.arizona;
int i;
for (i = 0; i < WM5110_NUM_ADSP; ++i)
@@ -2324,10 +2317,6 @@ static int wm5110_codec_remove(struct snd_soc_codec *codec)
priv->core.arizona->dapm = NULL;
- arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
-
- arizona_free_spk(codec);
-
return 0;
}
@@ -2449,25 +2438,47 @@ static int wm5110_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_idle(&pdev->dev);
+ ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
+ "ADSP2 Compressed IRQ", wm5110_adsp2_irq,
+ wm5110);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to request DSP IRQ: %d\n", ret);
+ return ret;
+ }
+
+ ret = arizona_init_spk_irqs(arizona);
+ if (ret < 0)
+ goto err_dsp_irq;
+
ret = snd_soc_register_platform(&pdev->dev, &wm5110_compr_platform);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register platform: %d\n", ret);
- return ret;
+ goto err_spk_irqs;
}
ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm5110,
wm5110_dai, ARRAY_SIZE(wm5110_dai));
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register codec: %d\n", ret);
- snd_soc_unregister_platform(&pdev->dev);
+ goto err_platform;
}
return ret;
+
+err_platform:
+ snd_soc_unregister_platform(&pdev->dev);
+err_spk_irqs:
+ arizona_free_spk_irqs(arizona);
+err_dsp_irq:
+ arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, wm5110);
+
+ return ret;
}
static int wm5110_remove(struct platform_device *pdev)
{
struct wm5110_priv *wm5110 = platform_get_drvdata(pdev);
+ struct arizona *arizona = wm5110->core.arizona;
int i;
snd_soc_unregister_platform(&pdev->dev);
@@ -2477,6 +2488,10 @@ static int wm5110_remove(struct platform_device *pdev)
for (i = 0; i < WM5110_NUM_ADSP; i++)
wm_adsp2_remove(&wm5110->core.adsp[i]);
+ arizona_free_spk_irqs(arizona);
+
+ arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, wm5110);
+
return 0;
}
diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c
index deb2e075428e..6d0a2723bfde 100644
--- a/sound/soc/codecs/wm8523.c
+++ b/sound/soc/codecs/wm8523.c
@@ -446,7 +446,6 @@ static const struct regmap_config wm8523_regmap = {
.volatile_reg = wm8523_volatile_register,
};
-#if IS_ENABLED(CONFIG_I2C)
static int wm8523_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -543,29 +542,8 @@ static struct i2c_driver wm8523_i2c_driver = {
.remove = wm8523_i2c_remove,
.id_table = wm8523_i2c_id,
};
-#endif
-static int __init wm8523_modinit(void)
-{
- int ret;
-#if IS_ENABLED(CONFIG_I2C)
- ret = i2c_add_driver(&wm8523_i2c_driver);
- if (ret != 0) {
- printk(KERN_ERR "Failed to register WM8523 I2C driver: %d\n",
- ret);
- }
-#endif
- return 0;
-}
-module_init(wm8523_modinit);
-
-static void __exit wm8523_exit(void)
-{
-#if IS_ENABLED(CONFIG_I2C)
- i2c_del_driver(&wm8523_i2c_driver);
-#endif
-}
-module_exit(wm8523_exit);
+module_i2c_driver(wm8523_i2c_driver);
MODULE_DESCRIPTION("ASoC WM8523 driver");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
index faa7287a5253..910801dddd64 100644
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -1,5 +1,5 @@
/*
- * wm8580.c -- WM8580 ALSA Soc Audio driver
+ * wm8580.c  --  WM8580 and WM8581 ALSA SoC Audio driver
*
* Copyright 2008-12 Wolfson Microelectronics PLC.
*
@@ -12,6 +12,9 @@
* The WM8580 is a multichannel codec with S/PDIF support, featuring six
* DAC channels and two ADC channels.
*
+ * The WM8581 is a multichannel codec with S/PDIF support, featuring eight
+ * DAC channels and two ADC channels.
+ *
* Currently only the primary audio interface is supported - S/PDIF and
* the secondary audio interfaces are not.
*/
@@ -65,6 +68,8 @@
#define WM8580_DIGITAL_ATTENUATION_DACR2 0x17
#define WM8580_DIGITAL_ATTENUATION_DACL3 0x18
#define WM8580_DIGITAL_ATTENUATION_DACR3 0x19
+#define WM8581_DIGITAL_ATTENUATION_DACL4 0x1A
+#define WM8581_DIGITAL_ATTENUATION_DACR4 0x1B
#define WM8580_MASTER_DIGITAL_ATTENUATION 0x1C
#define WM8580_ADC_CONTROL1 0x1D
#define WM8580_SPDTXCHAN0 0x1E
@@ -236,12 +241,17 @@ static const char *wm8580_supply_names[WM8580_NUM_SUPPLIES] = {
"PVDD",
};
+struct wm8580_driver_data {
+ int num_dacs;
+};
+
/* codec private data */
struct wm8580_priv {
struct regmap *regmap;
struct regulator_bulk_data supplies[WM8580_NUM_SUPPLIES];
struct pll_state a;
struct pll_state b;
+ const struct wm8580_driver_data *drvdata;
int sysclk[2];
};
@@ -306,6 +316,19 @@ SOC_DOUBLE("Capture Switch", WM8580_ADC_CONTROL1, 0, 1, 1, 1),
SOC_SINGLE("Capture High-Pass Filter Switch", WM8580_ADC_CONTROL1, 4, 1, 0),
};
+static const struct snd_kcontrol_new wm8581_snd_controls[] = {
+SOC_DOUBLE_R_EXT_TLV("DAC4 Playback Volume",
+ WM8581_DIGITAL_ATTENUATION_DACL4,
+ WM8581_DIGITAL_ATTENUATION_DACR4,
+ 0, 0xff, 0, snd_soc_get_volsw, wm8580_out_vu, dac_tlv),
+
+SOC_SINGLE("DAC4 Deemphasis Switch", WM8580_DAC_CONTROL3, 3, 1, 0),
+
+SOC_DOUBLE("DAC4 Invert Switch", WM8580_DAC_CONTROL4, 8, 7, 1, 0),
+
+SOC_SINGLE("DAC4 Switch", WM8580_DAC_CONTROL5, 3, 1, 1),
+};
+
static const struct snd_soc_dapm_widget wm8580_dapm_widgets[] = {
SND_SOC_DAPM_DAC("DAC1", "Playback", WM8580_PWRDN1, 2, 1),
SND_SOC_DAPM_DAC("DAC2", "Playback", WM8580_PWRDN1, 3, 1),
@@ -324,6 +347,13 @@ SND_SOC_DAPM_INPUT("AINL"),
SND_SOC_DAPM_INPUT("AINR"),
};
+static const struct snd_soc_dapm_widget wm8581_dapm_widgets[] = {
+SND_SOC_DAPM_DAC("DAC4", "Playback", WM8580_PWRDN1, 5, 1),
+
+SND_SOC_DAPM_OUTPUT("VOUT4L"),
+SND_SOC_DAPM_OUTPUT("VOUT4R"),
+};
+
static const struct snd_soc_dapm_route wm8580_dapm_routes[] = {
{ "VOUT1L", NULL, "DAC1" },
{ "VOUT1R", NULL, "DAC1" },
@@ -338,6 +368,11 @@ static const struct snd_soc_dapm_route wm8580_dapm_routes[] = {
{ "ADC", NULL, "AINR" },
};
+static const struct snd_soc_dapm_route wm8581_dapm_routes[] = {
+ { "VOUT4L", NULL, "DAC4" },
+ { "VOUT4R", NULL, "DAC4" },
+};
+
/* PLL divisors */
struct _pll_div {
u32 prescale:1;
@@ -815,10 +850,21 @@ static int wm8580_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
+static int wm8580_playback_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8580_priv *wm8580 = snd_soc_codec_get_drvdata(codec);
+
+ return snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_CHANNELS, 1, wm8580->drvdata->num_dacs * 2);
+}
+
#define WM8580_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
static const struct snd_soc_dai_ops wm8580_dai_ops_playback = {
+ .startup = wm8580_playback_startup,
.set_sysclk = wm8580_set_sysclk,
.hw_params = wm8580_paif_hw_params,
.set_fmt = wm8580_set_paif_dai_fmt,
@@ -842,7 +888,6 @@ static struct snd_soc_dai_driver wm8580_dai[] = {
.playback = {
.stream_name = "Playback",
.channels_min = 1,
- .channels_max = 6,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = WM8580_FORMATS,
},
@@ -865,8 +910,22 @@ static struct snd_soc_dai_driver wm8580_dai[] = {
static int wm8580_probe(struct snd_soc_codec *codec)
{
struct wm8580_priv *wm8580 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
int ret = 0;
+ switch (wm8580->drvdata->num_dacs) {
+ case 4:
+ snd_soc_add_codec_controls(codec, wm8581_snd_controls,
+ ARRAY_SIZE(wm8581_snd_controls));
+ snd_soc_dapm_new_controls(dapm, wm8581_dapm_widgets,
+ ARRAY_SIZE(wm8581_dapm_widgets));
+ snd_soc_dapm_add_routes(dapm, wm8581_dapm_routes,
+ ARRAY_SIZE(wm8581_dapm_routes));
+ break;
+ default:
+ break;
+ }
+
ret = regulator_bulk_enable(ARRAY_SIZE(wm8580->supplies),
wm8580->supplies);
if (ret != 0) {
@@ -914,12 +973,6 @@ static const struct snd_soc_codec_driver soc_codec_dev_wm8580 = {
},
};
-static const struct of_device_id wm8580_of_match[] = {
- { .compatible = "wlf,wm8580" },
- { },
-};
-MODULE_DEVICE_TABLE(of, wm8580_of_match);
-
static const struct regmap_config wm8580_regmap = {
.reg_bits = 7,
.val_bits = 9,
@@ -932,10 +985,25 @@ static const struct regmap_config wm8580_regmap = {
.volatile_reg = wm8580_volatile,
};
-#if IS_ENABLED(CONFIG_I2C)
+static const struct wm8580_driver_data wm8580_data = {
+ .num_dacs = 3,
+};
+
+static const struct wm8580_driver_data wm8581_data = {
+ .num_dacs = 4,
+};
+
+static const struct of_device_id wm8580_of_match[] = {
+ { .compatible = "wlf,wm8580", .data = &wm8580_data },
+ { .compatible = "wlf,wm8581", .data = &wm8581_data },
+ { },
+};
+MODULE_DEVICE_TABLE(of, wm8580_of_match);
+
static int wm8580_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
+ const struct of_device_id *of_id;
struct wm8580_priv *wm8580;
int ret, i;
@@ -960,6 +1028,15 @@ static int wm8580_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, wm8580);
+ of_id = of_match_device(wm8580_of_match, &i2c->dev);
+ if (of_id)
+ wm8580->drvdata = of_id->data;
+
+ if (!wm8580->drvdata) {
+ dev_err(&i2c->dev, "failed to find driver data\n");
+ return -EINVAL;
+ }
+
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm8580, wm8580_dai, ARRAY_SIZE(wm8580_dai));
@@ -973,7 +1050,8 @@ static int wm8580_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id wm8580_i2c_id[] = {
- { "wm8580", 0 },
+ { "wm8580", (kernel_ulong_t)&wm8580_data },
+ { "wm8581", (kernel_ulong_t)&wm8581_data },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8580_i2c_id);
@@ -987,31 +1065,10 @@ static struct i2c_driver wm8580_i2c_driver = {
.remove = wm8580_i2c_remove,
.id_table = wm8580_i2c_id,
};
-#endif
-static int __init wm8580_modinit(void)
-{
- int ret = 0;
-
-#if IS_ENABLED(CONFIG_I2C)
- ret = i2c_add_driver(&wm8580_i2c_driver);
- if (ret != 0) {
- pr_err("Failed to register WM8580 I2C driver: %d\n", ret);
- }
-#endif
-
- return ret;
-}
-module_init(wm8580_modinit);
-
-static void __exit wm8580_exit(void)
-{
-#if IS_ENABLED(CONFIG_I2C)
- i2c_del_driver(&wm8580_i2c_driver);
-#endif
-}
-module_exit(wm8580_exit);
+module_i2c_driver(wm8580_i2c_driver);
MODULE_DESCRIPTION("ASoC WM8580 driver");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_AUTHOR("Matt Flax <flatmax@flatmax.org>");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8753.h b/sound/soc/codecs/wm8753.h
index 94edac144bcb..8b39e3677ac8 100644
--- a/sound/soc/codecs/wm8753.h
+++ b/sound/soc/codecs/wm8753.h
@@ -112,7 +112,4 @@
#define WM8753_VXCLK_DIV_8 (3 << 6)
#define WM8753_VXCLK_DIV_16 (4 << 6)
-#define WM8753_DAI_HIFI 0
-#define WM8753_DAI_VOICE 1
-
#endif
diff --git a/sound/soc/codecs/wm8978.h b/sound/soc/codecs/wm8978.h
index 6ae43495b7cf..0dcf6868dff6 100644
--- a/sound/soc/codecs/wm8978.h
+++ b/sound/soc/codecs/wm8978.h
@@ -78,8 +78,8 @@ enum wm8978_clk_id {
};
enum wm8978_sysclk_src {
+ WM8978_MCLK = 0,
WM8978_PLL,
- WM8978_MCLK
};
#endif /* __WM8978_H__ */
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index 2f2821b3382f..ee0c8639c743 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -108,6 +108,9 @@ static int wm8997_sysclk_ev(struct snd_soc_dapm_widget *w,
break;
case SND_SOC_DAPM_PRE_PMD:
break;
+ case SND_SOC_DAPM_PRE_PMU:
+ case SND_SOC_DAPM_POST_PMD:
+ return arizona_clk_ev(w, kcontrol, event);
default:
return 0;
}
@@ -408,9 +411,11 @@ static const struct snd_kcontrol_new wm8997_aec_loopback_mux =
static const struct snd_soc_dapm_widget wm8997_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT,
0, wm8997_sysclk_ev,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
- ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0),
+ ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, arizona_clk_ev,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK,
ARIZONA_OPCLK_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("ASYNCOPCLK", ARIZONA_OUTPUT_ASYNC_CLOCK,
@@ -1055,11 +1060,13 @@ static struct snd_soc_dai_driver wm8997_dai[] = {
static int wm8997_codec_probe(struct snd_soc_codec *codec)
{
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
struct wm8997_priv *priv = snd_soc_codec_get_drvdata(codec);
arizona_init_spk(codec);
+ arizona_init_notifiers(codec);
- snd_soc_dapm_disable_pin(dapm, "HAPTICS");
+ snd_soc_component_disable_pin(component, "HAPTICS");
priv->core.arizona->dapm = dapm;
@@ -1072,8 +1079,6 @@ static int wm8997_codec_remove(struct snd_soc_codec *codec)
priv->core.arizona->dapm = NULL;
- arizona_free_spk(codec);
-
return 0;
}
@@ -1119,7 +1124,7 @@ static int wm8997_probe(struct platform_device *pdev)
{
struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
struct wm8997_priv *wm8997;
- int i;
+ int i, ret;
wm8997 = devm_kzalloc(&pdev->dev, sizeof(struct wm8997_priv),
GFP_KERNEL);
@@ -1159,15 +1164,33 @@ static int wm8997_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_idle(&pdev->dev);
- return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm8997,
- wm8997_dai, ARRAY_SIZE(wm8997_dai));
+ ret = arizona_init_spk_irqs(arizona);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm8997,
+ wm8997_dai, ARRAY_SIZE(wm8997_dai));
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register codec: %d\n", ret);
+ goto err_spk_irqs;
+ }
+
+	return ret;
+
+err_spk_irqs:
+ arizona_free_spk_irqs(arizona);
+
+ return ret;
}
static int wm8997_remove(struct platform_device *pdev)
{
+ struct wm8997_priv *wm8997 = platform_get_drvdata(pdev);
+ struct arizona *arizona = wm8997->core.arizona;
+
snd_soc_unregister_codec(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ arizona_free_spk_irqs(arizona);
+
return 0;
}
diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
index bcc2e1060a6c..3694f5958d86 100644
--- a/sound/soc/codecs/wm8998.c
+++ b/sound/soc/codecs/wm8998.c
@@ -541,9 +541,11 @@ static const struct snd_kcontrol_new wm8998_aec_loopback_mux[] = {
static const struct snd_soc_dapm_widget wm8998_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1,
- ARIZONA_SYSCLK_ENA_SHIFT, 0, NULL, 0),
+ ARIZONA_SYSCLK_ENA_SHIFT, 0, arizona_clk_ev,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
- ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0),
+ ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, arizona_clk_ev,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK,
ARIZONA_OPCLK_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("ASYNCOPCLK", ARIZONA_OUTPUT_ASYNC_CLOCK,
@@ -1318,13 +1320,15 @@ static int wm8998_codec_probe(struct snd_soc_codec *codec)
{
struct wm8998_priv *priv = snd_soc_codec_get_drvdata(codec);
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
priv->core.arizona->dapm = dapm;
arizona_init_spk(codec);
arizona_init_gpio(codec);
+ arizona_init_notifiers(codec);
- snd_soc_dapm_disable_pin(dapm, "HAPTICS");
+ snd_soc_component_disable_pin(component, "HAPTICS");
return 0;
}
@@ -1335,8 +1339,6 @@ static int wm8998_codec_remove(struct snd_soc_codec *codec)
priv->core.arizona->dapm = NULL;
- arizona_free_spk(codec);
-
return 0;
}
@@ -1385,7 +1387,7 @@ static int wm8998_probe(struct platform_device *pdev)
{
struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
struct wm8998_priv *wm8998;
- int i;
+ int i, ret;
wm8998 = devm_kzalloc(&pdev->dev, sizeof(struct wm8998_priv),
GFP_KERNEL);
@@ -1417,15 +1419,35 @@ static int wm8998_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_idle(&pdev->dev);
- return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm8998,
- wm8998_dai, ARRAY_SIZE(wm8998_dai));
+ ret = arizona_init_spk_irqs(arizona);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm8998,
+ wm8998_dai, ARRAY_SIZE(wm8998_dai));
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register codec: %d\n", ret);
+ goto err_spk_irqs;
+ }
+
+ return ret;
+
+err_spk_irqs:
+ arizona_free_spk_irqs(arizona);
+
+ return ret;
}
static int wm8998_remove(struct platform_device *pdev)
{
+ struct wm8998_priv *wm8998 = platform_get_drvdata(pdev);
+ struct arizona *arizona = wm8998->core.arizona;
+
snd_soc_unregister_codec(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ arizona_free_spk_irqs(arizona);
+
return 0;
}
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 856867ec2813..6febef337dd2 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -1304,7 +1304,6 @@ static const struct regmap_config wm9081_regmap = {
.cache_type = REGCACHE_RBTREE,
};
-#if IS_ENABLED(CONFIG_I2C)
static int wm9081_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -1384,7 +1383,6 @@ static struct i2c_driver wm9081_i2c_driver = {
.remove = wm9081_i2c_remove,
.id_table = wm9081_i2c_id,
};
-#endif
module_i2c_driver(wm9081_i2c_driver);
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index dcdd055db57b..f6d5c0f2aea5 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -14,37 +14,58 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
+#include <linux/regmap.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/ac97_codec.h>
#include <sound/initval.h>
#include <sound/soc.h>
-#include "wm9705.h"
-
#define WM9705_VENDOR_ID 0x574d4c05
#define WM9705_VENDOR_ID_MASK 0xffffffff
-/*
- * WM9705 register cache
- */
-static const u16 wm9705_reg[] = {
- 0x6150, 0x8000, 0x8000, 0x8000, /* 0x0 */
- 0x0000, 0x8000, 0x8008, 0x8008, /* 0x8 */
- 0x8808, 0x8808, 0x8808, 0x8808, /* 0x10 */
- 0x8808, 0x0000, 0x8000, 0x0000, /* 0x18 */
- 0x0000, 0x0000, 0x0000, 0x000f, /* 0x20 */
- 0x0605, 0x0000, 0xbb80, 0x0000, /* 0x28 */
- 0x0000, 0xbb80, 0x0000, 0x0000, /* 0x30 */
- 0x0000, 0x2000, 0x0000, 0x0000, /* 0x38 */
- 0x0000, 0x0000, 0x0000, 0x0000, /* 0x40 */
- 0x0000, 0x0000, 0x0000, 0x0000, /* 0x48 */
- 0x0000, 0x0000, 0x0000, 0x0000, /* 0x50 */
- 0x0000, 0x0000, 0x0000, 0x0000, /* 0x58 */
- 0x0000, 0x0000, 0x0000, 0x0000, /* 0x60 */
- 0x0000, 0x0000, 0x0000, 0x0000, /* 0x68 */
- 0x0000, 0x0808, 0x0000, 0x0006, /* 0x70 */
- 0x0000, 0x0000, 0x574d, 0x4c05, /* 0x78 */
+static const struct reg_default wm9705_reg_defaults[] = {
+ { 0x02, 0x8000 },
+ { 0x04, 0x8000 },
+ { 0x06, 0x8000 },
+ { 0x0a, 0x8000 },
+ { 0x0c, 0x8008 },
+ { 0x0e, 0x8008 },
+ { 0x10, 0x8808 },
+ { 0x12, 0x8808 },
+ { 0x14, 0x8808 },
+ { 0x16, 0x8808 },
+ { 0x18, 0x8808 },
+ { 0x1a, 0x0000 },
+ { 0x1c, 0x8000 },
+ { 0x20, 0x0000 },
+ { 0x22, 0x0000 },
+ { 0x26, 0x000f },
+ { 0x28, 0x0605 },
+ { 0x2a, 0x0000 },
+ { 0x2c, 0xbb80 },
+ { 0x32, 0xbb80 },
+ { 0x34, 0x2000 },
+ { 0x5a, 0x0000 },
+ { 0x5c, 0x0000 },
+ { 0x72, 0x0808 },
+ { 0x74, 0x0000 },
+ { 0x76, 0x0006 },
+ { 0x78, 0x0000 },
+ { 0x7a, 0x0000 },
+};
+
+static const struct regmap_config wm9705_regmap_config = {
+ .reg_bits = 16,
+ .reg_stride = 2,
+ .val_bits = 16,
+ .max_register = 0x7e,
+ .cache_type = REGCACHE_RBTREE,
+
+ .volatile_reg = regmap_ac97_default_volatile,
+
+ .reg_defaults = wm9705_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(wm9705_reg_defaults),
};
static const struct snd_kcontrol_new wm9705_snd_ac97_controls[] = {
@@ -203,57 +224,20 @@ static const struct snd_soc_dapm_route wm9705_audio_map[] = {
{"Right ADC", NULL, "ADC PGA"},
};
-/* We use a register cache to enhance read performance. */
-static unsigned int ac97_read(struct snd_soc_codec *codec, unsigned int reg)
-{
- struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
- u16 *cache = codec->reg_cache;
-
- switch (reg) {
- case AC97_RESET:
- case AC97_VENDOR_ID1:
- case AC97_VENDOR_ID2:
- return soc_ac97_ops->read(ac97, reg);
- default:
- reg = reg >> 1;
-
- if (reg >= (ARRAY_SIZE(wm9705_reg)))
- return -EIO;
-
- return cache[reg];
- }
-}
-
-static int ac97_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int val)
-{
- struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
- u16 *cache = codec->reg_cache;
-
- soc_ac97_ops->write(ac97, reg, val);
- reg = reg >> 1;
- if (reg < (ARRAY_SIZE(wm9705_reg)))
- cache[reg] = val;
-
- return 0;
-}
-
static int ac97_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
int reg;
- u16 vra;
- vra = ac97_read(codec, AC97_EXTENDED_STATUS);
- ac97_write(codec, AC97_EXTENDED_STATUS, vra | 0x1);
+ snd_soc_update_bits(codec, AC97_EXTENDED_STATUS, 0x1, 0x1);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
reg = AC97_PCM_FRONT_DAC_RATE;
else
reg = AC97_PCM_LR_ADC_RATE;
- return ac97_write(codec, reg, substream->runtime->rate);
+ return snd_soc_write(codec, reg, substream->runtime->rate);
}
#define WM9705_AC97_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | \
@@ -299,9 +283,9 @@ static struct snd_soc_dai_driver wm9705_dai[] = {
#ifdef CONFIG_PM
static int wm9705_soc_suspend(struct snd_soc_codec *codec)
{
- struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
-
- soc_ac97_ops->write(ac97, AC97_POWERDOWN, 0xffff);
+ regcache_cache_bypass(codec->component.regmap, true);
+ snd_soc_write(codec, AC97_POWERDOWN, 0xffff);
+ regcache_cache_bypass(codec->component.regmap, false);
return 0;
}
@@ -309,17 +293,14 @@ static int wm9705_soc_suspend(struct snd_soc_codec *codec)
static int wm9705_soc_resume(struct snd_soc_codec *codec)
{
struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
- int i, ret;
- u16 *cache = codec->reg_cache;
+ int ret;
ret = snd_ac97_reset(ac97, true, WM9705_VENDOR_ID,
WM9705_VENDOR_ID_MASK);
if (ret < 0)
return ret;
- for (i = 2; i < ARRAY_SIZE(wm9705_reg) << 1; i += 2) {
- soc_ac97_ops->write(ac97, i, cache[i>>1]);
- }
+ regcache_sync(codec->component.regmap);
return 0;
}
@@ -331,6 +312,8 @@ static int wm9705_soc_resume(struct snd_soc_codec *codec)
static int wm9705_soc_probe(struct snd_soc_codec *codec)
{
struct snd_ac97 *ac97;
+ struct regmap *regmap;
+ int ret;
ac97 = snd_soc_new_ac97_codec(codec, WM9705_VENDOR_ID,
WM9705_VENDOR_ID_MASK);
@@ -339,15 +322,26 @@ static int wm9705_soc_probe(struct snd_soc_codec *codec)
return PTR_ERR(ac97);
}
+ regmap = regmap_init_ac97(ac97, &wm9705_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err_free_ac97_codec;
+ }
+
snd_soc_codec_set_drvdata(codec, ac97);
+ snd_soc_codec_init_regmap(codec, regmap);
return 0;
+err_free_ac97_codec:
+ snd_soc_free_ac97_codec(ac97);
+ return ret;
}
static int wm9705_soc_remove(struct snd_soc_codec *codec)
{
struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
+ snd_soc_codec_exit_regmap(codec);
snd_soc_free_ac97_codec(ac97);
return 0;
}
@@ -357,12 +351,6 @@ static const struct snd_soc_codec_driver soc_codec_dev_wm9705 = {
.remove = wm9705_soc_remove,
.suspend = wm9705_soc_suspend,
.resume = wm9705_soc_resume,
- .read = ac97_read,
- .write = ac97_write,
- .reg_cache_size = ARRAY_SIZE(wm9705_reg),
- .reg_word_size = sizeof(u16),
- .reg_cache_step = 2,
- .reg_cache_default = wm9705_reg,
.component_driver = {
.controls = wm9705_snd_ac97_controls,
diff --git a/sound/soc/codecs/wm9705.h b/sound/soc/codecs/wm9705.h
deleted file mode 100644
index 23ea9ce47359..000000000000
--- a/sound/soc/codecs/wm9705.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * wm9705.h -- WM9705 Soc Audio driver
- */
-
-#ifndef _WM9705_H
-#define _WM9705_H
-
-#define WM9705_DAI_AC97_HIFI 0
-#define WM9705_DAI_AC97_AUX 1
-
-#endif
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 557709eac698..1a3e1797994a 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -15,13 +15,13 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
+#include <linux/regmap.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/ac97_codec.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <sound/tlv.h>
-#include "wm9712.h"
#define WM9712_VENDOR_ID 0x574d4c12
#define WM9712_VENDOR_ID_MASK 0xffffffff
@@ -32,31 +32,66 @@ struct wm9712_priv {
struct mutex lock;
};
-static unsigned int ac97_read(struct snd_soc_codec *codec,
- unsigned int reg);
-static int ac97_write(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int val);
+static const struct reg_default wm9712_reg_defaults[] = {
+ { 0x02, 0x8000 },
+ { 0x04, 0x8000 },
+ { 0x06, 0x8000 },
+ { 0x08, 0x0f0f },
+ { 0x0a, 0xaaa0 },
+ { 0x0c, 0xc008 },
+ { 0x0e, 0x6808 },
+ { 0x10, 0xe808 },
+ { 0x12, 0xaaa0 },
+ { 0x14, 0xad00 },
+ { 0x16, 0x8000 },
+ { 0x18, 0xe808 },
+ { 0x1a, 0x3000 },
+ { 0x1c, 0x8000 },
+ { 0x20, 0x0000 },
+ { 0x22, 0x0000 },
+ { 0x26, 0x000f },
+ { 0x28, 0x0605 },
+ { 0x2a, 0x0410 },
+ { 0x2c, 0xbb80 },
+ { 0x2e, 0xbb80 },
+ { 0x32, 0xbb80 },
+ { 0x34, 0x2000 },
+ { 0x4c, 0xf83e },
+ { 0x4e, 0xffff },
+ { 0x50, 0x0000 },
+ { 0x52, 0x0000 },
+ { 0x56, 0xf83e },
+ { 0x58, 0x0008 },
+ { 0x5c, 0x0000 },
+ { 0x60, 0xb032 },
+ { 0x62, 0x3e00 },
+ { 0x64, 0x0000 },
+ { 0x76, 0x0006 },
+ { 0x78, 0x0001 },
+ { 0x7a, 0x0000 },
+};
-/*
- * WM9712 register cache
- */
-static const u16 wm9712_reg[] = {
- 0x6174, 0x8000, 0x8000, 0x8000, /* 6 */
- 0x0f0f, 0xaaa0, 0xc008, 0x6808, /* e */
- 0xe808, 0xaaa0, 0xad00, 0x8000, /* 16 */
- 0xe808, 0x3000, 0x8000, 0x0000, /* 1e */
- 0x0000, 0x0000, 0x0000, 0x000f, /* 26 */
- 0x0405, 0x0410, 0xbb80, 0xbb80, /* 2e */
- 0x0000, 0xbb80, 0x0000, 0x0000, /* 36 */
- 0x0000, 0x2000, 0x0000, 0x0000, /* 3e */
- 0x0000, 0x0000, 0x0000, 0x0000, /* 46 */
- 0x0000, 0x0000, 0xf83e, 0xffff, /* 4e */
- 0x0000, 0x0000, 0x0000, 0xf83e, /* 56 */
- 0x0008, 0x0000, 0x0000, 0x0000, /* 5e */
- 0xb032, 0x3e00, 0x0000, 0x0000, /* 66 */
- 0x0000, 0x0000, 0x0000, 0x0000, /* 6e */
- 0x0000, 0x0000, 0x0000, 0x0006, /* 76 */
- 0x0001, 0x0000, 0x574d, 0x4c12, /* 7e */
+static bool wm9712_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AC97_REC_GAIN:
+ return true;
+ default:
+ return regmap_ac97_default_volatile(dev, reg);
+ }
+}
+
+static const struct regmap_config wm9712_regmap_config = {
+ .reg_bits = 16,
+ .reg_stride = 2,
+ .val_bits = 16,
+ .max_register = 0x7e,
+ .cache_type = REGCACHE_RBTREE,
+
+ .volatile_reg = wm9712_volatile_reg,
+
+ .reg_defaults = wm9712_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(wm9712_reg_defaults),
};
#define HPL_MIXER 0x0
@@ -187,7 +222,7 @@ static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol,
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
unsigned int mixer, mask, shift, old;
- struct snd_soc_dapm_update update;
+ struct snd_soc_dapm_update update = { 0 };
bool change;
mixer = mc->shift >> 8;
@@ -485,75 +520,36 @@ static const struct snd_soc_dapm_route wm9712_audio_map[] = {
{"ROUT2", NULL, "Speaker PGA"},
};
-static unsigned int ac97_read(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
- u16 *cache = codec->reg_cache;
-
- if (reg == AC97_RESET || reg == AC97_GPIO_STATUS ||
- reg == AC97_VENDOR_ID1 || reg == AC97_VENDOR_ID2 ||
- reg == AC97_REC_GAIN)
- return soc_ac97_ops->read(wm9712->ac97, reg);
- else {
- reg = reg >> 1;
-
- if (reg >= (ARRAY_SIZE(wm9712_reg)))
- return -EIO;
-
- return cache[reg];
- }
-}
-
-static int ac97_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int val)
-{
- struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
- u16 *cache = codec->reg_cache;
-
- soc_ac97_ops->write(wm9712->ac97, reg, val);
- reg = reg >> 1;
- if (reg < (ARRAY_SIZE(wm9712_reg)))
- cache[reg] = val;
-
- return 0;
-}
-
static int ac97_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
int reg;
- u16 vra;
struct snd_pcm_runtime *runtime = substream->runtime;
- vra = ac97_read(codec, AC97_EXTENDED_STATUS);
- ac97_write(codec, AC97_EXTENDED_STATUS, vra | 0x1);
+ snd_soc_update_bits(codec, AC97_EXTENDED_STATUS, 0x1, 0x1);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
reg = AC97_PCM_FRONT_DAC_RATE;
else
reg = AC97_PCM_LR_ADC_RATE;
- return ac97_write(codec, reg, runtime->rate);
+ return snd_soc_write(codec, reg, runtime->rate);
}
static int ac97_aux_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
- u16 vra, xsle;
struct snd_pcm_runtime *runtime = substream->runtime;
- vra = ac97_read(codec, AC97_EXTENDED_STATUS);
- ac97_write(codec, AC97_EXTENDED_STATUS, vra | 0x1);
- xsle = ac97_read(codec, AC97_PCI_SID);
- ac97_write(codec, AC97_PCI_SID, xsle | 0x8000);
+ snd_soc_update_bits(codec, AC97_EXTENDED_STATUS, 0x1, 0x1);
+ snd_soc_update_bits(codec, AC97_PCI_SID, 0x8000, 0x8000);
if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
return -ENODEV;
- return ac97_write(codec, AC97_PCM_SURR_DAC_RATE, runtime->rate);
+ return snd_soc_write(codec, AC97_PCM_SURR_DAC_RATE, runtime->rate);
}
#define WM9712_AC97_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\
@@ -605,12 +601,12 @@ static int wm9712_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_PREPARE:
break;
case SND_SOC_BIAS_STANDBY:
- ac97_write(codec, AC97_POWERDOWN, 0x0000);
+ snd_soc_write(codec, AC97_POWERDOWN, 0x0000);
break;
case SND_SOC_BIAS_OFF:
/* disable everything including AC link */
- ac97_write(codec, AC97_EXTENDED_MSTATUS, 0xffff);
- ac97_write(codec, AC97_POWERDOWN, 0xffff);
+ snd_soc_write(codec, AC97_EXTENDED_MSTATUS, 0xffff);
+ snd_soc_write(codec, AC97_POWERDOWN, 0xffff);
break;
}
return 0;
@@ -619,8 +615,7 @@ static int wm9712_set_bias_level(struct snd_soc_codec *codec,
static int wm9712_soc_resume(struct snd_soc_codec *codec)
{
struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
- int i, ret;
- u16 *cache = codec->reg_cache;
+ int ret;
ret = snd_ac97_reset(wm9712->ac97, true, WM9712_VENDOR_ID,
WM9712_VENDOR_ID_MASK);
@@ -629,15 +624,8 @@ static int wm9712_soc_resume(struct snd_soc_codec *codec)
snd_soc_codec_force_bias_level(codec, SND_SOC_BIAS_STANDBY);
- if (ret == 0) {
- /* Sync reg_cache with the hardware after cold reset */
- for (i = 2; i < ARRAY_SIZE(wm9712_reg) << 1; i += 2) {
- if (i == AC97_INT_PAGING || i == AC97_POWERDOWN ||
- (i > 0x58 && i != 0x5c))
- continue;
- soc_ac97_ops->write(wm9712->ac97, i, cache[i>>1]);
- }
- }
+ if (ret == 0)
+ regcache_sync(codec->component.regmap);
return ret;
}
@@ -645,6 +633,7 @@ static int wm9712_soc_resume(struct snd_soc_codec *codec)
static int wm9712_soc_probe(struct snd_soc_codec *codec)
{
struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
+ struct regmap *regmap;
int ret;
wm9712->ac97 = snd_soc_new_ac97_codec(codec, WM9712_VENDOR_ID,
@@ -655,16 +644,28 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec)
return ret;
}
+ regmap = regmap_init_ac97(wm9712->ac97, &wm9712_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err_free_ac97_codec;
+ }
+
+ snd_soc_codec_init_regmap(codec, regmap);
+
/* set alc mux to none */
- ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000);
+ snd_soc_update_bits(codec, AC97_VIDEO, 0x3000, 0x3000);
return 0;
+err_free_ac97_codec:
+ snd_soc_free_ac97_codec(wm9712->ac97);
+ return ret;
}
static int wm9712_soc_remove(struct snd_soc_codec *codec)
{
struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
+ snd_soc_codec_exit_regmap(codec);
snd_soc_free_ac97_codec(wm9712->ac97);
return 0;
}
@@ -673,14 +674,8 @@ static const struct snd_soc_codec_driver soc_codec_dev_wm9712 = {
.probe = wm9712_soc_probe,
.remove = wm9712_soc_remove,
.resume = wm9712_soc_resume,
- .read = ac97_read,
- .write = ac97_write,
.set_bias_level = wm9712_set_bias_level,
.suspend_bias_off = true,
- .reg_cache_size = ARRAY_SIZE(wm9712_reg),
- .reg_word_size = sizeof(u16),
- .reg_cache_step = 2,
- .reg_cache_default = wm9712_reg,
.component_driver = {
.controls = wm9712_snd_ac97_controls,
diff --git a/sound/soc/codecs/wm9712.h b/sound/soc/codecs/wm9712.h
deleted file mode 100644
index fb69c3aa4ed0..000000000000
--- a/sound/soc/codecs/wm9712.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * wm9712.h -- WM9712 Soc Audio driver
- */
-
-#ifndef _WM9712_H
-#define _WM9712_H
-
-#define WM9712_DAI_AC97_HIFI 0
-#define WM9712_DAI_AC97_AUX 1
-
-#endif
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index e4301ddb1b84..7e4822185feb 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -231,7 +231,7 @@ static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol,
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
unsigned int mixer, mask, shift, old;
- struct snd_soc_dapm_update update;
+ struct snd_soc_dapm_update update = { 0 };
bool change;
mixer = mc->shift >> 8;
diff --git a/sound/soc/codecs/wm9713.h b/sound/soc/codecs/wm9713.h
index 53df11b1f727..7ecffc563016 100644
--- a/sound/soc/codecs/wm9713.h
+++ b/sound/soc/codecs/wm9713.h
@@ -41,8 +41,4 @@
#define WM9713_PCMBCLK_DIV_8 (3 << 9)
#define WM9713_PCMBCLK_DIV_16 (4 << 9)
-#define WM9713_DAI_AC97_HIFI 0
-#define WM9713_DAI_AC97_AUX 1
-#define WM9713_DAI_PCM_VOICE 2
-
#endif
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index b943dde8dbe5..593b7d1aed46 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -162,6 +162,16 @@
#define ADSP_MAX_STD_CTRL_SIZE 512
+#define WM_ADSP_ACKED_CTL_TIMEOUT_MS 100
+#define WM_ADSP_ACKED_CTL_N_QUICKPOLLS 10
+#define WM_ADSP_ACKED_CTL_MIN_VALUE 0
+#define WM_ADSP_ACKED_CTL_MAX_VALUE 0xFFFFFF
+
+/*
+ * Event control messages
+ */
+#define WM_ADSP_FW_EVENT_SHUTDOWN 0x000001
+
struct wm_adsp_buf {
struct list_head list;
void *buf;
@@ -177,7 +187,7 @@ static struct wm_adsp_buf *wm_adsp_buf_alloc(const void *src, size_t len,
buf->buf = vmalloc(len);
if (!buf->buf) {
- vfree(buf);
+ kfree(buf);
return NULL;
}
memcpy(buf->buf, src, len);
@@ -441,11 +451,29 @@ struct wm_coeff_ctl {
unsigned int offset;
size_t len;
unsigned int set:1;
- struct snd_kcontrol *kcontrol;
struct soc_bytes_ext bytes_ext;
unsigned int flags;
+ unsigned int type;
};
+static const char *wm_adsp_mem_region_name(unsigned int type)
+{
+ switch (type) {
+ case WMFW_ADSP1_PM:
+ return "PM";
+ case WMFW_ADSP1_DM:
+ return "DM";
+ case WMFW_ADSP2_XM:
+ return "XM";
+ case WMFW_ADSP2_YM:
+ return "YM";
+ case WMFW_ADSP1_ZM:
+ return "ZM";
+ default:
+ return NULL;
+ }
+}
+
#ifdef CONFIG_DEBUG_FS
static void wm_adsp_debugfs_save_wmfwname(struct wm_adsp *dsp, const char *s)
{
@@ -727,6 +755,24 @@ static inline struct wm_coeff_ctl *bytes_ext_to_ctl(struct soc_bytes_ext *ext)
return container_of(ext, struct wm_coeff_ctl, bytes_ext);
}
+static int wm_coeff_base_reg(struct wm_coeff_ctl *ctl, unsigned int *reg)
+{
+ const struct wm_adsp_alg_region *alg_region = &ctl->alg_region;
+ struct wm_adsp *dsp = ctl->dsp;
+ const struct wm_adsp_region *mem;
+
+ mem = wm_adsp_find_region(dsp, alg_region->type);
+ if (!mem) {
+ adsp_err(dsp, "No base for region %x\n",
+ alg_region->type);
+ return -EINVAL;
+ }
+
+ *reg = wm_adsp_region_to_reg(mem, ctl->alg_region.base + ctl->offset);
+
+ return 0;
+}
+
static int wm_coeff_info(struct snd_kcontrol *kctl,
struct snd_ctl_elem_info *uinfo)
{
@@ -734,30 +780,94 @@ static int wm_coeff_info(struct snd_kcontrol *kctl,
(struct soc_bytes_ext *)kctl->private_value;
struct wm_coeff_ctl *ctl = bytes_ext_to_ctl(bytes_ext);
- uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
- uinfo->count = ctl->len;
+ switch (ctl->type) {
+ case WMFW_CTL_TYPE_ACKED:
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->value.integer.min = WM_ADSP_ACKED_CTL_MIN_VALUE;
+ uinfo->value.integer.max = WM_ADSP_ACKED_CTL_MAX_VALUE;
+ uinfo->value.integer.step = 1;
+ uinfo->count = 1;
+ break;
+ default:
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ uinfo->count = ctl->len;
+ break;
+ }
+
return 0;
}
+static int wm_coeff_write_acked_control(struct wm_coeff_ctl *ctl,
+ unsigned int event_id)
+{
+ struct wm_adsp *dsp = ctl->dsp;
+ u32 val = cpu_to_be32(event_id);
+ unsigned int reg;
+ int i, ret;
+
+ ret = wm_coeff_base_reg(ctl, &reg);
+ if (ret)
+ return ret;
+
+ adsp_dbg(dsp, "Sending 0x%x to acked control alg 0x%x %s:0x%x\n",
+ event_id, ctl->alg_region.alg,
+ wm_adsp_mem_region_name(ctl->alg_region.type), ctl->offset);
+
+ ret = regmap_raw_write(dsp->regmap, reg, &val, sizeof(val));
+ if (ret) {
+ adsp_err(dsp, "Failed to write %x: %d\n", reg, ret);
+ return ret;
+ }
+
+ /*
+	 * Poll for ack: we initially poll at ~1ms intervals for firmwares
+ * that respond quickly, then go to ~10ms polls. A firmware is unlikely
+ * to ack instantly so we do the first 1ms delay before reading the
+ * control to avoid a pointless bus transaction
+ */
+ for (i = 0; i < WM_ADSP_ACKED_CTL_TIMEOUT_MS;) {
+ switch (i) {
+ case 0 ... WM_ADSP_ACKED_CTL_N_QUICKPOLLS - 1:
+ usleep_range(1000, 2000);
+ i++;
+ break;
+ default:
+ usleep_range(10000, 20000);
+ i += 10;
+ break;
+ }
+
+ ret = regmap_raw_read(dsp->regmap, reg, &val, sizeof(val));
+ if (ret) {
+ adsp_err(dsp, "Failed to read %x: %d\n", reg, ret);
+ return ret;
+ }
+
+ if (val == 0) {
+ adsp_dbg(dsp, "Acked control ACKED at poll %u\n", i);
+ return 0;
+ }
+ }
+
+ adsp_warn(dsp, "Acked control @0x%x alg:0x%x %s:0x%x timed out\n",
+ reg, ctl->alg_region.alg,
+ wm_adsp_mem_region_name(ctl->alg_region.type),
+ ctl->offset);
+
+ return -ETIMEDOUT;
+}
+
static int wm_coeff_write_control(struct wm_coeff_ctl *ctl,
const void *buf, size_t len)
{
- struct wm_adsp_alg_region *alg_region = &ctl->alg_region;
- const struct wm_adsp_region *mem;
struct wm_adsp *dsp = ctl->dsp;
void *scratch;
int ret;
unsigned int reg;
- mem = wm_adsp_find_region(dsp, alg_region->type);
- if (!mem) {
- adsp_err(dsp, "No base for region %x\n",
- alg_region->type);
- return -EINVAL;
- }
-
- reg = ctl->alg_region.base + ctl->offset;
- reg = wm_adsp_region_to_reg(mem, reg);
+ ret = wm_coeff_base_reg(ctl, &reg);
+ if (ret)
+ return ret;
scratch = kmemdup(buf, len, GFP_KERNEL | GFP_DMA);
if (!scratch)
@@ -823,25 +933,41 @@ static int wm_coeff_tlv_put(struct snd_kcontrol *kctl,
return ret;
}
+static int wm_coeff_put_acked(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_bytes_ext *bytes_ext =
+ (struct soc_bytes_ext *)kctl->private_value;
+ struct wm_coeff_ctl *ctl = bytes_ext_to_ctl(bytes_ext);
+ unsigned int val = ucontrol->value.integer.value[0];
+ int ret;
+
+ if (val == 0)
+ return 0; /* 0 means no event */
+
+ mutex_lock(&ctl->dsp->pwr_lock);
+
+ if (ctl->enabled)
+ ret = wm_coeff_write_acked_control(ctl, val);
+ else
+ ret = -EPERM;
+
+ mutex_unlock(&ctl->dsp->pwr_lock);
+
+ return ret;
+}
+
static int wm_coeff_read_control(struct wm_coeff_ctl *ctl,
void *buf, size_t len)
{
- struct wm_adsp_alg_region *alg_region = &ctl->alg_region;
- const struct wm_adsp_region *mem;
struct wm_adsp *dsp = ctl->dsp;
void *scratch;
int ret;
unsigned int reg;
- mem = wm_adsp_find_region(dsp, alg_region->type);
- if (!mem) {
- adsp_err(dsp, "No base for region %x\n",
- alg_region->type);
- return -EINVAL;
- }
-
- reg = ctl->alg_region.base + ctl->offset;
- reg = wm_adsp_region_to_reg(mem, reg);
+ ret = wm_coeff_base_reg(ctl, &reg);
+ if (ret)
+ return ret;
scratch = kmalloc(len, GFP_KERNEL | GFP_DMA);
if (!scratch)
@@ -918,6 +1044,21 @@ static int wm_coeff_tlv_get(struct snd_kcontrol *kctl,
return ret;
}
+static int wm_coeff_get_acked(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ /*
+ * Although it's not useful to read an acked control, we must satisfy
+ * user-side assumptions that all controls are readable and that a
+ * write of the same value should be filtered out (it's valid to send
+ * the same event number again to the firmware). We therefore return 0,
+ * meaning "no event" so valid event numbers will always be a change
+ */
+ ucontrol->value.integer.value[0] = 0;
+
+ return 0;
+}
+
struct wmfw_ctl_work {
struct wm_adsp *dsp;
struct wm_coeff_ctl *ctl;
@@ -967,30 +1108,35 @@ static int wmfw_add_ctl(struct wm_adsp *dsp, struct wm_coeff_ctl *ctl)
kcontrol = kzalloc(sizeof(*kcontrol), GFP_KERNEL);
if (!kcontrol)
return -ENOMEM;
- kcontrol->iface = SNDRV_CTL_ELEM_IFACE_MIXER;
kcontrol->name = ctl->name;
kcontrol->info = wm_coeff_info;
- kcontrol->get = wm_coeff_get;
- kcontrol->put = wm_coeff_put;
kcontrol->iface = SNDRV_CTL_ELEM_IFACE_MIXER;
kcontrol->tlv.c = snd_soc_bytes_tlv_callback;
kcontrol->private_value = (unsigned long)&ctl->bytes_ext;
+ kcontrol->access = wmfw_convert_flags(ctl->flags, ctl->len);
- ctl->bytes_ext.max = ctl->len;
- ctl->bytes_ext.get = wm_coeff_tlv_get;
- ctl->bytes_ext.put = wm_coeff_tlv_put;
+ switch (ctl->type) {
+ case WMFW_CTL_TYPE_ACKED:
+ kcontrol->get = wm_coeff_get_acked;
+ kcontrol->put = wm_coeff_put_acked;
+ break;
+ default:
+ kcontrol->get = wm_coeff_get;
+ kcontrol->put = wm_coeff_put;
- kcontrol->access = wmfw_convert_flags(ctl->flags, ctl->len);
+ ctl->bytes_ext.max = ctl->len;
+ ctl->bytes_ext.get = wm_coeff_tlv_get;
+ ctl->bytes_ext.put = wm_coeff_tlv_put;
+ break;
+ }
- ret = snd_soc_add_card_controls(dsp->card, kcontrol, 1);
+ ret = snd_soc_add_codec_controls(dsp->codec, kcontrol, 1);
if (ret < 0)
goto err_kcontrol;
kfree(kcontrol);
- ctl->kcontrol = snd_soc_card_get_kcontrol(dsp->card, ctl->name);
-
return 0;
err_kcontrol:
@@ -1035,6 +1181,27 @@ static int wm_coeff_sync_controls(struct wm_adsp *dsp)
return 0;
}
+static void wm_adsp_signal_event_controls(struct wm_adsp *dsp,
+ unsigned int event)
+{
+ struct wm_coeff_ctl *ctl;
+ int ret;
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ if (ctl->type != WMFW_CTL_TYPE_HOSTEVENT)
+ continue;
+
+ if (!ctl->enabled)
+ continue;
+
+ ret = wm_coeff_write_acked_control(ctl, event);
+ if (ret)
+ adsp_warn(dsp,
+ "Failed to send 0x%x event to alg 0x%x (%d)\n",
+ event, ctl->alg_region.alg, ret);
+ }
+}
+
static void wm_adsp_ctl_work(struct work_struct *work)
{
struct wmfw_ctl_work *ctl_work = container_of(work,
@@ -1056,34 +1223,16 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
const struct wm_adsp_alg_region *alg_region,
unsigned int offset, unsigned int len,
const char *subname, unsigned int subname_len,
- unsigned int flags)
+ unsigned int flags, unsigned int type)
{
struct wm_coeff_ctl *ctl;
struct wmfw_ctl_work *ctl_work;
char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
- char *region_name;
+ const char *region_name;
int ret;
- if (flags & WMFW_CTL_FLAG_SYS)
- return 0;
-
- switch (alg_region->type) {
- case WMFW_ADSP1_PM:
- region_name = "PM";
- break;
- case WMFW_ADSP1_DM:
- region_name = "DM";
- break;
- case WMFW_ADSP2_XM:
- region_name = "XM";
- break;
- case WMFW_ADSP2_YM:
- region_name = "YM";
- break;
- case WMFW_ADSP1_ZM:
- region_name = "ZM";
- break;
- default:
+ region_name = wm_adsp_mem_region_name(alg_region->type);
+ if (!region_name) {
adsp_err(dsp, "Unknown region type: %d\n", alg_region->type);
return -EINVAL;
}
@@ -1139,6 +1288,7 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
ctl->dsp = dsp;
ctl->flags = flags;
+ ctl->type = type;
ctl->offset = offset;
ctl->len = len;
ctl->cache = kzalloc(ctl->len, GFP_KERNEL);
@@ -1149,6 +1299,9 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
list_add(&ctl->list, &dsp->ctl_list);
+ if (flags & WMFW_CTL_FLAG_SYS)
+ return 0;
+
ctl_work = kzalloc(sizeof(*ctl_work), GFP_KERNEL);
if (!ctl_work) {
ret = -ENOMEM;
@@ -1308,6 +1461,21 @@ static inline void wm_coeff_parse_coeff(struct wm_adsp *dsp, const u8 **data,
adsp_dbg(dsp, "\tALSA control len: %#x\n", blk->len);
}
+static int wm_adsp_check_coeff_flags(struct wm_adsp *dsp,
+ const struct wm_coeff_parsed_coeff *coeff_blk,
+ unsigned int f_required,
+ unsigned int f_illegal)
+{
+ if ((coeff_blk->flags & f_illegal) ||
+ ((coeff_blk->flags & f_required) != f_required)) {
+ adsp_err(dsp, "Illegal flags 0x%x for control type 0x%x\n",
+ coeff_blk->flags, coeff_blk->ctl_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int wm_adsp_parse_coeff(struct wm_adsp *dsp,
const struct wmfw_region *region)
{
@@ -1324,6 +1492,28 @@ static int wm_adsp_parse_coeff(struct wm_adsp *dsp,
switch (coeff_blk.ctl_type) {
case SNDRV_CTL_ELEM_TYPE_BYTES:
break;
+ case WMFW_CTL_TYPE_ACKED:
+ if (coeff_blk.flags & WMFW_CTL_FLAG_SYS)
+ continue; /* ignore */
+
+ ret = wm_adsp_check_coeff_flags(dsp, &coeff_blk,
+ WMFW_CTL_FLAG_VOLATILE |
+ WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_READABLE,
+ 0);
+ if (ret)
+ return -EINVAL;
+ break;
+ case WMFW_CTL_TYPE_HOSTEVENT:
+ ret = wm_adsp_check_coeff_flags(dsp, &coeff_blk,
+ WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_VOLATILE |
+ WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_READABLE,
+ 0);
+ if (ret)
+ return -EINVAL;
+ break;
default:
adsp_err(dsp, "Unknown control type: %d\n",
coeff_blk.ctl_type);
@@ -1338,7 +1528,8 @@ static int wm_adsp_parse_coeff(struct wm_adsp *dsp,
coeff_blk.len,
coeff_blk.name,
coeff_blk.name_len,
- coeff_blk.flags);
+ coeff_blk.flags,
+ coeff_blk.ctl_type);
if (ret < 0)
adsp_err(dsp, "Failed to create control: %.*s, %d\n",
coeff_blk.name_len, coeff_blk.name, ret);
@@ -1491,23 +1682,11 @@ static int wm_adsp_load(struct wm_adsp *dsp)
reg = offset;
break;
case WMFW_ADSP1_PM:
- region_name = "PM";
- reg = wm_adsp_region_to_reg(mem, offset);
- break;
case WMFW_ADSP1_DM:
- region_name = "DM";
- reg = wm_adsp_region_to_reg(mem, offset);
- break;
case WMFW_ADSP2_XM:
- region_name = "XM";
- reg = wm_adsp_region_to_reg(mem, offset);
- break;
case WMFW_ADSP2_YM:
- region_name = "YM";
- reg = wm_adsp_region_to_reg(mem, offset);
- break;
case WMFW_ADSP1_ZM:
- region_name = "ZM";
+ region_name = wm_adsp_mem_region_name(type);
reg = wm_adsp_region_to_reg(mem, offset);
break;
default:
@@ -1750,7 +1929,8 @@ static int wm_adsp1_setup_algs(struct wm_adsp *dsp)
len -= be32_to_cpu(adsp1_alg[i].dm);
len *= 4;
wm_adsp_create_control(dsp, alg_region, 0,
- len, NULL, 0, 0);
+ len, NULL, 0, 0,
+ SNDRV_CTL_ELEM_TYPE_BYTES);
} else {
adsp_warn(dsp, "Missing length info for region DM with ID %x\n",
be32_to_cpu(adsp1_alg[i].alg.id));
@@ -1770,7 +1950,8 @@ static int wm_adsp1_setup_algs(struct wm_adsp *dsp)
len -= be32_to_cpu(adsp1_alg[i].zm);
len *= 4;
wm_adsp_create_control(dsp, alg_region, 0,
- len, NULL, 0, 0);
+ len, NULL, 0, 0,
+ SNDRV_CTL_ELEM_TYPE_BYTES);
} else {
adsp_warn(dsp, "Missing length info for region ZM with ID %x\n",
be32_to_cpu(adsp1_alg[i].alg.id));
@@ -1861,7 +2042,8 @@ static int wm_adsp2_setup_algs(struct wm_adsp *dsp)
len -= be32_to_cpu(adsp2_alg[i].xm);
len *= 4;
wm_adsp_create_control(dsp, alg_region, 0,
- len, NULL, 0, 0);
+ len, NULL, 0, 0,
+ SNDRV_CTL_ELEM_TYPE_BYTES);
} else {
adsp_warn(dsp, "Missing length info for region XM with ID %x\n",
be32_to_cpu(adsp2_alg[i].alg.id));
@@ -1881,7 +2063,8 @@ static int wm_adsp2_setup_algs(struct wm_adsp *dsp)
len -= be32_to_cpu(adsp2_alg[i].ym);
len *= 4;
wm_adsp_create_control(dsp, alg_region, 0,
- len, NULL, 0, 0);
+ len, NULL, 0, 0,
+ SNDRV_CTL_ELEM_TYPE_BYTES);
} else {
adsp_warn(dsp, "Missing length info for region YM with ID %x\n",
be32_to_cpu(adsp2_alg[i].alg.id));
@@ -1901,7 +2084,8 @@ static int wm_adsp2_setup_algs(struct wm_adsp *dsp)
len -= be32_to_cpu(adsp2_alg[i].zm);
len *= 4;
wm_adsp_create_control(dsp, alg_region, 0,
- len, NULL, 0, 0);
+ len, NULL, 0, 0,
+ SNDRV_CTL_ELEM_TYPE_BYTES);
} else {
adsp_warn(dsp, "Missing length info for region ZM with ID %x\n",
be32_to_cpu(adsp2_alg[i].alg.id));
@@ -2114,7 +2298,7 @@ int wm_adsp1_event(struct snd_soc_dapm_widget *w,
int ret;
unsigned int val;
- dsp->card = codec->component.card;
+ dsp->codec = codec;
mutex_lock(&dsp->pwr_lock);
@@ -2325,8 +2509,6 @@ int wm_adsp2_early_event(struct snd_soc_dapm_widget *w,
struct wm_adsp *dsp = &dsps[w->shift];
struct wm_coeff_ctl *ctl;
- dsp->card = codec->component.card;
-
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
wm_adsp2_set_dspclk(dsp, freq);
@@ -2393,14 +2575,22 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
mutex_lock(&dsp->pwr_lock);
- if (wm_adsp_fw[dsp->fw].num_caps != 0)
+ if (wm_adsp_fw[dsp->fw].num_caps != 0) {
ret = wm_adsp_buffer_init(dsp);
+ if (ret < 0) {
+ mutex_unlock(&dsp->pwr_lock);
+ goto err;
+ }
+ }
mutex_unlock(&dsp->pwr_lock);
break;
case SND_SOC_DAPM_PRE_PMD:
+ /* Tell the firmware to cleanup */
+ wm_adsp_signal_event_controls(dsp, WM_ADSP_FW_EVENT_SHUTDOWN);
+
/* Log firmware state, it can be useful for analysis */
wm_adsp2_show_fw_status(dsp);
@@ -2441,6 +2631,8 @@ EXPORT_SYMBOL_GPL(wm_adsp2_event);
int wm_adsp2_codec_probe(struct wm_adsp *dsp, struct snd_soc_codec *codec)
{
+ dsp->codec = codec;
+
wm_adsp2_init_debugfs(dsp, codec);
return snd_soc_add_codec_controls(codec,
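For reference, the acked-control path added above amounts to a simple handshake: the host writes a non-zero event number to the coefficient register, and the firmware acknowledges by writing 0 back while the driver polls with a bounded retry count. A minimal userspace sketch of that handshake (illustrative names only, not the driver's code):

#include <stdio.h>

#define ACK_POLLS 10

static unsigned int coeff_reg;              /* stand-in for the acked coefficient register */

static void firmware_handles_event(void)
{
        coeff_reg = 0;                      /* firmware acks by clearing the register */
}

static int write_acked_control(unsigned int event)
{
        int i;

        coeff_reg = event;                  /* host posts the event number */
        for (i = 0; i < ACK_POLLS; i++) {
                firmware_handles_event();   /* the driver sleeps and re-reads instead */
                if (coeff_reg == 0)
                        return 0;           /* acked */
        }
        return -1;                          /* the driver reports -ETIMEDOUT here */
}

int main(void)
{
        printf("ack result: %d\n", write_acked_control(1));
        return 0;
}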
diff --git a/sound/soc/codecs/wm_adsp.h b/sound/soc/codecs/wm_adsp.h
index 362dd7ce60d8..411d062c13f2 100644
--- a/sound/soc/codecs/wm_adsp.h
+++ b/sound/soc/codecs/wm_adsp.h
@@ -44,7 +44,7 @@ struct wm_adsp {
int type;
struct device *dev;
struct regmap *regmap;
- struct snd_soc_card *card;
+ struct snd_soc_codec *codec;
int base;
int sysclk_reg;
@@ -110,18 +110,17 @@ int wm_adsp2_early_event(struct snd_soc_dapm_widget *w,
int wm_adsp2_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event);
-extern int wm_adsp_compr_open(struct wm_adsp *dsp,
- struct snd_compr_stream *stream);
-extern int wm_adsp_compr_free(struct snd_compr_stream *stream);
-extern int wm_adsp_compr_set_params(struct snd_compr_stream *stream,
- struct snd_compr_params *params);
-extern int wm_adsp_compr_get_caps(struct snd_compr_stream *stream,
- struct snd_compr_caps *caps);
-extern int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd);
-extern int wm_adsp_compr_handle_irq(struct wm_adsp *dsp);
-extern int wm_adsp_compr_pointer(struct snd_compr_stream *stream,
- struct snd_compr_tstamp *tstamp);
-extern int wm_adsp_compr_copy(struct snd_compr_stream *stream,
- char __user *buf, size_t count);
+int wm_adsp_compr_open(struct wm_adsp *dsp, struct snd_compr_stream *stream);
+int wm_adsp_compr_free(struct snd_compr_stream *stream);
+int wm_adsp_compr_set_params(struct snd_compr_stream *stream,
+ struct snd_compr_params *params);
+int wm_adsp_compr_get_caps(struct snd_compr_stream *stream,
+ struct snd_compr_caps *caps);
+int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd);
+int wm_adsp_compr_handle_irq(struct wm_adsp *dsp);
+int wm_adsp_compr_pointer(struct snd_compr_stream *stream,
+ struct snd_compr_tstamp *tstamp);
+int wm_adsp_compr_copy(struct snd_compr_stream *stream,
+ char __user *buf, size_t count);
#endif
diff --git a/sound/soc/codecs/wmfw.h b/sound/soc/codecs/wmfw.h
index 7613d60d62ea..ec78b9da020f 100644
--- a/sound/soc/codecs/wmfw.h
+++ b/sound/soc/codecs/wmfw.h
@@ -26,6 +26,10 @@
#define WMFW_CTL_FLAG_WRITEABLE 0x0002
#define WMFW_CTL_FLAG_READABLE 0x0001
+/* Non-ALSA coefficient types start at 0x1000 */
+#define WMFW_CTL_TYPE_ACKED 0x1000 /* acked control */
+#define WMFW_CTL_TYPE_HOSTEVENT 0x1001 /* event control */
+
struct wmfw_header {
char magic[4];
__le32 len;
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 19bdcac71775..37f9b6201918 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -40,6 +40,7 @@ config SND_SOC_FSL_SPDIF
select REGMAP_MMIO
select SND_SOC_IMX_PCM_DMA if SND_IMX_SOC != n
select SND_SOC_IMX_PCM_FIQ if SND_IMX_SOC != n && (MXC_TZIC || MXC_AVIC)
+ select BITREVERSE
help
Say Y if you want to add Sony/Philips Digital Interface (SPDIF)
support for the Freescale CPUs.
diff --git a/sound/soc/fsl/efika-audio-fabric.c b/sound/soc/fsl/efika-audio-fabric.c
index b2acd3293ea8..f200d1cfc4bd 100644
--- a/sound/soc/fsl/efika-audio-fabric.c
+++ b/sound/soc/fsl/efika-audio-fabric.c
@@ -27,7 +27,6 @@
#include "mpc5200_dma.h"
#include "mpc5200_psc_ac97.h"
-#include "../codecs/stac9766.h"
#define DRV_NAME "efika-audio-fabric"
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index dffd549a0e2a..9998aea23597 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -183,7 +183,7 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_soc_ops fsl_asoc_card_ops = {
+static const struct snd_soc_ops fsl_asoc_card_ops = {
.hw_params = fsl_asoc_card_hw_params,
};
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
index 201a70d1027a..1b60958e2080 100644
--- a/sound/soc/fsl/imx-wm8962.c
+++ b/sound/soc/fsl/imx-wm8962.c
@@ -61,7 +61,7 @@ static int imx_hifi_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_soc_ops imx_hifi_ops = {
+static const struct snd_soc_ops imx_hifi_ops = {
.hw_params = imx_hifi_hw_params,
};
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index 1cb39309f5d5..cf026252cd4a 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -1,5 +1,5 @@
/*
- * simple-card-core.c
+ * simple-card-utils.c
*
* Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*
@@ -195,9 +195,6 @@ EXPORT_SYMBOL_GPL(asoc_simple_card_init_dai);
int asoc_simple_card_canonicalize_dailink(struct snd_soc_dai_link *dai_link)
{
- if (!dai_link->cpu_dai_name || !dai_link->codec_dai_name)
- return -EINVAL;
-
/* Assumes platform == cpu */
if (!dai_link->platform_of_node)
dai_link->platform_of_node = dai_link->cpu_of_node;
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index f608f8d23f3d..a385ff6bfa4b 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -174,7 +174,7 @@ err:
return ret;
}
-static struct snd_soc_ops asoc_simple_card_ops = {
+static const struct snd_soc_ops asoc_simple_card_ops = {
.startup = asoc_simple_card_startup,
.shutdown = asoc_simple_card_shutdown,
.hw_params = asoc_simple_card_hw_params,
diff --git a/sound/soc/generic/simple-scu-card.c b/sound/soc/generic/simple-scu-card.c
index b9973a56bcb0..bb86ee042490 100644
--- a/sound/soc/generic/simple-scu-card.c
+++ b/sound/soc/generic/simple-scu-card.c
@@ -22,7 +22,7 @@
#include <sound/soc-dai.h>
#include <sound/simple_card_utils.h>
-struct asoc_simple_card_priv {
+struct simple_card_data {
struct snd_soc_card snd_card;
struct snd_soc_codec_conf codec_conf;
struct asoc_simple_dai *dai_props;
@@ -42,7 +42,7 @@ struct asoc_simple_card_priv {
static int asoc_simple_card_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct asoc_simple_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
struct asoc_simple_dai *dai_props =
simple_priv_to_props(priv, rtd->num);
@@ -52,21 +52,21 @@ static int asoc_simple_card_startup(struct snd_pcm_substream *substream)
static void asoc_simple_card_shutdown(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct asoc_simple_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
struct asoc_simple_dai *dai_props =
simple_priv_to_props(priv, rtd->num);
clk_disable_unprepare(dai_props->clk);
}
-static struct snd_soc_ops asoc_simple_card_ops = {
+static const struct snd_soc_ops asoc_simple_card_ops = {
.startup = asoc_simple_card_startup,
.shutdown = asoc_simple_card_shutdown,
};
static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
{
- struct asoc_simple_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
struct snd_soc_dai *dai;
struct snd_soc_dai_link *dai_link;
struct asoc_simple_dai *dai_props;
@@ -84,7 +84,7 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
static int asoc_simple_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
{
- struct asoc_simple_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
struct snd_interval *rate = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_RATE);
struct snd_interval *channels = hw_param_interval(params,
@@ -101,8 +101,8 @@ static int asoc_simple_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
return 0;
}
-static int asoc_simple_card_parse_links(struct device_node *np,
- struct asoc_simple_card_priv *priv,
+static int asoc_simple_card_dai_link_of(struct device_node *np,
+ struct simple_card_data *priv,
unsigned int daifmt,
int idx, bool is_fe)
{
@@ -195,22 +195,35 @@ static int asoc_simple_card_parse_links(struct device_node *np,
return 0;
}
-static int asoc_simple_card_dai_link_of(struct device_node *node,
- struct asoc_simple_card_priv *priv)
+static int asoc_simple_card_parse_of(struct device_node *node,
+ struct simple_card_data *priv)
+
{
struct device *dev = simple_priv_to_dev(priv);
struct device_node *np;
unsigned int daifmt = 0;
- int ret, i;
bool is_fe;
+ int ret, i;
+
+ if (!node)
+ return -EINVAL;
+
+ ret = snd_soc_of_parse_audio_routing(&priv->snd_card, PREFIX "routing");
+ if (ret < 0)
+ return ret;
+
+ /* sampling rate convert */
+ of_property_read_u32(node, PREFIX "convert-rate", &priv->convert_rate);
+
+ /* channels transfer */
+ of_property_read_u32(node, PREFIX "convert-channels", &priv->convert_channels);
/* find 1st codec */
np = of_get_child_by_name(node, PREFIX "codec");
if (!np)
return -ENODEV;
- ret = asoc_simple_card_parse_daifmt(dev, node, np,
- PREFIX, &daifmt);
+ ret = asoc_simple_card_parse_daifmt(dev, node, np, PREFIX, &daifmt);
if (ret < 0)
return ret;
@@ -220,58 +233,12 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
if (strcmp(np->name, PREFIX "cpu") == 0)
is_fe = true;
- ret = asoc_simple_card_parse_links(np, priv, daifmt, i, is_fe);
+ ret = asoc_simple_card_dai_link_of(np, priv, daifmt, i, is_fe);
if (ret < 0)
return ret;
i++;
}
- return 0;
-}
-
-static int asoc_simple_card_parse_of(struct device_node *node,
- struct asoc_simple_card_priv *priv,
- struct device *dev)
-{
- struct asoc_simple_dai *props;
- struct snd_soc_dai_link *links;
- int ret;
- int num;
-
- if (!node)
- return -EINVAL;
-
- num = of_get_child_count(node);
- props = devm_kzalloc(dev, sizeof(*props) * num, GFP_KERNEL);
- links = devm_kzalloc(dev, sizeof(*links) * num, GFP_KERNEL);
- if (!props || !links)
- return -ENOMEM;
-
- priv->dai_props = props;
- priv->dai_link = links;
-
- /* Init snd_soc_card */
- priv->snd_card.owner = THIS_MODULE;
- priv->snd_card.dev = dev;
- priv->snd_card.dai_link = priv->dai_link;
- priv->snd_card.num_links = num;
- priv->snd_card.codec_conf = &priv->codec_conf;
- priv->snd_card.num_configs = 1;
-
- ret = snd_soc_of_parse_audio_routing(&priv->snd_card, PREFIX "routing");
- if (ret < 0)
- return ret;
-
- /* sampling rate convert */
- of_property_read_u32(node, PREFIX "convert-rate", &priv->convert_rate);
-
- /* channels transfer */
- of_property_read_u32(node, PREFIX "convert-channels", &priv->convert_channels);
-
- ret = asoc_simple_card_dai_link_of(node, priv);
- if (ret < 0)
- return ret;
-
ret = asoc_simple_card_parse_card_name(&priv->snd_card, PREFIX);
if (ret < 0)
return ret;
@@ -286,17 +253,37 @@ static int asoc_simple_card_parse_of(struct device_node *node,
static int asoc_simple_card_probe(struct platform_device *pdev)
{
- struct asoc_simple_card_priv *priv;
- struct device_node *np = pdev->dev.of_node;
+ struct simple_card_data *priv;
+ struct snd_soc_dai_link *dai_link;
+ struct asoc_simple_dai *dai_props;
struct device *dev = &pdev->dev;
- int ret;
+ struct device_node *np = pdev->dev.of_node;
+ int num, ret;
/* Allocate the private data */
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- ret = asoc_simple_card_parse_of(np, priv, dev);
+ num = of_get_child_count(np);
+
+ dai_props = devm_kzalloc(dev, sizeof(*dai_props) * num, GFP_KERNEL);
+ dai_link = devm_kzalloc(dev, sizeof(*dai_link) * num, GFP_KERNEL);
+ if (!dai_props || !dai_link)
+ return -ENOMEM;
+
+ priv->dai_props = dai_props;
+ priv->dai_link = dai_link;
+
+ /* Init snd_soc_card */
+ priv->snd_card.owner = THIS_MODULE;
+ priv->snd_card.dev = dev;
+ priv->snd_card.dai_link = priv->dai_link;
+ priv->snd_card.num_links = num;
+ priv->snd_card.codec_conf = &priv->codec_conf;
+ priv->snd_card.num_configs = 1;
+
+ ret = asoc_simple_card_parse_of(np, priv);
if (ret < 0) {
if (ret != -EPROBE_DEFER)
dev_err(dev, "parse error %d\n", ret);
diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c
index 0838478c4c3f..c7b3cbf92faf 100644
--- a/sound/soc/intel/atom/sst-atom-controls.c
+++ b/sound/soc/intel/atom/sst-atom-controls.c
@@ -937,7 +937,7 @@ int send_ssp_cmd(struct snd_soc_dai *dai, const char *id, bool enable)
struct sst_data *drv = snd_soc_dai_get_drvdata(dai);
int ssp_id;
- dev_info(dai->dev, "Enter: enable=%d port_name=%s\n", enable, id);
+ dev_dbg(dai->dev, "Enter: enable=%d port_name=%s\n", enable, id);
if (strcmp(id, "ssp0-port") == 0)
ssp_id = SSP_MODEM;
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 25c6d87c818e..f5a8050351b5 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -771,6 +771,9 @@ static int sst_soc_prepare(struct device *dev)
struct sst_data *drv = dev_get_drvdata(dev);
struct snd_soc_pcm_runtime *rtd;
+ if (!drv->soc_card)
+ return 0;
+
/* suspend all pcms first */
snd_soc_suspend(drv->soc_card->dev);
snd_soc_poweroff(drv->soc_card->dev);
@@ -793,6 +796,9 @@ static void sst_soc_complete(struct device *dev)
struct sst_data *drv = dev_get_drvdata(dev);
struct snd_soc_pcm_runtime *rtd;
+ if (!drv->soc_card)
+ return;
+
/* restart SSPs */
list_for_each_entry(rtd, &drv->soc_card->rtd_list, list) {
struct snd_soc_dai *dai = rtd->cpu_dai;
diff --git a/sound/soc/intel/atom/sst/sst.c b/sound/soc/intel/atom/sst/sst.c
index 9b6e27385dc9..f9ba71315e33 100644
--- a/sound/soc/intel/atom/sst/sst.c
+++ b/sound/soc/intel/atom/sst/sst.c
@@ -27,6 +27,7 @@
#include <linux/pm_qos.h>
#include <linux/async.h>
#include <linux/acpi.h>
+#include <linux/sysfs.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <asm/platform_sst_audio.h>
@@ -242,6 +243,32 @@ int sst_alloc_drv_context(struct intel_sst_drv **ctx,
}
EXPORT_SYMBOL_GPL(sst_alloc_drv_context);
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ if (ctx->fw_version.type == 0 && ctx->fw_version.major == 0 &&
+ ctx->fw_version.minor == 0 && ctx->fw_version.build == 0)
+ return sprintf(buf, "FW not yet loaded\n");
+ else
+ return sprintf(buf, "v%02x.%02x.%02x.%02x\n",
+ ctx->fw_version.type, ctx->fw_version.major,
+ ctx->fw_version.minor, ctx->fw_version.build);
+
+}
+
+DEVICE_ATTR_RO(firmware_version);
+
+static const struct attribute *sst_fw_version_attrs[] = {
+ &dev_attr_firmware_version.attr,
+ NULL,
+};
+
+static const struct attribute_group sst_fw_version_attr_group = {
+ .attrs = (struct attribute **)sst_fw_version_attrs,
+};
+
int sst_context_init(struct intel_sst_drv *ctx)
{
int ret = 0, i;
@@ -315,8 +342,19 @@ int sst_context_init(struct intel_sst_drv *ctx)
dev_err(ctx->dev, "Firmware download failed:%d\n", ret);
goto do_free_mem;
}
+
+ ret = sysfs_create_group(&ctx->dev->kobj,
+ &sst_fw_version_attr_group);
+ if (ret) {
+ dev_err(ctx->dev,
+ "Unable to create sysfs\n");
+ goto err_sysfs;
+ }
+
sst_register(ctx->dev);
return 0;
+err_sysfs:
+ sysfs_remove_group(&ctx->dev->kobj, &sst_fw_version_attr_group);
do_free_mem:
destroy_workqueue(ctx->post_msg_wq);
@@ -330,6 +368,7 @@ void sst_context_cleanup(struct intel_sst_drv *ctx)
pm_runtime_disable(ctx->dev);
sst_unregister(ctx->dev);
sst_set_fw_state_locked(ctx, SST_SHUTDOWN);
+ sysfs_remove_group(&ctx->dev->kobj, &sst_fw_version_attr_group);
flush_scheduled_work();
destroy_workqueue(ctx->post_msg_wq);
pm_qos_remove_request(ctx->qos);
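For reference, DEVICE_ATTR_RO(firmware_version) in the hunk above binds the attribute to the firmware_version_show() callback and marks it read-only; it is roughly equivalent to the open-coded declaration below (a sketch of the macro expansion, not code from the patch):

static struct device_attribute dev_attr_firmware_version =
        __ATTR(firmware_version, 0444, firmware_version_show, NULL);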
diff --git a/sound/soc/intel/atom/sst/sst.h b/sound/soc/intel/atom/sst/sst.h
index 3f493862e98d..5c9a51cc77aa 100644
--- a/sound/soc/intel/atom/sst/sst.h
+++ b/sound/soc/intel/atom/sst/sst.h
@@ -436,6 +436,7 @@ struct intel_sst_drv {
*/
char firmware_name[FW_NAME_SIZE];
+ struct snd_sst_fw_version fw_version;
struct sst_fw_save *fw_save;
};
diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c
index 0a88537ca58a..f4d92bbc5373 100644
--- a/sound/soc/intel/atom/sst/sst_acpi.c
+++ b/sound/soc/intel/atom/sst/sst_acpi.c
@@ -452,6 +452,8 @@ static struct sst_acpi_mach sst_acpi_bytcr[] = {
static struct sst_acpi_mach sst_acpi_chv[] = {
{"10EC5670", "cht-bsw-rt5672", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
&chv_platform_data },
+ {"10EC5672", "cht-bsw-rt5672", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
+ &chv_platform_data },
{"10EC5645", "cht-bsw-rt5645", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
&chv_platform_data },
{"10EC5650", "cht-bsw-rt5645", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
diff --git a/sound/soc/intel/atom/sst/sst_ipc.c b/sound/soc/intel/atom/sst/sst_ipc.c
index bfc889950bb2..374bb61c596d 100644
--- a/sound/soc/intel/atom/sst/sst_ipc.c
+++ b/sound/soc/intel/atom/sst/sst_ipc.c
@@ -236,6 +236,17 @@ static void process_fw_init(struct intel_sst_drv *sst_drv_ctx,
retval = init->result;
goto ret;
}
+ dev_info(sst_drv_ctx->dev, "FW Version %02x.%02x.%02x.%02x\n",
+ init->fw_version.type, init->fw_version.major,
+ init->fw_version.minor, init->fw_version.build);
+ dev_dbg(sst_drv_ctx->dev, "Build date %s Time %s\n",
+ init->build_info.date, init->build_info.time);
+
+ /* Save FW version */
+ sst_drv_ctx->fw_version.type = init->fw_version.type;
+ sst_drv_ctx->fw_version.major = init->fw_version.major;
+ sst_drv_ctx->fw_version.minor = init->fw_version.minor;
+ sst_drv_ctx->fw_version.build = init->fw_version.build;
ret:
sst_wake_up_block(sst_drv_ctx, retval, FW_DWNL_ID, 0 , NULL, 0);
diff --git a/sound/soc/intel/atom/sst/sst_stream.c b/sound/soc/intel/atom/sst/sst_stream.c
index 4ccc80e5e8cc..51bdeeecb7c8 100644
--- a/sound/soc/intel/atom/sst/sst_stream.c
+++ b/sound/soc/intel/atom/sst/sst_stream.c
@@ -104,7 +104,7 @@ int sst_alloc_stream_mrfld(struct intel_sst_drv *sst_drv_ctx, void *params)
sst_init_stream(&sst_drv_ctx->streams[str_id], alloc_param.codec_type,
str_id, alloc_param.operation, 0);
- dev_info(sst_drv_ctx->dev, "Alloc for str %d pipe %#x\n",
+ dev_dbg(sst_drv_ctx->dev, "Alloc for str %d pipe %#x\n",
str_id, pipe_id);
ret = sst_prepare_and_post_msg(sst_drv_ctx, task_id, IPC_CMD,
IPC_IA_ALLOC_STREAM_MRFLD, pipe_id, sizeof(alloc_param),
@@ -415,7 +415,7 @@ int sst_free_stream(struct intel_sst_drv *sst_drv_ctx, int str_id)
str_info->status = STREAM_UN_INIT;
mutex_unlock(&str_info->lock);
- dev_info(sst_drv_ctx->dev, "Free for str %d pipe %#x\n",
+ dev_dbg(sst_drv_ctx->dev, "Free for str %d pipe %#x\n",
str_id, str_info->pipe_id);
retval = sst_prepare_and_post_msg(sst_drv_ctx, str_info->task_id, IPC_CMD,
IPC_IA_FREE_STREAM_MRFLD, str_info->pipe_id, 0,
diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
index 7ab14ce65a73..260447da32b8 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
@@ -23,7 +23,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <linux/kthread.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <asm/div64.h>
@@ -338,7 +337,7 @@ static irqreturn_t sst_byt_irq_thread(int irq, void *context)
spin_unlock_irqrestore(&sst->spinlock, flags);
/* continue to send any remaining messages... */
- kthread_queue_work(&ipc->kworker, &ipc->kwork);
+ schedule_work(&ipc->kwork);
return IRQ_HANDLED;
}
diff --git a/sound/soc/intel/boards/bdw-rt5677.c b/sound/soc/intel/boards/bdw-rt5677.c
index 547e6705bf6d..53c6b4cbb1e1 100644
--- a/sound/soc/intel/boards/bdw-rt5677.c
+++ b/sound/soc/intel/boards/bdw-rt5677.c
@@ -156,7 +156,7 @@ static int bdw_rt5677_hw_params(struct snd_pcm_substream *substream,
return ret;
}
-static struct snd_soc_ops bdw_rt5677_ops = {
+static const struct snd_soc_ops bdw_rt5677_ops = {
.hw_params = bdw_rt5677_hw_params,
};
diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c
index 7486a0022fde..4d7e9decfa92 100644
--- a/sound/soc/intel/boards/broadwell.c
+++ b/sound/soc/intel/boards/broadwell.c
@@ -126,7 +126,7 @@ static int broadwell_rt286_hw_params(struct snd_pcm_substream *substream,
return ret;
}
-static struct snd_soc_ops broadwell_rt286_ops = {
+static const struct snd_soc_ops broadwell_rt286_ops = {
.hw_params = broadwell_rt286_hw_params,
};
@@ -220,10 +220,12 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = {
};
static int broadwell_suspend(struct snd_soc_card *card){
- struct snd_soc_codec *codec;
+ struct snd_soc_component *component;
+
+ list_for_each_entry(component, &card->component_dev_list, card_list) {
+ if (!strcmp(component->name, "i2c-INT343A:00")) {
+ struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
- list_for_each_entry(codec, &card->codec_dev_list, card_list) {
- if (!strcmp(codec->component.name, "i2c-INT343A:00")) {
dev_dbg(codec->dev, "disabling jack detect before going to suspend.\n");
rt286_mic_detect(codec, NULL);
break;
@@ -233,10 +235,12 @@ static int broadwell_suspend(struct snd_soc_card *card){
}
static int broadwell_resume(struct snd_soc_card *card){
- struct snd_soc_codec *codec;
+ struct snd_soc_component *component;
+
+ list_for_each_entry(component, &card->component_dev_list, card_list) {
+ if (!strcmp(component->name, "i2c-INT343A:00")) {
+ struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
- list_for_each_entry(codec, &card->codec_dev_list, card_list) {
- if (!strcmp(codec->component.name, "i2c-INT343A:00")) {
dev_dbg(codec->dev, "enabling jack detect for resume.\n");
rt286_mic_detect(codec, &broadwell_headset);
break;
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index 865a21e557cc..1b4330cd2739 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -30,6 +30,7 @@
#define BXT_DIALOG_CODEC_DAI "da7219-hifi"
#define BXT_MAXIM_CODEC_DAI "HiFi"
#define DUAL_CHANNEL 2
+#define QUAD_CHANNEL 4
static struct snd_soc_jack broxton_headset;
@@ -182,6 +183,16 @@ static struct snd_pcm_hw_constraint_list constraints_channels = {
.mask = 0,
};
+static unsigned int channels_quad[] = {
+ QUAD_CHANNEL,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_channels_quad = {
+ .count = ARRAY_SIZE(channels_quad),
+ .list = channels_quad,
+ .mask = 0,
+};
+
static int bxt_fe_startup(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -248,7 +259,7 @@ static int broxton_da7219_hw_free(struct snd_pcm_substream *substream)
return ret;
}
-static struct snd_soc_ops broxton_da7219_ops = {
+static const struct snd_soc_ops broxton_da7219_ops = {
.hw_params = broxton_da7219_hw_params,
.hw_free = broxton_da7219_hw_free,
};
@@ -258,7 +269,10 @@ static int broxton_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
{
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
- channels->min = channels->max = DUAL_CHANNEL;
+ if (params_channels(params) == 2)
+ channels->min = channels->max = 2;
+ else
+ channels->min = channels->max = 4;
return 0;
}
@@ -267,9 +281,9 @@ static int broxton_dmic_startup(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- runtime->hw.channels_max = DUAL_CHANNEL;
+ runtime->hw.channels_min = runtime->hw.channels_max = QUAD_CHANNEL;
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- &constraints_channels);
+ &constraints_channels_quad);
return snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
@@ -295,7 +309,7 @@ static int broxton_refcap_startup(struct snd_pcm_substream *substream)
&constraints_16000);
};
-static struct snd_soc_ops broxton_refcap_ops = {
+static const struct snd_soc_ops broxton_refcap_ops = {
.startup = broxton_refcap_startup,
};
@@ -348,7 +362,7 @@ static struct snd_soc_dai_link broxton_dais[] = {
.dynamic = 1,
.ops = &broxton_refcap_ops,
},
- [BXT_DPCM_AUDIO_DMIC_CP]
+ [BXT_DPCM_AUDIO_DMIC_CP] =
{
.name = "Bxt Audio DMIC cap",
.stream_name = "dmiccap",
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
index d610bdca1608..1309405b3808 100644
--- a/sound/soc/intel/boards/bxt_rt298.c
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -181,7 +181,7 @@ static int broxton_rt298_hw_params(struct snd_pcm_substream *substream,
return ret;
}
-static struct snd_soc_ops broxton_rt298_ops = {
+static const struct snd_soc_ops broxton_rt298_ops = {
.hw_params = broxton_rt298_hw_params,
};
@@ -230,7 +230,7 @@ static int broxton_dmic_startup(struct snd_pcm_substream *substream)
SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
}
-static struct snd_soc_ops broxton_dmic_ops = {
+static const struct snd_soc_ops broxton_dmic_ops = {
.startup = broxton_dmic_startup,
};
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index bff77a1f27fc..507a86a5eafe 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -57,9 +57,7 @@ struct byt_rt5640_private {
struct clk *mclk;
};
-static unsigned long byt_rt5640_quirk = BYT_RT5640_DMIC1_MAP |
- BYT_RT5640_DMIC_EN |
- BYT_RT5640_MCLK_EN;
+static unsigned long byt_rt5640_quirk = BYT_RT5640_MCLK_EN;
static void log_quirks(struct device *dev)
{
@@ -597,11 +595,11 @@ static int byt_rt5640_aif1_startup(struct snd_pcm_substream *substream)
SNDRV_PCM_HW_PARAM_RATE, 48000);
}
-static struct snd_soc_ops byt_rt5640_aif1_ops = {
+static const struct snd_soc_ops byt_rt5640_aif1_ops = {
.startup = byt_rt5640_aif1_startup,
};
-static struct snd_soc_ops byt_rt5640_be_ssp2_ops = {
+static const struct snd_soc_ops byt_rt5640_be_ssp2_ops = {
.hw_params = byt_rt5640_aif1_hw_params,
};
@@ -689,6 +687,10 @@ static bool is_valleyview(void)
return true;
}
+struct acpi_chan_package { /* ACPICA seems to require 64 bit integers */
+ u64 aif_value; /* 1: AIF1, 2: AIF2 */
+ u64 mclock_value; /* usually 25MHz (0x17d7940), ignored */
+};
static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
{
@@ -698,6 +700,7 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
int i;
int dai_index;
struct byt_rt5640_private *priv;
+ bool is_bytcr = false;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_ATOMIC);
if (!priv)
@@ -734,10 +737,61 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
struct sst_platform_info *p_info = mach->pdata;
const struct sst_res_info *res_info = p_info->res_info;
- /* TODO: use CHAN package info from BIOS to detect AIF1/AIF2 */
- if (res_info->acpi_ipc_irq_index == 0) {
+ if (res_info->acpi_ipc_irq_index == 0)
+ is_bytcr = true;
+ }
+
+ if (is_bytcr) {
+ /*
+ * Baytrail CR platforms may have a CHAN package in the BIOS; try
+ * to find the relevant routing quirk based on it, as is done on
+ * Windows platforms. We have to read the information directly
+ * from the BIOS: at this stage the card is not created and the
+ * links with the codec driver/pdata do not exist yet.
+ */

+
+ struct acpi_chan_package chan_package;
+
+ /* format specified: 2 64-bit integers */
+ struct acpi_buffer format = {sizeof("NN"), "NN"};
+ struct acpi_buffer state = {0, NULL};
+ struct sst_acpi_package_context pkg_ctx;
+ bool pkg_found = false;
+
+ state.length = sizeof(chan_package);
+ state.pointer = &chan_package;
+
+ pkg_ctx.name = "CHAN";
+ pkg_ctx.length = 2;
+ pkg_ctx.format = &format;
+ pkg_ctx.state = &state;
+ pkg_ctx.data_valid = false;
+
+ pkg_found = sst_acpi_find_package_from_hid(mach->id, &pkg_ctx);
+ if (pkg_found) {
+ if (chan_package.aif_value == 1) {
+ dev_info(&pdev->dev, "BIOS Routing: AIF1 connected\n");
+ byt_rt5640_quirk |= BYT_RT5640_SSP0_AIF1;
+ } else if (chan_package.aif_value == 2) {
+ dev_info(&pdev->dev, "BIOS Routing: AIF2 connected\n");
+ byt_rt5640_quirk |= BYT_RT5640_SSP0_AIF2;
+ } else {
+ dev_info(&pdev->dev, "BIOS Routing isn't valid, ignored\n");
+ pkg_found = false;
+ }
+ }
+
+ if (!pkg_found) {
+ /* no BIOS indications, assume SSP0-AIF2 connection */
byt_rt5640_quirk |= BYT_RT5640_SSP0_AIF2;
}
+
+ /* change defaults for Baytrail-CR capture */
+ byt_rt5640_quirk |= BYT_RT5640_IN1_MAP;
+ byt_rt5640_quirk |= BYT_RT5640_DIFF_MIC;
+ } else {
+ byt_rt5640_quirk |= (BYT_RT5640_DMIC1_MAP |
+ BYT_RT5640_DMIC_EN);
}
/* check quirks before creating card */
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index 35f591eab3c9..2d24dc04b597 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -219,11 +219,11 @@ static int byt_rt5651_aif1_startup(struct snd_pcm_substream *substream)
&constraints_48000);
}
-static struct snd_soc_ops byt_rt5651_aif1_ops = {
+static const struct snd_soc_ops byt_rt5651_aif1_ops = {
.startup = byt_rt5651_aif1_startup,
};
-static struct snd_soc_ops byt_rt5651_be_ssp2_ops = {
+static const struct snd_soc_ops byt_rt5651_be_ssp2_ops = {
.hw_params = byt_rt5651_aif1_hw_params,
};
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index cdcced9f32b6..742bc0d4e681 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -204,11 +204,11 @@ static int cht_max98090_headset_init(struct snd_soc_component *component)
return ts3a227e_enable_jack_detect(component, &ctx->jack);
}
-static struct snd_soc_ops cht_aif1_ops = {
+static const struct snd_soc_ops cht_aif1_ops = {
.startup = cht_aif1_startup,
};
-static struct snd_soc_ops cht_be_ssp2_ops = {
+static const struct snd_soc_ops cht_be_ssp2_ops = {
.hw_params = cht_aif1_hw_params,
};
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index 56056ed7fcfd..f504a0e18f91 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -44,6 +44,7 @@ struct cht_acpi_card {
struct cht_mc_private {
struct snd_soc_jack jack;
struct cht_acpi_card *acpi_card;
+ char codec_name[16];
};
static inline struct snd_soc_dai *cht_get_codec_dai(struct snd_soc_card *card)
@@ -250,11 +251,11 @@ static int cht_aif1_startup(struct snd_pcm_substream *substream)
SNDRV_PCM_HW_PARAM_RATE, 48000);
}
-static struct snd_soc_ops cht_aif1_ops = {
+static const struct snd_soc_ops cht_aif1_ops = {
.startup = cht_aif1_startup,
};
-static struct snd_soc_ops cht_be_ssp2_ops = {
+static const struct snd_soc_ops cht_be_ssp2_ops = {
.hw_params = cht_aif1_hw_params,
};
@@ -354,7 +355,6 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
int i;
struct cht_mc_private *drv;
struct snd_soc_card *card = snd_soc_cards[0].soc_card;
- char codec_name[16];
struct sst_acpi_mach *mach;
const char *i2c_name = NULL;
int dai_index = 0;
@@ -374,12 +374,12 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
}
card->dev = &pdev->dev;
mach = card->dev->platform_data;
- sprintf(codec_name, "i2c-%s:00", drv->acpi_card->codec_id);
+ sprintf(drv->codec_name, "i2c-%s:00", drv->acpi_card->codec_id);
/* set correct codec name */
for (i = 0; i < ARRAY_SIZE(cht_dailink); i++)
if (!strcmp(card->dai_link[i].codec_name, "i2c-10EC5645:00")) {
- card->dai_link[i].codec_name = kstrdup(codec_name, GFP_KERNEL);
+ card->dai_link[i].codec_name = drv->codec_name;
dai_index = i;
}
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index df9d254baa18..e4d46d4360d7 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -25,12 +25,14 @@
#include <sound/jack.h>
#include "../../codecs/rt5670.h"
#include "../atom/sst-atom-controls.h"
+#include "../common/sst-acpi.h"
/* The platform clock #3 outputs 19.2Mhz clock to codec as I2S MCLK */
#define CHT_PLAT_CLK_3_HZ 19200000
#define CHT_CODEC_DAI "rt5670-aif1"
static struct snd_soc_jack cht_bsw_headset;
+static char cht_bsw_codec_name[16];
/* Headset jack detection DAPM pins */
static struct snd_soc_jack_pin cht_bsw_headset_pins[] = {
@@ -225,11 +227,11 @@ static int cht_aif1_startup(struct snd_pcm_substream *substream)
SNDRV_PCM_HW_PARAM_RATE, 48000);
}
-static struct snd_soc_ops cht_aif1_ops = {
+static const struct snd_soc_ops cht_aif1_ops = {
.startup = cht_aif1_startup,
};
-static struct snd_soc_ops cht_be_ssp2_ops = {
+static const struct snd_soc_ops cht_be_ssp2_ops = {
.hw_params = cht_aif1_hw_params,
};
@@ -292,10 +294,12 @@ static struct snd_soc_dai_link cht_dailink[] = {
static int cht_suspend_pre(struct snd_soc_card *card)
{
- struct snd_soc_codec *codec;
+ struct snd_soc_component *component;
+
+ list_for_each_entry(component, &card->component_dev_list, card_list) {
+ if (!strcmp(component->name, "i2c-10EC5670:00")) {
+ struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
- list_for_each_entry(codec, &card->codec_dev_list, card_list) {
- if (!strcmp(codec->component.name, "i2c-10EC5670:00")) {
dev_dbg(codec->dev, "disabling jack detect before going to suspend.\n");
rt5670_jack_suspend(codec);
break;
@@ -306,10 +310,12 @@ static int cht_suspend_pre(struct snd_soc_card *card)
static int cht_resume_post(struct snd_soc_card *card)
{
- struct snd_soc_codec *codec;
+ struct snd_soc_component *component;
+
+ list_for_each_entry(component, &card->component_dev_list, card_list) {
+ if (!strcmp(component->name, "i2c-10EC5670:00")) {
+ struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
- list_for_each_entry(codec, &card->codec_dev_list, card_list) {
- if (!strcmp(codec->component.name, "i2c-10EC5670:00")) {
dev_dbg(codec->dev, "enabling jack detect for resume.\n");
rt5670_jack_resume(codec);
break;
@@ -335,9 +341,33 @@ static struct snd_soc_card snd_soc_card_cht = {
.resume_post = cht_resume_post,
};
+#define RT5672_I2C_DEFAULT "i2c-10EC5670:00"
+
static int snd_cht_mc_probe(struct platform_device *pdev)
{
int ret_val = 0;
+ struct sst_acpi_mach *mach = pdev->dev.platform_data;
+ const char *i2c_name;
+ int i;
+
+ strcpy(cht_bsw_codec_name, RT5672_I2C_DEFAULT);
+
+ /* fixup codec name based on HID */
+ if (mach) {
+ i2c_name = sst_acpi_find_name_from_hid(mach->id);
+ if (i2c_name) {
+ snprintf(cht_bsw_codec_name, sizeof(cht_bsw_codec_name),
+ "i2c-%s", i2c_name);
+ for (i = 0; i < ARRAY_SIZE(cht_dailink); i++) {
+ if (!strcmp(cht_dailink[i].codec_name,
+ RT5672_I2C_DEFAULT)) {
+ cht_dailink[i].codec_name =
+ cht_bsw_codec_name;
+ break;
+ }
+ }
+ }
+ }
/* register the soc card */
snd_soc_card_cht.dev = &pdev->dev;
diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c
index 863f1d5e2a2c..5e1ea0371c90 100644
--- a/sound/soc/intel/boards/haswell.c
+++ b/sound/soc/intel/boards/haswell.c
@@ -81,7 +81,7 @@ static int haswell_rt5640_hw_params(struct snd_pcm_substream *substream,
return ret;
}
-static struct snd_soc_ops haswell_rt5640_ops = {
+static const struct snd_soc_ops haswell_rt5640_ops = {
.hw_params = haswell_rt5640_hw_params,
};
diff --git a/sound/soc/intel/boards/mfld_machine.c b/sound/soc/intel/boards/mfld_machine.c
index 34f46c72a0e2..4e08885f37aa 100644
--- a/sound/soc/intel/boards/mfld_machine.c
+++ b/sound/soc/intel/boards/mfld_machine.c
@@ -81,9 +81,9 @@ static struct snd_soc_jack_zone mfld_zones[] = {
};
/* sound card controls */
-static const char *headset_switch_text[] = {"Earpiece", "Headset"};
+static const char * const headset_switch_text[] = {"Earpiece", "Headset"};
-static const char *lo_text[] = {"Vibra", "Headset", "IHF", "None"};
+static const char * const lo_text[] = {"Vibra", "Headset", "IHF", "None"};
static const struct soc_enum headset_enum =
SOC_ENUM_SINGLE_EXT(2, headset_switch_text);
diff --git a/sound/soc/intel/boards/skl_nau88l25_max98357a.c b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
index 25db5be7fdfa..fddd1cd12f13 100644
--- a/sound/soc/intel/boards/skl_nau88l25_max98357a.c
+++ b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
@@ -332,7 +332,7 @@ static int skylake_nau8825_hw_params(struct snd_pcm_substream *substream,
return ret;
}
-static struct snd_soc_ops skylake_nau8825_ops = {
+static const struct snd_soc_ops skylake_nau8825_ops = {
.hw_params = skylake_nau8825_hw_params,
};
@@ -382,7 +382,7 @@ static int skylake_dmic_startup(struct snd_pcm_substream *substream)
SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
}
-static struct snd_soc_ops skylake_dmic_ops = {
+static const struct snd_soc_ops skylake_dmic_ops = {
.startup = skylake_dmic_startup,
};
@@ -416,7 +416,7 @@ static int skylake_refcap_startup(struct snd_pcm_substream *substream)
&constraints_16000);
}
-static struct snd_soc_ops skylaye_refcap_ops = {
+static const struct snd_soc_ops skylaye_refcap_ops = {
.startup = skylake_refcap_startup,
};
diff --git a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
index 69c5d5da4e86..8ab865ee0cad 100644
--- a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
+++ b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
@@ -394,7 +394,7 @@ static int skylake_nau8825_hw_params(struct snd_pcm_substream *substream,
return ret;
}
-static struct snd_soc_ops skylake_nau8825_ops = {
+static const struct snd_soc_ops skylake_nau8825_ops = {
.hw_params = skylake_nau8825_hw_params,
};
@@ -430,7 +430,7 @@ static int skylake_dmic_startup(struct snd_pcm_substream *substream)
SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
}
-static struct snd_soc_ops skylake_dmic_ops = {
+static const struct snd_soc_ops skylake_dmic_ops = {
.startup = skylake_dmic_startup,
};
@@ -464,7 +464,7 @@ static int skylake_refcap_startup(struct snd_pcm_substream *substream)
&constraints_16000);
}
-static struct snd_soc_ops skylaye_refcap_ops = {
+static const struct snd_soc_ops skylaye_refcap_ops = {
.startup = skylake_refcap_startup,
};
diff --git a/sound/soc/intel/boards/skl_rt286.c b/sound/soc/intel/boards/skl_rt286.c
index 88c61e8cb87f..dc5c3611a6ff 100644
--- a/sound/soc/intel/boards/skl_rt286.c
+++ b/sound/soc/intel/boards/skl_rt286.c
@@ -250,7 +250,7 @@ static int skylake_rt286_hw_params(struct snd_pcm_substream *substream,
return ret;
}
-static struct snd_soc_ops skylake_rt286_ops = {
+static const struct snd_soc_ops skylake_rt286_ops = {
.hw_params = skylake_rt286_hw_params,
};
@@ -289,7 +289,7 @@ static int skylake_dmic_startup(struct snd_pcm_substream *substream)
SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
}
-static struct snd_soc_ops skylake_dmic_ops = {
+static const struct snd_soc_ops skylake_dmic_ops = {
.startup = skylake_dmic_startup,
};
diff --git a/sound/soc/intel/common/sst-acpi.h b/sound/soc/intel/common/sst-acpi.h
index 012742299dd5..214e000667ae 100644
--- a/sound/soc/intel/common/sst-acpi.h
+++ b/sound/soc/intel/common/sst-acpi.h
@@ -15,14 +15,29 @@
#include <linux/stddef.h>
#include <linux/acpi.h>
-/* translation fron HID to I2C name, needed for DAI codec_name */
+struct sst_acpi_package_context {
+ char *name; /* package name */
+ int length; /* number of elements */
+ struct acpi_buffer *format;
+ struct acpi_buffer *state;
+ bool data_valid;
+};
+
#if IS_ENABLED(CONFIG_ACPI)
+/* translation fron HID to I2C name, needed for DAI codec_name */
const char *sst_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN]);
+bool sst_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
+ struct sst_acpi_package_context *ctx);
#else
static inline const char *sst_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
{
return NULL;
}
+static inline bool sst_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
+ struct sst_acpi_package_context *ctx)
+{
+ return false;
+}
#endif
/* acpi match */
diff --git a/sound/soc/intel/common/sst-ipc.c b/sound/soc/intel/common/sst-ipc.c
index 6c672ac79cce..62f3a8e0ec87 100644
--- a/sound/soc/intel/common/sst-ipc.c
+++ b/sound/soc/intel/common/sst-ipc.c
@@ -26,7 +26,6 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <linux/kthread.h>
#include <sound/asound.h>
#include "sst-dsp.h"
@@ -109,10 +108,9 @@ static int ipc_tx_message(struct sst_generic_ipc *ipc, u64 header,
ipc->ops.tx_data_copy(msg, tx_data, tx_bytes);
list_add_tail(&msg->list, &ipc->tx_list);
+ schedule_work(&ipc->kwork);
spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
- kthread_queue_work(&ipc->kworker, &ipc->kwork);
-
if (wait)
return tx_wait_done(ipc, msg, rx_data);
else
@@ -156,42 +154,56 @@ free_mem:
return -ENOMEM;
}
-static void ipc_tx_msgs(struct kthread_work *work)
+static void ipc_tx_msgs(struct work_struct *work)
{
struct sst_generic_ipc *ipc =
container_of(work, struct sst_generic_ipc, kwork);
struct ipc_message *msg;
- unsigned long flags;
- spin_lock_irqsave(&ipc->dsp->spinlock, flags);
+ spin_lock_irq(&ipc->dsp->spinlock);
- if (list_empty(&ipc->tx_list) || ipc->pending) {
- spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
- return;
- }
-
- /* if the DSP is busy, we will TX messages after IRQ.
- * also postpone if we are in the middle of procesing completion irq*/
- if (ipc->ops.is_dsp_busy && ipc->ops.is_dsp_busy(ipc->dsp)) {
- dev_dbg(ipc->dev, "ipc_tx_msgs dsp busy\n");
- spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
- return;
- }
+ while (!list_empty(&ipc->tx_list) && !ipc->pending) {
+ /* if the DSP is busy, we will TX messages after IRQ.
+ * also postpone if we are in the middle of processing
+ * completion irq
+ */
+ if (ipc->ops.is_dsp_busy && ipc->ops.is_dsp_busy(ipc->dsp)) {
+ dev_dbg(ipc->dev, "ipc_tx_msgs dsp busy\n");
+ break;
+ }
- msg = list_first_entry(&ipc->tx_list, struct ipc_message, list);
- list_move(&msg->list, &ipc->rx_list);
+ msg = list_first_entry(&ipc->tx_list, struct ipc_message, list);
+ list_move(&msg->list, &ipc->rx_list);
- if (ipc->ops.tx_msg != NULL)
- ipc->ops.tx_msg(ipc, msg);
+ if (ipc->ops.tx_msg != NULL)
+ ipc->ops.tx_msg(ipc, msg);
+ }
- spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
+ spin_unlock_irq(&ipc->dsp->spinlock);
}
int sst_ipc_tx_message_wait(struct sst_generic_ipc *ipc, u64 header,
void *tx_data, size_t tx_bytes, void *rx_data, size_t rx_bytes)
{
- return ipc_tx_message(ipc, header, tx_data, tx_bytes,
+ int ret;
+
+ /*
+ * The DSP may be in a low-power active state, so check
+ * whether it supports the DSP lp-on method and, if so,
+ * invoke that before sending the IPC.
+ */
+ if (ipc->ops.check_dsp_lp_on)
+ if (ipc->ops.check_dsp_lp_on(ipc->dsp, true))
+ return -EIO;
+
+ ret = ipc_tx_message(ipc, header, tx_data, tx_bytes,
rx_data, rx_bytes, 1);
+
+ if (ipc->ops.check_dsp_lp_on)
+ if (ipc->ops.check_dsp_lp_on(ipc->dsp, false))
+ return -EIO;
+
+ return ret;
}
EXPORT_SYMBOL_GPL(sst_ipc_tx_message_wait);
@@ -203,6 +215,14 @@ int sst_ipc_tx_message_nowait(struct sst_generic_ipc *ipc, u64 header,
}
EXPORT_SYMBOL_GPL(sst_ipc_tx_message_nowait);
+int sst_ipc_tx_message_nopm(struct sst_generic_ipc *ipc, u64 header,
+ void *tx_data, size_t tx_bytes, void *rx_data, size_t rx_bytes)
+{
+ return ipc_tx_message(ipc, header, tx_data, tx_bytes,
+ rx_data, rx_bytes, 1);
+}
+EXPORT_SYMBOL_GPL(sst_ipc_tx_message_nopm);
+
struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
u64 header)
{
@@ -280,19 +300,7 @@ int sst_ipc_init(struct sst_generic_ipc *ipc)
if (ret < 0)
return -ENOMEM;
- /* start the IPC message thread */
- kthread_init_worker(&ipc->kworker);
- ipc->tx_thread = kthread_run(kthread_worker_fn,
- &ipc->kworker, "%s",
- dev_name(ipc->dev));
- if (IS_ERR(ipc->tx_thread)) {
- dev_err(ipc->dev, "error: failed to create message TX task\n");
- ret = PTR_ERR(ipc->tx_thread);
- kfree(ipc->msg);
- return ret;
- }
-
- kthread_init_work(&ipc->kwork, ipc_tx_msgs);
+ INIT_WORK(&ipc->kwork, ipc_tx_msgs);
return 0;
}
EXPORT_SYMBOL_GPL(sst_ipc_init);
@@ -301,8 +309,7 @@ void sst_ipc_fini(struct sst_generic_ipc *ipc)
{
int i;
- if (ipc->tx_thread)
- kthread_stop(ipc->tx_thread);
+ cancel_work_sync(&ipc->kwork);
if (ipc->msg) {
for (i = 0; i < IPC_EMPTY_LIST_SIZE; i++) {
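For reference, the sst-ipc change above drops the dedicated kthread_worker in favour of a work item on the system workqueue; the lifecycle it relies on is sketched below as a self-contained module (illustrative names, not the driver's):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct work_struct tx_work;          /* stand-in for ipc->kwork */

static void tx_msgs(struct work_struct *work)
{
        pr_info("draining illustrative TX list\n");
}

static int __init demo_init(void)
{
        INIT_WORK(&tx_work, tx_msgs);       /* replaces kthread_init_worker()/kthread_run() */
        schedule_work(&tx_work);            /* replaces kthread_queue_work() */
        return 0;
}

static void __exit demo_exit(void)
{
        cancel_work_sync(&tx_work);         /* replaces kthread_stop() */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");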
diff --git a/sound/soc/intel/common/sst-ipc.h b/sound/soc/intel/common/sst-ipc.h
index ceb7e468a3fa..7ed42a640ad6 100644
--- a/sound/soc/intel/common/sst-ipc.h
+++ b/sound/soc/intel/common/sst-ipc.h
@@ -23,7 +23,6 @@
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
-#include <linux/kthread.h>
#define IPC_MAX_MAILBOX_BYTES 256
@@ -52,6 +51,7 @@ struct sst_plat_ipc_ops {
void (*tx_data_copy)(struct ipc_message *, char *, size_t);
u64 (*reply_msg_match)(u64 header, u64 *mask);
bool (*is_dsp_busy)(struct sst_dsp *dsp);
+ int (*check_dsp_lp_on)(struct sst_dsp *dsp, bool state);
};
/* SST generic IPC data */
@@ -65,8 +65,7 @@ struct sst_generic_ipc {
struct list_head empty_list;
wait_queue_head_t wait_txq;
struct task_struct *tx_thread;
- struct kthread_worker kworker;
- struct kthread_work kwork;
+ struct work_struct kwork;
bool pending;
struct ipc_message *msg;
int tx_data_max_size;
@@ -81,6 +80,9 @@ int sst_ipc_tx_message_wait(struct sst_generic_ipc *ipc, u64 header,
int sst_ipc_tx_message_nowait(struct sst_generic_ipc *ipc, u64 header,
void *tx_data, size_t tx_bytes);
+int sst_ipc_tx_message_nopm(struct sst_generic_ipc *ipc, u64 header,
+ void *tx_data, size_t tx_bytes, void *rx_data, size_t rx_bytes);
+
struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
u64 header);
diff --git a/sound/soc/intel/common/sst-match-acpi.c b/sound/soc/intel/common/sst-match-acpi.c
index 789843307a49..1070f3ad23e5 100644
--- a/sound/soc/intel/common/sst-match-acpi.c
+++ b/sound/soc/intel/common/sst-match-acpi.c
@@ -77,5 +77,62 @@ struct sst_acpi_mach *sst_acpi_find_machine(struct sst_acpi_mach *machines)
}
EXPORT_SYMBOL_GPL(sst_acpi_find_machine);
+static acpi_status sst_acpi_find_package(acpi_handle handle, u32 level,
+ void *context, void **ret)
+{
+ struct acpi_device *adev;
+ acpi_status status = AE_OK;
+ struct sst_acpi_package_context *pkg_ctx = context;
+
+ pkg_ctx->data_valid = false;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ if (adev->status.present && adev->status.functional) {
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *myobj = NULL;
+
+ status = acpi_evaluate_object_typed(handle, pkg_ctx->name,
+ NULL, &buffer,
+ ACPI_TYPE_PACKAGE);
+ if (ACPI_FAILURE(status))
+ return AE_OK;
+
+ myobj = buffer.pointer;
+ if (!myobj || myobj->package.count != pkg_ctx->length) {
+ kfree(buffer.pointer);
+ return AE_OK;
+ }
+
+ status = acpi_extract_package(myobj,
+ pkg_ctx->format, pkg_ctx->state);
+ if (ACPI_FAILURE(status)) {
+ kfree(buffer.pointer);
+ return AE_OK;
+ }
+
+ kfree(buffer.pointer);
+ pkg_ctx->data_valid = true;
+ return AE_CTRL_TERMINATE;
+ }
+
+ return AE_OK;
+}
+
+bool sst_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
+ struct sst_acpi_package_context *ctx)
+{
+ acpi_status status;
+
+ status = acpi_get_devices(hid, sst_acpi_find_package, ctx, NULL);
+
+ if (ACPI_FAILURE(status) || !ctx->data_valid)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(sst_acpi_find_package_from_hid);
+
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Common ACPI Match module");
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index e432a31fd9f2..a3459d1682a6 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -26,7 +26,6 @@
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
-#include <linux/kthread.h>
#include <linux/firmware.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
@@ -818,7 +817,7 @@ static irqreturn_t hsw_irq_thread(int irq, void *context)
spin_unlock_irqrestore(&sst->spinlock, flags);
/* continue to send any remaining messages... */
- kthread_queue_work(&ipc->kworker, &ipc->kwork);
+ schedule_work(&ipc->kwork);
return IRQ_HANDLED;
}
diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c
index 1d251d59bcb9..1f9f33d34000 100644
--- a/sound/soc/intel/skylake/bxt-sst.c
+++ b/sound/soc/intel/skylake/bxt-sst.c
@@ -43,6 +43,9 @@
#define BXT_ADSP_FW_BIN_HDR_OFFSET 0x2000
+/* Delay before scheduling D0i3 entry */
+#define BXT_D0I3_DELAY 5000
+
static unsigned int bxt_get_errorcode(struct sst_dsp *ctx)
{
return sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE);
@@ -288,6 +291,141 @@ sst_load_base_firmware_failed:
return ret;
}
+/*
+ * Decide the D0i3 state that can be targeted based on the usecase
+ * ref counts and DSP state
+ *
+ * Decision Matrix: (X= dont care; state = target state)
+ *
+ * DSP state != SKL_DSP_RUNNING ; state = no d0i3
+ *
+ * DSP state == SKL_DSP_RUNNING , the following matrix applies
+ * non_d0i3 >0; streaming =X; non_streaming =X; state = no d0i3
+ * non_d0i3 =X; streaming =0; non_streaming =0; state = no d0i3
+ * non_d0i3 =0; streaming >0; non_streaming =X; state = streaming d0i3
+ * non_d0i3 =0; streaming =0; non_streaming >0; state = non-streaming d0i3
+ */
+static int bxt_d0i3_target_state(struct sst_dsp *ctx)
+{
+ struct skl_sst *skl = ctx->thread_context;
+ struct skl_d0i3_data *d0i3 = &skl->d0i3;
+
+ if (skl->cores.state[SKL_DSP_CORE0_ID] != SKL_DSP_RUNNING)
+ return SKL_DSP_D0I3_NONE;
+
+ if (d0i3->non_d0i3)
+ return SKL_DSP_D0I3_NONE;
+ else if (d0i3->streaming)
+ return SKL_DSP_D0I3_STREAMING;
+ else if (d0i3->non_streaming)
+ return SKL_DSP_D0I3_NON_STREAMING;
+ else
+ return SKL_DSP_D0I3_NONE;
+}
+
+static void bxt_set_dsp_D0i3(struct work_struct *work)
+{
+ int ret;
+ struct skl_ipc_d0ix_msg msg;
+ struct skl_sst *skl = container_of(work,
+ struct skl_sst, d0i3.work.work);
+ struct sst_dsp *ctx = skl->dsp;
+ struct skl_d0i3_data *d0i3 = &skl->d0i3;
+ int target_state;
+
+ dev_dbg(ctx->dev, "In %s:\n", __func__);
+
+ /* D0i3 entry allowed only if core 0 alone is running */
+ if (skl_dsp_get_enabled_cores(ctx) != SKL_DSP_CORE0_MASK) {
+ dev_warn(ctx->dev,
+ "D0i3 allowed when only core0 running:Exit\n");
+ return;
+ }
+
+ target_state = bxt_d0i3_target_state(ctx);
+ if (target_state == SKL_DSP_D0I3_NONE)
+ return;
+
+ msg.instance_id = 0;
+ msg.module_id = 0;
+ msg.wake = 1;
+ msg.streaming = 0;
+ if (target_state == SKL_DSP_D0I3_STREAMING)
+ msg.streaming = 1;
+
+ ret = skl_ipc_set_d0ix(&skl->ipc, &msg);
+
+ if (ret < 0) {
+ dev_err(ctx->dev, "Failed to set DSP to D0i3 state\n");
+ return;
+ }
+
+ /* Set vendor-specific register D0I3C.I3 to enable D0i3 */
+ if (skl->update_d0i3c)
+ skl->update_d0i3c(skl->dev, true);
+
+ d0i3->state = target_state;
+ skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING_D0I3;
+}
+
+static int bxt_schedule_dsp_D0i3(struct sst_dsp *ctx)
+{
+ struct skl_sst *skl = ctx->thread_context;
+ struct skl_d0i3_data *d0i3 = &skl->d0i3;
+
+ /* Schedule D0i3 only if the usecase ref counts are appropriate */
+ if (bxt_d0i3_target_state(ctx) != SKL_DSP_D0I3_NONE) {
+
+ dev_dbg(ctx->dev, "%s: Schedule D0i3\n", __func__);
+
+ schedule_delayed_work(&d0i3->work,
+ msecs_to_jiffies(BXT_D0I3_DELAY));
+ }
+
+ return 0;
+}
+
+static int bxt_set_dsp_D0i0(struct sst_dsp *ctx)
+{
+ int ret;
+ struct skl_ipc_d0ix_msg msg;
+ struct skl_sst *skl = ctx->thread_context;
+
+ dev_dbg(ctx->dev, "In %s:\n", __func__);
+
+ /* First, cancel any pending attempt to put the DSP into D0i3 */
+ cancel_delayed_work_sync(&skl->d0i3.work);
+
+ /* If the DSP is not currently in D0i3, there is nothing to do */
+ if (skl->cores.state[SKL_DSP_CORE0_ID] != SKL_DSP_RUNNING_D0I3)
+ return 0;
+
+ dev_dbg(ctx->dev, "Set DSP to D0i0\n");
+
+ msg.instance_id = 0;
+ msg.module_id = 0;
+ msg.streaming = 0;
+ msg.wake = 0;
+
+ if (skl->d0i3.state == SKL_DSP_D0I3_STREAMING)
+ msg.streaming = 1;
+
+ /* Clear vendor-specific register D0I3C.I3 to disable D0i3 */
+ if (skl->update_d0i3c)
+ skl->update_d0i3c(skl->dev, false);
+
+ ret = skl_ipc_set_d0ix(&skl->ipc, &msg);
+ if (ret < 0) {
+ dev_err(ctx->dev, "Failed to set DSP to D0i0\n");
+ return ret;
+ }
+
+ skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING;
+ skl->d0i3.state = SKL_DSP_D0I3_NONE;
+
+ return 0;
+}
+
static int bxt_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
{
struct skl_sst *skl = ctx->thread_context;
@@ -414,6 +552,8 @@ static int bxt_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
static struct skl_dsp_fw_ops bxt_fw_ops = {
.set_state_D0 = bxt_set_dsp_D0,
.set_state_D3 = bxt_set_dsp_D3,
+ .set_state_D0i3 = bxt_schedule_dsp_D0i3,
+ .set_state_D0i0 = bxt_set_dsp_D0i0,
.load_fw = bxt_load_base_firmware,
.get_fw_errcode = bxt_get_errorcode,
.load_library = bxt_load_library,
@@ -470,10 +610,15 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
if (ret)
return ret;
+ /* Register the D0ix transition check used by the IPC layer */
+ skl->ipc.ops.check_dsp_lp_on = skl_ipc_check_D0i0;
+
skl->cores.count = 2;
skl->boot_complete = false;
init_waitqueue_head(&skl->boot_wait);
skl->is_first_boot = true;
+ INIT_DELAYED_WORK(&skl->d0i3.work, bxt_set_dsp_D0i3);
+ skl->d0i3.state = SKL_DSP_D0I3_NONE;
if (dsp)
*dsp = skl;
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index 805b7f2173f3..e79cbcf6e462 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -294,6 +294,33 @@ int skl_free_dsp(struct skl *skl)
return 0;
}
+/*
+ * In the case of "suspend_active", i.e. the audio IP being active
+ * during system suspend, immediately execute any pending D0i3 work
+ * before suspending. This is needed for the IP to work in low power
+ * mode during system suspend. In the case of normal suspend, cancel
+ * any pending D0i3 work.
+ */
+int skl_suspend_late_dsp(struct skl *skl)
+{
+ struct skl_sst *ctx = skl->skl_sst;
+ struct delayed_work *dwork;
+
+ if (!ctx)
+ return 0;
+
+ dwork = &ctx->d0i3.work;
+
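+	/* The D0i3 work is only initialized on platforms that implement D0i3 */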
+ if (dwork->work.func) {
+ if (skl->supend_active)
+ flush_delayed_work(dwork);
+ else
+ cancel_delayed_work_sync(dwork);
+ }
+
+ return 0;
+}
+
int skl_suspend_dsp(struct skl *skl)
{
struct skl_sst *ctx = skl->skl_sst;
@@ -500,16 +527,14 @@ static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
int skl_dsp_set_dma_control(struct skl_sst *ctx, struct skl_module_cfg *mconfig)
{
struct skl_dma_control *dma_ctrl;
- struct skl_i2s_config_blob config_blob;
struct skl_ipc_large_config_msg msg = {0};
int err = 0;
/*
- * if blob size is same as capablity size, then no dma control
- * present so return
+ * If the blob size is zero, there is nothing to send, so return
*/
- if (mconfig->formats_config.caps_size == sizeof(config_blob))
+ if (mconfig->formats_config.caps_size == 0)
return 0;
msg.large_param_id = DMA_CONTROL_ID;
@@ -523,7 +548,7 @@ int skl_dsp_set_dma_control(struct skl_sst *ctx, struct skl_module_cfg *mconfig)
dma_ctrl->node_id = skl_get_node_id(ctx, mconfig);
/* size in dwords */
- dma_ctrl->config_length = sizeof(config_blob) / 4;
+ dma_ctrl->config_length = mconfig->formats_config.caps_size / 4;
memcpy(dma_ctrl->config_data, mconfig->formats_config.caps,
mconfig->formats_config.caps_size);
@@ -531,7 +556,6 @@ int skl_dsp_set_dma_control(struct skl_sst *ctx, struct skl_module_cfg *mconfig)
err = skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)dma_ctrl);
kfree(dma_ctrl);
-
return err;
}
@@ -1042,7 +1066,8 @@ int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe)
dev_dbg(ctx->dev, "%s: pipe_id = %d\n", __func__, pipe->ppl_id);
ret = skl_ipc_create_pipeline(&ctx->ipc, pipe->memory_pages,
- pipe->pipe_priority, pipe->ppl_id);
+ pipe->pipe_priority, pipe->ppl_id,
+ pipe->lp_mode);
if (ret < 0) {
dev_err(ctx->dev, "Failed to create pipeline\n");
return ret;
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 58c728662600..84b5101e6ca6 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -144,6 +144,8 @@ static int skl_pcm_open(struct snd_pcm_substream *substream,
struct hdac_ext_stream *stream;
struct snd_pcm_runtime *runtime = substream->runtime;
struct skl_dma_params *dma_params;
+ struct skl *skl = get_skl_ctx(dai->dev);
+ struct skl_module_cfg *mconfig;
dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
@@ -177,6 +179,9 @@ static int skl_pcm_open(struct snd_pcm_substream *substream,
skl_set_suspend_active(substream, dai, true);
snd_pcm_set_sync(substream);
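+	/* Count this stream's D0i3 capability towards the usecase ref counts */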
+ mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
+ skl_tplg_d0i3_get(skl, mconfig->d0i3_caps);
+
return 0;
}
@@ -302,6 +307,7 @@ static void skl_pcm_close(struct snd_pcm_substream *substream,
struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
struct skl_dma_params *dma_params = NULL;
struct skl *skl = ebus_to_skl(ebus);
+ struct skl_module_cfg *mconfig;
dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
@@ -325,6 +331,9 @@ static void skl_pcm_close(struct snd_pcm_substream *substream,
skl->skl_sst->miscbdcg_disabled = false;
}
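+	/* Drop this stream's D0i3 capability from the usecase ref counts */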
+ mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
+ skl_tplg_d0i3_put(skl, mconfig->d0i3_caps);
+
kfree(dma_params);
}
@@ -1031,10 +1040,24 @@ static snd_pcm_uframes_t skl_platform_pcm_pointer
(struct snd_pcm_substream *substream)
{
struct hdac_ext_stream *hstream = get_hdac_ext_stream(substream);
+ struct hdac_ext_bus *ebus = get_bus_ctx(substream);
unsigned int pos;
- /* use the position buffer as default */
- pos = snd_hdac_stream_get_pos_posbuf(hdac_stream(hstream));
+ /*
+ * Use DPIB for playback streams: the periodic DMA position-in-
+ * buffer writes may be scheduled at the same time as, or later
+ * than, the MSI and are not guaranteed to reflect the position of
+ * the last buffer that was transferred, whereas the DPIB register
+ * in HDA space does reflect the data actually transferred.
+ * Use the position buffer for capture, as the DPIB write completes
+ * before the data is actually written to DDR.
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ pos = readl(ebus->bus.remap_addr + AZX_REG_VS_SDXDPIB_XBASE +
+ (AZX_REG_VS_SDXDPIB_XINTERVAL *
+ hdac_stream(hstream)->index));
+ else
+ pos = snd_hdac_stream_get_pos_posbuf(hdac_stream(hstream));
if (pos >= hdac_stream(hstream)->bufsize)
pos = 0;
@@ -1197,6 +1220,7 @@ static int skl_platform_soc_probe(struct snd_soc_platform *platform)
return ret;
}
skl_populate_modules(skl);
+ skl->skl_sst->update_d0i3c = skl_update_d0i3c;
}
pm_runtime_mark_last_busy(platform->dev);
pm_runtime_put_autosuspend(platform->dev);
diff --git a/sound/soc/intel/skylake/skl-sst-cldma.c b/sound/soc/intel/skylake/skl-sst-cldma.c
index efa2532114ba..c9f6d87381db 100644
--- a/sound/soc/intel/skylake/skl-sst-cldma.c
+++ b/sound/soc/intel/skylake/skl-sst-cldma.c
@@ -17,7 +17,6 @@
#include <linux/device.h>
#include <linux/mm.h>
-#include <linux/kthread.h>
#include <linux/delay.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h
index b9e71d051fb1..7c272ba0f4b5 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.h
+++ b/sound/soc/intel/skylake/skl-sst-dsp.h
@@ -126,11 +126,21 @@ struct sst_dsp_device;
#define SKL_ADSPCS_CPA_SHIFT 24
#define SKL_ADSPCS_CPA_MASK(cm) ((cm) << SKL_ADSPCS_CPA_SHIFT)
+/* DSP Core state */
enum skl_dsp_states {
SKL_DSP_RUNNING = 1,
+ /* Running in D0i3 state; can be in streaming or non-streaming D0i3 */
+ SKL_DSP_RUNNING_D0I3,
SKL_DSP_RESET,
};
+/* D0i3 substates */
+enum skl_dsp_d0i3_states {
+ SKL_DSP_D0I3_NONE = -1, /* No D0i3 */
+ SKL_DSP_D0I3_NON_STREAMING = 0,
+ SKL_DSP_D0I3_STREAMING = 1,
+};
+
struct skl_dsp_fw_ops {
int (*load_fw)(struct sst_dsp *ctx);
/* FW module parser/loader */
@@ -139,6 +149,8 @@ struct skl_dsp_fw_ops {
int (*parse_fw)(struct sst_dsp *ctx);
int (*set_state_D0)(struct sst_dsp *ctx, unsigned int core_id);
int (*set_state_D3)(struct sst_dsp *ctx, unsigned int core_id);
+ int (*set_state_D0i3)(struct sst_dsp *ctx);
+ int (*set_state_D0i0)(struct sst_dsp *ctx);
unsigned int (*get_fw_errcode)(struct sst_dsp *ctx);
int (*load_mod)(struct sst_dsp *ctx, u16 mod_id, u8 *mod_name);
int (*unload_mod)(struct sst_dsp *ctx, u16 mod_id);
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
index 797cf4053235..e1391dfbc9e9 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.c
+++ b/sound/soc/intel/skylake/skl-sst-ipc.c
@@ -81,6 +81,11 @@
#define IPC_INSTANCE_ID(x) (((x) & IPC_INSTANCE_ID_MASK) \
<< IPC_INSTANCE_ID_SHIFT)
+#define IPC_PPL_LP_MODE_SHIFT 0
+#define IPC_PPL_LP_MODE_MASK 0x1
+#define IPC_PPL_LP_MODE(x) (((x) & IPC_PPL_LP_MODE_MASK) \
+ << IPC_PPL_LP_MODE_SHIFT)
+
/* Set pipeline state message */
#define IPC_PPL_STATE_SHIFT 0
#define IPC_PPL_STATE_MASK 0x1F
@@ -172,6 +177,17 @@
<< IPC_INITIAL_BLOCK_SHIFT)
#define IPC_INITIAL_BLOCK_CLEAR ~(IPC_INITIAL_BLOCK_MASK \
<< IPC_INITIAL_BLOCK_SHIFT)
+/* Set D0ix IPC extension register */
+#define IPC_D0IX_WAKE_SHIFT 0
+#define IPC_D0IX_WAKE_MASK 0x1
+#define IPC_D0IX_WAKE(x) (((x) & IPC_D0IX_WAKE_MASK) \
+ << IPC_D0IX_WAKE_SHIFT)
+
+#define IPC_D0IX_STREAMING_SHIFT 1
+#define IPC_D0IX_STREAMING_MASK 0x1
+#define IPC_D0IX_STREAMING(x) (((x) & IPC_D0IX_STREAMING_MASK) \
+ << IPC_D0IX_STREAMING_SHIFT)
+
enum skl_ipc_msg_target {
IPC_FW_GEN_MSG = 0,
@@ -258,7 +274,8 @@ enum skl_ipc_module_msg {
IPC_MOD_LARGE_CONFIG_SET = 4,
IPC_MOD_BIND = 5,
IPC_MOD_UNBIND = 6,
- IPC_MOD_SET_DX = 7
+ IPC_MOD_SET_DX = 7,
+ IPC_MOD_SET_D0IX = 8
};
static void skl_ipc_tx_data_copy(struct ipc_message *msg, char *tx_data,
@@ -289,6 +306,23 @@ static void skl_ipc_tx_msg(struct sst_generic_ipc *ipc, struct ipc_message *msg)
header->primary | SKL_ADSP_REG_HIPCI_BUSY);
}
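+/*
+ * Registered as the check_dsp_lp_on() hook in bxt_sst_dsp_init(): a true
+ * 'state' brings the DSP back to D0i0, while a false 'state' schedules a
+ * D0i3 entry through the platform set_state_D0i3 op.
+ */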
+int skl_ipc_check_D0i0(struct sst_dsp *dsp, bool state)
+{
+ int ret;
+
+ /* Nothing to do if the platform does not implement D0ix transitions */
+ if (!dsp->fw_ops.set_state_D0i0)
+ return 0;
+
+ /* Attempt D0i0 or D0i3 based on state */
+ if (state)
+ ret = dsp->fw_ops.set_state_D0i0(dsp);
+ else
+ ret = dsp->fw_ops.set_state_D0i3(dsp);
+
+ return ret;
+}
+
static struct ipc_message *skl_ipc_reply_get_msg(struct sst_generic_ipc *ipc,
u64 ipc_header)
{
@@ -464,7 +498,7 @@ irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
skl_ipc_int_enable(dsp);
/* continue to send any remaining messages... */
- kthread_queue_work(&ipc->kworker, &ipc->kwork);
+ schedule_work(&ipc->kwork);
return IRQ_HANDLED;
}
@@ -547,7 +581,7 @@ void skl_ipc_free(struct sst_generic_ipc *ipc)
}
int skl_ipc_create_pipeline(struct sst_generic_ipc *ipc,
- u16 ppl_mem_size, u8 ppl_type, u8 instance_id)
+ u16 ppl_mem_size, u8 ppl_type, u8 instance_id, u8 lp_mode)
{
struct skl_ipc_header header = {0};
u64 *ipc_header = (u64 *)(&header);
@@ -560,6 +594,8 @@ int skl_ipc_create_pipeline(struct sst_generic_ipc *ipc,
header.primary |= IPC_PPL_TYPE(ppl_type);
header.primary |= IPC_PPL_MEM_SIZE(ppl_mem_size);
+ header.extension = IPC_PPL_LP_MODE(lp_mode);
+
dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
if (ret < 0) {
@@ -931,3 +967,32 @@ int skl_sst_ipc_load_library(struct sst_generic_ipc *ipc,
return ret;
}
EXPORT_SYMBOL_GPL(skl_sst_ipc_load_library);
+
+int skl_ipc_set_d0ix(struct sst_generic_ipc *ipc, struct skl_ipc_d0ix_msg *msg)
+{
+ struct skl_ipc_header header = {0};
+ u64 *ipc_header = (u64 *)(&header);
+ int ret;
+
+ header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
+ header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+ header.primary |= IPC_GLB_TYPE(IPC_MOD_SET_D0IX);
+ header.primary |= IPC_MOD_INSTANCE_ID(msg->instance_id);
+ header.primary |= IPC_MOD_ID(msg->module_id);
+
+ header.extension = IPC_D0IX_WAKE(msg->wake);
+ header.extension |= IPC_D0IX_STREAMING(msg->streaming);
+
+ dev_dbg(ipc->dev, "In %s primary=%x ext=%x\n", __func__,
+ header.primary, header.extension);
+
+ /*
+ * Use the nopm IPC here as we don't want it checking for D0iX
+ */
+ ret = sst_ipc_tx_message_nopm(ipc, *ipc_header, NULL, 0, NULL, 0);
+ if (ret < 0)
+ dev_err(ipc->dev, "ipc: set d0ix failed, err %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_set_d0ix);
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.h b/sound/soc/intel/skylake/skl-sst-ipc.h
index 0334ed4af031..cc40341233fa 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.h
+++ b/sound/soc/intel/skylake/skl-sst-ipc.h
@@ -16,7 +16,6 @@
#ifndef __SKL_IPC_H
#define __SKL_IPC_H
-#include <linux/kthread.h>
#include <linux/irqreturn.h>
#include "../common/sst-ipc.h"
@@ -53,6 +52,23 @@ struct skl_dsp_cores {
int usage_count[SKL_DSP_CORES_MAX];
};
+/**
+ * skl_d0i3_data: skl D0i3 counters data struct
+ *
+ * @streaming: Count of usecases that can attempt streaming D0i3
+ * @non_streaming: Count of usecases that can attempt non-streaming D0i3
+ * @non_d0i3: Count of usecases that cannot attempt D0i3
+ * @state: Current D0i3 substate
+ * @work: Delayed work used to schedule the D0i3 entry
+ */
+struct skl_d0i3_data {
+ int streaming;
+ int non_streaming;
+ int non_d0i3;
+ enum skl_dsp_d0i3_states state;
+ struct delayed_work work;
+};
+
struct skl_sst {
struct device *dev;
struct sst_dsp *dsp;
@@ -83,6 +99,11 @@ struct skl_sst {
/* tplg manifest */
struct skl_dfw_manifest manifest;
+
+ /* Callback to update D0i3C register */
+ void (*update_d0i3c)(struct device *dev, bool enable);
+
+ struct skl_d0i3_data d0i3;
};
struct skl_ipc_init_instance_msg {
@@ -111,6 +132,13 @@ struct skl_ipc_large_config_msg {
u32 param_data_size;
};
+struct skl_ipc_d0ix_msg {
+ u32 module_id;
+ u32 instance_id;
+ u8 streaming;
+ u8 wake;
+};
+
#define SKL_IPC_BOOT_MSECS 3000
#define SKL_IPC_D3_MASK 0
@@ -119,7 +147,7 @@ struct skl_ipc_large_config_msg {
irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context);
int skl_ipc_create_pipeline(struct sst_generic_ipc *sst_ipc,
- u16 ppl_mem_size, u8 ppl_type, u8 instance_id);
+ u16 ppl_mem_size, u8 ppl_type, u8 instance_id, u8 lp_mode);
int skl_ipc_delete_pipeline(struct sst_generic_ipc *sst_ipc, u8 instance_id);
@@ -155,6 +183,11 @@ int skl_ipc_get_large_config(struct sst_generic_ipc *ipc,
int skl_sst_ipc_load_library(struct sst_generic_ipc *ipc,
u8 dma_id, u8 table_id);
+int skl_ipc_set_d0ix(struct sst_generic_ipc *ipc,
+ struct skl_ipc_d0ix_msg *msg);
+
+int skl_ipc_check_D0i0(struct sst_dsp *dsp, bool state);
+
void skl_ipc_int_enable(struct sst_dsp *dsp);
void skl_ipc_op_int_enable(struct sst_dsp *ctx);
void skl_ipc_op_int_disable(struct sst_dsp *ctx);
diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
index 8dc03039b311..ea162fbf68e5 100644
--- a/sound/soc/intel/skylake/skl-sst-utils.c
+++ b/sound/soc/intel/skylake/skl-sst-utils.c
@@ -179,7 +179,7 @@ static inline int skl_getid_32(struct uuid_module *module, u64 *val,
index = ffz(mask_val);
pvt_id = index + word1_mask + word2_mask;
if (pvt_id <= (max_inst - 1)) {
- *val |= 1 << (index + word1_mask);
+ *val |= 1ULL << (index + word1_mask);
return pvt_id;
}
}
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index b5b1934d8550..bd313c907b20 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -36,6 +36,44 @@
#define SKL_IN_DIR_BIT_MASK BIT(0)
#define SKL_PIN_COUNT_MASK GENMASK(7, 4)
+void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps)
+{
+ struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;
+
+ switch (caps) {
+ case SKL_D0I3_NONE:
+ d0i3->non_d0i3++;
+ break;
+
+ case SKL_D0I3_STREAMING:
+ d0i3->streaming++;
+ break;
+
+ case SKL_D0I3_NON_STREAMING:
+ d0i3->non_streaming++;
+ break;
+ }
+}
+
+void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps)
+{
+ struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;
+
+ switch (caps) {
+ case SKL_D0I3_NONE:
+ d0i3->non_d0i3--;
+ break;
+
+ case SKL_D0I3_STREAMING:
+ d0i3->streaming--;
+ break;
+
+ case SKL_D0I3_NON_STREAMING:
+ d0i3->non_streaming--;
+ break;
+ }
+}
+
/*
* SKL DSP driver modelling uses only few DAPM widgets so for rest we will
* ignore. This helpers checks if the SKL driver handles this widget type
@@ -1519,6 +1557,10 @@ static int skl_tplg_fill_pipe_tkn(struct device *dev,
pipe->memory_pages = tkn_val;
break;
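+	/* Pipeline low power mode flag, forwarded in the create-pipeline IPC */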
+ case SKL_TKN_U32_PMODE:
+ pipe->lp_mode = tkn_val;
+ break;
+
default:
dev_err(dev, "Token not handled %d\n", tkn);
return -EINVAL;
@@ -1826,6 +1868,10 @@ static int skl_tplg_get_token(struct device *dev,
mconfig->converter = tkn_elem->value;
break;
+ case SKL_TKL_U32_D0I3_CAPS:
+ mconfig->d0i3_caps = tkn_elem->value;
+ break;
+
case SKL_TKN_U32_PIPE_ID:
ret = skl_tplg_add_pipe(dev,
mconfig, skl, tkn_elem);
@@ -1841,6 +1887,7 @@ static int skl_tplg_get_token(struct device *dev,
case SKL_TKN_U32_PIPE_CONN_TYPE:
case SKL_TKN_U32_PIPE_PRIORITY:
case SKL_TKN_U32_PIPE_MEM_PGS:
+ case SKL_TKN_U32_PMODE:
if (is_pipe_exists) {
ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
tkn_elem->token, tkn_elem->value);
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
index a519360f42a6..08d39280b07b 100644
--- a/sound/soc/intel/skylake/skl-topology.h
+++ b/sound/soc/intel/skylake/skl-topology.h
@@ -113,23 +113,6 @@ struct skl_cpr_gtw_cfg {
u32 config_data[1];
} __packed;
-struct skl_i2s_config_blob {
- u32 gateway_attrib;
- u32 tdm_ts_group[8];
- u32 ssc0;
- u32 ssc1;
- u32 sscto;
- u32 sspsp;
- u32 sstsa;
- u32 ssrsa;
- u32 ssc2;
- u32 sspsp2;
- u32 ssc3;
- u32 ssioc;
- u32 mdivc;
- u32 mdivr;
-} __packed;
-
struct skl_dma_control {
u32 node_id;
u32 config_length;
@@ -279,6 +262,7 @@ struct skl_pipe {
u8 pipe_priority;
u16 conn_type;
u32 memory_pages;
+ u8 lp_mode;
struct skl_pipe_params *p_params;
enum skl_pipe_state state;
struct list_head w_list;
@@ -293,6 +277,12 @@ enum skl_module_state {
SKL_MODULE_UNLOADED = 4,
};
+enum d0i3_capability {
+ SKL_D0I3_NONE = 0,
+ SKL_D0I3_STREAMING = 1,
+ SKL_D0I3_NON_STREAMING = 2,
+};
+
struct skl_module_cfg {
u8 guid[16];
struct skl_module_inst_id id;
@@ -319,6 +309,7 @@ struct skl_module_cfg {
u32 converter;
u32 vbus_id;
u32 mem_pages;
+ enum d0i3_capability d0i3_caps;
struct skl_module_pin *m_in_pin;
struct skl_module_pin *m_out_pin;
enum skl_module_type m_type;
@@ -361,6 +352,9 @@ struct skl_module_cfg *skl_tplg_fe_get_cpr_module(
int skl_tplg_update_pipe_params(struct device *dev,
struct skl_module_cfg *mconfig, struct skl_pipe_params *params);
+void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps);
+void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps);
+
int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe);
int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 06fa5e85dd0e..da5db5098274 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -26,6 +26,7 @@
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/firmware.h>
+#include <linux/delay.h>
#include <sound/pcm.h>
#include "../common/sst-acpi.h"
#include <sound/hda_register.h>
@@ -109,6 +110,52 @@ static int skl_init_chip(struct hdac_bus *bus, bool full_reset)
return ret;
}
+void skl_update_d0i3c(struct device *dev, bool enable)
+{
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+ struct hdac_bus *bus = ebus_to_hbus(ebus);
+ u8 reg;
+ int timeout = 50;
+
+ reg = snd_hdac_chip_readb(bus, VS_D0I3C);
+ /* Do not write to D0I3C until command in progress bit is cleared */
+ while ((reg & AZX_REG_VS_D0I3C_CIP) && --timeout) {
+ udelay(10);
+ reg = snd_hdac_chip_readb(bus, VS_D0I3C);
+ }
+
+ /* Highly unlikely. But if it happens, flag error explicitly */
+ if (!timeout) {
+ dev_err(bus->dev, "Before D0I3C update: D0I3C CIP timeout\n");
+ return;
+ }
+
+ if (enable)
+ reg = reg | AZX_REG_VS_D0I3C_I3;
+ else
+ reg = reg & (~AZX_REG_VS_D0I3C_I3);
+
+ snd_hdac_chip_writeb(bus, VS_D0I3C, reg);
+
+ timeout = 50;
+ /* Wait for cmd in progress to be cleared before exiting the function */
+ reg = snd_hdac_chip_readb(bus, VS_D0I3C);
+ while ((reg & AZX_REG_VS_D0I3C_CIP) && --timeout) {
+ udelay(10);
+ reg = snd_hdac_chip_readb(bus, VS_D0I3C);
+ }
+
+ /* Highly unlikely. But if it happens, flag error explicitly */
+ if (!timeout) {
+ dev_err(bus->dev, "After D0I3C update: D0I3C CIP timeout\n");
+ return;
+ }
+
+ dev_dbg(bus->dev, "D0I3C register = 0x%x\n",
+ snd_hdac_chip_readb(bus, VS_D0I3C));
+}
+
/* called from IRQ */
static void skl_stream_update(struct hdac_bus *bus, struct hdac_stream *hstr)
{
@@ -181,6 +228,15 @@ static int skl_acquire_irq(struct hdac_ext_bus *ebus, int do_disconnect)
return 0;
}
+static int skl_suspend_late(struct device *dev)
+{
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+ struct skl *skl = ebus_to_skl(ebus);
+
+ return skl_suspend_late_dsp(skl);
+}
+
#ifdef CONFIG_PM
static int _skl_suspend(struct hdac_ext_bus *ebus)
{
@@ -243,7 +299,6 @@ static int skl_suspend(struct device *dev)
enable_irq_wake(bus->irq);
pci_save_state(pci);
- pci_disable_device(pci);
} else {
ret = _skl_suspend(ebus);
if (ret < 0)
@@ -286,7 +341,6 @@ static int skl_resume(struct device *dev)
*/
if (skl->supend_active) {
pci_restore_state(pci);
- ret = pci_enable_device(pci);
snd_hdac_ext_bus_link_power_up_all(ebus);
disable_irq_wake(bus->irq);
/*
@@ -345,6 +399,7 @@ static int skl_runtime_resume(struct device *dev)
static const struct dev_pm_ops skl_pm = {
SET_SYSTEM_SLEEP_PM_OPS(skl_suspend, skl_resume)
SET_RUNTIME_PM_OPS(skl_runtime_suspend, skl_runtime_resume, NULL)
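+	/* suspend_late runs after skl_suspend(), flushing or cancelling D0i3 work */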
+ .suspend_late = skl_suspend_late,
};
/*
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index 5d4fbb094c48..4986e3929dd3 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -52,6 +52,9 @@
#define AZX_PGCTL_LSRMD_MASK (1 << 4)
#define AZX_PCIREG_CGCTL 0x48
#define AZX_CGCTL_MISCBDCGE_MASK (1 << 6)
+/* D0I3C Register fields */
+#define AZX_REG_VS_D0I3C_CIP 0x1 /* Command in progress */
+#define AZX_REG_VS_D0I3C_I3 0x4 /* D0i3 enable */
struct skl_dsp_resource {
u32 max_mcps;
@@ -121,8 +124,11 @@ int skl_get_dmic_geo(struct skl *skl);
int skl_nhlt_update_topology_bin(struct skl *skl);
int skl_init_dsp(struct skl *skl);
int skl_free_dsp(struct skl *skl);
+int skl_suspend_late_dsp(struct skl *skl);
int skl_suspend_dsp(struct skl *skl);
int skl_resume_dsp(struct skl *skl);
void skl_cleanup_resources(struct skl *skl);
const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id);
+void skl_update_d0i3c(struct device *dev, bool enable);
+
#endif /* __SOUND_SOC_SKL_H */
diff --git a/sound/soc/kirkwood/armada-370-db.c b/sound/soc/kirkwood/armada-370-db.c
index e0304d544f26..677a48d7b891 100644
--- a/sound/soc/kirkwood/armada-370-db.c
+++ b/sound/soc/kirkwood/armada-370-db.c
@@ -42,7 +42,7 @@ static int a370db_hw_params(struct snd_pcm_substream *substream,
return snd_soc_dai_set_sysclk(codec_dai, 0, freq, SND_SOC_CLOCK_IN);
}
-static struct snd_soc_ops a370db_ops = {
+static const struct snd_soc_ops a370db_ops = {
.hw_params = a370db_hw_params,
};
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index 13631003cb7c..a002ab892772 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -735,6 +735,11 @@ static int mxs_saif_probe(struct platform_device *pdev)
else
saif->id = ret;
+ if (saif->id >= ARRAY_SIZE(mxs_saif)) {
+ dev_err(&pdev->dev, "get wrong saif id\n");
+ return -EINVAL;
+ }
+
/*
* If there is no "fsl,saif-master" phandle, it's a saif
* master. Otherwise, it's a slave and its phandle points
@@ -749,11 +754,11 @@ static int mxs_saif_probe(struct platform_device *pdev)
return ret;
else
saif->master_id = ret;
- }
- if (saif->master_id >= ARRAY_SIZE(mxs_saif)) {
- dev_err(&pdev->dev, "get wrong master id\n");
- return -EINVAL;
+ if (saif->master_id >= ARRAY_SIZE(mxs_saif)) {
+ dev_err(&pdev->dev, "get wrong master id\n");
+ return -EINVAL;
+ }
}
mxs_saif[saif->id] = saif;
diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c
index 2b23ffbac6b1..a96276e77332 100644
--- a/sound/soc/mxs/mxs-sgtl5000.c
+++ b/sound/soc/mxs/mxs-sgtl5000.c
@@ -68,7 +68,7 @@ static int mxs_sgtl5000_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_soc_ops mxs_sgtl5000_hifi_ops = {
+static const struct snd_soc_ops mxs_sgtl5000_hifi_ops = {
.hw_params = mxs_sgtl5000_hw_params,
};
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c
index dcbb7aa9830c..311774e9ca46 100644
--- a/sound/soc/pxa/corgi.c
+++ b/sound/soc/pxa/corgi.c
@@ -244,9 +244,9 @@ static const struct snd_soc_dapm_route corgi_audio_map[] = {
{"MICIN", NULL, "Line Jack"},
};
-static const char *jack_function[] = {"Headphone", "Mic", "Line", "Headset",
- "Off"};
-static const char *spk_function[] = {"On", "Off"};
+static const char * const jack_function[] = {"Headphone", "Mic", "Line",
+ "Headset", "Off"};
+static const char * const spk_function[] = {"On", "Off"};
static const struct soc_enum corgi_enum[] = {
SOC_ENUM_SINGLE_EXT(5, jack_function),
SOC_ENUM_SINGLE_EXT(2, spk_function),
diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c
index 1de876529aa1..086c37a85630 100644
--- a/sound/soc/pxa/e740_wm9705.c
+++ b/sound/soc/pxa/e740_wm9705.c
@@ -22,7 +22,6 @@
#include <asm/mach-types.h>
-#include "../codecs/wm9705.h"
#include "pxa2xx-ac97.h"
diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c
index b7eb7cd5df7d..7823278012a6 100644
--- a/sound/soc/pxa/e750_wm9705.c
+++ b/sound/soc/pxa/e750_wm9705.c
@@ -22,7 +22,6 @@
#include <asm/mach-types.h>
-#include "../codecs/wm9705.h"
#include "pxa2xx-ac97.h"
static int e750_spk_amp_event(struct snd_soc_dapm_widget *w,
diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c
index 41bf71466a7b..07b9c6e17df9 100644
--- a/sound/soc/pxa/e800_wm9712.c
+++ b/sound/soc/pxa/e800_wm9712.c
@@ -21,7 +21,6 @@
#include <mach/audio.h>
#include <mach/eseries-gpio.h>
-#include "../codecs/wm9712.h"
#include "pxa2xx-ac97.h"
static int e800_spk_amp_event(struct snd_soc_dapm_widget *w,
diff --git a/sound/soc/pxa/em-x270.c b/sound/soc/pxa/em-x270.c
index 64743a05aeae..966163d1c813 100644
--- a/sound/soc/pxa/em-x270.c
+++ b/sound/soc/pxa/em-x270.c
@@ -30,7 +30,6 @@
#include <asm/mach-types.h>
#include <mach/audio.h>
-#include "../codecs/wm9712.h"
#include "pxa2xx-ac97.h"
static struct snd_soc_dai_link em_x270_dai[] = {
diff --git a/sound/soc/pxa/hx4700.c b/sound/soc/pxa/hx4700.c
index ecbf2873b7ff..85483049b916 100644
--- a/sound/soc/pxa/hx4700.c
+++ b/sound/soc/pxa/hx4700.c
@@ -27,8 +27,6 @@
#include <asm/mach-types.h>
#include "pxa2xx-i2s.h"
-#include "../codecs/ak4641.h"
-
static struct snd_soc_jack hs_jack;
/* Headphones jack detection DAPM pin */
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c
index 62b8377a9d2b..2d4d4455fe87 100644
--- a/sound/soc/pxa/magician.c
+++ b/sound/soc/pxa/magician.c
@@ -376,7 +376,7 @@ static const struct snd_soc_dapm_route audio_map[] = {
{"VINM", NULL, "Call Mic"},
};
-static const char *input_select[] = {"Call Mic", "Headset Mic"};
+static const char * const input_select[] = {"Call Mic", "Headset Mic"};
static const struct soc_enum magician_in_sel_enum =
SOC_ENUM_SINGLE_EXT(2, input_select);
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index d1661fa6ee08..0fe0abec8fc4 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -187,7 +187,7 @@ static int mioa701_wm9713_probe(struct platform_device *pdev)
mioa701.dev = &pdev->dev;
rc = devm_snd_soc_register_card(&pdev->dev, &mioa701);
if (!rc)
- dev_warn(&pdev->dev, "Be warned that incorrect mixers/muxes setup will"
+ dev_warn(&pdev->dev, "Be warned that incorrect mixers/muxes setup will "
"lead to overheating and possible destruction of your device."
" Do not use without a good knowledge of mio's board design!\n");
return rc;
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index bcc81e920a67..387492d46b6c 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -27,7 +27,6 @@
#include <mach/audio.h>
#include <linux/platform_data/asoc-palm27x.h>
-#include "../codecs/wm9712.h"
#include "pxa2xx-ac97.h"
static struct snd_soc_jack hs_jack;
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c
index 4b3b714f5ee7..a879aba0691f 100644
--- a/sound/soc/pxa/poodle.c
+++ b/sound/soc/pxa/poodle.c
@@ -209,8 +209,8 @@ static const struct snd_soc_dapm_route poodle_audio_map[] = {
{"MICIN", NULL, "Microphone"},
};
-static const char *jack_function[] = {"Off", "Headphone"};
-static const char *spk_function[] = {"Off", "On"};
+static const char * const jack_function[] = {"Off", "Headphone"};
+static const char * const spk_function[] = {"Off", "On"};
static const struct soc_enum poodle_enum[] = {
SOC_ENUM_SINGLE_EXT(2, jack_function),
SOC_ENUM_SINGLE_EXT(2, spk_function),
diff --git a/sound/soc/pxa/pxa-ssp.h b/sound/soc/pxa/pxa-ssp.h
index bc79da221c0d..abf6ec080258 100644
--- a/sound/soc/pxa/pxa-ssp.h
+++ b/sound/soc/pxa/pxa-ssp.h
@@ -9,12 +9,6 @@
#ifndef _PXA_SSP_H
#define _PXA_SSP_H
-/* pxa DAI SSP IDs */
-#define PXA_DAI_SSP1 0
-#define PXA_DAI_SSP2 1
-#define PXA_DAI_SSP3 2
-#define PXA_DAI_SSP4 3
-
/* SSP clock sources */
#define PXA_SSP_CLK_PLL 0
#define PXA_SSP_CLK_EXT 1
diff --git a/sound/soc/pxa/pxa2xx-i2s.h b/sound/soc/pxa/pxa2xx-i2s.h
index 070f3c6059fe..7e218e2105a9 100644
--- a/sound/soc/pxa/pxa2xx-i2s.h
+++ b/sound/soc/pxa/pxa2xx-i2s.h
@@ -9,9 +9,6 @@
#ifndef _PXA2XX_I2S_H
#define _PXA2XX_I2S_H
-/* pxa2xx DAI ID's */
-#define PXA2XX_DAI_I2S 0
-
/* I2S clock */
#define PXA2XX_I2S_SYSCLK 0
diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c
index 0e02634c8b7f..07d77cddac60 100644
--- a/sound/soc/pxa/spitz.c
+++ b/sound/soc/pxa/spitz.c
@@ -241,9 +241,9 @@ static const struct snd_soc_dapm_route spitz_audio_map[] = {
{"LINPUT1", NULL, "Line Jack"},
};
-static const char *jack_function[] = {"Headphone", "Mic", "Line", "Headset",
- "Off"};
-static const char *spk_function[] = {"On", "Off"};
+static const char * const jack_function[] = {"Headphone", "Mic", "Line",
+ "Headset", "Off"};
+static const char * const spk_function[] = {"On", "Off"};
static const struct soc_enum spitz_enum[] = {
SOC_ENUM_SINGLE_EXT(5, jack_function),
SOC_ENUM_SINGLE_EXT(2, spk_function),
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index c508f024ecfb..2e312c62e3c7 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -31,7 +31,6 @@
#include <mach/tosa.h>
#include <mach/audio.h>
-#include "../codecs/wm9712.h"
#include "pxa2xx-ac97.h"
#define TOSA_HP 0
@@ -170,9 +169,9 @@ static const struct snd_soc_dapm_route audio_map[] = {
{"Mic Bias", NULL, "Headset Jack"},
};
-static const char *jack_function[] = {"Headphone", "Mic", "Line", "Headset",
- "Off"};
-static const char *spk_function[] = {"On", "Off"};
+static const char * const jack_function[] = {"Headphone", "Mic", "Line",
+ "Headset", "Off"};
+static const char * const spk_function[] = {"On", "Off"};
static const struct soc_enum tosa_enum[] = {
SOC_ENUM_SINGLE_EXT(5, jack_function),
SOC_ENUM_SINGLE_EXT(2, spk_function),
diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c
index 07f91e918b23..d084d7468299 100644
--- a/sound/soc/qcom/apq8016_sbc.c
+++ b/sound/soc/qcom/apq8016_sbc.c
@@ -123,20 +123,15 @@ static struct apq8016_sbc_data *apq8016_sbc_parse_of(struct snd_soc_card *card)
return ERR_PTR(-EINVAL);
}
- link->codec_of_node = of_parse_phandle(codec, "sound-dai", 0);
- if (!link->codec_of_node) {
- dev_err(card->dev, "error getting codec phandle\n");
- return ERR_PTR(-EINVAL);
- }
-
ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name);
if (ret) {
dev_err(card->dev, "error getting cpu dai name\n");
return ERR_PTR(ret);
}
- ret = snd_soc_of_get_dai_name(codec, &link->codec_dai_name);
- if (ret) {
+ ret = snd_soc_of_get_dai_link_codecs(dev, codec, link);
+
+ if (ret < 0) {
dev_err(card->dev, "error getting codec dai name\n");
return ERR_PTR(ret);
}
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index b392e51de94d..dd5bdd0da730 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -25,8 +25,7 @@
#include "lpass.h"
struct lpass_pcm_data {
- int rdma_ch;
- int wrdma_ch;
+ int dma_ch;
int i2s_port;
};
@@ -78,6 +77,9 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
dma_ch = 0;
if (v->alloc_dma_channel)
dma_ch = v->alloc_dma_channel(drvdata, dir);
+ else
+ dma_ch = 0;
+
if (dma_ch < 0)
return dma_ch;
@@ -92,10 +94,7 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
return ret;
}
- if (dir == SNDRV_PCM_STREAM_PLAYBACK)
- data->rdma_ch = dma_ch;
- else
- data->wrdma_ch = dma_ch;
+ data->dma_ch = dma_ch;
snd_soc_set_runtime_hwparams(substream, &lpass_platform_pcm_hardware);
@@ -122,20 +121,12 @@ static int lpass_platform_pcmops_close(struct snd_pcm_substream *substream)
snd_soc_platform_get_drvdata(soc_runtime->platform);
struct lpass_variant *v = drvdata->variant;
struct lpass_pcm_data *data;
- int dma_ch, dir = substream->stream;
data = runtime->private_data;
v = drvdata->variant;
-
- if (dir == SNDRV_PCM_STREAM_PLAYBACK)
- dma_ch = data->rdma_ch;
- else
- dma_ch = data->wrdma_ch;
-
- drvdata->substream[dma_ch] = NULL;
-
+ drvdata->substream[data->dma_ch] = NULL;
if (v->free_dma_channel)
- v->free_dma_channel(drvdata, dma_ch);
+ v->free_dma_channel(drvdata, data->dma_ch);
return 0;
}
@@ -156,10 +147,7 @@ static int lpass_platform_pcmops_hw_params(struct snd_pcm_substream *substream,
int bitwidth;
int ret, dma_port = pcm_data->i2s_port + v->dmactl_audif_start;
- if (dir == SNDRV_PCM_STREAM_PLAYBACK)
- ch = pcm_data->rdma_ch;
- else
- ch = pcm_data->wrdma_ch;
+ ch = pcm_data->dma_ch;
bitwidth = snd_pcm_format_width(format);
if (bitwidth < 0) {
@@ -246,11 +234,7 @@ static int lpass_platform_pcmops_hw_free(struct snd_pcm_substream *substream)
unsigned int reg;
int ret;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- reg = LPAIF_RDMACTL_REG(v, pcm_data->rdma_ch);
- else
- reg = LPAIF_WRDMACTL_REG(v, pcm_data->wrdma_ch);
-
+ reg = LPAIF_DMACTL_REG(v, pcm_data->dma_ch, substream->stream);
ret = regmap_write(drvdata->lpaif_map, reg, 0);
if (ret)
dev_err(soc_runtime->dev, "%s() error writing to rdmactl reg: %d\n",
@@ -270,10 +254,7 @@ static int lpass_platform_pcmops_prepare(struct snd_pcm_substream *substream)
struct lpass_variant *v = drvdata->variant;
int ret, ch, dir = substream->stream;
- if (dir == SNDRV_PCM_STREAM_PLAYBACK)
- ch = pcm_data->rdma_ch;
- else
- ch = pcm_data->wrdma_ch;
+ ch = pcm_data->dma_ch;
ret = regmap_write(drvdata->lpaif_map,
LPAIF_DMABASE_REG(v, ch, dir),
@@ -325,10 +306,7 @@ static int lpass_platform_pcmops_trigger(struct snd_pcm_substream *substream,
struct lpass_variant *v = drvdata->variant;
int ret, ch, dir = substream->stream;
- if (dir == SNDRV_PCM_STREAM_PLAYBACK)
- ch = pcm_data->rdma_ch;
- else
- ch = pcm_data->wrdma_ch;
+ ch = pcm_data->dma_ch;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
@@ -403,10 +381,7 @@ static snd_pcm_uframes_t lpass_platform_pcmops_pointer(
unsigned int base_addr, curr_addr;
int ret, ch, dir = substream->stream;
- if (dir == SNDRV_PCM_STREAM_PLAYBACK)
- ch = pcm_data->rdma_ch;
- else
- ch = pcm_data->wrdma_ch;
+ ch = pcm_data->dma_ch;
ret = regmap_read(drvdata->lpaif_map,
LPAIF_DMABASE_REG(v, ch, dir), &base_addr);
diff --git a/sound/soc/qcom/storm.c b/sound/soc/qcom/storm.c
index 2d833bffdba0..8fcac2ac3aa6 100644
--- a/sound/soc/qcom/storm.c
+++ b/sound/soc/qcom/storm.c
@@ -58,7 +58,7 @@ static int storm_ops_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_soc_ops storm_soc_ops = {
+static const struct snd_soc_ops storm_soc_ops = {
.hw_params = storm_ops_hw_params,
};
diff --git a/sound/soc/rockchip/rk3399_gru_sound.c b/sound/soc/rockchip/rk3399_gru_sound.c
index 9ed735a6cf49..3475c61a5fa0 100644
--- a/sound/soc/rockchip/rk3399_gru_sound.c
+++ b/sound/soc/rockchip/rk3399_gru_sound.c
@@ -38,7 +38,7 @@
#define SOUND_FS 256
-unsigned int rt5514_dmic_delay;
+static unsigned int rt5514_dmic_delay;
static struct snd_soc_jack rockchip_sound_jack;
@@ -228,15 +228,15 @@ static int rockchip_sound_da7219_init(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static struct snd_soc_ops rockchip_sound_max98357a_ops = {
+static const struct snd_soc_ops rockchip_sound_max98357a_ops = {
.hw_params = rockchip_sound_max98357a_hw_params,
};
-static struct snd_soc_ops rockchip_sound_rt5514_ops = {
+static const struct snd_soc_ops rockchip_sound_rt5514_ops = {
.hw_params = rockchip_sound_rt5514_hw_params,
};
-static struct snd_soc_ops rockchip_sound_da7219_ops = {
+static const struct snd_soc_ops rockchip_sound_da7219_ops = {
.hw_params = rockchip_sound_da7219_hw_params,
};
diff --git a/sound/soc/rockchip/rockchip_max98090.c b/sound/soc/rockchip/rockchip_max98090.c
index e70ffad07184..789d6f1e2b5f 100644
--- a/sound/soc/rockchip/rockchip_max98090.c
+++ b/sound/soc/rockchip/rockchip_max98090.c
@@ -119,7 +119,7 @@ static int rk_aif1_hw_params(struct snd_pcm_substream *substream,
return ret;
}
-static struct snd_soc_ops rk_aif1_ops = {
+static const struct snd_soc_ops rk_aif1_ops = {
.hw_params = rk_aif1_hw_params,
};
diff --git a/sound/soc/rockchip/rockchip_rt5645.c b/sound/soc/rockchip/rockchip_rt5645.c
index 440a8026346a..9e0c17805807 100644
--- a/sound/soc/rockchip/rockchip_rt5645.c
+++ b/sound/soc/rockchip/rockchip_rt5645.c
@@ -135,7 +135,7 @@ static int rk_init(struct snd_soc_pcm_runtime *runtime)
&headset_jack);
}
-static struct snd_soc_ops rk_aif1_ops = {
+static const struct snd_soc_ops rk_aif1_ops = {
.hw_params = rk_aif1_hw_params,
};
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index f6023b46c107..7c423151ef7d 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -1,6 +1,7 @@
menuconfig SND_SOC_SAMSUNG
tristate "ASoC support for Samsung"
- depends on (PLAT_SAMSUNG || ARCH_EXYNOS)
+ depends on PLAT_SAMSUNG || ARCH_EXYNOS || COMPILE_TEST
+ depends on COMMON_CLK
select SND_SOC_GENERIC_DMAENGINE_PCM
---help---
Say Y or M if you want to add support for codecs attached to
@@ -22,10 +23,6 @@ config SND_S3C2412_SOC_I2S
config SND_SAMSUNG_PCM
tristate "Samsung PCM interface support"
-config SND_SAMSUNG_AC97
- tristate
- select SND_SOC_AC97_BUS
-
config SND_SAMSUNG_SPDIF
tristate "Samsung SPDIF transmitter support"
select SND_SOC_SPDIF
@@ -53,7 +50,7 @@ config SND_SOC_SAMSUNG_JIVE_WM8750
config SND_SOC_SAMSUNG_SMDK_WM8580
tristate "SoC I2S Audio support for WM8580 on SMDK"
- depends on MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110
+ depends on MACH_SMDK6410 || COMPILE_TEST
depends on I2C
select SND_SOC_WM8580
select SND_SAMSUNG_I2S
@@ -69,26 +66,6 @@ config SND_SOC_SAMSUNG_SMDK_WM8994
help
Say Y if you want to add support for SoC audio on the SMDKs.
-config SND_SOC_SAMSUNG_SMDK2443_WM9710
- tristate "SoC AC97 Audio support for SMDK2443 - WM9710"
- depends on MACH_SMDK2443
- select AC97_BUS
- select SND_SOC_AC97_CODEC
- select SND_SAMSUNG_AC97
- help
- Say Y if you want to add support for SoC audio on smdk2443
- with the WM9710.
-
-config SND_SOC_SAMSUNG_LN2440SBC_ALC650
- tristate "SoC AC97 Audio support for LN2440SBC - ALC650"
- depends on ARCH_S3C24XX
- select AC97_BUS
- select SND_SOC_AC97_CODEC
- select SND_SAMSUNG_AC97
- help
- Say Y if you want to add support for SoC audio on ln2440sbc
- with the ALC650.
-
config SND_SOC_SAMSUNG_S3C24XX_UDA134X
tristate "SoC I2S Audio support UDA134X wired to a S3C24XX"
depends on ARCH_S3C24XX
@@ -131,17 +108,10 @@ config SND_SOC_SAMSUNG_RX1950_UDA1380
help
This driver provides audio support for HP iPAQ RX1950 PDA.
-config SND_SOC_SAMSUNG_SMDK_WM9713
- tristate "SoC AC97 Audio support for SMDK with WM9713"
- depends on MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110
- select SND_SOC_WM9713
- select SND_SAMSUNG_AC97
- help
- Say Y if you want to add support for SoC audio on the SMDK.
-
config SND_SOC_SMARTQ
tristate "SoC I2S Audio support for SmartQ board"
- depends on MACH_SMARTQ && I2C
+ depends on MACH_SMARTQ || COMPILE_TEST
+ depends on I2C
select SND_SAMSUNG_I2S
select SND_SOC_WM8750
@@ -151,15 +121,6 @@ config SND_SOC_SAMSUNG_SMDK_SPDIF
help
Say Y if you want to add support for SoC S/PDIF audio on the SMDK.
-config SND_SOC_SMDK_WM8580_PCM
- tristate "SoC PCM Audio support for WM8580 on SMDK"
- depends on MACH_SMDKV210 || MACH_SMDKC110
- depends on I2C
- select SND_SOC_WM8580
- select SND_SAMSUNG_PCM
- help
- Say Y if you want to add support for SoC audio on the SMDK.
-
config SND_SOC_SMDK_WM8994_PCM
tristate "SoC PCM Audio support for WM8994 on SMDK"
depends on I2C=y
@@ -229,4 +190,13 @@ config SND_SOC_ARNDALE_RT5631_ALC5631
select SND_SAMSUNG_I2S
select SND_SOC_RT5631
+config SND_SOC_SAMSUNG_TM2_WM5110
+ tristate "SoC I2S Audio support for WM5110 on TM2 board"
+ depends on SND_SOC_SAMSUNG && MFD_ARIZONA && I2C && SPI_MASTER
+ select SND_SOC_MAX98504
+ select SND_SOC_WM5110
+ select SND_SAMSUNG_I2S
+ help
+ Say Y if you want to add support for SoC audio on the TM2 board.
+
endif #SND_SOC_SAMSUNG
diff --git a/sound/soc/samsung/Makefile b/sound/soc/samsung/Makefile
index 5d03f5ce6916..b5df5e2e3d94 100644
--- a/sound/soc/samsung/Makefile
+++ b/sound/soc/samsung/Makefile
@@ -3,7 +3,6 @@ snd-soc-s3c-dma-objs := dmaengine.o
snd-soc-idma-objs := idma.o
snd-soc-s3c24xx-i2s-objs := s3c24xx-i2s.o
snd-soc-s3c2412-i2s-objs := s3c2412-i2s.o
-snd-soc-ac97-objs := ac97.o
snd-soc-s3c-i2s-v2-objs := s3c-i2s-v2.o
snd-soc-samsung-spdif-objs := spdif.o
snd-soc-pcm-objs := pcm.o
@@ -11,7 +10,6 @@ snd-soc-i2s-objs := i2s.o
obj-$(CONFIG_SND_SOC_SAMSUNG) += snd-soc-s3c-dma.o
obj-$(CONFIG_SND_S3C24XX_I2S) += snd-soc-s3c24xx-i2s.o
-obj-$(CONFIG_SND_SAMSUNG_AC97) += snd-soc-ac97.o
obj-$(CONFIG_SND_S3C2412_SOC_I2S) += snd-soc-s3c2412-i2s.o
obj-$(CONFIG_SND_S3C_I2SV2_SOC) += snd-soc-s3c-i2s-v2.o
obj-$(CONFIG_SND_SAMSUNG_SPDIF) += snd-soc-samsung-spdif.o
@@ -36,7 +34,6 @@ snd-soc-snow-objs := snow.o
snd-soc-smdk-wm9713-objs := smdk_wm9713.o
snd-soc-s3c64xx-smartq-wm8987-objs := smartq_wm8987.o
snd-soc-smdk-spdif-objs := smdk_spdif.o
-snd-soc-smdk-wm8580pcm-objs := smdk_wm8580pcm.o
snd-soc-smdk-wm8994pcm-objs := smdk_wm8994pcm.o
snd-soc-speyside-objs := speyside.o
snd-soc-tobermory-objs := tobermory.o
@@ -44,11 +41,10 @@ snd-soc-lowland-objs := lowland.o
snd-soc-littlemill-objs := littlemill.o
snd-soc-bells-objs := bells.o
snd-soc-arndale-rt5631-objs := arndale_rt5631.o
+snd-soc-tm2-wm5110-objs := tm2_wm5110.o
obj-$(CONFIG_SND_SOC_SAMSUNG_JIVE_WM8750) += snd-soc-jive-wm8750.o
obj-$(CONFIG_SND_SOC_SAMSUNG_NEO1973_WM8753) += snd-soc-neo1973-wm8753.o
-obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK2443_WM9710) += snd-soc-smdk2443-wm9710.o
-obj-$(CONFIG_SND_SOC_SAMSUNG_LN2440SBC_ALC650) += snd-soc-ln2440sbc-alc650.o
obj-$(CONFIG_SND_SOC_SAMSUNG_S3C24XX_UDA134X) += snd-soc-s3c24xx-uda134x.o
obj-$(CONFIG_SND_SOC_SAMSUNG_SIMTEC) += snd-soc-s3c24xx-simtec.o
obj-$(CONFIG_SND_SOC_SAMSUNG_SIMTEC_HERMES) += snd-soc-s3c24xx-simtec-hermes.o
@@ -58,10 +54,8 @@ obj-$(CONFIG_SND_SOC_SAMSUNG_RX1950_UDA1380) += snd-soc-rx1950-uda1380.o
obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_WM8580) += snd-soc-smdk-wm8580.o
obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_WM8994) += snd-soc-smdk-wm8994.o
obj-$(CONFIG_SND_SOC_SNOW) += snd-soc-snow.o
-obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_WM9713) += snd-soc-smdk-wm9713.o
obj-$(CONFIG_SND_SOC_SMARTQ) += snd-soc-s3c64xx-smartq-wm8987.o
obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_SPDIF) += snd-soc-smdk-spdif.o
-obj-$(CONFIG_SND_SOC_SMDK_WM8580_PCM) += snd-soc-smdk-wm8580pcm.o
obj-$(CONFIG_SND_SOC_SMDK_WM8994_PCM) += snd-soc-smdk-wm8994pcm.o
obj-$(CONFIG_SND_SOC_SPEYSIDE) += snd-soc-speyside.o
obj-$(CONFIG_SND_SOC_TOBERMORY) += snd-soc-tobermory.o
@@ -69,3 +63,4 @@ obj-$(CONFIG_SND_SOC_LOWLAND) += snd-soc-lowland.o
obj-$(CONFIG_SND_SOC_LITTLEMILL) += snd-soc-littlemill.o
obj-$(CONFIG_SND_SOC_BELLS) += snd-soc-bells.o
obj-$(CONFIG_SND_SOC_ARNDALE_RT5631_ALC5631) += snd-soc-arndale-rt5631.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_TM2_WM5110) += snd-soc-tm2-wm5110.o
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
deleted file mode 100644
index cbc0023c2bc8..000000000000
--- a/sound/soc/samsung/ac97.c
+++ /dev/null
@@ -1,437 +0,0 @@
-/* sound/soc/samsung/ac97.c
- *
- * ALSA SoC Audio Layer - S3C AC97 Controller driver
- * Evolved from s3c2443-ac97.c
- *
- * Copyright (c) 2010 Samsung Electronics Co. Ltd
- * Author: Jaswinder Singh <jassisinghbrar@gmail.com>
- * Credits: Graeme Gregory, Sean Choi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/module.h>
-
-#include <sound/soc.h>
-
-#include "regs-ac97.h"
-#include <linux/platform_data/asoc-s3c.h>
-
-#include "dma.h"
-
-#define AC_CMD_ADDR(x) (x << 16)
-#define AC_CMD_DATA(x) (x & 0xffff)
-
-#define S3C_AC97_DAI_PCM 0
-#define S3C_AC97_DAI_MIC 1
-
-struct s3c_ac97_info {
- struct clk *ac97_clk;
- void __iomem *regs;
- struct mutex lock;
- struct completion done;
-};
-static struct s3c_ac97_info s3c_ac97;
-
-static struct snd_dmaengine_dai_dma_data s3c_ac97_pcm_out = {
- .addr_width = 4,
-};
-
-static struct snd_dmaengine_dai_dma_data s3c_ac97_pcm_in = {
- .addr_width = 4,
-};
-
-static struct snd_dmaengine_dai_dma_data s3c_ac97_mic_in = {
- .addr_width = 4,
-};
-
-static void s3c_ac97_activate(struct snd_ac97 *ac97)
-{
- u32 ac_glbctrl, stat;
-
- stat = readl(s3c_ac97.regs + S3C_AC97_GLBSTAT) & 0x7;
- if (stat == S3C_AC97_GLBSTAT_MAINSTATE_ACTIVE)
- return; /* Return if already active */
-
- reinit_completion(&s3c_ac97.done);
-
- ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
- ac_glbctrl = S3C_AC97_GLBCTRL_ACLINKON;
- writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
- msleep(1);
-
- ac_glbctrl |= S3C_AC97_GLBCTRL_TRANSFERDATAENABLE;
- writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
- msleep(1);
-
- ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
- ac_glbctrl |= S3C_AC97_GLBCTRL_CODECREADYIE;
- writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
- if (!wait_for_completion_timeout(&s3c_ac97.done, HZ))
- pr_err("AC97: Unable to activate!\n");
-}
-
-static unsigned short s3c_ac97_read(struct snd_ac97 *ac97,
- unsigned short reg)
-{
- u32 ac_glbctrl, ac_codec_cmd;
- u32 stat, addr, data;
-
- mutex_lock(&s3c_ac97.lock);
-
- s3c_ac97_activate(ac97);
-
- reinit_completion(&s3c_ac97.done);
-
- ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
- ac_codec_cmd = S3C_AC97_CODEC_CMD_READ | AC_CMD_ADDR(reg);
- writel(ac_codec_cmd, s3c_ac97.regs + S3C_AC97_CODEC_CMD);
-
- udelay(50);
-
- ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
- ac_glbctrl |= S3C_AC97_GLBCTRL_CODECREADYIE;
- writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
- if (!wait_for_completion_timeout(&s3c_ac97.done, HZ))
- pr_err("AC97: Unable to read!\n");
-
- stat = readl(s3c_ac97.regs + S3C_AC97_STAT);
- addr = (stat >> 16) & 0x7f;
- data = (stat & 0xffff);
-
- if (addr != reg)
- pr_err("ac97: req addr = %02x, rep addr = %02x\n",
- reg, addr);
-
- mutex_unlock(&s3c_ac97.lock);
-
- return (unsigned short)data;
-}
-
-static void s3c_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
- unsigned short val)
-{
- u32 ac_glbctrl, ac_codec_cmd;
-
- mutex_lock(&s3c_ac97.lock);
-
- s3c_ac97_activate(ac97);
-
- reinit_completion(&s3c_ac97.done);
-
- ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
- ac_codec_cmd = AC_CMD_ADDR(reg) | AC_CMD_DATA(val);
- writel(ac_codec_cmd, s3c_ac97.regs + S3C_AC97_CODEC_CMD);
-
- udelay(50);
-
- ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
- ac_glbctrl |= S3C_AC97_GLBCTRL_CODECREADYIE;
- writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
- if (!wait_for_completion_timeout(&s3c_ac97.done, HZ))
- pr_err("AC97: Unable to write!\n");
-
- ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
- ac_codec_cmd |= S3C_AC97_CODEC_CMD_READ;
- writel(ac_codec_cmd, s3c_ac97.regs + S3C_AC97_CODEC_CMD);
-
- mutex_unlock(&s3c_ac97.lock);
-}
-
-static void s3c_ac97_cold_reset(struct snd_ac97 *ac97)
-{
- pr_debug("AC97: Cold reset\n");
- writel(S3C_AC97_GLBCTRL_COLDRESET,
- s3c_ac97.regs + S3C_AC97_GLBCTRL);
- msleep(1);
-
- writel(0, s3c_ac97.regs + S3C_AC97_GLBCTRL);
- msleep(1);
-}
-
-static void s3c_ac97_warm_reset(struct snd_ac97 *ac97)
-{
- u32 stat;
-
- stat = readl(s3c_ac97.regs + S3C_AC97_GLBSTAT) & 0x7;
- if (stat == S3C_AC97_GLBSTAT_MAINSTATE_ACTIVE)
- return; /* Return if already active */
-
- pr_debug("AC97: Warm reset\n");
-
- writel(S3C_AC97_GLBCTRL_WARMRESET, s3c_ac97.regs + S3C_AC97_GLBCTRL);
- msleep(1);
-
- writel(0, s3c_ac97.regs + S3C_AC97_GLBCTRL);
- msleep(1);
-
- s3c_ac97_activate(ac97);
-}
-
-static irqreturn_t s3c_ac97_irq(int irq, void *dev_id)
-{
- u32 ac_glbctrl, ac_glbstat;
-
- ac_glbstat = readl(s3c_ac97.regs + S3C_AC97_GLBSTAT);
-
- if (ac_glbstat & S3C_AC97_GLBSTAT_CODECREADY) {
-
- ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
- ac_glbctrl &= ~S3C_AC97_GLBCTRL_CODECREADYIE;
- writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
- complete(&s3c_ac97.done);
- }
-
- ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
- ac_glbctrl |= (1<<30); /* Clear interrupt */
- writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
- return IRQ_HANDLED;
-}
-
-static struct snd_ac97_bus_ops s3c_ac97_ops = {
- .read = s3c_ac97_read,
- .write = s3c_ac97_write,
- .warm_reset = s3c_ac97_warm_reset,
- .reset = s3c_ac97_cold_reset,
-};
-
-static int s3c_ac97_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
-{
- u32 ac_glbctrl;
-
- ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
- if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
- ac_glbctrl &= ~S3C_AC97_GLBCTRL_PCMINTM_MASK;
- else
- ac_glbctrl &= ~S3C_AC97_GLBCTRL_PCMOUTTM_MASK;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
- ac_glbctrl |= S3C_AC97_GLBCTRL_PCMINTM_DMA;
- else
- ac_glbctrl |= S3C_AC97_GLBCTRL_PCMOUTTM_DMA;
- break;
-
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- break;
- }
-
- writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
- return 0;
-}
-
-static int s3c_ac97_mic_trigger(struct snd_pcm_substream *substream,
- int cmd, struct snd_soc_dai *dai)
-{
- u32 ac_glbctrl;
-
- ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
- ac_glbctrl &= ~S3C_AC97_GLBCTRL_MICINTM_MASK;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- ac_glbctrl |= S3C_AC97_GLBCTRL_MICINTM_DMA;
- break;
-
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- break;
- }
-
- writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
- return 0;
-}
-
-static const struct snd_soc_dai_ops s3c_ac97_dai_ops = {
- .trigger = s3c_ac97_trigger,
-};
-
-static const struct snd_soc_dai_ops s3c_ac97_mic_dai_ops = {
- .trigger = s3c_ac97_mic_trigger,
-};
-
-static int s3c_ac97_dai_probe(struct snd_soc_dai *dai)
-{
- snd_soc_dai_init_dma_data(dai, &s3c_ac97_pcm_out, &s3c_ac97_pcm_in);
-
- return 0;
-}
-
-static int s3c_ac97_mic_dai_probe(struct snd_soc_dai *dai)
-{
- snd_soc_dai_init_dma_data(dai, NULL, &s3c_ac97_mic_in);
-
- return 0;
-}
-
-static struct snd_soc_dai_driver s3c_ac97_dai[] = {
- [S3C_AC97_DAI_PCM] = {
- .name = "samsung-ac97",
- .bus_control = true,
- .playback = {
- .stream_name = "AC97 Playback",
- .channels_min = 2,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,},
- .capture = {
- .stream_name = "AC97 Capture",
- .channels_min = 2,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,},
- .probe = s3c_ac97_dai_probe,
- .ops = &s3c_ac97_dai_ops,
- },
- [S3C_AC97_DAI_MIC] = {
- .name = "samsung-ac97-mic",
- .bus_control = true,
- .capture = {
- .stream_name = "AC97 Mic Capture",
- .channels_min = 1,
- .channels_max = 1,
- .rates = SNDRV_PCM_RATE_8000_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,},
- .probe = s3c_ac97_mic_dai_probe,
- .ops = &s3c_ac97_mic_dai_ops,
- },
-};
-
-static const struct snd_soc_component_driver s3c_ac97_component = {
- .name = "s3c-ac97",
-};
-
-static int s3c_ac97_probe(struct platform_device *pdev)
-{
- struct resource *mem_res, *irq_res;
- struct s3c_audio_pdata *ac97_pdata;
- int ret;
-
- ac97_pdata = pdev->dev.platform_data;
- if (!ac97_pdata || !ac97_pdata->cfg_gpio) {
- dev_err(&pdev->dev, "cfg_gpio callback not provided!\n");
- return -EINVAL;
- }
-
- /* Check for availability of necessary resource */
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq_res) {
- dev_err(&pdev->dev, "AC97 IRQ not provided!\n");
- return -ENXIO;
- }
-
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- s3c_ac97.regs = devm_ioremap_resource(&pdev->dev, mem_res);
- if (IS_ERR(s3c_ac97.regs))
- return PTR_ERR(s3c_ac97.regs);
-
- s3c_ac97_pcm_out.filter_data = ac97_pdata->dma_playback;
- s3c_ac97_pcm_out.addr = mem_res->start + S3C_AC97_PCM_DATA;
- s3c_ac97_pcm_in.filter_data = ac97_pdata->dma_capture;
- s3c_ac97_pcm_in.addr = mem_res->start + S3C_AC97_PCM_DATA;
- s3c_ac97_mic_in.filter_data = ac97_pdata->dma_capture_mic;
- s3c_ac97_mic_in.addr = mem_res->start + S3C_AC97_MIC_DATA;
-
- init_completion(&s3c_ac97.done);
- mutex_init(&s3c_ac97.lock);
-
- s3c_ac97.ac97_clk = devm_clk_get(&pdev->dev, "ac97");
- if (IS_ERR(s3c_ac97.ac97_clk)) {
- dev_err(&pdev->dev, "ac97 failed to get ac97_clock\n");
- ret = -ENODEV;
- goto err2;
- }
- clk_prepare_enable(s3c_ac97.ac97_clk);
-
- if (ac97_pdata->cfg_gpio(pdev)) {
- dev_err(&pdev->dev, "Unable to configure gpio\n");
- ret = -EINVAL;
- goto err3;
- }
-
- ret = request_irq(irq_res->start, s3c_ac97_irq,
- 0, "AC97", NULL);
- if (ret < 0) {
- dev_err(&pdev->dev, "ac97: interrupt request failed.\n");
- goto err4;
- }
-
- ret = snd_soc_set_ac97_ops(&s3c_ac97_ops);
- if (ret != 0) {
- dev_err(&pdev->dev, "Failed to set AC'97 ops: %d\n", ret);
- goto err4;
- }
-
- ret = samsung_asoc_dma_platform_register(&pdev->dev,
- ac97_pdata->dma_filter,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
- goto err5;
- }
-
- ret = devm_snd_soc_register_component(&pdev->dev, &s3c_ac97_component,
- s3c_ac97_dai, ARRAY_SIZE(s3c_ac97_dai));
- if (ret)
- goto err5;
-
- return 0;
-err5:
- free_irq(irq_res->start, NULL);
-err4:
-err3:
- clk_disable_unprepare(s3c_ac97.ac97_clk);
-err2:
- snd_soc_set_ac97_ops(NULL);
- return ret;
-}
-
-static int s3c_ac97_remove(struct platform_device *pdev)
-{
- struct resource *irq_res;
-
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (irq_res)
- free_irq(irq_res->start, NULL);
-
- clk_disable_unprepare(s3c_ac97.ac97_clk);
- snd_soc_set_ac97_ops(NULL);
-
- return 0;
-}
-
-static struct platform_driver s3c_ac97_driver = {
- .probe = s3c_ac97_probe,
- .remove = s3c_ac97_remove,
- .driver = {
- .name = "samsung-ac97",
- },
-};
-
-module_platform_driver(s3c_ac97_driver);
-
-MODULE_AUTHOR("Jaswinder Singh, <jassisinghbrar@gmail.com>");
-MODULE_DESCRIPTION("AC97 driver for the Samsung SoC");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:samsung-ac97");
diff --git a/sound/soc/samsung/dmaengine.c b/sound/soc/samsung/dmaengine.c
index 9104c98deeb7..cda656e4afc6 100644
--- a/sound/soc/samsung/dmaengine.c
+++ b/sound/soc/samsung/dmaengine.c
@@ -37,12 +37,8 @@ int samsung_asoc_dma_platform_register(struct device *dev, dma_filter_fn filter,
pcm_conf->prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
pcm_conf->compat_filter_fn = filter;
- if (dev->of_node) {
- pcm_conf->chan_names[SNDRV_PCM_STREAM_PLAYBACK] = tx;
- pcm_conf->chan_names[SNDRV_PCM_STREAM_CAPTURE] = rx;
- } else {
- flags |= SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME;
- }
+ pcm_conf->chan_names[SNDRV_PCM_STREAM_PLAYBACK] = tx;
+ pcm_conf->chan_names[SNDRV_PCM_STREAM_CAPTURE] = rx;
return devm_snd_dmaengine_pcm_register(dev, pcm_conf, flags);
}
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 7825bff45ae3..e00974bc5616 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -1029,12 +1029,13 @@ static int samsung_i2s_dai_probe(struct snd_soc_dai *dai)
static int samsung_i2s_dai_remove(struct snd_soc_dai *dai)
{
struct i2s_dai *i2s = snd_soc_dai_get_drvdata(dai);
+ unsigned long flags;
if (!is_secondary(i2s)) {
if (i2s->quirks & QUIRK_NEED_RSTCLR) {
- spin_lock(i2s->lock);
+ spin_lock_irqsave(i2s->lock, flags);
writel(0, i2s->addr + I2SCON);
- spin_unlock(i2s->lock);
+ spin_unlock_irqrestore(i2s->lock, flags);
}
}
@@ -1304,8 +1305,6 @@ static int samsung_i2s_probe(struct platform_device *pdev)
}
pri_dai->dma_playback.addr = regs_base + I2STXD;
pri_dai->dma_capture.addr = regs_base + I2SRXD;
- pri_dai->dma_playback.chan_name = "tx";
- pri_dai->dma_capture.chan_name = "rx";
pri_dai->dma_playback.addr_width = 4;
pri_dai->dma_capture.addr_width = 4;
pri_dai->quirks = quirks;
@@ -1330,7 +1329,6 @@ static int samsung_i2s_probe(struct platform_device *pdev)
sec_dai->lock = &pri_dai->spinlock;
sec_dai->variant_regs = pri_dai->variant_regs;
sec_dai->dma_playback.addr = regs_base + I2STXDS;
- sec_dai->dma_playback.chan_name = "tx-sec";
if (!np) {
sec_dai->dma_playback.filter_data = i2s_pdata->dma_play_sec;
diff --git a/sound/soc/samsung/ln2440sbc_alc650.c b/sound/soc/samsung/ln2440sbc_alc650.c
deleted file mode 100644
index 9342fc270c2b..000000000000
--- a/sound/soc/samsung/ln2440sbc_alc650.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * SoC audio for ln2440sbc
- *
- * Copyright 2007 KonekTel, a.s.
- * Author: Ivan Kuten
- * ivan.kuten@promwad.com
- *
- * Heavily based on smdk2443_wm9710.c
- * Copyright 2007 Wolfson Microelectronics PLC.
- * Author: Graeme Gregory
- * graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <sound/soc.h>
-
-static struct snd_soc_card ln2440sbc;
-
-static struct snd_soc_dai_link ln2440sbc_dai[] = {
-{
- .name = "AC97",
- .stream_name = "AC97 HiFi",
- .cpu_dai_name = "samsung-ac97",
- .codec_dai_name = "ac97-hifi",
- .codec_name = "ac97-codec",
- .platform_name = "samsung-ac97",
-},
-};
-
-static struct snd_soc_card ln2440sbc = {
- .name = "LN2440SBC",
- .owner = THIS_MODULE,
- .dai_link = ln2440sbc_dai,
- .num_links = ARRAY_SIZE(ln2440sbc_dai),
-};
-
-static struct platform_device *ln2440sbc_snd_ac97_device;
-
-static int __init ln2440sbc_init(void)
-{
- int ret;
-
- ln2440sbc_snd_ac97_device = platform_device_alloc("soc-audio", -1);
- if (!ln2440sbc_snd_ac97_device)
- return -ENOMEM;
-
- platform_set_drvdata(ln2440sbc_snd_ac97_device, &ln2440sbc);
- ret = platform_device_add(ln2440sbc_snd_ac97_device);
-
- if (ret)
- platform_device_put(ln2440sbc_snd_ac97_device);
-
- return ret;
-}
-
-static void __exit ln2440sbc_exit(void)
-{
- platform_device_unregister(ln2440sbc_snd_ac97_device);
-}
-
-module_init(ln2440sbc_init);
-module_exit(ln2440sbc_exit);
-
-/* Module information */
-MODULE_AUTHOR("Ivan Kuten");
-MODULE_DESCRIPTION("ALSA SoC ALC650 LN2440SBC");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index c484985812ed..d50a6377c23d 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -499,13 +499,6 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
pcm_pdata = pdev->dev.platform_data;
- /* Check for availability of necessary resource */
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem_res) {
- dev_err(&pdev->dev, "Unable to get register resource\n");
- return -ENXIO;
- }
-
if (pcm_pdata && pcm_pdata->cfg_gpio && pcm_pdata->cfg_gpio(pdev)) {
dev_err(&pdev->dev, "Unable to configure gpio\n");
return -EINVAL;
@@ -519,36 +512,26 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
/* Default is 128fs */
pcm->sclk_per_fs = 128;
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pcm->regs = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(pcm->regs))
+ return PTR_ERR(pcm->regs);
+
pcm->cclk = devm_clk_get(&pdev->dev, "audio-bus");
if (IS_ERR(pcm->cclk)) {
- dev_err(&pdev->dev, "failed to get audio-bus\n");
- ret = PTR_ERR(pcm->cclk);
- goto err1;
+ dev_err(&pdev->dev, "failed to get audio-bus clock\n");
+ return PTR_ERR(pcm->cclk);
}
clk_prepare_enable(pcm->cclk);
/* record our pcm structure for later use in the callbacks */
dev_set_drvdata(&pdev->dev, pcm);
- if (!request_mem_region(mem_res->start,
- resource_size(mem_res), "samsung-pcm")) {
- dev_err(&pdev->dev, "Unable to request register region\n");
- ret = -EBUSY;
- goto err2;
- }
-
- pcm->regs = ioremap(mem_res->start, 0x100);
- if (pcm->regs == NULL) {
- dev_err(&pdev->dev, "cannot ioremap registers\n");
- ret = -ENXIO;
- goto err3;
- }
-
pcm->pclk = devm_clk_get(&pdev->dev, "pcm");
if (IS_ERR(pcm->pclk)) {
- dev_err(&pdev->dev, "failed to get pcm_clock\n");
- ret = -ENOENT;
- goto err4;
+ dev_err(&pdev->dev, "failed to get pcm clock\n");
+ ret = PTR_ERR(pcm->pclk);
+ goto err_dis_cclk;
}
clk_prepare_enable(pcm->pclk);
@@ -569,7 +552,7 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
NULL, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
- goto err5;
+ goto err_dis_pclk;
}
pm_runtime_enable(&pdev->dev);
@@ -578,36 +561,25 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
&s3c_pcm_dai[pdev->id], 1);
if (ret != 0) {
dev_err(&pdev->dev, "failed to get register DAI: %d\n", ret);
- goto err6;
+ goto err_dis_pm;
}
return 0;
-err6:
+
+err_dis_pm:
pm_runtime_disable(&pdev->dev);
-err5:
+err_dis_pclk:
clk_disable_unprepare(pcm->pclk);
-err4:
- iounmap(pcm->regs);
-err3:
- release_mem_region(mem_res->start, resource_size(mem_res));
-err2:
+err_dis_cclk:
clk_disable_unprepare(pcm->cclk);
-err1:
return ret;
}
static int s3c_pcm_dev_remove(struct platform_device *pdev)
{
struct s3c_pcm_info *pcm = &s3c_pcm[pdev->id];
- struct resource *mem_res;
pm_runtime_disable(&pdev->dev);
-
- iounmap(pcm->regs);
-
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(mem_res->start, resource_size(mem_res));
-
clk_disable_unprepare(pcm->cclk);
clk_disable_unprepare(pcm->pclk);
diff --git a/sound/soc/samsung/regs-ac97.h b/sound/soc/samsung/regs-ac97.h
deleted file mode 100644
index a71be45bbffc..000000000000
--- a/sound/soc/samsung/regs-ac97.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2006 Simtec Electronics <linux@simtec.co.uk>
- * http://www.simtec.co.uk/products/SWLINUX/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C2440 AC97 Controller
-*/
-
-#ifndef __SAMSUNG_REGS_AC97_H__
-#define __SAMSUNG_REGS_AC97_H__
-
-#define S3C_AC97_GLBCTRL (0x00)
-
-#define S3C_AC97_GLBCTRL_CODECREADYIE (1<<22)
-#define S3C_AC97_GLBCTRL_PCMOUTURIE (1<<21)
-#define S3C_AC97_GLBCTRL_PCMINORIE (1<<20)
-#define S3C_AC97_GLBCTRL_MICINORIE (1<<19)
-#define S3C_AC97_GLBCTRL_PCMOUTTIE (1<<18)
-#define S3C_AC97_GLBCTRL_PCMINTIE (1<<17)
-#define S3C_AC97_GLBCTRL_MICINTIE (1<<16)
-#define S3C_AC97_GLBCTRL_PCMOUTTM_OFF (0<<12)
-#define S3C_AC97_GLBCTRL_PCMOUTTM_PIO (1<<12)
-#define S3C_AC97_GLBCTRL_PCMOUTTM_DMA (2<<12)
-#define S3C_AC97_GLBCTRL_PCMOUTTM_MASK (3<<12)
-#define S3C_AC97_GLBCTRL_PCMINTM_OFF (0<<10)
-#define S3C_AC97_GLBCTRL_PCMINTM_PIO (1<<10)
-#define S3C_AC97_GLBCTRL_PCMINTM_DMA (2<<10)
-#define S3C_AC97_GLBCTRL_PCMINTM_MASK (3<<10)
-#define S3C_AC97_GLBCTRL_MICINTM_OFF (0<<8)
-#define S3C_AC97_GLBCTRL_MICINTM_PIO (1<<8)
-#define S3C_AC97_GLBCTRL_MICINTM_DMA (2<<8)
-#define S3C_AC97_GLBCTRL_MICINTM_MASK (3<<8)
-#define S3C_AC97_GLBCTRL_TRANSFERDATAENABLE (1<<3)
-#define S3C_AC97_GLBCTRL_ACLINKON (1<<2)
-#define S3C_AC97_GLBCTRL_WARMRESET (1<<1)
-#define S3C_AC97_GLBCTRL_COLDRESET (1<<0)
-
-#define S3C_AC97_GLBSTAT (0x04)
-
-#define S3C_AC97_GLBSTAT_CODECREADY (1<<22)
-#define S3C_AC97_GLBSTAT_PCMOUTUR (1<<21)
-#define S3C_AC97_GLBSTAT_PCMINORI (1<<20)
-#define S3C_AC97_GLBSTAT_MICINORI (1<<19)
-#define S3C_AC97_GLBSTAT_PCMOUTTI (1<<18)
-#define S3C_AC97_GLBSTAT_PCMINTI (1<<17)
-#define S3C_AC97_GLBSTAT_MICINTI (1<<16)
-#define S3C_AC97_GLBSTAT_MAINSTATE_IDLE (0<<0)
-#define S3C_AC97_GLBSTAT_MAINSTATE_INIT (1<<0)
-#define S3C_AC97_GLBSTAT_MAINSTATE_READY (2<<0)
-#define S3C_AC97_GLBSTAT_MAINSTATE_ACTIVE (3<<0)
-#define S3C_AC97_GLBSTAT_MAINSTATE_LP (4<<0)
-#define S3C_AC97_GLBSTAT_MAINSTATE_WARM (5<<0)
-
-#define S3C_AC97_CODEC_CMD (0x08)
-
-#define S3C_AC97_CODEC_CMD_READ (1<<23)
-
-#define S3C_AC97_STAT (0x0c)
-#define S3C_AC97_PCM_ADDR (0x10)
-#define S3C_AC97_PCM_DATA (0x18)
-#define S3C_AC97_MIC_DATA (0x1C)
-
-#endif /* __SAMSUNG_REGS_AC97_H__ */
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index 0a4718207e6e..6d0b8897fa6c 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -35,12 +35,10 @@
#include <linux/platform_data/asoc-s3c.h>
static struct snd_dmaengine_dai_dma_data s3c2412_i2s_pcm_stereo_out = {
- .chan_name = "tx",
.addr_width = 4,
};
static struct snd_dmaengine_dai_dma_data s3c2412_i2s_pcm_stereo_in = {
- .chan_name = "rx",
.addr_width = 4,
};
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index 9052f6a7073e..07f5091b33e8 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -30,15 +30,11 @@
#include "dma.h"
#include "s3c24xx-i2s.h"
-#include <linux/platform_data/asoc-s3c.h>
-
static struct snd_dmaengine_dai_dma_data s3c24xx_i2s_pcm_stereo_out = {
- .chan_name = "tx",
.addr_width = 2,
};
static struct snd_dmaengine_dai_dma_data s3c24xx_i2s_pcm_stereo_in = {
- .chan_name = "rx",
.addr_width = 2,
};
@@ -58,8 +54,6 @@ static void s3c24xx_snd_txctrl(int on)
u32 iiscon;
u32 iismod;
- pr_debug("Entered %s\n", __func__);
-
iisfcon = readl(s3c24xx_i2s.regs + S3C2410_IISFCON);
iiscon = readl(s3c24xx_i2s.regs + S3C2410_IISCON);
iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
@@ -103,8 +97,6 @@ static void s3c24xx_snd_rxctrl(int on)
u32 iiscon;
u32 iismod;
- pr_debug("Entered %s\n", __func__);
-
iisfcon = readl(s3c24xx_i2s.regs + S3C2410_IISFCON);
iiscon = readl(s3c24xx_i2s.regs + S3C2410_IISCON);
iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
@@ -151,8 +143,6 @@ static int s3c24xx_snd_lrsync(void)
u32 iiscon;
int timeout = 50; /* 5ms */
- pr_debug("Entered %s\n", __func__);
-
while (1) {
iiscon = readl(s3c24xx_i2s.regs + S3C2410_IISCON);
if (iiscon & S3C2410_IISCON_LRINDEX)
@@ -171,8 +161,6 @@ static int s3c24xx_snd_lrsync(void)
*/
static inline int s3c24xx_snd_is_clkmaster(void)
{
- pr_debug("Entered %s\n", __func__);
-
return (readl(s3c24xx_i2s.regs + S3C2410_IISMOD) & S3C2410_IISMOD_SLAVE) ? 0:1;
}
@@ -184,8 +172,6 @@ static int s3c24xx_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
{
u32 iismod;
- pr_debug("Entered %s\n", __func__);
-
iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
pr_debug("hw_params r: IISMOD: %x \n", iismod);
@@ -213,6 +199,7 @@ static int s3c24xx_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
writel(iismod, s3c24xx_i2s.regs + S3C2410_IISMOD);
pr_debug("hw_params w: IISMOD: %x \n", iismod);
+
return 0;
}
@@ -223,8 +210,6 @@ static int s3c24xx_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_dmaengine_dai_dma_data *dma_data;
u32 iismod;
- pr_debug("Entered %s\n", __func__);
-
dma_data = snd_soc_dai_get_dma_data(dai, substream);
/* Working copies of register */
@@ -246,6 +231,7 @@ static int s3c24xx_i2s_hw_params(struct snd_pcm_substream *substream,
writel(iismod, s3c24xx_i2s.regs + S3C2410_IISMOD);
pr_debug("hw_params w: IISMOD: %x\n", iismod);
+
return 0;
}
@@ -254,8 +240,6 @@ static int s3c24xx_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
{
int ret = 0;
- pr_debug("Entered %s\n", __func__);
-
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
@@ -297,8 +281,6 @@ static int s3c24xx_i2s_set_sysclk(struct snd_soc_dai *cpu_dai,
{
u32 iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
- pr_debug("Entered %s\n", __func__);
-
iismod &= ~S3C2440_IISMOD_MPLL;
switch (clk_id) {
@@ -323,8 +305,6 @@ static int s3c24xx_i2s_set_clkdiv(struct snd_soc_dai *cpu_dai,
{
u32 reg;
- pr_debug("Entered %s\n", __func__);
-
switch (div_id) {
case S3C24XX_DIV_BCLK:
reg = readl(s3c24xx_i2s.regs + S3C2410_IISMOD) & ~S3C2410_IISMOD_FS_MASK;
@@ -358,8 +338,6 @@ EXPORT_SYMBOL_GPL(s3c24xx_i2s_get_clockrate);
static int s3c24xx_i2s_probe(struct snd_soc_dai *dai)
{
- pr_debug("Entered %s\n", __func__);
-
snd_soc_dai_init_dma_data(dai, &s3c24xx_i2s_pcm_stereo_out,
&s3c24xx_i2s_pcm_stereo_in);
@@ -385,8 +363,6 @@ static int s3c24xx_i2s_probe(struct snd_soc_dai *dai)
#ifdef CONFIG_PM
static int s3c24xx_i2s_suspend(struct snd_soc_dai *cpu_dai)
{
- pr_debug("Entered %s\n", __func__);
-
s3c24xx_i2s.iiscon = readl(s3c24xx_i2s.regs + S3C2410_IISCON);
s3c24xx_i2s.iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
s3c24xx_i2s.iisfcon = readl(s3c24xx_i2s.regs + S3C2410_IISFCON);
@@ -399,7 +375,6 @@ static int s3c24xx_i2s_suspend(struct snd_soc_dai *cpu_dai)
static int s3c24xx_i2s_resume(struct snd_soc_dai *cpu_dai)
{
- pr_debug("Entered %s\n", __func__);
clk_prepare_enable(s3c24xx_i2s.iis_clk);
writel(s3c24xx_i2s.iiscon, s3c24xx_i2s.regs + S3C2410_IISCON);
@@ -414,7 +389,6 @@ static int s3c24xx_i2s_resume(struct snd_soc_dai *cpu_dai)
#define s3c24xx_i2s_resume NULL
#endif
-
#define S3C24XX_I2S_RATES \
(SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_16000 | \
SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
@@ -451,41 +425,28 @@ static const struct snd_soc_component_driver s3c24xx_i2s_component = {
static int s3c24xx_iis_dev_probe(struct platform_device *pdev)
{
- int ret = 0;
struct resource *res;
- struct s3c_audio_pdata *pdata = dev_get_platdata(&pdev->dev);
-
- if (!pdata) {
- dev_err(&pdev->dev, "missing platform data");
- return -ENXIO;
- }
+ int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Can't get IO resource.\n");
- return -ENOENT;
- }
s3c24xx_i2s.regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(s3c24xx_i2s.regs))
return PTR_ERR(s3c24xx_i2s.regs);
s3c24xx_i2s_pcm_stereo_out.addr = res->start + S3C2410_IISFIFO;
- s3c24xx_i2s_pcm_stereo_out.filter_data = pdata->dma_playback;
s3c24xx_i2s_pcm_stereo_in.addr = res->start + S3C2410_IISFIFO;
- s3c24xx_i2s_pcm_stereo_in.filter_data = pdata->dma_capture;
- ret = samsung_asoc_dma_platform_register(&pdev->dev,
- pdata->dma_filter,
+ ret = samsung_asoc_dma_platform_register(&pdev->dev, NULL,
NULL, NULL);
if (ret) {
- pr_err("failed to register the dma: %d\n", ret);
+ dev_err(&pdev->dev, "Failed to register the DMA: %d\n", ret);
return ret;
}
ret = devm_snd_soc_register_component(&pdev->dev,
&s3c24xx_i2s_component, &s3c24xx_i2s_dai, 1);
if (ret)
- pr_err("failed to register the dai\n");
+ dev_err(&pdev->dev, "Failed to register the DAI\n");
return ret;
}
diff --git a/sound/soc/samsung/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c
index 7853fbe6ccc9..81a78940967c 100644
--- a/sound/soc/samsung/s3c24xx_uda134x.c
+++ b/sound/soc/samsung/s3c24xx_uda134x.c
@@ -19,9 +19,15 @@
#include <sound/s3c24xx_uda134x.h>
#include "regs-iis.h"
-
#include "s3c24xx-i2s.h"
+struct s3c24xx_uda134x {
+ struct clk *xtal;
+ struct clk *pclk;
+ struct mutex clk_lock;
+ int clk_users;
+};
+
/* #define ENFORCE_RATES 1 */
/*
Unfortunately the S3C24XX in master mode has a limited capacity of
@@ -36,15 +42,6 @@
possible an error will be returned.
*/
-static struct clk *xtal;
-static struct clk *pclk;
-/* this is need because we don't have a place where to keep the
- * pointers to the clocks in each substream. We get the clocks only
- * when we are actually using them so we don't block stuff like
- * frequency change or oscillator power-off */
-static int clk_users;
-static DEFINE_MUTEX(clk_lock);
-
static unsigned int rates[33 * 2];
#ifdef ENFORCE_RATES
static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
@@ -57,26 +54,24 @@ static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct s3c24xx_uda134x *priv = snd_soc_card_get_drvdata(rtd->card);
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-#ifdef ENFORCE_RATES
- struct snd_pcm_runtime *runtime = substream->runtime;
-#endif
int ret = 0;
- mutex_lock(&clk_lock);
+ mutex_lock(&priv->clk_lock);
- if (clk_users == 0) {
- xtal = clk_get(rtd->dev, "xtal");
- if (IS_ERR(xtal)) {
+ if (priv->clk_users == 0) {
+ priv->xtal = clk_get(rtd->dev, "xtal");
+ if (IS_ERR(priv->xtal)) {
dev_err(rtd->dev, "%s cannot get xtal\n", __func__);
- ret = PTR_ERR(xtal);
+ ret = PTR_ERR(priv->xtal);
} else {
- pclk = clk_get(cpu_dai->dev, "iis");
- if (IS_ERR(pclk)) {
+ priv->pclk = clk_get(cpu_dai->dev, "iis");
+ if (IS_ERR(priv->pclk)) {
dev_err(rtd->dev, "%s cannot get pclk\n",
__func__);
- clk_put(xtal);
- ret = PTR_ERR(pclk);
+ clk_put(priv->xtal);
+ ret = PTR_ERR(priv->pclk);
}
}
if (!ret) {
@@ -85,18 +80,19 @@ static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
for (i = 0; i < 2; i++) {
int fs = i ? 256 : 384;
- rates[i*33] = clk_get_rate(xtal) / fs;
+ rates[i*33] = clk_get_rate(priv->xtal) / fs;
for (j = 1; j < 33; j++)
- rates[i*33 + j] = clk_get_rate(pclk) /
+ rates[i*33 + j] = clk_get_rate(priv->pclk) /
(j * fs);
}
}
}
- clk_users += 1;
- mutex_unlock(&clk_lock);
+ priv->clk_users += 1;
+ mutex_unlock(&priv->clk_lock);
+
if (!ret) {
#ifdef ENFORCE_RATES
- ret = snd_pcm_hw_constraint_list(runtime, 0,
+ ret = snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
&hw_constraints_rates);
if (ret < 0)
@@ -109,15 +105,18 @@ static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
static void s3c24xx_uda134x_shutdown(struct snd_pcm_substream *substream)
{
- mutex_lock(&clk_lock);
- clk_users -= 1;
- if (clk_users == 0) {
- clk_put(xtal);
- xtal = NULL;
- clk_put(pclk);
- pclk = NULL;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct s3c24xx_uda134x *priv = snd_soc_card_get_drvdata(rtd->card);
+
+ mutex_lock(&priv->clk_lock);
+ priv->clk_users -= 1;
+ if (priv->clk_users == 0) {
+ clk_put(priv->xtal);
+ priv->xtal = NULL;
+ clk_put(priv->pclk);
+ priv->pclk = NULL;
}
- mutex_unlock(&clk_lock);
+ mutex_unlock(&priv->clk_lock);
}
static int s3c24xx_uda134x_hw_params(struct snd_pcm_substream *substream,
@@ -228,10 +227,18 @@ static struct snd_soc_card snd_soc_s3c24xx_uda134x = {
static int s3c24xx_uda134x_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &snd_soc_s3c24xx_uda134x;
+ struct s3c24xx_uda134x *priv;
int ret;
- platform_set_drvdata(pdev, card);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->clk_lock);
+
card->dev = &pdev->dev;
+ platform_set_drvdata(pdev, card);
+ snd_soc_card_set_drvdata(card, priv);
ret = devm_snd_soc_register_card(&pdev->dev, card);
if (ret)
diff --git a/sound/soc/samsung/smdk2443_wm9710.c b/sound/soc/samsung/smdk2443_wm9710.c
deleted file mode 100644
index c390aad68cfb..000000000000
--- a/sound/soc/samsung/smdk2443_wm9710.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * smdk2443_wm9710.c -- SoC audio for smdk2443
- *
- * Copyright 2007 Wolfson Microelectronics PLC.
- * Author: Graeme Gregory
- * graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <sound/soc.h>
-
-static struct snd_soc_card smdk2443;
-
-static struct snd_soc_dai_link smdk2443_dai[] = {
-{
- .name = "AC97",
- .stream_name = "AC97 HiFi",
- .cpu_dai_name = "samsung-ac97",
- .codec_dai_name = "ac97-hifi",
- .codec_name = "ac97-codec",
- .platform_name = "samsung-ac97",
-},
-};
-
-static struct snd_soc_card smdk2443 = {
- .name = "SMDK2443",
- .owner = THIS_MODULE,
- .dai_link = smdk2443_dai,
- .num_links = ARRAY_SIZE(smdk2443_dai),
-};
-
-static struct platform_device *smdk2443_snd_ac97_device;
-
-static int __init smdk2443_init(void)
-{
- int ret;
-
- smdk2443_snd_ac97_device = platform_device_alloc("soc-audio", -1);
- if (!smdk2443_snd_ac97_device)
- return -ENOMEM;
-
- platform_set_drvdata(smdk2443_snd_ac97_device, &smdk2443);
- ret = platform_device_add(smdk2443_snd_ac97_device);
-
- if (ret)
- platform_device_put(smdk2443_snd_ac97_device);
-
- return ret;
-}
-
-static void __exit smdk2443_exit(void)
-{
- platform_device_unregister(smdk2443_snd_ac97_device);
-}
-
-module_init(smdk2443_init);
-module_exit(smdk2443_exit);
-
-/* Module information */
-MODULE_AUTHOR("Graeme Gregory, graeme.gregory@wolfsonmicro.com, www.wolfsonmicro.com");
-MODULE_DESCRIPTION("ALSA SoC WM9710 SMDK2443");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/smdk_wm8580.c b/sound/soc/samsung/smdk_wm8580.c
index 548bfd993788..de724ce7b955 100644
--- a/sound/soc/samsung/smdk_wm8580.c
+++ b/sound/soc/samsung/smdk_wm8580.c
@@ -14,8 +14,6 @@
#include <sound/soc.h>
#include <sound/pcm_params.h>
-#include <asm/mach-types.h>
-
#include "../codecs/wm8580.h"
#include "i2s.h"
@@ -147,7 +145,6 @@ static int smdk_wm8580_init_paiftx(struct snd_soc_pcm_runtime *rtd)
enum {
PRI_PLAYBACK = 0,
PRI_CAPTURE,
- SEC_PLAYBACK,
};
#define SMDK_DAI_FMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \
@@ -157,7 +154,7 @@ static struct snd_soc_dai_link smdk_dai[] = {
[PRI_PLAYBACK] = { /* Primary Playback i/f */
.name = "WM8580 PAIF RX",
.stream_name = "Playback",
- .cpu_dai_name = "samsung-i2s.0",
+ .cpu_dai_name = "samsung-i2s.2",
.codec_dai_name = "wm8580-hifi-playback",
.platform_name = "samsung-i2s.0",
.codec_name = "wm8580.0-001b",
@@ -167,7 +164,7 @@ static struct snd_soc_dai_link smdk_dai[] = {
[PRI_CAPTURE] = { /* Primary Capture i/f */
.name = "WM8580 PAIF TX",
.stream_name = "Capture",
- .cpu_dai_name = "samsung-i2s.0",
+ .cpu_dai_name = "samsung-i2s.2",
.codec_dai_name = "wm8580-hifi-capture",
.platform_name = "samsung-i2s.0",
.codec_name = "wm8580.0-001b",
@@ -175,23 +172,13 @@ static struct snd_soc_dai_link smdk_dai[] = {
.init = smdk_wm8580_init_paiftx,
.ops = &smdk_ops,
},
- [SEC_PLAYBACK] = { /* Sec_Fifo Playback i/f */
- .name = "Sec_FIFO TX",
- .stream_name = "Playback",
- .cpu_dai_name = "samsung-i2s-sec",
- .codec_dai_name = "wm8580-hifi-playback",
- .platform_name = "samsung-i2s-sec",
- .codec_name = "wm8580.0-001b",
- .dai_fmt = SMDK_DAI_FMT,
- .ops = &smdk_ops,
- },
};
static struct snd_soc_card smdk = {
.name = "SMDK-I2S",
.owner = THIS_MODULE,
.dai_link = smdk_dai,
- .num_links = 2,
+ .num_links = ARRAY_SIZE(smdk_dai),
.dapm_widgets = smdk_wm8580_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(smdk_wm8580_dapm_widgets),
@@ -204,17 +191,6 @@ static struct platform_device *smdk_snd_device;
static int __init smdk_audio_init(void)
{
int ret;
- char *str;
-
- if (machine_is_smdkc100()
- || machine_is_smdkv210() || machine_is_smdkc110()) {
- smdk.num_links = 3;
- } else if (machine_is_smdk6410()) {
- str = (char *)smdk_dai[PRI_PLAYBACK].cpu_dai_name;
- str[strlen(str) - 1] = '2';
- str = (char *)smdk_dai[PRI_CAPTURE].cpu_dai_name;
- str[strlen(str) - 1] = '2';
- }
smdk_snd_device = platform_device_alloc("soc-audio", -1);
if (!smdk_snd_device)
diff --git a/sound/soc/samsung/smdk_wm8580pcm.c b/sound/soc/samsung/smdk_wm8580pcm.c
deleted file mode 100644
index a6d223310c67..000000000000
--- a/sound/soc/samsung/smdk_wm8580pcm.c
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * sound/soc/samsung/smdk_wm8580pcm.c
- *
- * Copyright (c) 2011 Samsung Electronics Co. Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-#include <linux/module.h>
-#include <sound/soc.h>
-#include <sound/pcm_params.h>
-#include <sound/pcm.h>
-
-#include <asm/mach-types.h>
-
-#include "../codecs/wm8580.h"
-#include "pcm.h"
-
-/*
- * Board Settings:
- * o '1' means 'ON'
- * o '0' means 'OFF'
- * o 'X' means 'Don't care'
- *
- * SMDK6410 Base B/D: CFG1-0000, CFG2-1111
- * SMDKC110, SMDKV210: CFGB11-100100, CFGB12-0000
- */
-
-#define SMDK_WM8580_EXT_OSC 12000000
-#define SMDK_WM8580_EXT_MCLK 4096000
-#define SMDK_WM8580_EXT_VOICE 2048000
-
-static unsigned long mclk_freq;
-static unsigned long xtal_freq;
-
-/*
- * If MCLK clock directly gets from XTAL, we don't have to use PLL
- * to make MCLK, but if XTAL clock source connects with other codec
- * pin (like XTI), we should have to set codec's PLL to make MCLK.
- * Because Samsung SoC does not support pcmcdclk output like I2S.
- */
-
-static int smdk_wm8580_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int rfs, ret;
-
- switch (params_rate(params)) {
- case 8000:
- break;
- default:
- printk(KERN_ERR "%s:%d Sampling Rate %u not supported!\n",
- __func__, __LINE__, params_rate(params));
- return -EINVAL;
- }
-
- rfs = mclk_freq / params_rate(params) / 2;
-
- if (mclk_freq == xtal_freq) {
- ret = snd_soc_dai_set_sysclk(codec_dai, WM8580_CLKSRC_MCLK,
- mclk_freq, SND_SOC_CLOCK_IN);
- if (ret < 0)
- return ret;
-
- ret = snd_soc_dai_set_clkdiv(codec_dai, WM8580_MCLK,
- WM8580_CLKSRC_MCLK);
- if (ret < 0)
- return ret;
- } else {
- ret = snd_soc_dai_set_sysclk(codec_dai, WM8580_CLKSRC_PLLA,
- mclk_freq, SND_SOC_CLOCK_IN);
- if (ret < 0)
- return ret;
-
- ret = snd_soc_dai_set_clkdiv(codec_dai, WM8580_MCLK,
- WM8580_CLKSRC_PLLA);
- if (ret < 0)
- return ret;
-
- ret = snd_soc_dai_set_pll(codec_dai, WM8580_PLLA, 0,
- xtal_freq, mclk_freq);
- if (ret < 0)
- return ret;
- }
-
- /* Set PCM source clock on CPU */
- ret = snd_soc_dai_set_sysclk(cpu_dai, S3C_PCM_CLKSRC_MUX,
- mclk_freq, SND_SOC_CLOCK_IN);
- if (ret < 0)
- return ret;
-
- /* Set SCLK_DIV for making bclk */
- ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_PCM_SCLK_PER_FS, rfs);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static struct snd_soc_ops smdk_wm8580_pcm_ops = {
- .hw_params = smdk_wm8580_pcm_hw_params,
-};
-
-#define SMDK_DAI_FMT (SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF | \
- SND_SOC_DAIFMT_CBS_CFS)
-
-static struct snd_soc_dai_link smdk_dai[] = {
- {
- .name = "WM8580 PAIF PCM RX",
- .stream_name = "Playback",
- .cpu_dai_name = "samsung-pcm.0",
- .codec_dai_name = "wm8580-hifi-playback",
- .platform_name = "samsung-audio",
- .codec_name = "wm8580.0-001b",
- .dai_fmt = SMDK_DAI_FMT,
- .ops = &smdk_wm8580_pcm_ops,
- }, {
- .name = "WM8580 PAIF PCM TX",
- .stream_name = "Capture",
- .cpu_dai_name = "samsung-pcm.0",
- .codec_dai_name = "wm8580-hifi-capture",
- .platform_name = "samsung-pcm.0",
- .codec_name = "wm8580.0-001b",
- .dai_fmt = SMDK_DAI_FMT,
- .ops = &smdk_wm8580_pcm_ops,
- },
-};
-
-static struct snd_soc_card smdk_pcm = {
- .name = "SMDK-PCM",
- .owner = THIS_MODULE,
- .dai_link = smdk_dai,
- .num_links = 2,
-};
-
-/*
- * After SMDKC110 Base Board's Rev is '0.1', 12MHz External OSC(X1)
- * is absent (or not connected), so we connect EXT_VOICE_CLK(OSC4),
- * 2.0484Mhz, directly with MCLK both Codec and SoC.
- */
-static int snd_smdk_probe(struct platform_device *pdev)
-{
- int ret = 0;
-
- xtal_freq = SMDK_WM8580_EXT_OSC;
- mclk_freq = SMDK_WM8580_EXT_MCLK;
-
- if (machine_is_smdkc110() || machine_is_smdkv210())
- xtal_freq = mclk_freq = SMDK_WM8580_EXT_VOICE;
-
- smdk_pcm.dev = &pdev->dev;
- ret = devm_snd_soc_register_card(&pdev->dev, &smdk_pcm);
- if (ret)
- dev_err(&pdev->dev, "snd_soc_register_card failed %d\n", ret);
-
- return ret;
-}
-
-static struct platform_driver snd_smdk_driver = {
- .driver = {
- .name = "samsung-smdk-pcm",
- },
- .probe = snd_smdk_probe,
-};
-
-module_platform_driver(snd_smdk_driver);
-
-MODULE_AUTHOR("Sangbeom Kim, <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("ALSA SoC SMDK WM8580 for PCM");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/smdk_wm9713.c b/sound/soc/samsung/smdk_wm9713.c
deleted file mode 100644
index 0d20e4ed27aa..000000000000
--- a/sound/soc/samsung/smdk_wm9713.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * smdk_wm9713.c -- SoC audio for SMDK
- *
- * Copyright 2010 Samsung Electronics Co. Ltd.
- * Author: Jaswinder Singh Brar <jassisinghbrar@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <sound/soc.h>
-
-static struct snd_soc_card smdk;
-
-/*
- * Default CFG switch settings to use this driver:
- *
- * SMDK6410: Set CFG1 1-3 On, CFG2 1-4 Off
- * SMDKC100: Set CFG6 1-3 On, CFG7 1 On
- * SMDKC110: Set CFGB10 1-2 Off, CFGB12 1-3 On
- * SMDKV210: Set CFGB10 1-2 Off, CFGB12 1-3 On
- * SMDKV310: Set CFG2 1-2 Off, CFG4 All On, CFG7 All Off, CFG8 1-On
- */
-
-/*
- Playback (HeadPhone):-
- $ amixer sset 'Headphone' unmute
- $ amixer sset 'Right Headphone Out Mux' 'Headphone'
- $ amixer sset 'Left Headphone Out Mux' 'Headphone'
- $ amixer sset 'Right HP Mixer PCM' unmute
- $ amixer sset 'Left HP Mixer PCM' unmute
-
- Capture (LineIn):-
- $ amixer sset 'Right Capture Source' 'Line'
- $ amixer sset 'Left Capture Source' 'Line'
-*/
-
-static struct snd_soc_dai_link smdk_dai = {
- .name = "AC97",
- .stream_name = "AC97 PCM",
- .platform_name = "samsung-ac97",
- .cpu_dai_name = "samsung-ac97",
- .codec_dai_name = "wm9713-hifi",
- .codec_name = "wm9713-codec",
-};
-
-static struct snd_soc_card smdk = {
- .name = "SMDK WM9713",
- .owner = THIS_MODULE,
- .dai_link = &smdk_dai,
- .num_links = 1,
-};
-
-static struct platform_device *smdk_snd_wm9713_device;
-static struct platform_device *smdk_snd_ac97_device;
-
-static int __init smdk_init(void)
-{
- int ret;
-
- smdk_snd_wm9713_device = platform_device_alloc("wm9713-codec", -1);
- if (!smdk_snd_wm9713_device)
- return -ENOMEM;
-
- ret = platform_device_add(smdk_snd_wm9713_device);
- if (ret)
- goto err1;
-
- smdk_snd_ac97_device = platform_device_alloc("soc-audio", -1);
- if (!smdk_snd_ac97_device) {
- ret = -ENOMEM;
- goto err2;
- }
-
- platform_set_drvdata(smdk_snd_ac97_device, &smdk);
-
- ret = platform_device_add(smdk_snd_ac97_device);
- if (ret)
- goto err3;
-
- return 0;
-
-err3:
- platform_device_put(smdk_snd_ac97_device);
-err2:
- platform_device_del(smdk_snd_wm9713_device);
-err1:
- platform_device_put(smdk_snd_wm9713_device);
- return ret;
-}
-
-static void __exit smdk_exit(void)
-{
- platform_device_unregister(smdk_snd_ac97_device);
- platform_device_unregister(smdk_snd_wm9713_device);
-}
-
-module_init(smdk_init);
-module_exit(smdk_exit);
-
-/* Module information */
-MODULE_AUTHOR("Jaswinder Singh Brar, jassisinghbrar@gmail.com");
-MODULE_DESCRIPTION("ALSA SoC SMDK+WM9713");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/tm2_wm5110.c b/sound/soc/samsung/tm2_wm5110.c
new file mode 100644
index 000000000000..5cdf7d19b87f
--- /dev/null
+++ b/sound/soc/samsung/tm2_wm5110.c
@@ -0,0 +1,552 @@
+/*
+ * Copyright (C) 2015 - 2016 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Inha Song <ideal.song@samsung.com>
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "i2s.h"
+#include "../codecs/wm5110.h"
+
+/*
+ * The source clock is XCLKOUT with its mux set to the external fixed rate
+ * oscillator (XXTI).
+ */
+#define MCLK_RATE 24000000U
+
+#define TM2_DAI_AIF1 0
+#define TM2_DAI_AIF2 1
+
+struct tm2_machine_priv {
+ struct snd_soc_codec *codec;
+ unsigned int sysclk_rate;
+ struct gpio_desc *gpio_mic_bias;
+};
+
+static int tm2_start_sysclk(struct snd_soc_card *card)
+{
+ struct tm2_machine_priv *priv = snd_soc_card_get_drvdata(card);
+ struct snd_soc_codec *codec = priv->codec;
+ int ret;
+
+ ret = snd_soc_codec_set_pll(codec, WM5110_FLL1_REFCLK,
+ ARIZONA_FLL_SRC_MCLK1,
+ MCLK_RATE,
+ priv->sysclk_rate);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set FLL1 source: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_codec_set_pll(codec, WM5110_FLL1,
+ ARIZONA_FLL_SRC_MCLK1,
+ MCLK_RATE,
+ priv->sysclk_rate);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to start FLL1: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_SYSCLK,
+ ARIZONA_CLK_SRC_FLL1,
+ priv->sysclk_rate,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set SYSCLK source: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tm2_stop_sysclk(struct snd_soc_card *card)
+{
+ struct tm2_machine_priv *priv = snd_soc_card_get_drvdata(card);
+ struct snd_soc_codec *codec = priv->codec;
+ int ret;
+
+ ret = snd_soc_codec_set_pll(codec, WM5110_FLL1, 0, 0, 0);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to stop FLL1: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_SYSCLK,
+ ARIZONA_CLK_SRC_FLL1, 0, 0);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to stop SYSCLK: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tm2_aif1_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct tm2_machine_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+
+ switch (params_rate(params)) {
+ case 4000:
+ case 8000:
+ case 12000:
+ case 16000:
+ case 24000:
+ case 32000:
+ case 48000:
+ case 96000:
+ case 192000:
+ /* Highest possible SYSCLK frequency: 147.456MHz */
+ priv->sysclk_rate = 147456000U;
+ break;
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ case 176400:
+ /* Highest possible SYSCLK frequency: 135.4752 MHz */
+ priv->sysclk_rate = 135475200U;
+ break;
+ default:
+ dev_err(codec->dev, "Not supported sample rate: %d\n",
+ params_rate(params));
+ return -EINVAL;
+ }
+
+ return tm2_start_sysclk(rtd->card);
+}
+
+static struct snd_soc_ops tm2_aif1_ops = {
+ .hw_params = tm2_aif1_hw_params,
+};
+
+static int tm2_aif2_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ unsigned int asyncclk_rate;
+ int ret;
+
+ switch (params_rate(params)) {
+ case 8000:
+ case 12000:
+ case 16000:
+ /* Highest possible ASYNCCLK frequency: 49.152MHz */
+ asyncclk_rate = 49152000U;
+ break;
+ case 11025:
+ /* Highest possible ASYNCCLK frequency: 45.1584 MHz */
+ asyncclk_rate = 45158400U;
+ break;
+ default:
+ dev_err(codec->dev, "Not supported sample rate: %d\n",
+ params_rate(params));
+ return -EINVAL;
+ }
+
+ ret = snd_soc_codec_set_pll(codec, WM5110_FLL2_REFCLK,
+ ARIZONA_FLL_SRC_MCLK1,
+ MCLK_RATE,
+ asyncclk_rate);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set FLL2 source: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_codec_set_pll(codec, WM5110_FLL2,
+ ARIZONA_FLL_SRC_MCLK1,
+ MCLK_RATE,
+ asyncclk_rate);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to start FLL2: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_ASYNCCLK,
+ ARIZONA_CLK_SRC_FLL2,
+ asyncclk_rate,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set ASYNCCLK source: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tm2_aif2_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ int ret;
+
+ /* disable FLL2 */
+ ret = snd_soc_codec_set_pll(codec, WM5110_FLL2, ARIZONA_FLL_SRC_MCLK1,
+ 0, 0);
+ if (ret < 0)
+ dev_err(codec->dev, "Failed to stop FLL2: %d\n", ret);
+
+ return ret;
+}
+
+static struct snd_soc_ops tm2_aif2_ops = {
+ .hw_params = tm2_aif2_hw_params,
+ .hw_free = tm2_aif2_hw_free,
+};
+
+static int tm2_mic_bias(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_card *card = w->dapm->card;
+ struct tm2_machine_priv *priv = snd_soc_card_get_drvdata(card);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ gpiod_set_value_cansleep(priv->gpio_mic_bias, 1);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ gpiod_set_value_cansleep(priv->gpio_mic_bias, 0);
+ break;
+ }
+
+ return 0;
+}
+
+static int tm2_set_bias_level(struct snd_soc_card *card,
+ struct snd_soc_dapm_context *dapm,
+ enum snd_soc_bias_level level)
+{
+ struct snd_soc_pcm_runtime *rtd;
+
+ rtd = snd_soc_get_pcm_runtime(card, card->dai_link[0].name);
+
+ if (dapm->dev != rtd->codec_dai->dev)
+ return 0;
+
+ switch (level) {
+ case SND_SOC_BIAS_STANDBY:
+ if (card->dapm.bias_level == SND_SOC_BIAS_OFF)
+ tm2_start_sysclk(card);
+ break;
+ case SND_SOC_BIAS_OFF:
+ tm2_stop_sysclk(card);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static struct snd_soc_aux_dev tm2_speaker_amp_dev;
+
+static int tm2_late_probe(struct snd_soc_card *card)
+{
+ struct tm2_machine_priv *priv = snd_soc_card_get_drvdata(card);
+ struct snd_soc_dai_link_component dlc = { 0 };
+ unsigned int ch_map[] = { 0, 1 };
+ struct snd_soc_dai *amp_pdm_dai;
+ struct snd_soc_pcm_runtime *rtd;
+ struct snd_soc_dai *aif1_dai;
+ struct snd_soc_dai *aif2_dai;
+ int ret;
+
+ rtd = snd_soc_get_pcm_runtime(card, card->dai_link[TM2_DAI_AIF1].name);
+ aif1_dai = rtd->codec_dai;
+ priv->codec = rtd->codec;
+
+ ret = snd_soc_dai_set_sysclk(aif1_dai, ARIZONA_CLK_SYSCLK, 0, 0);
+ if (ret < 0) {
+ dev_err(aif1_dai->dev, "Failed to set SYSCLK: %d\n", ret);
+ return ret;
+ }
+
+ rtd = snd_soc_get_pcm_runtime(card, card->dai_link[TM2_DAI_AIF2].name);
+ aif2_dai = rtd->codec_dai;
+
+ ret = snd_soc_dai_set_sysclk(aif2_dai, ARIZONA_CLK_ASYNCCLK, 0, 0);
+ if (ret < 0) {
+ dev_err(aif2_dai->dev, "Failed to set ASYNCCLK: %d\n", ret);
+ return ret;
+ }
+
+ dlc.of_node = tm2_speaker_amp_dev.codec_of_node;
+ amp_pdm_dai = snd_soc_find_dai(&dlc);
+ if (!amp_pdm_dai)
+ return -ENODEV;
+
+ /* Set the MAX98504 V/I sense PDM Tx DAI channel mapping */
+ ret = snd_soc_dai_set_channel_map(amp_pdm_dai, ARRAY_SIZE(ch_map),
+ ch_map, 0, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_soc_dai_set_tdm_slot(amp_pdm_dai, 0x3, 0x0, 2, 16);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new tm2_controls[] = {
+ SOC_DAPM_PIN_SWITCH("HP"),
+ SOC_DAPM_PIN_SWITCH("SPK"),
+ SOC_DAPM_PIN_SWITCH("RCV"),
+ SOC_DAPM_PIN_SWITCH("VPS"),
+ SOC_DAPM_PIN_SWITCH("HDMI"),
+
+ SOC_DAPM_PIN_SWITCH("Main Mic"),
+ SOC_DAPM_PIN_SWITCH("Sub Mic"),
+ SOC_DAPM_PIN_SWITCH("Third Mic"),
+
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+};
+
+const struct snd_soc_dapm_widget tm2_dapm_widgets[] = {
+ SND_SOC_DAPM_HP("HP", NULL),
+ SND_SOC_DAPM_SPK("SPK", NULL),
+ SND_SOC_DAPM_SPK("RCV", NULL),
+ SND_SOC_DAPM_LINE("VPS", NULL),
+ SND_SOC_DAPM_LINE("HDMI", NULL),
+
+ SND_SOC_DAPM_MIC("Main Mic", tm2_mic_bias),
+ SND_SOC_DAPM_MIC("Sub Mic", NULL),
+ SND_SOC_DAPM_MIC("Third Mic", NULL),
+
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+};
+
+static const struct snd_soc_component_driver tm2_component = {
+ .name = "tm2-audio",
+};
+
+static struct snd_soc_dai_driver tm2_ext_dai[] = {
+ {
+ .name = "Voice call",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 4,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 4,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ },
+ {
+ .name = "Bluetooth",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 4,
+ .rate_min = 8000,
+ .rate_max = 16000,
+ .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 16000,
+ .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ },
+};
+
+static struct snd_soc_dai_link tm2_dai_links[] = {
+ {
+ .name = "WM5110 AIF1",
+ .stream_name = "HiFi Primary",
+ .codec_dai_name = "wm5110-aif1",
+ .ops = &tm2_aif1_ops,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM,
+ }, {
+ .name = "WM5110 Voice",
+ .stream_name = "Voice call",
+ .codec_dai_name = "wm5110-aif2",
+ .ops = &tm2_aif2_ops,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM,
+ .ignore_suspend = 1,
+ }, {
+ .name = "WM5110 BT",
+ .stream_name = "Bluetooth",
+ .codec_dai_name = "wm5110-aif3",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM,
+ .ignore_suspend = 1,
+ }
+};
+
+static struct snd_soc_card tm2_card = {
+ .owner = THIS_MODULE,
+
+ .dai_link = tm2_dai_links,
+ .num_links = ARRAY_SIZE(tm2_dai_links),
+ .controls = tm2_controls,
+ .num_controls = ARRAY_SIZE(tm2_controls),
+ .dapm_widgets = tm2_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tm2_dapm_widgets),
+ .aux_dev = &tm2_speaker_amp_dev,
+ .num_aux_devs = 1,
+
+ .late_probe = tm2_late_probe,
+ .set_bias_level = tm2_set_bias_level,
+};
+
+static int tm2_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct snd_soc_card *card = &tm2_card;
+ struct tm2_machine_priv *priv;
+ struct device_node *cpu_dai_node, *codec_dai_node;
+ int ret, i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ snd_soc_card_set_drvdata(card, priv);
+ card->dev = dev;
+
+ priv->gpio_mic_bias = devm_gpiod_get(dev, "mic-bias",
+ GPIOF_OUT_INIT_LOW);
+ if (IS_ERR(priv->gpio_mic_bias)) {
+ dev_err(dev, "Failed to get mic bias gpio\n");
+ return PTR_ERR(priv->gpio_mic_bias);
+ }
+
+ ret = snd_soc_of_parse_card_name(card, "model");
+ if (ret < 0) {
+ dev_err(dev, "Card name is not specified\n");
+ return ret;
+ }
+
+ ret = snd_soc_of_parse_audio_routing(card, "samsung,audio-routing");
+ if (ret < 0) {
+ dev_err(dev, "Audio routing is not specified or invalid\n");
+ return ret;
+ }
+
+ card->aux_dev[0].codec_of_node = of_parse_phandle(dev->of_node,
+ "audio-amplifier", 0);
+ if (!card->aux_dev[0].codec_of_node) {
+ dev_err(dev, "audio-amplifier property invalid or missing\n");
+ return -EINVAL;
+ }
+
+ cpu_dai_node = of_parse_phandle(dev->of_node, "i2s-controller", 0);
+ if (!cpu_dai_node) {
+ dev_err(dev, "i2s-controllers property invalid or missing\n");
+ ret = -EINVAL;
+ goto amp_node_put;
+ }
+
+ codec_dai_node = of_parse_phandle(dev->of_node, "audio-codec", 0);
+ if (!codec_dai_node) {
+ dev_err(dev, "audio-codec property invalid or missing\n");
+ ret = -EINVAL;
+ goto cpu_dai_node_put;
+ }
+
+ for (i = 0; i < card->num_links; i++) {
+ card->dai_link[i].cpu_dai_name = NULL;
+ card->dai_link[i].cpu_name = NULL;
+ card->dai_link[i].platform_name = NULL;
+ card->dai_link[i].codec_of_node = codec_dai_node;
+ card->dai_link[i].cpu_of_node = cpu_dai_node;
+ card->dai_link[i].platform_of_node = cpu_dai_node;
+ }
+
+ ret = devm_snd_soc_register_component(dev, &tm2_component,
+ tm2_ext_dai, ARRAY_SIZE(tm2_ext_dai));
+ if (ret < 0) {
+ dev_err(dev, "Failed to register component: %d\n", ret);
+ goto codec_dai_node_put;
+ }
+
+ ret = devm_snd_soc_register_card(dev, card);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register card: %d\n", ret);
+ goto codec_dai_node_put;
+ }
+
+codec_dai_node_put:
+ of_node_put(codec_dai_node);
+cpu_dai_node_put:
+ of_node_put(cpu_dai_node);
+amp_node_put:
+ of_node_put(card->aux_dev[0].codec_of_node);
+ return ret;
+}
+
+static int tm2_pm_prepare(struct device *dev)
+{
+ struct snd_soc_card *card = dev_get_drvdata(dev);
+
+ return tm2_stop_sysclk(card);
+}
+
+static void tm2_pm_complete(struct device *dev)
+{
+ struct snd_soc_card *card = dev_get_drvdata(dev);
+
+ tm2_start_sysclk(card);
+}
+
+const struct dev_pm_ops tm2_pm_ops = {
+ .prepare = tm2_pm_prepare,
+ .suspend = snd_soc_suspend,
+ .resume = snd_soc_resume,
+ .complete = tm2_pm_complete,
+ .freeze = snd_soc_suspend,
+ .thaw = snd_soc_resume,
+ .poweroff = snd_soc_poweroff,
+ .restore = snd_soc_resume,
+};
+
+static const struct of_device_id tm2_of_match[] = {
+ { .compatible = "samsung,tm2-audio" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tm2_of_match);
+
+static struct platform_driver tm2_driver = {
+ .driver = {
+ .name = "tm2-audio",
+ .pm = &tm2_pm_ops,
+ .of_match_table = tm2_of_match,
+ },
+ .probe = tm2_probe,
+};
+module_platform_driver(tm2_driver);
+
+MODULE_AUTHOR("Inha Song <ideal.song@samsung.com>");
+MODULE_DESCRIPTION("ALSA SoC Exynos TM2 Audio Support");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig
index 6db6405d952f..147ebecfed94 100644
--- a/sound/soc/sh/Kconfig
+++ b/sound/soc/sh/Kconfig
@@ -1,5 +1,5 @@
menu "SoC Audio support for SuperH"
- depends on SUPERH || ARCH_SHMOBILE
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
config SND_SOC_PCM_SH7760
tristate "SoC Audio support for Renesas SH7760"
@@ -37,6 +37,7 @@ config SND_SOC_SH4_SIU
config SND_SOC_RCAR
tristate "R-Car series SRU/SCU/SSIU/SSI support"
depends on COMMON_CLK
+ depends on OF || COMPILE_TEST
select SND_SIMPLE_CARD
select REGMAP_MMIO
help
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 2145957d0229..85a33ac0a5c4 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -34,6 +34,9 @@ struct rsnd_adg {
struct clk_onecell_data onecell;
struct rsnd_mod mod;
u32 flags;
+ u32 ckr;
+ u32 rbga;
+ u32 rbgb;
int rbga_rate_for_441khz; /* RBGA */
int rbgb_rate_for_48khz; /* RBGB */
@@ -316,9 +319,11 @@ int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *ssi_mod, unsigned int rate)
struct rsnd_priv *priv = rsnd_mod_to_priv(ssi_mod);
struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
struct device *dev = rsnd_priv_to_dev(priv);
+ struct rsnd_mod *adg_mod = rsnd_mod_get(adg);
struct clk *clk;
int i;
u32 data;
+ u32 ckr = 0;
int sel_table[] = {
[CLKA] = 0x1,
[CLKB] = 0x2,
@@ -360,15 +365,14 @@ found_clock:
rsnd_adg_set_ssi_clk(ssi_mod, data);
if (!(adg_mode_flags(adg) & LRCLK_ASYNC)) {
- struct rsnd_mod *adg_mod = rsnd_mod_get(adg);
- u32 ckr = 0;
-
if (0 == (rate % 8000))
ckr = 0x80000000;
-
- rsnd_mod_bset(adg_mod, SSICKR, 0x80000000, ckr);
}
+ rsnd_mod_bset(adg_mod, BRGCKR, 0x80FF0000, adg->ckr | ckr);
+ rsnd_mod_write(adg_mod, BRRA, adg->rbga);
+ rsnd_mod_write(adg_mod, BRRB, adg->rbgb);
+
dev_dbg(dev, "ADG: %s[%d] selects 0x%x for %d\n",
rsnd_mod_name(ssi_mod), rsnd_mod_id(ssi_mod),
data, rate);
@@ -376,6 +380,25 @@ found_clock:
return 0;
}
+void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
+{
+ struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct clk *clk;
+ int i, ret;
+
+ for_each_rsnd_clk(clk, adg, i) {
+ ret = 0;
+ if (enable)
+ ret = clk_prepare_enable(clk);
+ else
+ clk_disable_unprepare(clk);
+
+ if (ret < 0)
+ dev_warn(dev, "can't use clk %d\n", i);
+ }
+}
+
static void rsnd_adg_get_clkin(struct rsnd_priv *priv,
struct rsnd_adg *adg)
{
@@ -387,27 +410,21 @@ static void rsnd_adg_get_clkin(struct rsnd_priv *priv,
[CLKC] = "clk_c",
[CLKI] = "clk_i",
};
- int i, ret;
+ int i;
for (i = 0; i < CLKMAX; i++) {
clk = devm_clk_get(dev, clk_name[i]);
adg->clk[i] = IS_ERR(clk) ? NULL : clk;
}
- for_each_rsnd_clk(clk, adg, i) {
- ret = clk_prepare_enable(clk);
- if (ret < 0)
- dev_warn(dev, "can't use clk %d\n", i);
-
+ for_each_rsnd_clk(clk, adg, i)
dev_dbg(dev, "clk %d : %p : %ld\n", i, clk, clk_get_rate(clk));
- }
}
static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
struct rsnd_adg *adg)
{
struct clk *clk;
- struct rsnd_mod *adg_mod = rsnd_mod_get(adg);
struct device *dev = rsnd_priv_to_dev(priv);
struct device_node *np = dev->of_node;
u32 ckr, rbgx, rbga, rbgb;
@@ -532,13 +549,13 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
}
}
- rsnd_mod_bset(adg_mod, SSICKR, 0x80FF0000, ckr);
- rsnd_mod_write(adg_mod, BRRA, rbga);
- rsnd_mod_write(adg_mod, BRRB, rbgb);
+ adg->ckr = ckr;
+ adg->rbga = rbga;
+ adg->rbgb = rbgb;
for_each_rsnd_clkout(clk, adg, i)
dev_dbg(dev, "clkout %d : %p : %ld\n", i, clk, clk_get_rate(clk));
- dev_dbg(dev, "SSICKR = 0x%08x, BRRA/BRRB = 0x%x/0x%x\n",
+ dev_dbg(dev, "BRGCKR = 0x%08x, BRRA/BRRB = 0x%x/0x%x\n",
ckr, rbga, rbgb);
}
@@ -565,16 +582,12 @@ int rsnd_adg_probe(struct rsnd_priv *priv)
priv->adg = adg;
+ rsnd_adg_clk_enable(priv);
+
return 0;
}
void rsnd_adg_remove(struct rsnd_priv *priv)
{
- struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
- struct clk *clk;
- int i;
-
- for_each_rsnd_clk(clk, adg, i) {
- clk_disable_unprepare(clk);
- }
+ rsnd_adg_clk_disable(priv);
}
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index f18141098b50..4bd68de76130 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -306,7 +306,7 @@ u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
*/
u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
{
- struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
+ struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);
struct rsnd_mod *target;
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
u32 val = 0x76543210;
@@ -315,11 +315,11 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
if (rsnd_io_is_play(io)) {
struct rsnd_mod *src = rsnd_io_to_mod_src(io);
- target = src ? src : ssi;
+ target = src ? src : ssiu;
} else {
struct rsnd_mod *cmd = rsnd_io_to_mod_cmd(io);
- target = cmd ? cmd : ssi;
+ target = cmd ? cmd : ssiu;
}
mask <<= runtime->channels * 4;
@@ -348,32 +348,28 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
/*
* rsnd_dai functions
*/
-#define rsnd_mod_call(idx, io, func, param...) \
-({ \
- struct rsnd_priv *priv = rsnd_mod_to_priv(mod); \
- struct rsnd_mod *mod = (io)->mod[idx]; \
- struct device *dev = rsnd_priv_to_dev(priv); \
- u32 *status = mod->get_status(io, mod, idx); \
- u32 mask = 0xF << __rsnd_mod_shift_##func; \
- u8 val = (*status >> __rsnd_mod_shift_##func) & 0xF; \
- u8 add = ((val + __rsnd_mod_add_##func) & 0xF); \
- int ret = 0; \
- int call = (val == __rsnd_mod_call_##func) && (mod)->ops->func; \
- if (add == 0xF) \
- call = 0; \
- else \
- *status = (*status & ~mask) + \
- (add << __rsnd_mod_shift_##func); \
- dev_dbg(dev, "%s[%d]\t0x%08x %s\n", \
- rsnd_mod_name(mod), rsnd_mod_id(mod), \
- *status, call ? #func : ""); \
- if (call) \
- ret = (mod)->ops->func(mod, io, param); \
- if (ret) \
- dev_dbg(dev, "%s[%d] : rsnd_mod_call error %d\n", \
- rsnd_mod_name(mod), rsnd_mod_id(mod), ret); \
- ret; \
-})
+struct rsnd_mod *rsnd_mod_next(int *iterator,
+ struct rsnd_dai_stream *io,
+ enum rsnd_mod_type *array,
+ int array_size)
+{
+ struct rsnd_mod *mod;
+ enum rsnd_mod_type type;
+ int max = array ? array_size : RSND_MOD_MAX;
+
+ for (; *iterator < max; (*iterator)++) {
+ type = (array) ? array[*iterator] : *iterator;
+ mod = io->mod[type];
+ if (!mod)
+ continue;
+
+ (*iterator)++;
+
+ return mod;
+ }
+
+ return NULL;
+}
static enum rsnd_mod_type rsnd_mod_sequence[][RSND_MOD_MAX] = {
{
@@ -409,19 +405,49 @@ static enum rsnd_mod_type rsnd_mod_sequence[][RSND_MOD_MAX] = {
},
};
-#define rsnd_dai_call(fn, io, param...) \
-({ \
- struct rsnd_mod *mod; \
- int type, is_play = rsnd_io_is_play(io); \
- int ret = 0, i; \
- for (i = 0; i < RSND_MOD_MAX; i++) { \
- type = rsnd_mod_sequence[is_play][i]; \
- mod = (io)->mod[type]; \
- if (!mod) \
- continue; \
- ret |= rsnd_mod_call(type, io, fn, param); \
- } \
- ret; \
+static int rsnd_status_update(u32 *status,
+ int shift, int add, int timing)
+{
+ u32 mask = 0xF << shift;
+ u8 val = (*status >> shift) & 0xF;
+ u8 next_val = (val + add) & 0xF;
+ int func_call = (val == timing);
+
+ if (next_val == 0xF) /* underflow case */
+ func_call = 0;
+ else
+ *status = (*status & ~mask) + (next_val << shift);
+
+ return func_call;
+}
+
+#define rsnd_dai_call(fn, io, param...) \
+({ \
+ struct rsnd_priv *priv = rsnd_io_to_priv(io); \
+ struct device *dev = rsnd_priv_to_dev(priv); \
+ struct rsnd_mod *mod; \
+ int is_play = rsnd_io_is_play(io); \
+ int ret = 0, i; \
+ enum rsnd_mod_type *types = rsnd_mod_sequence[is_play]; \
+ for_each_rsnd_mod_arrays(i, mod, io, types, RSND_MOD_MAX) { \
+ int tmp = 0; \
+ u32 *status = mod->get_status(io, mod, types[i]); \
+ int func_call = rsnd_status_update(status, \
+ __rsnd_mod_shift_##fn, \
+ __rsnd_mod_add_##fn, \
+ __rsnd_mod_call_##fn); \
+ dev_dbg(dev, "%s[%d]\t0x%08x %s\n", \
+ rsnd_mod_name(mod), rsnd_mod_id(mod), *status, \
+ (func_call && (mod)->ops->fn) ? #fn : ""); \
+ if (func_call && (mod)->ops->fn) \
+ tmp = (mod)->ops->fn(mod, io, param); \
+ if (tmp) \
+ dev_err(dev, "%s[%d] : %s error %d\n", \
+ rsnd_mod_name(mod), rsnd_mod_id(mod), \
+ #fn, tmp); \
+ ret |= tmp; \
+ } \
+ ret; \
})
int rsnd_dai_connect(struct rsnd_mod *mod,
@@ -690,7 +716,33 @@ static int rsnd_soc_set_dai_tdm_slot(struct snd_soc_dai *dai,
return 0;
}
+static int rsnd_soc_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+ struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
+
+ /*
+ * call rsnd_dai_call without spinlock
+ */
+ return rsnd_dai_call(nolock_start, io, priv);
+}
+
+static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+ struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
+
+ /*
+ * call rsnd_dai_call without spinlock
+ */
+ rsnd_dai_call(nolock_stop, io, priv);
+}
+
static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
+ .startup = rsnd_soc_dai_startup,
+ .shutdown = rsnd_soc_dai_shutdown,
.trigger = rsnd_soc_dai_trigger,
.set_fmt = rsnd_soc_dai_set_fmt,
.set_tdm_slot = rsnd_soc_set_dai_tdm_slot,
@@ -993,7 +1045,11 @@ static int __rsnd_kctrl_new(struct rsnd_mod *mod,
void _rsnd_kctrl_remove(struct rsnd_kctrl_cfg *cfg)
{
- snd_ctl_remove(cfg->card, cfg->kctrl);
+ if (cfg->card && cfg->kctrl)
+ snd_ctl_remove(cfg->card, cfg->kctrl);
+
+ cfg->card = NULL;
+ cfg->kctrl = NULL;
}
int rsnd_kctrl_new_m(struct rsnd_mod *mod,
@@ -1070,8 +1126,8 @@ static int rsnd_pcm_new(struct snd_soc_pcm_runtime *rtd)
return snd_pcm_lib_preallocate_pages_for_all(
rtd->pcm,
- SNDRV_DMA_TYPE_DEV,
- rtd->card->snd_card->dev,
+ SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data(GFP_KERNEL),
PREALLOC_BUFFER, PREALLOC_BUFFER_MAX);
}
@@ -1092,6 +1148,7 @@ static int rsnd_rdai_continuance_probe(struct rsnd_priv *priv,
ret = rsnd_dai_call(probe, io, priv);
if (ret == -EAGAIN) {
struct rsnd_mod *ssi_mod = rsnd_io_to_mod_ssi(io);
+ struct rsnd_mod *mod;
int i;
/*
@@ -1111,8 +1168,8 @@ static int rsnd_rdai_continuance_probe(struct rsnd_priv *priv,
* remove all mod from io
* and, re connect ssi
*/
- for (i = 0; i < RSND_MOD_MAX; i++)
- rsnd_dai_disconnect((io)->mod[i], io, i);
+ for_each_rsnd_mod(i, mod, io)
+ rsnd_dai_disconnect(mod, io, i);
rsnd_dai_connect(ssi_mod, io, RSND_MOD_SSI);
/*
@@ -1251,9 +1308,33 @@ static int rsnd_remove(struct platform_device *pdev)
return ret;
}
+static int rsnd_suspend(struct device *dev)
+{
+ struct rsnd_priv *priv = dev_get_drvdata(dev);
+
+ rsnd_adg_clk_disable(priv);
+
+ return 0;
+}
+
+static int rsnd_resume(struct device *dev)
+{
+ struct rsnd_priv *priv = dev_get_drvdata(dev);
+
+ rsnd_adg_clk_enable(priv);
+
+ return 0;
+}
+
+static struct dev_pm_ops rsnd_pm_ops = {
+ .suspend = rsnd_suspend,
+ .resume = rsnd_resume,
+};
+
static struct platform_driver rsnd_driver = {
.driver = {
.name = "rcar_sound",
+ .pm = &rsnd_pm_ops,
.of_match_table = rsnd_of_match,
},
.probe = rsnd_probe,
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 6bc93cbb3049..1f405c833867 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -25,6 +25,10 @@
struct rsnd_dmaen {
struct dma_chan *chan;
+ dma_addr_t dma_buf;
+ unsigned int dma_len;
+ unsigned int dma_period;
+ unsigned int dma_cnt;
};
struct rsnd_dmapp {
@@ -34,6 +38,8 @@ struct rsnd_dmapp {
struct rsnd_dma {
struct rsnd_mod mod;
+ struct rsnd_mod *mod_from;
+ struct rsnd_mod *mod_to;
dma_addr_t src_addr;
dma_addr_t dst_addr;
union {
@@ -56,10 +62,38 @@ struct rsnd_dma_ctrl {
/*
* Audio DMAC
*/
+#define rsnd_dmaen_sync(dmaen, io, i) __rsnd_dmaen_sync(dmaen, io, i, 1)
+#define rsnd_dmaen_unsync(dmaen, io, i) __rsnd_dmaen_sync(dmaen, io, i, 0)
+static void __rsnd_dmaen_sync(struct rsnd_dmaen *dmaen, struct rsnd_dai_stream *io,
+ int i, int sync)
+{
+ struct device *dev = dmaen->chan->device->dev;
+ enum dma_data_direction dir;
+ int is_play = rsnd_io_is_play(io);
+ dma_addr_t buf;
+ int len, max;
+ size_t period;
+
+ len = dmaen->dma_len;
+ period = dmaen->dma_period;
+ max = len / period;
+ i = i % max;
+ buf = dmaen->dma_buf + (period * i);
+
+ dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ if (sync)
+ dma_sync_single_for_device(dev, buf, period, dir);
+ else
+ dma_sync_single_for_cpu(dev, buf, period, dir);
+}
+
static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
struct rsnd_dai_stream *io)
{
struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
+ struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
bool elapsed = false;
unsigned long flags;
@@ -76,9 +110,22 @@ static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
*/
spin_lock_irqsave(&priv->lock, flags);
- if (rsnd_io_is_working(io))
+ if (rsnd_io_is_working(io)) {
+ rsnd_dmaen_unsync(dmaen, io, dmaen->dma_cnt);
+
+ /*
+ * The next period has already started,
+ * so sync the period after next.
+ * see
+ * rsnd_dmaen_start()
+ */
+ rsnd_dmaen_sync(dmaen, io, dmaen->dma_cnt + 2);
+
elapsed = rsnd_dai_pointer_update(io, io->byte_per_period);
+ dmaen->dma_cnt++;
+ }
+
spin_unlock_irqrestore(&priv->lock, flags);
if (elapsed)
@@ -92,6 +139,20 @@ static void rsnd_dmaen_complete(void *data)
rsnd_mod_interrupt(mod, __rsnd_dmaen_complete);
}
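
The sync/unsync dance around __rsnd_dmaen_complete() and rsnd_dmaen_start() keeps two periods mapped for the device at all times: periods 0 and 1 are synced before submit, and each completion unsyncs the finished period and syncs the one two ahead. A small sketch of the offset arithmetic, assuming a hypothetical 4-period cyclic buffer:

#include <stdio.h>

int main(void)
{
	unsigned int period = 4096, max = 4;	/* i.e. len = 4 * period */
	unsigned int cnt;

	/* rsnd_dmaen_start(): periods 0 and 1 are synced before submit */
	/* __rsnd_dmaen_complete(cnt): unsync cnt, sync cnt + 2, cnt++  */
	for (cnt = 0; cnt < 6; cnt++)
		printf("complete #%u: unsync offset %5u, sync offset %5u\n",
		       cnt, (cnt % max) * period, ((cnt + 2) % max) * period);
	return 0;
}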
+static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
+ struct rsnd_mod *mod_from,
+ struct rsnd_mod *mod_to)
+{
+ if ((!mod_from && !mod_to) ||
+ (mod_from && mod_to))
+ return NULL;
+
+ if (mod_from)
+ return rsnd_mod_dma_req(io, mod_from);
+ else
+ return rsnd_mod_dma_req(io, mod_to);
+}
+
static int rsnd_dmaen_stop(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv)
@@ -99,7 +160,66 @@ static int rsnd_dmaen_stop(struct rsnd_mod *mod,
struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
- dmaengine_terminate_all(dmaen->chan);
+ if (dmaen->chan) {
+ int is_play = rsnd_io_is_play(io);
+
+ dmaengine_terminate_all(dmaen->chan);
+ dma_unmap_single(dmaen->chan->device->dev,
+ dmaen->dma_buf, dmaen->dma_len,
+ is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ }
+
+ return 0;
+}
+
+static int rsnd_dmaen_nolock_stop(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv)
+{
+ struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
+ struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
+
+ /*
+ * DMAEngine channel release takes a mutex internally.
+ * Thus, it shouldn't be called under spinlock.
+ * Let's call it from nolock_stop
+ */
+ if (dmaen->chan)
+ dma_release_channel(dmaen->chan);
+
+ dmaen->chan = NULL;
+
+ return 0;
+}
+
+static int rsnd_dmaen_nolock_start(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv)
+{
+ struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
+ struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
+ struct device *dev = rsnd_priv_to_dev(priv);
+
+ if (dmaen->chan) {
+ dev_err(dev, "it already has dma channel\n");
+ return -EIO;
+ }
+
+ /*
+ * DMAEngine channel request takes a mutex internally.
+ * Thus, it shouldn't be called under spinlock.
+ * Let's call it from nolock_start
+ */
+ dmaen->chan = rsnd_dmaen_request_channel(io,
+ dma->mod_from,
+ dma->mod_to);
+ if (IS_ERR_OR_NULL(dmaen->chan)) {
+ int ret = PTR_ERR(dmaen->chan);
+
+ dmaen->chan = NULL;
+ dev_err(dev, "can't get dma channel\n");
+ return ret;
+ }
return 0;
}
@@ -113,12 +233,41 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod,
struct snd_pcm_substream *substream = io->substream;
struct device *dev = rsnd_priv_to_dev(priv);
struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config cfg = {};
+ dma_addr_t buf;
+ size_t len;
+ size_t period;
int is_play = rsnd_io_is_play(io);
+ int i;
+ int ret;
+
+ cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ cfg.src_addr = dma->src_addr;
+ cfg.dst_addr = dma->dst_addr;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ dev_dbg(dev, "%s[%d] %pad -> %pad\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod),
+ &cfg.src_addr, &cfg.dst_addr);
+
+ ret = dmaengine_slave_config(dmaen->chan, &cfg);
+ if (ret < 0)
+ return ret;
+
+ len = snd_pcm_lib_buffer_bytes(substream);
+ period = snd_pcm_lib_period_bytes(substream);
+ buf = dma_map_single(dmaen->chan->device->dev,
+ substream->runtime->dma_area,
+ len,
+ is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (dma_mapping_error(dmaen->chan->device->dev, buf)) {
+ dev_err(dev, "dma map failed\n");
+ return -EIO;
+ }
desc = dmaengine_prep_dma_cyclic(dmaen->chan,
- substream->runtime->dma_addr,
- snd_pcm_lib_buffer_bytes(substream),
- snd_pcm_lib_period_bytes(substream),
+ buf, len, period,
is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -130,6 +279,19 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod,
desc->callback = rsnd_dmaen_complete;
desc->callback_param = rsnd_mod_get(dma);
+ dmaen->dma_buf = buf;
+ dmaen->dma_len = len;
+ dmaen->dma_period = period;
+ dmaen->dma_cnt = 0;
+
+ /*
+ * Synchronize this period and the next one.
+ * see
+ * __rsnd_dmaen_complete()
+ */
+ for (i = 0; i < 2; i++)
+ rsnd_dmaen_sync(dmaen, io, i);
+
if (dmaengine_submit(desc) < 0) {
dev_err(dev, "dmaengine_submit() fail\n");
return -EIO;
@@ -143,124 +305,55 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod,
struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
struct rsnd_mod *mod, char *name)
{
- struct dma_chan *chan;
+ struct dma_chan *chan = NULL;
struct device_node *np;
int i = 0;
for_each_child_of_node(of_node, np) {
- if (i == rsnd_mod_id(mod))
- break;
+ if (i == rsnd_mod_id(mod) && (!chan))
+ chan = of_dma_request_slave_channel(np, name);
i++;
}
- chan = of_dma_request_slave_channel(np, name);
-
- of_node_put(np);
+ /* of_node_put() is needed here, since of_node came from rsnd_xxx_of_node() */
of_node_put(of_node);
return chan;
}
-static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
- struct rsnd_mod *mod_from,
- struct rsnd_mod *mod_to)
-{
- if ((!mod_from && !mod_to) ||
- (mod_from && mod_to))
- return NULL;
-
- if (mod_from)
- return rsnd_mod_dma_req(io, mod_from);
- else
- return rsnd_mod_dma_req(io, mod_to);
-}
-
-static int rsnd_dmaen_remove(struct rsnd_mod *mod,
- struct rsnd_dai_stream *io,
- struct rsnd_priv *priv)
-{
- struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
- struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
-
- if (dmaen->chan)
- dma_release_channel(dmaen->chan);
-
- dmaen->chan = NULL;
-
- return 0;
-}
-
static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
- struct rsnd_dma *dma, int id,
+ struct rsnd_dma *dma,
struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
- struct rsnd_mod *mod = rsnd_mod_get(dma);
- struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
struct rsnd_priv *priv = rsnd_io_to_priv(io);
struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
- struct device *dev = rsnd_priv_to_dev(priv);
- struct dma_slave_config cfg = {};
- int is_play = rsnd_io_is_play(io);
- int ret;
-
- if (dmaen->chan) {
- dev_err(dev, "it already has dma channel\n");
- return -EIO;
- }
-
- if (dev->of_node) {
- dmaen->chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
- } else {
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
+ struct dma_chan *chan;
- dmaen->chan = dma_request_channel(mask, shdma_chan_filter,
- (void *)(uintptr_t)id);
- }
- if (IS_ERR_OR_NULL(dmaen->chan)) {
- dmaen->chan = NULL;
- dev_err(dev, "can't get dma channel\n");
- goto rsnd_dma_channel_err;
+ /* try to get DMAEngine channel */
+ chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
+ if (IS_ERR_OR_NULL(chan)) {
+ /*
+ * DMA channel request failed; fall back to PIO mode.
+ * see
+ * rsnd_ssi_fallback()
+ * rsnd_rdai_continuance_probe()
+ */
+ return -EAGAIN;
}
- cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
- cfg.src_addr = dma->src_addr;
- cfg.dst_addr = dma->dst_addr;
- cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-
- dev_dbg(dev, "%s[%d] %pad -> %pad\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod),
- &cfg.src_addr, &cfg.dst_addr);
-
- ret = dmaengine_slave_config(dmaen->chan, &cfg);
- if (ret < 0)
- goto rsnd_dma_attach_err;
+ dma_release_channel(chan);
dmac->dmaen_num++;
return 0;
-
-rsnd_dma_attach_err:
- rsnd_dmaen_remove(mod, io, priv);
-rsnd_dma_channel_err:
-
- /*
- * DMA failed. try to PIO mode
- * see
- * rsnd_ssi_fallback()
- * rsnd_rdai_continuance_probe()
- */
- return -EAGAIN;
}
static struct rsnd_mod_ops rsnd_dmaen_ops = {
.name = "audmac",
+ .nolock_start = rsnd_dmaen_nolock_start,
+ .nolock_stop = rsnd_dmaen_nolock_stop,
.start = rsnd_dmaen_start,
.stop = rsnd_dmaen_stop,
- .remove = rsnd_dmaen_remove,
};
/*
@@ -394,7 +487,7 @@ static int rsnd_dmapp_start(struct rsnd_mod *mod,
}
static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
- struct rsnd_dma *dma, int id,
+ struct rsnd_dma *dma,
struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
@@ -627,7 +720,7 @@ static void rsnd_dma_of_path(struct rsnd_mod *this,
}
int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
- struct rsnd_mod **dma_mod, int id)
+ struct rsnd_mod **dma_mod)
{
struct rsnd_mod *mod_from = NULL;
struct rsnd_mod *mod_to = NULL;
@@ -636,7 +729,7 @@ int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
struct device *dev = rsnd_priv_to_dev(priv);
struct rsnd_mod_ops *ops;
enum rsnd_mod_type type;
- int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma, int id,
+ int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma,
struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
int is_play = rsnd_io_is_play(io);
int ret, dma_id;
@@ -682,9 +775,6 @@ int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
*dma_mod = rsnd_mod_get(dma);
- dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
- dma->dst_addr = rsnd_dma_addr(io, mod_to, is_play, 0);
-
ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
rsnd_mod_get_status, type, dma_id);
if (ret < 0)
@@ -695,9 +785,14 @@ int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
rsnd_mod_name(mod_from), rsnd_mod_id(mod_from),
rsnd_mod_name(mod_to), rsnd_mod_id(mod_to));
- ret = attach(io, dma, id, mod_from, mod_to);
+ ret = attach(io, dma, mod_from, mod_to);
if (ret < 0)
return ret;
+
+ dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
+ dma->dst_addr = rsnd_dma_addr(io, mod_to, is_play, 0);
+ dma->mod_from = mod_from;
+ dma->mod_to = mod_to;
}
ret = rsnd_dai_connect(*dma_mod, io, type);
diff --git a/sound/soc/sh/rcar/dvc.c b/sound/soc/sh/rcar/dvc.c
index 02d971f69eff..cf8f59cdd8d7 100644
--- a/sound/soc/sh/rcar/dvc.c
+++ b/sound/soc/sh/rcar/dvc.c
@@ -48,8 +48,6 @@ struct rsnd_dvc {
#define rsnd_dvc_get(priv, id) ((struct rsnd_dvc *)(priv->dvc) + id)
#define rsnd_dvc_nr(priv) ((priv)->dvc_nr)
-#define rsnd_dvc_of_node(priv) \
- of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,dvc")
#define rsnd_mod_to_dvc(_mod) \
container_of((_mod), struct rsnd_dvc, mod)
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 7d2fdf8dd188..63b6d3c28021 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -211,6 +211,14 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
RSND_GEN_S_REG(SSI_MODE1, 0x804),
RSND_GEN_S_REG(SSI_MODE2, 0x808),
RSND_GEN_S_REG(SSI_CONTROL, 0x810),
+ RSND_GEN_S_REG(SSI_SYS_STATUS0, 0x840),
+ RSND_GEN_S_REG(SSI_SYS_STATUS1, 0x844),
+ RSND_GEN_S_REG(SSI_SYS_STATUS2, 0x848),
+ RSND_GEN_S_REG(SSI_SYS_STATUS3, 0x84c),
+ RSND_GEN_S_REG(SSI_SYS_STATUS4, 0x880),
+ RSND_GEN_S_REG(SSI_SYS_STATUS5, 0x884),
+ RSND_GEN_S_REG(SSI_SYS_STATUS6, 0x888),
+ RSND_GEN_S_REG(SSI_SYS_STATUS7, 0x88c),
/* FIXME: it needs SSI_MODE2/3 in the future */
RSND_GEN_M_REG(SSI_BUSIF_MODE, 0x0, 0x80),
@@ -311,7 +319,7 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
static const struct rsnd_regmap_field_conf conf_adg[] = {
RSND_GEN_S_REG(BRRA, 0x00),
RSND_GEN_S_REG(BRRB, 0x04),
- RSND_GEN_S_REG(SSICKR, 0x08),
+ RSND_GEN_S_REG(BRGCKR, 0x08),
RSND_GEN_S_REG(AUDIO_CLK_SEL0, 0x0c),
RSND_GEN_S_REG(AUDIO_CLK_SEL1, 0x10),
RSND_GEN_S_REG(AUDIO_CLK_SEL2, 0x14),
@@ -362,7 +370,7 @@ static int rsnd_gen1_probe(struct rsnd_priv *priv)
static const struct rsnd_regmap_field_conf conf_adg[] = {
RSND_GEN_S_REG(BRRA, 0x00),
RSND_GEN_S_REG(BRRB, 0x04),
- RSND_GEN_S_REG(SSICKR, 0x08),
+ RSND_GEN_S_REG(BRGCKR, 0x08),
RSND_GEN_S_REG(AUDIO_CLK_SEL0, 0x0c),
RSND_GEN_S_REG(AUDIO_CLK_SEL1, 0x10),
};
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index a8f61d79333b..b90df77662df 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -43,17 +43,7 @@
* see gen1/gen2 for detail
*/
enum rsnd_reg {
- /* SCU (SRC/SSIU/MIX/CTU/DVC) */
- RSND_REG_SSI_MODE, /* Gen2 only */
- RSND_REG_SSI_MODE0,
- RSND_REG_SSI_MODE1,
- RSND_REG_SSI_MODE2,
- RSND_REG_SSI_CONTROL,
- RSND_REG_SSI_CTRL, /* Gen2 only */
- RSND_REG_SSI_BUSIF_MODE, /* Gen2 only */
- RSND_REG_SSI_BUSIF_ADINR, /* Gen2 only */
- RSND_REG_SSI_BUSIF_DALIGN, /* Gen2 only */
- RSND_REG_SSI_INT_ENABLE, /* Gen2 only */
+ /* SCU (MIX/CTU/DVC) */
RSND_REG_SRC_I_BUSIF_MODE,
RSND_REG_SRC_O_BUSIF_MODE,
RSND_REG_SRC_ROUTE_MODE0,
@@ -63,29 +53,29 @@ enum rsnd_reg {
RSND_REG_SRC_IFSCR,
RSND_REG_SRC_IFSVR,
RSND_REG_SRC_SRCCR,
- RSND_REG_SRC_CTRL, /* Gen2 only */
- RSND_REG_SRC_BSDSR, /* Gen2 only */
- RSND_REG_SRC_BSISR, /* Gen2 only */
- RSND_REG_SRC_INT_ENABLE0, /* Gen2 only */
- RSND_REG_SRC_BUSIF_DALIGN, /* Gen2 only */
- RSND_REG_SRCIN_TIMSEL0, /* Gen2 only */
- RSND_REG_SRCIN_TIMSEL1, /* Gen2 only */
- RSND_REG_SRCIN_TIMSEL2, /* Gen2 only */
- RSND_REG_SRCIN_TIMSEL3, /* Gen2 only */
- RSND_REG_SRCIN_TIMSEL4, /* Gen2 only */
- RSND_REG_SRCOUT_TIMSEL0, /* Gen2 only */
- RSND_REG_SRCOUT_TIMSEL1, /* Gen2 only */
- RSND_REG_SRCOUT_TIMSEL2, /* Gen2 only */
- RSND_REG_SRCOUT_TIMSEL3, /* Gen2 only */
- RSND_REG_SRCOUT_TIMSEL4, /* Gen2 only */
+ RSND_REG_SRC_CTRL,
+ RSND_REG_SRC_BSDSR,
+ RSND_REG_SRC_BSISR,
+ RSND_REG_SRC_INT_ENABLE0,
+ RSND_REG_SRC_BUSIF_DALIGN,
+ RSND_REG_SRCIN_TIMSEL0,
+ RSND_REG_SRCIN_TIMSEL1,
+ RSND_REG_SRCIN_TIMSEL2,
+ RSND_REG_SRCIN_TIMSEL3,
+ RSND_REG_SRCIN_TIMSEL4,
+ RSND_REG_SRCOUT_TIMSEL0,
+ RSND_REG_SRCOUT_TIMSEL1,
+ RSND_REG_SRCOUT_TIMSEL2,
+ RSND_REG_SRCOUT_TIMSEL3,
+ RSND_REG_SRCOUT_TIMSEL4,
RSND_REG_SCU_SYS_STATUS0,
- RSND_REG_SCU_SYS_STATUS1, /* Gen2 only */
+ RSND_REG_SCU_SYS_STATUS1,
RSND_REG_SCU_SYS_INT_EN0,
- RSND_REG_SCU_SYS_INT_EN1, /* Gen2 only */
- RSND_REG_CMD_CTRL, /* Gen2 only */
- RSND_REG_CMD_BUSIF_DALIGN, /* Gen2 only */
+ RSND_REG_SCU_SYS_INT_EN1,
+ RSND_REG_CMD_CTRL,
+ RSND_REG_CMD_BUSIF_DALIGN,
RSND_REG_CMD_ROUTE_SLCT,
- RSND_REG_CMDOUT_TIMSEL, /* Gen2 only */
+ RSND_REG_CMDOUT_TIMSEL,
RSND_REG_CTU_SWRSR,
RSND_REG_CTU_CTUIR,
RSND_REG_CTU_ADINR,
@@ -147,18 +137,38 @@ enum rsnd_reg {
RSND_REG_DVC_VOL6R,
RSND_REG_DVC_VOL7R,
RSND_REG_DVC_DVUER,
- RSND_REG_DVC_VRCTR, /* Gen2 only */
- RSND_REG_DVC_VRPDR, /* Gen2 only */
- RSND_REG_DVC_VRDBR, /* Gen2 only */
+ RSND_REG_DVC_VRCTR,
+ RSND_REG_DVC_VRPDR,
+ RSND_REG_DVC_VRDBR,
/* ADG */
RSND_REG_BRRA,
RSND_REG_BRRB,
- RSND_REG_SSICKR,
- RSND_REG_DIV_EN, /* Gen2 only */
+ RSND_REG_BRGCKR,
+ RSND_REG_DIV_EN,
RSND_REG_AUDIO_CLK_SEL0,
RSND_REG_AUDIO_CLK_SEL1,
- RSND_REG_AUDIO_CLK_SEL2, /* Gen2 only */
+ RSND_REG_AUDIO_CLK_SEL2,
+
+ /* SSIU */
+ RSND_REG_SSI_MODE,
+ RSND_REG_SSI_MODE0,
+ RSND_REG_SSI_MODE1,
+ RSND_REG_SSI_MODE2,
+ RSND_REG_SSI_CONTROL,
+ RSND_REG_SSI_CTRL,
+ RSND_REG_SSI_BUSIF_MODE,
+ RSND_REG_SSI_BUSIF_ADINR,
+ RSND_REG_SSI_BUSIF_DALIGN,
+ RSND_REG_SSI_INT_ENABLE,
+ RSND_REG_SSI_SYS_STATUS0,
+ RSND_REG_SSI_SYS_STATUS1,
+ RSND_REG_SSI_SYS_STATUS2,
+ RSND_REG_SSI_SYS_STATUS3,
+ RSND_REG_SSI_SYS_STATUS4,
+ RSND_REG_SSI_SYS_STATUS5,
+ RSND_REG_SSI_SYS_STATUS6,
+ RSND_REG_SSI_SYS_STATUS7,
/* SSI */
RSND_REG_SSICR,
@@ -199,7 +209,7 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
* R-Car DMA
*/
int rsnd_dma_attach(struct rsnd_dai_stream *io,
- struct rsnd_mod *mod, struct rsnd_mod **dma_mod, int id);
+ struct rsnd_mod *mod, struct rsnd_mod **dma_mod);
int rsnd_dma_probe(struct rsnd_priv *priv);
struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
struct rsnd_mod *mod, char *name);
@@ -259,6 +269,12 @@ struct rsnd_mod_ops {
int (*fallback)(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv);
+ int (*nolock_start)(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv);
+ int (*nolock_stop)(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv);
};
struct rsnd_dai_stream;
@@ -278,7 +294,7 @@ struct rsnd_mod {
*
* 0xH0000CBA
*
- * A 0: probe 1: remove
+ * A 0: nolock_start 1: nolock_stop
* B 0: init 1: quit
* C 0: start 1: stop
*
@@ -288,19 +304,23 @@ struct rsnd_mod {
* H 0: fallback
* H 0: hw_params
*/
-#define __rsnd_mod_shift_probe 0
-#define __rsnd_mod_shift_remove 0
+#define __rsnd_mod_shift_nolock_start 0
+#define __rsnd_mod_shift_nolock_stop 0
#define __rsnd_mod_shift_init 4
#define __rsnd_mod_shift_quit 4
#define __rsnd_mod_shift_start 8
#define __rsnd_mod_shift_stop 8
+#define __rsnd_mod_shift_probe 28 /* always called */
+#define __rsnd_mod_shift_remove 28 /* always called */
#define __rsnd_mod_shift_irq 28 /* always called */
#define __rsnd_mod_shift_pcm_new 28 /* always called */
#define __rsnd_mod_shift_fallback 28 /* always called */
#define __rsnd_mod_shift_hw_params 28 /* always called */
-#define __rsnd_mod_add_probe 1
-#define __rsnd_mod_add_remove -1
+#define __rsnd_mod_add_probe 0
+#define __rsnd_mod_add_remove 0
+#define __rsnd_mod_add_nolock_start 1
+#define __rsnd_mod_add_nolock_stop -1
#define __rsnd_mod_add_init 1
#define __rsnd_mod_add_quit -1
#define __rsnd_mod_add_start 1
@@ -311,7 +331,7 @@ struct rsnd_mod {
#define __rsnd_mod_add_hw_params 0
#define __rsnd_mod_call_probe 0
-#define __rsnd_mod_call_remove 1
+#define __rsnd_mod_call_remove 0
#define __rsnd_mod_call_init 0
#define __rsnd_mod_call_quit 1
#define __rsnd_mod_call_start 0
@@ -320,6 +340,8 @@ struct rsnd_mod {
#define __rsnd_mod_call_pcm_new 0
#define __rsnd_mod_call_fallback 0
#define __rsnd_mod_call_hw_params 0
+#define __rsnd_mod_call_nolock_start 0
+#define __rsnd_mod_call_nolock_stop 1
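
With these values the nibble layout documented above (0xH0000CBA) is easy to read back: nolock_start/stop live in nibble A, init/quit in B, start/stop in C, while the "always called" ops use the H nibble with add 0 and timing 0, so their counter never moves and the callback fires every time. A standalone sketch of the word for a mod that has been nolock_start'ed, init'ed and start'ed once:

#include <stdio.h>

int main(void)
{
	unsigned int status = 0;

	status += 1 << 0;	/* nolock_start: +1 at shift 0 (nibble A) */
	status += 1 << 4;	/* init:         +1 at shift 4 (nibble B) */
	status += 1 << 8;	/* start:        +1 at shift 8 (nibble C) */
	printf("running mod status = 0x%08x\n", status);	/* 0x00000111 */
	return 0;
}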
#define rsnd_mod_to_priv(mod) ((mod)->priv)
#define rsnd_mod_id(mod) ((mod) ? (mod)->id : -1)
@@ -346,6 +368,18 @@ void rsnd_mod_interrupt(struct rsnd_mod *mod,
u32 *rsnd_mod_get_status(struct rsnd_dai_stream *io,
struct rsnd_mod *mod,
enum rsnd_mod_type type);
+struct rsnd_mod *rsnd_mod_next(int *iterator,
+ struct rsnd_dai_stream *io,
+ enum rsnd_mod_type *array,
+ int array_size);
+#define for_each_rsnd_mod(iterator, pos, io) \
+ for (iterator = 0; \
+ (pos = rsnd_mod_next(&iterator, io, NULL, 0));)
+#define for_each_rsnd_mod_arrays(iterator, pos, io, array, size) \
+ for (iterator = 0; \
+ (pos = rsnd_mod_next(&iterator, io, array, size));)
+#define for_each_rsnd_mod_array(iterator, pos, io, array) \
+ for_each_rsnd_mod_arrays(iterator, pos, io, array, ARRAY_SIZE(array))
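
A short usage sketch for the new iterators (kernel-context fragment, mirroring how rsnd_rdai_continuance_probe() and rsnd_dai_call() use them elsewhere in this patch; the dev_dbg() is illustrative only):

	struct rsnd_mod *mod;
	int i;

	/* walk every mod currently connected to the stream */
	for_each_rsnd_mod(i, mod, io)
		dev_dbg(dev, "%s[%d] is connected\n",
			rsnd_mod_name(mod), rsnd_mod_id(mod));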
void rsnd_parse_connect_common(struct rsnd_dai *rdai,
struct rsnd_mod* (*mod_get)(struct rsnd_priv *priv, int id),
@@ -365,6 +399,18 @@ int rsnd_runtime_is_ssi_multi(struct rsnd_dai_stream *io);
int rsnd_runtime_is_ssi_tdm(struct rsnd_dai_stream *io);
/*
+ * DT
+ */
+#define rsnd_parse_of_node(priv, node) \
+ of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, node)
+#define RSND_NODE_DAI "rcar_sound,dai"
+#define RSND_NODE_SSI "rcar_sound,ssi"
+#define RSND_NODE_SRC "rcar_sound,src"
+#define RSND_NODE_CTU "rcar_sound,ctu"
+#define RSND_NODE_MIX "rcar_sound,mix"
+#define RSND_NODE_DVC "rcar_sound,dvc"
+
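With rsnd_parse_of_node() the per-module *_of_node() helpers below collapse to a single lookup; a sketch of a caller, which still owns the returned reference:

	struct device_node *ssi_np = rsnd_parse_of_node(priv, RSND_NODE_SSI);

	if (ssi_np) {
		/* ... walk the "rcar_sound,ssi" children ... */
		of_node_put(ssi_np);
	}
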
+/*
* R-Car sound DAI
*/
#define RSND_DAI_NAME_SIZE 16
@@ -382,6 +428,7 @@ struct rsnd_dai_stream {
};
#define rsnd_io_to_mod(io, i) ((i) < RSND_MOD_MAX ? (io)->mod[(i)] : NULL)
#define rsnd_io_to_mod_ssi(io) rsnd_io_to_mod((io), RSND_MOD_SSI)
+#define rsnd_io_to_mod_ssiu(io) rsnd_io_to_mod((io), RSND_MOD_SSIU)
#define rsnd_io_to_mod_ssip(io) rsnd_io_to_mod((io), RSND_MOD_SSIP)
#define rsnd_io_to_mod_src(io) rsnd_io_to_mod((io), RSND_MOD_SRC)
#define rsnd_io_to_mod_ctu(io) rsnd_io_to_mod((io), RSND_MOD_CTU)
@@ -428,8 +475,7 @@ int rsnd_dai_pointer_offset(struct rsnd_dai_stream *io, int additional);
int rsnd_dai_connect(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
enum rsnd_mod_type type);
-#define rsnd_dai_of_node(priv) \
- of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,dai")
+#define rsnd_dai_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_DAI)
/*
* R-Car Gen1/Gen2
@@ -453,6 +499,9 @@ int rsnd_adg_set_src_timesel_gen2(struct rsnd_mod *src_mod,
unsigned int out_rate);
int rsnd_adg_set_cmd_timsel_gen2(struct rsnd_mod *mod,
struct rsnd_dai_stream *io);
+#define rsnd_adg_clk_enable(priv) rsnd_adg_clk_control(priv, 1)
+#define rsnd_adg_clk_disable(priv) rsnd_adg_clk_control(priv, 0)
+void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable);
/*
* R-Car sound priv
@@ -606,8 +655,7 @@ u32 rsnd_ssi_multi_slaves_runtime(struct rsnd_dai_stream *io);
__rsnd_ssi_is_pin_sharing(rsnd_io_to_mod_ssi(io))
int __rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod);
-#define rsnd_ssi_of_node(priv) \
- of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,ssi")
+#define rsnd_ssi_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_SSI)
void rsnd_parse_connect_ssi(struct rsnd_dai *rdai,
struct device_node *playback,
struct device_node *capture);
@@ -633,8 +681,7 @@ unsigned int rsnd_src_get_rate(struct rsnd_priv *priv,
struct rsnd_dai_stream *io,
int is_in);
-#define rsnd_src_of_node(priv) \
- of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,src")
+#define rsnd_src_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_SRC)
#define rsnd_parse_connect_src(rdai, playback, capture) \
rsnd_parse_connect_common(rdai, rsnd_src_mod_get, \
rsnd_src_of_node(rsnd_rdai_to_priv(rdai)), \
@@ -647,8 +694,7 @@ int rsnd_ctu_probe(struct rsnd_priv *priv);
void rsnd_ctu_remove(struct rsnd_priv *priv);
int rsnd_ctu_converted_channel(struct rsnd_mod *mod);
struct rsnd_mod *rsnd_ctu_mod_get(struct rsnd_priv *priv, int id);
-#define rsnd_ctu_of_node(priv) \
- of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,ctu")
+#define rsnd_ctu_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_CTU)
#define rsnd_parse_connect_ctu(rdai, playback, capture) \
rsnd_parse_connect_common(rdai, rsnd_ctu_mod_get, \
rsnd_ctu_of_node(rsnd_rdai_to_priv(rdai)), \
@@ -660,8 +706,7 @@ struct rsnd_mod *rsnd_ctu_mod_get(struct rsnd_priv *priv, int id);
int rsnd_mix_probe(struct rsnd_priv *priv);
void rsnd_mix_remove(struct rsnd_priv *priv);
struct rsnd_mod *rsnd_mix_mod_get(struct rsnd_priv *priv, int id);
-#define rsnd_mix_of_node(priv) \
- of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,mix")
+#define rsnd_mix_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_MIX)
#define rsnd_parse_connect_mix(rdai, playback, capture) \
rsnd_parse_connect_common(rdai, rsnd_mix_mod_get, \
rsnd_mix_of_node(rsnd_rdai_to_priv(rdai)), \
@@ -673,8 +718,7 @@ struct rsnd_mod *rsnd_mix_mod_get(struct rsnd_priv *priv, int id);
int rsnd_dvc_probe(struct rsnd_priv *priv);
void rsnd_dvc_remove(struct rsnd_priv *priv);
struct rsnd_mod *rsnd_dvc_mod_get(struct rsnd_priv *priv, int id);
-#define rsnd_dvc_of_node(priv) \
- of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,dvc")
+#define rsnd_dvc_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_DVC)
#define rsnd_parse_connect_dvc(rdai, playback, capture) \
rsnd_parse_connect_common(rdai, rsnd_dvc_mod_get, \
rsnd_dvc_of_node(rsnd_rdai_to_priv(rdai)), \
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 969a5169de25..3a8f65bd1bf9 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -189,6 +189,7 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct device *dev = rsnd_priv_to_dev(priv);
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ int use_src = 0;
u32 fin, fout;
u32 ifscr, fsrate, adinr;
u32 cr, route;
@@ -214,6 +215,8 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
return;
}
+ use_src = (fin != fout) | rsnd_src_sync_is_enabled(mod);
+
/*
* SRC_ADINR
*/
@@ -225,7 +228,7 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
*/
ifscr = 0;
fsrate = 0;
- if (fin != fout) {
+ if (use_src) {
u64 n;
ifscr = 1;
@@ -239,7 +242,7 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
*/
cr = 0x00011110;
route = 0x0;
- if (fin != fout) {
+ if (use_src) {
route = 0x1;
if (rsnd_src_sync_is_enabled(mod)) {
@@ -327,8 +330,8 @@ static void rsnd_src_status_clear(struct rsnd_mod *mod)
{
u32 val = OUF_SRC(rsnd_mod_id(mod));
- rsnd_mod_bset(mod, SCU_SYS_STATUS0, val, val);
- rsnd_mod_bset(mod, SCU_SYS_STATUS1, val, val);
+ rsnd_mod_write(mod, SCU_SYS_STATUS0, val);
+ rsnd_mod_write(mod, SCU_SYS_STATUS1, val);
}
static bool rsnd_src_error_occurred(struct rsnd_mod *mod)
@@ -475,7 +478,7 @@ static int rsnd_src_probe_(struct rsnd_mod *mod,
return ret;
}
- ret = rsnd_dma_attach(io, mod, &src->dma, 0);
+ ret = rsnd_dma_attach(io, mod, &src->dma);
return ret;
}
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 6cb6db005fc4..411bda2387ad 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -417,11 +417,14 @@ static int rsnd_ssi_hw_params(struct rsnd_mod *mod,
int chan = params_channels(params);
/*
- * Already working.
- * It will happen if SSI has parent/child connection.
+ * snd_pcm_ops::hw_params is called *before*
+ * snd_soc_dai_ops::trigger, so ssi->usrcnt is still 0
+ * on the first call.
*/
- if (ssi->usrcnt > 1) {
+ if (ssi->usrcnt) {
/*
+ * Already working.
+ * This happens when the SSI has a parent/child connection.
* it is error if child <-> parent SSI uses
* different channels.
*/
@@ -644,10 +647,14 @@ static int rsnd_ssi_common_probe(struct rsnd_mod *mod,
if (ret < 0)
return ret;
- ret = devm_request_irq(dev, ssi->irq,
- rsnd_ssi_interrupt,
- IRQF_SHARED,
- dev_name(dev), mod);
+ /*
+ * SSI might be probed again as a PIO fallback,
+ * so it is easier to handle the IRQ request/free manually.
+ */
+ ret = request_irq(ssi->irq,
+ rsnd_ssi_interrupt,
+ IRQF_SHARED,
+ dev_name(dev), mod);
return ret;
}
@@ -669,7 +676,6 @@ static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
struct rsnd_priv *priv)
{
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- int dma_id = 0; /* not needed */
int ret;
/*
@@ -684,7 +690,7 @@ static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
return ret;
/* SSI probe might be called many times in MUX multi path */
- ret = rsnd_dma_attach(io, mod, &ssi->dma, dma_id);
+ ret = rsnd_dma_attach(io, mod, &ssi->dma);
return ret;
}
@@ -694,11 +700,9 @@ static int rsnd_ssi_dma_remove(struct rsnd_mod *mod,
struct rsnd_priv *priv)
{
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- struct device *dev = rsnd_priv_to_dev(priv);
- int irq = ssi->irq;
/* PIO will request IRQ again */
- devm_free_irq(dev, irq, mod);
+ free_irq(ssi->irq, mod);
return 0;
}
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 6f9b388ec5a8..4e817c8a18c0 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -33,6 +33,26 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod,
u32 mask1, val1;
u32 mask2, val2;
+ /* clear status */
+ switch (id) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ rsnd_mod_write(mod, SSI_SYS_STATUS0, 0xf << (id * 4));
+ rsnd_mod_write(mod, SSI_SYS_STATUS2, 0xf << (id * 4));
+ rsnd_mod_write(mod, SSI_SYS_STATUS4, 0xf << (id * 4));
+ rsnd_mod_write(mod, SSI_SYS_STATUS6, 0xf << (id * 4));
+ break;
+ case 9:
+ rsnd_mod_write(mod, SSI_SYS_STATUS1, 0xf << 4);
+ rsnd_mod_write(mod, SSI_SYS_STATUS3, 0xf << 4);
+ rsnd_mod_write(mod, SSI_SYS_STATUS5, 0xf << 4);
+ rsnd_mod_write(mod, SSI_SYS_STATUS7, 0xf << 4);
+ break;
+ }
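
The clear masks written above follow directly from the id mapping: SSI0-4 share SSI_SYS_STATUS0/2/4/6 with one nibble per id, while SSI9 uses bits 7:4 of SSI_SYS_STATUS1/3/5/7. A tiny sketch of the resulting masks:

#include <stdio.h>

int main(void)
{
	int id;

	for (id = 0; id <= 4; id++)
		printf("SSI%d: SSI_SYS_STATUS0/2/4/6 mask %#010x\n",
		       id, 0xf << (id * 4));
	printf("SSI9: SSI_SYS_STATUS1/3/5/7 mask %#010x\n", 0xf << 4);
	return 0;
}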
+
/*
* SSI_MODE0
*/
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index bf7b52fce597..bfd71b873ca2 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -30,16 +30,26 @@ static int soc_compr_open(struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int ret = 0;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
+ if (cpu_dai->driver->cops && cpu_dai->driver->cops->startup) {
+ ret = cpu_dai->driver->cops->startup(cstream, cpu_dai);
+ if (ret < 0) {
+ dev_err(cpu_dai->dev, "Compress ASoC: can't open interface %s: %d\n",
+ cpu_dai->name, ret);
+ goto out;
+ }
+ }
+
if (platform->driver->compr_ops && platform->driver->compr_ops->open) {
ret = platform->driver->compr_ops->open(cstream);
if (ret < 0) {
pr_err("compress asoc: can't open platform %s\n",
platform->component.name);
- goto out;
+ goto plat_err;
}
}
@@ -60,6 +70,9 @@ static int soc_compr_open(struct snd_compr_stream *cstream)
machine_err:
if (platform->driver->compr_ops && platform->driver->compr_ops->free)
platform->driver->compr_ops->free(cstream);
+plat_err:
+ if (cpu_dai->driver->cops && cpu_dai->driver->cops->shutdown)
+ cpu_dai->driver->cops->shutdown(cstream, cpu_dai);
out:
mutex_unlock(&rtd->pcm_mutex);
return ret;
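
These compress callbacks now give the CPU DAI a chance to hook each operation before the platform driver runs. A CPU DAI driver opts in by filling snd_soc_dai_driver::cops; a hedged sketch (handler and DAI names are hypothetical):

static const struct snd_soc_cdai_ops my_compr_dai_ops = {
	.startup	= my_compr_startup,	/* hypothetical handlers */
	.shutdown	= my_compr_shutdown,
	.set_params	= my_compr_set_params,
	.trigger	= my_compr_trigger,
};

static struct snd_soc_dai_driver my_compr_cpu_dai = {
	.name = "my-compr-dai",
	.cops = &my_compr_dai_ops,
	/* .playback / .compress_new etc. as the platform requires */
};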
@@ -70,6 +83,7 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
struct snd_soc_pcm_runtime *fe = cstream->private_data;
struct snd_pcm_substream *fe_substream = fe->pcm->streams[0].substream;
struct snd_soc_platform *platform = fe->platform;
+ struct snd_soc_dai *cpu_dai = fe->cpu_dai;
struct snd_soc_dpcm *dpcm;
struct snd_soc_dapm_widget_list *list;
int stream;
@@ -82,12 +96,22 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+ if (cpu_dai->driver->cops && cpu_dai->driver->cops->startup) {
+ ret = cpu_dai->driver->cops->startup(cstream, cpu_dai);
+ if (ret < 0) {
+ dev_err(cpu_dai->dev, "Compress ASoC: can't open interface %s: %d\n",
+ cpu_dai->name, ret);
+ goto out;
+ }
+ }
+
if (platform->driver->compr_ops && platform->driver->compr_ops->open) {
ret = platform->driver->compr_ops->open(cstream);
if (ret < 0) {
pr_err("compress asoc: can't open platform %s\n",
platform->component.name);
- goto out;
+ goto plat_err;
}
}
@@ -144,6 +168,9 @@ fe_err:
machine_err:
if (platform->driver->compr_ops && platform->driver->compr_ops->free)
platform->driver->compr_ops->free(cstream);
+plat_err:
+ if (cpu_dai->driver->cops && cpu_dai->driver->cops->shutdown)
+ cpu_dai->driver->cops->shutdown(cstream, cpu_dai);
out:
fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
mutex_unlock(&fe->card->mutex);
@@ -210,6 +237,9 @@ static int soc_compr_free(struct snd_compr_stream *cstream)
if (platform->driver->compr_ops && platform->driver->compr_ops->free)
platform->driver->compr_ops->free(cstream);
+ if (cpu_dai->driver->cops && cpu_dai->driver->cops->shutdown)
+ cpu_dai->driver->cops->shutdown(cstream, cpu_dai);
+
if (cstream->direction == SND_COMPRESS_PLAYBACK) {
if (snd_soc_runtime_ignore_pmdown_time(rtd)) {
snd_soc_dapm_stream_event(rtd,
@@ -236,6 +266,7 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *fe = cstream->private_data;
struct snd_soc_platform *platform = fe->platform;
+ struct snd_soc_dai *cpu_dai = fe->cpu_dai;
struct snd_soc_dpcm *dpcm;
int stream, ret;
@@ -275,6 +306,9 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
if (platform->driver->compr_ops && platform->driver->compr_ops->free)
platform->driver->compr_ops->free(cstream);
+ if (cpu_dai->driver->cops && cpu_dai->driver->cops->shutdown)
+ cpu_dai->driver->cops->shutdown(cstream, cpu_dai);
+
mutex_unlock(&fe->card->mutex);
return 0;
}
@@ -285,6 +319,7 @@ static int soc_compr_trigger(struct snd_compr_stream *cstream, int cmd)
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int ret = 0;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
@@ -295,6 +330,10 @@ static int soc_compr_trigger(struct snd_compr_stream *cstream, int cmd)
goto out;
}
+ if (cpu_dai->driver->cops && cpu_dai->driver->cops->trigger)
+ cpu_dai->driver->cops->trigger(cstream, cmd, cpu_dai);
+
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
snd_soc_dai_digital_mute(codec_dai, 0, cstream->direction);
@@ -313,6 +352,7 @@ static int soc_compr_trigger_fe(struct snd_compr_stream *cstream, int cmd)
{
struct snd_soc_pcm_runtime *fe = cstream->private_data;
struct snd_soc_platform *platform = fe->platform;
+ struct snd_soc_dai *cpu_dai = fe->cpu_dai;
int ret = 0, stream;
if (cmd == SND_COMPR_TRIGGER_PARTIAL_DRAIN ||
@@ -332,6 +372,12 @@ static int soc_compr_trigger_fe(struct snd_compr_stream *cstream, int cmd)
mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+ if (cpu_dai->driver->cops && cpu_dai->driver->cops->trigger) {
+ ret = cpu_dai->driver->cops->trigger(cstream, cmd, cpu_dai);
+ if (ret < 0)
+ goto out;
+ }
+
if (platform->driver->compr_ops && platform->driver->compr_ops->trigger) {
ret = platform->driver->compr_ops->trigger(cstream, cmd);
if (ret < 0)
@@ -368,6 +414,7 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream,
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int ret = 0;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
@@ -378,6 +425,12 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream,
* expectation is that platform and machine will configure everything
* for this compress path, like configuring pcm port for codec
*/
+ if (cpu_dai->driver->cops && cpu_dai->driver->cops->set_params) {
+ ret = cpu_dai->driver->cops->set_params(cstream, params, cpu_dai);
+ if (ret < 0)
+ goto err;
+ }
+
if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) {
ret = platform->driver->compr_ops->set_params(cstream, params);
if (ret < 0)
@@ -416,6 +469,7 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
struct snd_soc_pcm_runtime *fe = cstream->private_data;
struct snd_pcm_substream *fe_substream = fe->pcm->streams[0].substream;
struct snd_soc_platform *platform = fe->platform;
+ struct snd_soc_dai *cpu_dai = fe->cpu_dai;
int ret = 0, stream;
if (cstream->direction == SND_COMPRESS_PLAYBACK)
@@ -425,6 +479,12 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+ if (cpu_dai->driver->cops && cpu_dai->driver->cops->set_params) {
+ ret = cpu_dai->driver->cops->set_params(cstream, params, cpu_dai);
+ if (ret < 0)
+ goto out;
+ }
+
if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) {
ret = platform->driver->compr_ops->set_params(cstream, params);
if (ret < 0)
@@ -469,13 +529,21 @@ static int soc_compr_get_params(struct snd_compr_stream *cstream,
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int ret = 0;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
+ if (cpu_dai->driver->cops && cpu_dai->driver->cops->get_params) {
+ ret = cpu_dai->driver->cops->get_params(cstream, params, cpu_dai);
+ if (ret < 0)
+ goto err;
+ }
+
if (platform->driver->compr_ops && platform->driver->compr_ops->get_params)
ret = platform->driver->compr_ops->get_params(cstream, params);
+err:
mutex_unlock(&rtd->pcm_mutex);
return ret;
}
@@ -516,13 +584,21 @@ static int soc_compr_ack(struct snd_compr_stream *cstream, size_t bytes)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int ret = 0;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
+ if (cpu_dai->driver->cops && cpu_dai->driver->cops->ack) {
+ ret = cpu_dai->driver->cops->ack(cstream, bytes, cpu_dai);
+ if (ret < 0)
+ goto err;
+ }
+
if (platform->driver->compr_ops && platform->driver->compr_ops->ack)
ret = platform->driver->compr_ops->ack(cstream, bytes);
+err:
mutex_unlock(&rtd->pcm_mutex);
return ret;
}
@@ -533,9 +609,13 @@ static int soc_compr_pointer(struct snd_compr_stream *cstream,
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
int ret = 0;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
+ if (cpu_dai->driver->cops && cpu_dai->driver->cops->pointer)
+ cpu_dai->driver->cops->pointer(cstream, tstamp, cpu_dai);
+
if (platform->driver->compr_ops && platform->driver->compr_ops->pointer)
ret = platform->driver->compr_ops->pointer(cstream, tstamp);
@@ -564,8 +644,15 @@ static int soc_compr_set_metadata(struct snd_compr_stream *cstream,
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int ret = 0;
+ if (cpu_dai->driver->cops && cpu_dai->driver->cops->set_metadata) {
+ ret = cpu_dai->driver->cops->set_metadata(cstream, metadata, cpu_dai);
+ if (ret < 0)
+ return ret;
+ }
+
if (platform->driver->compr_ops && platform->driver->compr_ops->set_metadata)
ret = platform->driver->compr_ops->set_metadata(cstream, metadata);
@@ -577,8 +664,15 @@ static int soc_compr_get_metadata(struct snd_compr_stream *cstream,
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int ret = 0;
+ if (cpu_dai->driver->cops && cpu_dai->driver->cops->get_metadata) {
+ ret = cpu_dai->driver->cops->get_metadata(cstream, metadata, cpu_dai);
+ if (ret < 0)
+ return ret;
+ }
+
if (platform->driver->compr_ops && platform->driver->compr_ops->get_metadata)
ret = platform->driver->compr_ops->get_metadata(cstream, metadata);
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index c0bbcd903261..f1901bb1466e 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -626,7 +626,7 @@ static void codec2codec_close_delayed_work(struct work_struct *work)
int snd_soc_suspend(struct device *dev)
{
struct snd_soc_card *card = dev_get_drvdata(dev);
- struct snd_soc_codec *codec;
+ struct snd_soc_component *component;
struct snd_soc_pcm_runtime *rtd;
int i;
@@ -702,39 +702,39 @@ int snd_soc_suspend(struct device *dev)
dapm_mark_endpoints_dirty(card);
snd_soc_dapm_sync(&card->dapm);
- /* suspend all CODECs */
- list_for_each_entry(codec, &card->codec_dev_list, card_list) {
- struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ /* suspend all COMPONENTs */
+ list_for_each_entry(component, &card->component_dev_list, card_list) {
+ struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
- /* If there are paths active then the CODEC will be held with
+ /* If there are paths active then the COMPONENT will be held with
* bias _ON and should not be suspended. */
- if (!codec->suspended) {
+ if (!component->suspended) {
switch (snd_soc_dapm_get_bias_level(dapm)) {
case SND_SOC_BIAS_STANDBY:
/*
- * If the CODEC is capable of idle
+ * If the COMPONENT is capable of idle
* bias off then being in STANDBY
* means it's doing something,
* otherwise fall through.
*/
if (dapm->idle_bias_off) {
- dev_dbg(codec->dev,
+ dev_dbg(component->dev,
"ASoC: idle_bias_off CODEC on over suspend\n");
break;
}
case SND_SOC_BIAS_OFF:
- if (codec->driver->suspend)
- codec->driver->suspend(codec);
- codec->suspended = 1;
- if (codec->component.regmap)
- regcache_mark_dirty(codec->component.regmap);
+ if (component->suspend)
+ component->suspend(component);
+ component->suspended = 1;
+ if (component->regmap)
+ regcache_mark_dirty(component->regmap);
/* deactivate pins to sleep state */
- pinctrl_pm_select_sleep_state(codec->dev);
+ pinctrl_pm_select_sleep_state(component->dev);
break;
default:
- dev_dbg(codec->dev,
- "ASoC: CODEC is on over suspend\n");
+ dev_dbg(component->dev,
+ "ASoC: COMPONENT is on over suspend\n");
break;
}
}
@@ -768,7 +768,7 @@ static void soc_resume_deferred(struct work_struct *work)
struct snd_soc_card *card =
container_of(work, struct snd_soc_card, deferred_resume_work);
struct snd_soc_pcm_runtime *rtd;
- struct snd_soc_codec *codec;
+ struct snd_soc_component *component;
int i;
/* our power state is still SNDRV_CTL_POWER_D3hot from suspend time,
@@ -794,11 +794,11 @@ static void soc_resume_deferred(struct work_struct *work)
cpu_dai->driver->resume(cpu_dai);
}
- list_for_each_entry(codec, &card->codec_dev_list, card_list) {
- if (codec->suspended) {
- if (codec->driver->resume)
- codec->driver->resume(codec);
- codec->suspended = 0;
+ list_for_each_entry(component, &card->component_dev_list, card_list) {
+ if (component->suspended) {
+ if (component->resume)
+ component->resume(component);
+ component->suspended = 0;
}
}
@@ -972,6 +972,48 @@ struct snd_soc_dai *snd_soc_find_dai(
}
EXPORT_SYMBOL_GPL(snd_soc_find_dai);
+
+/**
+ * snd_soc_find_dai_link - Find a DAI link
+ *
+ * @card: soc card
+ * @id: DAI link ID to match
+ * @name: DAI link name to match, optional
+ * @stream_name: DAI link stream name to match, optional
+ *
+ * This function will search all existing DAI links of the soc card to
+ * find the link with the given ID. Since DAI links may not have a
+ * unique ID, the name and stream name should also match if
+ * specified.
+ *
+ * Return: pointer of DAI link, or NULL if not found.
+ */
+struct snd_soc_dai_link *snd_soc_find_dai_link(struct snd_soc_card *card,
+ int id, const char *name,
+ const char *stream_name)
+{
+ struct snd_soc_dai_link *link, *_link;
+
+ lockdep_assert_held(&client_mutex);
+
+ list_for_each_entry_safe(link, _link, &card->dai_link_list, list) {
+ if (link->id != id)
+ continue;
+
+ if (name && (!link->name || strcmp(name, link->name)))
+ continue;
+
+ if (stream_name && (!link->stream_name
+ || strcmp(stream_name, link->stream_name)))
+ continue;
+
+ return link;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_find_dai_link);
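
Usage sketch for the new lookup, assuming the caller already holds client_mutex (as the card registration paths do); the link name used here is hypothetical:

	struct snd_soc_dai_link *link;

	link = snd_soc_find_dai_link(card, 1, "PrimaryMI2S", NULL);
	if (!link)
		dev_warn(card->dev, "DAI link id 1 not found\n");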
+
static bool soc_is_dai_link_bound(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_link)
{
@@ -993,6 +1035,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
struct snd_soc_dai_link_component cpu_dai_component;
struct snd_soc_dai **codec_dais;
struct snd_soc_platform *platform;
+ struct device_node *platform_of_node;
const char *platform_name;
int i;
@@ -1042,9 +1085,12 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
/* find one from the set of registered platforms */
list_for_each_entry(platform, &platform_list, list) {
+ platform_of_node = platform->dev->of_node;
+ if (!platform_of_node && platform->dev->parent->of_node)
+ platform_of_node = platform->dev->parent->of_node;
+
if (dai_link->platform_of_node) {
- if (platform->dev->of_node !=
- dai_link->platform_of_node)
+ if (platform_of_node != dai_link->platform_of_node)
continue;
} else {
if (strcmp(platform->component.name, platform_name))
@@ -1072,9 +1118,7 @@ static void soc_remove_component(struct snd_soc_component *component)
if (!component->card)
return;
- /* This is a HACK and will be removed soon */
- if (component->codec)
- list_del(&component->codec->card_list);
+ list_del(&component->card_list);
if (component->remove)
component->remove(component);
@@ -1443,10 +1487,7 @@ static int soc_probe_component(struct snd_soc_card *card,
component->num_dapm_routes);
list_add(&dapm->list, &card->dapm_list);
-
- /* This is a HACK and will be removed soon */
- if (component->codec)
- list_add(&component->codec->card_list, &card->codec_dev_list);
+ list_add(&component->card_list, &card->component_dev_list);
return 0;
@@ -1706,7 +1747,8 @@ static int soc_bind_aux_dev(struct snd_soc_card *card, int num)
}
component->init = aux_dev->init;
- list_add(&component->list_aux, &card->aux_comp_list);
+ component->auxiliary = 1;
+
return 0;
err_defer:
@@ -1722,7 +1764,10 @@ static int soc_probe_aux_devices(struct snd_soc_card *card)
for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
order++) {
- list_for_each_entry(comp, &card->aux_comp_list, list_aux) {
+ list_for_each_entry(comp, &card->component_dev_list, card_list) {
+ if (!comp->auxiliary)
+ continue;
+
if (comp->driver->probe_order == order) {
ret = soc_probe_component(card, comp);
if (ret < 0) {
@@ -1746,11 +1791,14 @@ static void soc_remove_aux_devices(struct snd_soc_card *card)
for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
order++) {
list_for_each_entry_safe(comp, _comp,
- &card->aux_comp_list, list_aux) {
+ &card->component_dev_list, card_list) {
+
+ if (!comp->auxiliary)
+ continue;
+
if (comp->driver->remove_order == order) {
soc_remove_component(comp);
- /* remove it from the card's aux_comp_list */
- list_del(&comp->list_aux);
+ comp->auxiliary = 0;
}
}
}
@@ -2926,6 +2974,8 @@ static int snd_soc_component_initialize(struct snd_soc_component *component,
component->driver = driver;
component->probe = component->driver->probe;
component->remove = component->driver->remove;
+ component->suspend = component->driver->suspend;
+ component->resume = component->driver->resume;
dapm = &component->dapm;
dapm->dev = dev;
@@ -3275,6 +3325,20 @@ static void snd_soc_codec_drv_remove(struct snd_soc_component *component)
codec->driver->remove(codec);
}
+static int snd_soc_codec_drv_suspend(struct snd_soc_component *component)
+{
+ struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
+
+ return codec->driver->suspend(codec);
+}
+
+static int snd_soc_codec_drv_resume(struct snd_soc_component *component)
+{
+ struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
+
+ return codec->driver->resume(codec);
+}
+
static int snd_soc_codec_drv_write(struct snd_soc_component *component,
unsigned int reg, unsigned int val)
{
@@ -3336,6 +3400,10 @@ int snd_soc_register_codec(struct device *dev,
codec->component.probe = snd_soc_codec_drv_probe;
if (codec_drv->remove)
codec->component.remove = snd_soc_codec_drv_remove;
+ if (codec_drv->suspend)
+ codec->component.suspend = snd_soc_codec_drv_suspend;
+ if (codec_drv->resume)
+ codec->component.resume = snd_soc_codec_drv_resume;
if (codec_drv->write)
codec->component.write = snd_soc_codec_drv_write;
if (codec_drv->read)
@@ -3424,10 +3492,10 @@ found:
EXPORT_SYMBOL_GPL(snd_soc_unregister_codec);
/* Retrieve a card's name from device tree */
-int snd_soc_of_parse_card_name(struct snd_soc_card *card,
- const char *propname)
+int snd_soc_of_parse_card_name_from_node(struct snd_soc_card *card,
+ struct device_node *np,
+ const char *propname)
{
- struct device_node *np;
int ret;
if (!card->dev) {
@@ -3435,7 +3503,8 @@ int snd_soc_of_parse_card_name(struct snd_soc_card *card,
return -EINVAL;
}
- np = card->dev->of_node;
+ if (!np)
+ np = card->dev->of_node;
ret = of_property_read_string_index(np, propname, 0, &card->name);
/*
@@ -3452,7 +3521,7 @@ int snd_soc_of_parse_card_name(struct snd_soc_card *card,
return 0;
}
-EXPORT_SYMBOL_GPL(snd_soc_of_parse_card_name);
+EXPORT_SYMBOL_GPL(snd_soc_of_parse_card_name_from_node);
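
Existing callers of the old helper name are expected to keep working through a thin wrapper that passes NULL so the card's own of_node is used; a sketch of such a wrapper (presumably living in include/sound/soc.h, which is not part of this hunk):

static inline int snd_soc_of_parse_card_name(struct snd_soc_card *card,
					     const char *propname)
{
	return snd_soc_of_parse_card_name_from_node(card, NULL, propname);
}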
static const struct snd_soc_dapm_widget simple_widgets[] = {
SND_SOC_DAPM_MIC("Microphone", NULL),
@@ -3461,14 +3530,17 @@ static const struct snd_soc_dapm_widget simple_widgets[] = {
SND_SOC_DAPM_SPK("Speaker", NULL),
};
-int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
+int snd_soc_of_parse_audio_simple_widgets_from_node(struct snd_soc_card *card,
+ struct device_node *np,
const char *propname)
{
- struct device_node *np = card->dev->of_node;
struct snd_soc_dapm_widget *widgets;
const char *template, *wname;
int i, j, num_widgets, ret;
+ if (!np)
+ np = card->dev->of_node;
+
num_widgets = of_property_count_strings(np, propname);
if (num_widgets < 0) {
dev_err(card->dev,
@@ -3539,7 +3611,7 @@ int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
return 0;
}
-EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_simple_widgets);
+EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_simple_widgets_from_node);
static int snd_soc_of_get_slot_mask(struct device_node *np,
const char *prop_name,
@@ -3595,15 +3667,18 @@ int snd_soc_of_parse_tdm_slot(struct device_node *np,
}
EXPORT_SYMBOL_GPL(snd_soc_of_parse_tdm_slot);
-void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
+void snd_soc_of_parse_audio_prefix_from_node(struct snd_soc_card *card,
+ struct device_node *np,
struct snd_soc_codec_conf *codec_conf,
struct device_node *of_node,
const char *propname)
{
- struct device_node *np = card->dev->of_node;
const char *str;
int ret;
+ if (!np)
+ np = card->dev->of_node;
+
ret = of_property_read_string(np, propname, &str);
if (ret < 0) {
/* no prefix is not error */
@@ -3613,16 +3688,19 @@ void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
codec_conf->of_node = of_node;
codec_conf->name_prefix = str;
}
-EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_prefix);
+EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_prefix_from_node);
-int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
+int snd_soc_of_parse_audio_routing_from_node(struct snd_soc_card *card,
+ struct device_node *np,
const char *propname)
{
- struct device_node *np = card->dev->of_node;
int num_routes;
struct snd_soc_dapm_route *routes;
int i, ret;
+ if (!np)
+ np = card->dev->of_node;
+
num_routes = of_property_count_strings(np, propname);
if (num_routes < 0 || num_routes & 1) {
dev_err(card->dev,
@@ -3669,7 +3747,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
return 0;
}
-EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_routing);
+EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_routing_from_node);
unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
const char *prefix,
@@ -3784,7 +3862,7 @@ unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
}
EXPORT_SYMBOL_GPL(snd_soc_of_parse_daifmt);
-static int snd_soc_get_dai_name(struct of_phandle_args *args,
+int snd_soc_get_dai_name(struct of_phandle_args *args,
const char **dai_name)
{
struct snd_soc_component *pos;
@@ -3836,6 +3914,7 @@ static int snd_soc_get_dai_name(struct of_phandle_args *args,
mutex_unlock(&client_mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(snd_soc_get_dai_name);
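
With the symbol exported, a machine driver that has already parsed a "sound-dai" phandle can resolve the DAI name directly; a hedged kernel-context fragment:

	struct of_phandle_args args;
	const char *dai_name;
	int ret;

	ret = of_parse_phandle_with_args(np, "sound-dai", "#sound-dai-cells",
					 0, &args);
	if (!ret) {
		ret = snd_soc_get_dai_name(&args, &dai_name);
		of_node_put(args.np);
	}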
int snd_soc_of_get_dai_name(struct device_node *of_node,
const char **dai_name)
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 3bbe32ee4630..27dd02e57b31 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -330,6 +330,11 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
case snd_soc_dapm_mixer_named_ctl:
mc = (struct soc_mixer_control *)kcontrol->private_value;
+ if (mc->autodisable && snd_soc_volsw_is_stereo(mc))
+ dev_warn(widget->dapm->dev,
+ "ASoC: Unsupported stereo autodisable control '%s'\n",
+ ctrl_name);
+
if (mc->autodisable) {
struct snd_soc_dapm_widget template;
@@ -723,7 +728,8 @@ static int dapm_connect_mux(struct snd_soc_dapm_context *dapm,
}
/* set up initial codec paths */
-static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i)
+static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i,
+ int nth_path)
{
struct soc_mixer_control *mc = (struct soc_mixer_control *)
p->sink->kcontrol_news[i].private_value;
@@ -736,7 +742,25 @@ static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i)
if (reg != SND_SOC_NOPM) {
soc_dapm_read(p->sink->dapm, reg, &val);
- val = (val >> shift) & mask;
+ /*
+ * The nth_path argument allows this function to know
+ * which path of a kcontrol it is setting the initial
+ * status for. Ideally this would support any number
+ * of paths and channels. But since kcontrols only come
+ * in mono and stereo variants, we are limited to 2
+ * channels.
+ *
+ * The following code assumes for stereo controls the
+ * first path is the left channel, and all remaining
+ * paths are the right channel.
+ */
+ if (snd_soc_volsw_is_stereo(mc) && nth_path > 0) {
+ if (reg != mc->rreg)
+ soc_dapm_read(p->sink->dapm, mc->rreg, &val);
+ val = (val >> mc->rshift) & mask;
+ } else {
+ val = (val >> shift) & mask;
+ }
if (invert)
val = max - val;
p->connect = !!val;
@@ -749,13 +773,13 @@ static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i)
static int dapm_connect_mixer(struct snd_soc_dapm_context *dapm,
struct snd_soc_dapm_path *path, const char *control_name)
{
- int i;
+ int i, nth_path = 0;
/* search for mixer kcontrol */
for (i = 0; i < path->sink->num_kcontrols; i++) {
if (!strcmp(control_name, path->sink->kcontrol_news[i].name)) {
path->name = path->sink->kcontrol_news[i].name;
- dapm_set_mixer_path_status(path, i);
+ dapm_set_mixer_path_status(path, i, nth_path++);
return 0;
}
}
@@ -1626,6 +1650,15 @@ static void dapm_widget_update(struct snd_soc_card *card)
dev_err(w->dapm->dev, "ASoC: %s DAPM update failed: %d\n",
w->name, ret);
+ if (update->has_second_set) {
+ ret = soc_dapm_update_bits(w->dapm, update->reg2,
+ update->mask2, update->val2);
+ if (ret < 0)
+ dev_err(w->dapm->dev,
+ "ASoC: %s DAPM update failed: %d\n",
+ w->name, ret);
+ }
+
for (wi = 0; wi < wlist->num_widgets; wi++) {
w = wlist->widgets[wi];
@@ -2177,7 +2210,8 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_mux_update_power);
/* test and update the power status of a mixer or switch widget */
static int soc_dapm_mixer_update_power(struct snd_soc_card *card,
- struct snd_kcontrol *kcontrol, int connect)
+ struct snd_kcontrol *kcontrol,
+ int connect, int rconnect)
{
struct snd_soc_dapm_path *path;
int found = 0;
@@ -2186,8 +2220,33 @@ static int soc_dapm_mixer_update_power(struct snd_soc_card *card,
/* find dapm widget path assoc with kcontrol */
dapm_kcontrol_for_each_path(path, kcontrol) {
+ /*
+ * Ideally this function should support any number of
+ * paths and channels. But since kcontrols only come
+ * in mono and stereo variants, we are limited to 2
+ * channels.
+ *
+ * The following code assumes for stereo controls the
+ * first path (when 'found == 0') is the left channel,
+ * and all remaining paths (when 'found == 1') are the
+ * right channel.
+ *
+ * A stereo control is signified by a valid 'rconnect'
+ * value: either 0 for unconnected, or > 0 for connected.
+ * This is chosen instead of using snd_soc_volsw_is_stereo,
+ * so that the behavior of snd_soc_dapm_mixer_update_power
+ * doesn't change even when the kcontrol passed in is
+ * stereo.
+ *
+ * It passes 'connect' as the path connect status for
+ * the left channel, and 'rconnect' for the right
+ * channel.
+ */
+ if (found && rconnect >= 0)
+ soc_dapm_connect_path(path, rconnect, "mixer update");
+ else
+ soc_dapm_connect_path(path, connect, "mixer update");
found = 1;
- soc_dapm_connect_path(path, connect, "mixer update");
}
if (found)
@@ -2205,7 +2264,7 @@ int snd_soc_dapm_mixer_update_power(struct snd_soc_dapm_context *dapm,
mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
card->update = update;
- ret = soc_dapm_mixer_update_power(card, kcontrol, connect);
+ ret = soc_dapm_mixer_update_power(card, kcontrol, connect, -1);
card->update = NULL;
mutex_unlock(&card->dapm_mutex);
if (ret > 0)
@@ -3030,22 +3089,28 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
int reg = mc->reg;
unsigned int shift = mc->shift;
int max = mc->max;
+ unsigned int width = fls(max);
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
- unsigned int val;
+ unsigned int reg_val, val, rval = 0;
int ret = 0;
- if (snd_soc_volsw_is_stereo(mc))
- dev_warn(dapm->dev,
- "ASoC: Control '%s' is stereo, which is not supported\n",
- kcontrol->id.name);
-
mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
if (dapm_kcontrol_is_powered(kcontrol) && reg != SND_SOC_NOPM) {
- ret = soc_dapm_read(dapm, reg, &val);
- val = (val >> shift) & mask;
+ ret = soc_dapm_read(dapm, reg, &reg_val);
+ val = (reg_val >> shift) & mask;
+
+ if (ret == 0 && reg != mc->rreg)
+ ret = soc_dapm_read(dapm, mc->rreg, &reg_val);
+
+ if (snd_soc_volsw_is_stereo(mc))
+ rval = (reg_val >> mc->rshift) & mask;
} else {
- val = dapm_kcontrol_get_value(kcontrol);
+ reg_val = dapm_kcontrol_get_value(kcontrol);
+ val = reg_val & mask;
+
+ if (snd_soc_volsw_is_stereo(mc))
+ rval = (reg_val >> width) & mask;
}
mutex_unlock(&card->dapm_mutex);
@@ -3057,6 +3122,13 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
else
ucontrol->value.integer.value[0] = val;
+ if (snd_soc_volsw_is_stereo(mc)) {
+ if (invert)
+ ucontrol->value.integer.value[1] = max - rval;
+ else
+ ucontrol->value.integer.value[1] = rval;
+ }
+
return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_get_volsw);
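A small stand-alone sketch (not part of the patch) of the packing used above: once the field width is known, the cached kcontrol value carries the left channel in the low bits and the right channel shifted up by one field width.

#include <linux/bitops.h>

/* Mirrors the val/rval packing used by snd_soc_dapm_get/put_volsw. */
static void example_unpack_cached_value(void)
{
	unsigned int max    = 1;			/* on/off switch */
	unsigned int width  = fls(max);			/* 1 bit per channel */
	unsigned int mask   = (1 << width) - 1;
	unsigned int cached = 1 | (0 << width);		/* left on, right off */
	unsigned int left   = cached & mask;		/* 1 */
	unsigned int right  = (cached >> width) & mask;	/* 0 */

	(void)left;
	(void)right;
}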
@@ -3080,46 +3152,66 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
int reg = mc->reg;
unsigned int shift = mc->shift;
int max = mc->max;
- unsigned int mask = (1 << fls(max)) - 1;
+ unsigned int width = fls(max);
+ unsigned int mask = (1 << width) - 1;
unsigned int invert = mc->invert;
- unsigned int val;
- int connect, change, reg_change = 0;
- struct snd_soc_dapm_update update;
+ unsigned int val, rval = 0;
+ int connect, rconnect = -1, change, reg_change = 0;
+ struct snd_soc_dapm_update update = { NULL };
int ret = 0;
- if (snd_soc_volsw_is_stereo(mc))
- dev_warn(dapm->dev,
- "ASoC: Control '%s' is stereo, which is not supported\n",
- kcontrol->id.name);
-
val = (ucontrol->value.integer.value[0] & mask);
connect = !!val;
if (invert)
val = max - val;
+ if (snd_soc_volsw_is_stereo(mc)) {
+ rval = (ucontrol->value.integer.value[1] & mask);
+ rconnect = !!rval;
+ if (invert)
+ rval = max - rval;
+ }
+
mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
- change = dapm_kcontrol_set_value(kcontrol, val);
+ /* This assumes the field width is at most half the bits in an unsigned int */
+ if (width > sizeof(unsigned int) * 8 / 2)
+ dev_warn(dapm->dev,
+ "ASoC: control %s field width limit exceeded\n",
+ kcontrol->id.name);
+ change = dapm_kcontrol_set_value(kcontrol, val | (rval << width));
if (reg != SND_SOC_NOPM) {
- mask = mask << shift;
val = val << shift;
+ rval = rval << mc->rshift;
+
+ reg_change = soc_dapm_test_bits(dapm, reg, mask << shift, val);
- reg_change = soc_dapm_test_bits(dapm, reg, mask, val);
+ if (snd_soc_volsw_is_stereo(mc))
+ reg_change |= soc_dapm_test_bits(dapm, mc->rreg,
+ mask << mc->rshift,
+ rval);
}
if (change || reg_change) {
if (reg_change) {
+ if (snd_soc_volsw_is_stereo(mc)) {
+ update.has_second_set = true;
+ update.reg2 = mc->rreg;
+ update.mask2 = mask << mc->rshift;
+ update.val2 = rval;
+ }
update.kcontrol = kcontrol;
update.reg = reg;
- update.mask = mask;
+ update.mask = mask << shift;
update.val = val;
card->update = &update;
}
change |= reg_change;
- ret = soc_dapm_mixer_update_power(card, kcontrol, connect);
+ ret = soc_dapm_mixer_update_power(card, kcontrol, connect,
+ rconnect);
card->update = NULL;
}
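For a stereo switch spread across two registers, the update descriptor filled in above ends up holding both writes; roughly like this sketch, using the second-set fields added by this patch and invented register numbers:

/* Sketch only: values are hypothetical, not from a real codec. */
static void example_fill_stereo_update(struct snd_kcontrol *kc)
{
	struct snd_soc_dapm_update update = { NULL };

	update.kcontrol = kc;
	update.reg  = 0x10;		/* left channel register */
	update.mask = 0x1 << 2;
	update.val  = 1 << 2;		/* left connected */

	update.has_second_set = true;	/* extra write for the right channel */
	update.reg2  = 0x14;
	update.mask2 = 0x1 << 2;
	update.val2  = 0;		/* right disconnected */

	(void)update;
}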
@@ -3192,7 +3284,7 @@ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
unsigned int *item = ucontrol->value.enumerated.item;
unsigned int val, change, reg_change = 0;
unsigned int mask;
- struct snd_soc_dapm_update update;
+ struct snd_soc_dapm_update update = { NULL };
int ret = 0;
if (item[0] >= e->items)
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index 6cef3977507a..17eb14935577 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -263,7 +263,6 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
const struct snd_dmaengine_pcm_config *config = pcm->config;
struct device *dev = rtd->platform->dev;
- struct snd_dmaengine_dai_dma_data *dma_data;
struct snd_pcm_substream *substream;
size_t prealloc_buffer_size;
size_t max_buffer_size;
@@ -278,19 +277,11 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
max_buffer_size = SIZE_MAX;
}
-
for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) {
substream = rtd->pcm->streams[i].substream;
if (!substream)
continue;
- dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
-
- if (!pcm->chan[i] &&
- (pcm->flags & SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME))
- pcm->chan[i] = dma_request_slave_channel(dev,
- dma_data->chan_name);
-
if (!pcm->chan[i] && (pcm->flags & SND_DMAENGINE_PCM_FLAG_COMPAT)) {
pcm->chan[i] = dmaengine_pcm_compat_request_channel(rtd,
substream);
@@ -359,9 +350,7 @@ static int dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
const char *name;
struct dma_chan *chan;
- if ((pcm->flags & (SND_DMAENGINE_PCM_FLAG_NO_DT |
- SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME)) ||
- !dev->of_node)
+ if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_DT) || !dev->of_node)
return 0;
if (config && config->dma_dev) {
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index d56a16a0f6fa..e7a1eaa2772f 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -2882,7 +2882,7 @@ int snd_soc_platform_trigger(struct snd_pcm_substream *substream,
EXPORT_SYMBOL_GPL(snd_soc_platform_trigger);
#ifdef CONFIG_DEBUG_FS
-static char *dpcm_state_string(enum snd_soc_dpcm_state state)
+static const char *dpcm_state_string(enum snd_soc_dpcm_state state)
{
switch (state) {
case SND_SOC_DPCM_STATE_NEW:
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 6b05047a4134..65670b2b408c 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -49,10 +49,68 @@
#define SOC_TPLG_PASS_GRAPH 5
#define SOC_TPLG_PASS_PINS 6
#define SOC_TPLG_PASS_BE_DAI 7
+#define SOC_TPLG_PASS_LINK 8
#define SOC_TPLG_PASS_START SOC_TPLG_PASS_MANIFEST
-#define SOC_TPLG_PASS_END SOC_TPLG_PASS_BE_DAI
+#define SOC_TPLG_PASS_END SOC_TPLG_PASS_LINK
+/*
+ * Old version of ABI structs, supported for backward compatibility.
+ */
+
+/* Manifest v4 */
+struct snd_soc_tplg_manifest_v4 {
+ __le32 size; /* in bytes of this structure */
+ __le32 control_elems; /* number of control elements */
+ __le32 widget_elems; /* number of widget elements */
+ __le32 graph_elems; /* number of graph elements */
+ __le32 pcm_elems; /* number of PCM elements */
+ __le32 dai_link_elems; /* number of DAI link elements */
+ struct snd_soc_tplg_private priv;
+} __packed;
+
+/* Stream Capabilities v4 */
+struct snd_soc_tplg_stream_caps_v4 {
+ __le32 size; /* in bytes of this structure */
+ char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ __le64 formats; /* supported formats SNDRV_PCM_FMTBIT_* */
+ __le32 rates; /* supported rates SNDRV_PCM_RATE_* */
+ __le32 rate_min; /* min rate */
+ __le32 rate_max; /* max rate */
+ __le32 channels_min; /* min channels */
+ __le32 channels_max; /* max channels */
+ __le32 periods_min; /* min number of periods */
+ __le32 periods_max; /* max number of periods */
+ __le32 period_size_min; /* min period size bytes */
+ __le32 period_size_max; /* max period size bytes */
+ __le32 buffer_size_min; /* min buffer size bytes */
+ __le32 buffer_size_max; /* max buffer size bytes */
+} __packed;
+
+/* PCM v4 */
+struct snd_soc_tplg_pcm_v4 {
+ __le32 size; /* in bytes of this structure */
+ char pcm_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ char dai_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ __le32 pcm_id; /* unique ID - used to match with DAI link */
+ __le32 dai_id; /* unique ID - used to match */
+ __le32 playback; /* supports playback mode */
+ __le32 capture; /* supports capture mode */
+ __le32 compress; /* 1 = compressed; 0 = PCM */
+ struct snd_soc_tplg_stream stream[SND_SOC_TPLG_STREAM_CONFIG_MAX]; /* for DAI link */
+ __le32 num_streams; /* number of streams */
+ struct snd_soc_tplg_stream_caps_v4 caps[2]; /* playback and capture for DAI */
+} __packed;
+
+/* Physical link config v4 */
+struct snd_soc_tplg_link_config_v4 {
+ __le32 size; /* in bytes of this structure */
+ __le32 id; /* unique ID - used to match */
+ struct snd_soc_tplg_stream stream[SND_SOC_TPLG_STREAM_CONFIG_MAX]; /* supported configs playback and capture */
+ __le32 num_streams; /* number of streams */
+} __packed;
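The element loaders further down tell the two ABI generations apart purely by the size field; a condensed sketch of that dispatch (assuming the structs above are in scope):

/* Sketch of the size-based ABI check used when loading PCM elements. */
static int example_pcm_abi(struct snd_soc_tplg_pcm *pcm)
{
	if (pcm->size == sizeof(struct snd_soc_tplg_pcm))
		return 0;	/* current ABI, use as-is */
	if (pcm->size == sizeof(struct snd_soc_tplg_pcm_v4))
		return 4;	/* v4 layout, convert first */
	return -EINVAL;		/* unknown layout, reject */
}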
+
+/* topology context */
struct soc_tplg {
const struct firmware *fw;
@@ -428,33 +486,41 @@ static void remove_widget(struct snd_soc_component *comp,
dobj->ops->widget_unload(comp, dobj);
/*
- * Dynamic Widgets either have 1 enum kcontrol or 1..N mixers.
+ * Dynamic Widgets either have 1..N enum kcontrols or mixers.
* The enum may either have an array of values or strings.
*/
- if (dobj->widget.kcontrol_enum) {
+ if (dobj->widget.kcontrol_type == SND_SOC_TPLG_TYPE_ENUM) {
/* enumerated widget mixer */
- struct soc_enum *se =
- (struct soc_enum *)w->kcontrols[0]->private_value;
+ for (i = 0; i < w->num_kcontrols; i++) {
+ struct snd_kcontrol *kcontrol = w->kcontrols[i];
+ struct soc_enum *se =
+ (struct soc_enum *)kcontrol->private_value;
- snd_ctl_remove(card, w->kcontrols[0]);
+ snd_ctl_remove(card, kcontrol);
- kfree(se->dobj.control.dvalues);
- for (i = 0; i < se->items; i++)
- kfree(se->dobj.control.dtexts[i]);
+ kfree(se->dobj.control.dvalues);
+ for (i = 0; i < se->items; i++)
+ kfree(se->dobj.control.dtexts[i]);
- kfree(se);
+ kfree(se);
+ }
kfree(w->kcontrol_news);
} else {
- /* non enumerated widget mixer */
+ /* volume mixer or bytes controls */
for (i = 0; i < w->num_kcontrols; i++) {
struct snd_kcontrol *kcontrol = w->kcontrols[i];
- struct soc_mixer_control *sm =
- (struct soc_mixer_control *) kcontrol->private_value;
- kfree(w->kcontrols[i]->tlv.p);
+ if (dobj->widget.kcontrol_type
+ == SND_SOC_TPLG_TYPE_MIXER)
+ kfree(kcontrol->tlv.p);
- snd_ctl_remove(card, w->kcontrols[i]);
- kfree(sm);
+ snd_ctl_remove(card, kcontrol);
+
+ /* Private value is used as struct soc_mixer_control
+ * for volume mixers or soc_bytes_ext for bytes
+ * controls.
+ */
+ kfree((void *)kcontrol->private_value);
}
kfree(w->kcontrol_news);
}
@@ -474,6 +540,7 @@ static void remove_dai(struct snd_soc_component *comp,
if (dobj->ops && dobj->ops->dai_unload)
dobj->ops->dai_unload(comp, dobj);
+ kfree(dai_drv->name);
list_del(&dobj->list);
kfree(dai_drv);
}
@@ -491,6 +558,10 @@ static void remove_link(struct snd_soc_component *comp,
if (dobj->ops && dobj->ops->link_unload)
dobj->ops->link_unload(comp, dobj);
+ kfree(link->name);
+ kfree(link->stream_name);
+ kfree(link->cpu_dai_name);
+
list_del(&dobj->list);
snd_soc_remove_dai_link(comp->card, link);
kfree(link);
@@ -1193,98 +1264,105 @@ err:
}
static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
- struct soc_tplg *tplg)
+ struct soc_tplg *tplg, int num_kcontrols)
{
struct snd_kcontrol_new *kc;
struct snd_soc_tplg_enum_control *ec;
struct soc_enum *se;
- int i, err;
-
- ec = (struct snd_soc_tplg_enum_control *)tplg->pos;
- tplg->pos += (sizeof(struct snd_soc_tplg_enum_control) +
- ec->priv.size);
+ int i, j, err;
- /* validate kcontrol */
- if (strnlen(ec->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
- SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
- return NULL;
-
- kc = kzalloc(sizeof(*kc), GFP_KERNEL);
+ kc = kcalloc(num_kcontrols, sizeof(*kc), GFP_KERNEL);
if (kc == NULL)
return NULL;
- se = kzalloc(sizeof(*se), GFP_KERNEL);
- if (se == NULL)
- goto err;
+ for (i = 0; i < num_kcontrols; i++) {
+ ec = (struct snd_soc_tplg_enum_control *)tplg->pos;
+ /* validate kcontrol */
+ if (strnlen(ec->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
+ SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
+ return NULL;
- dev_dbg(tplg->dev, " adding DAPM widget enum control %s\n",
- ec->hdr.name);
+ se = kzalloc(sizeof(*se), GFP_KERNEL);
+ if (se == NULL)
+ goto err;
- kc->name = ec->hdr.name;
- kc->private_value = (long)se;
- kc->iface = SNDRV_CTL_ELEM_IFACE_MIXER;
- kc->access = ec->hdr.access;
+ dev_dbg(tplg->dev, " adding DAPM widget enum control %s\n",
+ ec->hdr.name);
+
+ kc[i].name = ec->hdr.name;
+ kc[i].private_value = (long)se;
+ kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ kc[i].access = ec->hdr.access;
- /* we only support FL/FR channel mapping atm */
- se->reg = tplc_chan_get_reg(tplg, ec->channel, SNDRV_CHMAP_FL);
- se->shift_l = tplc_chan_get_shift(tplg, ec->channel, SNDRV_CHMAP_FL);
- se->shift_r = tplc_chan_get_shift(tplg, ec->channel, SNDRV_CHMAP_FR);
+ /* we only support FL/FR channel mapping atm */
+ se->reg = tplc_chan_get_reg(tplg, ec->channel, SNDRV_CHMAP_FL);
+ se->shift_l = tplc_chan_get_shift(tplg, ec->channel,
+ SNDRV_CHMAP_FL);
+ se->shift_r = tplc_chan_get_shift(tplg, ec->channel,
+ SNDRV_CHMAP_FR);
- se->items = ec->items;
- se->mask = ec->mask;
- se->dobj.index = tplg->index;
+ se->items = ec->items;
+ se->mask = ec->mask;
+ se->dobj.index = tplg->index;
- switch (ec->hdr.ops.info) {
- case SND_SOC_TPLG_CTL_ENUM_VALUE:
- case SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE:
- err = soc_tplg_denum_create_values(se, ec);
- if (err < 0) {
- dev_err(tplg->dev, "ASoC: could not create values for %s\n",
- ec->hdr.name);
+ switch (ec->hdr.ops.info) {
+ case SND_SOC_TPLG_CTL_ENUM_VALUE:
+ case SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE:
+ err = soc_tplg_denum_create_values(se, ec);
+ if (err < 0) {
+ dev_err(tplg->dev, "ASoC: could not create values for %s\n",
+ ec->hdr.name);
+ goto err_se;
+ }
+ /* fall through to create texts */
+ case SND_SOC_TPLG_CTL_ENUM:
+ case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
+ case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
+ err = soc_tplg_denum_create_texts(se, ec);
+ if (err < 0) {
+ dev_err(tplg->dev, "ASoC: could not create texts for %s\n",
+ ec->hdr.name);
+ goto err_se;
+ }
+ break;
+ default:
+ dev_err(tplg->dev, "ASoC: invalid enum control type %d for %s\n",
+ ec->hdr.ops.info, ec->hdr.name);
goto err_se;
}
- /* fall through to create texts */
- case SND_SOC_TPLG_CTL_ENUM:
- case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
- case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
- err = soc_tplg_denum_create_texts(se, ec);
+
+ /* map io handlers */
+ err = soc_tplg_kcontrol_bind_io(&ec->hdr, &kc[i], tplg);
+ if (err) {
+ soc_control_err(tplg, &ec->hdr, ec->hdr.name);
+ goto err_se;
+ }
+
+ /* pass control to driver for optional further init */
+ err = soc_tplg_init_kcontrol(tplg, &kc[i],
+ (struct snd_soc_tplg_ctl_hdr *)ec);
if (err < 0) {
- dev_err(tplg->dev, "ASoC: could not create texts for %s\n",
+ dev_err(tplg->dev, "ASoC: failed to init %s\n",
ec->hdr.name);
goto err_se;
}
- break;
- default:
- dev_err(tplg->dev, "ASoC: invalid enum control type %d for %s\n",
- ec->hdr.ops.info, ec->hdr.name);
- goto err_se;
- }
- /* map io handlers */
- err = soc_tplg_kcontrol_bind_io(&ec->hdr, kc, tplg);
- if (err) {
- soc_control_err(tplg, &ec->hdr, ec->hdr.name);
- goto err_se;
- }
-
- /* pass control to driver for optional further init */
- err = soc_tplg_init_kcontrol(tplg, kc,
- (struct snd_soc_tplg_ctl_hdr *)ec);
- if (err < 0) {
- dev_err(tplg->dev, "ASoC: failed to init %s\n",
- ec->hdr.name);
- goto err_se;
+ tplg->pos += (sizeof(struct snd_soc_tplg_enum_control) +
+ ec->priv.size);
}
return kc;
err_se:
- /* free values and texts */
- kfree(se->dobj.control.dvalues);
- for (i = 0; i < ec->items; i++)
- kfree(se->dobj.control.dtexts[i]);
+ for (; i >= 0; i--) {
+ /* free values and texts */
+ se = (struct soc_enum *)kc[i].private_value;
+ kfree(se->dobj.control.dvalues);
+ for (j = 0; j < ec->items; j++)
+ kfree(se->dobj.control.dtexts[j]);
- kfree(se);
+ kfree(se);
+ }
err:
kfree(kc);
@@ -1366,6 +1444,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
struct snd_soc_dapm_widget template, *widget;
struct snd_soc_tplg_ctl_hdr *control_hdr;
struct snd_soc_card *card = tplg->comp->card;
+ unsigned int kcontrol_type;
int ret = 0;
if (strnlen(w->name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
@@ -1406,6 +1485,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
tplg->pos +=
(sizeof(struct snd_soc_tplg_dapm_widget) + w->priv.size);
if (w->num_kcontrols == 0) {
+ kcontrol_type = 0;
template.num_kcontrols = 0;
goto widget;
}
@@ -1421,6 +1501,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
case SND_SOC_TPLG_CTL_VOLSW_XR_SX:
case SND_SOC_TPLG_CTL_RANGE:
case SND_SOC_TPLG_DAPM_CTL_VOLSW:
+ kcontrol_type = SND_SOC_TPLG_TYPE_MIXER; /* volume mixer */
template.num_kcontrols = w->num_kcontrols;
template.kcontrol_news =
soc_tplg_dapm_widget_dmixer_create(tplg,
@@ -1435,16 +1516,18 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
case SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE:
- template.dobj.widget.kcontrol_enum = 1;
- template.num_kcontrols = 1;
+ kcontrol_type = SND_SOC_TPLG_TYPE_ENUM; /* enumerated mixer */
+ template.num_kcontrols = w->num_kcontrols;
template.kcontrol_news =
- soc_tplg_dapm_widget_denum_create(tplg);
+ soc_tplg_dapm_widget_denum_create(tplg,
+ template.num_kcontrols);
if (!template.kcontrol_news) {
ret = -ENOMEM;
goto hdr_err;
}
break;
case SND_SOC_TPLG_CTL_BYTES:
+ kcontrol_type = SND_SOC_TPLG_TYPE_BYTES; /* bytes control */
template.num_kcontrols = w->num_kcontrols;
template.kcontrol_news =
soc_tplg_dapm_widget_dbytes_create(tplg,
@@ -1481,6 +1564,7 @@ widget:
}
widget->dobj.type = SND_SOC_DOBJ_WIDGET;
+ widget->dobj.widget.kcontrol_type = kcontrol_type;
widget->dobj.ops = tplg->ops;
widget->dobj.index = tplg->index;
kfree(template.sname);
@@ -1589,7 +1673,8 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
if (dai_drv == NULL)
return -ENOMEM;
- dai_drv->name = pcm->dai_name;
+ if (strlen(pcm->dai_name))
+ dai_drv->name = kstrdup(pcm->dai_name, GFP_KERNEL);
dai_drv->id = pcm->dai_id;
if (pcm->playback) {
@@ -1621,8 +1706,31 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
return snd_soc_register_dai(tplg->comp, dai_drv);
}
+static void set_link_flags(struct snd_soc_dai_link *link,
+ unsigned int flag_mask, unsigned int flags)
+{
+ if (flag_mask & SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_RATES)
+ link->symmetric_rates =
+ flags & SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_RATES ? 1 : 0;
+
+ if (flag_mask & SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_CHANNELS)
+ link->symmetric_channels =
+ flags & SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_CHANNELS ?
+ 1 : 0;
+
+ if (flag_mask & SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_SAMPLEBITS)
+ link->symmetric_samplebits =
+ flags & SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_SAMPLEBITS ?
+ 1 : 0;
+
+ if (flag_mask & SND_SOC_TPLG_LNK_FLGBIT_VOICE_WAKEUP)
+ link->ignore_suspend =
+ flags & SND_SOC_TPLG_LNK_FLGBIT_VOICE_WAKEUP ?
+ 1 : 0;
+}
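Only bits set in flag_mask are applied, so a topology can flip one link flag without clobbering the others; for example (a sketch assuming it lives next to the helper above):

/* Enable symmetric rates only, leaving the remaining flags untouched. */
static void example_set_symmetric_rates(struct snd_soc_dai_link *link)
{
	set_link_flags(link,
		       SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_RATES,
		       SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_RATES);
}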
+
/* create the FE DAI link */
-static int soc_tplg_link_create(struct soc_tplg *tplg,
+static int soc_tplg_fe_link_create(struct soc_tplg *tplg,
struct snd_soc_tplg_pcm *pcm)
{
struct snd_soc_dai_link *link;
@@ -1632,11 +1740,15 @@ static int soc_tplg_link_create(struct soc_tplg *tplg,
if (link == NULL)
return -ENOMEM;
- link->name = pcm->pcm_name;
- link->stream_name = pcm->pcm_name;
+ if (strlen(pcm->pcm_name)) {
+ link->name = kstrdup(pcm->pcm_name, GFP_KERNEL);
+ link->stream_name = kstrdup(pcm->pcm_name, GFP_KERNEL);
+ }
link->id = pcm->pcm_id;
- link->cpu_dai_name = pcm->dai_name;
+ if (strlen(pcm->dai_name))
+ link->cpu_dai_name = kstrdup(pcm->dai_name, GFP_KERNEL);
+
link->codec_name = "snd-soc-dummy";
link->codec_dai_name = "snd-soc-dummy-dai";
@@ -1644,6 +1756,8 @@ static int soc_tplg_link_create(struct soc_tplg *tplg,
link->dynamic = 1;
link->dpcm_playback = pcm->playback;
link->dpcm_capture = pcm->capture;
+ if (pcm->flag_mask)
+ set_link_flags(link, pcm->flag_mask, pcm->flags);
/* pass control to component driver for optional further init */
ret = soc_tplg_dai_link_load(tplg, link);
@@ -1672,55 +1786,351 @@ static int soc_tplg_pcm_create(struct soc_tplg *tplg,
if (ret < 0)
return ret;
- return soc_tplg_link_create(tplg, pcm);
+ return soc_tplg_fe_link_create(tplg, pcm);
+}
+
+/* copy stream caps from the old version 4 of source */
+static void stream_caps_new_ver(struct snd_soc_tplg_stream_caps *dest,
+ struct snd_soc_tplg_stream_caps_v4 *src)
+{
+ dest->size = sizeof(*dest);
+ memcpy(dest->name, src->name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN);
+ dest->formats = src->formats;
+ dest->rates = src->rates;
+ dest->rate_min = src->rate_min;
+ dest->rate_max = src->rate_max;
+ dest->channels_min = src->channels_min;
+ dest->channels_max = src->channels_max;
+ dest->periods_min = src->periods_min;
+ dest->periods_max = src->periods_max;
+ dest->period_size_min = src->period_size_min;
+ dest->period_size_max = src->period_size_max;
+ dest->buffer_size_min = src->buffer_size_min;
+ dest->buffer_size_max = src->buffer_size_max;
+}
+
+/**
+ * pcm_new_ver - Create the new version of PCM from the old version.
+ * @tplg: topology context
+ * @src: older version of pcm as a source
+ * @pcm: latest version of pcm created from the source
+ *
+ * Supported from version 4. The caller should free the returned pcm manually.
+ */
+static int pcm_new_ver(struct soc_tplg *tplg,
+ struct snd_soc_tplg_pcm *src,
+ struct snd_soc_tplg_pcm **pcm)
+{
+ struct snd_soc_tplg_pcm *dest;
+ struct snd_soc_tplg_pcm_v4 *src_v4;
+ int i;
+
+ *pcm = NULL;
+
+ if (src->size != sizeof(*src_v4)) {
+ dev_err(tplg->dev, "ASoC: invalid PCM size\n");
+ return -EINVAL;
+ }
+
+ dev_warn(tplg->dev, "ASoC: old version of PCM\n");
+ src_v4 = (struct snd_soc_tplg_pcm_v4 *)src;
+ dest = kzalloc(sizeof(*dest), GFP_KERNEL);
+ if (!dest)
+ return -ENOMEM;
+
+ dest->size = sizeof(*dest); /* size of latest abi version */
+ memcpy(dest->pcm_name, src_v4->pcm_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN);
+ memcpy(dest->dai_name, src_v4->dai_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN);
+ dest->pcm_id = src_v4->pcm_id;
+ dest->dai_id = src_v4->dai_id;
+ dest->playback = src_v4->playback;
+ dest->capture = src_v4->capture;
+ dest->compress = src_v4->compress;
+ dest->num_streams = src_v4->num_streams;
+ for (i = 0; i < dest->num_streams; i++)
+ memcpy(&dest->stream[i], &src_v4->stream[i],
+ sizeof(struct snd_soc_tplg_stream));
+
+ for (i = 0; i < 2; i++)
+ stream_caps_new_ver(&dest->caps[i], &src_v4->caps[i]);
+
+ *pcm = dest;
+ return 0;
}
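Callers of pcm_new_ver() are expected to convert, use and then free the temporary copy; roughly along these lines (error handling trimmed, names hypothetical):

/* Sketch of the intended calling pattern for an old-ABI PCM element. */
static int example_load_old_pcm(struct soc_tplg *tplg,
				struct snd_soc_tplg_pcm *old)
{
	struct snd_soc_tplg_pcm *pcm;
	int err;

	err = pcm_new_ver(tplg, old, &pcm);
	if (err < 0)
		return err;

	err = soc_tplg_pcm_create(tplg, pcm);

	kfree(pcm);	/* pcm_new_ver() allocated the upgraded copy */
	return err;
}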
static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg,
struct snd_soc_tplg_hdr *hdr)
{
- struct snd_soc_tplg_pcm *pcm;
+ struct snd_soc_tplg_pcm *pcm, *_pcm;
int count = hdr->count;
- int i;
+ int i, err;
+ bool abi_match;
if (tplg->pass != SOC_TPLG_PASS_PCM_DAI)
return 0;
+ /* check the element size and count */
+ pcm = (struct snd_soc_tplg_pcm *)tplg->pos;
+ if (pcm->size > sizeof(struct snd_soc_tplg_pcm)
+ || pcm->size < sizeof(struct snd_soc_tplg_pcm_v4)) {
+ dev_err(tplg->dev, "ASoC: invalid size %d for PCM elems\n",
+ pcm->size);
+ return -EINVAL;
+ }
+
if (soc_tplg_check_elem_count(tplg,
- sizeof(struct snd_soc_tplg_pcm), count,
+ pcm->size, count,
hdr->payload_size, "PCM DAI")) {
dev_err(tplg->dev, "ASoC: invalid count %d for PCM DAI elems\n",
count);
return -EINVAL;
}
- /* create the FE DAIs and DAI links */
- pcm = (struct snd_soc_tplg_pcm *)tplg->pos;
for (i = 0; i < count; i++) {
- if (pcm->size != sizeof(*pcm)) {
- dev_err(tplg->dev, "ASoC: invalid pcm size\n");
- return -EINVAL;
+ pcm = (struct snd_soc_tplg_pcm *)tplg->pos;
+
+ /* Check the ABI version by size; create a new version of the pcm
+ * if the ABI does not match.
+ */
+ if (pcm->size == sizeof(*pcm)) {
+ abi_match = true;
+ _pcm = pcm;
+ } else {
+ abi_match = false;
+ err = pcm_new_ver(tplg, pcm, &_pcm);
+ if (err < 0)
+ return err;
}
- soc_tplg_pcm_create(tplg, pcm);
- pcm++;
+ /* create the FE DAIs and DAI links */
+ soc_tplg_pcm_create(tplg, _pcm);
+
+ /* offset by version-specific struct size and
+ * real priv data size
+ */
+ tplg->pos += pcm->size + _pcm->priv.size;
+
+ if (!abi_match)
+ kfree(_pcm); /* free the duplicated one */
}
dev_dbg(tplg->dev, "ASoC: adding %d PCM DAIs\n", count);
- tplg->pos += sizeof(struct snd_soc_tplg_pcm) * count;
return 0;
}
-/* *
- * soc_tplg_be_dai_config - Find and configure an existing BE DAI.
+/**
+ * set_link_hw_format - Set the HW audio format of the physical DAI link.
+ * @link: physical DAI link to configure
+ * @cfg: physical link configs
+ *
+ * The link config contains a list of supported HW formats (configs) and
+ * a default format ID. This function uses that default ID to choose the
+ * HW format and sets the link's DAI format accordingly.
+ */
+static void set_link_hw_format(struct snd_soc_dai_link *link,
+ struct snd_soc_tplg_link_config *cfg)
+{
+ struct snd_soc_tplg_hw_config *hw_config;
+ unsigned char bclk_master, fsync_master;
+ unsigned char invert_bclk, invert_fsync;
+ int i;
+
+ for (i = 0; i < cfg->num_hw_configs; i++) {
+ hw_config = &cfg->hw_config[i];
+ if (hw_config->id != cfg->default_hw_config_id)
+ continue;
+
+ link->dai_fmt = hw_config->fmt & SND_SOC_DAIFMT_FORMAT_MASK;
+
+ /* clock signal polarity */
+ invert_bclk = hw_config->invert_bclk;
+ invert_fsync = hw_config->invert_fsync;
+ if (!invert_bclk && !invert_fsync)
+ link->dai_fmt |= SND_SOC_DAIFMT_NB_NF;
+ else if (!invert_bclk && invert_fsync)
+ link->dai_fmt |= SND_SOC_DAIFMT_NB_IF;
+ else if (invert_bclk && !invert_fsync)
+ link->dai_fmt |= SND_SOC_DAIFMT_IB_NF;
+ else
+ link->dai_fmt |= SND_SOC_DAIFMT_IB_IF;
+
+ /* clock masters */
+ bclk_master = hw_config->bclk_master;
+ fsync_master = hw_config->fsync_master;
+ if (!bclk_master && !fsync_master)
+ link->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
+ else if (bclk_master && !fsync_master)
+ link->dai_fmt |= SND_SOC_DAIFMT_CBS_CFM;
+ else if (!bclk_master && fsync_master)
+ link->dai_fmt |= SND_SOC_DAIFMT_CBM_CFS;
+ else
+ link->dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
+ }
+}
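The resulting dai_fmt is the usual OR of a format, a clock polarity and a master setting; for instance an I2S link with normal clocks and the codec as bit and frame master comes out as in this sketch:

/* Hand-written equivalent of one common hw_config combination. */
static void example_dai_fmt(struct snd_soc_dai_link *link)
{
	link->dai_fmt = SND_SOC_DAIFMT_I2S |	/* hw_config->fmt */
			SND_SOC_DAIFMT_NB_NF |	/* no inverted clocks */
			SND_SOC_DAIFMT_CBM_CFM;	/* codec masters bclk/frame */
}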
+
+/**
+ * link_new_ver - Create a new physical link config from the old
+ * version of source.
+ * @tplg: topology context
+ * @src: old version of physical link config as a source
+ * @link: latest version of physical link config created from the source
+ *
+ * Supported from version 4. The caller must free the returned link config manually.
+ */
+static int link_new_ver(struct soc_tplg *tplg,
+ struct snd_soc_tplg_link_config *src,
+ struct snd_soc_tplg_link_config **link)
+{
+ struct snd_soc_tplg_link_config *dest;
+ struct snd_soc_tplg_link_config_v4 *src_v4;
+ int i;
+
+ *link = NULL;
+
+ if (src->size != sizeof(struct snd_soc_tplg_link_config_v4)) {
+ dev_err(tplg->dev, "ASoC: invalid physical link config size\n");
+ return -EINVAL;
+ }
+
+ dev_warn(tplg->dev, "ASoC: old version of physical link config\n");
+
+ src_v4 = (struct snd_soc_tplg_link_config_v4 *)src;
+ dest = kzalloc(sizeof(*dest), GFP_KERNEL);
+ if (!dest)
+ return -ENOMEM;
+
+ dest->size = sizeof(*dest);
+ dest->id = src_v4->id;
+ dest->num_streams = src_v4->num_streams;
+ for (i = 0; i < dest->num_streams; i++)
+ memcpy(&dest->stream[i], &src_v4->stream[i],
+ sizeof(struct snd_soc_tplg_stream));
+
+ *link = dest;
+ return 0;
+}
+
+/* Find and configure an existing physical DAI link */
+static int soc_tplg_link_config(struct soc_tplg *tplg,
+ struct snd_soc_tplg_link_config *cfg)
+{
+ struct snd_soc_dai_link *link;
+ const char *name, *stream_name;
+ size_t len;
+ int ret;
+
+ len = strnlen(cfg->name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN);
+ if (len == SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
+ return -EINVAL;
+ else if (len)
+ name = cfg->name;
+ else
+ name = NULL;
+
+ len = strnlen(cfg->stream_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN);
+ if (len == SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
+ return -EINVAL;
+ else if (len)
+ stream_name = cfg->stream_name;
+ else
+ stream_name = NULL;
+
+ link = snd_soc_find_dai_link(tplg->comp->card, cfg->id,
+ name, stream_name);
+ if (!link) {
+ dev_err(tplg->dev, "ASoC: physical link %s (id %d) not exist\n",
+ name, cfg->id);
+ return -EINVAL;
+ }
+
+ /* hw format */
+ if (cfg->num_hw_configs)
+ set_link_hw_format(link, cfg);
+
+ /* flags */
+ if (cfg->flag_mask)
+ set_link_flags(link, cfg->flag_mask, cfg->flags);
+
+ /* pass control to component driver for optional further init */
+ ret = soc_tplg_dai_link_load(tplg, link);
+ if (ret < 0) {
+ dev_err(tplg->dev, "ASoC: physical link loading failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+
+/* Load physical link config elements from the topology context */
+static int soc_tplg_link_elems_load(struct soc_tplg *tplg,
+ struct snd_soc_tplg_hdr *hdr)
+{
+ struct snd_soc_tplg_link_config *link, *_link;
+ int count = hdr->count;
+ int i, ret;
+ bool abi_match;
+
+ if (tplg->pass != SOC_TPLG_PASS_LINK) {
+ tplg->pos += hdr->size + hdr->payload_size;
+ return 0;
+ }
+
+ /* check the element size and count */
+ link = (struct snd_soc_tplg_link_config *)tplg->pos;
+ if (link->size > sizeof(struct snd_soc_tplg_link_config)
+ || link->size < sizeof(struct snd_soc_tplg_link_config_v4)) {
+ dev_err(tplg->dev, "ASoC: invalid size %d for physical link elems\n",
+ link->size);
+ return -EINVAL;
+ }
+
+ if (soc_tplg_check_elem_count(tplg,
+ link->size, count,
+ hdr->payload_size, "physical link config")) {
+ dev_err(tplg->dev, "ASoC: invalid count %d for physical link elems\n",
+ count);
+ return -EINVAL;
+ }
+
+ /* config physical DAI links */
+ for (i = 0; i < count; i++) {
+ link = (struct snd_soc_tplg_link_config *)tplg->pos;
+ if (link->size == sizeof(*link)) {
+ abi_match = true;
+ _link = link;
+ } else {
+ abi_match = false;
+ ret = link_new_ver(tplg, link, &_link);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = soc_tplg_link_config(tplg, _link);
+ if (ret < 0)
+ return ret;
+
+ /* offset by version-specific struct size and
+ * real priv data size
+ */
+ tplg->pos += link->size + _link->priv.size;
+
+ if (!abi_match)
+ kfree(_link); /* free the duplicated one */
+ }
+
+ return 0;
+}
+
+/**
+ * soc_tplg_dai_config - Find and configure an existing physical DAI.
* @tplg: topology context
- * @be: topology BE DAI configs.
+ * @d: physical DAI configs.
*
- * The BE dai should already be registered by the platform driver. The
- * platform driver should specify the BE DAI name and ID for matching.
+ * The physical dai should already be registered by the platform driver.
+ * The platform driver should specify the DAI name and ID for matching.
*/
-static int soc_tplg_be_dai_config(struct soc_tplg *tplg,
- struct snd_soc_tplg_be_dai *be)
+static int soc_tplg_dai_config(struct soc_tplg *tplg,
+ struct snd_soc_tplg_dai *d)
{
struct snd_soc_dai_link_component dai_component = {0};
struct snd_soc_dai *dai;
@@ -1729,17 +2139,17 @@ static int soc_tplg_be_dai_config(struct soc_tplg *tplg,
struct snd_soc_tplg_stream_caps *caps;
int ret;
- dai_component.dai_name = be->dai_name;
+ dai_component.dai_name = d->dai_name;
dai = snd_soc_find_dai(&dai_component);
if (!dai) {
- dev_err(tplg->dev, "ASoC: BE DAI %s not registered\n",
- be->dai_name);
+ dev_err(tplg->dev, "ASoC: physical DAI %s not registered\n",
+ d->dai_name);
return -EINVAL;
}
- if (be->dai_id != dai->id) {
- dev_err(tplg->dev, "ASoC: BE DAI %s id mismatch\n",
- be->dai_name);
+ if (d->dai_id != dai->id) {
+ dev_err(tplg->dev, "ASoC: physical DAI %s id mismatch\n",
+ d->dai_name);
return -EINVAL;
}
@@ -1747,20 +2157,20 @@ static int soc_tplg_be_dai_config(struct soc_tplg *tplg,
if (!dai_drv)
return -EINVAL;
- if (be->playback) {
+ if (d->playback) {
stream = &dai_drv->playback;
- caps = &be->caps[SND_SOC_TPLG_STREAM_PLAYBACK];
+ caps = &d->caps[SND_SOC_TPLG_STREAM_PLAYBACK];
set_stream_info(stream, caps);
}
- if (be->capture) {
+ if (d->capture) {
stream = &dai_drv->capture;
- caps = &be->caps[SND_SOC_TPLG_STREAM_CAPTURE];
+ caps = &d->caps[SND_SOC_TPLG_STREAM_CAPTURE];
set_stream_info(stream, caps);
}
- if (be->flag_mask)
- set_dai_flags(dai_drv, be->flag_mask, be->flags);
+ if (d->flag_mask)
+ set_dai_flags(dai_drv, d->flag_mask, d->flags);
/* pass control to component driver for optional further init */
ret = soc_tplg_dai_load(tplg, dai_drv);
@@ -1772,10 +2182,11 @@ static int soc_tplg_be_dai_config(struct soc_tplg *tplg,
return 0;
}
-static int soc_tplg_be_dai_elems_load(struct soc_tplg *tplg,
- struct snd_soc_tplg_hdr *hdr)
+/* load physical DAI elements */
+static int soc_tplg_dai_elems_load(struct soc_tplg *tplg,
+ struct snd_soc_tplg_hdr *hdr)
{
- struct snd_soc_tplg_be_dai *be;
+ struct snd_soc_tplg_dai *dai;
int count = hdr->count;
int i;
@@ -1784,41 +2195,95 @@ static int soc_tplg_be_dai_elems_load(struct soc_tplg *tplg,
/* config the existing BE DAIs */
for (i = 0; i < count; i++) {
- be = (struct snd_soc_tplg_be_dai *)tplg->pos;
- if (be->size != sizeof(*be)) {
- dev_err(tplg->dev, "ASoC: invalid BE DAI size\n");
+ dai = (struct snd_soc_tplg_dai *)tplg->pos;
+ if (dai->size != sizeof(*dai)) {
+ dev_err(tplg->dev, "ASoC: invalid physical DAI size\n");
return -EINVAL;
}
- soc_tplg_be_dai_config(tplg, be);
- tplg->pos += (sizeof(*be) + be->priv.size);
+ soc_tplg_dai_config(tplg, dai);
+ tplg->pos += (sizeof(*dai) + dai->priv.size);
}
dev_dbg(tplg->dev, "ASoC: Configure %d BE DAIs\n", count);
return 0;
}
+/**
+ * manifest_new_ver - Create a new version of manifest from the old version
+ * of source.
+ * @tplg: topology context
+ * @src: old version of manifest as a source
+ * @manifest: latest version of manifest created from the source
+ *
+ * Supported from version 4. The caller must free the returned manifest manually.
+ */
+static int manifest_new_ver(struct soc_tplg *tplg,
+ struct snd_soc_tplg_manifest *src,
+ struct snd_soc_tplg_manifest **manifest)
+{
+ struct snd_soc_tplg_manifest *dest;
+ struct snd_soc_tplg_manifest_v4 *src_v4;
+
+ *manifest = NULL;
+
+ if (src->size != sizeof(*src_v4)) {
+ dev_err(tplg->dev, "ASoC: invalid manifest size\n");
+ return -EINVAL;
+ }
+
+ dev_warn(tplg->dev, "ASoC: old version of manifest\n");
+
+ src_v4 = (struct snd_soc_tplg_manifest_v4 *)src;
+ dest = kzalloc(sizeof(*dest) + src_v4->priv.size, GFP_KERNEL);
+ if (!dest)
+ return -ENOMEM;
+
+ dest->size = sizeof(*dest); /* size of latest abi version */
+ dest->control_elems = src_v4->control_elems;
+ dest->widget_elems = src_v4->widget_elems;
+ dest->graph_elems = src_v4->graph_elems;
+ dest->pcm_elems = src_v4->pcm_elems;
+ dest->dai_link_elems = src_v4->dai_link_elems;
+ dest->priv.size = src_v4->priv.size;
+ if (dest->priv.size)
+ memcpy(dest->priv.data, src_v4->priv.data,
+ src_v4->priv.size);
+
+ *manifest = dest;
+ return 0;
+}
static int soc_tplg_manifest_load(struct soc_tplg *tplg,
struct snd_soc_tplg_hdr *hdr)
{
- struct snd_soc_tplg_manifest *manifest;
+ struct snd_soc_tplg_manifest *manifest, *_manifest;
+ bool abi_match;
+ int err;
if (tplg->pass != SOC_TPLG_PASS_MANIFEST)
return 0;
manifest = (struct snd_soc_tplg_manifest *)tplg->pos;
- if (manifest->size != sizeof(*manifest)) {
- dev_err(tplg->dev, "ASoC: invalid manifest size\n");
- return -EINVAL;
- }
- tplg->pos += sizeof(struct snd_soc_tplg_manifest);
+ /* Check the ABI version by size; create a new manifest if the ABI does not match */
+ if (manifest->size == sizeof(*manifest)) {
+ abi_match = true;
+ _manifest = manifest;
+ } else {
+ abi_match = false;
+ err = manifest_new_ver(tplg, manifest, &_manifest);
+ if (err < 0)
+ return err;
+ }
+ /* pass control to component driver for optional further init */
if (tplg->comp && tplg->ops && tplg->ops->manifest)
- return tplg->ops->manifest(tplg->comp, manifest);
+ return tplg->ops->manifest(tplg->comp, _manifest);
+
+ if (!abi_match) /* free the duplicated one */
+ kfree(_manifest);
- dev_err(tplg->dev, "ASoC: Firmware manifest not supported\n");
return 0;
}
@@ -1854,7 +2319,9 @@ static int soc_valid_header(struct soc_tplg *tplg,
return -EINVAL;
}
- if (hdr->abi != SND_SOC_TPLG_ABI_VERSION) {
+ /* Support ABI from version 4 */
+ if (hdr->abi > SND_SOC_TPLG_ABI_VERSION
+ || hdr->abi < SND_SOC_TPLG_ABI_VERSION_MIN) {
dev_err(tplg->dev,
"ASoC: pass %d invalid ABI version got 0x%x need 0x%x at offset 0x%lx size 0x%zx.\n",
tplg->pass, hdr->abi,
@@ -1902,8 +2369,12 @@ static int soc_tplg_load_header(struct soc_tplg *tplg,
return soc_tplg_dapm_widget_elems_load(tplg, hdr);
case SND_SOC_TPLG_TYPE_PCM:
return soc_tplg_pcm_elems_load(tplg, hdr);
- case SND_SOC_TPLG_TYPE_BE_DAI:
- return soc_tplg_be_dai_elems_load(tplg, hdr);
+ case SND_SOC_TPLG_TYPE_DAI:
+ return soc_tplg_dai_elems_load(tplg, hdr);
+ case SND_SOC_TPLG_TYPE_DAI_LINK:
+ case SND_SOC_TPLG_TYPE_BACKEND_LINK:
+ /* physical link configurations */
+ return soc_tplg_link_elems_load(tplg, hdr);
case SND_SOC_TPLG_TYPE_MANIFEST:
return soc_tplg_manifest_load(tplg, hdr);
default:
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 393e8f0fe2cc..644d9a9ebfbc 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -58,6 +58,205 @@ int snd_soc_params_to_bclk(struct snd_pcm_hw_params *params)
}
EXPORT_SYMBOL_GPL(snd_soc_params_to_bclk);
+int snd_soc_component_enable_pin(struct snd_soc_component *component,
+ const char *pin)
+{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ char *full_name;
+ int ret;
+
+ if (!component->name_prefix)
+ return snd_soc_dapm_enable_pin(dapm, pin);
+
+ full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
+ if (!full_name)
+ return -ENOMEM;
+
+ ret = snd_soc_dapm_enable_pin(dapm, full_name);
+ kfree(full_name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_enable_pin);
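From a machine driver the helper is used with the bare pin name; when the component has a name_prefix (say "left" on one of two identical codecs) the prefix is prepended automatically. A sketch, with "Headphone Jack" as a hypothetical pin:

#include <sound/soc.h>

static int example_enable_headphone(struct snd_soc_component *component)
{
	/* With name_prefix "left" this resolves to "left Headphone Jack". */
	return snd_soc_component_enable_pin(component, "Headphone Jack");
}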
+
+int snd_soc_component_enable_pin_unlocked(struct snd_soc_component *component,
+ const char *pin)
+{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ char *full_name;
+ int ret;
+
+ if (!component->name_prefix)
+ return snd_soc_dapm_enable_pin_unlocked(dapm, pin);
+
+ full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
+ if (!full_name)
+ return -ENOMEM;
+
+ ret = snd_soc_dapm_enable_pin_unlocked(dapm, full_name);
+ kfree(full_name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_enable_pin_unlocked);
+
+int snd_soc_component_disable_pin(struct snd_soc_component *component,
+ const char *pin)
+{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ char *full_name;
+ int ret;
+
+ if (!component->name_prefix)
+ return snd_soc_dapm_disable_pin(dapm, pin);
+
+ full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
+ if (!full_name)
+ return -ENOMEM;
+
+ ret = snd_soc_dapm_disable_pin(dapm, full_name);
+ kfree(full_name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_disable_pin);
+
+int snd_soc_component_disable_pin_unlocked(struct snd_soc_component *component,
+ const char *pin)
+{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ char *full_name;
+ int ret;
+
+ if (!component->name_prefix)
+ return snd_soc_dapm_disable_pin_unlocked(dapm, pin);
+
+ full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
+ if (!full_name)
+ return -ENOMEM;
+
+ ret = snd_soc_dapm_disable_pin_unlocked(dapm, full_name);
+ kfree(full_name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_disable_pin_unlocked);
+
+int snd_soc_component_nc_pin(struct snd_soc_component *component,
+ const char *pin)
+{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ char *full_name;
+ int ret;
+
+ if (!component->name_prefix)
+ return snd_soc_dapm_nc_pin(dapm, pin);
+
+ full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
+ if (!full_name)
+ return -ENOMEM;
+
+ ret = snd_soc_dapm_nc_pin(dapm, full_name);
+ kfree(full_name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_nc_pin);
+
+int snd_soc_component_nc_pin_unlocked(struct snd_soc_component *component,
+ const char *pin)
+{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ char *full_name;
+ int ret;
+
+ if (!component->name_prefix)
+ return snd_soc_dapm_nc_pin_unlocked(dapm, pin);
+
+ full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
+ if (!full_name)
+ return -ENOMEM;
+
+ ret = snd_soc_dapm_nc_pin_unlocked(dapm, full_name);
+ kfree(full_name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_nc_pin_unlocked);
+
+int snd_soc_component_get_pin_status(struct snd_soc_component *component,
+ const char *pin)
+{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ char *full_name;
+ int ret;
+
+ if (!component->name_prefix)
+ return snd_soc_dapm_get_pin_status(dapm, pin);
+
+ full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
+ if (!full_name)
+ return -ENOMEM;
+
+ ret = snd_soc_dapm_get_pin_status(dapm, full_name);
+ kfree(full_name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_get_pin_status);
+
+int snd_soc_component_force_enable_pin(struct snd_soc_component *component,
+ const char *pin)
+{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ char *full_name;
+ int ret;
+
+ if (!component->name_prefix)
+ return snd_soc_dapm_force_enable_pin(dapm, pin);
+
+ full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
+ if (!full_name)
+ return -ENOMEM;
+
+ ret = snd_soc_dapm_force_enable_pin(dapm, full_name);
+ kfree(full_name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin);
+
+int snd_soc_component_force_enable_pin_unlocked(
+ struct snd_soc_component *component,
+ const char *pin)
+{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ char *full_name;
+ int ret;
+
+ if (!component->name_prefix)
+ return snd_soc_dapm_force_enable_pin_unlocked(dapm, pin);
+
+ full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
+ if (!full_name)
+ return -ENOMEM;
+
+ ret = snd_soc_dapm_force_enable_pin_unlocked(dapm, full_name);
+ kfree(full_name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin_unlocked);
+
static const struct snd_pcm_hardware dummy_dma_hardware = {
/* Random values to keep userspace happy when checking constraints */
.info = SNDRV_PCM_INFO_INTERLEAVED |
diff --git a/sound/soc/sti/sti_uniperif.c b/sound/soc/sti/sti_uniperif.c
index 549fac349fa0..98eb205a0b62 100644
--- a/sound/soc/sti/sti_uniperif.c
+++ b/sound/soc/sti/sti_uniperif.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/delay.h>
#include "uniperif.h"
@@ -97,6 +98,28 @@ static const struct of_device_id snd_soc_sti_match[] = {
{},
};
+int sti_uniperiph_reset(struct uniperif *uni)
+{
+ int count = 10;
+
+ /* Reset uniperipheral uni */
+ SET_UNIPERIF_SOFT_RST_SOFT_RST(uni);
+
+ if (uni->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) {
+ while (GET_UNIPERIF_SOFT_RST_SOFT_RST(uni) && count) {
+ udelay(5);
+ count--;
+ }
+ }
+
+ if (!count) {
+ dev_err(uni->dev, "Failed to reset uniperif\n");
+ return -EIO;
+ }
+
+ return 0;
+}
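Both the player and reader code below funnel their soft resets through this helper and simply propagate its error, along the lines of this sketch:

/* Sketch of the calling pattern used by uniperif_player/reader. */
static int example_soft_reset(struct uniperif *uni)
{
	int ret = sti_uniperiph_reset(uni);

	if (ret < 0)
		dev_err(uni->dev, "soft reset failed: %d\n", ret);
	return ret;
}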
+
int sti_uniperiph_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
unsigned int rx_mask, int slots,
int slot_width)
@@ -293,7 +316,7 @@ static int sti_uniperiph_dai_suspend(struct snd_soc_dai *dai)
/* The uniperipheral should be in stopped state */
if (uni->state != UNIPERIF_STATE_STOPPED) {
- dev_err(uni->dev, "%s: invalid uni state( %d)",
+ dev_err(uni->dev, "%s: invalid uni state( %d)\n",
__func__, (int)uni->state);
return -EBUSY;
}
@@ -301,7 +324,7 @@ static int sti_uniperiph_dai_suspend(struct snd_soc_dai *dai)
/* Pinctrl: switch pinstate to sleep */
ret = pinctrl_pm_select_sleep_state(uni->dev);
if (ret)
- dev_err(uni->dev, "%s: failed to select pinctrl state",
+ dev_err(uni->dev, "%s: failed to select pinctrl state\n",
__func__);
return ret;
@@ -322,7 +345,7 @@ static int sti_uniperiph_dai_resume(struct snd_soc_dai *dai)
/* pinctrl: switch pinstate to default */
ret = pinctrl_pm_select_default_state(uni->dev);
if (ret)
- dev_err(uni->dev, "%s: failed to select pinctrl state",
+ dev_err(uni->dev, "%s: failed to select pinctrl state\n",
__func__);
return ret;
@@ -366,11 +389,12 @@ static int sti_uniperiph_cpu_dai_of(struct device_node *node,
const struct of_device_id *of_id;
const struct sti_uniperiph_dev_data *dev_data;
const char *mode;
+ int ret;
/* Populate data structure depending on compatibility */
of_id = of_match_node(snd_soc_sti_match, node);
if (!of_id->data) {
- dev_err(dev, "data associated to device is missing");
+ dev_err(dev, "data associated to device is missing\n");
return -EINVAL;
}
dev_data = (struct sti_uniperiph_dev_data *)of_id->data;
@@ -389,7 +413,7 @@ static int sti_uniperiph_cpu_dai_of(struct device_node *node,
uni->mem_region = platform_get_resource(priv->pdev, IORESOURCE_MEM, 0);
if (!uni->mem_region) {
- dev_err(dev, "Failed to get memory resource");
+ dev_err(dev, "Failed to get memory resource\n");
return -ENODEV;
}
@@ -403,7 +427,7 @@ static int sti_uniperiph_cpu_dai_of(struct device_node *node,
uni->irq = platform_get_irq(priv->pdev, 0);
if (uni->irq < 0) {
- dev_err(dev, "Failed to get IRQ resource");
+ dev_err(dev, "Failed to get IRQ resource\n");
return -ENXIO;
}
@@ -421,12 +445,15 @@ static int sti_uniperiph_cpu_dai_of(struct device_node *node,
dai_data->stream = dev_data->stream;
if (priv->dai_data.stream == SNDRV_PCM_STREAM_PLAYBACK) {
- uni_player_init(priv->pdev, uni);
+ ret = uni_player_init(priv->pdev, uni);
stream = &dai->playback;
} else {
- uni_reader_init(priv->pdev, uni);
+ ret = uni_reader_init(priv->pdev, uni);
stream = &dai->capture;
}
+ if (ret < 0)
+ return ret;
+
dai->ops = uni->dai_ops;
stream->stream_name = dai->name;
diff --git a/sound/soc/sti/uniperif.h b/sound/soc/sti/uniperif.h
index 1993c655fb79..d487dd2ef016 100644
--- a/sound/soc/sti/uniperif.h
+++ b/sound/soc/sti/uniperif.h
@@ -1397,6 +1397,8 @@ static inline int sti_uniperiph_get_unip_tdm_frame_size(struct uniperif *uni)
return (uni->tdm_slot.slots * uni->tdm_slot.slot_width / 8);
}
+int sti_uniperiph_reset(struct uniperif *uni);
+
int sti_uniperiph_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
unsigned int rx_mask, int slots,
int slot_width);
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
index ad54d4cf58ad..60ae31a303ab 100644
--- a/sound/soc/sti/uniperif_player.c
+++ b/sound/soc/sti/uniperif_player.c
@@ -6,8 +6,6 @@
*/
#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <sound/asoundef.h>
@@ -55,25 +53,6 @@ static const struct snd_pcm_hardware uni_player_pcm_hw = {
.buffer_bytes_max = 256 * PAGE_SIZE
};
-static inline int reset_player(struct uniperif *player)
-{
- int count = 10;
-
- if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) {
- while (GET_UNIPERIF_SOFT_RST_SOFT_RST(player) && count) {
- udelay(5);
- count--;
- }
- }
-
- if (!count) {
- dev_err(player->dev, "Failed to reset uniperif");
- return -EIO;
- }
-
- return 0;
-}
-
/*
* uni_player_irq_handler
* In case of error audio stream is stopped; stop action is protected via PCM
@@ -97,7 +76,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
/* Check for fifo error (underrun) */
if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(player))) {
- dev_err(player->dev, "FIFO underflow error detected");
+ dev_err(player->dev, "FIFO underflow error detected\n");
/* Interrupt is just for information when underflow recovery */
if (player->underflow_enabled) {
@@ -119,7 +98,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
/* Check for dma error (overrun) */
if (unlikely(status & UNIPERIF_ITS_DMA_ERROR_MASK(player))) {
- dev_err(player->dev, "DMA error detected");
+ dev_err(player->dev, "DMA error detected\n");
/* Disable interrupt so doesn't continually fire */
SET_UNIPERIF_ITM_BCLR_DMA_ERROR(player);
@@ -135,11 +114,14 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
/* Check for underflow recovery done */
if (unlikely(status & UNIPERIF_ITM_UNDERFLOW_REC_DONE_MASK(player))) {
if (!player->underflow_enabled) {
- dev_err(player->dev, "unexpected Underflow recovering");
+ dev_err(player->dev,
+ "unexpected Underflow recovering\n");
return -EPERM;
}
/* Read the underflow recovery duration */
tmp = GET_UNIPERIF_STATUS_1_UNDERFLOW_DURATION(player);
+ dev_dbg(player->dev, "Underflow recovered (%d LR clocks max)\n",
+ tmp);
/* Clear the underflow recovery duration */
SET_UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION(player);
@@ -153,7 +135,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
/* Check if underflow recovery failed */
if (unlikely(status &
UNIPERIF_ITM_UNDERFLOW_REC_FAILED_MASK(player))) {
- dev_err(player->dev, "Underflow recovery failed");
+ dev_err(player->dev, "Underflow recovery failed\n");
/* Stop the player */
snd_pcm_stream_lock(player->substream);
@@ -336,7 +318,7 @@ static int uni_player_prepare_iec958(struct uniperif *player,
/* Oversampling must be multiple of 128 as iec958 frame is 32-bits */
if ((clk_div % 128) || (clk_div <= 0)) {
- dev_err(player->dev, "%s: invalid clk_div %d",
+ dev_err(player->dev, "%s: invalid clk_div %d\n",
__func__, clk_div);
return -EINVAL;
}
@@ -359,7 +341,7 @@ static int uni_player_prepare_iec958(struct uniperif *player,
SET_UNIPERIF_I2S_FMT_DATA_SIZE_24(player);
break;
default:
- dev_err(player->dev, "format not supported");
+ dev_err(player->dev, "format not supported\n");
return -EINVAL;
}
@@ -448,12 +430,12 @@ static int uni_player_prepare_pcm(struct uniperif *player,
* for 16 bits must be a multiple of 64
*/
if ((slot_width == 32) && (clk_div % 128)) {
- dev_err(player->dev, "%s: invalid clk_div", __func__);
+ dev_err(player->dev, "%s: invalid clk_div\n", __func__);
return -EINVAL;
}
if ((slot_width == 16) && (clk_div % 64)) {
- dev_err(player->dev, "%s: invalid clk_div", __func__);
+ dev_err(player->dev, "%s: invalid clk_div\n", __func__);
return -EINVAL;
}
@@ -471,7 +453,7 @@ static int uni_player_prepare_pcm(struct uniperif *player,
SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(player);
break;
default:
- dev_err(player->dev, "subframe format not supported");
+ dev_err(player->dev, "subframe format not supported\n");
return -EINVAL;
}
@@ -491,7 +473,7 @@ static int uni_player_prepare_pcm(struct uniperif *player,
break;
default:
- dev_err(player->dev, "format not supported");
+ dev_err(player->dev, "format not supported\n");
return -EINVAL;
}
@@ -504,7 +486,7 @@ static int uni_player_prepare_pcm(struct uniperif *player,
/* Number of channels must be even */
if ((runtime->channels % 2) || (runtime->channels < 2) ||
(runtime->channels > 10)) {
- dev_err(player->dev, "%s: invalid nb of channels", __func__);
+ dev_err(player->dev, "%s: invalid nb of channels\n", __func__);
return -EINVAL;
}
@@ -762,7 +744,7 @@ static int uni_player_prepare(struct snd_pcm_substream *substream,
/* The player should be stopped */
if (player->state != UNIPERIF_STATE_STOPPED) {
- dev_err(player->dev, "%s: invalid player state %d", __func__,
+ dev_err(player->dev, "%s: invalid player state %d\n", __func__,
player->state);
return -EINVAL;
}
@@ -791,7 +773,8 @@ static int uni_player_prepare(struct snd_pcm_substream *substream,
/* Trigger limit must be an even number */
if ((!trigger_limit % 2) || (trigger_limit != 1 && transfer_size % 2) ||
(trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(player))) {
- dev_err(player->dev, "invalid trigger limit %d", trigger_limit);
+ dev_err(player->dev, "invalid trigger limit %d\n",
+ trigger_limit);
return -EINVAL;
}
@@ -812,7 +795,7 @@ static int uni_player_prepare(struct snd_pcm_substream *substream,
ret = uni_player_prepare_tdm(player, runtime);
break;
default:
- dev_err(player->dev, "invalid player type");
+ dev_err(player->dev, "invalid player type\n");
return -EINVAL;
}
@@ -852,16 +835,14 @@ static int uni_player_prepare(struct snd_pcm_substream *substream,
SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(player);
break;
default:
- dev_err(player->dev, "format not supported");
+ dev_err(player->dev, "format not supported\n");
return -EINVAL;
}
SET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(player, 0);
- /* Reset uniperipheral player */
- SET_UNIPERIF_SOFT_RST_SOFT_RST(player);
- return reset_player(player);
+ return sti_uniperiph_reset(player);
}
static int uni_player_start(struct uniperif *player)
@@ -870,13 +851,13 @@ static int uni_player_start(struct uniperif *player)
/* The player should be stopped */
if (player->state != UNIPERIF_STATE_STOPPED) {
- dev_err(player->dev, "%s: invalid player state", __func__);
+ dev_err(player->dev, "%s: invalid player state\n", __func__);
return -EINVAL;
}
ret = clk_prepare_enable(player->clk);
if (ret) {
- dev_err(player->dev, "%s: Failed to enable clock", __func__);
+ dev_err(player->dev, "%s: Failed to enable clock\n", __func__);
return ret;
}
@@ -893,10 +874,7 @@ static int uni_player_start(struct uniperif *player)
SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED(player);
}
- /* Reset uniperipheral player */
- SET_UNIPERIF_SOFT_RST_SOFT_RST(player);
-
- ret = reset_player(player);
+ ret = sti_uniperiph_reset(player);
if (ret < 0) {
clk_disable_unprepare(player->clk);
return ret;
@@ -938,17 +916,14 @@ static int uni_player_stop(struct uniperif *player)
/* The player should not be in stopped state */
if (player->state == UNIPERIF_STATE_STOPPED) {
- dev_err(player->dev, "%s: invalid player state", __func__);
+ dev_err(player->dev, "%s: invalid player state\n", __func__);
return -EINVAL;
}
/* Turn the player off */
SET_UNIPERIF_CTRL_OPERATION_OFF(player);
- /* Soft reset the player */
- SET_UNIPERIF_SOFT_RST_SOFT_RST(player);
-
- ret = reset_player(player);
+ ret = sti_uniperiph_reset(player);
if (ret < 0)
return ret;
@@ -973,7 +948,7 @@ int uni_player_resume(struct uniperif *player)
ret = regmap_field_write(player->clk_sel, 1);
if (ret) {
dev_err(player->dev,
- "%s: Failed to select freq synth clock",
+ "%s: Failed to select freq synth clock\n",
__func__);
return ret;
}
@@ -1070,7 +1045,7 @@ int uni_player_init(struct platform_device *pdev,
ret = uni_player_parse_dt_audio_glue(pdev, player);
if (ret < 0) {
- dev_err(player->dev, "Failed to parse DeviceTree");
+ dev_err(player->dev, "Failed to parse DeviceTree\n");
return ret;
}
@@ -1085,15 +1060,17 @@ int uni_player_init(struct platform_device *pdev,
/* Get uniperif resource */
player->clk = of_clk_get(pdev->dev.of_node, 0);
- if (IS_ERR(player->clk))
+ if (IS_ERR(player->clk)) {
+ dev_err(player->dev, "Failed to get clock\n");
ret = PTR_ERR(player->clk);
+ }
/* Select the frequency synthesizer clock */
if (player->clk_sel) {
ret = regmap_field_write(player->clk_sel, 1);
if (ret) {
dev_err(player->dev,
- "%s: Failed to select freq synth clock",
+ "%s: Failed to select freq synth clock\n",
__func__);
return ret;
}
@@ -1105,7 +1082,7 @@ int uni_player_init(struct platform_device *pdev,
ret = regmap_field_write(player->valid_sel, player->id);
if (ret) {
dev_err(player->dev,
- "%s: unable to connect to tdm bus", __func__);
+ "%s: unable to connect to tdm bus\n", __func__);
return ret;
}
}
@@ -1113,8 +1090,10 @@ int uni_player_init(struct platform_device *pdev,
ret = devm_request_irq(&pdev->dev, player->irq,
uni_player_irq_handler, IRQF_SHARED,
dev_name(&pdev->dev), player);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(player->dev, "unable to request IRQ %d\n", player->irq);
return ret;
+ }
mutex_init(&player->ctrl_lock);
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
index 0e1c3ee56675..5992c6ab3833 100644
--- a/sound/soc/sti/uniperif_reader.c
+++ b/sound/soc/sti/uniperif_reader.c
@@ -5,10 +5,6 @@
* License terms: GNU General Public License (GPL), version 2
*/
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-
#include <sound/soc.h>
#include "uniperif.h"
@@ -52,7 +48,7 @@ static irqreturn_t uni_reader_irq_handler(int irq, void *dev_id)
if (reader->state == UNIPERIF_STATE_STOPPED) {
/* Unexpected IRQ: do nothing */
- dev_warn(reader->dev, "unexpected IRQ ");
+ dev_warn(reader->dev, "unexpected IRQ\n");
return IRQ_HANDLED;
}
@@ -62,7 +58,7 @@ static irqreturn_t uni_reader_irq_handler(int irq, void *dev_id)
/* Check for fifo overflow error */
if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(reader))) {
- dev_err(reader->dev, "FIFO error detected");
+ dev_err(reader->dev, "FIFO error detected\n");
snd_pcm_stream_lock(reader->substream);
snd_pcm_stop(reader->substream, SNDRV_PCM_STATE_XRUN);
@@ -105,7 +101,7 @@ static int uni_reader_prepare_pcm(struct snd_pcm_runtime *runtime,
SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(reader);
break;
default:
- dev_err(reader->dev, "subframe format not supported");
+ dev_err(reader->dev, "subframe format not supported\n");
return -EINVAL;
}
@@ -125,14 +121,14 @@ static int uni_reader_prepare_pcm(struct snd_pcm_runtime *runtime,
break;
default:
- dev_err(reader->dev, "format not supported");
+ dev_err(reader->dev, "format not supported\n");
return -EINVAL;
}
/* Number of channels must be even */
if ((runtime->channels % 2) || (runtime->channels < 2) ||
(runtime->channels > 10)) {
- dev_err(reader->dev, "%s: invalid nb of channels", __func__);
+ dev_err(reader->dev, "%s: invalid nb of channels\n", __func__);
return -EINVAL;
}
@@ -186,11 +182,10 @@ static int uni_reader_prepare(struct snd_pcm_substream *substream,
struct uniperif *reader = priv->dai_data.uni;
struct snd_pcm_runtime *runtime = substream->runtime;
int transfer_size, trigger_limit, ret;
- int count = 10;
/* The reader should be stopped */
if (reader->state != UNIPERIF_STATE_STOPPED) {
- dev_err(reader->dev, "%s: invalid reader state %d", __func__,
+ dev_err(reader->dev, "%s: invalid reader state %d\n", __func__,
reader->state);
return -EINVAL;
}
@@ -219,7 +214,8 @@ static int uni_reader_prepare(struct snd_pcm_substream *substream,
if ((!trigger_limit % 2) ||
(trigger_limit != 1 && transfer_size % 2) ||
(trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(reader))) {
- dev_err(reader->dev, "invalid trigger limit %d", trigger_limit);
+ dev_err(reader->dev, "invalid trigger limit %d\n",
+ trigger_limit);
return -EINVAL;
}
@@ -246,7 +242,7 @@ static int uni_reader_prepare(struct snd_pcm_substream *substream,
SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(reader);
break;
default:
- dev_err(reader->dev, "format not supported");
+ dev_err(reader->dev, "format not supported\n");
return -EINVAL;
}
@@ -287,25 +283,14 @@ static int uni_reader_prepare(struct snd_pcm_substream *substream,
}
/* Reset uniperipheral reader */
- SET_UNIPERIF_SOFT_RST_SOFT_RST(reader);
-
- while (GET_UNIPERIF_SOFT_RST_SOFT_RST(reader)) {
- udelay(5);
- count--;
- }
- if (!count) {
- dev_err(reader->dev, "Failed to reset uniperif");
- return -EIO;
- }
-
- return 0;
+ return sti_uniperiph_reset(reader);
}
static int uni_reader_start(struct uniperif *reader)
{
/* The reader should be stopped */
if (reader->state != UNIPERIF_STATE_STOPPED) {
- dev_err(reader->dev, "%s: invalid reader state", __func__);
+ dev_err(reader->dev, "%s: invalid reader state\n", __func__);
return -EINVAL;
}
@@ -325,7 +310,7 @@ static int uni_reader_stop(struct uniperif *reader)
{
/* The reader should not be in stopped state */
if (reader->state == UNIPERIF_STATE_STOPPED) {
- dev_err(reader->dev, "%s: invalid reader state", __func__);
+ dev_err(reader->dev, "%s: invalid reader state\n", __func__);
return -EINVAL;
}
@@ -423,7 +408,7 @@ int uni_reader_init(struct platform_device *pdev,
uni_reader_irq_handler, IRQF_SHARED,
dev_name(&pdev->dev), reader);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to request IRQ");
+ dev_err(&pdev->dev, "Failed to request IRQ\n");
return -EBUSY;
}
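
Both uniperif_player.c and uniperif_reader.c above stop open-coding the soft-reset poll and call a shared sti_uniperiph_reset() helper instead. That helper lives in the common sti_uniperiph code, outside these hunks; a minimal sketch of it, assuming it keeps the removed loop's 10-iteration, 5 us polling policy (and therefore pulls in <linux/delay.h> on the common side), would be:

	static int sti_uniperiph_reset(struct uniperif *uni)
	{
		int count = 10;

		/* Pulse the soft reset bit and wait for the IP to clear it */
		SET_UNIPERIF_SOFT_RST_SOFT_RST(uni);
		while (GET_UNIPERIF_SOFT_RST_SOFT_RST(uni)) {
			udelay(5);
			if (!--count) {
				dev_err(uni->dev, "Failed to reset uniperif\n");
				return -EIO;
			}
		}

		return 0;
	}
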
diff --git a/sound/soc/sunxi/Kconfig b/sound/soc/sunxi/Kconfig
index dd2368297fd3..6c344e16aca4 100644
--- a/sound/soc/sunxi/Kconfig
+++ b/sound/soc/sunxi/Kconfig
@@ -9,6 +9,14 @@ config SND_SUN4I_CODEC
Select Y or M to add support for the Codec embedded in the Allwinner
A10 and affiliated SoCs.
+config SND_SUN8I_CODEC_ANALOG
+ tristate "Allwinner sun8i Codec Analog Controls Support"
+ depends on MACH_SUN8I || COMPILE_TEST
+ select REGMAP
+ help
+ Say Y or M if you want to add support for the analog controls for
+ the codec embedded in newer Allwinner SoCs.
+
config SND_SUN4I_I2S
tristate "Allwinner A10 I2S Support"
select SND_SOC_GENERIC_DMAENGINE_PCM
diff --git a/sound/soc/sunxi/Makefile b/sound/soc/sunxi/Makefile
index 604c7b842837..241c0df9ca0c 100644
--- a/sound/soc/sunxi/Makefile
+++ b/sound/soc/sunxi/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_SND_SUN4I_CODEC) += sun4i-codec.o
obj-$(CONFIG_SND_SUN4I_I2S) += sun4i-i2s.o
obj-$(CONFIG_SND_SUN4I_SPDIF) += sun4i-spdif.o
+obj-$(CONFIG_SND_SUN8I_CODEC_ANALOG) += sun8i-codec-analog.o
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index 56ed9472e89f..848af01692a0 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -3,6 +3,7 @@
* Copyright 2014 Jon Smirl <jonsmirl@gmail.com>
* Copyright 2015 Maxime Ripard <maxime.ripard@free-electrons.com>
* Copyright 2015 Adam Sampson <ats@offog.org>
+ * Copyright 2016 Chen-Yu Tsai <wens@csie.org>
*
* Based on the Allwinner SDK driver, released under the GPL.
*
@@ -24,10 +25,12 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/regmap.h>
+#include <linux/reset.h>
#include <linux/gpio/consumer.h>
#include <sound/core.h>
@@ -38,7 +41,7 @@
#include <sound/initval.h>
#include <sound/dmaengine_pcm.h>
-/* Codec DAC register offsets and bit fields */
+/* Codec DAC digital controls and FIFO registers */
#define SUN4I_CODEC_DAC_DPC (0x00)
#define SUN4I_CODEC_DAC_DPC_EN_DA (31)
#define SUN4I_CODEC_DAC_DPC_DVOL (12)
@@ -55,6 +58,8 @@
#define SUN4I_CODEC_DAC_FIFOC_FIFO_FLUSH (0)
#define SUN4I_CODEC_DAC_FIFOS (0x08)
#define SUN4I_CODEC_DAC_TXDATA (0x0c)
+
+/* Codec DAC side analog signal controls */
#define SUN4I_CODEC_DAC_ACTL (0x10)
#define SUN4I_CODEC_DAC_ACTL_DACAENR (31)
#define SUN4I_CODEC_DAC_ACTL_DACAENL (30)
@@ -69,7 +74,7 @@
#define SUN4I_CODEC_DAC_TUNE (0x14)
#define SUN4I_CODEC_DAC_DEBUG (0x18)
-/* Codec ADC register offsets and bit fields */
+/* Codec ADC digital controls and FIFO registers */
#define SUN4I_CODEC_ADC_FIFOC (0x1c)
#define SUN4I_CODEC_ADC_FIFOC_ADC_FS (29)
#define SUN4I_CODEC_ADC_FIFOC_EN_AD (28)
@@ -81,6 +86,8 @@
#define SUN4I_CODEC_ADC_FIFOC_FIFO_FLUSH (0)
#define SUN4I_CODEC_ADC_FIFOS (0x20)
#define SUN4I_CODEC_ADC_RXDATA (0x24)
+
+/* Codec ADC side analog signal controls */
#define SUN4I_CODEC_ADC_ACTL (0x28)
#define SUN4I_CODEC_ADC_ACTL_ADC_R_EN (31)
#define SUN4I_CODEC_ADC_ACTL_ADC_L_EN (30)
@@ -93,19 +100,141 @@
#define SUN4I_CODEC_ADC_ACTL_DDE (3)
#define SUN4I_CODEC_ADC_DEBUG (0x2c)
-/* Other various ADC registers */
+/* FIFO counters */
#define SUN4I_CODEC_DAC_TXCNT (0x30)
#define SUN4I_CODEC_ADC_RXCNT (0x34)
+
+/* Calibration register (sun7i only) */
#define SUN7I_CODEC_AC_DAC_CAL (0x38)
+
+/* Microphone controls (sun7i only) */
#define SUN7I_CODEC_AC_MIC_PHONE_CAL (0x3c)
+/*
+ * sun6i specific registers
+ *
+ * sun6i shares the same digital control and FIFO registers as sun4i,
+ * but only the DAC digital controls are at the same offset. The others
+ * have been moved around to accommodate extra analog controls.
+ */
+
+/* Codec ADC digital controls and FIFO registers */
+#define SUN6I_CODEC_ADC_FIFOC (0x10)
+#define SUN6I_CODEC_ADC_FIFOC_EN_AD (28)
+#define SUN6I_CODEC_ADC_FIFOS (0x14)
+#define SUN6I_CODEC_ADC_RXDATA (0x18)
+
+/* Output mixer and gain controls */
+#define SUN6I_CODEC_OM_DACA_CTRL (0x20)
+#define SUN6I_CODEC_OM_DACA_CTRL_DACAREN (31)
+#define SUN6I_CODEC_OM_DACA_CTRL_DACALEN (30)
+#define SUN6I_CODEC_OM_DACA_CTRL_RMIXEN (29)
+#define SUN6I_CODEC_OM_DACA_CTRL_LMIXEN (28)
+#define SUN6I_CODEC_OM_DACA_CTRL_RMIX_MIC1 (23)
+#define SUN6I_CODEC_OM_DACA_CTRL_RMIX_MIC2 (22)
+#define SUN6I_CODEC_OM_DACA_CTRL_RMIX_PHONE (21)
+#define SUN6I_CODEC_OM_DACA_CTRL_RMIX_PHONEP (20)
+#define SUN6I_CODEC_OM_DACA_CTRL_RMIX_LINEINR (19)
+#define SUN6I_CODEC_OM_DACA_CTRL_RMIX_DACR (18)
+#define SUN6I_CODEC_OM_DACA_CTRL_RMIX_DACL (17)
+#define SUN6I_CODEC_OM_DACA_CTRL_LMIX_MIC1 (16)
+#define SUN6I_CODEC_OM_DACA_CTRL_LMIX_MIC2 (15)
+#define SUN6I_CODEC_OM_DACA_CTRL_LMIX_PHONE (14)
+#define SUN6I_CODEC_OM_DACA_CTRL_LMIX_PHONEN (13)
+#define SUN6I_CODEC_OM_DACA_CTRL_LMIX_LINEINL (12)
+#define SUN6I_CODEC_OM_DACA_CTRL_LMIX_DACL (11)
+#define SUN6I_CODEC_OM_DACA_CTRL_LMIX_DACR (10)
+#define SUN6I_CODEC_OM_DACA_CTRL_RHPIS (9)
+#define SUN6I_CODEC_OM_DACA_CTRL_LHPIS (8)
+#define SUN6I_CODEC_OM_DACA_CTRL_RHPPAMUTE (7)
+#define SUN6I_CODEC_OM_DACA_CTRL_LHPPAMUTE (6)
+#define SUN6I_CODEC_OM_DACA_CTRL_HPVOL (0)
+#define SUN6I_CODEC_OM_PA_CTRL (0x24)
+#define SUN6I_CODEC_OM_PA_CTRL_HPPAEN (31)
+#define SUN6I_CODEC_OM_PA_CTRL_HPCOM_CTL (29)
+#define SUN6I_CODEC_OM_PA_CTRL_COMPTEN (28)
+#define SUN6I_CODEC_OM_PA_CTRL_MIC1G (15)
+#define SUN6I_CODEC_OM_PA_CTRL_MIC2G (12)
+#define SUN6I_CODEC_OM_PA_CTRL_LINEING (9)
+#define SUN6I_CODEC_OM_PA_CTRL_PHONEG (6)
+#define SUN6I_CODEC_OM_PA_CTRL_PHONEPG (3)
+#define SUN6I_CODEC_OM_PA_CTRL_PHONENG (0)
+
+/* Microphone, line out and phone out controls */
+#define SUN6I_CODEC_MIC_CTRL (0x28)
+#define SUN6I_CODEC_MIC_CTRL_HBIASEN (31)
+#define SUN6I_CODEC_MIC_CTRL_MBIASEN (30)
+#define SUN6I_CODEC_MIC_CTRL_MIC1AMPEN (28)
+#define SUN6I_CODEC_MIC_CTRL_MIC1BOOST (25)
+#define SUN6I_CODEC_MIC_CTRL_MIC2AMPEN (24)
+#define SUN6I_CODEC_MIC_CTRL_MIC2BOOST (21)
+#define SUN6I_CODEC_MIC_CTRL_MIC2SLT (20)
+#define SUN6I_CODEC_MIC_CTRL_LINEOUTLEN (19)
+#define SUN6I_CODEC_MIC_CTRL_LINEOUTREN (18)
+#define SUN6I_CODEC_MIC_CTRL_LINEOUTLSRC (17)
+#define SUN6I_CODEC_MIC_CTRL_LINEOUTRSRC (16)
+#define SUN6I_CODEC_MIC_CTRL_LINEOUTVC (11)
+#define SUN6I_CODEC_MIC_CTRL_PHONEPREG (8)
+
+/* ADC mixer controls */
+#define SUN6I_CODEC_ADC_ACTL (0x2c)
+#define SUN6I_CODEC_ADC_ACTL_ADCREN (31)
+#define SUN6I_CODEC_ADC_ACTL_ADCLEN (30)
+#define SUN6I_CODEC_ADC_ACTL_ADCRG (27)
+#define SUN6I_CODEC_ADC_ACTL_ADCLG (24)
+#define SUN6I_CODEC_ADC_ACTL_RADCMIX_MIC1 (13)
+#define SUN6I_CODEC_ADC_ACTL_RADCMIX_MIC2 (12)
+#define SUN6I_CODEC_ADC_ACTL_RADCMIX_PHONE (11)
+#define SUN6I_CODEC_ADC_ACTL_RADCMIX_PHONEP (10)
+#define SUN6I_CODEC_ADC_ACTL_RADCMIX_LINEINR (9)
+#define SUN6I_CODEC_ADC_ACTL_RADCMIX_OMIXR (8)
+#define SUN6I_CODEC_ADC_ACTL_RADCMIX_OMIXL (7)
+#define SUN6I_CODEC_ADC_ACTL_LADCMIX_MIC1 (6)
+#define SUN6I_CODEC_ADC_ACTL_LADCMIX_MIC2 (5)
+#define SUN6I_CODEC_ADC_ACTL_LADCMIX_PHONE (4)
+#define SUN6I_CODEC_ADC_ACTL_LADCMIX_PHONEN (3)
+#define SUN6I_CODEC_ADC_ACTL_LADCMIX_LINEINL (2)
+#define SUN6I_CODEC_ADC_ACTL_LADCMIX_OMIXL (1)
+#define SUN6I_CODEC_ADC_ACTL_LADCMIX_OMIXR (0)
+
+/* Analog performance tuning controls */
+#define SUN6I_CODEC_ADDA_TUNE (0x30)
+
+/* Calibration controls */
+#define SUN6I_CODEC_CALIBRATION (0x34)
+
+/* FIFO counters */
+#define SUN6I_CODEC_DAC_TXCNT (0x40)
+#define SUN6I_CODEC_ADC_RXCNT (0x44)
+
+/* headset jack detection and button support registers */
+#define SUN6I_CODEC_HMIC_CTL (0x50)
+#define SUN6I_CODEC_HMIC_DATA (0x54)
+
+/* TODO sun6i DAP (Digital Audio Processing) bits */
+
+/* FIFO counters moved on A23 */
+#define SUN8I_A23_CODEC_DAC_TXCNT (0x1c)
+#define SUN8I_A23_CODEC_ADC_RXCNT (0x20)
+
+/* TX FIFO moved on H3 */
+#define SUN8I_H3_CODEC_DAC_TXDATA (0x20)
+#define SUN8I_H3_CODEC_DAC_DBG (0x48)
+#define SUN8I_H3_CODEC_ADC_DBG (0x4c)
+
+/* TODO H3 DAP (Digital Audio Processing) bits */
+
struct sun4i_codec {
struct device *dev;
struct regmap *regmap;
struct clk *clk_apb;
struct clk *clk_module;
+ struct reset_control *rst;
struct gpio_desc *gpio_pa;
+ /* ADC_FIFOC register is at different offset on different SoCs */
+ struct regmap_field *reg_adc_fifoc;
+
struct snd_dmaengine_dai_dma_data capture_dma_data;
struct snd_dmaengine_dai_dma_data playback_dma_data;
};
@@ -134,16 +263,16 @@ static void sun4i_codec_stop_playback(struct sun4i_codec *scodec)
static void sun4i_codec_start_capture(struct sun4i_codec *scodec)
{
/* Enable ADC DRQ */
- regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_FIFOC,
- BIT(SUN4I_CODEC_ADC_FIFOC_ADC_DRQ_EN),
- BIT(SUN4I_CODEC_ADC_FIFOC_ADC_DRQ_EN));
+ regmap_field_update_bits(scodec->reg_adc_fifoc,
+ BIT(SUN4I_CODEC_ADC_FIFOC_ADC_DRQ_EN),
+ BIT(SUN4I_CODEC_ADC_FIFOC_ADC_DRQ_EN));
}
static void sun4i_codec_stop_capture(struct sun4i_codec *scodec)
{
/* Disable ADC DRQ */
- regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_FIFOC,
- BIT(SUN4I_CODEC_ADC_FIFOC_ADC_DRQ_EN), 0);
+ regmap_field_update_bits(scodec->reg_adc_fifoc,
+ BIT(SUN4I_CODEC_ADC_FIFOC_ADC_DRQ_EN), 0);
}
static int sun4i_codec_trigger(struct snd_pcm_substream *substream, int cmd,
@@ -186,24 +315,29 @@ static int sun4i_codec_prepare_capture(struct snd_pcm_substream *substream,
/* Flush RX FIFO */
- regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_FIFOC,
- BIT(SUN4I_CODEC_ADC_FIFOC_FIFO_FLUSH),
- BIT(SUN4I_CODEC_ADC_FIFOC_FIFO_FLUSH));
+ regmap_field_update_bits(scodec->reg_adc_fifoc,
+ BIT(SUN4I_CODEC_ADC_FIFOC_FIFO_FLUSH),
+ BIT(SUN4I_CODEC_ADC_FIFOC_FIFO_FLUSH));
/* Set RX FIFO trigger level */
- regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_FIFOC,
- 0xf << SUN4I_CODEC_ADC_FIFOC_RX_TRIG_LEVEL,
- 0x7 << SUN4I_CODEC_ADC_FIFOC_RX_TRIG_LEVEL);
+ regmap_field_update_bits(scodec->reg_adc_fifoc,
+ 0xf << SUN4I_CODEC_ADC_FIFOC_RX_TRIG_LEVEL,
+ 0x7 << SUN4I_CODEC_ADC_FIFOC_RX_TRIG_LEVEL);
/*
* FIXME: Undocumented in the datasheet, but
* Allwinner's code mentions that it is related
* related to microphone gain
*/
- regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_ACTL,
- 0x3 << 25,
- 0x1 << 25);
+ if (of_device_is_compatible(scodec->dev->of_node,
+ "allwinner,sun4i-a10-codec") ||
+ of_device_is_compatible(scodec->dev->of_node,
+ "allwinner,sun7i-a20-codec")) {
+ regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_ACTL,
+ 0x3 << 25,
+ 0x1 << 25);
+ }
if (of_device_is_compatible(scodec->dev->of_node,
"allwinner,sun7i-a20-codec"))
@@ -213,9 +347,9 @@ static int sun4i_codec_prepare_capture(struct snd_pcm_substream *substream,
0x1 << 8);
/* Fill most significant bits with valid data MSB */
- regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_FIFOC,
- BIT(SUN4I_CODEC_ADC_FIFOC_RX_FIFO_MODE),
- BIT(SUN4I_CODEC_ADC_FIFOC_RX_FIFO_MODE));
+ regmap_field_update_bits(scodec->reg_adc_fifoc,
+ BIT(SUN4I_CODEC_ADC_FIFOC_RX_FIFO_MODE),
+ BIT(SUN4I_CODEC_ADC_FIFOC_RX_FIFO_MODE));
return 0;
}
@@ -342,18 +476,19 @@ static int sun4i_codec_hw_params_capture(struct sun4i_codec *scodec,
unsigned int hwrate)
{
/* Set ADC sample rate */
- regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_FIFOC,
- 7 << SUN4I_CODEC_ADC_FIFOC_ADC_FS,
- hwrate << SUN4I_CODEC_ADC_FIFOC_ADC_FS);
+ regmap_field_update_bits(scodec->reg_adc_fifoc,
+ 7 << SUN4I_CODEC_ADC_FIFOC_ADC_FS,
+ hwrate << SUN4I_CODEC_ADC_FIFOC_ADC_FS);
/* Set the number of channels we want to use */
if (params_channels(params) == 1)
- regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_FIFOC,
- BIT(SUN4I_CODEC_ADC_FIFOC_MONO_EN),
- BIT(SUN4I_CODEC_ADC_FIFOC_MONO_EN));
+ regmap_field_update_bits(scodec->reg_adc_fifoc,
+ BIT(SUN4I_CODEC_ADC_FIFOC_MONO_EN),
+ BIT(SUN4I_CODEC_ADC_FIFOC_MONO_EN));
else
- regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_FIFOC,
- BIT(SUN4I_CODEC_ADC_FIFOC_MONO_EN), 0);
+ regmap_field_update_bits(scodec->reg_adc_fifoc,
+ BIT(SUN4I_CODEC_ADC_FIFOC_MONO_EN),
+ 0);
return 0;
}
@@ -502,7 +637,7 @@ static struct snd_soc_dai_driver sun4i_codec_dai = {
},
};
-/*** Codec ***/
+/*** sun4i Codec ***/
static const struct snd_kcontrol_new sun4i_codec_pa_mute =
SOC_DAPM_SINGLE("Switch", SUN4I_CODEC_DAC_ACTL,
SUN4I_CODEC_DAC_ACTL_PA_MUTE, 1, 0);
@@ -638,6 +773,337 @@ static struct snd_soc_codec_driver sun4i_codec_codec = {
},
};
+/*** sun6i Codec ***/
+
+/* mixer controls */
+static const struct snd_kcontrol_new sun6i_codec_mixer_controls[] = {
+ SOC_DAPM_DOUBLE("DAC Playback Switch",
+ SUN6I_CODEC_OM_DACA_CTRL,
+ SUN6I_CODEC_OM_DACA_CTRL_LMIX_DACL,
+ SUN6I_CODEC_OM_DACA_CTRL_RMIX_DACR, 1, 0),
+ SOC_DAPM_DOUBLE("DAC Reversed Playback Switch",
+ SUN6I_CODEC_OM_DACA_CTRL,
+ SUN6I_CODEC_OM_DACA_CTRL_LMIX_DACR,
+ SUN6I_CODEC_OM_DACA_CTRL_RMIX_DACL, 1, 0),
+ SOC_DAPM_DOUBLE("Line In Playback Switch",
+ SUN6I_CODEC_OM_DACA_CTRL,
+ SUN6I_CODEC_OM_DACA_CTRL_LMIX_LINEINL,
+ SUN6I_CODEC_OM_DACA_CTRL_RMIX_LINEINR, 1, 0),
+ SOC_DAPM_DOUBLE("Mic1 Playback Switch",
+ SUN6I_CODEC_OM_DACA_CTRL,
+ SUN6I_CODEC_OM_DACA_CTRL_LMIX_MIC1,
+ SUN6I_CODEC_OM_DACA_CTRL_RMIX_MIC1, 1, 0),
+ SOC_DAPM_DOUBLE("Mic2 Playback Switch",
+ SUN6I_CODEC_OM_DACA_CTRL,
+ SUN6I_CODEC_OM_DACA_CTRL_LMIX_MIC2,
+ SUN6I_CODEC_OM_DACA_CTRL_RMIX_MIC2, 1, 0),
+};
+
+/* ADC mixer controls */
+static const struct snd_kcontrol_new sun6i_codec_adc_mixer_controls[] = {
+ SOC_DAPM_DOUBLE("Mixer Capture Switch",
+ SUN6I_CODEC_ADC_ACTL,
+ SUN6I_CODEC_ADC_ACTL_LADCMIX_OMIXL,
+ SUN6I_CODEC_ADC_ACTL_RADCMIX_OMIXR, 1, 0),
+ SOC_DAPM_DOUBLE("Mixer Reversed Capture Switch",
+ SUN6I_CODEC_ADC_ACTL,
+ SUN6I_CODEC_ADC_ACTL_LADCMIX_OMIXR,
+ SUN6I_CODEC_ADC_ACTL_RADCMIX_OMIXL, 1, 0),
+ SOC_DAPM_DOUBLE("Line In Capture Switch",
+ SUN6I_CODEC_ADC_ACTL,
+ SUN6I_CODEC_ADC_ACTL_LADCMIX_LINEINL,
+ SUN6I_CODEC_ADC_ACTL_RADCMIX_LINEINR, 1, 0),
+ SOC_DAPM_DOUBLE("Mic1 Capture Switch",
+ SUN6I_CODEC_ADC_ACTL,
+ SUN6I_CODEC_ADC_ACTL_LADCMIX_MIC1,
+ SUN6I_CODEC_ADC_ACTL_RADCMIX_MIC1, 1, 0),
+ SOC_DAPM_DOUBLE("Mic2 Capture Switch",
+ SUN6I_CODEC_ADC_ACTL,
+ SUN6I_CODEC_ADC_ACTL_LADCMIX_MIC2,
+ SUN6I_CODEC_ADC_ACTL_RADCMIX_MIC2, 1, 0),
+};
+
+/* headphone controls */
+static const char * const sun6i_codec_hp_src_enum_text[] = {
+ "DAC", "Mixer",
+};
+
+static SOC_ENUM_DOUBLE_DECL(sun6i_codec_hp_src_enum,
+ SUN6I_CODEC_OM_DACA_CTRL,
+ SUN6I_CODEC_OM_DACA_CTRL_LHPIS,
+ SUN6I_CODEC_OM_DACA_CTRL_RHPIS,
+ sun6i_codec_hp_src_enum_text);
+
+static const struct snd_kcontrol_new sun6i_codec_hp_src[] = {
+ SOC_DAPM_ENUM("Headphone Source Playback Route",
+ sun6i_codec_hp_src_enum),
+};
+
+/* microphone controls */
+static const char * const sun6i_codec_mic2_src_enum_text[] = {
+ "Mic2", "Mic3",
+};
+
+static SOC_ENUM_SINGLE_DECL(sun6i_codec_mic2_src_enum,
+ SUN6I_CODEC_MIC_CTRL,
+ SUN6I_CODEC_MIC_CTRL_MIC2SLT,
+ sun6i_codec_mic2_src_enum_text);
+
+static const struct snd_kcontrol_new sun6i_codec_mic2_src[] = {
+ SOC_DAPM_ENUM("Mic2 Amplifier Source Route",
+ sun6i_codec_mic2_src_enum),
+};
+
+/* line out controls */
+static const char * const sun6i_codec_lineout_src_enum_text[] = {
+ "Stereo", "Mono Differential",
+};
+
+static SOC_ENUM_DOUBLE_DECL(sun6i_codec_lineout_src_enum,
+ SUN6I_CODEC_MIC_CTRL,
+ SUN6I_CODEC_MIC_CTRL_LINEOUTLSRC,
+ SUN6I_CODEC_MIC_CTRL_LINEOUTRSRC,
+ sun6i_codec_lineout_src_enum_text);
+
+static const struct snd_kcontrol_new sun6i_codec_lineout_src[] = {
+ SOC_DAPM_ENUM("Line Out Source Playback Route",
+ sun6i_codec_lineout_src_enum),
+};
+
+/* volume / mute controls */
+static const DECLARE_TLV_DB_SCALE(sun6i_codec_dvol_scale, -7308, 116, 0);
+static const DECLARE_TLV_DB_SCALE(sun6i_codec_hp_vol_scale, -6300, 100, 1);
+static const DECLARE_TLV_DB_SCALE(sun6i_codec_out_mixer_pregain_scale,
+ -450, 150, 0);
+static const DECLARE_TLV_DB_RANGE(sun6i_codec_lineout_vol_scale,
+ 0, 1, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
+ 2, 31, TLV_DB_SCALE_ITEM(-4350, 150, 0),
+);
+static const DECLARE_TLV_DB_RANGE(sun6i_codec_mic_gain_scale,
+ 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+ 1, 7, TLV_DB_SCALE_ITEM(2400, 300, 0),
+);
+
+static const struct snd_kcontrol_new sun6i_codec_codec_widgets[] = {
+ SOC_SINGLE_TLV("DAC Playback Volume", SUN4I_CODEC_DAC_DPC,
+ SUN4I_CODEC_DAC_DPC_DVOL, 0x3f, 1,
+ sun6i_codec_dvol_scale),
+ SOC_SINGLE_TLV("Headphone Playback Volume",
+ SUN6I_CODEC_OM_DACA_CTRL,
+ SUN6I_CODEC_OM_DACA_CTRL_HPVOL, 0x3f, 0,
+ sun6i_codec_hp_vol_scale),
+ SOC_SINGLE_TLV("Line Out Playback Volume",
+ SUN6I_CODEC_MIC_CTRL,
+ SUN6I_CODEC_MIC_CTRL_LINEOUTVC, 0x1f, 0,
+ sun6i_codec_lineout_vol_scale),
+ SOC_DOUBLE("Headphone Playback Switch",
+ SUN6I_CODEC_OM_DACA_CTRL,
+ SUN6I_CODEC_OM_DACA_CTRL_LHPPAMUTE,
+ SUN6I_CODEC_OM_DACA_CTRL_RHPPAMUTE, 1, 0),
+ SOC_DOUBLE("Line Out Playback Switch",
+ SUN6I_CODEC_MIC_CTRL,
+ SUN6I_CODEC_MIC_CTRL_LINEOUTLEN,
+ SUN6I_CODEC_MIC_CTRL_LINEOUTREN, 1, 0),
+ /* Mixer pre-gains */
+ SOC_SINGLE_TLV("Line In Playback Volume",
+ SUN6I_CODEC_OM_PA_CTRL, SUN6I_CODEC_OM_PA_CTRL_LINEING,
+ 0x7, 0, sun6i_codec_out_mixer_pregain_scale),
+ SOC_SINGLE_TLV("Mic1 Playback Volume",
+ SUN6I_CODEC_OM_PA_CTRL, SUN6I_CODEC_OM_PA_CTRL_MIC1G,
+ 0x7, 0, sun6i_codec_out_mixer_pregain_scale),
+ SOC_SINGLE_TLV("Mic2 Playback Volume",
+ SUN6I_CODEC_OM_PA_CTRL, SUN6I_CODEC_OM_PA_CTRL_MIC2G,
+ 0x7, 0, sun6i_codec_out_mixer_pregain_scale),
+
+ /* Microphone Amp boost gains */
+ SOC_SINGLE_TLV("Mic1 Boost Volume", SUN6I_CODEC_MIC_CTRL,
+ SUN6I_CODEC_MIC_CTRL_MIC1BOOST, 0x7, 0,
+ sun6i_codec_mic_gain_scale),
+ SOC_SINGLE_TLV("Mic2 Boost Volume", SUN6I_CODEC_MIC_CTRL,
+ SUN6I_CODEC_MIC_CTRL_MIC2BOOST, 0x7, 0,
+ sun6i_codec_mic_gain_scale),
+ SOC_DOUBLE_TLV("ADC Capture Volume",
+ SUN6I_CODEC_ADC_ACTL, SUN6I_CODEC_ADC_ACTL_ADCLG,
+ SUN6I_CODEC_ADC_ACTL_ADCRG, 0x7, 0,
+ sun6i_codec_out_mixer_pregain_scale),
+};
+
+static const struct snd_soc_dapm_widget sun6i_codec_codec_dapm_widgets[] = {
+ /* Microphone inputs */
+ SND_SOC_DAPM_INPUT("MIC1"),
+ SND_SOC_DAPM_INPUT("MIC2"),
+ SND_SOC_DAPM_INPUT("MIC3"),
+
+ /* Microphone Bias */
+ SND_SOC_DAPM_SUPPLY("HBIAS", SUN6I_CODEC_MIC_CTRL,
+ SUN6I_CODEC_MIC_CTRL_HBIASEN, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("MBIAS", SUN6I_CODEC_MIC_CTRL,
+ SUN6I_CODEC_MIC_CTRL_MBIASEN, 0, NULL, 0),
+
+ /* Mic input path */
+ SND_SOC_DAPM_MUX("Mic2 Amplifier Source Route",
+ SND_SOC_NOPM, 0, 0, sun6i_codec_mic2_src),
+ SND_SOC_DAPM_PGA("Mic1 Amplifier", SUN6I_CODEC_MIC_CTRL,
+ SUN6I_CODEC_MIC_CTRL_MIC1AMPEN, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Mic2 Amplifier", SUN6I_CODEC_MIC_CTRL,
+ SUN6I_CODEC_MIC_CTRL_MIC2AMPEN, 0, NULL, 0),
+
+ /* Line In */
+ SND_SOC_DAPM_INPUT("LINEIN"),
+
+ /* Digital parts of the ADCs */
+ SND_SOC_DAPM_SUPPLY("ADC Enable", SUN6I_CODEC_ADC_FIFOC,
+ SUN6I_CODEC_ADC_FIFOC_EN_AD, 0,
+ NULL, 0),
+
+ /* Analog parts of the ADCs */
+ SND_SOC_DAPM_ADC("Left ADC", "Codec Capture", SUN6I_CODEC_ADC_ACTL,
+ SUN6I_CODEC_ADC_ACTL_ADCLEN, 0),
+ SND_SOC_DAPM_ADC("Right ADC", "Codec Capture", SUN6I_CODEC_ADC_ACTL,
+ SUN6I_CODEC_ADC_ACTL_ADCREN, 0),
+
+ /* ADC Mixers */
+ SOC_MIXER_ARRAY("Left ADC Mixer", SND_SOC_NOPM, 0, 0,
+ sun6i_codec_adc_mixer_controls),
+ SOC_MIXER_ARRAY("Right ADC Mixer", SND_SOC_NOPM, 0, 0,
+ sun6i_codec_adc_mixer_controls),
+
+ /* Digital parts of the DACs */
+ SND_SOC_DAPM_SUPPLY("DAC Enable", SUN4I_CODEC_DAC_DPC,
+ SUN4I_CODEC_DAC_DPC_EN_DA, 0,
+ NULL, 0),
+
+ /* Analog parts of the DACs */
+ SND_SOC_DAPM_DAC("Left DAC", "Codec Playback",
+ SUN6I_CODEC_OM_DACA_CTRL,
+ SUN6I_CODEC_OM_DACA_CTRL_DACALEN, 0),
+ SND_SOC_DAPM_DAC("Right DAC", "Codec Playback",
+ SUN6I_CODEC_OM_DACA_CTRL,
+ SUN6I_CODEC_OM_DACA_CTRL_DACAREN, 0),
+
+ /* Mixers */
+ SOC_MIXER_ARRAY("Left Mixer", SUN6I_CODEC_OM_DACA_CTRL,
+ SUN6I_CODEC_OM_DACA_CTRL_LMIXEN, 0,
+ sun6i_codec_mixer_controls),
+ SOC_MIXER_ARRAY("Right Mixer", SUN6I_CODEC_OM_DACA_CTRL,
+ SUN6I_CODEC_OM_DACA_CTRL_RMIXEN, 0,
+ sun6i_codec_mixer_controls),
+
+ /* Headphone output path */
+ SND_SOC_DAPM_MUX("Headphone Source Playback Route",
+ SND_SOC_NOPM, 0, 0, sun6i_codec_hp_src),
+ SND_SOC_DAPM_OUT_DRV("Headphone Amp", SUN6I_CODEC_OM_PA_CTRL,
+ SUN6I_CODEC_OM_PA_CTRL_HPPAEN, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("HPCOM Protection", SUN6I_CODEC_OM_PA_CTRL,
+ SUN6I_CODEC_OM_PA_CTRL_COMPTEN, 0, NULL, 0),
+ SND_SOC_DAPM_REG(snd_soc_dapm_supply, "HPCOM", SUN6I_CODEC_OM_PA_CTRL,
+ SUN6I_CODEC_OM_PA_CTRL_HPCOM_CTL, 0x3, 0x3, 0),
+ SND_SOC_DAPM_OUTPUT("HP"),
+
+ /* Line Out path */
+ SND_SOC_DAPM_MUX("Line Out Source Playback Route",
+ SND_SOC_NOPM, 0, 0, sun6i_codec_lineout_src),
+ SND_SOC_DAPM_OUTPUT("LINEOUT"),
+};
+
+static const struct snd_soc_dapm_route sun6i_codec_codec_dapm_routes[] = {
+ /* DAC Routes */
+ { "Left DAC", NULL, "DAC Enable" },
+ { "Right DAC", NULL, "DAC Enable" },
+
+ /* Microphone Routes */
+ { "Mic1 Amplifier", NULL, "MIC1"},
+ { "Mic2 Amplifier Source Route", "Mic2", "MIC2" },
+ { "Mic2 Amplifier Source Route", "Mic3", "MIC3" },
+ { "Mic2 Amplifier", NULL, "Mic2 Amplifier Source Route"},
+
+ /* Left Mixer Routes */
+ { "Left Mixer", "DAC Playback Switch", "Left DAC" },
+ { "Left Mixer", "DAC Reversed Playback Switch", "Right DAC" },
+ { "Left Mixer", "Line In Playback Switch", "LINEIN" },
+ { "Left Mixer", "Mic1 Playback Switch", "Mic1 Amplifier" },
+ { "Left Mixer", "Mic2 Playback Switch", "Mic2 Amplifier" },
+
+ /* Right Mixer Routes */
+ { "Right Mixer", "DAC Playback Switch", "Right DAC" },
+ { "Right Mixer", "DAC Reversed Playback Switch", "Left DAC" },
+ { "Right Mixer", "Line In Playback Switch", "LINEIN" },
+ { "Right Mixer", "Mic1 Playback Switch", "Mic1 Amplifier" },
+ { "Right Mixer", "Mic2 Playback Switch", "Mic2 Amplifier" },
+
+ /* Left ADC Mixer Routes */
+ { "Left ADC Mixer", "Mixer Capture Switch", "Left Mixer" },
+ { "Left ADC Mixer", "Mixer Reversed Capture Switch", "Right Mixer" },
+ { "Left ADC Mixer", "Line In Capture Switch", "LINEIN" },
+ { "Left ADC Mixer", "Mic1 Capture Switch", "Mic1 Amplifier" },
+ { "Left ADC Mixer", "Mic2 Capture Switch", "Mic2 Amplifier" },
+
+ /* Right ADC Mixer Routes */
+ { "Right ADC Mixer", "Mixer Capture Switch", "Right Mixer" },
+ { "Right ADC Mixer", "Mixer Reversed Capture Switch", "Left Mixer" },
+ { "Right ADC Mixer", "Line In Capture Switch", "LINEIN" },
+ { "Right ADC Mixer", "Mic1 Capture Switch", "Mic1 Amplifier" },
+ { "Right ADC Mixer", "Mic2 Capture Switch", "Mic2 Amplifier" },
+
+ /* Headphone Routes */
+ { "Headphone Source Playback Route", "DAC", "Left DAC" },
+ { "Headphone Source Playback Route", "DAC", "Right DAC" },
+ { "Headphone Source Playback Route", "Mixer", "Left Mixer" },
+ { "Headphone Source Playback Route", "Mixer", "Right Mixer" },
+ { "Headphone Amp", NULL, "Headphone Source Playback Route" },
+ { "HP", NULL, "Headphone Amp" },
+ { "HPCOM", NULL, "HPCOM Protection" },
+
+ /* Line Out Routes */
+ { "Line Out Source Playback Route", "Stereo", "Left Mixer" },
+ { "Line Out Source Playback Route", "Stereo", "Right Mixer" },
+ { "Line Out Source Playback Route", "Mono Differential", "Left Mixer" },
+ { "LINEOUT", NULL, "Line Out Source Playback Route" },
+
+ /* ADC Routes */
+ { "Left ADC", NULL, "ADC Enable" },
+ { "Right ADC", NULL, "ADC Enable" },
+ { "Left ADC", NULL, "Left ADC Mixer" },
+ { "Right ADC", NULL, "Right ADC Mixer" },
+};
+
+static struct snd_soc_codec_driver sun6i_codec_codec = {
+ .component_driver = {
+ .controls = sun6i_codec_codec_widgets,
+ .num_controls = ARRAY_SIZE(sun6i_codec_codec_widgets),
+ .dapm_widgets = sun6i_codec_codec_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(sun6i_codec_codec_dapm_widgets),
+ .dapm_routes = sun6i_codec_codec_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(sun6i_codec_codec_dapm_routes),
+ },
+};
+
+/* sun8i A23 codec */
+static const struct snd_kcontrol_new sun8i_a23_codec_codec_controls[] = {
+ SOC_SINGLE_TLV("DAC Playback Volume", SUN4I_CODEC_DAC_DPC,
+ SUN4I_CODEC_DAC_DPC_DVOL, 0x3f, 1,
+ sun6i_codec_dvol_scale),
+};
+
+static const struct snd_soc_dapm_widget sun8i_a23_codec_codec_widgets[] = {
+ /* Digital parts of the ADCs */
+ SND_SOC_DAPM_SUPPLY("ADC Enable", SUN6I_CODEC_ADC_FIFOC,
+ SUN6I_CODEC_ADC_FIFOC_EN_AD, 0, NULL, 0),
+ /* Digital parts of the DACs */
+ SND_SOC_DAPM_SUPPLY("DAC Enable", SUN4I_CODEC_DAC_DPC,
+ SUN4I_CODEC_DAC_DPC_EN_DA, 0, NULL, 0),
+
+};
+
+static struct snd_soc_codec_driver sun8i_a23_codec_codec = {
+ .component_driver = {
+ .controls = sun8i_a23_codec_codec_controls,
+ .num_controls = ARRAY_SIZE(sun8i_a23_codec_codec_controls),
+ .dapm_widgets = sun8i_a23_codec_codec_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(sun8i_a23_codec_codec_widgets),
+ },
+};
+
static const struct snd_soc_component_driver sun4i_codec_component = {
.name = "sun4i-codec",
};
@@ -678,45 +1144,6 @@ static struct snd_soc_dai_driver dummy_cpu_dai = {
},
};
-static const struct regmap_config sun4i_codec_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .max_register = SUN4I_CODEC_ADC_RXCNT,
-};
-
-static const struct regmap_config sun7i_codec_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .max_register = SUN7I_CODEC_AC_MIC_PHONE_CAL,
-};
-
-struct sun4i_codec_quirks {
- const struct regmap_config *regmap_config;
-};
-
-static const struct sun4i_codec_quirks sun4i_codec_quirks = {
- .regmap_config = &sun4i_codec_regmap_config,
-};
-
-static const struct sun4i_codec_quirks sun7i_codec_quirks = {
- .regmap_config = &sun7i_codec_regmap_config,
-};
-
-static const struct of_device_id sun4i_codec_of_match[] = {
- {
- .compatible = "allwinner,sun4i-a10-codec",
- .data = &sun4i_codec_quirks,
- },
- {
- .compatible = "allwinner,sun7i-a20-codec",
- .data = &sun7i_codec_quirks,
- },
- {}
-};
-MODULE_DEVICE_TABLE(of, sun4i_codec_of_match);
-
static struct snd_soc_dai_link *sun4i_codec_create_link(struct device *dev,
int *num_links)
{
@@ -781,6 +1208,259 @@ static struct snd_soc_card *sun4i_codec_create_card(struct device *dev)
return card;
};
+static const struct snd_soc_dapm_widget sun6i_codec_card_dapm_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone", NULL),
+ SND_SOC_DAPM_LINE("Line In", NULL),
+ SND_SOC_DAPM_LINE("Line Out", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_MIC("Mic", NULL),
+ SND_SOC_DAPM_SPK("Speaker", sun4i_codec_spk_event),
+};
+
+static struct snd_soc_card *sun6i_codec_create_card(struct device *dev)
+{
+ struct snd_soc_card *card;
+ int ret;
+
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return ERR_PTR(-ENOMEM);
+
+ card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
+ if (!card->dai_link)
+ return ERR_PTR(-ENOMEM);
+
+ card->dev = dev;
+ card->name = "A31 Audio Codec";
+ card->dapm_widgets = sun6i_codec_card_dapm_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
+ card->fully_routed = true;
+
+ ret = snd_soc_of_parse_audio_routing(card, "allwinner,audio-routing");
+ if (ret)
+ dev_warn(dev, "failed to parse audio-routing: %d\n", ret);
+
+ return card;
+};
+
+/* Connect digital side enables to analog side widgets */
+static const struct snd_soc_dapm_route sun8i_codec_card_routes[] = {
+ /* ADC Routes */
+ { "Left ADC", NULL, "ADC Enable" },
+ { "Right ADC", NULL, "ADC Enable" },
+ { "Codec Capture", NULL, "Left ADC" },
+ { "Codec Capture", NULL, "Right ADC" },
+
+ /* DAC Routes */
+ { "Left DAC", NULL, "DAC Enable" },
+ { "Right DAC", NULL, "DAC Enable" },
+ { "Left DAC", NULL, "Codec Playback" },
+ { "Right DAC", NULL, "Codec Playback" },
+};
+
+static struct snd_soc_aux_dev aux_dev = {
+ .name = "Codec Analog Controls",
+};
+
+static struct snd_soc_card *sun8i_a23_codec_create_card(struct device *dev)
+{
+ struct snd_soc_card *card;
+ int ret;
+
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return ERR_PTR(-ENOMEM);
+
+ aux_dev.codec_of_node = of_parse_phandle(dev->of_node,
+ "allwinner,codec-analog-controls",
+ 0);
+ if (!aux_dev.codec_of_node) {
+ dev_err(dev, "Can't find analog controls for codec.\n");
+ return ERR_PTR(-EINVAL);
+ };
+
+ card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
+ if (!card->dai_link)
+ return ERR_PTR(-ENOMEM);
+
+ card->dev = dev;
+ card->name = "A23 Audio Codec";
+ card->dapm_widgets = sun6i_codec_card_dapm_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
+ card->dapm_routes = sun8i_codec_card_routes;
+ card->num_dapm_routes = ARRAY_SIZE(sun8i_codec_card_routes);
+ card->aux_dev = &aux_dev;
+ card->num_aux_devs = 1;
+ card->fully_routed = true;
+
+ ret = snd_soc_of_parse_audio_routing(card, "allwinner,audio-routing");
+ if (ret)
+ dev_warn(dev, "failed to parse audio-routing: %d\n", ret);
+
+ return card;
+};
+
+static struct snd_soc_card *sun8i_h3_codec_create_card(struct device *dev)
+{
+ struct snd_soc_card *card;
+ int ret;
+
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return ERR_PTR(-ENOMEM);
+
+ aux_dev.codec_of_node = of_parse_phandle(dev->of_node,
+ "allwinner,codec-analog-controls",
+ 0);
+ if (!aux_dev.codec_of_node) {
+ dev_err(dev, "Can't find analog controls for codec.\n");
+ return ERR_PTR(-EINVAL);
+ };
+
+ card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
+ if (!card->dai_link)
+ return ERR_PTR(-ENOMEM);
+
+ card->dev = dev;
+ card->name = "H3 Audio Codec";
+ card->dapm_widgets = sun6i_codec_card_dapm_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
+ card->dapm_routes = sun8i_codec_card_routes;
+ card->num_dapm_routes = ARRAY_SIZE(sun8i_codec_card_routes);
+ card->aux_dev = &aux_dev;
+ card->num_aux_devs = 1;
+ card->fully_routed = true;
+
+ ret = snd_soc_of_parse_audio_routing(card, "allwinner,audio-routing");
+ if (ret)
+ dev_warn(dev, "failed to parse audio-routing: %d\n", ret);
+
+ return card;
+};
+
+static const struct regmap_config sun4i_codec_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = SUN4I_CODEC_ADC_RXCNT,
+};
+
+static const struct regmap_config sun6i_codec_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = SUN6I_CODEC_HMIC_DATA,
+};
+
+static const struct regmap_config sun7i_codec_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = SUN7I_CODEC_AC_MIC_PHONE_CAL,
+};
+
+static const struct regmap_config sun8i_a23_codec_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = SUN8I_A23_CODEC_ADC_RXCNT,
+};
+
+static const struct regmap_config sun8i_h3_codec_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = SUN8I_H3_CODEC_ADC_DBG,
+};
+
+struct sun4i_codec_quirks {
+ const struct regmap_config *regmap_config;
+ const struct snd_soc_codec_driver *codec;
+ struct snd_soc_card * (*create_card)(struct device *dev);
+ struct reg_field reg_adc_fifoc; /* used for regmap_field */
+ unsigned int reg_dac_txdata; /* TX FIFO offset for DMA config */
+ unsigned int reg_adc_rxdata; /* RX FIFO offset for DMA config */
+ bool has_reset;
+};
+
+static const struct sun4i_codec_quirks sun4i_codec_quirks = {
+ .regmap_config = &sun4i_codec_regmap_config,
+ .codec = &sun4i_codec_codec,
+ .create_card = sun4i_codec_create_card,
+ .reg_adc_fifoc = REG_FIELD(SUN4I_CODEC_ADC_FIFOC, 0, 31),
+ .reg_dac_txdata = SUN4I_CODEC_DAC_TXDATA,
+ .reg_adc_rxdata = SUN4I_CODEC_ADC_RXDATA,
+};
+
+static const struct sun4i_codec_quirks sun6i_a31_codec_quirks = {
+ .regmap_config = &sun6i_codec_regmap_config,
+ .codec = &sun6i_codec_codec,
+ .create_card = sun6i_codec_create_card,
+ .reg_adc_fifoc = REG_FIELD(SUN6I_CODEC_ADC_FIFOC, 0, 31),
+ .reg_dac_txdata = SUN4I_CODEC_DAC_TXDATA,
+ .reg_adc_rxdata = SUN6I_CODEC_ADC_RXDATA,
+ .has_reset = true,
+};
+
+static const struct sun4i_codec_quirks sun7i_codec_quirks = {
+ .regmap_config = &sun7i_codec_regmap_config,
+ .codec = &sun4i_codec_codec,
+ .create_card = sun4i_codec_create_card,
+ .reg_adc_fifoc = REG_FIELD(SUN4I_CODEC_ADC_FIFOC, 0, 31),
+ .reg_dac_txdata = SUN4I_CODEC_DAC_TXDATA,
+ .reg_adc_rxdata = SUN4I_CODEC_ADC_RXDATA,
+};
+
+static const struct sun4i_codec_quirks sun8i_a23_codec_quirks = {
+ .regmap_config = &sun8i_a23_codec_regmap_config,
+ .codec = &sun8i_a23_codec_codec,
+ .create_card = sun8i_a23_codec_create_card,
+ .reg_adc_fifoc = REG_FIELD(SUN6I_CODEC_ADC_FIFOC, 0, 31),
+ .reg_dac_txdata = SUN4I_CODEC_DAC_TXDATA,
+ .reg_adc_rxdata = SUN6I_CODEC_ADC_RXDATA,
+ .has_reset = true,
+};
+
+static const struct sun4i_codec_quirks sun8i_h3_codec_quirks = {
+ .regmap_config = &sun8i_h3_codec_regmap_config,
+ /*
+ * TODO Share the codec structure with A23 for now.
+ * This should be split out when adding digital audio
+ * processing support for the H3.
+ */
+ .codec = &sun8i_a23_codec_codec,
+ .create_card = sun8i_h3_codec_create_card,
+ .reg_adc_fifoc = REG_FIELD(SUN6I_CODEC_ADC_FIFOC, 0, 31),
+ .reg_dac_txdata = SUN8I_H3_CODEC_DAC_TXDATA,
+ .reg_adc_rxdata = SUN6I_CODEC_ADC_RXDATA,
+ .has_reset = true,
+};
+
+static const struct of_device_id sun4i_codec_of_match[] = {
+ {
+ .compatible = "allwinner,sun4i-a10-codec",
+ .data = &sun4i_codec_quirks,
+ },
+ {
+ .compatible = "allwinner,sun6i-a31-codec",
+ .data = &sun6i_a31_codec_quirks,
+ },
+ {
+ .compatible = "allwinner,sun7i-a20-codec",
+ .data = &sun7i_codec_quirks,
+ },
+ {
+ .compatible = "allwinner,sun8i-a23-codec",
+ .data = &sun8i_a23_codec_quirks,
+ },
+ {
+ .compatible = "allwinner,sun8i-h3-codec",
+ .data = &sun8i_h3_codec_quirks,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun4i_codec_of_match);
+
static int sun4i_codec_probe(struct platform_device *pdev)
{
struct snd_soc_card *card;
@@ -829,6 +1509,14 @@ static int sun4i_codec_probe(struct platform_device *pdev)
return PTR_ERR(scodec->clk_module);
}
+ if (quirks->has_reset) {
+ scodec->rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(scodec->rst)) {
+ dev_err(&pdev->dev, "Failed to get reset control\n");
+ return PTR_ERR(scodec->rst);
+ }
+ }
+
scodec->gpio_pa = devm_gpiod_get_optional(&pdev->dev, "allwinner,pa",
GPIOD_OUT_LOW);
if (IS_ERR(scodec->gpio_pa)) {
@@ -838,27 +1526,48 @@ static int sun4i_codec_probe(struct platform_device *pdev)
return ret;
}
+ /* reg_field setup */
+ scodec->reg_adc_fifoc = devm_regmap_field_alloc(&pdev->dev,
+ scodec->regmap,
+ quirks->reg_adc_fifoc);
+ if (IS_ERR(scodec->reg_adc_fifoc)) {
+ ret = PTR_ERR(scodec->reg_adc_fifoc);
+ dev_err(&pdev->dev, "Failed to create regmap fields: %d\n",
+ ret);
+ return ret;
+ }
+
/* Enable the bus clock */
if (clk_prepare_enable(scodec->clk_apb)) {
dev_err(&pdev->dev, "Failed to enable the APB clock\n");
return -EINVAL;
}
+ /* Deassert the reset control */
+ if (scodec->rst) {
+ ret = reset_control_deassert(scodec->rst);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to deassert the reset control\n");
+ goto err_clk_disable;
+ }
+ }
+
/* DMA configuration for TX FIFO */
- scodec->playback_dma_data.addr = res->start + SUN4I_CODEC_DAC_TXDATA;
- scodec->playback_dma_data.maxburst = 4;
+ scodec->playback_dma_data.addr = res->start + quirks->reg_dac_txdata;
+ scodec->playback_dma_data.maxburst = 8;
scodec->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
/* DMA configuration for RX FIFO */
- scodec->capture_dma_data.addr = res->start + SUN4I_CODEC_ADC_RXDATA;
- scodec->capture_dma_data.maxburst = 4;
+ scodec->capture_dma_data.addr = res->start + quirks->reg_adc_rxdata;
+ scodec->capture_dma_data.maxburst = 8;
scodec->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
- ret = snd_soc_register_codec(&pdev->dev, &sun4i_codec_codec,
+ ret = snd_soc_register_codec(&pdev->dev, quirks->codec,
&sun4i_codec_dai, 1);
if (ret) {
dev_err(&pdev->dev, "Failed to register our codec\n");
- goto err_clk_disable;
+ goto err_assert_reset;
}
ret = devm_snd_soc_register_component(&pdev->dev,
@@ -875,7 +1584,7 @@ static int sun4i_codec_probe(struct platform_device *pdev)
goto err_unregister_codec;
}
- card = sun4i_codec_create_card(&pdev->dev);
+ card = quirks->create_card(&pdev->dev);
if (IS_ERR(card)) {
ret = PTR_ERR(card);
dev_err(&pdev->dev, "Failed to create our card\n");
@@ -895,6 +1604,9 @@ static int sun4i_codec_probe(struct platform_device *pdev)
err_unregister_codec:
snd_soc_unregister_codec(&pdev->dev);
+err_assert_reset:
+ if (scodec->rst)
+ reset_control_assert(scodec->rst);
err_clk_disable:
clk_disable_unprepare(scodec->clk_apb);
return ret;
@@ -907,6 +1619,8 @@ static int sun4i_codec_remove(struct platform_device *pdev)
snd_soc_unregister_card(card);
snd_soc_unregister_codec(&pdev->dev);
+ if (scodec->rst)
+ reset_control_assert(scodec->rst);
clk_disable_unprepare(scodec->clk_apb);
return 0;
@@ -926,4 +1640,5 @@ MODULE_DESCRIPTION("Allwinner A10 codec driver");
MODULE_AUTHOR("Emilio López <emilio@elopez.com.ar>");
MODULE_AUTHOR("Jon Smirl <jonsmirl@gmail.com>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
MODULE_LICENSE("GPL");
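
Across the sun4i-codec changes above, direct regmap_update_bits() calls on SUN4I_CODEC_ADC_FIFOC are replaced with a regmap_field, because the ADC FIFO control register sits at 0x1c on sun4i/sun7i but at 0x10 on sun6i and later SoCs. A condensed sketch of the pattern, using only the calls already visible in the diff (the field spans the whole 32-bit register, so the existing bit macros keep working unchanged):

	struct reg_field adc_fifoc = REG_FIELD(SUN6I_CODEC_ADC_FIFOC, 0, 31);

	scodec->reg_adc_fifoc = devm_regmap_field_alloc(&pdev->dev,
							scodec->regmap,
							adc_fifoc);
	if (IS_ERR(scodec->reg_adc_fifoc))
		return PTR_ERR(scodec->reg_adc_fifoc);

	/* Callers no longer need to know which offset this SoC uses */
	regmap_field_update_bits(scodec->reg_adc_fifoc,
				 BIT(SUN4I_CODEC_ADC_FIFOC_FIFO_FLUSH),
				 BIT(SUN4I_CODEC_ADC_FIFOC_FIFO_FLUSH));
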
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
index 687a8f83dbe5..f24d19526603 100644
--- a/sound/soc/sunxi/sun4i-i2s.c
+++ b/sound/soc/sunxi/sun4i-i2s.c
@@ -93,6 +93,9 @@ struct sun4i_i2s {
struct clk *mod_clk;
struct regmap *regmap;
+ unsigned int mclk_freq;
+
+ struct snd_dmaengine_dai_dma_data capture_dma_data;
struct snd_dmaengine_dai_dma_data playback_dma_data;
};
@@ -157,14 +160,24 @@ static int sun4i_i2s_get_mclk_div(struct sun4i_i2s *i2s,
}
static int sun4i_i2s_oversample_rates[] = { 128, 192, 256, 384, 512, 768 };
+static bool sun4i_i2s_oversample_is_valid(unsigned int oversample)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sun4i_i2s_oversample_rates); i++)
+ if (sun4i_i2s_oversample_rates[i] == oversample)
+ return true;
+
+ return false;
+}
static int sun4i_i2s_set_clk_rate(struct sun4i_i2s *i2s,
unsigned int rate,
unsigned int word_size)
{
- unsigned int clk_rate;
+ unsigned int oversample_rate, clk_rate;
int bclk_div, mclk_div;
- int ret, i;
+ int ret;
switch (rate) {
case 176400:
@@ -196,21 +209,18 @@ static int sun4i_i2s_set_clk_rate(struct sun4i_i2s *i2s,
if (ret)
return ret;
- /* Always favor the highest oversampling rate */
- for (i = (ARRAY_SIZE(sun4i_i2s_oversample_rates) - 1); i >= 0; i--) {
- unsigned int oversample_rate = sun4i_i2s_oversample_rates[i];
-
- bclk_div = sun4i_i2s_get_bclk_div(i2s, oversample_rate,
- word_size);
- mclk_div = sun4i_i2s_get_mclk_div(i2s, oversample_rate,
- clk_rate,
- rate);
+ oversample_rate = i2s->mclk_freq / rate;
+ if (!sun4i_i2s_oversample_is_valid(oversample_rate))
+ return -EINVAL;
- if ((bclk_div >= 0) && (mclk_div >= 0))
- break;
- }
+ bclk_div = sun4i_i2s_get_bclk_div(i2s, oversample_rate,
+ word_size);
+ if (bclk_div < 0)
+ return -EINVAL;
- if ((bclk_div < 0) || (mclk_div < 0))
+ mclk_div = sun4i_i2s_get_mclk_div(i2s, oversample_rate,
+ clk_rate, rate);
+ if (mclk_div < 0)
return -EINVAL;
regmap_write(i2s->regmap, SUN4I_I2S_CLK_DIV_REG,
@@ -341,6 +351,27 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return 0;
}
+static void sun4i_i2s_start_capture(struct sun4i_i2s *i2s)
+{
+ /* Flush RX FIFO */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_FIFO_CTRL_REG,
+ SUN4I_I2S_FIFO_CTRL_FLUSH_RX,
+ SUN4I_I2S_FIFO_CTRL_FLUSH_RX);
+
+ /* Clear RX counter */
+ regmap_write(i2s->regmap, SUN4I_I2S_RX_CNT_REG, 0);
+
+ /* Enable RX Block */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_RX_EN,
+ SUN4I_I2S_CTRL_RX_EN);
+
+ /* Enable RX DRQ */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_DMA_INT_CTRL_REG,
+ SUN4I_I2S_DMA_INT_CTRL_RX_DRQ_EN,
+ SUN4I_I2S_DMA_INT_CTRL_RX_DRQ_EN);
+}
+
static void sun4i_i2s_start_playback(struct sun4i_i2s *i2s)
{
/* Flush TX FIFO */
@@ -362,6 +393,18 @@ static void sun4i_i2s_start_playback(struct sun4i_i2s *i2s)
SUN4I_I2S_DMA_INT_CTRL_TX_DRQ_EN);
}
+static void sun4i_i2s_stop_capture(struct sun4i_i2s *i2s)
+{
+ /* Disable RX Block */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_RX_EN,
+ 0);
+
+ /* Disable RX DRQ */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_DMA_INT_CTRL_REG,
+ SUN4I_I2S_DMA_INT_CTRL_RX_DRQ_EN,
+ 0);
+}
static void sun4i_i2s_stop_playback(struct sun4i_i2s *i2s)
{
@@ -388,7 +431,7 @@ static int sun4i_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
sun4i_i2s_start_playback(i2s);
else
- return -EINVAL;
+ sun4i_i2s_start_capture(i2s);
break;
case SNDRV_PCM_TRIGGER_STOP:
@@ -397,7 +440,7 @@ static int sun4i_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
sun4i_i2s_stop_playback(i2s);
else
- return -EINVAL;
+ sun4i_i2s_stop_capture(i2s);
break;
default:
@@ -447,9 +490,23 @@ static void sun4i_i2s_shutdown(struct snd_pcm_substream *substream,
regmap_write(i2s->regmap, SUN4I_I2S_CTRL_REG, 0);
}
+static int sun4i_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ if (clk_id != 0)
+ return -EINVAL;
+
+ i2s->mclk_freq = freq;
+
+ return 0;
+}
+
static const struct snd_soc_dai_ops sun4i_i2s_dai_ops = {
.hw_params = sun4i_i2s_hw_params,
.set_fmt = sun4i_i2s_set_fmt,
+ .set_sysclk = sun4i_i2s_set_sysclk,
.shutdown = sun4i_i2s_shutdown,
.startup = sun4i_i2s_startup,
.trigger = sun4i_i2s_trigger,
@@ -459,7 +516,9 @@ static int sun4i_i2s_dai_probe(struct snd_soc_dai *dai)
{
struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
- snd_soc_dai_init_dma_data(dai, &i2s->playback_dma_data, NULL);
+ snd_soc_dai_init_dma_data(dai,
+ &i2s->playback_dma_data,
+ &i2s->capture_dma_data);
snd_soc_dai_set_drvdata(dai, i2s);
@@ -468,6 +527,13 @@ static int sun4i_i2s_dai_probe(struct snd_soc_dai *dai)
static struct snd_soc_dai_driver sun4i_i2s_dai = {
.probe = sun4i_i2s_dai_probe,
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
.playback = {
.stream_name = "Playback",
.channels_min = 2,
@@ -630,6 +696,9 @@ static int sun4i_i2s_probe(struct platform_device *pdev)
i2s->playback_dma_data.addr = res->start + SUN4I_I2S_FIFO_TX_REG;
i2s->playback_dma_data.maxburst = 4;
+ i2s->capture_dma_data.addr = res->start + SUN4I_I2S_FIFO_RX_REG;
+ i2s->capture_dma_data.maxburst = 4;
+
pm_runtime_enable(&pdev->dev);
if (!pm_runtime_enabled(&pdev->dev)) {
ret = sun4i_i2s_runtime_resume(&pdev->dev);
diff --git a/sound/soc/sunxi/sun8i-codec-analog.c b/sound/soc/sunxi/sun8i-codec-analog.c
new file mode 100644
index 000000000000..af02290ebe49
--- /dev/null
+++ b/sound/soc/sunxi/sun8i-codec-analog.c
@@ -0,0 +1,665 @@
+/*
+ * This driver supports the analog controls for the internal codec
+ * found in Allwinner's A31s, A23, A33 and H3 SoCs.
+ *
+ * Copyright 2016 Chen-Yu Tsai <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+
+/* Codec analog control register offsets and bit fields */
+#define SUN8I_ADDA_HP_VOLC 0x00
+#define SUN8I_ADDA_HP_VOLC_PA_CLK_GATE 7
+#define SUN8I_ADDA_HP_VOLC_HP_VOL 0
+#define SUN8I_ADDA_LOMIXSC 0x01
+#define SUN8I_ADDA_LOMIXSC_MIC1 6
+#define SUN8I_ADDA_LOMIXSC_MIC2 5
+#define SUN8I_ADDA_LOMIXSC_PHONE 4
+#define SUN8I_ADDA_LOMIXSC_PHONEN 3
+#define SUN8I_ADDA_LOMIXSC_LINEINL 2
+#define SUN8I_ADDA_LOMIXSC_DACL 1
+#define SUN8I_ADDA_LOMIXSC_DACR 0
+#define SUN8I_ADDA_ROMIXSC 0x02
+#define SUN8I_ADDA_ROMIXSC_MIC1 6
+#define SUN8I_ADDA_ROMIXSC_MIC2 5
+#define SUN8I_ADDA_ROMIXSC_PHONE 4
+#define SUN8I_ADDA_ROMIXSC_PHONEP 3
+#define SUN8I_ADDA_ROMIXSC_LINEINR 2
+#define SUN8I_ADDA_ROMIXSC_DACR 1
+#define SUN8I_ADDA_ROMIXSC_DACL 0
+#define SUN8I_ADDA_DAC_PA_SRC 0x03
+#define SUN8I_ADDA_DAC_PA_SRC_DACAREN 7
+#define SUN8I_ADDA_DAC_PA_SRC_DACALEN 6
+#define SUN8I_ADDA_DAC_PA_SRC_RMIXEN 5
+#define SUN8I_ADDA_DAC_PA_SRC_LMIXEN 4
+#define SUN8I_ADDA_DAC_PA_SRC_RHPPAMUTE 3
+#define SUN8I_ADDA_DAC_PA_SRC_LHPPAMUTE 2
+#define SUN8I_ADDA_DAC_PA_SRC_RHPIS 1
+#define SUN8I_ADDA_DAC_PA_SRC_LHPIS 0
+#define SUN8I_ADDA_PHONEIN_GCTRL 0x04
+#define SUN8I_ADDA_PHONEIN_GCTRL_PHONEPG 4
+#define SUN8I_ADDA_PHONEIN_GCTRL_PHONENG 0
+#define SUN8I_ADDA_LINEIN_GCTRL 0x05
+#define SUN8I_ADDA_LINEIN_GCTRL_LINEING 4
+#define SUN8I_ADDA_LINEIN_GCTRL_PHONEG 0
+#define SUN8I_ADDA_MICIN_GCTRL 0x06
+#define SUN8I_ADDA_MICIN_GCTRL_MIC1G 4
+#define SUN8I_ADDA_MICIN_GCTRL_MIC2G 0
+#define SUN8I_ADDA_PAEN_HP_CTRL 0x07
+#define SUN8I_ADDA_PAEN_HP_CTRL_HPPAEN 7
+#define SUN8I_ADDA_PAEN_HP_CTRL_LINEOUTEN 7 /* H3 specific */
+#define SUN8I_ADDA_PAEN_HP_CTRL_HPCOM_FC 5
+#define SUN8I_ADDA_PAEN_HP_CTRL_COMPTEN 4
+#define SUN8I_ADDA_PAEN_HP_CTRL_PA_ANTI_POP_CTRL 2
+#define SUN8I_ADDA_PAEN_HP_CTRL_LTRNMUTE 1
+#define SUN8I_ADDA_PAEN_HP_CTRL_RTLNMUTE 0
+#define SUN8I_ADDA_PHONEOUT_CTRL 0x08
+#define SUN8I_ADDA_PHONEOUT_CTRL_PHONEOUTG 5
+#define SUN8I_ADDA_PHONEOUT_CTRL_PHONEOUTEN 4
+#define SUN8I_ADDA_PHONEOUT_CTRL_PHONEOUT_MIC1 3
+#define SUN8I_ADDA_PHONEOUT_CTRL_PHONEOUT_MIC2 2
+#define SUN8I_ADDA_PHONEOUT_CTRL_PHONEOUT_RMIX 1
+#define SUN8I_ADDA_PHONEOUT_CTRL_PHONEOUT_LMIX 0
+#define SUN8I_ADDA_PHONE_GAIN_CTRL 0x09
+#define SUN8I_ADDA_PHONE_GAIN_CTRL_LINEOUT_VOL 3
+#define SUN8I_ADDA_PHONE_GAIN_CTRL_PHONEPREG 0
+#define SUN8I_ADDA_MIC2G_CTRL 0x0a
+#define SUN8I_ADDA_MIC2G_CTRL_MIC2AMPEN 7
+#define SUN8I_ADDA_MIC2G_CTRL_MIC2BOOST 4
+#define SUN8I_ADDA_MIC2G_CTRL_LINEOUTLEN 3
+#define SUN8I_ADDA_MIC2G_CTRL_LINEOUTREN 2
+#define SUN8I_ADDA_MIC2G_CTRL_LINEOUTLSRC 1
+#define SUN8I_ADDA_MIC2G_CTRL_LINEOUTRSRC 0
+#define SUN8I_ADDA_MIC1G_MICBIAS_CTRL 0x0b
+#define SUN8I_ADDA_MIC1G_MICBIAS_CTRL_HMICBIASEN 7
+#define SUN8I_ADDA_MIC1G_MICBIAS_CTRL_MMICBIASEN 6
+#define SUN8I_ADDA_MIC1G_MICBIAS_CTRL_HMICBIAS_MODE 5
+#define SUN8I_ADDA_MIC1G_MICBIAS_CTRL_MIC1AMPEN 3
+#define SUN8I_ADDA_MIC1G_MICBIAS_CTRL_MIC1BOOST 0
+#define SUN8I_ADDA_LADCMIXSC 0x0c
+#define SUN8I_ADDA_LADCMIXSC_MIC1 6
+#define SUN8I_ADDA_LADCMIXSC_MIC2 5
+#define SUN8I_ADDA_LADCMIXSC_PHONE 4
+#define SUN8I_ADDA_LADCMIXSC_PHONEN 3
+#define SUN8I_ADDA_LADCMIXSC_LINEINL 2
+#define SUN8I_ADDA_LADCMIXSC_OMIXRL 1
+#define SUN8I_ADDA_LADCMIXSC_OMIXRR 0
+#define SUN8I_ADDA_RADCMIXSC 0x0d
+#define SUN8I_ADDA_RADCMIXSC_MIC1 6
+#define SUN8I_ADDA_RADCMIXSC_MIC2 5
+#define SUN8I_ADDA_RADCMIXSC_PHONE 4
+#define SUN8I_ADDA_RADCMIXSC_PHONEP 3
+#define SUN8I_ADDA_RADCMIXSC_LINEINR 2
+#define SUN8I_ADDA_RADCMIXSC_OMIXR 1
+#define SUN8I_ADDA_RADCMIXSC_OMIXL 0
+#define SUN8I_ADDA_RES 0x0e
+#define SUN8I_ADDA_RES_MMICBIAS_SEL 4
+#define SUN8I_ADDA_RES_PA_ANTI_POP_CTRL 0
+#define SUN8I_ADDA_ADC_AP_EN 0x0f
+#define SUN8I_ADDA_ADC_AP_EN_ADCREN 7
+#define SUN8I_ADDA_ADC_AP_EN_ADCLEN 6
+#define SUN8I_ADDA_ADC_AP_EN_ADCG 0
+
+/* Analog control register access bits */
+#define ADDA_PR 0x0 /* PRCM base + 0x1c0 */
+#define ADDA_PR_RESET BIT(28)
+#define ADDA_PR_WRITE BIT(24)
+#define ADDA_PR_ADDR_SHIFT 16
+#define ADDA_PR_ADDR_MASK GENMASK(4, 0)
+#define ADDA_PR_DATA_IN_SHIFT 8
+#define ADDA_PR_DATA_IN_MASK GENMASK(7, 0)
+#define ADDA_PR_DATA_OUT_SHIFT 0
+#define ADDA_PR_DATA_OUT_MASK GENMASK(7, 0)
+
+/* regmap access bits */
+static int adda_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ void __iomem *base = (void __iomem *)context;
+ u32 tmp;
+
+ /* De-assert reset */
+ writel(readl(base) | ADDA_PR_RESET, base);
+
+ /* Clear write bit */
+ writel(readl(base) & ~ADDA_PR_WRITE, base);
+
+ /* Set register address */
+ tmp = readl(base);
+ tmp &= ~(ADDA_PR_ADDR_MASK << ADDA_PR_ADDR_SHIFT);
+ tmp |= (reg & ADDA_PR_ADDR_MASK) << ADDA_PR_ADDR_SHIFT;
+ writel(tmp, base);
+
+ /* Read back value */
+ *val = readl(base) & ADDA_PR_DATA_OUT_MASK;
+
+ return 0;
+}
+
+static int adda_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ void __iomem *base = (void __iomem *)context;
+ u32 tmp;
+
+ /* De-assert reset */
+ writel(readl(base) | ADDA_PR_RESET, base);
+
+ /* Set register address */
+ tmp = readl(base);
+ tmp &= ~(ADDA_PR_ADDR_MASK << ADDA_PR_ADDR_SHIFT);
+ tmp |= (reg & ADDA_PR_ADDR_MASK) << ADDA_PR_ADDR_SHIFT;
+ writel(tmp, base);
+
+ /* Set data to write */
+ tmp = readl(base);
+ tmp &= ~(ADDA_PR_DATA_IN_MASK << ADDA_PR_DATA_IN_SHIFT);
+ tmp |= (val & ADDA_PR_DATA_IN_MASK) << ADDA_PR_DATA_IN_SHIFT;
+ writel(tmp, base);
+
+ /* Set write bit to signal a write */
+ writel(readl(base) | ADDA_PR_WRITE, base);
+
+ /* Clear write bit */
+ writel(readl(base) & ~ADDA_PR_WRITE, base);
+
+ return 0;
+}
+
+static const struct regmap_config adda_pr_regmap_cfg = {
+ .name = "adda-pr",
+ .reg_bits = 5,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .reg_read = adda_reg_read,
+ .reg_write = adda_reg_write,
+ .fast_io = true,
+ .max_register = 24,
+};
+
+/* mixer controls */
+static const struct snd_kcontrol_new sun8i_codec_mixer_controls[] = {
+ SOC_DAPM_DOUBLE_R("DAC Playback Switch",
+ SUN8I_ADDA_LOMIXSC,
+ SUN8I_ADDA_ROMIXSC,
+ SUN8I_ADDA_LOMIXSC_DACL, 1, 0),
+ SOC_DAPM_DOUBLE_R("DAC Reversed Playback Switch",
+ SUN8I_ADDA_LOMIXSC,
+ SUN8I_ADDA_ROMIXSC,
+ SUN8I_ADDA_LOMIXSC_DACR, 1, 0),
+ SOC_DAPM_DOUBLE_R("Line In Playback Switch",
+ SUN8I_ADDA_LOMIXSC,
+ SUN8I_ADDA_ROMIXSC,
+ SUN8I_ADDA_LOMIXSC_LINEINL, 1, 0),
+ SOC_DAPM_DOUBLE_R("Mic1 Playback Switch",
+ SUN8I_ADDA_LOMIXSC,
+ SUN8I_ADDA_ROMIXSC,
+ SUN8I_ADDA_LOMIXSC_MIC1, 1, 0),
+ SOC_DAPM_DOUBLE_R("Mic2 Playback Switch",
+ SUN8I_ADDA_LOMIXSC,
+ SUN8I_ADDA_ROMIXSC,
+ SUN8I_ADDA_LOMIXSC_MIC2, 1, 0),
+};
+
+/* ADC mixer controls */
+static const struct snd_kcontrol_new sun8i_codec_adc_mixer_controls[] = {
+ SOC_DAPM_DOUBLE_R("Mixer Capture Switch",
+ SUN8I_ADDA_LADCMIXSC,
+ SUN8I_ADDA_RADCMIXSC,
+ SUN8I_ADDA_LADCMIXSC_OMIXRL, 1, 0),
+ SOC_DAPM_DOUBLE_R("Mixer Reversed Capture Switch",
+ SUN8I_ADDA_LADCMIXSC,
+ SUN8I_ADDA_RADCMIXSC,
+ SUN8I_ADDA_LADCMIXSC_OMIXRR, 1, 0),
+ SOC_DAPM_DOUBLE_R("Line In Capture Switch",
+ SUN8I_ADDA_LADCMIXSC,
+ SUN8I_ADDA_RADCMIXSC,
+ SUN8I_ADDA_LADCMIXSC_LINEINL, 1, 0),
+ SOC_DAPM_DOUBLE_R("Mic1 Capture Switch",
+ SUN8I_ADDA_LADCMIXSC,
+ SUN8I_ADDA_RADCMIXSC,
+ SUN8I_ADDA_LADCMIXSC_MIC1, 1, 0),
+ SOC_DAPM_DOUBLE_R("Mic2 Capture Switch",
+ SUN8I_ADDA_LADCMIXSC,
+ SUN8I_ADDA_RADCMIXSC,
+ SUN8I_ADDA_LADCMIXSC_MIC2, 1, 0),
+};
+
+/* volume / mute controls */
+static const DECLARE_TLV_DB_SCALE(sun8i_codec_out_mixer_pregain_scale,
+ -450, 150, 0);
+static const DECLARE_TLV_DB_RANGE(sun8i_codec_mic_gain_scale,
+ 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+ 1, 7, TLV_DB_SCALE_ITEM(2400, 300, 0),
+);
+
+static const struct snd_kcontrol_new sun8i_codec_common_controls[] = {
+ /* Mixer pre-gains */
+ SOC_SINGLE_TLV("Line In Playback Volume", SUN8I_ADDA_LINEIN_GCTRL,
+ SUN8I_ADDA_LINEIN_GCTRL_LINEING,
+ 0x7, 0, sun8i_codec_out_mixer_pregain_scale),
+ SOC_SINGLE_TLV("Mic1 Playback Volume", SUN8I_ADDA_MICIN_GCTRL,
+ SUN8I_ADDA_MICIN_GCTRL_MIC1G,
+ 0x7, 0, sun8i_codec_out_mixer_pregain_scale),
+ SOC_SINGLE_TLV("Mic2 Playback Volume",
+ SUN8I_ADDA_MICIN_GCTRL, SUN8I_ADDA_MICIN_GCTRL_MIC2G,
+ 0x7, 0, sun8i_codec_out_mixer_pregain_scale),
+
+ /* Microphone Amp boost gains */
+ SOC_SINGLE_TLV("Mic1 Boost Volume", SUN8I_ADDA_MIC1G_MICBIAS_CTRL,
+ SUN8I_ADDA_MIC1G_MICBIAS_CTRL_MIC1BOOST, 0x7, 0,
+ sun8i_codec_mic_gain_scale),
+ SOC_SINGLE_TLV("Mic2 Boost Volume", SUN8I_ADDA_MIC2G_CTRL,
+ SUN8I_ADDA_MIC2G_CTRL_MIC2BOOST, 0x7, 0,
+ sun8i_codec_mic_gain_scale),
+
+ /* ADC */
+ SOC_SINGLE_TLV("ADC Gain Capture Volume", SUN8I_ADDA_ADC_AP_EN,
+ SUN8I_ADDA_ADC_AP_EN_ADCG, 0x7, 0,
+ sun8i_codec_out_mixer_pregain_scale),
+};
+
+static const struct snd_soc_dapm_widget sun8i_codec_common_widgets[] = {
+ /* ADC */
+ SND_SOC_DAPM_ADC("Left ADC", NULL, SUN8I_ADDA_ADC_AP_EN,
+ SUN8I_ADDA_ADC_AP_EN_ADCLEN, 0),
+ SND_SOC_DAPM_ADC("Right ADC", NULL, SUN8I_ADDA_ADC_AP_EN,
+ SUN8I_ADDA_ADC_AP_EN_ADCREN, 0),
+
+ /* DAC */
+ SND_SOC_DAPM_DAC("Left DAC", NULL, SUN8I_ADDA_DAC_PA_SRC,
+ SUN8I_ADDA_DAC_PA_SRC_DACALEN, 0),
+ SND_SOC_DAPM_DAC("Right DAC", NULL, SUN8I_ADDA_DAC_PA_SRC,
+ SUN8I_ADDA_DAC_PA_SRC_DACAREN, 0),
+ /*
+ * Due to this component and the codec belonging to separate DAPM
+ * contexts, we need to manually link the above widgets to their
+ * stream widgets at the card level.
+ */
+
+ /* Line In */
+ SND_SOC_DAPM_INPUT("LINEIN"),
+
+ /* Microphone inputs */
+ SND_SOC_DAPM_INPUT("MIC1"),
+ SND_SOC_DAPM_INPUT("MIC2"),
+
+ /* Microphone Bias */
+ SND_SOC_DAPM_SUPPLY("MBIAS", SUN8I_ADDA_MIC1G_MICBIAS_CTRL,
+ SUN8I_ADDA_MIC1G_MICBIAS_CTRL_MMICBIASEN,
+ 0, NULL, 0),
+
+ /* Mic input path */
+ SND_SOC_DAPM_PGA("Mic1 Amplifier", SUN8I_ADDA_MIC1G_MICBIAS_CTRL,
+ SUN8I_ADDA_MIC1G_MICBIAS_CTRL_MIC1AMPEN, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Mic2 Amplifier", SUN8I_ADDA_MIC2G_CTRL,
+ SUN8I_ADDA_MIC2G_CTRL_MIC2AMPEN, 0, NULL, 0),
+
+ /* Mixers */
+ SND_SOC_DAPM_MIXER("Left Mixer", SUN8I_ADDA_DAC_PA_SRC,
+ SUN8I_ADDA_DAC_PA_SRC_LMIXEN, 0,
+ sun8i_codec_mixer_controls,
+ ARRAY_SIZE(sun8i_codec_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Right Mixer", SUN8I_ADDA_DAC_PA_SRC,
+ SUN8I_ADDA_DAC_PA_SRC_RMIXEN, 0,
+ sun8i_codec_mixer_controls,
+ ARRAY_SIZE(sun8i_codec_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Left ADC Mixer", SUN8I_ADDA_ADC_AP_EN,
+ SUN8I_ADDA_ADC_AP_EN_ADCLEN, 0,
+ sun8i_codec_adc_mixer_controls,
+ ARRAY_SIZE(sun8i_codec_adc_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Right ADC Mixer", SUN8I_ADDA_ADC_AP_EN,
+ SUN8I_ADDA_ADC_AP_EN_ADCREN, 0,
+ sun8i_codec_adc_mixer_controls,
+ ARRAY_SIZE(sun8i_codec_adc_mixer_controls)),
+};
+
+static const struct snd_soc_dapm_route sun8i_codec_common_routes[] = {
+ /* Microphone Routes */
+ { "Mic1 Amplifier", NULL, "MIC1"},
+ { "Mic2 Amplifier", NULL, "MIC2"},
+
+ /* Left Mixer Routes */
+ { "Left Mixer", "DAC Playback Switch", "Left DAC" },
+ { "Left Mixer", "DAC Reversed Playback Switch", "Right DAC" },
+ { "Left Mixer", "Line In Playback Switch", "LINEIN" },
+ { "Left Mixer", "Mic1 Playback Switch", "Mic1 Amplifier" },
+ { "Left Mixer", "Mic2 Playback Switch", "Mic2 Amplifier" },
+
+ /* Right Mixer Routes */
+ { "Right Mixer", "DAC Playback Switch", "Right DAC" },
+ { "Right Mixer", "DAC Reversed Playback Switch", "Left DAC" },
+ { "Right Mixer", "Line In Playback Switch", "LINEIN" },
+ { "Right Mixer", "Mic1 Playback Switch", "Mic1 Amplifier" },
+ { "Right Mixer", "Mic2 Playback Switch", "Mic2 Amplifier" },
+
+ /* Left ADC Mixer Routes */
+ { "Left ADC Mixer", "Mixer Capture Switch", "Left Mixer" },
+ { "Left ADC Mixer", "Mixer Reversed Capture Switch", "Right Mixer" },
+ { "Left ADC Mixer", "Line In Capture Switch", "LINEIN" },
+ { "Left ADC Mixer", "Mic1 Capture Switch", "Mic1 Amplifier" },
+ { "Left ADC Mixer", "Mic2 Capture Switch", "Mic2 Amplifier" },
+
+ /* Right ADC Mixer Routes */
+ { "Right ADC Mixer", "Mixer Capture Switch", "Right Mixer" },
+ { "Right ADC Mixer", "Mixer Reversed Capture Switch", "Left Mixer" },
+ { "Right ADC Mixer", "Line In Capture Switch", "LINEIN" },
+ { "Right ADC Mixer", "Mic1 Capture Switch", "Mic1 Amplifier" },
+ { "Right ADC Mixer", "Mic2 Capture Switch", "Mic2 Amplifier" },
+
+ /* ADC Routes */
+ { "Left ADC", NULL, "Left ADC Mixer" },
+ { "Right ADC", NULL, "Right ADC Mixer" },
+};
+
+/* headphone specific controls, widgets, and routes */
+static const DECLARE_TLV_DB_SCALE(sun8i_codec_hp_vol_scale, -6300, 100, 1);
+static const struct snd_kcontrol_new sun8i_codec_headphone_controls[] = {
+ SOC_SINGLE_TLV("Headphone Playback Volume",
+ SUN8I_ADDA_HP_VOLC,
+ SUN8I_ADDA_HP_VOLC_HP_VOL, 0x3f, 0,
+ sun8i_codec_hp_vol_scale),
+ SOC_DOUBLE("Headphone Playback Switch",
+ SUN8I_ADDA_DAC_PA_SRC,
+ SUN8I_ADDA_DAC_PA_SRC_LHPPAMUTE,
+ SUN8I_ADDA_DAC_PA_SRC_RHPPAMUTE, 1, 0),
+};
+
+static const char * const sun8i_codec_hp_src_enum_text[] = {
+ "DAC", "Mixer",
+};
+
+static SOC_ENUM_DOUBLE_DECL(sun8i_codec_hp_src_enum,
+ SUN8I_ADDA_DAC_PA_SRC,
+ SUN8I_ADDA_DAC_PA_SRC_LHPIS,
+ SUN8I_ADDA_DAC_PA_SRC_RHPIS,
+ sun8i_codec_hp_src_enum_text);
+
+static const struct snd_kcontrol_new sun8i_codec_hp_src[] = {
+ SOC_DAPM_ENUM("Headphone Source Playback Route",
+ sun8i_codec_hp_src_enum),
+};
+
+static const struct snd_soc_dapm_widget sun8i_codec_headphone_widgets[] = {
+ SND_SOC_DAPM_MUX("Headphone Source Playback Route",
+ SND_SOC_NOPM, 0, 0, sun8i_codec_hp_src),
+ SND_SOC_DAPM_OUT_DRV("Headphone Amp", SUN8I_ADDA_PAEN_HP_CTRL,
+ SUN8I_ADDA_PAEN_HP_CTRL_HPPAEN, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("HPCOM Protection", SUN8I_ADDA_PAEN_HP_CTRL,
+ SUN8I_ADDA_PAEN_HP_CTRL_COMPTEN, 0, NULL, 0),
+ SND_SOC_DAPM_REG(snd_soc_dapm_supply, "HPCOM", SUN8I_ADDA_PAEN_HP_CTRL,
+ SUN8I_ADDA_PAEN_HP_CTRL_HPCOM_FC, 0x3, 0x3, 0),
+ SND_SOC_DAPM_OUTPUT("HP"),
+};
+
+static const struct snd_soc_dapm_route sun8i_codec_headphone_routes[] = {
+ { "Headphone Source Playback Route", "DAC", "Left DAC" },
+ { "Headphone Source Playback Route", "DAC", "Right DAC" },
+ { "Headphone Source Playback Route", "Mixer", "Left Mixer" },
+ { "Headphone Source Playback Route", "Mixer", "Right Mixer" },
+ { "Headphone Amp", NULL, "Headphone Source Playback Route" },
+ { "HPCOM", NULL, "HPCOM Protection" },
+ { "HP", NULL, "Headphone Amp" },
+};
+
+static int sun8i_codec_add_headphone(struct snd_soc_component *cmpnt)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(cmpnt);
+ struct device *dev = cmpnt->dev;
+ int ret;
+
+ ret = snd_soc_add_component_controls(cmpnt,
+ sun8i_codec_headphone_controls,
+ ARRAY_SIZE(sun8i_codec_headphone_controls));
+ if (ret) {
+ dev_err(dev, "Failed to add Headphone controls: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dapm_new_controls(dapm, sun8i_codec_headphone_widgets,
+ ARRAY_SIZE(sun8i_codec_headphone_widgets));
+ if (ret) {
+ dev_err(dev, "Failed to add Headphone DAPM widgets: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dapm_add_routes(dapm, sun8i_codec_headphone_routes,
+ ARRAY_SIZE(sun8i_codec_headphone_routes));
+ if (ret) {
+ dev_err(dev, "Failed to add Headphone DAPM routes: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* hmic specific widget */
+static const struct snd_soc_dapm_widget sun8i_codec_hmic_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("HBIAS", SUN8I_ADDA_MIC1G_MICBIAS_CTRL,
+ SUN8I_ADDA_MIC1G_MICBIAS_CTRL_HMICBIASEN,
+ 0, NULL, 0),
+};
+
+static int sun8i_codec_add_hmic(struct snd_soc_component *cmpnt)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(cmpnt);
+ struct device *dev = cmpnt->dev;
+ int ret;
+
+ ret = snd_soc_dapm_new_controls(dapm, sun8i_codec_hmic_widgets,
+ ARRAY_SIZE(sun8i_codec_hmic_widgets));
+ if (ret)
+ dev_err(dev, "Failed to add Mic3 DAPM widgets: %d\n", ret);
+
+ return ret;
+}
+
+/* line out specific controls, widgets and routes */
+static const DECLARE_TLV_DB_RANGE(sun8i_codec_lineout_vol_scale,
+ 0, 1, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
+ 2, 31, TLV_DB_SCALE_ITEM(-4350, 150, 0),
+);
+static const struct snd_kcontrol_new sun8i_codec_lineout_controls[] = {
+ SOC_SINGLE_TLV("Line Out Playback Volume",
+ SUN8I_ADDA_PHONE_GAIN_CTRL,
+ SUN8I_ADDA_PHONE_GAIN_CTRL_LINEOUT_VOL, 0x1f, 0,
+ sun8i_codec_lineout_vol_scale),
+ SOC_DOUBLE("Line Out Playback Switch",
+ SUN8I_ADDA_MIC2G_CTRL,
+ SUN8I_ADDA_MIC2G_CTRL_LINEOUTLEN,
+ SUN8I_ADDA_MIC2G_CTRL_LINEOUTREN, 1, 0),
+};
+
+static const char * const sun8i_codec_lineout_src_enum_text[] = {
+ "Stereo", "Mono Differential",
+};
+
+static SOC_ENUM_DOUBLE_DECL(sun8i_codec_lineout_src_enum,
+ SUN8I_ADDA_MIC2G_CTRL,
+ SUN8I_ADDA_MIC2G_CTRL_LINEOUTLSRC,
+ SUN8I_ADDA_MIC2G_CTRL_LINEOUTRSRC,
+ sun8i_codec_lineout_src_enum_text);
+
+static const struct snd_kcontrol_new sun8i_codec_lineout_src[] = {
+ SOC_DAPM_ENUM("Line Out Source Playback Route",
+ sun8i_codec_lineout_src_enum),
+};
+
+static const struct snd_soc_dapm_widget sun8i_codec_lineout_widgets[] = {
+ SND_SOC_DAPM_MUX("Line Out Source Playback Route",
+ SND_SOC_NOPM, 0, 0, sun8i_codec_lineout_src),
+ /* It is unclear whether this is a buffer or a gate; model it as a supply */
+ SND_SOC_DAPM_SUPPLY("Line Out Enable", SUN8I_ADDA_PAEN_HP_CTRL,
+ SUN8I_ADDA_PAEN_HP_CTRL_LINEOUTEN, 0, NULL, 0),
+ SND_SOC_DAPM_OUTPUT("LINEOUT"),
+};
+
+static const struct snd_soc_dapm_route sun8i_codec_lineout_routes[] = {
+ { "Line Out Source Playback Route", "Stereo", "Left Mixer" },
+ { "Line Out Source Playback Route", "Stereo", "Right Mixer" },
+ { "Line Out Source Playback Route", "Mono Differential", "Left Mixer" },
+ { "Line Out Source Playback Route", "Mono Differential", "Right Mixer" },
+ { "LINEOUT", NULL, "Line Out Source Playback Route" },
+ { "LINEOUT", NULL, "Line Out Enable", },
+};
+
+static int sun8i_codec_add_lineout(struct snd_soc_component *cmpnt)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(cmpnt);
+ struct device *dev = cmpnt->dev;
+ int ret;
+
+ ret = snd_soc_add_component_controls(cmpnt,
+ sun8i_codec_lineout_controls,
+ ARRAY_SIZE(sun8i_codec_lineout_controls));
+ if (ret) {
+ dev_err(dev, "Failed to add Line Out controls: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dapm_new_controls(dapm, sun8i_codec_lineout_widgets,
+ ARRAY_SIZE(sun8i_codec_lineout_widgets));
+ if (ret) {
+ dev_err(dev, "Failed to add Line Out DAPM widgets: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dapm_add_routes(dapm, sun8i_codec_lineout_routes,
+ ARRAY_SIZE(sun8i_codec_lineout_routes));
+ if (ret) {
+ dev_err(dev, "Failed to add Line Out DAPM routes: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct sun8i_codec_analog_quirks {
+ bool has_headphone;
+ bool has_hmic;
+ bool has_lineout;
+};
+
+static const struct sun8i_codec_analog_quirks sun8i_a23_quirks = {
+ .has_headphone = true,
+ .has_hmic = true,
+};
+
+static const struct sun8i_codec_analog_quirks sun8i_h3_quirks = {
+ .has_lineout = true,
+};
+
+static int sun8i_codec_analog_cmpnt_probe(struct snd_soc_component *cmpnt)
+{
+ struct device *dev = cmpnt->dev;
+ const struct sun8i_codec_analog_quirks *quirks;
+ int ret;
+
+ /*
+ * This never returns NULL unless someone directly registers a
+ * platform device matching this driver's name without specifying a
+ * device tree node.
+ */
+ quirks = of_device_get_match_data(dev);
+
+ /* Add controls, widgets, and routes for individual features */
+
+ if (quirks->has_headphone) {
+ ret = sun8i_codec_add_headphone(cmpnt);
+ if (ret)
+ return ret;
+ }
+
+ if (quirks->has_hmic) {
+ ret = sun8i_codec_add_hmic(cmpnt);
+ if (ret)
+ return ret;
+ }
+
+ if (quirks->has_lineout) {
+ ret = sun8i_codec_add_lineout(cmpnt);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_component_driver sun8i_codec_analog_cmpnt_drv = {
+ .controls = sun8i_codec_common_controls,
+ .num_controls = ARRAY_SIZE(sun8i_codec_common_controls),
+ .dapm_widgets = sun8i_codec_common_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(sun8i_codec_common_widgets),
+ .dapm_routes = sun8i_codec_common_routes,
+ .num_dapm_routes = ARRAY_SIZE(sun8i_codec_common_routes),
+ .probe = sun8i_codec_analog_cmpnt_probe,
+};
+
+static const struct of_device_id sun8i_codec_analog_of_match[] = {
+ {
+ .compatible = "allwinner,sun8i-a23-codec-analog",
+ .data = &sun8i_a23_quirks,
+ },
+ {
+ .compatible = "allwinner,sun8i-h3-codec-analog",
+ .data = &sun8i_h3_quirks,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun8i_codec_analog_of_match);
+
+static int sun8i_codec_analog_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct regmap *regmap;
+ void __iomem *base;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ dev_err(&pdev->dev, "Failed to map the registers\n");
+ return PTR_ERR(base);
+ }
+
+ regmap = devm_regmap_init(&pdev->dev, NULL, base, &adda_pr_regmap_cfg);
+ if (IS_ERR(regmap)) {
+ dev_err(&pdev->dev, "Failed to create regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ return devm_snd_soc_register_component(&pdev->dev,
+ &sun8i_codec_analog_cmpnt_drv,
+ NULL, 0);
+}
+
+static struct platform_driver sun8i_codec_analog_driver = {
+ .driver = {
+ .name = "sun8i-codec-analog",
+ .of_match_table = sun8i_codec_analog_of_match,
+ },
+ .probe = sun8i_codec_analog_probe,
+};
+module_platform_driver(sun8i_codec_analog_driver);
+
+MODULE_DESCRIPTION("Allwinner internal codec analog controls driver");
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sun8i-codec-analog");
diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
index deb597f7c302..eead6e7f205b 100644
--- a/sound/soc/tegra/tegra_alc5632.c
+++ b/sound/soc/tegra/tegra_alc5632.c
@@ -65,7 +65,7 @@ static int tegra_alc5632_asoc_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_soc_ops tegra_alc5632_asoc_ops = {
+static const struct snd_soc_ops tegra_alc5632_asoc_ops = {
.hw_params = tegra_alc5632_asoc_hw_params,
};
diff --git a/sound/soc/tegra/tegra_max98090.c b/sound/soc/tegra/tegra_max98090.c
index 902da36581d1..a403db6d563e 100644
--- a/sound/soc/tegra/tegra_max98090.c
+++ b/sound/soc/tegra/tegra_max98090.c
@@ -93,7 +93,7 @@ static int tegra_max98090_asoc_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_soc_ops tegra_max98090_ops = {
+static const struct snd_soc_ops tegra_max98090_ops = {
.hw_params = tegra_max98090_asoc_hw_params,
};
diff --git a/sound/soc/tegra/tegra_rt5640.c b/sound/soc/tegra/tegra_rt5640.c
index e5ef4e9c4ac5..25b9fc03ba62 100644
--- a/sound/soc/tegra/tegra_rt5640.c
+++ b/sound/soc/tegra/tegra_rt5640.c
@@ -76,7 +76,7 @@ static int tegra_rt5640_asoc_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_soc_ops tegra_rt5640_ops = {
+static const struct snd_soc_ops tegra_rt5640_ops = {
.hw_params = tegra_rt5640_asoc_hw_params,
};
diff --git a/sound/soc/tegra/tegra_rt5677.c b/sound/soc/tegra/tegra_rt5677.c
index 1470873ecde6..ebf58d0e0f10 100644
--- a/sound/soc/tegra/tegra_rt5677.c
+++ b/sound/soc/tegra/tegra_rt5677.c
@@ -93,7 +93,7 @@ static int tegra_rt5677_event_hp(struct snd_soc_dapm_widget *w,
return 0;
}
-static struct snd_soc_ops tegra_rt5677_ops = {
+static const struct snd_soc_ops tegra_rt5677_ops = {
.hw_params = tegra_rt5677_asoc_hw_params,
};
diff --git a/sound/soc/tegra/tegra_sgtl5000.c b/sound/soc/tegra/tegra_sgtl5000.c
index 1e76869dd488..4bbab098f50b 100644
--- a/sound/soc/tegra/tegra_sgtl5000.c
+++ b/sound/soc/tegra/tegra_sgtl5000.c
@@ -82,7 +82,7 @@ static int tegra_sgtl5000_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_soc_ops tegra_sgtl5000_ops = {
+static const struct snd_soc_ops tegra_sgtl5000_ops = {
.hw_params = tegra_sgtl5000_hw_params,
};
diff --git a/sound/soc/tegra/tegra_wm8753.c b/sound/soc/tegra/tegra_wm8753.c
index f0cd01dbfc38..bdedd1028569 100644
--- a/sound/soc/tegra/tegra_wm8753.c
+++ b/sound/soc/tegra/tegra_wm8753.c
@@ -89,7 +89,7 @@ static int tegra_wm8753_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_soc_ops tegra_wm8753_ops = {
+static const struct snd_soc_ops tegra_wm8753_ops = {
.hw_params = tegra_wm8753_hw_params,
};
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index e485278e027a..2013e9c4bba0 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -96,7 +96,7 @@ static int tegra_wm8903_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_soc_ops tegra_wm8903_ops = {
+static const struct snd_soc_ops tegra_wm8903_ops = {
.hw_params = tegra_wm8903_hw_params,
};
diff --git a/sound/soc/tegra/trimslice.c b/sound/soc/tegra/trimslice.c
index 2cea203c4f5f..870f84ab5005 100644
--- a/sound/soc/tegra/trimslice.c
+++ b/sound/soc/tegra/trimslice.c
@@ -74,7 +74,7 @@ static int trimslice_asoc_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_soc_ops trimslice_asoc_ops = {
+static const struct snd_soc_ops trimslice_asoc_ops = {
.hw_params = trimslice_asoc_hw_params,
};
diff --git a/sound/soc/zte/Kconfig b/sound/soc/zte/Kconfig
index c47eb25e441f..6d8a90d36315 100644
--- a/sound/soc/zte/Kconfig
+++ b/sound/soc/zte/Kconfig
@@ -1,17 +1,17 @@
-config ZX296702_SPDIF
- tristate "ZX296702 spdif"
- depends on SOC_ZX296702 || COMPILE_TEST
+config ZX_SPDIF
+ tristate "ZTE ZX SPDIF Driver Support"
+ depends on ARCH_ZX || COMPILE_TEST
depends on COMMON_CLK
select SND_SOC_GENERIC_DMAENGINE_PCM
help
Say Y or M if you want to add support for codecs attached to the
- zx296702 spdif interface
+ ZTE ZX SPDIF interface
-config ZX296702_I2S
- tristate "ZX296702 i2s"
- depends on SOC_ZX296702 || COMPILE_TEST
+config ZX_I2S
+ tristate "ZTE ZX I2S Driver Support"
+ depends on ARCH_ZX || COMPILE_TEST
depends on COMMON_CLK
select SND_SOC_GENERIC_DMAENGINE_PCM
help
Say Y or M if you want to add support for codecs attached to the
- zx296702 i2s interface
+ ZTE ZX I2S interface
diff --git a/sound/soc/zte/Makefile b/sound/soc/zte/Makefile
index 254ed2c8c1a0..77768f5fd10c 100644
--- a/sound/soc/zte/Makefile
+++ b/sound/soc/zte/Makefile
@@ -1,2 +1,2 @@
-obj-$(CONFIG_ZX296702_SPDIF) += zx296702-spdif.o
-obj-$(CONFIG_ZX296702_I2S) += zx296702-i2s.o
+obj-$(CONFIG_ZX_SPDIF) += zx-spdif.o
+obj-$(CONFIG_ZX_I2S) += zx-i2s.o
diff --git a/sound/soc/zte/zx296702-i2s.c b/sound/soc/zte/zx-i2s.c
index 1cad93dc1fcf..1cad93dc1fcf 100644
--- a/sound/soc/zte/zx296702-i2s.c
+++ b/sound/soc/zte/zx-i2s.c
diff --git a/sound/soc/zte/zx296702-spdif.c b/sound/soc/zte/zx-spdif.c
index 26265ce4caca..9fa6463ce5d7 100644
--- a/sound/soc/zte/zx296702-spdif.c
+++ b/sound/soc/zte/zx-spdif.c
@@ -71,7 +71,7 @@
#define ZX_VALID_RIGHT_TRACK (2 << 0)
#define ZX_VALID_TRACK_MASK (3 << 0)
-#define ZX_SPDIF_CLK_RAT (4 * 32)
+#define ZX_SPDIF_CLK_RAT (2 * 32)
struct zx_spdif_info {
struct snd_dmaengine_dai_dma_data dma_data;
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 2ddc034673a8..f36cb068dad3 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -206,7 +206,6 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
if (! snd_usb_parse_audio_interface(chip, interface)) {
usb_set_interface(dev, interface, 0); /* reset the current interface */
usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
- return -EINVAL;
}
return 0;
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index c470251cea4b..a2cdf3370afe 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -384,6 +384,9 @@ static void snd_complete_urb(struct urb *urb)
if (unlikely(atomic_read(&ep->chip->shutdown)))
goto exit_clear;
+ if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
+ goto exit_clear;
+
if (usb_pipeout(ep->pipe)) {
retire_outbound_urb(ep, ctx);
/* can be stopped during retire callback */
@@ -534,6 +537,11 @@ static int wait_clear_urbs(struct snd_usb_endpoint *ep)
alive, ep->ep_num);
clear_bit(EP_FLAG_STOPPING, &ep->flags);
+ ep->data_subs = NULL;
+ ep->sync_slave = NULL;
+ ep->retire_data_urb = NULL;
+ ep->prepare_data_urb = NULL;
+
return 0;
}
@@ -630,10 +638,24 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep,
ep->datainterval = fmt->datainterval;
ep->stride = frame_bits >> 3;
- ep->silence_value = pcm_format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0;
- /* assume max. frequency is 25% higher than nominal */
- ep->freqmax = ep->freqn + (ep->freqn >> 2);
+ switch (pcm_format) {
+ case SNDRV_PCM_FORMAT_U8:
+ ep->silence_value = 0x80;
+ break;
+ case SNDRV_PCM_FORMAT_DSD_U8:
+ case SNDRV_PCM_FORMAT_DSD_U16_LE:
+ case SNDRV_PCM_FORMAT_DSD_U32_LE:
+ case SNDRV_PCM_FORMAT_DSD_U16_BE:
+ case SNDRV_PCM_FORMAT_DSD_U32_BE:
+ ep->silence_value = 0x69;
+ break;
+ default:
+ ep->silence_value = 0;
+ }
+
+ /* assume max. frequency is 50% higher than nominal */
+ ep->freqmax = ep->freqn + (ep->freqn >> 1);
/* Round up freqmax to nearest integer in order to calculate maximum
* packet size, which must represent a whole number of frames.
* This is accomplished by adding 0x0.ffff before converting the
@@ -1006,10 +1028,6 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
if (--ep->use_count == 0) {
deactivate_urbs(ep, false);
- ep->data_subs = NULL;
- ep->sync_slave = NULL;
- ep->retire_data_urb = NULL;
- ep->prepare_data_urb = NULL;
set_bit(EP_FLAG_STOPPING, &ep->flags);
}
}
diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
index 2c44139b4041..33db205dd12b 100644
--- a/sound/usb/hiface/pcm.c
+++ b/sound/usb/hiface/pcm.c
@@ -445,6 +445,8 @@ static int hiface_pcm_prepare(struct snd_pcm_substream *alsa_sub)
mutex_lock(&rt->stream_mutex);
+ hiface_pcm_stream_stop(rt);
+
sub->dma_off = 0;
sub->period_off = 0;
diff --git a/sound/usb/line6/driver.h b/sound/usb/line6/driver.h
index 7e3a3aada222..a5c2e9ae5f17 100644
--- a/sound/usb/line6/driver.h
+++ b/sound/usb/line6/driver.h
@@ -98,10 +98,11 @@ struct line6_properties {
int altsetting;
- unsigned ep_ctrl_r;
- unsigned ep_ctrl_w;
- unsigned ep_audio_r;
- unsigned ep_audio_w;
+ unsigned int ctrl_if;
+ unsigned int ep_ctrl_r;
+ unsigned int ep_ctrl_w;
+ unsigned int ep_audio_r;
+ unsigned int ep_audio_w;
};
/* Capability bits */
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index 49cd4a65e390..6ab23e5aee71 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -153,6 +153,7 @@ static struct line6_pcm_properties podx3_pcm_properties = {
.rats = &podhd_ratden},
.bytes_per_channel = 3 /* SNDRV_PCM_FMTBIT_S24_3LE */
};
+static struct usb_driver podhd_driver;
static void podhd_startup_start_workqueue(unsigned long data);
static void podhd_startup_workqueue(struct work_struct *work);
@@ -291,8 +292,14 @@ static void podhd_disconnect(struct usb_line6 *line6)
struct usb_line6_podhd *pod = (struct usb_line6_podhd *)line6;
if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) {
+ struct usb_interface *intf;
+
del_timer_sync(&pod->startup_timer);
cancel_work_sync(&pod->startup_work);
+
+ intf = usb_ifnum_to_if(line6->usbdev,
+ pod->line6.properties->ctrl_if);
+ usb_driver_release_interface(&podhd_driver, intf);
}
}
@@ -304,10 +311,27 @@ static int podhd_init(struct usb_line6 *line6,
{
int err;
struct usb_line6_podhd *pod = (struct usb_line6_podhd *) line6;
+ struct usb_interface *intf;
line6->disconnect = podhd_disconnect;
if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) {
+ /* claim the data interface */
+ intf = usb_ifnum_to_if(line6->usbdev,
+ pod->line6.properties->ctrl_if);
+ if (!intf) {
+ dev_err(pod->line6.ifcdev, "interface %d not found\n",
+ pod->line6.properties->ctrl_if);
+ return -ENODEV;
+ }
+
+ err = usb_driver_claim_interface(&podhd_driver, intf, NULL);
+ if (err != 0) {
+ dev_err(pod->line6.ifcdev, "can't claim interface %d, error %d\n",
+ pod->line6.properties->ctrl_if, err);
+ return err;
+ }
+
/* create sysfs entries: */
err = snd_card_add_dev_attr(line6->card, &podhd_dev_attr_group);
if (err < 0)
@@ -406,6 +430,7 @@ static const struct line6_properties podhd_properties_table[] = {
.altsetting = 1,
.ep_ctrl_r = 0x81,
.ep_ctrl_w = 0x01,
+ .ctrl_if = 1,
.ep_audio_r = 0x86,
.ep_audio_w = 0x02,
},
@@ -417,6 +442,7 @@ static const struct line6_properties podhd_properties_table[] = {
.altsetting = 1,
.ep_ctrl_r = 0x81,
.ep_ctrl_w = 0x01,
+ .ctrl_if = 1,
.ep_audio_r = 0x86,
.ep_audio_w = 0x02,
},
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 2f8c388ef84f..4703caea56b2 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -932,9 +932,10 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
case USB_ID(0x046d, 0x0991):
+ case USB_ID(0x046d, 0x09a2): /* QuickCam Communicate Deluxe/S7500 */
/* Most audio usb devices lie about volume resolution.
* Most Logitech webcams have res = 384.
- * Proboly there is some logitech magic behind this number --fishor
+ * Probably there is some logitech magic behind this number --fishor
*/
if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
usb_audio_info(chip,
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 44d178ee9177..34c6d4f2c0b6 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -348,6 +348,16 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
alts = &iface->altsetting[1];
goto add_sync_ep;
+ case USB_ID(0x2466, 0x8003):
+ ep = 0x86;
+ iface = usb_ifnum_to_if(dev, 2);
+
+ if (!iface || iface->num_altsetting == 0)
+ return -EINVAL;
+
+ alts = &iface->altsetting[1];
+ goto add_sync_ep;
+
}
if (attr == USB_ENDPOINT_SYNC_ASYNC &&
altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
@@ -806,17 +816,18 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
if (ret < 0)
goto unlock;
- iface = usb_ifnum_to_if(subs->dev, subs->cur_audiofmt->iface);
- alts = &iface->altsetting[subs->cur_audiofmt->altset_idx];
- ret = snd_usb_init_sample_rate(subs->stream->chip,
- subs->cur_audiofmt->iface,
- alts,
- subs->cur_audiofmt,
- subs->cur_rate);
- if (ret < 0)
- goto unlock;
-
if (subs->need_setup_ep) {
+
+ iface = usb_ifnum_to_if(subs->dev, subs->cur_audiofmt->iface);
+ alts = &iface->altsetting[subs->cur_audiofmt->altset_idx];
+ ret = snd_usb_init_sample_rate(subs->stream->chip,
+ subs->cur_audiofmt->iface,
+ alts,
+ subs->cur_audiofmt,
+ subs->cur_rate);
+ if (ret < 0)
+ goto unlock;
+
ret = configure_endpoint(subs);
if (ret < 0)
goto unlock;
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 2782155ae3ce..b3fd2382fdd9 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1165,6 +1165,18 @@ static bool is_marantz_denon_dac(unsigned int id)
return false;
}
+/* TEAC UD-501/UD-503/NT-503 USB DACs need a vendor cmd to switch
+ * between PCM/DOP and native DSD mode
+ */
+static bool is_teac_50X_dac(unsigned int id)
+{
+ switch (id) {
+ case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */
+ return true;
+ }
+ return false;
+}
+
int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
struct audioformat *fmt)
{
@@ -1192,6 +1204,26 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
break;
}
mdelay(20);
+ } else if (is_teac_50X_dac(subs->stream->chip->usb_id)) {
+ /* Vendor mode switch cmd is required. */
+ switch (fmt->altsetting) {
+ case 3: /* DSD mode (DSD_U32) requested */
+ err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), 0,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ 1, 1, NULL, 0);
+ if (err < 0)
+ return err;
+ break;
+
+ case 2: /* PCM or DOP mode (S32) requested */
+ case 1: /* PCM mode (S16) requested */
+ err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), 0,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ 0, 1, NULL, 0);
+ if (err < 0)
+ return err;
+ break;
+ }
}
return 0;
}
@@ -1337,5 +1369,11 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
}
+ /* TEAC devices with USB DAC functionality */
+ if (is_teac_50X_dac(chip->usb_id)) {
+ if (fp->altsetting == 3)
+ return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ }
+
return 0;
}
diff --git a/tools/hv/Makefile b/tools/hv/Makefile
index a8c4644022a6..0d1e61b81844 100644
--- a/tools/hv/Makefile
+++ b/tools/hv/Makefile
@@ -1,9 +1,8 @@
# Makefile for Hyper-V tools
CC = $(CROSS_COMPILE)gcc
-PTHREAD_LIBS = -lpthread
WARNINGS = -Wall -Wextra
-CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) $(shell getconf LFS_CFLAGS)
+CFLAGS = $(WARNINGS) -g $(shell getconf LFS_CFLAGS)
CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
index fdc9ca4c0356..26ae609a9448 100644
--- a/tools/hv/hv_fcopy_daemon.c
+++ b/tools/hv/hv_fcopy_daemon.c
@@ -18,21 +18,14 @@
#include <sys/types.h>
-#include <sys/socket.h>
-#include <sys/poll.h>
-#include <linux/types.h>
-#include <linux/kdev_t.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
-#include <string.h>
-#include <ctype.h>
#include <errno.h>
#include <linux/hyperv.h>
#include <syslog.h>
#include <sys/stat.h>
#include <fcntl.h>
-#include <dirent.h>
#include <getopt.h>
static int target_fd;
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index bc7adb84e679..f1758fcbc37d 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -22,8 +22,6 @@
*/
-#include <sys/types.h>
-#include <sys/socket.h>
#include <sys/poll.h>
#include <sys/utsname.h>
#include <stdio.h>
@@ -34,7 +32,6 @@
#include <errno.h>
#include <arpa/inet.h>
#include <linux/hyperv.h>
-#include <linux/netlink.h>
#include <ifaddrs.h>
#include <netdb.h>
#include <syslog.h>
@@ -96,13 +93,13 @@ static struct utsname uts_buf;
#define KVP_CONFIG_LOC "/var/lib/hyperv"
+#ifndef KVP_SCRIPTS_PATH
+#define KVP_SCRIPTS_PATH "/usr/libexec/hypervkvpd/"
+#endif
+
#define MAX_FILE_NAME 100
#define ENTRIES_PER_BLOCK 50
-#ifndef SOL_NETLINK
-#define SOL_NETLINK 270
-#endif
-
struct kvp_record {
char key[HV_KVP_EXCHANGE_MAX_KEY_SIZE];
char value[HV_KVP_EXCHANGE_MAX_VALUE_SIZE];
@@ -702,7 +699,7 @@ static char *kvp_mac_to_if_name(char *mac)
if (dir == NULL)
return NULL;
- snprintf(dev_id, sizeof(dev_id), kvp_net_dir);
+ snprintf(dev_id, sizeof(dev_id), "%s", kvp_net_dir);
q = dev_id + strlen(kvp_net_dir);
while ((entry = readdir(dir)) != NULL) {
@@ -825,7 +822,7 @@ static void kvp_get_ipconfig_info(char *if_name,
* .
*/
- sprintf(cmd, "%s", "hv_get_dns_info");
+ sprintf(cmd, KVP_SCRIPTS_PATH "%s", "hv_get_dns_info");
/*
* Execute the command to gather DNS info.
@@ -842,7 +839,7 @@ static void kvp_get_ipconfig_info(char *if_name,
* Enabled: DHCP enabled.
*/
- sprintf(cmd, "%s %s", "hv_get_dhcp_info", if_name);
+ sprintf(cmd, KVP_SCRIPTS_PATH "%s %s", "hv_get_dhcp_info", if_name);
file = popen(cmd, "r");
if (file == NULL)
@@ -1348,7 +1345,8 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
* invoke the external script to do its magic.
*/
- snprintf(cmd, sizeof(cmd), "%s %s", "hv_set_ifconfig", if_file);
+ snprintf(cmd, sizeof(cmd), KVP_SCRIPTS_PATH "%s %s",
+ "hv_set_ifconfig", if_file);
if (system(cmd)) {
syslog(LOG_ERR, "Failed to execute cmd '%s'; error: %d %s",
cmd, errno, strerror(errno));
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index f39c0e9c0d5c..f0c6f54a8b2f 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -247,6 +247,7 @@ void print_usage(void)
fprintf(stderr, "Usage: generic_buffer [options]...\n"
"Capture, convert and output data from IIO device buffer\n"
" -a Auto-activate all available channels\n"
+ " -A Force-activate ALL channels\n"
" -c <n> Do n conversions\n"
" -e Disable wait for event (new data)\n"
" -g Use trigger-less mode\n"
@@ -347,16 +348,22 @@ int main(int argc, char **argv)
int noevents = 0;
int notrigger = 0;
char *dummy;
+ bool force_autochannels = false;
struct iio_channel_info *channels = NULL;
register_cleanup();
- while ((c = getopt_long(argc, argv, "ac:egl:n:N:t:T:w:", longopts, NULL)) != -1) {
+ while ((c = getopt_long(argc, argv, "aAc:egl:n:N:t:T:w:?", longopts,
+ NULL)) != -1) {
switch (c) {
case 'a':
autochannels = AUTOCHANNELS_ENABLED;
break;
+ case 'A':
+ autochannels = AUTOCHANNELS_ENABLED;
+ force_autochannels = true;
+ break;
case 'c':
errno = 0;
num_loops = strtoul(optarg, &dummy, 10);
@@ -519,15 +526,16 @@ int main(int argc, char **argv)
"diag %s\n", dev_dir_name);
goto error;
}
- if (num_channels && autochannels == AUTOCHANNELS_ENABLED) {
+ if (num_channels && autochannels == AUTOCHANNELS_ENABLED &&
+ !force_autochannels) {
fprintf(stderr, "Auto-channels selected but some channels "
"are already activated in sysfs\n");
fprintf(stderr, "Proceeding without activating any channels\n");
}
- if (!num_channels && autochannels == AUTOCHANNELS_ENABLED) {
- fprintf(stderr,
- "No channels are enabled, enabling all channels\n");
+ if ((!num_channels && autochannels == AUTOCHANNELS_ENABLED) ||
+ (autochannels == AUTOCHANNELS_ENABLED && force_autochannels)) {
+ fprintf(stderr, "Enabling all channels\n");
ret = enable_disable_all_channels(dev_dir_name, 1);
if (ret) {
diff --git a/tools/include/uapi/linux/hw_breakpoint.h b/tools/include/uapi/linux/hw_breakpoint.h
index b04000a2296a..2b65efd19a46 100644
--- a/tools/include/uapi/linux/hw_breakpoint.h
+++ b/tools/include/uapi/linux/hw_breakpoint.h
@@ -4,7 +4,11 @@
enum {
HW_BREAKPOINT_LEN_1 = 1,
HW_BREAKPOINT_LEN_2 = 2,
+ HW_BREAKPOINT_LEN_3 = 3,
HW_BREAKPOINT_LEN_4 = 4,
+ HW_BREAKPOINT_LEN_5 = 5,
+ HW_BREAKPOINT_LEN_6 = 6,
+ HW_BREAKPOINT_LEN_7 = 7,
HW_BREAKPOINT_LEN_8 = 8,
};
diff --git a/tools/power/acpi/tools/ec/ec_access.c b/tools/power/acpi/tools/ec/ec_access.c
index 6b8aaed44f2c..5f50642386db 100644
--- a/tools/power/acpi/tools/ec/ec_access.c
+++ b/tools/power/acpi/tools/ec/ec_access.c
@@ -46,7 +46,7 @@ void usage(char progname[], int exit_status)
puts("\t-b offset : Read value at byte_offset (in hex)");
puts("\t-w offset -v value : Write value at byte_offset");
puts("\t-h : Print this help\n\n");
- puts("Offsets and values are in hexadecimal number sytem.");
+ puts("Offsets and values are in hexadecimal number system.");
puts("The offset and value must be between 0 and 0xff.");
exit(exit_status);
}
diff --git a/tools/spi/spidev_test.c b/tools/spi/spidev_test.c
index f046b77cfefe..816f119c9b7b 100644
--- a/tools/spi/spidev_test.c
+++ b/tools/spi/spidev_test.c
@@ -315,7 +315,7 @@ static void transfer_file(int fd, char *filename)
pabort("can't stat input file");
tx_fd = open(filename, O_RDONLY);
- if (fd < 0)
+ if (tx_fd < 0)
pabort("can't open input file");
tx = malloc(sb.st_size);
diff --git a/tools/testing/selftests/breakpoints/Makefile b/tools/testing/selftests/breakpoints/Makefile
index 74e533fd4bc5..61b79e8df1f4 100644
--- a/tools/testing/selftests/breakpoints/Makefile
+++ b/tools/testing/selftests/breakpoints/Makefile
@@ -5,6 +5,9 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
ifeq ($(ARCH),x86)
TEST_PROGS := breakpoint_test
endif
+ifeq ($(ARCH),aarch64)
+TEST_PROGS := breakpoint_test_arm64
+endif
TEST_PROGS += step_after_suspend_test
@@ -13,4 +16,4 @@ all: $(TEST_PROGS)
include ../lib.mk
clean:
- rm -fr breakpoint_test step_after_suspend_test
+ rm -fr breakpoint_test breakpoint_test_arm64 step_after_suspend_test
diff --git a/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
new file mode 100644
index 000000000000..3897e996541e
--- /dev/null
+++ b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Original Code by Pavel Labath <labath@google.com>
+ *
+ * Code modified by Pratyush Anand <panand@redhat.com>
+ * for testing different byte select for each access size.
+ *
+ */
+
+#define _GNU_SOURCE
+
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/ptrace.h>
+#include <sys/param.h>
+#include <sys/uio.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <elf.h>
+#include <errno.h>
+#include <signal.h>
+
+#include "../kselftest.h"
+
+static volatile uint8_t var[96] __attribute__((__aligned__(32)));
+
+static void child(int size, int wr)
+{
+ volatile uint8_t *addr = &var[32 + wr];
+
+ if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) != 0) {
+ perror("ptrace(PTRACE_TRACEME) failed");
+ _exit(1);
+ }
+
+ if (raise(SIGSTOP) != 0) {
+ perror("raise(SIGSTOP) failed");
+ _exit(1);
+ }
+
+ if ((uintptr_t) addr % size) {
+ perror("Wrong address write for the given size\n");
+ _exit(1);
+ }
+ switch (size) {
+ case 1:
+ *addr = 47;
+ break;
+ case 2:
+ *(uint16_t *)addr = 47;
+ break;
+ case 4:
+ *(uint32_t *)addr = 47;
+ break;
+ case 8:
+ *(uint64_t *)addr = 47;
+ break;
+ case 16:
+ __asm__ volatile ("stp x29, x30, %0" : "=m" (addr[0]));
+ break;
+ case 32:
+ __asm__ volatile ("stp q29, q30, %0" : "=m" (addr[0]));
+ break;
+ }
+
+ _exit(0);
+}
+
+static bool set_watchpoint(pid_t pid, int size, int wp)
+{
+ const volatile uint8_t *addr = &var[32 + wp];
+ const int offset = (uintptr_t)addr % 8;
+ const unsigned int byte_mask = ((1 << size) - 1) << offset;
+ const unsigned int type = 2; /* Write */
+ const unsigned int enable = 1;
+ const unsigned int control = byte_mask << 5 | type << 3 | enable;
+ struct user_hwdebug_state dreg_state;
+ struct iovec iov;
+
+ memset(&dreg_state, 0, sizeof(dreg_state));
+ dreg_state.dbg_regs[0].addr = (uintptr_t)(addr - offset);
+ dreg_state.dbg_regs[0].ctrl = control;
+ iov.iov_base = &dreg_state;
+ iov.iov_len = offsetof(struct user_hwdebug_state, dbg_regs) +
+ sizeof(dreg_state.dbg_regs[0]);
+ if (ptrace(PTRACE_SETREGSET, pid, NT_ARM_HW_WATCH, &iov) == 0)
+ return true;
+
+ if (errno == EIO) {
+ printf("ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) "
+ "not supported on this hardware\n");
+ ksft_exit_skip();
+ }
+ perror("ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) failed");
+ return false;
+}
+
+static bool run_test(int wr_size, int wp_size, int wr, int wp)
+{
+ int status;
+ siginfo_t siginfo;
+ pid_t pid = fork();
+ pid_t wpid;
+
+ if (pid < 0) {
+ perror("fork() failed");
+ return false;
+ }
+ if (pid == 0)
+ child(wr_size, wr);
+
+ wpid = waitpid(pid, &status, __WALL);
+ if (wpid != pid) {
+ perror("waitpid() failed");
+ return false;
+ }
+ if (!WIFSTOPPED(status)) {
+ printf("child did not stop\n");
+ return false;
+ }
+ if (WSTOPSIG(status) != SIGSTOP) {
+ printf("child did not stop with SIGSTOP\n");
+ return false;
+ }
+
+ if (!set_watchpoint(pid, wp_size, wp))
+ return false;
+
+ if (ptrace(PTRACE_CONT, pid, NULL, NULL) < 0) {
+ perror("ptrace(PTRACE_SINGLESTEP) failed");
+ return false;
+ }
+
+ alarm(3);
+ wpid = waitpid(pid, &status, __WALL);
+ if (wpid != pid) {
+ perror("waitpid() failed");
+ return false;
+ }
+ alarm(0);
+ if (WIFEXITED(status)) {
+ printf("child did not single-step\t");
+ return false;
+ }
+ if (!WIFSTOPPED(status)) {
+ printf("child did not stop\n");
+ return false;
+ }
+ if (WSTOPSIG(status) != SIGTRAP) {
+ printf("child did not stop with SIGTRAP\n");
+ return false;
+ }
+ if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0) {
+ perror("ptrace(PTRACE_GETSIGINFO)");
+ return false;
+ }
+ if (siginfo.si_code != TRAP_HWBKPT) {
+ printf("Unexpected si_code %d\n", siginfo.si_code);
+ return false;
+ }
+
+ kill(pid, SIGKILL);
+ wpid = waitpid(pid, &status, 0);
+ if (wpid != pid) {
+ perror("waitpid() failed");
+ return false;
+ }
+ return true;
+}
+
+static void sigalrm(int sig)
+{
+}
+
+int main(int argc, char **argv)
+{
+ int opt;
+ bool succeeded = true;
+ struct sigaction act;
+ int wr, wp, size;
+ bool result;
+
+ act.sa_handler = sigalrm;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = 0;
+ sigaction(SIGALRM, &act, NULL);
+ for (size = 1; size <= 32; size = size*2) {
+ for (wr = 0; wr <= 32; wr = wr + size) {
+ for (wp = wr - size; wp <= wr + size; wp = wp + size) {
+ printf("Test size = %d write offset = %d watchpoint offset = %d\t", size, wr, wp);
+ result = run_test(size, MIN(size, 8), wr, wp);
+ if ((result && wr == wp) || (!result && wr != wp)) {
+ printf("[OK]\n");
+ ksft_inc_pass_cnt();
+ } else {
+ printf("[FAILED]\n");
+ ksft_inc_fail_cnt();
+ succeeded = false;
+ }
+ }
+ }
+ }
+
+ for (size = 1; size <= 32; size = size*2) {
+ printf("Test size = %d write offset = %d watchpoint offset = -8\t", size, -size);
+
+ if (run_test(size, 8, -size, -8)) {
+ printf("[OK]\n");
+ ksft_inc_pass_cnt();
+ } else {
+ printf("[FAILED]\n");
+ ksft_inc_fail_cnt();
+ succeeded = false;
+ }
+ }
+
+ ksft_print_cnts();
+ if (succeeded)
+ ksft_exit_pass();
+ else
+ ksft_exit_fail();
+}
diff --git a/tools/usb/usbip/.gitignore b/tools/usb/usbip/.gitignore
index 9aad9e30a8ba..03b892c8bd8c 100644
--- a/tools/usb/usbip/.gitignore
+++ b/tools/usb/usbip/.gitignore
@@ -2,6 +2,7 @@ Makefile
Makefile.in
aclocal.m4
autom4te.cache/
+compile
config.guess
config.h
config.h.in
@@ -21,7 +22,10 @@ src/Makefile.in
stamp-h1
libsrc/libusbip.la
libsrc/libusbip_la-names.lo
+libsrc/libusbip_la-sysfs_utils.lo
libsrc/libusbip_la-usbip_common.lo
+libsrc/libusbip_la-usbip_device_driver.lo
+libsrc/libusbip_la-usbip_host_common.lo
libsrc/libusbip_la-usbip_host_driver.lo
libsrc/libusbip_la-vhci_driver.lo
src/usbip
diff --git a/tools/usb/usbip/src/usbipd.c b/tools/usb/usbip/src/usbipd.c
index a0972dea9e6c..009afb4a3aae 100644
--- a/tools/usb/usbip/src/usbipd.c
+++ b/tools/usb/usbip/src/usbipd.c
@@ -398,13 +398,6 @@ static int listen_all_addrinfo(struct addrinfo *ai_head, int sockfdlist[],
* (see do_standalone_mode()) */
usbip_net_set_v6only(sock);
- if (sock >= FD_SETSIZE) {
- err("FD_SETSIZE: %s: sock=%d, max=%d",
- ai_buf, sock, FD_SETSIZE);
- close(sock);
- continue;
- }
-
ret = bind(sock, ai->ai_addr, ai->ai_addrlen);
if (ret < 0) {
err("bind: %s: %d (%s)",
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 27a1f6341d41..ae95fc0e3214 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -425,6 +425,11 @@ int kvm_timer_hyp_init(void)
info = arch_timer_get_kvm_info();
timecounter = &info->timecounter;
+ if (!timecounter->cc) {
+ kvm_err("kvm_arch_timer: uninitialized timecounter\n");
+ return -ENODEV;
+ }
+
if (info->virtual_irq <= 0) {
kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
info->virtual_irq);
@@ -498,17 +503,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
if (ret)
return ret;
-
- /*
- * There is a potential race here between VCPUs starting for the first
- * time, which may be enabling the timer multiple times. That doesn't
- * hurt though, because we're just setting a variable to the same
- * variable that it already was. The important thing is that all
- * VCPUs have the enabled variable set, before entering the guest, if
- * the arch timers are enabled.
- */
- if (timecounter)
- timer->enabled = 1;
+ timer->enabled = 1;
return 0;
}
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 4660a7d04eea..8c2b3cdcb2c5 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -632,21 +632,22 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, int id)
int index;
u64 indirect_ptr;
gfn_t gfn;
+ int esz = GITS_BASER_ENTRY_SIZE(baser);
if (!(baser & GITS_BASER_INDIRECT)) {
phys_addr_t addr;
- if (id >= (l1_tbl_size / GITS_BASER_ENTRY_SIZE(baser)))
+ if (id >= (l1_tbl_size / esz))
return false;
- addr = BASER_ADDRESS(baser) + id * GITS_BASER_ENTRY_SIZE(baser);
+ addr = BASER_ADDRESS(baser) + id * esz;
gfn = addr >> PAGE_SHIFT;
return kvm_is_visible_gfn(its->dev->kvm, gfn);
}
/* calculate and check the index into the 1st level */
- index = id / (SZ_64K / GITS_BASER_ENTRY_SIZE(baser));
+ index = id / (SZ_64K / esz);
if (index >= (l1_tbl_size / sizeof(u64)))
return false;
@@ -670,8 +671,8 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, int id)
indirect_ptr &= GENMASK_ULL(51, 16);
/* Find the address of the actual entry */
- index = id % (SZ_64K / GITS_BASER_ENTRY_SIZE(baser));
- indirect_ptr += index * GITS_BASER_ENTRY_SIZE(baser);
+ index = id % (SZ_64K / esz);
+ indirect_ptr += index * esz;
gfn = indirect_ptr >> PAGE_SHIFT;
return kvm_is_visible_gfn(its->dev->kvm, gfn);
diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c
index ce1f4ed9daf4..fbe87a63d250 100644
--- a/virt/kvm/arm/vgic/vgic-kvm-device.c
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -221,11 +221,9 @@ int kvm_register_vgic_device(unsigned long type)
ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
KVM_DEV_TYPE_ARM_VGIC_V3);
-#ifdef CONFIG_KVM_ARM_VGIC_V3_ITS
if (ret)
break;
ret = kvm_vgic_register_its_device();
-#endif
break;
}
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index b44b359cbbad..78e34bc4d89b 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -129,6 +129,7 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
unsigned long val)
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
+ u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
int i;
/* GICD_ITARGETSR[0-7] are read-only */
@@ -141,7 +142,7 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
spin_lock(&irq->irq_lock);
- irq->targets = (val >> (i * 8)) & 0xff;
+ irq->targets = (val >> (i * 8)) & cpu_mask;
target = irq->targets ? __ffs(irq->targets) : 0;
irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 0d3c76a4208b..50f42f0f8c4f 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -42,7 +42,6 @@ u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
return reg | ((u64)val << lower);
}
-#ifdef CONFIG_KVM_ARM_VGIC_V3_ITS
bool vgic_has_its(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
@@ -52,7 +51,6 @@ bool vgic_has_its(struct kvm *kvm)
return dist->has_its;
}
-#endif
static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len)
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 9d9e014765a2..859f65c6e056 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -84,37 +84,11 @@ int vgic_v3_probe(const struct gic_kvm_info *info);
int vgic_v3_map_resources(struct kvm *kvm);
int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address);
-#ifdef CONFIG_KVM_ARM_VGIC_V3_ITS
int vgic_register_its_iodevs(struct kvm *kvm);
bool vgic_has_its(struct kvm *kvm);
int kvm_vgic_register_its_device(void);
void vgic_enable_lpis(struct kvm_vcpu *vcpu);
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
-#else
-static inline int vgic_register_its_iodevs(struct kvm *kvm)
-{
- return -ENODEV;
-}
-
-static inline bool vgic_has_its(struct kvm *kvm)
-{
- return false;
-}
-
-static inline int kvm_vgic_register_its_device(void)
-{
- return -ENODEV;
-}
-
-static inline void vgic_enable_lpis(struct kvm_vcpu *vcpu)
-{
-}
-
-static inline int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
-{
- return -ENODEV;
-}
-#endif
int kvm_register_vgic_device(unsigned long type);
int vgic_lazy_init(struct kvm *kvm);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c55f5d6a7767..823544c166be 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -70,16 +70,19 @@ MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
/* Architectures should define their poll value according to the halt latency */
-static unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
+unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);
+EXPORT_SYMBOL_GPL(halt_poll_ns);
/* Default doubles per-vcpu halt_poll_ns. */
-static unsigned int halt_poll_ns_grow = 2;
+unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, S_IRUGO | S_IWUSR);
+EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
/* Default resets per-vcpu halt_poll_ns . */
-static unsigned int halt_poll_ns_shrink;
+unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, S_IRUGO | S_IWUSR);
+EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
/*
* Ordering of locks:
@@ -595,7 +598,7 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
stat_data->kvm = kvm;
stat_data->offset = p->offset;
kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
- if (!debugfs_create_file(p->name, 0444,
+ if (!debugfs_create_file(p->name, 0644,
kvm->debugfs_dentry,
stat_data,
stat_fops_per_vm[p->kind]))
@@ -3669,11 +3672,23 @@ static int vm_stat_get_per_vm(void *data, u64 *val)
return 0;
}
+static int vm_stat_clear_per_vm(void *data, u64 val)
+{
+ struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+
+ if (val)
+ return -EINVAL;
+
+ *(ulong *)((void *)stat_data->kvm + stat_data->offset) = 0;
+
+ return 0;
+}
+
static int vm_stat_get_per_vm_open(struct inode *inode, struct file *file)
{
__simple_attr_check_format("%llu\n", 0ull);
return kvm_debugfs_open(inode, file, vm_stat_get_per_vm,
- NULL, "%llu\n");
+ vm_stat_clear_per_vm, "%llu\n");
}
static const struct file_operations vm_stat_get_per_vm_fops = {
@@ -3699,11 +3714,26 @@ static int vcpu_stat_get_per_vm(void *data, u64 *val)
return 0;
}
+static int vcpu_stat_clear_per_vm(void *data, u64 val)
+{
+ int i;
+ struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+ struct kvm_vcpu *vcpu;
+
+ if (val)
+ return -EINVAL;
+
+ kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
+ *(u64 *)((void *)vcpu + stat_data->offset) = 0;
+
+ return 0;
+}
+
static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
{
__simple_attr_check_format("%llu\n", 0ull);
return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm,
- NULL, "%llu\n");
+ vcpu_stat_clear_per_vm, "%llu\n");
}
static const struct file_operations vcpu_stat_get_per_vm_fops = {
@@ -3738,7 +3768,26 @@ static int vm_stat_get(void *_offset, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
+static int vm_stat_clear(void *_offset, u64 val)
+{
+ unsigned offset = (long)_offset;
+ struct kvm *kvm;
+ struct kvm_stat_data stat_tmp = {.offset = offset};
+
+ if (val)
+ return -EINVAL;
+
+ spin_lock(&kvm_lock);
+ list_for_each_entry(kvm, &vm_list, vm_list) {
+ stat_tmp.kvm = kvm;
+ vm_stat_clear_per_vm((void *)&stat_tmp, 0);
+ }
+ spin_unlock(&kvm_lock);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
static int vcpu_stat_get(void *_offset, u64 *val)
{
@@ -3758,7 +3807,27 @@ static int vcpu_stat_get(void *_offset, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
+static int vcpu_stat_clear(void *_offset, u64 val)
+{
+ unsigned offset = (long)_offset;
+ struct kvm *kvm;
+ struct kvm_stat_data stat_tmp = {.offset = offset};
+
+ if (val)
+ return -EINVAL;
+
+ spin_lock(&kvm_lock);
+ list_for_each_entry(kvm, &vm_list, vm_list) {
+ stat_tmp.kvm = kvm;
+ vcpu_stat_clear_per_vm((void *)&stat_tmp, 0);
+ }
+ spin_unlock(&kvm_lock);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
+ "%llu\n");
static const struct file_operations *stat_fops[] = {
[KVM_STAT_VCPU] = &vcpu_stat_fops,
@@ -3776,7 +3845,7 @@ static int kvm_init_debug(void)
kvm_debugfs_num_entries = 0;
for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
- if (!debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
+ if (!debugfs_create_file(p->name, 0644, kvm_debugfs_dir,
(void *)(long)p->offset,
stat_fops[p->kind]))
goto out_dir;